
This patch adds an emergency spill slot for use when the compiler runs out of registers. PR #139201 introduced `vstelm` instructions with only an 8-bit immediate offset, which left no spill slot available to store the spilled registers.
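
For reference, a minimal sketch of the usual LLVM mechanism for reserving such an emergency slot is shown below. It assumes the change lands in a `processFunctionBeforeFrameFinalized` hook; the class name, the chosen register class, and the offset check are illustrative assumptions, not the actual diff of this PR.

```cpp
// Sketch only: reserve a scavenging spill slot when frame offsets may not
// fit a small immediate field (e.g. the 8-bit offset of vstelm), so the
// register scavenger always has a stack slot to spill into.
#include "LoongArchFrameLowering.h"            // assumed backend header
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

void LoongArchFrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const TargetRegisterClass &RC = LoongArch::GPRRegClass; // assumed class

  // If the estimated frame is too large for the short immediate form,
  // create a dedicated spill stack object and hand it to the scavenger.
  if (!isInt<8>(MFI.estimateStackSize(MF))) {
    int FI = MFI.CreateSpillStackObject(TRI->getSpillSize(RC),
                                        TRI->getSpillAlign(RC));
    RS->addScavengingFrameIndex(FI);
  }
}
```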
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32S
; RUN: llc --mtriple=loongarch32 --mattr=+f -target-abi=ilp32s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F-ILP32S
; RUN: llc --mtriple=loongarch32 --mattr=+f -target-abi=ilp32d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32F-ILP32D
; RUN: llc --mtriple=loongarch32 --mattr=+d -target-abi=ilp32s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D-ILP32S
; RUN: llc --mtriple=loongarch32 --mattr=+d -target-abi=ilp32d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA32D-ILP32D
; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64S
; RUN: llc --mtriple=loongarch64 --mattr=+f -target-abi=lp64s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F-LP64S
; RUN: llc --mtriple=loongarch64 --mattr=+f -target-abi=lp64d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64F-LP64D
; RUN: llc --mtriple=loongarch64 --mattr=+d -target-abi=lp64s --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D-LP64S
; RUN: llc --mtriple=loongarch64 --mattr=+d -target-abi=lp64d --verify-machineinstrs < %s | FileCheck %s --check-prefix=LA64D-LP64D
define i32 @callee_half_in_fregs(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, half %i) nounwind {
; LA32S-LABEL: callee_half_in_fregs:
; LA32S: # %bb.0:
; LA32S-NEXT: addi.w $sp, $sp, -16
; LA32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32S-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
; LA32S-NEXT: ld.hu $a1, $sp, 16
; LA32S-NEXT: move $fp, $a0
; LA32S-NEXT: move $a0, $a1
; LA32S-NEXT: bl __extendhfsf2
; LA32S-NEXT: bl __fixsfsi
; LA32S-NEXT: add.w $a0, $fp, $a0
; LA32S-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
; LA32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32S-NEXT: addi.w $sp, $sp, 16
; LA32S-NEXT: ret
;
; LA32F-ILP32S-LABEL: callee_half_in_fregs:
; LA32F-ILP32S: # %bb.0:
; LA32F-ILP32S-NEXT: addi.w $sp, $sp, -16
; LA32F-ILP32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-ILP32S-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
; LA32F-ILP32S-NEXT: ld.hu $a1, $sp, 16
; LA32F-ILP32S-NEXT: move $fp, $a0
; LA32F-ILP32S-NEXT: move $a0, $a1
; LA32F-ILP32S-NEXT: bl __extendhfsf2
; LA32F-ILP32S-NEXT: movgr2fr.w $fa0, $a0
; LA32F-ILP32S-NEXT: ftintrz.w.s $fa0, $fa0
; LA32F-ILP32S-NEXT: movfr2gr.s $a0, $fa0
; LA32F-ILP32S-NEXT: add.w $a0, $fp, $a0
; LA32F-ILP32S-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
; LA32F-ILP32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-ILP32S-NEXT: addi.w $sp, $sp, 16
; LA32F-ILP32S-NEXT: ret
;
; LA32F-ILP32D-LABEL: callee_half_in_fregs:
; LA32F-ILP32D: # %bb.0:
; LA32F-ILP32D-NEXT: addi.w $sp, $sp, -16
; LA32F-ILP32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-ILP32D-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
; LA32F-ILP32D-NEXT: move $fp, $a0
; LA32F-ILP32D-NEXT: bl __extendhfsf2
; LA32F-ILP32D-NEXT: ftintrz.w.s $fa0, $fa0
; LA32F-ILP32D-NEXT: movfr2gr.s $a0, $fa0
; LA32F-ILP32D-NEXT: add.w $a0, $fp, $a0
; LA32F-ILP32D-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
; LA32F-ILP32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-ILP32D-NEXT: addi.w $sp, $sp, 16
; LA32F-ILP32D-NEXT: ret
;
; LA32D-ILP32S-LABEL: callee_half_in_fregs:
; LA32D-ILP32S: # %bb.0:
; LA32D-ILP32S-NEXT: addi.w $sp, $sp, -16
; LA32D-ILP32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-ILP32S-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
; LA32D-ILP32S-NEXT: ld.hu $a1, $sp, 16
; LA32D-ILP32S-NEXT: move $fp, $a0
; LA32D-ILP32S-NEXT: move $a0, $a1
; LA32D-ILP32S-NEXT: bl __extendhfsf2
; LA32D-ILP32S-NEXT: movgr2fr.w $fa0, $a0
; LA32D-ILP32S-NEXT: ftintrz.w.s $fa0, $fa0
; LA32D-ILP32S-NEXT: movfr2gr.s $a0, $fa0
; LA32D-ILP32S-NEXT: add.w $a0, $fp, $a0
; LA32D-ILP32S-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
; LA32D-ILP32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-ILP32S-NEXT: addi.w $sp, $sp, 16
; LA32D-ILP32S-NEXT: ret
;
; LA32D-ILP32D-LABEL: callee_half_in_fregs:
; LA32D-ILP32D: # %bb.0:
; LA32D-ILP32D-NEXT: addi.w $sp, $sp, -16
; LA32D-ILP32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-ILP32D-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
; LA32D-ILP32D-NEXT: move $fp, $a0
; LA32D-ILP32D-NEXT: bl __extendhfsf2
; LA32D-ILP32D-NEXT: ftintrz.w.s $fa0, $fa0
; LA32D-ILP32D-NEXT: movfr2gr.s $a0, $fa0
; LA32D-ILP32D-NEXT: add.w $a0, $fp, $a0
; LA32D-ILP32D-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
; LA32D-ILP32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-ILP32D-NEXT: addi.w $sp, $sp, 16
; LA32D-ILP32D-NEXT: ret
;
; LA64S-LABEL: callee_half_in_fregs:
; LA64S: # %bb.0:
; LA64S-NEXT: addi.d $sp, $sp, -16
; LA64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64S-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
; LA64S-NEXT: move $fp, $a0
; LA64S-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
; LA64S-NEXT: jirl $ra, $ra, 0
; LA64S-NEXT: ftintrz.l.s $fa0, $fa0
; LA64S-NEXT: movfr2gr.d $a0, $fa0
; LA64S-NEXT: add.w $a0, $fp, $a0
; LA64S-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
; LA64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64S-NEXT: addi.d $sp, $sp, 16
; LA64S-NEXT: ret
;
; LA64F-LP64S-LABEL: callee_half_in_fregs:
; LA64F-LP64S: # %bb.0:
; LA64F-LP64S-NEXT: addi.d $sp, $sp, -16
; LA64F-LP64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64F-LP64S-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
; LA64F-LP64S-NEXT: ld.hu $a1, $sp, 16
; LA64F-LP64S-NEXT: move $fp, $a0
; LA64F-LP64S-NEXT: move $a0, $a1
; LA64F-LP64S-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
; LA64F-LP64S-NEXT: jirl $ra, $ra, 0
; LA64F-LP64S-NEXT: movgr2fr.w $fa0, $a0
; LA64F-LP64S-NEXT: ftintrz.l.s $fa0, $fa0
; LA64F-LP64S-NEXT: movfr2gr.d $a0, $fa0
; LA64F-LP64S-NEXT: add.w $a0, $fp, $a0
; LA64F-LP64S-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
; LA64F-LP64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64F-LP64S-NEXT: addi.d $sp, $sp, 16
; LA64F-LP64S-NEXT: ret
;
; LA64F-LP64D-LABEL: callee_half_in_fregs:
; LA64F-LP64D: # %bb.0:
; LA64F-LP64D-NEXT: addi.d $sp, $sp, -16
; LA64F-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64F-LP64D-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
; LA64F-LP64D-NEXT: move $fp, $a0
; LA64F-LP64D-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
; LA64F-LP64D-NEXT: jirl $ra, $ra, 0
; LA64F-LP64D-NEXT: ftintrz.l.s $fa0, $fa0
; LA64F-LP64D-NEXT: movfr2gr.d $a0, $fa0
; LA64F-LP64D-NEXT: add.w $a0, $fp, $a0
; LA64F-LP64D-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
; LA64F-LP64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64F-LP64D-NEXT: addi.d $sp, $sp, 16
; LA64F-LP64D-NEXT: ret
;
; LA64D-LP64S-LABEL: callee_half_in_fregs:
; LA64D-LP64S: # %bb.0:
; LA64D-LP64S-NEXT: addi.d $sp, $sp, -16
; LA64D-LP64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64D-LP64S-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
; LA64D-LP64S-NEXT: ld.hu $a1, $sp, 16
; LA64D-LP64S-NEXT: move $fp, $a0
; LA64D-LP64S-NEXT: move $a0, $a1
; LA64D-LP64S-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
; LA64D-LP64S-NEXT: jirl $ra, $ra, 0
; LA64D-LP64S-NEXT: movgr2fr.w $fa0, $a0
; LA64D-LP64S-NEXT: ftintrz.l.s $fa0, $fa0
; LA64D-LP64S-NEXT: movfr2gr.d $a0, $fa0
; LA64D-LP64S-NEXT: add.w $a0, $fp, $a0
; LA64D-LP64S-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
; LA64D-LP64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64D-LP64S-NEXT: addi.d $sp, $sp, 16
; LA64D-LP64S-NEXT: ret
;
; LA64D-LP64D-LABEL: callee_half_in_fregs:
; LA64D-LP64D: # %bb.0:
; LA64D-LP64D-NEXT: addi.d $sp, $sp, -16
; LA64D-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64D-LP64D-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
; LA64D-LP64D-NEXT: move $fp, $a0
; LA64D-LP64D-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
; LA64D-LP64D-NEXT: jirl $ra, $ra, 0
; LA64D-LP64D-NEXT: ftintrz.l.s $fa0, $fa0
; LA64D-LP64D-NEXT: movfr2gr.d $a0, $fa0
; LA64D-LP64D-NEXT: add.w $a0, $fp, $a0
; LA64D-LP64D-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
; LA64D-LP64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64D-LP64D-NEXT: addi.d $sp, $sp, 16
; LA64D-LP64D-NEXT: ret
%1 = fptosi half %i to i32
%2 = add i32 %a, %1
ret i32 %2
}
define i32 @caller_half_in_fregs() nounwind {
; LA32S-LABEL: caller_half_in_fregs:
; LA32S: # %bb.0:
; LA32S-NEXT: addi.w $sp, $sp, -16
; LA32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32S-NEXT: lu12i.w $t0, 4
; LA32S-NEXT: ori $a0, $zero, 1
; LA32S-NEXT: ori $a1, $zero, 2
; LA32S-NEXT: ori $a2, $zero, 3
; LA32S-NEXT: ori $a3, $zero, 4
; LA32S-NEXT: ori $a4, $zero, 5
; LA32S-NEXT: ori $a5, $zero, 6
; LA32S-NEXT: ori $a6, $zero, 7
; LA32S-NEXT: ori $a7, $zero, 8
; LA32S-NEXT: st.w $t0, $sp, 0
; LA32S-NEXT: bl callee_half_in_fregs
; LA32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32S-NEXT: addi.w $sp, $sp, 16
; LA32S-NEXT: ret
;
; LA32F-ILP32S-LABEL: caller_half_in_fregs:
; LA32F-ILP32S: # %bb.0:
; LA32F-ILP32S-NEXT: addi.w $sp, $sp, -16
; LA32F-ILP32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-ILP32S-NEXT: lu12i.w $t0, -12
; LA32F-ILP32S-NEXT: ori $a0, $zero, 1
; LA32F-ILP32S-NEXT: ori $a1, $zero, 2
; LA32F-ILP32S-NEXT: ori $a2, $zero, 3
; LA32F-ILP32S-NEXT: ori $a3, $zero, 4
; LA32F-ILP32S-NEXT: ori $a4, $zero, 5
; LA32F-ILP32S-NEXT: ori $a5, $zero, 6
; LA32F-ILP32S-NEXT: ori $a6, $zero, 7
; LA32F-ILP32S-NEXT: ori $a7, $zero, 8
; LA32F-ILP32S-NEXT: st.w $t0, $sp, 0
; LA32F-ILP32S-NEXT: bl callee_half_in_fregs
; LA32F-ILP32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-ILP32S-NEXT: addi.w $sp, $sp, 16
; LA32F-ILP32S-NEXT: ret
;
; LA32F-ILP32D-LABEL: caller_half_in_fregs:
; LA32F-ILP32D: # %bb.0:
; LA32F-ILP32D-NEXT: addi.w $sp, $sp, -16
; LA32F-ILP32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_0)
; LA32F-ILP32D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
; LA32F-ILP32D-NEXT: ori $a0, $zero, 1
; LA32F-ILP32D-NEXT: ori $a1, $zero, 2
; LA32F-ILP32D-NEXT: ori $a2, $zero, 3
; LA32F-ILP32D-NEXT: ori $a3, $zero, 4
; LA32F-ILP32D-NEXT: ori $a4, $zero, 5
; LA32F-ILP32D-NEXT: ori $a5, $zero, 6
; LA32F-ILP32D-NEXT: ori $a6, $zero, 7
; LA32F-ILP32D-NEXT: ori $a7, $zero, 8
; LA32F-ILP32D-NEXT: bl callee_half_in_fregs
; LA32F-ILP32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-ILP32D-NEXT: addi.w $sp, $sp, 16
; LA32F-ILP32D-NEXT: ret
;
; LA32D-ILP32S-LABEL: caller_half_in_fregs:
; LA32D-ILP32S: # %bb.0:
; LA32D-ILP32S-NEXT: addi.w $sp, $sp, -16
; LA32D-ILP32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-ILP32S-NEXT: lu12i.w $t0, -12
; LA32D-ILP32S-NEXT: ori $a0, $zero, 1
; LA32D-ILP32S-NEXT: ori $a1, $zero, 2
; LA32D-ILP32S-NEXT: ori $a2, $zero, 3
; LA32D-ILP32S-NEXT: ori $a3, $zero, 4
; LA32D-ILP32S-NEXT: ori $a4, $zero, 5
; LA32D-ILP32S-NEXT: ori $a5, $zero, 6
; LA32D-ILP32S-NEXT: ori $a6, $zero, 7
; LA32D-ILP32S-NEXT: ori $a7, $zero, 8
; LA32D-ILP32S-NEXT: st.w $t0, $sp, 0
; LA32D-ILP32S-NEXT: bl callee_half_in_fregs
; LA32D-ILP32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-ILP32S-NEXT: addi.w $sp, $sp, 16
; LA32D-ILP32S-NEXT: ret
;
; LA32D-ILP32D-LABEL: caller_half_in_fregs:
; LA32D-ILP32D: # %bb.0:
; LA32D-ILP32D-NEXT: addi.w $sp, $sp, -16
; LA32D-ILP32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_0)
; LA32D-ILP32D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
; LA32D-ILP32D-NEXT: ori $a0, $zero, 1
; LA32D-ILP32D-NEXT: ori $a1, $zero, 2
; LA32D-ILP32D-NEXT: ori $a2, $zero, 3
; LA32D-ILP32D-NEXT: ori $a3, $zero, 4
; LA32D-ILP32D-NEXT: ori $a4, $zero, 5
; LA32D-ILP32D-NEXT: ori $a5, $zero, 6
; LA32D-ILP32D-NEXT: ori $a6, $zero, 7
; LA32D-ILP32D-NEXT: ori $a7, $zero, 8
; LA32D-ILP32D-NEXT: bl callee_half_in_fregs
; LA32D-ILP32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-ILP32D-NEXT: addi.w $sp, $sp, 16
; LA32D-ILP32D-NEXT: ret
;
; LA64S-LABEL: caller_half_in_fregs:
; LA64S: # %bb.0:
; LA64S-NEXT: addi.d $sp, $sp, -16
; LA64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_0)
; LA64S-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
; LA64S-NEXT: ori $a0, $zero, 1
; LA64S-NEXT: ori $a1, $zero, 2
; LA64S-NEXT: ori $a2, $zero, 3
; LA64S-NEXT: ori $a3, $zero, 4
; LA64S-NEXT: ori $a4, $zero, 5
; LA64S-NEXT: ori $a5, $zero, 6
; LA64S-NEXT: ori $a6, $zero, 7
; LA64S-NEXT: ori $a7, $zero, 8
; LA64S-NEXT: pcaddu18i $ra, %call36(callee_half_in_fregs)
; LA64S-NEXT: jirl $ra, $ra, 0
; LA64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64S-NEXT: addi.d $sp, $sp, 16
; LA64S-NEXT: ret
;
; LA64F-LP64S-LABEL: caller_half_in_fregs:
; LA64F-LP64S: # %bb.0:
; LA64F-LP64S-NEXT: addi.d $sp, $sp, -16
; LA64F-LP64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64F-LP64S-NEXT: lu12i.w $t0, -12
; LA64F-LP64S-NEXT: lu32i.d $t0, 0
; LA64F-LP64S-NEXT: ori $a0, $zero, 1
; LA64F-LP64S-NEXT: ori $a1, $zero, 2
; LA64F-LP64S-NEXT: ori $a2, $zero, 3
; LA64F-LP64S-NEXT: ori $a3, $zero, 4
; LA64F-LP64S-NEXT: ori $a4, $zero, 5
; LA64F-LP64S-NEXT: ori $a5, $zero, 6
; LA64F-LP64S-NEXT: ori $a6, $zero, 7
; LA64F-LP64S-NEXT: ori $a7, $zero, 8
; LA64F-LP64S-NEXT: st.w $t0, $sp, 0
; LA64F-LP64S-NEXT: pcaddu18i $ra, %call36(callee_half_in_fregs)
; LA64F-LP64S-NEXT: jirl $ra, $ra, 0
; LA64F-LP64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64F-LP64S-NEXT: addi.d $sp, $sp, 16
; LA64F-LP64S-NEXT: ret
;
; LA64F-LP64D-LABEL: caller_half_in_fregs:
; LA64F-LP64D: # %bb.0:
; LA64F-LP64D-NEXT: addi.d $sp, $sp, -16
; LA64F-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_0)
; LA64F-LP64D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
; LA64F-LP64D-NEXT: ori $a0, $zero, 1
; LA64F-LP64D-NEXT: ori $a1, $zero, 2
; LA64F-LP64D-NEXT: ori $a2, $zero, 3
; LA64F-LP64D-NEXT: ori $a3, $zero, 4
; LA64F-LP64D-NEXT: ori $a4, $zero, 5
; LA64F-LP64D-NEXT: ori $a5, $zero, 6
; LA64F-LP64D-NEXT: ori $a6, $zero, 7
; LA64F-LP64D-NEXT: ori $a7, $zero, 8
; LA64F-LP64D-NEXT: pcaddu18i $ra, %call36(callee_half_in_fregs)
; LA64F-LP64D-NEXT: jirl $ra, $ra, 0
; LA64F-LP64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64F-LP64D-NEXT: addi.d $sp, $sp, 16
; LA64F-LP64D-NEXT: ret
;
; LA64D-LP64S-LABEL: caller_half_in_fregs:
; LA64D-LP64S: # %bb.0:
; LA64D-LP64S-NEXT: addi.d $sp, $sp, -16
; LA64D-LP64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64D-LP64S-NEXT: lu12i.w $t0, -12
; LA64D-LP64S-NEXT: lu32i.d $t0, 0
; LA64D-LP64S-NEXT: ori $a0, $zero, 1
; LA64D-LP64S-NEXT: ori $a1, $zero, 2
; LA64D-LP64S-NEXT: ori $a2, $zero, 3
; LA64D-LP64S-NEXT: ori $a3, $zero, 4
; LA64D-LP64S-NEXT: ori $a4, $zero, 5
; LA64D-LP64S-NEXT: ori $a5, $zero, 6
; LA64D-LP64S-NEXT: ori $a6, $zero, 7
; LA64D-LP64S-NEXT: ori $a7, $zero, 8
; LA64D-LP64S-NEXT: st.w $t0, $sp, 0
; LA64D-LP64S-NEXT: pcaddu18i $ra, %call36(callee_half_in_fregs)
; LA64D-LP64S-NEXT: jirl $ra, $ra, 0
; LA64D-LP64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64D-LP64S-NEXT: addi.d $sp, $sp, 16
; LA64D-LP64S-NEXT: ret
;
; LA64D-LP64D-LABEL: caller_half_in_fregs:
; LA64D-LP64D: # %bb.0:
; LA64D-LP64D-NEXT: addi.d $sp, $sp, -16
; LA64D-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_0)
; LA64D-LP64D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI1_0)
; LA64D-LP64D-NEXT: ori $a0, $zero, 1
; LA64D-LP64D-NEXT: ori $a1, $zero, 2
; LA64D-LP64D-NEXT: ori $a2, $zero, 3
; LA64D-LP64D-NEXT: ori $a3, $zero, 4
; LA64D-LP64D-NEXT: ori $a4, $zero, 5
; LA64D-LP64D-NEXT: ori $a5, $zero, 6
; LA64D-LP64D-NEXT: ori $a6, $zero, 7
; LA64D-LP64D-NEXT: ori $a7, $zero, 8
; LA64D-LP64D-NEXT: pcaddu18i $ra, %call36(callee_half_in_fregs)
; LA64D-LP64D-NEXT: jirl $ra, $ra, 0
; LA64D-LP64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64D-LP64D-NEXT: addi.d $sp, $sp, 16
; LA64D-LP64D-NEXT: ret
%1 = call i32 @callee_half_in_fregs(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, half 2.0)
ret i32 %1
}
define i32 @callee_half_in_gregs(half %a, half %b, half %c, half %d, half %e, half %f, half %g, half %h, half %i, i32 %j) nounwind {
|
|
; LA32S-LABEL: callee_half_in_gregs:
|
|
; LA32S: # %bb.0:
|
|
; LA32S-NEXT: addi.w $sp, $sp, -16
|
|
; LA32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32S-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
|
|
; LA32S-NEXT: ld.w $fp, $sp, 20
|
|
; LA32S-NEXT: ld.hu $a0, $sp, 16
|
|
; LA32S-NEXT: bl __extendhfsf2
|
|
; LA32S-NEXT: bl __fixsfsi
|
|
; LA32S-NEXT: add.w $a0, $fp, $a0
|
|
; LA32S-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
|
|
; LA32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32S-NEXT: addi.w $sp, $sp, 16
|
|
; LA32S-NEXT: ret
|
|
;
|
|
; LA32F-ILP32S-LABEL: callee_half_in_gregs:
|
|
; LA32F-ILP32S: # %bb.0:
|
|
; LA32F-ILP32S-NEXT: addi.w $sp, $sp, -16
|
|
; LA32F-ILP32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32F-ILP32S-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
|
|
; LA32F-ILP32S-NEXT: ld.w $fp, $sp, 20
|
|
; LA32F-ILP32S-NEXT: ld.hu $a0, $sp, 16
|
|
; LA32F-ILP32S-NEXT: bl __extendhfsf2
|
|
; LA32F-ILP32S-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA32F-ILP32S-NEXT: ftintrz.w.s $fa0, $fa0
|
|
; LA32F-ILP32S-NEXT: movfr2gr.s $a0, $fa0
|
|
; LA32F-ILP32S-NEXT: add.w $a0, $fp, $a0
|
|
; LA32F-ILP32S-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
|
|
; LA32F-ILP32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32F-ILP32S-NEXT: addi.w $sp, $sp, 16
|
|
; LA32F-ILP32S-NEXT: ret
|
|
;
|
|
; LA32F-ILP32D-LABEL: callee_half_in_gregs:
|
|
; LA32F-ILP32D: # %bb.0:
|
|
; LA32F-ILP32D-NEXT: addi.w $sp, $sp, -16
|
|
; LA32F-ILP32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32F-ILP32D-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
|
|
; LA32F-ILP32D-NEXT: move $fp, $a1
|
|
; LA32F-ILP32D-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA32F-ILP32D-NEXT: bl __extendhfsf2
|
|
; LA32F-ILP32D-NEXT: ftintrz.w.s $fa0, $fa0
|
|
; LA32F-ILP32D-NEXT: movfr2gr.s $a0, $fa0
|
|
; LA32F-ILP32D-NEXT: add.w $a0, $fp, $a0
|
|
; LA32F-ILP32D-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
|
|
; LA32F-ILP32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32F-ILP32D-NEXT: addi.w $sp, $sp, 16
|
|
; LA32F-ILP32D-NEXT: ret
|
|
;
|
|
; LA32D-ILP32S-LABEL: callee_half_in_gregs:
|
|
; LA32D-ILP32S: # %bb.0:
|
|
; LA32D-ILP32S-NEXT: addi.w $sp, $sp, -16
|
|
; LA32D-ILP32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32D-ILP32S-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
|
|
; LA32D-ILP32S-NEXT: ld.w $fp, $sp, 20
|
|
; LA32D-ILP32S-NEXT: ld.hu $a0, $sp, 16
|
|
; LA32D-ILP32S-NEXT: bl __extendhfsf2
|
|
; LA32D-ILP32S-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA32D-ILP32S-NEXT: ftintrz.w.s $fa0, $fa0
|
|
; LA32D-ILP32S-NEXT: movfr2gr.s $a0, $fa0
|
|
; LA32D-ILP32S-NEXT: add.w $a0, $fp, $a0
|
|
; LA32D-ILP32S-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
|
|
; LA32D-ILP32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32D-ILP32S-NEXT: addi.w $sp, $sp, 16
|
|
; LA32D-ILP32S-NEXT: ret
|
|
;
|
|
; LA32D-ILP32D-LABEL: callee_half_in_gregs:
|
|
; LA32D-ILP32D: # %bb.0:
|
|
; LA32D-ILP32D-NEXT: addi.w $sp, $sp, -16
|
|
; LA32D-ILP32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32D-ILP32D-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
|
|
; LA32D-ILP32D-NEXT: move $fp, $a1
|
|
; LA32D-ILP32D-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA32D-ILP32D-NEXT: bl __extendhfsf2
|
|
; LA32D-ILP32D-NEXT: ftintrz.w.s $fa0, $fa0
|
|
; LA32D-ILP32D-NEXT: movfr2gr.s $a0, $fa0
|
|
; LA32D-ILP32D-NEXT: add.w $a0, $fp, $a0
|
|
; LA32D-ILP32D-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
|
|
; LA32D-ILP32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32D-ILP32D-NEXT: addi.w $sp, $sp, 16
|
|
; LA32D-ILP32D-NEXT: ret
|
|
;
|
|
; LA64S-LABEL: callee_half_in_gregs:
|
|
; LA64S: # %bb.0:
|
|
; LA64S-NEXT: addi.d $sp, $sp, -16
|
|
; LA64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
|
|
; LA64S-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
|
|
; LA64S-NEXT: move $fp, $a1
|
|
; LA64S-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA64S-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
|
|
; LA64S-NEXT: jirl $ra, $ra, 0
|
|
; LA64S-NEXT: ftintrz.l.s $fa0, $fa0
|
|
; LA64S-NEXT: movfr2gr.d $a0, $fa0
|
|
; LA64S-NEXT: add.w $a0, $fp, $a0
|
|
; LA64S-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
|
|
; LA64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
|
|
; LA64S-NEXT: addi.d $sp, $sp, 16
|
|
; LA64S-NEXT: ret
|
|
;
|
|
; LA64F-LP64S-LABEL: callee_half_in_gregs:
|
|
; LA64F-LP64S: # %bb.0:
|
|
; LA64F-LP64S-NEXT: addi.d $sp, $sp, -16
|
|
; LA64F-LP64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
|
|
; LA64F-LP64S-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
|
|
; LA64F-LP64S-NEXT: ld.w $fp, $sp, 24
|
|
; LA64F-LP64S-NEXT: ld.hu $a0, $sp, 16
|
|
; LA64F-LP64S-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
|
|
; LA64F-LP64S-NEXT: jirl $ra, $ra, 0
|
|
; LA64F-LP64S-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA64F-LP64S-NEXT: ftintrz.l.s $fa0, $fa0
|
|
; LA64F-LP64S-NEXT: movfr2gr.d $a0, $fa0
|
|
; LA64F-LP64S-NEXT: add.w $a0, $fp, $a0
|
|
; LA64F-LP64S-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
|
|
; LA64F-LP64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
|
|
; LA64F-LP64S-NEXT: addi.d $sp, $sp, 16
|
|
; LA64F-LP64S-NEXT: ret
|
|
;
|
|
; LA64F-LP64D-LABEL: callee_half_in_gregs:
|
|
; LA64F-LP64D: # %bb.0:
|
|
; LA64F-LP64D-NEXT: addi.d $sp, $sp, -16
|
|
; LA64F-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
|
|
; LA64F-LP64D-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
|
|
; LA64F-LP64D-NEXT: move $fp, $a1
|
|
; LA64F-LP64D-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA64F-LP64D-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
|
|
; LA64F-LP64D-NEXT: jirl $ra, $ra, 0
|
|
; LA64F-LP64D-NEXT: ftintrz.l.s $fa0, $fa0
|
|
; LA64F-LP64D-NEXT: movfr2gr.d $a0, $fa0
|
|
; LA64F-LP64D-NEXT: add.w $a0, $fp, $a0
|
|
; LA64F-LP64D-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
|
|
; LA64F-LP64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
|
|
; LA64F-LP64D-NEXT: addi.d $sp, $sp, 16
|
|
; LA64F-LP64D-NEXT: ret
|
|
;
|
|
; LA64D-LP64S-LABEL: callee_half_in_gregs:
|
|
; LA64D-LP64S: # %bb.0:
|
|
; LA64D-LP64S-NEXT: addi.d $sp, $sp, -16
|
|
; LA64D-LP64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
|
|
; LA64D-LP64S-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
|
|
; LA64D-LP64S-NEXT: ld.w $fp, $sp, 24
|
|
; LA64D-LP64S-NEXT: ld.hu $a0, $sp, 16
|
|
; LA64D-LP64S-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
|
|
; LA64D-LP64S-NEXT: jirl $ra, $ra, 0
|
|
; LA64D-LP64S-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA64D-LP64S-NEXT: ftintrz.l.s $fa0, $fa0
|
|
; LA64D-LP64S-NEXT: movfr2gr.d $a0, $fa0
|
|
; LA64D-LP64S-NEXT: add.w $a0, $fp, $a0
|
|
; LA64D-LP64S-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
|
|
; LA64D-LP64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
|
|
; LA64D-LP64S-NEXT: addi.d $sp, $sp, 16
|
|
; LA64D-LP64S-NEXT: ret
|
|
;
|
|
; LA64D-LP64D-LABEL: callee_half_in_gregs:
|
|
; LA64D-LP64D: # %bb.0:
|
|
; LA64D-LP64D-NEXT: addi.d $sp, $sp, -16
|
|
; LA64D-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
|
|
; LA64D-LP64D-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
|
|
; LA64D-LP64D-NEXT: move $fp, $a1
|
|
; LA64D-LP64D-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA64D-LP64D-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
|
|
; LA64D-LP64D-NEXT: jirl $ra, $ra, 0
|
|
; LA64D-LP64D-NEXT: ftintrz.l.s $fa0, $fa0
|
|
; LA64D-LP64D-NEXT: movfr2gr.d $a0, $fa0
|
|
; LA64D-LP64D-NEXT: add.w $a0, $fp, $a0
|
|
; LA64D-LP64D-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
|
|
; LA64D-LP64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
|
|
; LA64D-LP64D-NEXT: addi.d $sp, $sp, 16
|
|
; LA64D-LP64D-NEXT: ret
|
|
%1 = fptosi half %i to i32
|
|
%2 = add i32 %j, %1
|
|
ret i32 %2
|
|
}
define i32 @caller_half_in_gregs() nounwind {
|
|
; LA32S-LABEL: caller_half_in_gregs:
|
|
; LA32S: # %bb.0:
|
|
; LA32S-NEXT: addi.w $sp, $sp, -16
|
|
; LA32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32S-NEXT: ori $a0, $zero, 10
|
|
; LA32S-NEXT: st.w $a0, $sp, 4
|
|
; LA32S-NEXT: lu12i.w $a1, 4
|
|
; LA32S-NEXT: ori $t0, $a1, 2176
|
|
; LA32S-NEXT: lu12i.w $a0, 3
|
|
; LA32S-NEXT: ori $a0, $a0, 3072
|
|
; LA32S-NEXT: ori $a2, $a1, 512
|
|
; LA32S-NEXT: ori $a3, $a1, 1024
|
|
; LA32S-NEXT: ori $a4, $a1, 1280
|
|
; LA32S-NEXT: ori $a5, $a1, 1536
|
|
; LA32S-NEXT: ori $a6, $a1, 1792
|
|
; LA32S-NEXT: ori $a7, $a1, 2048
|
|
; LA32S-NEXT: st.w $t0, $sp, 0
|
|
; LA32S-NEXT: bl callee_half_in_gregs
|
|
; LA32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32S-NEXT: addi.w $sp, $sp, 16
|
|
; LA32S-NEXT: ret
|
|
;
|
|
; LA32F-ILP32S-LABEL: caller_half_in_gregs:
|
|
; LA32F-ILP32S: # %bb.0:
|
|
; LA32F-ILP32S-NEXT: addi.w $sp, $sp, -16
|
|
; LA32F-ILP32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32F-ILP32S-NEXT: ori $a0, $zero, 10
|
|
; LA32F-ILP32S-NEXT: st.w $a0, $sp, 4
|
|
; LA32F-ILP32S-NEXT: lu12i.w $a1, -12
|
|
; LA32F-ILP32S-NEXT: ori $t0, $a1, 2176
|
|
; LA32F-ILP32S-NEXT: lu12i.w $a0, -13
|
|
; LA32F-ILP32S-NEXT: ori $a0, $a0, 3072
|
|
; LA32F-ILP32S-NEXT: ori $a2, $a1, 512
|
|
; LA32F-ILP32S-NEXT: ori $a3, $a1, 1024
|
|
; LA32F-ILP32S-NEXT: ori $a4, $a1, 1280
|
|
; LA32F-ILP32S-NEXT: ori $a5, $a1, 1536
|
|
; LA32F-ILP32S-NEXT: ori $a6, $a1, 1792
|
|
; LA32F-ILP32S-NEXT: ori $a7, $a1, 2048
|
|
; LA32F-ILP32S-NEXT: st.w $t0, $sp, 0
|
|
; LA32F-ILP32S-NEXT: bl callee_half_in_gregs
|
|
; LA32F-ILP32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32F-ILP32S-NEXT: addi.w $sp, $sp, 16
|
|
; LA32F-ILP32S-NEXT: ret
|
|
;
|
|
; LA32F-ILP32D-LABEL: caller_half_in_gregs:
|
|
; LA32F-ILP32D: # %bb.0:
|
|
; LA32F-ILP32D-NEXT: addi.w $sp, $sp, -16
|
|
; LA32F-ILP32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
|
|
; LA32F-ILP32D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI3_0)
|
|
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_1)
|
|
; LA32F-ILP32D-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI3_1)
|
|
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_2)
|
|
; LA32F-ILP32D-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI3_2)
|
|
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_3)
|
|
; LA32F-ILP32D-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI3_3)
|
|
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_4)
|
|
; LA32F-ILP32D-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI3_4)
|
|
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_5)
|
|
; LA32F-ILP32D-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI3_5)
|
|
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_6)
|
|
; LA32F-ILP32D-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI3_6)
|
|
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_7)
|
|
; LA32F-ILP32D-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI3_7)
|
|
; LA32F-ILP32D-NEXT: lu12i.w $a0, -12
|
|
; LA32F-ILP32D-NEXT: ori $a0, $a0, 2176
|
|
; LA32F-ILP32D-NEXT: ori $a1, $zero, 10
|
|
; LA32F-ILP32D-NEXT: bl callee_half_in_gregs
|
|
; LA32F-ILP32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32F-ILP32D-NEXT: addi.w $sp, $sp, 16
|
|
; LA32F-ILP32D-NEXT: ret
|
|
;
|
|
; LA32D-ILP32S-LABEL: caller_half_in_gregs:
|
|
; LA32D-ILP32S: # %bb.0:
|
|
; LA32D-ILP32S-NEXT: addi.w $sp, $sp, -16
|
|
; LA32D-ILP32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32D-ILP32S-NEXT: ori $a0, $zero, 10
|
|
; LA32D-ILP32S-NEXT: st.w $a0, $sp, 4
|
|
; LA32D-ILP32S-NEXT: lu12i.w $a1, -12
|
|
; LA32D-ILP32S-NEXT: ori $t0, $a1, 2176
|
|
; LA32D-ILP32S-NEXT: lu12i.w $a0, -13
|
|
; LA32D-ILP32S-NEXT: ori $a0, $a0, 3072
|
|
; LA32D-ILP32S-NEXT: ori $a2, $a1, 512
|
|
; LA32D-ILP32S-NEXT: ori $a3, $a1, 1024
|
|
; LA32D-ILP32S-NEXT: ori $a4, $a1, 1280
|
|
; LA32D-ILP32S-NEXT: ori $a5, $a1, 1536
|
|
; LA32D-ILP32S-NEXT: ori $a6, $a1, 1792
|
|
; LA32D-ILP32S-NEXT: ori $a7, $a1, 2048
|
|
; LA32D-ILP32S-NEXT: st.w $t0, $sp, 0
|
|
; LA32D-ILP32S-NEXT: bl callee_half_in_gregs
|
|
; LA32D-ILP32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32D-ILP32S-NEXT: addi.w $sp, $sp, 16
|
|
; LA32D-ILP32S-NEXT: ret
|
|
;
|
|
; LA32D-ILP32D-LABEL: caller_half_in_gregs:
|
|
; LA32D-ILP32D: # %bb.0:
|
|
; LA32D-ILP32D-NEXT: addi.w $sp, $sp, -16
|
|
; LA32D-ILP32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
|
|
; LA32D-ILP32D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI3_0)
|
|
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_1)
|
|
; LA32D-ILP32D-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI3_1)
|
|
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_2)
|
|
; LA32D-ILP32D-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI3_2)
|
|
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_3)
|
|
; LA32D-ILP32D-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI3_3)
|
|
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_4)
|
|
; LA32D-ILP32D-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI3_4)
|
|
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_5)
|
|
; LA32D-ILP32D-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI3_5)
|
|
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_6)
|
|
; LA32D-ILP32D-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI3_6)
|
|
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_7)
|
|
; LA32D-ILP32D-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI3_7)
|
|
; LA32D-ILP32D-NEXT: lu12i.w $a0, -12
|
|
; LA32D-ILP32D-NEXT: ori $a0, $a0, 2176
|
|
; LA32D-ILP32D-NEXT: ori $a1, $zero, 10
|
|
; LA32D-ILP32D-NEXT: bl callee_half_in_gregs
|
|
; LA32D-ILP32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32D-ILP32D-NEXT: addi.w $sp, $sp, 16
|
|
; LA32D-ILP32D-NEXT: ret
|
|
;
|
|
; LA64S-LABEL: caller_half_in_gregs:
|
|
; LA64S: # %bb.0:
|
|
; LA64S-NEXT: addi.d $sp, $sp, -16
|
|
; LA64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
|
|
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
|
|
; LA64S-NEXT: fld.s $ft0, $a0, %pc_lo12(.LCPI3_0)
|
|
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_1)
|
|
; LA64S-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI3_1)
|
|
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_2)
|
|
; LA64S-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI3_2)
|
|
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_3)
|
|
; LA64S-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI3_3)
|
|
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_4)
|
|
; LA64S-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI3_4)
|
|
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_5)
|
|
; LA64S-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI3_5)
|
|
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_6)
|
|
; LA64S-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI3_6)
|
|
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_7)
|
|
; LA64S-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI3_7)
|
|
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_8)
|
|
; LA64S-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI3_8)
|
|
; LA64S-NEXT: movfr2gr.s $a0, $ft0
|
|
; LA64S-NEXT: ori $a1, $zero, 10
|
|
; LA64S-NEXT: pcaddu18i $ra, %call36(callee_half_in_gregs)
|
|
; LA64S-NEXT: jirl $ra, $ra, 0
|
|
; LA64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
|
|
; LA64S-NEXT: addi.d $sp, $sp, 16
|
|
; LA64S-NEXT: ret
|
|
;
|
|
; LA64F-LP64S-LABEL: caller_half_in_gregs:
|
|
; LA64F-LP64S: # %bb.0:
|
|
; LA64F-LP64S-NEXT: addi.d $sp, $sp, -32
|
|
; LA64F-LP64S-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
|
; LA64F-LP64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
|
|
; LA64F-LP64S-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI3_0)
|
|
; LA64F-LP64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_1)
|
|
; LA64F-LP64S-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI3_1)
|
|
; LA64F-LP64S-NEXT: movfr2gr.s $a0, $fa0
|
|
; LA64F-LP64S-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_2)
|
|
; LA64F-LP64S-NEXT: fld.s $fa0, $a1, %pc_lo12(.LCPI3_2)
|
|
; LA64F-LP64S-NEXT: movfr2gr.s $a1, $fa1
|
|
; LA64F-LP64S-NEXT: pcalau12i $a2, %pc_hi20(.LCPI3_3)
|
|
; LA64F-LP64S-NEXT: fld.s $fa1, $a2, %pc_lo12(.LCPI3_3)
|
|
; LA64F-LP64S-NEXT: movfr2gr.s $a2, $fa0
|
|
; LA64F-LP64S-NEXT: pcalau12i $a3, %pc_hi20(.LCPI3_4)
|
|
; LA64F-LP64S-NEXT: fld.s $fa0, $a3, %pc_lo12(.LCPI3_4)
|
|
; LA64F-LP64S-NEXT: movfr2gr.s $a3, $fa1
|
|
; LA64F-LP64S-NEXT: pcalau12i $a4, %pc_hi20(.LCPI3_5)
|
|
; LA64F-LP64S-NEXT: fld.s $fa1, $a4, %pc_lo12(.LCPI3_5)
|
|
; LA64F-LP64S-NEXT: movfr2gr.s $a4, $fa0
|
|
; LA64F-LP64S-NEXT: pcalau12i $a5, %pc_hi20(.LCPI3_6)
|
|
; LA64F-LP64S-NEXT: fld.s $fa0, $a5, %pc_lo12(.LCPI3_6)
|
|
; LA64F-LP64S-NEXT: movfr2gr.s $a5, $fa1
|
|
; LA64F-LP64S-NEXT: ori $a6, $zero, 10
|
|
; LA64F-LP64S-NEXT: st.d $a6, $sp, 8
|
|
; LA64F-LP64S-NEXT: movfr2gr.s $a6, $fa0
|
|
; LA64F-LP64S-NEXT: pcalau12i $a7, %pc_hi20(.LCPI3_7)
|
|
; LA64F-LP64S-NEXT: fld.s $fa0, $a7, %pc_lo12(.LCPI3_7)
|
|
; LA64F-LP64S-NEXT: lu12i.w $a7, -12
|
|
; LA64F-LP64S-NEXT: ori $t0, $a7, 2176
|
|
; LA64F-LP64S-NEXT: lu32i.d $t0, 0
|
|
; LA64F-LP64S-NEXT: movfr2gr.s $a7, $fa0
|
|
; LA64F-LP64S-NEXT: st.w $t0, $sp, 0
|
|
; LA64F-LP64S-NEXT: pcaddu18i $ra, %call36(callee_half_in_gregs)
|
|
; LA64F-LP64S-NEXT: jirl $ra, $ra, 0
|
|
; LA64F-LP64S-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
|
|
; LA64F-LP64S-NEXT: addi.d $sp, $sp, 32
|
|
; LA64F-LP64S-NEXT: ret
|
|
;
|
|
; LA64F-LP64D-LABEL: caller_half_in_gregs:
|
|
; LA64F-LP64D: # %bb.0:
|
|
; LA64F-LP64D-NEXT: addi.d $sp, $sp, -16
|
|
; LA64F-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
|
|
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
|
|
; LA64F-LP64D-NEXT: fld.s $ft0, $a0, %pc_lo12(.LCPI3_0)
|
|
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_1)
|
|
; LA64F-LP64D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI3_1)
|
|
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_2)
|
|
; LA64F-LP64D-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI3_2)
|
|
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_3)
|
|
; LA64F-LP64D-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI3_3)
|
|
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_4)
|
|
; LA64F-LP64D-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI3_4)
|
|
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_5)
|
|
; LA64F-LP64D-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI3_5)
|
|
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_6)
|
|
; LA64F-LP64D-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI3_6)
|
|
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_7)
|
|
; LA64F-LP64D-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI3_7)
|
|
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_8)
|
|
; LA64F-LP64D-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI3_8)
|
|
; LA64F-LP64D-NEXT: movfr2gr.s $a0, $ft0
|
|
; LA64F-LP64D-NEXT: ori $a1, $zero, 10
|
|
; LA64F-LP64D-NEXT: pcaddu18i $ra, %call36(callee_half_in_gregs)
|
|
; LA64F-LP64D-NEXT: jirl $ra, $ra, 0
|
|
; LA64F-LP64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
|
|
; LA64F-LP64D-NEXT: addi.d $sp, $sp, 16
|
|
; LA64F-LP64D-NEXT: ret
|
|
;
|
|
; LA64D-LP64S-LABEL: caller_half_in_gregs:
|
|
; LA64D-LP64S: # %bb.0:
|
|
; LA64D-LP64S-NEXT: addi.d $sp, $sp, -32
|
|
; LA64D-LP64S-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
|
|
; LA64D-LP64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
|
|
; LA64D-LP64S-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI3_0)
|
|
; LA64D-LP64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_1)
|
|
; LA64D-LP64S-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI3_1)
|
|
; LA64D-LP64S-NEXT: movfr2gr.s $a0, $fa0
|
|
; LA64D-LP64S-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_2)
|
|
; LA64D-LP64S-NEXT: fld.s $fa0, $a1, %pc_lo12(.LCPI3_2)
|
|
; LA64D-LP64S-NEXT: movfr2gr.s $a1, $fa1
|
|
; LA64D-LP64S-NEXT: pcalau12i $a2, %pc_hi20(.LCPI3_3)
|
|
; LA64D-LP64S-NEXT: fld.s $fa1, $a2, %pc_lo12(.LCPI3_3)
|
|
; LA64D-LP64S-NEXT: movfr2gr.s $a2, $fa0
|
|
; LA64D-LP64S-NEXT: pcalau12i $a3, %pc_hi20(.LCPI3_4)
|
|
; LA64D-LP64S-NEXT: fld.s $fa0, $a3, %pc_lo12(.LCPI3_4)
|
|
; LA64D-LP64S-NEXT: movfr2gr.s $a3, $fa1
|
|
; LA64D-LP64S-NEXT: pcalau12i $a4, %pc_hi20(.LCPI3_5)
|
|
; LA64D-LP64S-NEXT: fld.s $fa1, $a4, %pc_lo12(.LCPI3_5)
|
|
; LA64D-LP64S-NEXT: movfr2gr.s $a4, $fa0
|
|
; LA64D-LP64S-NEXT: pcalau12i $a5, %pc_hi20(.LCPI3_6)
|
|
; LA64D-LP64S-NEXT: fld.s $fa0, $a5, %pc_lo12(.LCPI3_6)
|
|
; LA64D-LP64S-NEXT: movfr2gr.s $a5, $fa1
|
|
; LA64D-LP64S-NEXT: ori $a6, $zero, 10
|
|
; LA64D-LP64S-NEXT: st.d $a6, $sp, 8
|
|
; LA64D-LP64S-NEXT: movfr2gr.s $a6, $fa0
|
|
; LA64D-LP64S-NEXT: pcalau12i $a7, %pc_hi20(.LCPI3_7)
|
|
; LA64D-LP64S-NEXT: fld.s $fa0, $a7, %pc_lo12(.LCPI3_7)
|
|
; LA64D-LP64S-NEXT: lu12i.w $a7, -12
|
|
; LA64D-LP64S-NEXT: ori $t0, $a7, 2176
|
|
; LA64D-LP64S-NEXT: lu32i.d $t0, 0
|
|
; LA64D-LP64S-NEXT: movfr2gr.s $a7, $fa0
|
|
; LA64D-LP64S-NEXT: st.w $t0, $sp, 0
|
|
; LA64D-LP64S-NEXT: pcaddu18i $ra, %call36(callee_half_in_gregs)
|
|
; LA64D-LP64S-NEXT: jirl $ra, $ra, 0
|
|
; LA64D-LP64S-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
|
|
; LA64D-LP64S-NEXT: addi.d $sp, $sp, 32
|
|
; LA64D-LP64S-NEXT: ret
|
|
;
|
|
; LA64D-LP64D-LABEL: caller_half_in_gregs:
|
|
; LA64D-LP64D: # %bb.0:
|
|
; LA64D-LP64D-NEXT: addi.d $sp, $sp, -16
|
|
; LA64D-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
|
|
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
|
|
; LA64D-LP64D-NEXT: fld.s $ft0, $a0, %pc_lo12(.LCPI3_0)
|
|
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_1)
|
|
; LA64D-LP64D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI3_1)
|
|
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_2)
|
|
; LA64D-LP64D-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI3_2)
|
|
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_3)
|
|
; LA64D-LP64D-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI3_3)
|
|
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_4)
|
|
; LA64D-LP64D-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI3_4)
|
|
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_5)
|
|
; LA64D-LP64D-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI3_5)
|
|
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_6)
|
|
; LA64D-LP64D-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI3_6)
|
|
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_7)
|
|
; LA64D-LP64D-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI3_7)
|
|
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_8)
|
|
; LA64D-LP64D-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI3_8)
|
|
; LA64D-LP64D-NEXT: movfr2gr.s $a0, $ft0
|
|
; LA64D-LP64D-NEXT: ori $a1, $zero, 10
|
|
; LA64D-LP64D-NEXT: pcaddu18i $ra, %call36(callee_half_in_gregs)
|
|
; LA64D-LP64D-NEXT: jirl $ra, $ra, 0
|
|
; LA64D-LP64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
|
|
; LA64D-LP64D-NEXT: addi.d $sp, $sp, 16
|
|
; LA64D-LP64D-NEXT: ret
|
|
%1 = call i32 @callee_half_in_gregs(half 1.0, half 2.0, half 3.0, half 4.0, half 5.0, half 6.0, half 7.0, half 8.0, half 9.0, i32 10)
|
|
ret i32 %1
|
|
}
define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, half %i, half %j, half %k, half %l, half %m, half %n, half %o, half %p, half %q) nounwind {
|
|
; LA32S-LABEL: callee_half_on_stack:
|
|
; LA32S: # %bb.0:
|
|
; LA32S-NEXT: addi.w $sp, $sp, -16
|
|
; LA32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32S-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
|
|
; LA32S-NEXT: ld.hu $a0, $sp, 48
|
|
; LA32S-NEXT: move $fp, $a7
|
|
; LA32S-NEXT: bl __extendhfsf2
|
|
; LA32S-NEXT: bl __fixsfsi
|
|
; LA32S-NEXT: add.w $a0, $fp, $a0
|
|
; LA32S-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
|
|
; LA32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32S-NEXT: addi.w $sp, $sp, 16
|
|
; LA32S-NEXT: ret
|
|
;
|
|
; LA32F-ILP32S-LABEL: callee_half_on_stack:
|
|
; LA32F-ILP32S: # %bb.0:
|
|
; LA32F-ILP32S-NEXT: addi.w $sp, $sp, -16
|
|
; LA32F-ILP32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32F-ILP32S-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
|
|
; LA32F-ILP32S-NEXT: ld.hu $a0, $sp, 48
|
|
; LA32F-ILP32S-NEXT: move $fp, $a7
|
|
; LA32F-ILP32S-NEXT: bl __extendhfsf2
|
|
; LA32F-ILP32S-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA32F-ILP32S-NEXT: ftintrz.w.s $fa0, $fa0
|
|
; LA32F-ILP32S-NEXT: movfr2gr.s $a0, $fa0
|
|
; LA32F-ILP32S-NEXT: add.w $a0, $fp, $a0
|
|
; LA32F-ILP32S-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
|
|
; LA32F-ILP32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32F-ILP32S-NEXT: addi.w $sp, $sp, 16
|
|
; LA32F-ILP32S-NEXT: ret
|
|
;
|
|
; LA32F-ILP32D-LABEL: callee_half_on_stack:
|
|
; LA32F-ILP32D: # %bb.0:
|
|
; LA32F-ILP32D-NEXT: addi.w $sp, $sp, -16
|
|
; LA32F-ILP32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32F-ILP32D-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
|
|
; LA32F-ILP32D-NEXT: ld.hu $a0, $sp, 16
|
|
; LA32F-ILP32D-NEXT: move $fp, $a7
|
|
; LA32F-ILP32D-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA32F-ILP32D-NEXT: bl __extendhfsf2
|
|
; LA32F-ILP32D-NEXT: ftintrz.w.s $fa0, $fa0
|
|
; LA32F-ILP32D-NEXT: movfr2gr.s $a0, $fa0
|
|
; LA32F-ILP32D-NEXT: add.w $a0, $fp, $a0
|
|
; LA32F-ILP32D-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
|
|
; LA32F-ILP32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32F-ILP32D-NEXT: addi.w $sp, $sp, 16
|
|
; LA32F-ILP32D-NEXT: ret
|
|
;
|
|
; LA32D-ILP32S-LABEL: callee_half_on_stack:
|
|
; LA32D-ILP32S: # %bb.0:
|
|
; LA32D-ILP32S-NEXT: addi.w $sp, $sp, -16
|
|
; LA32D-ILP32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32D-ILP32S-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
|
|
; LA32D-ILP32S-NEXT: ld.hu $a0, $sp, 48
|
|
; LA32D-ILP32S-NEXT: move $fp, $a7
|
|
; LA32D-ILP32S-NEXT: bl __extendhfsf2
|
|
; LA32D-ILP32S-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA32D-ILP32S-NEXT: ftintrz.w.s $fa0, $fa0
|
|
; LA32D-ILP32S-NEXT: movfr2gr.s $a0, $fa0
|
|
; LA32D-ILP32S-NEXT: add.w $a0, $fp, $a0
|
|
; LA32D-ILP32S-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
|
|
; LA32D-ILP32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32D-ILP32S-NEXT: addi.w $sp, $sp, 16
|
|
; LA32D-ILP32S-NEXT: ret
|
|
;
|
|
; LA32D-ILP32D-LABEL: callee_half_on_stack:
|
|
; LA32D-ILP32D: # %bb.0:
|
|
; LA32D-ILP32D-NEXT: addi.w $sp, $sp, -16
|
|
; LA32D-ILP32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32D-ILP32D-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
|
|
; LA32D-ILP32D-NEXT: ld.hu $a0, $sp, 16
|
|
; LA32D-ILP32D-NEXT: move $fp, $a7
|
|
; LA32D-ILP32D-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA32D-ILP32D-NEXT: bl __extendhfsf2
|
|
; LA32D-ILP32D-NEXT: ftintrz.w.s $fa0, $fa0
|
|
; LA32D-ILP32D-NEXT: movfr2gr.s $a0, $fa0
|
|
; LA32D-ILP32D-NEXT: add.w $a0, $fp, $a0
|
|
; LA32D-ILP32D-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
|
|
; LA32D-ILP32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32D-ILP32D-NEXT: addi.w $sp, $sp, 16
|
|
; LA32D-ILP32D-NEXT: ret
|
|
;
|
|
; LA64S-LABEL: callee_half_on_stack:
|
|
; LA64S: # %bb.0:
|
|
; LA64S-NEXT: addi.d $sp, $sp, -16
|
|
; LA64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
|
|
; LA64S-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
|
|
; LA64S-NEXT: ld.hu $a0, $sp, 16
|
|
; LA64S-NEXT: move $fp, $a7
|
|
; LA64S-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA64S-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
|
|
; LA64S-NEXT: jirl $ra, $ra, 0
|
|
; LA64S-NEXT: ftintrz.l.s $fa0, $fa0
|
|
; LA64S-NEXT: movfr2gr.d $a0, $fa0
|
|
; LA64S-NEXT: add.w $a0, $fp, $a0
|
|
; LA64S-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
|
|
; LA64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
|
|
; LA64S-NEXT: addi.d $sp, $sp, 16
|
|
; LA64S-NEXT: ret
|
|
;
|
|
; LA64F-LP64S-LABEL: callee_half_on_stack:
|
|
; LA64F-LP64S: # %bb.0:
|
|
; LA64F-LP64S-NEXT: addi.d $sp, $sp, -16
|
|
; LA64F-LP64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
|
|
; LA64F-LP64S-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
|
|
; LA64F-LP64S-NEXT: ld.hu $a0, $sp, 80
|
|
; LA64F-LP64S-NEXT: move $fp, $a7
|
|
; LA64F-LP64S-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
|
|
; LA64F-LP64S-NEXT: jirl $ra, $ra, 0
|
|
; LA64F-LP64S-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA64F-LP64S-NEXT: ftintrz.l.s $fa0, $fa0
|
|
; LA64F-LP64S-NEXT: movfr2gr.d $a0, $fa0
|
|
; LA64F-LP64S-NEXT: add.w $a0, $fp, $a0
|
|
; LA64F-LP64S-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
|
|
; LA64F-LP64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
|
|
; LA64F-LP64S-NEXT: addi.d $sp, $sp, 16
|
|
; LA64F-LP64S-NEXT: ret
|
|
;
|
|
; LA64F-LP64D-LABEL: callee_half_on_stack:
|
|
; LA64F-LP64D: # %bb.0:
|
|
; LA64F-LP64D-NEXT: addi.d $sp, $sp, -16
|
|
; LA64F-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
|
|
; LA64F-LP64D-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
|
|
; LA64F-LP64D-NEXT: ld.hu $a0, $sp, 16
|
|
; LA64F-LP64D-NEXT: move $fp, $a7
|
|
; LA64F-LP64D-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA64F-LP64D-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
|
|
; LA64F-LP64D-NEXT: jirl $ra, $ra, 0
|
|
; LA64F-LP64D-NEXT: ftintrz.l.s $fa0, $fa0
|
|
; LA64F-LP64D-NEXT: movfr2gr.d $a0, $fa0
|
|
; LA64F-LP64D-NEXT: add.w $a0, $fp, $a0
|
|
; LA64F-LP64D-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
|
|
; LA64F-LP64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
|
|
; LA64F-LP64D-NEXT: addi.d $sp, $sp, 16
|
|
; LA64F-LP64D-NEXT: ret
|
|
;
|
|
; LA64D-LP64S-LABEL: callee_half_on_stack:
|
|
; LA64D-LP64S: # %bb.0:
|
|
; LA64D-LP64S-NEXT: addi.d $sp, $sp, -16
|
|
; LA64D-LP64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
|
|
; LA64D-LP64S-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
|
|
; LA64D-LP64S-NEXT: ld.hu $a0, $sp, 80
|
|
; LA64D-LP64S-NEXT: move $fp, $a7
|
|
; LA64D-LP64S-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
|
|
; LA64D-LP64S-NEXT: jirl $ra, $ra, 0
|
|
; LA64D-LP64S-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA64D-LP64S-NEXT: ftintrz.l.s $fa0, $fa0
|
|
; LA64D-LP64S-NEXT: movfr2gr.d $a0, $fa0
|
|
; LA64D-LP64S-NEXT: add.w $a0, $fp, $a0
|
|
; LA64D-LP64S-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
|
|
; LA64D-LP64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
|
|
; LA64D-LP64S-NEXT: addi.d $sp, $sp, 16
|
|
; LA64D-LP64S-NEXT: ret
|
|
;
|
|
; LA64D-LP64D-LABEL: callee_half_on_stack:
|
|
; LA64D-LP64D: # %bb.0:
|
|
; LA64D-LP64D-NEXT: addi.d $sp, $sp, -16
|
|
; LA64D-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
|
|
; LA64D-LP64D-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
|
|
; LA64D-LP64D-NEXT: ld.hu $a0, $sp, 16
|
|
; LA64D-LP64D-NEXT: move $fp, $a7
|
|
; LA64D-LP64D-NEXT: movgr2fr.w $fa0, $a0
|
|
; LA64D-LP64D-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
|
|
; LA64D-LP64D-NEXT: jirl $ra, $ra, 0
|
|
; LA64D-LP64D-NEXT: ftintrz.l.s $fa0, $fa0
|
|
; LA64D-LP64D-NEXT: movfr2gr.d $a0, $fa0
|
|
; LA64D-LP64D-NEXT: add.w $a0, $fp, $a0
|
|
; LA64D-LP64D-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
|
|
; LA64D-LP64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
|
|
; LA64D-LP64D-NEXT: addi.d $sp, $sp, 16
|
|
; LA64D-LP64D-NEXT: ret
|
|
%1 = fptosi half %q to i32
|
|
%2 = add i32 %h, %1
|
|
ret i32 %2
|
|
}
define i32 @caller_half_on_stack() nounwind {
|
|
; LA32S-LABEL: caller_half_on_stack:
|
|
; LA32S: # %bb.0:
|
|
; LA32S-NEXT: addi.w $sp, $sp, -48
|
|
; LA32S-NEXT: st.w $ra, $sp, 44 # 4-byte Folded Spill
|
|
; LA32S-NEXT: lu12i.w $a0, 4
|
|
; LA32S-NEXT: ori $a1, $a0, 3200
|
|
; LA32S-NEXT: st.w $a1, $sp, 32
|
|
; LA32S-NEXT: ori $a1, $a0, 3136
|
|
; LA32S-NEXT: st.w $a1, $sp, 28
|
|
; LA32S-NEXT: ori $a1, $a0, 3072
|
|
; LA32S-NEXT: st.w $a1, $sp, 24
|
|
; LA32S-NEXT: ori $a1, $a0, 2944
|
|
; LA32S-NEXT: st.w $a1, $sp, 20
|
|
; LA32S-NEXT: ori $a1, $a0, 2816
|
|
; LA32S-NEXT: st.w $a1, $sp, 16
|
|
; LA32S-NEXT: ori $a1, $a0, 2688
|
|
; LA32S-NEXT: st.w $a1, $sp, 12
|
|
; LA32S-NEXT: ori $a1, $a0, 2560
|
|
; LA32S-NEXT: st.w $a1, $sp, 8
|
|
; LA32S-NEXT: ori $a1, $a0, 2432
|
|
; LA32S-NEXT: st.w $a1, $sp, 4
|
|
; LA32S-NEXT: ori $t0, $a0, 2304
|
|
; LA32S-NEXT: ori $a0, $zero, 1
|
|
; LA32S-NEXT: ori $a1, $zero, 2
|
|
; LA32S-NEXT: ori $a2, $zero, 3
|
|
; LA32S-NEXT: ori $a3, $zero, 4
|
|
; LA32S-NEXT: ori $a4, $zero, 5
|
|
; LA32S-NEXT: ori $a5, $zero, 6
|
|
; LA32S-NEXT: ori $a6, $zero, 7
|
|
; LA32S-NEXT: ori $a7, $zero, 8
|
|
; LA32S-NEXT: st.w $t0, $sp, 0
|
|
; LA32S-NEXT: bl callee_half_on_stack
|
|
; LA32S-NEXT: ld.w $ra, $sp, 44 # 4-byte Folded Reload
|
|
; LA32S-NEXT: addi.w $sp, $sp, 48
|
|
; LA32S-NEXT: ret
|
|
;
|
|
; LA32F-ILP32S-LABEL: caller_half_on_stack:
|
|
; LA32F-ILP32S: # %bb.0:
|
|
; LA32F-ILP32S-NEXT: addi.w $sp, $sp, -48
|
|
; LA32F-ILP32S-NEXT: st.w $ra, $sp, 44 # 4-byte Folded Spill
|
|
; LA32F-ILP32S-NEXT: lu12i.w $a0, -12
|
|
; LA32F-ILP32S-NEXT: ori $a1, $a0, 3200
|
|
; LA32F-ILP32S-NEXT: st.w $a1, $sp, 32
|
|
; LA32F-ILP32S-NEXT: ori $a1, $a0, 3136
|
|
; LA32F-ILP32S-NEXT: st.w $a1, $sp, 28
|
|
; LA32F-ILP32S-NEXT: ori $a1, $a0, 3072
|
|
; LA32F-ILP32S-NEXT: st.w $a1, $sp, 24
|
|
; LA32F-ILP32S-NEXT: ori $a1, $a0, 2944
|
|
; LA32F-ILP32S-NEXT: st.w $a1, $sp, 20
|
|
; LA32F-ILP32S-NEXT: ori $a1, $a0, 2816
|
|
; LA32F-ILP32S-NEXT: st.w $a1, $sp, 16
|
|
; LA32F-ILP32S-NEXT: ori $a1, $a0, 2688
|
|
; LA32F-ILP32S-NEXT: st.w $a1, $sp, 12
|
|
; LA32F-ILP32S-NEXT: ori $a1, $a0, 2560
|
|
; LA32F-ILP32S-NEXT: st.w $a1, $sp, 8
|
|
; LA32F-ILP32S-NEXT: ori $a1, $a0, 2432
|
|
; LA32F-ILP32S-NEXT: st.w $a1, $sp, 4
|
|
; LA32F-ILP32S-NEXT: ori $t0, $a0, 2304
|
|
; LA32F-ILP32S-NEXT: ori $a0, $zero, 1
|
|
; LA32F-ILP32S-NEXT: ori $a1, $zero, 2
|
|
; LA32F-ILP32S-NEXT: ori $a2, $zero, 3
|
|
; LA32F-ILP32S-NEXT: ori $a3, $zero, 4
|
|
; LA32F-ILP32S-NEXT: ori $a4, $zero, 5
|
|
; LA32F-ILP32S-NEXT: ori $a5, $zero, 6
|
|
; LA32F-ILP32S-NEXT: ori $a6, $zero, 7
|
|
; LA32F-ILP32S-NEXT: ori $a7, $zero, 8
|
|
; LA32F-ILP32S-NEXT: st.w $t0, $sp, 0
|
|
; LA32F-ILP32S-NEXT: bl callee_half_on_stack
|
|
; LA32F-ILP32S-NEXT: ld.w $ra, $sp, 44 # 4-byte Folded Reload
|
|
; LA32F-ILP32S-NEXT: addi.w $sp, $sp, 48
|
|
; LA32F-ILP32S-NEXT: ret
|
|
;
|
|
; LA32F-ILP32D-LABEL: caller_half_on_stack:
|
|
; LA32F-ILP32D: # %bb.0:
|
|
; LA32F-ILP32D-NEXT: addi.w $sp, $sp, -16
|
|
; LA32F-ILP32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32F-ILP32D-NEXT: lu12i.w $a0, -12
|
|
; LA32F-ILP32D-NEXT: ori $t0, $a0, 3200
|
|
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_0)
|
|
; LA32F-ILP32D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
|
|
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_1)
|
|
; LA32F-ILP32D-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
|
|
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_2)
|
|
; LA32F-ILP32D-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
|
|
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_3)
|
|
; LA32F-ILP32D-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
|
|
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_4)
|
|
; LA32F-ILP32D-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
|
|
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_5)
|
|
; LA32F-ILP32D-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
|
|
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_6)
|
|
; LA32F-ILP32D-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
|
|
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_7)
|
|
; LA32F-ILP32D-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
|
|
; LA32F-ILP32D-NEXT: ori $a0, $zero, 1
|
|
; LA32F-ILP32D-NEXT: ori $a1, $zero, 2
|
|
; LA32F-ILP32D-NEXT: ori $a2, $zero, 3
|
|
; LA32F-ILP32D-NEXT: ori $a3, $zero, 4
|
|
; LA32F-ILP32D-NEXT: ori $a4, $zero, 5
|
|
; LA32F-ILP32D-NEXT: ori $a5, $zero, 6
|
|
; LA32F-ILP32D-NEXT: ori $a6, $zero, 7
|
|
; LA32F-ILP32D-NEXT: ori $a7, $zero, 8
|
|
; LA32F-ILP32D-NEXT: st.w $t0, $sp, 0
|
|
; LA32F-ILP32D-NEXT: bl callee_half_on_stack
|
|
; LA32F-ILP32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32F-ILP32D-NEXT: addi.w $sp, $sp, 16
|
|
; LA32F-ILP32D-NEXT: ret
|
|
;
|
|
; LA32D-ILP32S-LABEL: caller_half_on_stack:
|
|
; LA32D-ILP32S: # %bb.0:
|
|
; LA32D-ILP32S-NEXT: addi.w $sp, $sp, -48
|
|
; LA32D-ILP32S-NEXT: st.w $ra, $sp, 44 # 4-byte Folded Spill
|
|
; LA32D-ILP32S-NEXT: lu12i.w $a0, -12
|
|
; LA32D-ILP32S-NEXT: ori $a1, $a0, 3200
|
|
; LA32D-ILP32S-NEXT: st.w $a1, $sp, 32
|
|
; LA32D-ILP32S-NEXT: ori $a1, $a0, 3136
|
|
; LA32D-ILP32S-NEXT: st.w $a1, $sp, 28
|
|
; LA32D-ILP32S-NEXT: ori $a1, $a0, 3072
|
|
; LA32D-ILP32S-NEXT: st.w $a1, $sp, 24
|
|
; LA32D-ILP32S-NEXT: ori $a1, $a0, 2944
|
|
; LA32D-ILP32S-NEXT: st.w $a1, $sp, 20
|
|
; LA32D-ILP32S-NEXT: ori $a1, $a0, 2816
|
|
; LA32D-ILP32S-NEXT: st.w $a1, $sp, 16
|
|
; LA32D-ILP32S-NEXT: ori $a1, $a0, 2688
|
|
; LA32D-ILP32S-NEXT: st.w $a1, $sp, 12
|
|
; LA32D-ILP32S-NEXT: ori $a1, $a0, 2560
|
|
; LA32D-ILP32S-NEXT: st.w $a1, $sp, 8
|
|
; LA32D-ILP32S-NEXT: ori $a1, $a0, 2432
|
|
; LA32D-ILP32S-NEXT: st.w $a1, $sp, 4
|
|
; LA32D-ILP32S-NEXT: ori $t0, $a0, 2304
|
|
; LA32D-ILP32S-NEXT: ori $a0, $zero, 1
|
|
; LA32D-ILP32S-NEXT: ori $a1, $zero, 2
|
|
; LA32D-ILP32S-NEXT: ori $a2, $zero, 3
|
|
; LA32D-ILP32S-NEXT: ori $a3, $zero, 4
|
|
; LA32D-ILP32S-NEXT: ori $a4, $zero, 5
|
|
; LA32D-ILP32S-NEXT: ori $a5, $zero, 6
|
|
; LA32D-ILP32S-NEXT: ori $a6, $zero, 7
|
|
; LA32D-ILP32S-NEXT: ori $a7, $zero, 8
|
|
; LA32D-ILP32S-NEXT: st.w $t0, $sp, 0
|
|
; LA32D-ILP32S-NEXT: bl callee_half_on_stack
|
|
; LA32D-ILP32S-NEXT: ld.w $ra, $sp, 44 # 4-byte Folded Reload
|
|
; LA32D-ILP32S-NEXT: addi.w $sp, $sp, 48
|
|
; LA32D-ILP32S-NEXT: ret
|
|
;
|
|
; LA32D-ILP32D-LABEL: caller_half_on_stack:
|
|
; LA32D-ILP32D: # %bb.0:
|
|
; LA32D-ILP32D-NEXT: addi.w $sp, $sp, -16
|
|
; LA32D-ILP32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
|
|
; LA32D-ILP32D-NEXT: lu12i.w $a0, -12
|
|
; LA32D-ILP32D-NEXT: ori $t0, $a0, 3200
|
|
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_0)
|
|
; LA32D-ILP32D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
|
|
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_1)
|
|
; LA32D-ILP32D-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
|
|
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_2)
|
|
; LA32D-ILP32D-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
|
|
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_3)
|
|
; LA32D-ILP32D-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
|
|
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_4)
|
|
; LA32D-ILP32D-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
|
|
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_5)
|
|
; LA32D-ILP32D-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
|
|
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_6)
|
|
; LA32D-ILP32D-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
|
|
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_7)
|
|
; LA32D-ILP32D-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
|
|
; LA32D-ILP32D-NEXT: ori $a0, $zero, 1
|
|
; LA32D-ILP32D-NEXT: ori $a1, $zero, 2
|
|
; LA32D-ILP32D-NEXT: ori $a2, $zero, 3
|
|
; LA32D-ILP32D-NEXT: ori $a3, $zero, 4
|
|
; LA32D-ILP32D-NEXT: ori $a4, $zero, 5
|
|
; LA32D-ILP32D-NEXT: ori $a5, $zero, 6
|
|
; LA32D-ILP32D-NEXT: ori $a6, $zero, 7
|
|
; LA32D-ILP32D-NEXT: ori $a7, $zero, 8
|
|
; LA32D-ILP32D-NEXT: st.w $t0, $sp, 0
|
|
; LA32D-ILP32D-NEXT: bl callee_half_on_stack
|
|
; LA32D-ILP32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
|
|
; LA32D-ILP32D-NEXT: addi.w $sp, $sp, 16
|
|
; LA32D-ILP32D-NEXT: ret
|
|
;
|
|
; LA64S-LABEL: caller_half_on_stack:
; LA64S: # %bb.0:
; LA64S-NEXT: addi.d $sp, $sp, -16
; LA64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64S-NEXT: lu12i.w $a0, -12
; LA64S-NEXT: ori $t0, $a0, 3200
; LA64S-NEXT: lu32i.d $t0, 0
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_0)
; LA64S-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_1)
; LA64S-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_2)
; LA64S-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_3)
; LA64S-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_4)
; LA64S-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_5)
; LA64S-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_6)
; LA64S-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_7)
; LA64S-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
; LA64S-NEXT: ori $a0, $zero, 1
; LA64S-NEXT: ori $a1, $zero, 2
; LA64S-NEXT: ori $a2, $zero, 3
; LA64S-NEXT: ori $a3, $zero, 4
; LA64S-NEXT: ori $a4, $zero, 5
; LA64S-NEXT: ori $a5, $zero, 6
; LA64S-NEXT: ori $a6, $zero, 7
; LA64S-NEXT: ori $a7, $zero, 8
; LA64S-NEXT: st.w $t0, $sp, 0
; LA64S-NEXT: pcaddu18i $ra, %call36(callee_half_on_stack)
; LA64S-NEXT: jirl $ra, $ra, 0
; LA64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64S-NEXT: addi.d $sp, $sp, 16
; LA64S-NEXT: ret
;
; LA64F-LP64S-LABEL: caller_half_on_stack:
; LA64F-LP64S: # %bb.0:
; LA64F-LP64S-NEXT: addi.d $sp, $sp, -96
; LA64F-LP64S-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; LA64F-LP64S-NEXT: lu12i.w $a0, -12
; LA64F-LP64S-NEXT: ori $a1, $a0, 3200
; LA64F-LP64S-NEXT: lu32i.d $a1, 0
; LA64F-LP64S-NEXT: st.w $a1, $sp, 64
; LA64F-LP64S-NEXT: ori $a1, $a0, 3136
; LA64F-LP64S-NEXT: lu32i.d $a1, 0
; LA64F-LP64S-NEXT: st.w $a1, $sp, 56
; LA64F-LP64S-NEXT: ori $a1, $a0, 3072
; LA64F-LP64S-NEXT: lu32i.d $a1, 0
; LA64F-LP64S-NEXT: st.w $a1, $sp, 48
; LA64F-LP64S-NEXT: ori $a1, $a0, 2944
; LA64F-LP64S-NEXT: lu32i.d $a1, 0
; LA64F-LP64S-NEXT: st.w $a1, $sp, 40
; LA64F-LP64S-NEXT: ori $a1, $a0, 2816
; LA64F-LP64S-NEXT: lu32i.d $a1, 0
; LA64F-LP64S-NEXT: st.w $a1, $sp, 32
; LA64F-LP64S-NEXT: ori $a1, $a0, 2688
; LA64F-LP64S-NEXT: lu32i.d $a1, 0
; LA64F-LP64S-NEXT: st.w $a1, $sp, 24
; LA64F-LP64S-NEXT: ori $a1, $a0, 2560
; LA64F-LP64S-NEXT: lu32i.d $a1, 0
; LA64F-LP64S-NEXT: st.w $a1, $sp, 16
; LA64F-LP64S-NEXT: ori $a1, $a0, 2432
; LA64F-LP64S-NEXT: lu32i.d $a1, 0
; LA64F-LP64S-NEXT: st.w $a1, $sp, 8
; LA64F-LP64S-NEXT: ori $t0, $a0, 2304
; LA64F-LP64S-NEXT: lu32i.d $t0, 0
; LA64F-LP64S-NEXT: ori $a0, $zero, 1
; LA64F-LP64S-NEXT: ori $a1, $zero, 2
; LA64F-LP64S-NEXT: ori $a2, $zero, 3
; LA64F-LP64S-NEXT: ori $a3, $zero, 4
; LA64F-LP64S-NEXT: ori $a4, $zero, 5
; LA64F-LP64S-NEXT: ori $a5, $zero, 6
; LA64F-LP64S-NEXT: ori $a6, $zero, 7
; LA64F-LP64S-NEXT: ori $a7, $zero, 8
; LA64F-LP64S-NEXT: st.w $t0, $sp, 0
; LA64F-LP64S-NEXT: pcaddu18i $ra, %call36(callee_half_on_stack)
; LA64F-LP64S-NEXT: jirl $ra, $ra, 0
; LA64F-LP64S-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; LA64F-LP64S-NEXT: addi.d $sp, $sp, 96
; LA64F-LP64S-NEXT: ret
;
; LA64F-LP64D-LABEL: caller_half_on_stack:
; LA64F-LP64D: # %bb.0:
; LA64F-LP64D-NEXT: addi.d $sp, $sp, -16
; LA64F-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64F-LP64D-NEXT: lu12i.w $a0, -12
; LA64F-LP64D-NEXT: ori $t0, $a0, 3200
; LA64F-LP64D-NEXT: lu32i.d $t0, 0
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_0)
; LA64F-LP64D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_1)
; LA64F-LP64D-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_2)
; LA64F-LP64D-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_3)
; LA64F-LP64D-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_4)
; LA64F-LP64D-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_5)
; LA64F-LP64D-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_6)
; LA64F-LP64D-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_7)
; LA64F-LP64D-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
; LA64F-LP64D-NEXT: ori $a0, $zero, 1
; LA64F-LP64D-NEXT: ori $a1, $zero, 2
; LA64F-LP64D-NEXT: ori $a2, $zero, 3
; LA64F-LP64D-NEXT: ori $a3, $zero, 4
; LA64F-LP64D-NEXT: ori $a4, $zero, 5
; LA64F-LP64D-NEXT: ori $a5, $zero, 6
; LA64F-LP64D-NEXT: ori $a6, $zero, 7
; LA64F-LP64D-NEXT: ori $a7, $zero, 8
; LA64F-LP64D-NEXT: st.w $t0, $sp, 0
; LA64F-LP64D-NEXT: pcaddu18i $ra, %call36(callee_half_on_stack)
; LA64F-LP64D-NEXT: jirl $ra, $ra, 0
; LA64F-LP64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64F-LP64D-NEXT: addi.d $sp, $sp, 16
; LA64F-LP64D-NEXT: ret
;
; LA64D-LP64S-LABEL: caller_half_on_stack:
; LA64D-LP64S: # %bb.0:
; LA64D-LP64S-NEXT: addi.d $sp, $sp, -96
; LA64D-LP64S-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; LA64D-LP64S-NEXT: lu12i.w $a0, -12
; LA64D-LP64S-NEXT: ori $a1, $a0, 3200
; LA64D-LP64S-NEXT: lu32i.d $a1, 0
; LA64D-LP64S-NEXT: st.w $a1, $sp, 64
; LA64D-LP64S-NEXT: ori $a1, $a0, 3136
; LA64D-LP64S-NEXT: lu32i.d $a1, 0
; LA64D-LP64S-NEXT: st.w $a1, $sp, 56
; LA64D-LP64S-NEXT: ori $a1, $a0, 3072
; LA64D-LP64S-NEXT: lu32i.d $a1, 0
; LA64D-LP64S-NEXT: st.w $a1, $sp, 48
; LA64D-LP64S-NEXT: ori $a1, $a0, 2944
; LA64D-LP64S-NEXT: lu32i.d $a1, 0
; LA64D-LP64S-NEXT: st.w $a1, $sp, 40
; LA64D-LP64S-NEXT: ori $a1, $a0, 2816
; LA64D-LP64S-NEXT: lu32i.d $a1, 0
; LA64D-LP64S-NEXT: st.w $a1, $sp, 32
; LA64D-LP64S-NEXT: ori $a1, $a0, 2688
; LA64D-LP64S-NEXT: lu32i.d $a1, 0
; LA64D-LP64S-NEXT: st.w $a1, $sp, 24
; LA64D-LP64S-NEXT: ori $a1, $a0, 2560
; LA64D-LP64S-NEXT: lu32i.d $a1, 0
; LA64D-LP64S-NEXT: st.w $a1, $sp, 16
; LA64D-LP64S-NEXT: ori $a1, $a0, 2432
; LA64D-LP64S-NEXT: lu32i.d $a1, 0
; LA64D-LP64S-NEXT: st.w $a1, $sp, 8
; LA64D-LP64S-NEXT: ori $t0, $a0, 2304
; LA64D-LP64S-NEXT: lu32i.d $t0, 0
; LA64D-LP64S-NEXT: ori $a0, $zero, 1
; LA64D-LP64S-NEXT: ori $a1, $zero, 2
; LA64D-LP64S-NEXT: ori $a2, $zero, 3
; LA64D-LP64S-NEXT: ori $a3, $zero, 4
; LA64D-LP64S-NEXT: ori $a4, $zero, 5
; LA64D-LP64S-NEXT: ori $a5, $zero, 6
; LA64D-LP64S-NEXT: ori $a6, $zero, 7
; LA64D-LP64S-NEXT: ori $a7, $zero, 8
; LA64D-LP64S-NEXT: st.w $t0, $sp, 0
; LA64D-LP64S-NEXT: pcaddu18i $ra, %call36(callee_half_on_stack)
; LA64D-LP64S-NEXT: jirl $ra, $ra, 0
; LA64D-LP64S-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; LA64D-LP64S-NEXT: addi.d $sp, $sp, 96
; LA64D-LP64S-NEXT: ret
;
; LA64D-LP64D-LABEL: caller_half_on_stack:
; LA64D-LP64D: # %bb.0:
; LA64D-LP64D-NEXT: addi.d $sp, $sp, -16
; LA64D-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64D-LP64D-NEXT: lu12i.w $a0, -12
; LA64D-LP64D-NEXT: ori $t0, $a0, 3200
; LA64D-LP64D-NEXT: lu32i.d $t0, 0
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_0)
; LA64D-LP64D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI5_0)
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_1)
; LA64D-LP64D-NEXT: fld.s $fa1, $a0, %pc_lo12(.LCPI5_1)
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_2)
; LA64D-LP64D-NEXT: fld.s $fa2, $a0, %pc_lo12(.LCPI5_2)
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_3)
; LA64D-LP64D-NEXT: fld.s $fa3, $a0, %pc_lo12(.LCPI5_3)
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_4)
; LA64D-LP64D-NEXT: fld.s $fa4, $a0, %pc_lo12(.LCPI5_4)
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_5)
; LA64D-LP64D-NEXT: fld.s $fa5, $a0, %pc_lo12(.LCPI5_5)
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_6)
; LA64D-LP64D-NEXT: fld.s $fa6, $a0, %pc_lo12(.LCPI5_6)
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_7)
; LA64D-LP64D-NEXT: fld.s $fa7, $a0, %pc_lo12(.LCPI5_7)
; LA64D-LP64D-NEXT: ori $a0, $zero, 1
; LA64D-LP64D-NEXT: ori $a1, $zero, 2
; LA64D-LP64D-NEXT: ori $a2, $zero, 3
; LA64D-LP64D-NEXT: ori $a3, $zero, 4
; LA64D-LP64D-NEXT: ori $a4, $zero, 5
; LA64D-LP64D-NEXT: ori $a5, $zero, 6
; LA64D-LP64D-NEXT: ori $a6, $zero, 7
; LA64D-LP64D-NEXT: ori $a7, $zero, 8
; LA64D-LP64D-NEXT: st.w $t0, $sp, 0
; LA64D-LP64D-NEXT: pcaddu18i $ra, %call36(callee_half_on_stack)
; LA64D-LP64D-NEXT: jirl $ra, $ra, 0
; LA64D-LP64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64D-LP64D-NEXT: addi.d $sp, $sp, 16
; LA64D-LP64D-NEXT: ret
  %1 = call i32 @callee_half_on_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, half 10.0, half 11.0, half 12.0, half 13.0, half 14.0, half 15.0, half 16.0, half 17.0, half 18.0)
  ret i32 %1
}

define half @callee_half_ret() nounwind {
; LA32S-LABEL: callee_half_ret:
; LA32S: # %bb.0:
; LA32S-NEXT: lu12i.w $a0, 3
; LA32S-NEXT: ori $a0, $a0, 3072
; LA32S-NEXT: ret
;
; LA32F-ILP32S-LABEL: callee_half_ret:
; LA32F-ILP32S: # %bb.0:
; LA32F-ILP32S-NEXT: lu12i.w $a0, -13
; LA32F-ILP32S-NEXT: ori $a0, $a0, 3072
; LA32F-ILP32S-NEXT: ret
;
; LA32F-ILP32D-LABEL: callee_half_ret:
; LA32F-ILP32D: # %bb.0:
; LA32F-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_0)
; LA32F-ILP32D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
; LA32F-ILP32D-NEXT: ret
;
; LA32D-ILP32S-LABEL: callee_half_ret:
; LA32D-ILP32S: # %bb.0:
; LA32D-ILP32S-NEXT: lu12i.w $a0, -13
; LA32D-ILP32S-NEXT: ori $a0, $a0, 3072
; LA32D-ILP32S-NEXT: ret
;
; LA32D-ILP32D-LABEL: callee_half_ret:
; LA32D-ILP32D: # %bb.0:
; LA32D-ILP32D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_0)
; LA32D-ILP32D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
; LA32D-ILP32D-NEXT: ret
;
; LA64S-LABEL: callee_half_ret:
; LA64S: # %bb.0:
; LA64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_0)
; LA64S-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
; LA64S-NEXT: ret
;
; LA64F-LP64S-LABEL: callee_half_ret:
; LA64F-LP64S: # %bb.0:
; LA64F-LP64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_0)
; LA64F-LP64S-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
; LA64F-LP64S-NEXT: movfr2gr.s $a0, $fa0
; LA64F-LP64S-NEXT: ret
;
; LA64F-LP64D-LABEL: callee_half_ret:
; LA64F-LP64D: # %bb.0:
; LA64F-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_0)
; LA64F-LP64D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
; LA64F-LP64D-NEXT: ret
;
; LA64D-LP64S-LABEL: callee_half_ret:
; LA64D-LP64S: # %bb.0:
; LA64D-LP64S-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_0)
; LA64D-LP64S-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
; LA64D-LP64S-NEXT: movfr2gr.s $a0, $fa0
; LA64D-LP64S-NEXT: ret
;
; LA64D-LP64D-LABEL: callee_half_ret:
; LA64D-LP64D: # %bb.0:
; LA64D-LP64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_0)
; LA64D-LP64D-NEXT: fld.s $fa0, $a0, %pc_lo12(.LCPI6_0)
; LA64D-LP64D-NEXT: ret
  ret half 1.0
}

define i32 @caller_half_ret() nounwind {
; LA32S-LABEL: caller_half_ret:
; LA32S: # %bb.0:
; LA32S-NEXT: addi.w $sp, $sp, -16
; LA32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32S-NEXT: bl callee_half_ret
; LA32S-NEXT: lu12i.w $a1, 15
; LA32S-NEXT: ori $a1, $a1, 4095
; LA32S-NEXT: and $a0, $a0, $a1
; LA32S-NEXT: bl __extendhfsf2
; LA32S-NEXT: bl __fixsfsi
; LA32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32S-NEXT: addi.w $sp, $sp, 16
; LA32S-NEXT: ret
;
; LA32F-ILP32S-LABEL: caller_half_ret:
; LA32F-ILP32S: # %bb.0:
; LA32F-ILP32S-NEXT: addi.w $sp, $sp, -16
; LA32F-ILP32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-ILP32S-NEXT: bl callee_half_ret
; LA32F-ILP32S-NEXT: bl __extendhfsf2
; LA32F-ILP32S-NEXT: movgr2fr.w $fa0, $a0
; LA32F-ILP32S-NEXT: ftintrz.w.s $fa0, $fa0
; LA32F-ILP32S-NEXT: movfr2gr.s $a0, $fa0
; LA32F-ILP32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-ILP32S-NEXT: addi.w $sp, $sp, 16
; LA32F-ILP32S-NEXT: ret
;
; LA32F-ILP32D-LABEL: caller_half_ret:
; LA32F-ILP32D: # %bb.0:
; LA32F-ILP32D-NEXT: addi.w $sp, $sp, -16
; LA32F-ILP32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-ILP32D-NEXT: bl callee_half_ret
; LA32F-ILP32D-NEXT: bl __extendhfsf2
; LA32F-ILP32D-NEXT: ftintrz.w.s $fa0, $fa0
; LA32F-ILP32D-NEXT: movfr2gr.s $a0, $fa0
; LA32F-ILP32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-ILP32D-NEXT: addi.w $sp, $sp, 16
; LA32F-ILP32D-NEXT: ret
;
; LA32D-ILP32S-LABEL: caller_half_ret:
; LA32D-ILP32S: # %bb.0:
; LA32D-ILP32S-NEXT: addi.w $sp, $sp, -16
; LA32D-ILP32S-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-ILP32S-NEXT: bl callee_half_ret
; LA32D-ILP32S-NEXT: bl __extendhfsf2
; LA32D-ILP32S-NEXT: movgr2fr.w $fa0, $a0
; LA32D-ILP32S-NEXT: ftintrz.w.s $fa0, $fa0
; LA32D-ILP32S-NEXT: movfr2gr.s $a0, $fa0
; LA32D-ILP32S-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-ILP32S-NEXT: addi.w $sp, $sp, 16
; LA32D-ILP32S-NEXT: ret
;
; LA32D-ILP32D-LABEL: caller_half_ret:
; LA32D-ILP32D: # %bb.0:
; LA32D-ILP32D-NEXT: addi.w $sp, $sp, -16
; LA32D-ILP32D-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-ILP32D-NEXT: bl callee_half_ret
; LA32D-ILP32D-NEXT: bl __extendhfsf2
; LA32D-ILP32D-NEXT: ftintrz.w.s $fa0, $fa0
; LA32D-ILP32D-NEXT: movfr2gr.s $a0, $fa0
; LA32D-ILP32D-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-ILP32D-NEXT: addi.w $sp, $sp, 16
; LA32D-ILP32D-NEXT: ret
;
; LA64S-LABEL: caller_half_ret:
; LA64S: # %bb.0:
; LA64S-NEXT: addi.d $sp, $sp, -16
; LA64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64S-NEXT: pcaddu18i $ra, %call36(callee_half_ret)
; LA64S-NEXT: jirl $ra, $ra, 0
; LA64S-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
; LA64S-NEXT: jirl $ra, $ra, 0
; LA64S-NEXT: ftintrz.l.s $fa0, $fa0
; LA64S-NEXT: movfr2gr.d $a0, $fa0
; LA64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64S-NEXT: addi.d $sp, $sp, 16
; LA64S-NEXT: ret
;
; LA64F-LP64S-LABEL: caller_half_ret:
; LA64F-LP64S: # %bb.0:
; LA64F-LP64S-NEXT: addi.d $sp, $sp, -16
; LA64F-LP64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64F-LP64S-NEXT: pcaddu18i $ra, %call36(callee_half_ret)
; LA64F-LP64S-NEXT: jirl $ra, $ra, 0
; LA64F-LP64S-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
; LA64F-LP64S-NEXT: jirl $ra, $ra, 0
; LA64F-LP64S-NEXT: movgr2fr.w $fa0, $a0
; LA64F-LP64S-NEXT: ftintrz.l.s $fa0, $fa0
; LA64F-LP64S-NEXT: movfr2gr.d $a0, $fa0
; LA64F-LP64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64F-LP64S-NEXT: addi.d $sp, $sp, 16
; LA64F-LP64S-NEXT: ret
;
; LA64F-LP64D-LABEL: caller_half_ret:
; LA64F-LP64D: # %bb.0:
; LA64F-LP64D-NEXT: addi.d $sp, $sp, -16
; LA64F-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64F-LP64D-NEXT: pcaddu18i $ra, %call36(callee_half_ret)
; LA64F-LP64D-NEXT: jirl $ra, $ra, 0
; LA64F-LP64D-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
; LA64F-LP64D-NEXT: jirl $ra, $ra, 0
; LA64F-LP64D-NEXT: ftintrz.l.s $fa0, $fa0
; LA64F-LP64D-NEXT: movfr2gr.d $a0, $fa0
; LA64F-LP64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64F-LP64D-NEXT: addi.d $sp, $sp, 16
; LA64F-LP64D-NEXT: ret
;
; LA64D-LP64S-LABEL: caller_half_ret:
; LA64D-LP64S: # %bb.0:
; LA64D-LP64S-NEXT: addi.d $sp, $sp, -16
; LA64D-LP64S-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64D-LP64S-NEXT: pcaddu18i $ra, %call36(callee_half_ret)
; LA64D-LP64S-NEXT: jirl $ra, $ra, 0
; LA64D-LP64S-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
; LA64D-LP64S-NEXT: jirl $ra, $ra, 0
; LA64D-LP64S-NEXT: movgr2fr.w $fa0, $a0
; LA64D-LP64S-NEXT: ftintrz.l.s $fa0, $fa0
; LA64D-LP64S-NEXT: movfr2gr.d $a0, $fa0
; LA64D-LP64S-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64D-LP64S-NEXT: addi.d $sp, $sp, 16
; LA64D-LP64S-NEXT: ret
;
; LA64D-LP64D-LABEL: caller_half_ret:
; LA64D-LP64D: # %bb.0:
; LA64D-LP64D-NEXT: addi.d $sp, $sp, -16
; LA64D-LP64D-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64D-LP64D-NEXT: pcaddu18i $ra, %call36(callee_half_ret)
; LA64D-LP64D-NEXT: jirl $ra, $ra, 0
; LA64D-LP64D-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
; LA64D-LP64D-NEXT: jirl $ra, $ra, 0
; LA64D-LP64D-NEXT: ftintrz.l.s $fa0, $fa0
; LA64D-LP64D-NEXT: movfr2gr.d $a0, $fa0
; LA64D-LP64D-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64D-LP64D-NEXT: addi.d $sp, $sp, 16
; LA64D-LP64D-NEXT: ret
  %1 = call half @callee_half_ret()
  %2 = fptosi half %1 to i32
  ret i32 %2
}