
According to the official LoongArch reference manual, the 32-bit LoongArch is divided into two variants: the Reduced version (LA32R) and the Standard version (LA32S). LA32S extends LA32R with additional instructions, and the 64-bit version (LA64) fully includes the LA32S instruction set.

This patch introduces a new target feature `32s` for the LoongArch backend, enabling support for instructions specific to the LA32S variant.

The LA32S extension includes the following additional instructions:

- ALSL.W
- {AND,OR}N
- B{EQ,NE}Z
- BITREV.{4B,W}
- BSTR{INS,PICK}.W
- BYTEPICK.W
- CL{O,Z}.W
- CPUCFG
- CT{O,Z}.W
- EXT.W.{B,H}
- F{LD,ST}X.{D,S}
- MASK{EQ,NE}Z
- PC{ADDI,ALAU12I}
- REVB.2H
- ROTR{I}.W

Additionally, LA32R defines three new instruction aliases:

- RDCNTID.W RJ => RDTIMEL.W ZERO, RJ
- RDCNTVH.W RD => RDTIMEH.W RD, ZERO
- RDCNTVL.W RD => RDTIMEL.W RD, ZERO
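For illustration only: assuming the new feature is exposed through llc's standard `-mattr` mechanism under the name given above (the exact `+32s` spelling and the `LA32S` check prefix below are assumptions, not something shown on this page), a test targeting the LA32S variant might use a RUN line such as:

; RUN: llc --mtriple=loongarch32 -mattr=+d,+32s < %s | FileCheck %s --check-prefix=LA32S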
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 -mattr=+d < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 -mattr=+d < %s | FileCheck %s --check-prefix=LA64

define zeroext i1 @smuloi64(i64 %v1, i64 %v2, ptr %res) {
; LA32-LABEL: smuloi64:
; LA32: # %bb.0:
; LA32-NEXT: mulh.wu $a5, $a0, $a2
; LA32-NEXT: mul.w $a6, $a1, $a2
; LA32-NEXT: add.w $a5, $a6, $a5
; LA32-NEXT: sltu $a6, $a5, $a6
; LA32-NEXT: mulh.wu $a7, $a1, $a2
; LA32-NEXT: srai.w $t0, $a1, 31
; LA32-NEXT: mul.w $t0, $t0, $a2
; LA32-NEXT: add.w $a7, $a7, $t0
; LA32-NEXT: add.w $a6, $a7, $a6
; LA32-NEXT: mulh.wu $a7, $a0, $a3
; LA32-NEXT: srai.w $t0, $a3, 31
; LA32-NEXT: mul.w $t0, $a0, $t0
; LA32-NEXT: add.w $a7, $a7, $t0
; LA32-NEXT: mul.w $t0, $a0, $a3
; LA32-NEXT: add.w $a5, $t0, $a5
; LA32-NEXT: sltu $t0, $a5, $t0
; LA32-NEXT: add.w $a7, $a7, $t0
; LA32-NEXT: add.w $t0, $a6, $a7
; LA32-NEXT: sltu $t1, $t0, $a6
; LA32-NEXT: srai.w $a6, $a6, 31
; LA32-NEXT: srai.w $a7, $a7, 31
; LA32-NEXT: add.w $a6, $a6, $a7
; LA32-NEXT: add.w $a6, $a6, $t1
; LA32-NEXT: mulh.w $a7, $a1, $a3
; LA32-NEXT: add.w $a6, $a7, $a6
; LA32-NEXT: mul.w $a1, $a1, $a3
; LA32-NEXT: add.w $a3, $a1, $t0
; LA32-NEXT: sltu $a1, $a3, $a1
; LA32-NEXT: add.w $a1, $a6, $a1
; LA32-NEXT: srai.w $a6, $a5, 31
; LA32-NEXT: xor $a1, $a1, $a6
; LA32-NEXT: xor $a3, $a3, $a6
; LA32-NEXT: or $a1, $a3, $a1
; LA32-NEXT: sltu $a1, $zero, $a1
; LA32-NEXT: mul.w $a0, $a0, $a2
; LA32-NEXT: st.w $a0, $a4, 0
; LA32-NEXT: st.w $a5, $a4, 4
; LA32-NEXT: move $a0, $a1
; LA32-NEXT: ret
;
; LA64-LABEL: smuloi64:
; LA64: # %bb.0:
; LA64-NEXT: mulh.d $a3, $a0, $a1
; LA64-NEXT: mul.d $a1, $a0, $a1
; LA64-NEXT: srai.d $a0, $a1, 63
; LA64-NEXT: xor $a0, $a3, $a0
; LA64-NEXT: sltu $a0, $zero, $a0
; LA64-NEXT: st.d $a1, $a2, 0
; LA64-NEXT: ret
  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @smuloi128(i128 %v1, i128 %v2, ptr %res) {
; LA32-LABEL: smuloi128:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -48
; LA32-NEXT: .cfi_def_cfa_offset 48
; LA32-NEXT: st.w $ra, $sp, 44 # 4-byte Folded Spill
; LA32-NEXT: st.w $fp, $sp, 40 # 4-byte Folded Spill
; LA32-NEXT: st.w $s0, $sp, 36 # 4-byte Folded Spill
; LA32-NEXT: st.w $s1, $sp, 32 # 4-byte Folded Spill
; LA32-NEXT: st.w $s2, $sp, 28 # 4-byte Folded Spill
; LA32-NEXT: st.w $s3, $sp, 24 # 4-byte Folded Spill
; LA32-NEXT: st.w $s4, $sp, 20 # 4-byte Folded Spill
; LA32-NEXT: st.w $s5, $sp, 16 # 4-byte Folded Spill
; LA32-NEXT: st.w $s6, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: st.w $s7, $sp, 8 # 4-byte Folded Spill
; LA32-NEXT: st.w $s8, $sp, 4 # 4-byte Folded Spill
; LA32-NEXT: .cfi_offset 1, -4
; LA32-NEXT: .cfi_offset 22, -8
; LA32-NEXT: .cfi_offset 23, -12
; LA32-NEXT: .cfi_offset 24, -16
; LA32-NEXT: .cfi_offset 25, -20
; LA32-NEXT: .cfi_offset 26, -24
; LA32-NEXT: .cfi_offset 27, -28
; LA32-NEXT: .cfi_offset 28, -32
; LA32-NEXT: .cfi_offset 29, -36
; LA32-NEXT: .cfi_offset 30, -40
; LA32-NEXT: .cfi_offset 31, -44
; LA32-NEXT: ld.w $a5, $a1, 12
; LA32-NEXT: ld.w $a6, $a1, 8
; LA32-NEXT: ld.w $t1, $a0, 4
; LA32-NEXT: ld.w $a3, $a1, 0
; LA32-NEXT: ld.w $a7, $a0, 8
; LA32-NEXT: ld.w $t0, $a0, 12
; LA32-NEXT: ld.w $a4, $a0, 0
; LA32-NEXT: ld.w $t4, $a1, 4
; LA32-NEXT: mulh.wu $a0, $a7, $a3
; LA32-NEXT: mul.w $a1, $t0, $a3
; LA32-NEXT: add.w $a0, $a1, $a0
; LA32-NEXT: sltu $a1, $a0, $a1
; LA32-NEXT: mulh.wu $t2, $t0, $a3
; LA32-NEXT: add.w $a1, $t2, $a1
; LA32-NEXT: mul.w $t3, $a7, $t4
; LA32-NEXT: add.w $t2, $t3, $a0
; LA32-NEXT: sltu $a0, $t2, $t3
; LA32-NEXT: mulh.wu $t3, $a7, $t4
; LA32-NEXT: add.w $a0, $t3, $a0
; LA32-NEXT: add.w $t5, $a1, $a0
; LA32-NEXT: mul.w $t6, $t0, $t4
; LA32-NEXT: add.w $t7, $t6, $t5
; LA32-NEXT: srai.w $a0, $t0, 31
; LA32-NEXT: mul.w $t8, $a3, $a0
; LA32-NEXT: add.w $t3, $t7, $t8
; LA32-NEXT: sltu $fp, $t3, $t7
; LA32-NEXT: sltu $t6, $t7, $t6
; LA32-NEXT: sltu $a1, $t5, $a1
; LA32-NEXT: mulh.wu $t5, $t0, $t4
; LA32-NEXT: add.w $a1, $t5, $a1
; LA32-NEXT: add.w $a1, $a1, $t6
; LA32-NEXT: mulh.wu $t5, $a3, $a0
; LA32-NEXT: add.w $t5, $t5, $t8
; LA32-NEXT: mul.w $t6, $t4, $a0
; LA32-NEXT: add.w $t5, $t5, $t6
; LA32-NEXT: add.w $t8, $a1, $t5
; LA32-NEXT: mulh.wu $a1, $a4, $a3
; LA32-NEXT: mul.w $t5, $t1, $a3
; LA32-NEXT: add.w $a1, $t5, $a1
; LA32-NEXT: sltu $t5, $a1, $t5
; LA32-NEXT: mulh.wu $t6, $t1, $a3
; LA32-NEXT: add.w $t5, $t6, $t5
; LA32-NEXT: mul.w $t6, $a4, $t4
; LA32-NEXT: add.w $a1, $t6, $a1
; LA32-NEXT: sltu $t6, $a1, $t6
; LA32-NEXT: mulh.wu $t7, $a4, $t4
; LA32-NEXT: add.w $t6, $t7, $t6
; LA32-NEXT: add.w $t6, $t5, $t6
; LA32-NEXT: mul.w $t7, $t1, $t4
; LA32-NEXT: sltu $t5, $t6, $t5
; LA32-NEXT: add.w $t6, $t7, $t6
; LA32-NEXT: sltu $t7, $t6, $t7
; LA32-NEXT: mulh.wu $t4, $t1, $t4
; LA32-NEXT: add.w $t4, $t4, $t5
; LA32-NEXT: add.w $t4, $t4, $t7
; LA32-NEXT: add.w $t4, $t2, $t4
; LA32-NEXT: mul.w $t5, $a7, $a3
; LA32-NEXT: add.w $t6, $t5, $t6
; LA32-NEXT: sltu $t5, $t6, $t5
; LA32-NEXT: add.w $t7, $t4, $t5
; LA32-NEXT: add.w $t4, $t8, $fp
; LA32-NEXT: beq $t7, $t2, .LBB1_2
; LA32-NEXT: # %bb.1:
; LA32-NEXT: sltu $t5, $t7, $t2
; LA32-NEXT: .LBB1_2:
; LA32-NEXT: add.w $t5, $t3, $t5
; LA32-NEXT: sltu $t2, $t5, $t3
; LA32-NEXT: add.w $t4, $t4, $t2
; LA32-NEXT: mulh.wu $t2, $a4, $a6
; LA32-NEXT: mul.w $t3, $t1, $a6
; LA32-NEXT: add.w $t2, $t3, $t2
; LA32-NEXT: sltu $t3, $t2, $t3
; LA32-NEXT: mulh.wu $t8, $t1, $a6
; LA32-NEXT: add.w $s0, $t8, $t3
; LA32-NEXT: mul.w $t3, $a4, $a5
; LA32-NEXT: add.w $t8, $t3, $t2
; LA32-NEXT: sltu $t2, $t8, $t3
; LA32-NEXT: mulh.wu $t3, $a4, $a5
; LA32-NEXT: add.w $t2, $t3, $t2
; LA32-NEXT: add.w $t2, $s0, $t2
; LA32-NEXT: mul.w $s1, $t1, $a5
; LA32-NEXT: add.w $s2, $s1, $t2
; LA32-NEXT: srai.w $t3, $a5, 31
; LA32-NEXT: mul.w $s3, $t3, $a4
; LA32-NEXT: add.w $fp, $s2, $s3
; LA32-NEXT: sltu $s4, $fp, $s2
; LA32-NEXT: sltu $s1, $s2, $s1
; LA32-NEXT: sltu $t2, $t2, $s0
; LA32-NEXT: mulh.wu $s0, $t1, $a5
; LA32-NEXT: add.w $t2, $s0, $t2
; LA32-NEXT: add.w $t2, $t2, $s1
; LA32-NEXT: mul.w $t1, $t3, $t1
; LA32-NEXT: mulh.wu $s0, $t3, $a4
; LA32-NEXT: add.w $t1, $s0, $t1
; LA32-NEXT: add.w $t1, $t1, $s3
; LA32-NEXT: add.w $s0, $t2, $t1
; LA32-NEXT: add.w $t2, $t8, $t7
; LA32-NEXT: mul.w $t7, $a4, $a6
; LA32-NEXT: add.w $t1, $t7, $t6
; LA32-NEXT: sltu $t7, $t1, $t7
; LA32-NEXT: add.w $t2, $t2, $t7
; LA32-NEXT: add.w $t6, $s0, $s4
; LA32-NEXT: beq $t2, $t8, .LBB1_4
; LA32-NEXT: # %bb.3:
; LA32-NEXT: sltu $t7, $t2, $t8
; LA32-NEXT: .LBB1_4:
; LA32-NEXT: add.w $t7, $fp, $t7
; LA32-NEXT: sltu $t8, $t7, $fp
; LA32-NEXT: add.w $t8, $t6, $t8
; LA32-NEXT: add.w $t6, $t4, $t8
; LA32-NEXT: add.w $t7, $t5, $t7
; LA32-NEXT: sltu $s0, $t7, $t5
; LA32-NEXT: add.w $s4, $t6, $s0
; LA32-NEXT: mulh.wu $t5, $a7, $a6
; LA32-NEXT: mul.w $s1, $t0, $a6
; LA32-NEXT: add.w $s3, $s1, $t5
; LA32-NEXT: mul.w $fp, $a7, $a5
; LA32-NEXT: add.w $s2, $fp, $s3
; LA32-NEXT: add.w $t6, $s2, $s4
; LA32-NEXT: mul.w $s5, $a7, $a6
; LA32-NEXT: add.w $t5, $s5, $t7
; LA32-NEXT: sltu $t7, $t5, $s5
; LA32-NEXT: add.w $t6, $t6, $t7
; LA32-NEXT: beq $t6, $s2, .LBB1_6
; LA32-NEXT: # %bb.5:
; LA32-NEXT: sltu $t7, $t6, $s2
; LA32-NEXT: .LBB1_6:
; LA32-NEXT: beq $s4, $t4, .LBB1_8
; LA32-NEXT: # %bb.7:
; LA32-NEXT: sltu $s0, $s4, $t4
; LA32-NEXT: .LBB1_8:
; LA32-NEXT: srai.w $t4, $t4, 31
; LA32-NEXT: srai.w $t8, $t8, 31
; LA32-NEXT: add.w $t8, $t4, $t8
; LA32-NEXT: add.w $s0, $t8, $s0
; LA32-NEXT: sltu $s1, $s3, $s1
; LA32-NEXT: mulh.wu $s3, $t0, $a6
; LA32-NEXT: add.w $s1, $s3, $s1
; LA32-NEXT: sltu $fp, $s2, $fp
; LA32-NEXT: mulh.wu $s2, $a7, $a5
; LA32-NEXT: add.w $fp, $s2, $fp
; LA32-NEXT: add.w $fp, $s1, $fp
; LA32-NEXT: mul.w $s2, $t0, $a5
; LA32-NEXT: add.w $s3, $s2, $fp
; LA32-NEXT: mul.w $s4, $a6, $a0
; LA32-NEXT: mul.w $s5, $t3, $a7
; LA32-NEXT: add.w $s6, $s5, $s4
; LA32-NEXT: add.w $s7, $s3, $s6
; LA32-NEXT: add.w $s8, $s7, $s0
; LA32-NEXT: add.w $t7, $s8, $t7
; LA32-NEXT: sltu $ra, $t7, $s8
; LA32-NEXT: sltu $t4, $t8, $t4
; LA32-NEXT: add.w $t4, $t8, $t4
; LA32-NEXT: sltu $t8, $s0, $t8
; LA32-NEXT: add.w $t4, $t4, $t8
; LA32-NEXT: sltu $t8, $s7, $s3
; LA32-NEXT: sltu $s0, $s3, $s2
; LA32-NEXT: sltu $fp, $fp, $s1
; LA32-NEXT: mulh.wu $s1, $t0, $a5
; LA32-NEXT: add.w $fp, $s1, $fp
; LA32-NEXT: add.w $fp, $fp, $s0
; LA32-NEXT: mulh.wu $a6, $a6, $a0
; LA32-NEXT: add.w $a6, $a6, $s4
; LA32-NEXT: mul.w $a0, $a5, $a0
; LA32-NEXT: add.w $a0, $a6, $a0
; LA32-NEXT: mul.w $a5, $t3, $t0
; LA32-NEXT: mulh.wu $a6, $t3, $a7
; LA32-NEXT: add.w $a5, $a6, $a5
; LA32-NEXT: add.w $a5, $a5, $s5
; LA32-NEXT: add.w $a0, $a5, $a0
; LA32-NEXT: sltu $a5, $s6, $s5
; LA32-NEXT: add.w $a0, $a0, $a5
; LA32-NEXT: add.w $a0, $fp, $a0
; LA32-NEXT: add.w $a0, $a0, $t8
; LA32-NEXT: add.w $a0, $a0, $t4
; LA32-NEXT: sltu $a5, $s8, $s7
; LA32-NEXT: add.w $a0, $a0, $a5
; LA32-NEXT: add.w $a0, $a0, $ra
; LA32-NEXT: srai.w $a5, $t2, 31
; LA32-NEXT: xor $a0, $a0, $a5
; LA32-NEXT: xor $a6, $t6, $a5
; LA32-NEXT: or $a0, $a6, $a0
; LA32-NEXT: xor $a6, $t7, $a5
; LA32-NEXT: xor $a5, $t5, $a5
; LA32-NEXT: or $a5, $a5, $a6
; LA32-NEXT: or $a0, $a5, $a0
; LA32-NEXT: sltu $a0, $zero, $a0
; LA32-NEXT: mul.w $a3, $a4, $a3
; LA32-NEXT: st.w $a3, $a2, 0
; LA32-NEXT: st.w $a1, $a2, 4
; LA32-NEXT: st.w $t1, $a2, 8
; LA32-NEXT: st.w $t2, $a2, 12
; LA32-NEXT: ld.w $s8, $sp, 4 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s7, $sp, 8 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s6, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s5, $sp, 16 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s4, $sp, 20 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s3, $sp, 24 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s2, $sp, 28 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s1, $sp, 32 # 4-byte Folded Reload
; LA32-NEXT: ld.w $s0, $sp, 36 # 4-byte Folded Reload
; LA32-NEXT: ld.w $fp, $sp, 40 # 4-byte Folded Reload
; LA32-NEXT: ld.w $ra, $sp, 44 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 48
; LA32-NEXT: ret
;
; LA64-LABEL: smuloi128:
; LA64: # %bb.0:
; LA64-NEXT: mulh.du $a5, $a0, $a2
; LA64-NEXT: mul.d $a6, $a1, $a2
; LA64-NEXT: add.d $a5, $a6, $a5
; LA64-NEXT: sltu $a6, $a5, $a6
; LA64-NEXT: mulh.du $a7, $a1, $a2
; LA64-NEXT: srai.d $t0, $a1, 63
; LA64-NEXT: mul.d $t0, $t0, $a2
; LA64-NEXT: add.d $a7, $a7, $t0
; LA64-NEXT: add.d $a6, $a7, $a6
; LA64-NEXT: mulh.du $a7, $a0, $a3
; LA64-NEXT: srai.d $t0, $a3, 63
; LA64-NEXT: mul.d $t0, $a0, $t0
; LA64-NEXT: add.d $a7, $a7, $t0
; LA64-NEXT: mul.d $t0, $a0, $a3
; LA64-NEXT: add.d $a5, $t0, $a5
; LA64-NEXT: sltu $t0, $a5, $t0
; LA64-NEXT: add.d $a7, $a7, $t0
; LA64-NEXT: add.d $t0, $a6, $a7
; LA64-NEXT: sltu $t1, $t0, $a6
; LA64-NEXT: srai.d $a6, $a6, 63
; LA64-NEXT: srai.d $a7, $a7, 63
; LA64-NEXT: add.d $a6, $a6, $a7
; LA64-NEXT: add.d $a6, $a6, $t1
; LA64-NEXT: mulh.d $a7, $a1, $a3
; LA64-NEXT: add.d $a6, $a7, $a6
; LA64-NEXT: mul.d $a1, $a1, $a3
; LA64-NEXT: add.d $a3, $a1, $t0
; LA64-NEXT: sltu $a1, $a3, $a1
; LA64-NEXT: add.d $a1, $a6, $a1
; LA64-NEXT: srai.d $a6, $a5, 63
; LA64-NEXT: xor $a1, $a1, $a6
; LA64-NEXT: xor $a3, $a3, $a6
; LA64-NEXT: or $a1, $a3, $a1
; LA64-NEXT: sltu $a1, $zero, $a1
; LA64-NEXT: mul.d $a0, $a0, $a2
; LA64-NEXT: st.d $a0, $a4, 0
; LA64-NEXT: st.d $a5, $a4, 8
; LA64-NEXT: move $a0, $a1
; LA64-NEXT: ret
  %t = call {i128, i1} @llvm.smul.with.overflow.i128(i128 %v1, i128 %v2)
  %val = extractvalue {i128, i1} %t, 0
  %obit = extractvalue {i128, i1} %t, 1
  store i128 %val, ptr %res
  ret i1 %obit
}

declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
declare {i128, i1} @llvm.smul.with.overflow.i128(i128, i128) nounwind readnone
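As a side note on what the check lines above verify: the LA64 lowering of @smuloi64 detects signed-multiply overflow by comparing the high 64 bits of the product (mulh.d) against the sign-extension of the low 64 bits (srai.d by 63); any mismatch (xor followed by sltu against $zero) sets the overflow bit. The following minimal IR sketch expresses that same check without the intrinsic; the @smulo_check_sketch name is made up for illustration and is not part of the test:

define i1 @smulo_check_sketch(i64 %a, i64 %b) {
  ; Low 64 bits of the product (what mul.d computes).
  %lo = mul i64 %a, %b
  ; Full 128-bit signed product; its top half is what mulh.d computes.
  %a.ext = sext i64 %a to i128
  %b.ext = sext i64 %b to i128
  %wide = mul i128 %a.ext, %b.ext
  %hi.wide = lshr i128 %wide, 64
  %hi = trunc i128 %hi.wide to i64
  ; Sign-extension of the low half (the srai.d by 63 in the checks).
  %sign = ashr i64 %lo, 63
  ; Overflow iff the high half differs from that sign-extension
  ; (the xor + sltu $zero sequence in the checks).
  %ovf = icmp ne i64 %hi, %sign
  ret i1 %ovf
}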