
The logic in RISCVMatInt would previously produce lui+addiw on RV64 whenever a 32-bit integer must be materialised and the Hi20 and Lo12 parts are non-zero. However, addi can sometimes be used equivalently (whenever the sign extension behaviour of addiw would be a no-op). This patch moves to using addiw only when necessary. Although there is no advantage in terms of compressibility or performance, it has the following advantages:

* It's more consistent with logic used elsewhere in the backend. For instance, RISCVOptWInstrs will try to convert addiw to addi on the basis that it reduces test diffs vs RV32.
* It matches the lowering GCC does in its codegen path. Unlike LLVM, GCC seems to have different expansion logic for the assembler vs codegen: for codegen it will use lui+addi if possible, but expanding `li` in the assembler will always produce lui+addiw, as LLVM did prior to this commit. As someone who has been looking at a lot of gcc vs clang diffs lately, reducing unnecessary divergence is of at least some value.
* As the diff for fold-mem-offset.ll shows, we can fold memory offsets in more cases when addi is used. Memory offset folding could be taught to recognise when an addiw could be replaced with an addi, but that seems unnecessary when we can simply change the logic in RISCVMatInt.

As pointed out by @topperc during review, making this change without modifying RISCVOptWInstrs risks introducing some cases where we fail to remove a sext.w that we removed before. I've incorporated a patch based on a suggestion from Craig that avoids this, and also adds appropriate RISCVOptWInstrs test cases.

The initial patch description noted that the main motivation was to avoid unnecessary differences both for RV32/RV64 and when comparing against GCC, but that very occasionally we see a benefit from memory offset folding kicking in when it didn't before. Looking at the dynamic instruction count difference for SPEC benchmarks targeting rva22u64 shows we actually get a meaningful ~4.3% reduction in dynamic icount for 519.lbm_r. Looking at the data more closely, the codegen difference is in `LBM_performStreamCollideTRT`, which as a function accounts for ~98% of dynamically executed instructions, and the codegen diffs appear to be a knock-on effect of the address merging reducing register pressure right from function entry (for instance, we get a big reduction in dynamically executed loads in that function).

Below is the icount data (rva22u64 -O3, no LTO):

```
Benchmark         Baseline      This PR       Diff (%)
============================================================
500.perlbench_r   174116601991  174115795810   -0.00%
502.gcc_r         218903280858  218903215788   -0.00%
505.mcf_r         131208029185  131207692803   -0.00%
508.namd_r        217497594322  217497594297   -0.00%
510.parest_r      289314486153  289313577652   -0.00%
511.povray_r       30640531048   30640765701    0.00%
519.lbm_r          95897914862   91712688050   -4.36%
520.omnetpp_r     134641549722  134867015683    0.17%
523.xalancbmk_r   281462762992  281432092673   -0.01%
525.x264_r        379776121941  379535558210   -0.06%
526.blender_r     659736022025  659738387343    0.00%
531.deepsjeng_r   349122867552  349122867481   -0.00%
538.imagick_r     238558760552  238558753269   -0.00%
541.leela_r       406578560612  406385135260   -0.05%
544.nab_r         400997131674  400996765827   -0.00%
557.xz_r          130079522194  129945515709   -0.10%
```

The instcounting setup I use doesn't have good support for drilling down into functions from outside the linked executable (e.g. libc). The difference in omnetpp all seems to come from there, and does not reflect any degradation in codegen quality. I can confirm that with the current version of the PR there is no change in the number of static sext.w instructions across all the SPEC 2017 benchmarks (rva22u64 -O3).

Co-authored-by: Craig Topper <craig.topper@sifive.com>
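To make the equivalence condition concrete, below is a minimal Python sketch of the two expansions' semantics. This is not the RISCVMatInt code itself; the helper names (`sext`, `lui_addi`, `materialize_parts`) and the example constants are invented for illustration. It shows one constant where lui+addi and lui+addiw agree, and one where addiw's sign extension is genuinely needed:

```python
# Illustrative model of RV64 lui+addi vs lui+addiw; not the LLVM implementation.
MASK64 = (1 << 64) - 1

def sext(value, bits):
    """Sign-extend the low `bits` bits of value."""
    value &= (1 << bits) - 1
    return value - (1 << bits) if value & (1 << (bits - 1)) else value

def lui_addi(hi20, lo12):
    # lui writes Hi20 << 12 sign-extended to 64 bits; addi is a full 64-bit add.
    rd = sext(hi20 << 12, 32)
    return (rd + lo12) & MASK64

def lui_addiw(hi20, lo12):
    # addiw adds, then sign-extends bit 31 of the 32-bit result to 64 bits.
    rd = sext(hi20 << 12, 32)
    return sext(rd + lo12, 32) & MASK64

def materialize_parts(val32):
    # Standard constant split: Lo12 is the sign-extended low 12 bits and
    # Hi20 absorbs the resulting borrow/carry.
    lo12 = sext(val32, 12)
    hi20 = ((val32 + 0x800) >> 12) & 0xFFFFF
    return hi20, lo12

for val in (0x12345678, 0x7FFFF800):  # hypothetical example constants
    hi20, lo12 = materialize_parts(val)
    same = lui_addi(hi20, lo12) == lui_addiw(hi20, lo12)
    print(f"{val:#x}: addi equivalent to addiw: {same}")
# 0x12345678 -> True: the add never disturbs bits 63:32, so addi suffices.
# 0x7ffff800 -> False: lui yields 0xffffffff80000000 and adding -2048 crosses
# the 32-bit sign boundary, so addiw's sign extension is required.
```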
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
|
; RUN: llc < %s -mtriple=riscv32 | FileCheck %s --check-prefixes=CHECK,NOZBB,RV32I
|
|
; RUN: llc < %s -mtriple=riscv64 | FileCheck %s --check-prefixes=CHECK,NOZBB,RV64I
|
|
; RUN: llc < %s -mtriple=riscv32 -mattr=+zbb | FileCheck %s --check-prefixes=CHECK,ZBB,RV32ZBB
|
|
; RUN: llc < %s -mtriple=riscv64 -mattr=+zbb | FileCheck %s --check-prefixes=CHECK,ZBB,RV64ZBB
|
|
|
|
;
|
|
; trunc(abs(sub(zext(a),zext(b)))) -> abdu(a,b)
|
|
;
|
|
|
|
define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
|
|
; RV32I-LABEL: abd_ext_i8:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: zext.b a1, a1
|
|
; RV32I-NEXT: zext.b a0, a0
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: srai a1, a0, 31
|
|
; RV32I-NEXT: xor a0, a0, a1
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_ext_i8:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: zext.b a1, a1
|
|
; RV64I-NEXT: zext.b a0, a0
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: srai a1, a0, 63
|
|
; RV64I-NEXT: xor a0, a0, a1
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; ZBB-LABEL: abd_ext_i8:
|
|
; ZBB: # %bb.0:
|
|
; ZBB-NEXT: zext.b a1, a1
|
|
; ZBB-NEXT: zext.b a0, a0
|
|
; ZBB-NEXT: minu a2, a0, a1
|
|
; ZBB-NEXT: maxu a0, a0, a1
|
|
; ZBB-NEXT: sub a0, a0, a2
|
|
; ZBB-NEXT: ret
|
|
%aext = zext i8 %a to i64
|
|
%bext = zext i8 %b to i64
|
|
%sub = sub i64 %aext, %bext
|
|
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
|
|
%trunc = trunc i64 %abs to i8
|
|
ret i8 %trunc
|
|
}
|
|
|
|
define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
|
|
; RV32I-LABEL: abd_ext_i8_i16:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: slli a1, a1, 16
|
|
; RV32I-NEXT: srli a1, a1, 16
|
|
; RV32I-NEXT: zext.b a0, a0
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: srai a1, a0, 31
|
|
; RV32I-NEXT: xor a0, a0, a1
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_ext_i8_i16:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: slli a1, a1, 48
|
|
; RV64I-NEXT: srli a1, a1, 48
|
|
; RV64I-NEXT: zext.b a0, a0
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: srai a1, a0, 63
|
|
; RV64I-NEXT: xor a0, a0, a1
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; ZBB-LABEL: abd_ext_i8_i16:
|
|
; ZBB: # %bb.0:
|
|
; ZBB-NEXT: zext.h a1, a1
|
|
; ZBB-NEXT: zext.b a0, a0
|
|
; ZBB-NEXT: minu a2, a0, a1
|
|
; ZBB-NEXT: maxu a0, a0, a1
|
|
; ZBB-NEXT: sub a0, a0, a2
|
|
; ZBB-NEXT: ret
|
|
%aext = zext i8 %a to i64
|
|
%bext = zext i16 %b to i64
|
|
%sub = sub i64 %aext, %bext
|
|
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
|
|
%trunc = trunc i64 %abs to i8
|
|
ret i8 %trunc
|
|
}
|
|
|
|
define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
|
|
; RV32I-LABEL: abd_ext_i8_undef:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: zext.b a1, a1
|
|
; RV32I-NEXT: zext.b a0, a0
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: srai a1, a0, 31
|
|
; RV32I-NEXT: xor a0, a0, a1
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_ext_i8_undef:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: zext.b a1, a1
|
|
; RV64I-NEXT: zext.b a0, a0
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: srai a1, a0, 63
|
|
; RV64I-NEXT: xor a0, a0, a1
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; ZBB-LABEL: abd_ext_i8_undef:
|
|
; ZBB: # %bb.0:
|
|
; ZBB-NEXT: zext.b a1, a1
|
|
; ZBB-NEXT: zext.b a0, a0
|
|
; ZBB-NEXT: minu a2, a0, a1
|
|
; ZBB-NEXT: maxu a0, a0, a1
|
|
; ZBB-NEXT: sub a0, a0, a2
|
|
; ZBB-NEXT: ret
|
|
%aext = zext i8 %a to i64
|
|
%bext = zext i8 %b to i64
|
|
%sub = sub i64 %aext, %bext
|
|
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
|
|
%trunc = trunc i64 %abs to i8
|
|
ret i8 %trunc
|
|
}
|
|
|
|
define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
|
|
; RV32I-LABEL: abd_ext_i16:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: lui a2, 16
|
|
; RV32I-NEXT: addi a2, a2, -1
|
|
; RV32I-NEXT: and a1, a1, a2
|
|
; RV32I-NEXT: and a0, a0, a2
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: srai a1, a0, 31
|
|
; RV32I-NEXT: xor a0, a0, a1
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_ext_i16:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: lui a2, 16
|
|
; RV64I-NEXT: addi a2, a2, -1
|
|
; RV64I-NEXT: and a1, a1, a2
|
|
; RV64I-NEXT: and a0, a0, a2
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: srai a1, a0, 63
|
|
; RV64I-NEXT: xor a0, a0, a1
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; ZBB-LABEL: abd_ext_i16:
|
|
; ZBB: # %bb.0:
|
|
; ZBB-NEXT: zext.h a1, a1
|
|
; ZBB-NEXT: zext.h a0, a0
|
|
; ZBB-NEXT: minu a2, a0, a1
|
|
; ZBB-NEXT: maxu a0, a0, a1
|
|
; ZBB-NEXT: sub a0, a0, a2
|
|
; ZBB-NEXT: ret
|
|
%aext = zext i16 %a to i64
|
|
%bext = zext i16 %b to i64
|
|
%sub = sub i64 %aext, %bext
|
|
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
|
|
%trunc = trunc i64 %abs to i16
|
|
ret i16 %trunc
|
|
}
|
|
|
|
define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
|
|
; RV32I-LABEL: abd_ext_i16_i32:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: slli a0, a0, 16
|
|
; RV32I-NEXT: srli a0, a0, 16
|
|
; RV32I-NEXT: bltu a1, a0, .LBB4_2
|
|
; RV32I-NEXT: # %bb.1:
|
|
; RV32I-NEXT: sub a0, a1, a0
|
|
; RV32I-NEXT: ret
|
|
; RV32I-NEXT: .LBB4_2:
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_ext_i16_i32:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: slli a1, a1, 32
|
|
; RV64I-NEXT: slli a0, a0, 48
|
|
; RV64I-NEXT: srli a1, a1, 32
|
|
; RV64I-NEXT: srli a0, a0, 48
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: srai a1, a0, 63
|
|
; RV64I-NEXT: xor a0, a0, a1
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; RV32ZBB-LABEL: abd_ext_i16_i32:
|
|
; RV32ZBB: # %bb.0:
|
|
; RV32ZBB-NEXT: zext.h a0, a0
|
|
; RV32ZBB-NEXT: minu a2, a0, a1
|
|
; RV32ZBB-NEXT: maxu a0, a0, a1
|
|
; RV32ZBB-NEXT: sub a0, a0, a2
|
|
; RV32ZBB-NEXT: ret
|
|
;
|
|
; RV64ZBB-LABEL: abd_ext_i16_i32:
|
|
; RV64ZBB: # %bb.0:
|
|
; RV64ZBB-NEXT: slli a1, a1, 32
|
|
; RV64ZBB-NEXT: zext.h a0, a0
|
|
; RV64ZBB-NEXT: srli a1, a1, 32
|
|
; RV64ZBB-NEXT: minu a2, a0, a1
|
|
; RV64ZBB-NEXT: maxu a0, a0, a1
|
|
; RV64ZBB-NEXT: sub a0, a0, a2
|
|
; RV64ZBB-NEXT: ret
|
|
%aext = zext i16 %a to i64
|
|
%bext = zext i32 %b to i64
|
|
%sub = sub i64 %aext, %bext
|
|
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
|
|
%trunc = trunc i64 %abs to i16
|
|
ret i16 %trunc
|
|
}
|
|
|
|
define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
|
|
; RV32I-LABEL: abd_ext_i16_undef:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: lui a2, 16
|
|
; RV32I-NEXT: addi a2, a2, -1
|
|
; RV32I-NEXT: and a1, a1, a2
|
|
; RV32I-NEXT: and a0, a0, a2
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: srai a1, a0, 31
|
|
; RV32I-NEXT: xor a0, a0, a1
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_ext_i16_undef:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: lui a2, 16
|
|
; RV64I-NEXT: addi a2, a2, -1
|
|
; RV64I-NEXT: and a1, a1, a2
|
|
; RV64I-NEXT: and a0, a0, a2
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: srai a1, a0, 63
|
|
; RV64I-NEXT: xor a0, a0, a1
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; ZBB-LABEL: abd_ext_i16_undef:
|
|
; ZBB: # %bb.0:
|
|
; ZBB-NEXT: zext.h a1, a1
|
|
; ZBB-NEXT: zext.h a0, a0
|
|
; ZBB-NEXT: minu a2, a0, a1
|
|
; ZBB-NEXT: maxu a0, a0, a1
|
|
; ZBB-NEXT: sub a0, a0, a2
|
|
; ZBB-NEXT: ret
|
|
%aext = zext i16 %a to i64
|
|
%bext = zext i16 %b to i64
|
|
%sub = sub i64 %aext, %bext
|
|
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
|
|
%trunc = trunc i64 %abs to i16
|
|
ret i16 %trunc
|
|
}
|
|
|
|
define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
|
|
; RV32I-LABEL: abd_ext_i32:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: bltu a1, a0, .LBB6_2
|
|
; RV32I-NEXT: # %bb.1:
|
|
; RV32I-NEXT: sub a0, a1, a0
|
|
; RV32I-NEXT: ret
|
|
; RV32I-NEXT: .LBB6_2:
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_ext_i32:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: slli a1, a1, 32
|
|
; RV64I-NEXT: slli a0, a0, 32
|
|
; RV64I-NEXT: srli a1, a1, 32
|
|
; RV64I-NEXT: srli a0, a0, 32
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: srai a1, a0, 63
|
|
; RV64I-NEXT: xor a0, a0, a1
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; RV32ZBB-LABEL: abd_ext_i32:
|
|
; RV32ZBB: # %bb.0:
|
|
; RV32ZBB-NEXT: minu a2, a0, a1
|
|
; RV32ZBB-NEXT: maxu a0, a0, a1
|
|
; RV32ZBB-NEXT: sub a0, a0, a2
|
|
; RV32ZBB-NEXT: ret
|
|
;
|
|
; RV64ZBB-LABEL: abd_ext_i32:
|
|
; RV64ZBB: # %bb.0:
|
|
; RV64ZBB-NEXT: slli a1, a1, 32
|
|
; RV64ZBB-NEXT: slli a0, a0, 32
|
|
; RV64ZBB-NEXT: srli a1, a1, 32
|
|
; RV64ZBB-NEXT: srli a0, a0, 32
|
|
; RV64ZBB-NEXT: minu a2, a0, a1
|
|
; RV64ZBB-NEXT: maxu a0, a0, a1
|
|
; RV64ZBB-NEXT: sub a0, a0, a2
|
|
; RV64ZBB-NEXT: ret
|
|
%aext = zext i32 %a to i64
|
|
%bext = zext i32 %b to i64
|
|
%sub = sub i64 %aext, %bext
|
|
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
|
|
%trunc = trunc i64 %abs to i32
|
|
ret i32 %trunc
|
|
}
|
|
|
|
define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
|
|
; RV32I-LABEL: abd_ext_i32_i16:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: slli a1, a1, 16
|
|
; RV32I-NEXT: srli a1, a1, 16
|
|
; RV32I-NEXT: bltu a1, a0, .LBB7_2
|
|
; RV32I-NEXT: # %bb.1:
|
|
; RV32I-NEXT: sub a0, a1, a0
|
|
; RV32I-NEXT: ret
|
|
; RV32I-NEXT: .LBB7_2:
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_ext_i32_i16:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: slli a0, a0, 32
|
|
; RV64I-NEXT: slli a1, a1, 48
|
|
; RV64I-NEXT: srli a0, a0, 32
|
|
; RV64I-NEXT: srli a1, a1, 48
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: srai a1, a0, 63
|
|
; RV64I-NEXT: xor a0, a0, a1
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; RV32ZBB-LABEL: abd_ext_i32_i16:
|
|
; RV32ZBB: # %bb.0:
|
|
; RV32ZBB-NEXT: zext.h a1, a1
|
|
; RV32ZBB-NEXT: minu a2, a0, a1
|
|
; RV32ZBB-NEXT: maxu a0, a0, a1
|
|
; RV32ZBB-NEXT: sub a0, a0, a2
|
|
; RV32ZBB-NEXT: ret
|
|
;
|
|
; RV64ZBB-LABEL: abd_ext_i32_i16:
|
|
; RV64ZBB: # %bb.0:
|
|
; RV64ZBB-NEXT: slli a0, a0, 32
|
|
; RV64ZBB-NEXT: zext.h a1, a1
|
|
; RV64ZBB-NEXT: srli a0, a0, 32
|
|
; RV64ZBB-NEXT: minu a2, a0, a1
|
|
; RV64ZBB-NEXT: maxu a0, a0, a1
|
|
; RV64ZBB-NEXT: sub a0, a0, a2
|
|
; RV64ZBB-NEXT: ret
|
|
%aext = zext i32 %a to i64
|
|
%bext = zext i16 %b to i64
|
|
%sub = sub i64 %aext, %bext
|
|
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
|
|
%trunc = trunc i64 %abs to i32
|
|
ret i32 %trunc
|
|
}
|
|
|
|
define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
|
|
; RV32I-LABEL: abd_ext_i32_undef:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: bltu a1, a0, .LBB8_2
|
|
; RV32I-NEXT: # %bb.1:
|
|
; RV32I-NEXT: sub a0, a1, a0
|
|
; RV32I-NEXT: ret
|
|
; RV32I-NEXT: .LBB8_2:
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_ext_i32_undef:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: slli a1, a1, 32
|
|
; RV64I-NEXT: slli a0, a0, 32
|
|
; RV64I-NEXT: srli a1, a1, 32
|
|
; RV64I-NEXT: srli a0, a0, 32
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: srai a1, a0, 63
|
|
; RV64I-NEXT: xor a0, a0, a1
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; RV32ZBB-LABEL: abd_ext_i32_undef:
|
|
; RV32ZBB: # %bb.0:
|
|
; RV32ZBB-NEXT: minu a2, a0, a1
|
|
; RV32ZBB-NEXT: maxu a0, a0, a1
|
|
; RV32ZBB-NEXT: sub a0, a0, a2
|
|
; RV32ZBB-NEXT: ret
|
|
;
|
|
; RV64ZBB-LABEL: abd_ext_i32_undef:
|
|
; RV64ZBB: # %bb.0:
|
|
; RV64ZBB-NEXT: slli a1, a1, 32
|
|
; RV64ZBB-NEXT: slli a0, a0, 32
|
|
; RV64ZBB-NEXT: srli a1, a1, 32
|
|
; RV64ZBB-NEXT: srli a0, a0, 32
|
|
; RV64ZBB-NEXT: minu a2, a0, a1
|
|
; RV64ZBB-NEXT: maxu a0, a0, a1
|
|
; RV64ZBB-NEXT: sub a0, a0, a2
|
|
; RV64ZBB-NEXT: ret
|
|
%aext = zext i32 %a to i64
|
|
%bext = zext i32 %b to i64
|
|
%sub = sub i64 %aext, %bext
|
|
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
|
|
%trunc = trunc i64 %abs to i32
|
|
ret i32 %trunc
|
|
}
|
|
|
|
define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
|
|
; RV32I-LABEL: abd_ext_i64:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: sltu a4, a0, a2
|
|
; RV32I-NEXT: sub a3, a1, a3
|
|
; RV32I-NEXT: sub a3, a3, a4
|
|
; RV32I-NEXT: sub a2, a0, a2
|
|
; RV32I-NEXT: beq a3, a1, .LBB9_2
|
|
; RV32I-NEXT: # %bb.1:
|
|
; RV32I-NEXT: sltu a0, a1, a3
|
|
; RV32I-NEXT: j .LBB9_3
|
|
; RV32I-NEXT: .LBB9_2:
|
|
; RV32I-NEXT: sltu a0, a0, a2
|
|
; RV32I-NEXT: .LBB9_3:
|
|
; RV32I-NEXT: neg a1, a0
|
|
; RV32I-NEXT: xor a2, a2, a1
|
|
; RV32I-NEXT: xor a3, a3, a1
|
|
; RV32I-NEXT: sltu a1, a2, a1
|
|
; RV32I-NEXT: add a3, a3, a0
|
|
; RV32I-NEXT: sub a1, a3, a1
|
|
; RV32I-NEXT: add a0, a2, a0
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_ext_i64:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: bltu a1, a0, .LBB9_2
|
|
; RV64I-NEXT: # %bb.1:
|
|
; RV64I-NEXT: sub a0, a1, a0
|
|
; RV64I-NEXT: ret
|
|
; RV64I-NEXT: .LBB9_2:
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; RV32ZBB-LABEL: abd_ext_i64:
|
|
; RV32ZBB: # %bb.0:
|
|
; RV32ZBB-NEXT: sltu a4, a0, a2
|
|
; RV32ZBB-NEXT: sub a3, a1, a3
|
|
; RV32ZBB-NEXT: sub a3, a3, a4
|
|
; RV32ZBB-NEXT: sub a2, a0, a2
|
|
; RV32ZBB-NEXT: beq a3, a1, .LBB9_2
|
|
; RV32ZBB-NEXT: # %bb.1:
|
|
; RV32ZBB-NEXT: sltu a0, a1, a3
|
|
; RV32ZBB-NEXT: j .LBB9_3
|
|
; RV32ZBB-NEXT: .LBB9_2:
|
|
; RV32ZBB-NEXT: sltu a0, a0, a2
|
|
; RV32ZBB-NEXT: .LBB9_3:
|
|
; RV32ZBB-NEXT: neg a1, a0
|
|
; RV32ZBB-NEXT: xor a2, a2, a1
|
|
; RV32ZBB-NEXT: xor a3, a3, a1
|
|
; RV32ZBB-NEXT: sltu a1, a2, a1
|
|
; RV32ZBB-NEXT: add a3, a3, a0
|
|
; RV32ZBB-NEXT: sub a1, a3, a1
|
|
; RV32ZBB-NEXT: add a0, a2, a0
|
|
; RV32ZBB-NEXT: ret
|
|
;
|
|
; RV64ZBB-LABEL: abd_ext_i64:
|
|
; RV64ZBB: # %bb.0:
|
|
; RV64ZBB-NEXT: minu a2, a0, a1
|
|
; RV64ZBB-NEXT: maxu a0, a0, a1
|
|
; RV64ZBB-NEXT: sub a0, a0, a2
|
|
; RV64ZBB-NEXT: ret
|
|
%aext = zext i64 %a to i128
|
|
%bext = zext i64 %b to i128
|
|
%sub = sub i128 %aext, %bext
|
|
%abs = call i128 @llvm.abs.i128(i128 %sub, i1 false)
|
|
%trunc = trunc i128 %abs to i64
|
|
ret i64 %trunc
|
|
}
|
|
|
|
define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
|
|
; RV32I-LABEL: abd_ext_i64_undef:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: sltu a4, a0, a2
|
|
; RV32I-NEXT: sub a3, a1, a3
|
|
; RV32I-NEXT: sub a3, a3, a4
|
|
; RV32I-NEXT: sub a2, a0, a2
|
|
; RV32I-NEXT: beq a3, a1, .LBB10_2
|
|
; RV32I-NEXT: # %bb.1:
|
|
; RV32I-NEXT: sltu a0, a1, a3
|
|
; RV32I-NEXT: j .LBB10_3
|
|
; RV32I-NEXT: .LBB10_2:
|
|
; RV32I-NEXT: sltu a0, a0, a2
|
|
; RV32I-NEXT: .LBB10_3:
|
|
; RV32I-NEXT: neg a1, a0
|
|
; RV32I-NEXT: xor a2, a2, a1
|
|
; RV32I-NEXT: xor a3, a3, a1
|
|
; RV32I-NEXT: sltu a1, a2, a1
|
|
; RV32I-NEXT: add a3, a3, a0
|
|
; RV32I-NEXT: sub a1, a3, a1
|
|
; RV32I-NEXT: add a0, a2, a0
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_ext_i64_undef:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: bltu a1, a0, .LBB10_2
|
|
; RV64I-NEXT: # %bb.1:
|
|
; RV64I-NEXT: sub a0, a1, a0
|
|
; RV64I-NEXT: ret
|
|
; RV64I-NEXT: .LBB10_2:
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; RV32ZBB-LABEL: abd_ext_i64_undef:
|
|
; RV32ZBB: # %bb.0:
|
|
; RV32ZBB-NEXT: sltu a4, a0, a2
|
|
; RV32ZBB-NEXT: sub a3, a1, a3
|
|
; RV32ZBB-NEXT: sub a3, a3, a4
|
|
; RV32ZBB-NEXT: sub a2, a0, a2
|
|
; RV32ZBB-NEXT: beq a3, a1, .LBB10_2
|
|
; RV32ZBB-NEXT: # %bb.1:
|
|
; RV32ZBB-NEXT: sltu a0, a1, a3
|
|
; RV32ZBB-NEXT: j .LBB10_3
|
|
; RV32ZBB-NEXT: .LBB10_2:
|
|
; RV32ZBB-NEXT: sltu a0, a0, a2
|
|
; RV32ZBB-NEXT: .LBB10_3:
|
|
; RV32ZBB-NEXT: neg a1, a0
|
|
; RV32ZBB-NEXT: xor a2, a2, a1
|
|
; RV32ZBB-NEXT: xor a3, a3, a1
|
|
; RV32ZBB-NEXT: sltu a1, a2, a1
|
|
; RV32ZBB-NEXT: add a3, a3, a0
|
|
; RV32ZBB-NEXT: sub a1, a3, a1
|
|
; RV32ZBB-NEXT: add a0, a2, a0
|
|
; RV32ZBB-NEXT: ret
|
|
;
|
|
; RV64ZBB-LABEL: abd_ext_i64_undef:
|
|
; RV64ZBB: # %bb.0:
|
|
; RV64ZBB-NEXT: minu a2, a0, a1
|
|
; RV64ZBB-NEXT: maxu a0, a0, a1
|
|
; RV64ZBB-NEXT: sub a0, a0, a2
|
|
; RV64ZBB-NEXT: ret
|
|
%aext = zext i64 %a to i128
|
|
%bext = zext i64 %b to i128
|
|
%sub = sub i128 %aext, %bext
|
|
%abs = call i128 @llvm.abs.i128(i128 %sub, i1 true)
|
|
%trunc = trunc i128 %abs to i64
|
|
ret i64 %trunc
|
|
}
|
|
|
|
define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind {
|
|
; RV32I-LABEL: abd_ext_i128:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: lw a3, 0(a2)
|
|
; RV32I-NEXT: lw a5, 4(a2)
|
|
; RV32I-NEXT: lw a6, 8(a2)
|
|
; RV32I-NEXT: lw a7, 12(a2)
|
|
; RV32I-NEXT: lw a2, 8(a1)
|
|
; RV32I-NEXT: lw a4, 12(a1)
|
|
; RV32I-NEXT: lw t0, 0(a1)
|
|
; RV32I-NEXT: lw a1, 4(a1)
|
|
; RV32I-NEXT: sltu t1, a2, a6
|
|
; RV32I-NEXT: sub a7, a4, a7
|
|
; RV32I-NEXT: sltu t2, t0, a3
|
|
; RV32I-NEXT: sub a7, a7, t1
|
|
; RV32I-NEXT: mv t1, t2
|
|
; RV32I-NEXT: beq a1, a5, .LBB11_2
|
|
; RV32I-NEXT: # %bb.1:
|
|
; RV32I-NEXT: sltu t1, a1, a5
|
|
; RV32I-NEXT: .LBB11_2:
|
|
; RV32I-NEXT: sub t3, a2, a6
|
|
; RV32I-NEXT: sltu a6, t3, t1
|
|
; RV32I-NEXT: sub a6, a7, a6
|
|
; RV32I-NEXT: sub a7, t3, t1
|
|
; RV32I-NEXT: beq a6, a4, .LBB11_4
|
|
; RV32I-NEXT: # %bb.3:
|
|
; RV32I-NEXT: sltu t1, a4, a6
|
|
; RV32I-NEXT: j .LBB11_5
|
|
; RV32I-NEXT: .LBB11_4:
|
|
; RV32I-NEXT: sltu t1, a2, a7
|
|
; RV32I-NEXT: .LBB11_5:
|
|
; RV32I-NEXT: sub a5, a1, a5
|
|
; RV32I-NEXT: sub a5, a5, t2
|
|
; RV32I-NEXT: sub a3, t0, a3
|
|
; RV32I-NEXT: beq a5, a1, .LBB11_7
|
|
; RV32I-NEXT: # %bb.6:
|
|
; RV32I-NEXT: sltu a1, a1, a5
|
|
; RV32I-NEXT: j .LBB11_8
|
|
; RV32I-NEXT: .LBB11_7:
|
|
; RV32I-NEXT: sltu a1, t0, a3
|
|
; RV32I-NEXT: .LBB11_8:
|
|
; RV32I-NEXT: xor a4, a6, a4
|
|
; RV32I-NEXT: xor a2, a7, a2
|
|
; RV32I-NEXT: or a2, a2, a4
|
|
; RV32I-NEXT: beqz a2, .LBB11_10
|
|
; RV32I-NEXT: # %bb.9:
|
|
; RV32I-NEXT: mv a1, t1
|
|
; RV32I-NEXT: .LBB11_10:
|
|
; RV32I-NEXT: neg t0, a1
|
|
; RV32I-NEXT: xor a2, a7, t0
|
|
; RV32I-NEXT: xor a6, a6, t0
|
|
; RV32I-NEXT: xor a4, a3, t0
|
|
; RV32I-NEXT: sltu a3, a2, t0
|
|
; RV32I-NEXT: add a7, a6, a1
|
|
; RV32I-NEXT: sltu a6, a4, t0
|
|
; RV32I-NEXT: sub a3, a7, a3
|
|
; RV32I-NEXT: xor t1, a5, t0
|
|
; RV32I-NEXT: mv a7, a6
|
|
; RV32I-NEXT: beqz a5, .LBB11_12
|
|
; RV32I-NEXT: # %bb.11:
|
|
; RV32I-NEXT: sltu a7, t1, t0
|
|
; RV32I-NEXT: .LBB11_12:
|
|
; RV32I-NEXT: add a2, a2, a1
|
|
; RV32I-NEXT: add t1, t1, a1
|
|
; RV32I-NEXT: add a1, a4, a1
|
|
; RV32I-NEXT: sltu a4, a2, a7
|
|
; RV32I-NEXT: sub a2, a2, a7
|
|
; RV32I-NEXT: sub a5, t1, a6
|
|
; RV32I-NEXT: sub a3, a3, a4
|
|
; RV32I-NEXT: sw a1, 0(a0)
|
|
; RV32I-NEXT: sw a5, 4(a0)
|
|
; RV32I-NEXT: sw a2, 8(a0)
|
|
; RV32I-NEXT: sw a3, 12(a0)
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_ext_i128:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: sltu a4, a0, a2
|
|
; RV64I-NEXT: sub a3, a1, a3
|
|
; RV64I-NEXT: sub a3, a3, a4
|
|
; RV64I-NEXT: sub a2, a0, a2
|
|
; RV64I-NEXT: beq a3, a1, .LBB11_2
|
|
; RV64I-NEXT: # %bb.1:
|
|
; RV64I-NEXT: sltu a0, a1, a3
|
|
; RV64I-NEXT: j .LBB11_3
|
|
; RV64I-NEXT: .LBB11_2:
|
|
; RV64I-NEXT: sltu a0, a0, a2
|
|
; RV64I-NEXT: .LBB11_3:
|
|
; RV64I-NEXT: neg a1, a0
|
|
; RV64I-NEXT: xor a2, a2, a1
|
|
; RV64I-NEXT: xor a3, a3, a1
|
|
; RV64I-NEXT: sltu a1, a2, a1
|
|
; RV64I-NEXT: add a3, a3, a0
|
|
; RV64I-NEXT: sub a1, a3, a1
|
|
; RV64I-NEXT: add a0, a2, a0
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; RV32ZBB-LABEL: abd_ext_i128:
|
|
; RV32ZBB: # %bb.0:
|
|
; RV32ZBB-NEXT: lw a3, 0(a2)
|
|
; RV32ZBB-NEXT: lw a5, 4(a2)
|
|
; RV32ZBB-NEXT: lw a6, 8(a2)
|
|
; RV32ZBB-NEXT: lw a7, 12(a2)
|
|
; RV32ZBB-NEXT: lw a2, 8(a1)
|
|
; RV32ZBB-NEXT: lw a4, 12(a1)
|
|
; RV32ZBB-NEXT: lw t0, 0(a1)
|
|
; RV32ZBB-NEXT: lw a1, 4(a1)
|
|
; RV32ZBB-NEXT: sltu t1, a2, a6
|
|
; RV32ZBB-NEXT: sub a7, a4, a7
|
|
; RV32ZBB-NEXT: sltu t2, t0, a3
|
|
; RV32ZBB-NEXT: sub a7, a7, t1
|
|
; RV32ZBB-NEXT: mv t1, t2
|
|
; RV32ZBB-NEXT: beq a1, a5, .LBB11_2
|
|
; RV32ZBB-NEXT: # %bb.1:
|
|
; RV32ZBB-NEXT: sltu t1, a1, a5
|
|
; RV32ZBB-NEXT: .LBB11_2:
|
|
; RV32ZBB-NEXT: sub t3, a2, a6
|
|
; RV32ZBB-NEXT: sltu a6, t3, t1
|
|
; RV32ZBB-NEXT: sub a6, a7, a6
|
|
; RV32ZBB-NEXT: sub a7, t3, t1
|
|
; RV32ZBB-NEXT: beq a6, a4, .LBB11_4
|
|
; RV32ZBB-NEXT: # %bb.3:
|
|
; RV32ZBB-NEXT: sltu t1, a4, a6
|
|
; RV32ZBB-NEXT: j .LBB11_5
|
|
; RV32ZBB-NEXT: .LBB11_4:
|
|
; RV32ZBB-NEXT: sltu t1, a2, a7
|
|
; RV32ZBB-NEXT: .LBB11_5:
|
|
; RV32ZBB-NEXT: sub a5, a1, a5
|
|
; RV32ZBB-NEXT: sub a5, a5, t2
|
|
; RV32ZBB-NEXT: sub a3, t0, a3
|
|
; RV32ZBB-NEXT: beq a5, a1, .LBB11_7
|
|
; RV32ZBB-NEXT: # %bb.6:
|
|
; RV32ZBB-NEXT: sltu a1, a1, a5
|
|
; RV32ZBB-NEXT: j .LBB11_8
|
|
; RV32ZBB-NEXT: .LBB11_7:
|
|
; RV32ZBB-NEXT: sltu a1, t0, a3
|
|
; RV32ZBB-NEXT: .LBB11_8:
|
|
; RV32ZBB-NEXT: xor a4, a6, a4
|
|
; RV32ZBB-NEXT: xor a2, a7, a2
|
|
; RV32ZBB-NEXT: or a2, a2, a4
|
|
; RV32ZBB-NEXT: beqz a2, .LBB11_10
|
|
; RV32ZBB-NEXT: # %bb.9:
|
|
; RV32ZBB-NEXT: mv a1, t1
|
|
; RV32ZBB-NEXT: .LBB11_10:
|
|
; RV32ZBB-NEXT: neg t0, a1
|
|
; RV32ZBB-NEXT: xor a2, a7, t0
|
|
; RV32ZBB-NEXT: xor a6, a6, t0
|
|
; RV32ZBB-NEXT: xor a4, a3, t0
|
|
; RV32ZBB-NEXT: sltu a3, a2, t0
|
|
; RV32ZBB-NEXT: add a7, a6, a1
|
|
; RV32ZBB-NEXT: sltu a6, a4, t0
|
|
; RV32ZBB-NEXT: sub a3, a7, a3
|
|
; RV32ZBB-NEXT: xor t1, a5, t0
|
|
; RV32ZBB-NEXT: mv a7, a6
|
|
; RV32ZBB-NEXT: beqz a5, .LBB11_12
|
|
; RV32ZBB-NEXT: # %bb.11:
|
|
; RV32ZBB-NEXT: sltu a7, t1, t0
|
|
; RV32ZBB-NEXT: .LBB11_12:
|
|
; RV32ZBB-NEXT: add a2, a2, a1
|
|
; RV32ZBB-NEXT: add t1, t1, a1
|
|
; RV32ZBB-NEXT: add a1, a4, a1
|
|
; RV32ZBB-NEXT: sltu a4, a2, a7
|
|
; RV32ZBB-NEXT: sub a2, a2, a7
|
|
; RV32ZBB-NEXT: sub a5, t1, a6
|
|
; RV32ZBB-NEXT: sub a3, a3, a4
|
|
; RV32ZBB-NEXT: sw a1, 0(a0)
|
|
; RV32ZBB-NEXT: sw a5, 4(a0)
|
|
; RV32ZBB-NEXT: sw a2, 8(a0)
|
|
; RV32ZBB-NEXT: sw a3, 12(a0)
|
|
; RV32ZBB-NEXT: ret
|
|
;
|
|
; RV64ZBB-LABEL: abd_ext_i128:
|
|
; RV64ZBB: # %bb.0:
|
|
; RV64ZBB-NEXT: sltu a4, a0, a2
|
|
; RV64ZBB-NEXT: sub a3, a1, a3
|
|
; RV64ZBB-NEXT: sub a3, a3, a4
|
|
; RV64ZBB-NEXT: sub a2, a0, a2
|
|
; RV64ZBB-NEXT: beq a3, a1, .LBB11_2
|
|
; RV64ZBB-NEXT: # %bb.1:
|
|
; RV64ZBB-NEXT: sltu a0, a1, a3
|
|
; RV64ZBB-NEXT: j .LBB11_3
|
|
; RV64ZBB-NEXT: .LBB11_2:
|
|
; RV64ZBB-NEXT: sltu a0, a0, a2
|
|
; RV64ZBB-NEXT: .LBB11_3:
|
|
; RV64ZBB-NEXT: neg a1, a0
|
|
; RV64ZBB-NEXT: xor a2, a2, a1
|
|
; RV64ZBB-NEXT: xor a3, a3, a1
|
|
; RV64ZBB-NEXT: sltu a1, a2, a1
|
|
; RV64ZBB-NEXT: add a3, a3, a0
|
|
; RV64ZBB-NEXT: sub a1, a3, a1
|
|
; RV64ZBB-NEXT: add a0, a2, a0
|
|
; RV64ZBB-NEXT: ret
|
|
%aext = zext i128 %a to i256
|
|
%bext = zext i128 %b to i256
|
|
%sub = sub i256 %aext, %bext
|
|
%abs = call i256 @llvm.abs.i256(i256 %sub, i1 false)
|
|
%trunc = trunc i256 %abs to i128
|
|
ret i128 %trunc
|
|
}
|
|
|
|
define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind {
|
|
; RV32I-LABEL: abd_ext_i128_undef:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: lw a3, 0(a2)
|
|
; RV32I-NEXT: lw a5, 4(a2)
|
|
; RV32I-NEXT: lw a6, 8(a2)
|
|
; RV32I-NEXT: lw a7, 12(a2)
|
|
; RV32I-NEXT: lw a2, 8(a1)
|
|
; RV32I-NEXT: lw a4, 12(a1)
|
|
; RV32I-NEXT: lw t0, 0(a1)
|
|
; RV32I-NEXT: lw a1, 4(a1)
|
|
; RV32I-NEXT: sltu t1, a2, a6
|
|
; RV32I-NEXT: sub a7, a4, a7
|
|
; RV32I-NEXT: sltu t2, t0, a3
|
|
; RV32I-NEXT: sub a7, a7, t1
|
|
; RV32I-NEXT: mv t1, t2
|
|
; RV32I-NEXT: beq a1, a5, .LBB12_2
|
|
; RV32I-NEXT: # %bb.1:
|
|
; RV32I-NEXT: sltu t1, a1, a5
|
|
; RV32I-NEXT: .LBB12_2:
|
|
; RV32I-NEXT: sub t3, a2, a6
|
|
; RV32I-NEXT: sltu a6, t3, t1
|
|
; RV32I-NEXT: sub a6, a7, a6
|
|
; RV32I-NEXT: sub a7, t3, t1
|
|
; RV32I-NEXT: beq a6, a4, .LBB12_4
|
|
; RV32I-NEXT: # %bb.3:
|
|
; RV32I-NEXT: sltu t1, a4, a6
|
|
; RV32I-NEXT: j .LBB12_5
|
|
; RV32I-NEXT: .LBB12_4:
|
|
; RV32I-NEXT: sltu t1, a2, a7
|
|
; RV32I-NEXT: .LBB12_5:
|
|
; RV32I-NEXT: sub a5, a1, a5
|
|
; RV32I-NEXT: sub a5, a5, t2
|
|
; RV32I-NEXT: sub a3, t0, a3
|
|
; RV32I-NEXT: beq a5, a1, .LBB12_7
|
|
; RV32I-NEXT: # %bb.6:
|
|
; RV32I-NEXT: sltu a1, a1, a5
|
|
; RV32I-NEXT: j .LBB12_8
|
|
; RV32I-NEXT: .LBB12_7:
|
|
; RV32I-NEXT: sltu a1, t0, a3
|
|
; RV32I-NEXT: .LBB12_8:
|
|
; RV32I-NEXT: xor a4, a6, a4
|
|
; RV32I-NEXT: xor a2, a7, a2
|
|
; RV32I-NEXT: or a2, a2, a4
|
|
; RV32I-NEXT: beqz a2, .LBB12_10
|
|
; RV32I-NEXT: # %bb.9:
|
|
; RV32I-NEXT: mv a1, t1
|
|
; RV32I-NEXT: .LBB12_10:
|
|
; RV32I-NEXT: neg t0, a1
|
|
; RV32I-NEXT: xor a2, a7, t0
|
|
; RV32I-NEXT: xor a6, a6, t0
|
|
; RV32I-NEXT: xor a4, a3, t0
|
|
; RV32I-NEXT: sltu a3, a2, t0
|
|
; RV32I-NEXT: add a7, a6, a1
|
|
; RV32I-NEXT: sltu a6, a4, t0
|
|
; RV32I-NEXT: sub a3, a7, a3
|
|
; RV32I-NEXT: xor t1, a5, t0
|
|
; RV32I-NEXT: mv a7, a6
|
|
; RV32I-NEXT: beqz a5, .LBB12_12
|
|
; RV32I-NEXT: # %bb.11:
|
|
; RV32I-NEXT: sltu a7, t1, t0
|
|
; RV32I-NEXT: .LBB12_12:
|
|
; RV32I-NEXT: add a2, a2, a1
|
|
; RV32I-NEXT: add t1, t1, a1
|
|
; RV32I-NEXT: add a1, a4, a1
|
|
; RV32I-NEXT: sltu a4, a2, a7
|
|
; RV32I-NEXT: sub a2, a2, a7
|
|
; RV32I-NEXT: sub a5, t1, a6
|
|
; RV32I-NEXT: sub a3, a3, a4
|
|
; RV32I-NEXT: sw a1, 0(a0)
|
|
; RV32I-NEXT: sw a5, 4(a0)
|
|
; RV32I-NEXT: sw a2, 8(a0)
|
|
; RV32I-NEXT: sw a3, 12(a0)
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_ext_i128_undef:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: sltu a4, a0, a2
|
|
; RV64I-NEXT: sub a3, a1, a3
|
|
; RV64I-NEXT: sub a3, a3, a4
|
|
; RV64I-NEXT: sub a2, a0, a2
|
|
; RV64I-NEXT: beq a3, a1, .LBB12_2
|
|
; RV64I-NEXT: # %bb.1:
|
|
; RV64I-NEXT: sltu a0, a1, a3
|
|
; RV64I-NEXT: j .LBB12_3
|
|
; RV64I-NEXT: .LBB12_2:
|
|
; RV64I-NEXT: sltu a0, a0, a2
|
|
; RV64I-NEXT: .LBB12_3:
|
|
; RV64I-NEXT: neg a1, a0
|
|
; RV64I-NEXT: xor a2, a2, a1
|
|
; RV64I-NEXT: xor a3, a3, a1
|
|
; RV64I-NEXT: sltu a1, a2, a1
|
|
; RV64I-NEXT: add a3, a3, a0
|
|
; RV64I-NEXT: sub a1, a3, a1
|
|
; RV64I-NEXT: add a0, a2, a0
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; RV32ZBB-LABEL: abd_ext_i128_undef:
|
|
; RV32ZBB: # %bb.0:
|
|
; RV32ZBB-NEXT: lw a3, 0(a2)
|
|
; RV32ZBB-NEXT: lw a5, 4(a2)
|
|
; RV32ZBB-NEXT: lw a6, 8(a2)
|
|
; RV32ZBB-NEXT: lw a7, 12(a2)
|
|
; RV32ZBB-NEXT: lw a2, 8(a1)
|
|
; RV32ZBB-NEXT: lw a4, 12(a1)
|
|
; RV32ZBB-NEXT: lw t0, 0(a1)
|
|
; RV32ZBB-NEXT: lw a1, 4(a1)
|
|
; RV32ZBB-NEXT: sltu t1, a2, a6
|
|
; RV32ZBB-NEXT: sub a7, a4, a7
|
|
; RV32ZBB-NEXT: sltu t2, t0, a3
|
|
; RV32ZBB-NEXT: sub a7, a7, t1
|
|
; RV32ZBB-NEXT: mv t1, t2
|
|
; RV32ZBB-NEXT: beq a1, a5, .LBB12_2
|
|
; RV32ZBB-NEXT: # %bb.1:
|
|
; RV32ZBB-NEXT: sltu t1, a1, a5
|
|
; RV32ZBB-NEXT: .LBB12_2:
|
|
; RV32ZBB-NEXT: sub t3, a2, a6
|
|
; RV32ZBB-NEXT: sltu a6, t3, t1
|
|
; RV32ZBB-NEXT: sub a6, a7, a6
|
|
; RV32ZBB-NEXT: sub a7, t3, t1
|
|
; RV32ZBB-NEXT: beq a6, a4, .LBB12_4
|
|
; RV32ZBB-NEXT: # %bb.3:
|
|
; RV32ZBB-NEXT: sltu t1, a4, a6
|
|
; RV32ZBB-NEXT: j .LBB12_5
|
|
; RV32ZBB-NEXT: .LBB12_4:
|
|
; RV32ZBB-NEXT: sltu t1, a2, a7
|
|
; RV32ZBB-NEXT: .LBB12_5:
|
|
; RV32ZBB-NEXT: sub a5, a1, a5
|
|
; RV32ZBB-NEXT: sub a5, a5, t2
|
|
; RV32ZBB-NEXT: sub a3, t0, a3
|
|
; RV32ZBB-NEXT: beq a5, a1, .LBB12_7
|
|
; RV32ZBB-NEXT: # %bb.6:
|
|
; RV32ZBB-NEXT: sltu a1, a1, a5
|
|
; RV32ZBB-NEXT: j .LBB12_8
|
|
; RV32ZBB-NEXT: .LBB12_7:
|
|
; RV32ZBB-NEXT: sltu a1, t0, a3
|
|
; RV32ZBB-NEXT: .LBB12_8:
|
|
; RV32ZBB-NEXT: xor a4, a6, a4
|
|
; RV32ZBB-NEXT: xor a2, a7, a2
|
|
; RV32ZBB-NEXT: or a2, a2, a4
|
|
; RV32ZBB-NEXT: beqz a2, .LBB12_10
|
|
; RV32ZBB-NEXT: # %bb.9:
|
|
; RV32ZBB-NEXT: mv a1, t1
|
|
; RV32ZBB-NEXT: .LBB12_10:
|
|
; RV32ZBB-NEXT: neg t0, a1
|
|
; RV32ZBB-NEXT: xor a2, a7, t0
|
|
; RV32ZBB-NEXT: xor a6, a6, t0
|
|
; RV32ZBB-NEXT: xor a4, a3, t0
|
|
; RV32ZBB-NEXT: sltu a3, a2, t0
|
|
; RV32ZBB-NEXT: add a7, a6, a1
|
|
; RV32ZBB-NEXT: sltu a6, a4, t0
|
|
; RV32ZBB-NEXT: sub a3, a7, a3
|
|
; RV32ZBB-NEXT: xor t1, a5, t0
|
|
; RV32ZBB-NEXT: mv a7, a6
|
|
; RV32ZBB-NEXT: beqz a5, .LBB12_12
|
|
; RV32ZBB-NEXT: # %bb.11:
|
|
; RV32ZBB-NEXT: sltu a7, t1, t0
|
|
; RV32ZBB-NEXT: .LBB12_12:
|
|
; RV32ZBB-NEXT: add a2, a2, a1
|
|
; RV32ZBB-NEXT: add t1, t1, a1
|
|
; RV32ZBB-NEXT: add a1, a4, a1
|
|
; RV32ZBB-NEXT: sltu a4, a2, a7
|
|
; RV32ZBB-NEXT: sub a2, a2, a7
|
|
; RV32ZBB-NEXT: sub a5, t1, a6
|
|
; RV32ZBB-NEXT: sub a3, a3, a4
|
|
; RV32ZBB-NEXT: sw a1, 0(a0)
|
|
; RV32ZBB-NEXT: sw a5, 4(a0)
|
|
; RV32ZBB-NEXT: sw a2, 8(a0)
|
|
; RV32ZBB-NEXT: sw a3, 12(a0)
|
|
; RV32ZBB-NEXT: ret
|
|
;
|
|
; RV64ZBB-LABEL: abd_ext_i128_undef:
|
|
; RV64ZBB: # %bb.0:
|
|
; RV64ZBB-NEXT: sltu a4, a0, a2
|
|
; RV64ZBB-NEXT: sub a3, a1, a3
|
|
; RV64ZBB-NEXT: sub a3, a3, a4
|
|
; RV64ZBB-NEXT: sub a2, a0, a2
|
|
; RV64ZBB-NEXT: beq a3, a1, .LBB12_2
|
|
; RV64ZBB-NEXT: # %bb.1:
|
|
; RV64ZBB-NEXT: sltu a0, a1, a3
|
|
; RV64ZBB-NEXT: j .LBB12_3
|
|
; RV64ZBB-NEXT: .LBB12_2:
|
|
; RV64ZBB-NEXT: sltu a0, a0, a2
|
|
; RV64ZBB-NEXT: .LBB12_3:
|
|
; RV64ZBB-NEXT: neg a1, a0
|
|
; RV64ZBB-NEXT: xor a2, a2, a1
|
|
; RV64ZBB-NEXT: xor a3, a3, a1
|
|
; RV64ZBB-NEXT: sltu a1, a2, a1
|
|
; RV64ZBB-NEXT: add a3, a3, a0
|
|
; RV64ZBB-NEXT: sub a1, a3, a1
|
|
; RV64ZBB-NEXT: add a0, a2, a0
|
|
; RV64ZBB-NEXT: ret
|
|
%aext = zext i128 %a to i256
|
|
%bext = zext i128 %b to i256
|
|
%sub = sub i256 %aext, %bext
|
|
%abs = call i256 @llvm.abs.i256(i256 %sub, i1 true)
|
|
%trunc = trunc i256 %abs to i128
|
|
ret i128 %trunc
|
|
}
|
|
|
|
;
|
|
; sub(umax(a,b),umin(a,b)) -> abdu(a,b)
|
|
;
|
|
|
|
define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
|
|
; RV32I-LABEL: abd_minmax_i8:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: zext.b a1, a1
|
|
; RV32I-NEXT: zext.b a0, a0
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: srai a1, a0, 31
|
|
; RV32I-NEXT: xor a0, a0, a1
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_minmax_i8:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: zext.b a1, a1
|
|
; RV64I-NEXT: zext.b a0, a0
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: srai a1, a0, 63
|
|
; RV64I-NEXT: xor a0, a0, a1
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; ZBB-LABEL: abd_minmax_i8:
|
|
; ZBB: # %bb.0:
|
|
; ZBB-NEXT: zext.b a1, a1
|
|
; ZBB-NEXT: zext.b a0, a0
|
|
; ZBB-NEXT: minu a2, a0, a1
|
|
; ZBB-NEXT: maxu a0, a0, a1
|
|
; ZBB-NEXT: sub a0, a0, a2
|
|
; ZBB-NEXT: ret
|
|
%min = call i8 @llvm.umin.i8(i8 %a, i8 %b)
|
|
%max = call i8 @llvm.umax.i8(i8 %a, i8 %b)
|
|
%sub = sub i8 %max, %min
|
|
ret i8 %sub
|
|
}
|
|
|
|
define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
|
|
; RV32I-LABEL: abd_minmax_i16:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: lui a2, 16
|
|
; RV32I-NEXT: addi a2, a2, -1
|
|
; RV32I-NEXT: and a1, a1, a2
|
|
; RV32I-NEXT: and a0, a0, a2
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: srai a1, a0, 31
|
|
; RV32I-NEXT: xor a0, a0, a1
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_minmax_i16:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: lui a2, 16
|
|
; RV64I-NEXT: addi a2, a2, -1
|
|
; RV64I-NEXT: and a1, a1, a2
|
|
; RV64I-NEXT: and a0, a0, a2
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: srai a1, a0, 63
|
|
; RV64I-NEXT: xor a0, a0, a1
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; ZBB-LABEL: abd_minmax_i16:
|
|
; ZBB: # %bb.0:
|
|
; ZBB-NEXT: zext.h a1, a1
|
|
; ZBB-NEXT: zext.h a0, a0
|
|
; ZBB-NEXT: minu a2, a0, a1
|
|
; ZBB-NEXT: maxu a0, a0, a1
|
|
; ZBB-NEXT: sub a0, a0, a2
|
|
; ZBB-NEXT: ret
|
|
%min = call i16 @llvm.umin.i16(i16 %a, i16 %b)
|
|
%max = call i16 @llvm.umax.i16(i16 %a, i16 %b)
|
|
%sub = sub i16 %max, %min
|
|
ret i16 %sub
|
|
}
|
|
|
|
define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
|
|
; RV32I-LABEL: abd_minmax_i32:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: bltu a1, a0, .LBB15_2
|
|
; RV32I-NEXT: # %bb.1:
|
|
; RV32I-NEXT: sub a0, a1, a0
|
|
; RV32I-NEXT: ret
|
|
; RV32I-NEXT: .LBB15_2:
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_minmax_i32:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: slli a1, a1, 32
|
|
; RV64I-NEXT: slli a0, a0, 32
|
|
; RV64I-NEXT: srli a1, a1, 32
|
|
; RV64I-NEXT: srli a0, a0, 32
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: srai a1, a0, 63
|
|
; RV64I-NEXT: xor a0, a0, a1
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; RV32ZBB-LABEL: abd_minmax_i32:
|
|
; RV32ZBB: # %bb.0:
|
|
; RV32ZBB-NEXT: minu a2, a0, a1
|
|
; RV32ZBB-NEXT: maxu a0, a0, a1
|
|
; RV32ZBB-NEXT: sub a0, a0, a2
|
|
; RV32ZBB-NEXT: ret
|
|
;
|
|
; RV64ZBB-LABEL: abd_minmax_i32:
|
|
; RV64ZBB: # %bb.0:
|
|
; RV64ZBB-NEXT: slli a1, a1, 32
|
|
; RV64ZBB-NEXT: slli a0, a0, 32
|
|
; RV64ZBB-NEXT: srli a1, a1, 32
|
|
; RV64ZBB-NEXT: srli a0, a0, 32
|
|
; RV64ZBB-NEXT: minu a2, a0, a1
|
|
; RV64ZBB-NEXT: maxu a0, a0, a1
|
|
; RV64ZBB-NEXT: sub a0, a0, a2
|
|
; RV64ZBB-NEXT: ret
|
|
%min = call i32 @llvm.umin.i32(i32 %a, i32 %b)
|
|
%max = call i32 @llvm.umax.i32(i32 %a, i32 %b)
|
|
%sub = sub i32 %max, %min
|
|
ret i32 %sub
|
|
}
|
|
|
|
define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
|
|
; RV32I-LABEL: abd_minmax_i64:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: sltu a4, a0, a2
|
|
; RV32I-NEXT: sub a3, a1, a3
|
|
; RV32I-NEXT: sub a3, a3, a4
|
|
; RV32I-NEXT: sub a2, a0, a2
|
|
; RV32I-NEXT: beq a3, a1, .LBB16_2
|
|
; RV32I-NEXT: # %bb.1:
|
|
; RV32I-NEXT: sltu a0, a1, a3
|
|
; RV32I-NEXT: j .LBB16_3
|
|
; RV32I-NEXT: .LBB16_2:
|
|
; RV32I-NEXT: sltu a0, a0, a2
|
|
; RV32I-NEXT: .LBB16_3:
|
|
; RV32I-NEXT: neg a1, a0
|
|
; RV32I-NEXT: xor a2, a2, a1
|
|
; RV32I-NEXT: xor a3, a3, a1
|
|
; RV32I-NEXT: sltu a1, a2, a1
|
|
; RV32I-NEXT: add a3, a3, a0
|
|
; RV32I-NEXT: sub a1, a3, a1
|
|
; RV32I-NEXT: add a0, a2, a0
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_minmax_i64:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: bltu a1, a0, .LBB16_2
|
|
; RV64I-NEXT: # %bb.1:
|
|
; RV64I-NEXT: sub a0, a1, a0
|
|
; RV64I-NEXT: ret
|
|
; RV64I-NEXT: .LBB16_2:
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; RV32ZBB-LABEL: abd_minmax_i64:
|
|
; RV32ZBB: # %bb.0:
|
|
; RV32ZBB-NEXT: sltu a4, a0, a2
|
|
; RV32ZBB-NEXT: sub a3, a1, a3
|
|
; RV32ZBB-NEXT: sub a3, a3, a4
|
|
; RV32ZBB-NEXT: sub a2, a0, a2
|
|
; RV32ZBB-NEXT: beq a3, a1, .LBB16_2
|
|
; RV32ZBB-NEXT: # %bb.1:
|
|
; RV32ZBB-NEXT: sltu a0, a1, a3
|
|
; RV32ZBB-NEXT: j .LBB16_3
|
|
; RV32ZBB-NEXT: .LBB16_2:
|
|
; RV32ZBB-NEXT: sltu a0, a0, a2
|
|
; RV32ZBB-NEXT: .LBB16_3:
|
|
; RV32ZBB-NEXT: neg a1, a0
|
|
; RV32ZBB-NEXT: xor a2, a2, a1
|
|
; RV32ZBB-NEXT: xor a3, a3, a1
|
|
; RV32ZBB-NEXT: sltu a1, a2, a1
|
|
; RV32ZBB-NEXT: add a3, a3, a0
|
|
; RV32ZBB-NEXT: sub a1, a3, a1
|
|
; RV32ZBB-NEXT: add a0, a2, a0
|
|
; RV32ZBB-NEXT: ret
|
|
;
|
|
; RV64ZBB-LABEL: abd_minmax_i64:
|
|
; RV64ZBB: # %bb.0:
|
|
; RV64ZBB-NEXT: minu a2, a0, a1
|
|
; RV64ZBB-NEXT: maxu a0, a0, a1
|
|
; RV64ZBB-NEXT: sub a0, a0, a2
|
|
; RV64ZBB-NEXT: ret
|
|
%min = call i64 @llvm.umin.i64(i64 %a, i64 %b)
|
|
%max = call i64 @llvm.umax.i64(i64 %a, i64 %b)
|
|
%sub = sub i64 %max, %min
|
|
ret i64 %sub
|
|
}
|
|
|
|
define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind {
|
|
; RV32I-LABEL: abd_minmax_i128:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: lw a3, 0(a2)
|
|
; RV32I-NEXT: lw a5, 4(a2)
|
|
; RV32I-NEXT: lw a6, 8(a2)
|
|
; RV32I-NEXT: lw a7, 12(a2)
|
|
; RV32I-NEXT: lw a2, 8(a1)
|
|
; RV32I-NEXT: lw a4, 12(a1)
|
|
; RV32I-NEXT: lw t0, 0(a1)
|
|
; RV32I-NEXT: lw a1, 4(a1)
|
|
; RV32I-NEXT: sltu t1, a2, a6
|
|
; RV32I-NEXT: sub a7, a4, a7
|
|
; RV32I-NEXT: sltu t2, t0, a3
|
|
; RV32I-NEXT: sub a7, a7, t1
|
|
; RV32I-NEXT: mv t1, t2
|
|
; RV32I-NEXT: beq a1, a5, .LBB17_2
|
|
; RV32I-NEXT: # %bb.1:
|
|
; RV32I-NEXT: sltu t1, a1, a5
|
|
; RV32I-NEXT: .LBB17_2:
|
|
; RV32I-NEXT: sub t3, a2, a6
|
|
; RV32I-NEXT: sltu a6, t3, t1
|
|
; RV32I-NEXT: sub a6, a7, a6
|
|
; RV32I-NEXT: sub a7, t3, t1
|
|
; RV32I-NEXT: beq a6, a4, .LBB17_4
|
|
; RV32I-NEXT: # %bb.3:
|
|
; RV32I-NEXT: sltu t1, a4, a6
|
|
; RV32I-NEXT: j .LBB17_5
|
|
; RV32I-NEXT: .LBB17_4:
|
|
; RV32I-NEXT: sltu t1, a2, a7
|
|
; RV32I-NEXT: .LBB17_5:
|
|
; RV32I-NEXT: sub a5, a1, a5
|
|
; RV32I-NEXT: sub a5, a5, t2
|
|
; RV32I-NEXT: sub a3, t0, a3
|
|
; RV32I-NEXT: beq a5, a1, .LBB17_7
|
|
; RV32I-NEXT: # %bb.6:
|
|
; RV32I-NEXT: sltu a1, a1, a5
|
|
; RV32I-NEXT: j .LBB17_8
|
|
; RV32I-NEXT: .LBB17_7:
|
|
; RV32I-NEXT: sltu a1, t0, a3
|
|
; RV32I-NEXT: .LBB17_8:
|
|
; RV32I-NEXT: xor a4, a6, a4
|
|
; RV32I-NEXT: xor a2, a7, a2
|
|
; RV32I-NEXT: or a2, a2, a4
|
|
; RV32I-NEXT: beqz a2, .LBB17_10
|
|
; RV32I-NEXT: # %bb.9:
|
|
; RV32I-NEXT: mv a1, t1
|
|
; RV32I-NEXT: .LBB17_10:
|
|
; RV32I-NEXT: neg t0, a1
|
|
; RV32I-NEXT: xor a2, a7, t0
|
|
; RV32I-NEXT: xor a6, a6, t0
|
|
; RV32I-NEXT: xor a4, a3, t0
|
|
; RV32I-NEXT: sltu a3, a2, t0
|
|
; RV32I-NEXT: add a7, a6, a1
|
|
; RV32I-NEXT: sltu a6, a4, t0
|
|
; RV32I-NEXT: sub a3, a7, a3
|
|
; RV32I-NEXT: xor t1, a5, t0
|
|
; RV32I-NEXT: mv a7, a6
|
|
; RV32I-NEXT: beqz a5, .LBB17_12
|
|
; RV32I-NEXT: # %bb.11:
|
|
; RV32I-NEXT: sltu a7, t1, t0
|
|
; RV32I-NEXT: .LBB17_12:
|
|
; RV32I-NEXT: add a2, a2, a1
|
|
; RV32I-NEXT: add t1, t1, a1
|
|
; RV32I-NEXT: add a1, a4, a1
|
|
; RV32I-NEXT: sltu a4, a2, a7
|
|
; RV32I-NEXT: sub a2, a2, a7
|
|
; RV32I-NEXT: sub a5, t1, a6
|
|
; RV32I-NEXT: sub a3, a3, a4
|
|
; RV32I-NEXT: sw a1, 0(a0)
|
|
; RV32I-NEXT: sw a5, 4(a0)
|
|
; RV32I-NEXT: sw a2, 8(a0)
|
|
; RV32I-NEXT: sw a3, 12(a0)
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_minmax_i128:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: sltu a4, a0, a2
|
|
; RV64I-NEXT: sub a3, a1, a3
|
|
; RV64I-NEXT: sub a3, a3, a4
|
|
; RV64I-NEXT: sub a2, a0, a2
|
|
; RV64I-NEXT: beq a3, a1, .LBB17_2
|
|
; RV64I-NEXT: # %bb.1:
|
|
; RV64I-NEXT: sltu a0, a1, a3
|
|
; RV64I-NEXT: j .LBB17_3
|
|
; RV64I-NEXT: .LBB17_2:
|
|
; RV64I-NEXT: sltu a0, a0, a2
|
|
; RV64I-NEXT: .LBB17_3:
|
|
; RV64I-NEXT: neg a1, a0
|
|
; RV64I-NEXT: xor a2, a2, a1
|
|
; RV64I-NEXT: xor a3, a3, a1
|
|
; RV64I-NEXT: sltu a1, a2, a1
|
|
; RV64I-NEXT: add a3, a3, a0
|
|
; RV64I-NEXT: sub a1, a3, a1
|
|
; RV64I-NEXT: add a0, a2, a0
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; RV32ZBB-LABEL: abd_minmax_i128:
|
|
; RV32ZBB: # %bb.0:
|
|
; RV32ZBB-NEXT: lw a3, 0(a2)
|
|
; RV32ZBB-NEXT: lw a5, 4(a2)
|
|
; RV32ZBB-NEXT: lw a6, 8(a2)
|
|
; RV32ZBB-NEXT: lw a7, 12(a2)
|
|
; RV32ZBB-NEXT: lw a2, 8(a1)
|
|
; RV32ZBB-NEXT: lw a4, 12(a1)
|
|
; RV32ZBB-NEXT: lw t0, 0(a1)
|
|
; RV32ZBB-NEXT: lw a1, 4(a1)
|
|
; RV32ZBB-NEXT: sltu t1, a2, a6
|
|
; RV32ZBB-NEXT: sub a7, a4, a7
|
|
; RV32ZBB-NEXT: sltu t2, t0, a3
|
|
; RV32ZBB-NEXT: sub a7, a7, t1
|
|
; RV32ZBB-NEXT: mv t1, t2
|
|
; RV32ZBB-NEXT: beq a1, a5, .LBB17_2
|
|
; RV32ZBB-NEXT: # %bb.1:
|
|
; RV32ZBB-NEXT: sltu t1, a1, a5
|
|
; RV32ZBB-NEXT: .LBB17_2:
|
|
; RV32ZBB-NEXT: sub t3, a2, a6
|
|
; RV32ZBB-NEXT: sltu a6, t3, t1
|
|
; RV32ZBB-NEXT: sub a6, a7, a6
|
|
; RV32ZBB-NEXT: sub a7, t3, t1
|
|
; RV32ZBB-NEXT: beq a6, a4, .LBB17_4
|
|
; RV32ZBB-NEXT: # %bb.3:
|
|
; RV32ZBB-NEXT: sltu t1, a4, a6
|
|
; RV32ZBB-NEXT: j .LBB17_5
|
|
; RV32ZBB-NEXT: .LBB17_4:
|
|
; RV32ZBB-NEXT: sltu t1, a2, a7
|
|
; RV32ZBB-NEXT: .LBB17_5:
|
|
; RV32ZBB-NEXT: sub a5, a1, a5
|
|
; RV32ZBB-NEXT: sub a5, a5, t2
|
|
; RV32ZBB-NEXT: sub a3, t0, a3
|
|
; RV32ZBB-NEXT: beq a5, a1, .LBB17_7
|
|
; RV32ZBB-NEXT: # %bb.6:
|
|
; RV32ZBB-NEXT: sltu a1, a1, a5
|
|
; RV32ZBB-NEXT: j .LBB17_8
|
|
; RV32ZBB-NEXT: .LBB17_7:
|
|
; RV32ZBB-NEXT: sltu a1, t0, a3
|
|
; RV32ZBB-NEXT: .LBB17_8:
|
|
; RV32ZBB-NEXT: xor a4, a6, a4
|
|
; RV32ZBB-NEXT: xor a2, a7, a2
|
|
; RV32ZBB-NEXT: or a2, a2, a4
|
|
; RV32ZBB-NEXT: beqz a2, .LBB17_10
|
|
; RV32ZBB-NEXT: # %bb.9:
|
|
; RV32ZBB-NEXT: mv a1, t1
|
|
; RV32ZBB-NEXT: .LBB17_10:
|
|
; RV32ZBB-NEXT: neg t0, a1
|
|
; RV32ZBB-NEXT: xor a2, a7, t0
|
|
; RV32ZBB-NEXT: xor a6, a6, t0
|
|
; RV32ZBB-NEXT: xor a4, a3, t0
|
|
; RV32ZBB-NEXT: sltu a3, a2, t0
|
|
; RV32ZBB-NEXT: add a7, a6, a1
|
|
; RV32ZBB-NEXT: sltu a6, a4, t0
|
|
; RV32ZBB-NEXT: sub a3, a7, a3
|
|
; RV32ZBB-NEXT: xor t1, a5, t0
|
|
; RV32ZBB-NEXT: mv a7, a6
|
|
; RV32ZBB-NEXT: beqz a5, .LBB17_12
|
|
; RV32ZBB-NEXT: # %bb.11:
|
|
; RV32ZBB-NEXT: sltu a7, t1, t0
|
|
; RV32ZBB-NEXT: .LBB17_12:
|
|
; RV32ZBB-NEXT: add a2, a2, a1
|
|
; RV32ZBB-NEXT: add t1, t1, a1
|
|
; RV32ZBB-NEXT: add a1, a4, a1
|
|
; RV32ZBB-NEXT: sltu a4, a2, a7
|
|
; RV32ZBB-NEXT: sub a2, a2, a7
|
|
; RV32ZBB-NEXT: sub a5, t1, a6
|
|
; RV32ZBB-NEXT: sub a3, a3, a4
|
|
; RV32ZBB-NEXT: sw a1, 0(a0)
|
|
; RV32ZBB-NEXT: sw a5, 4(a0)
|
|
; RV32ZBB-NEXT: sw a2, 8(a0)
|
|
; RV32ZBB-NEXT: sw a3, 12(a0)
|
|
; RV32ZBB-NEXT: ret
|
|
;
|
|
; RV64ZBB-LABEL: abd_minmax_i128:
|
|
; RV64ZBB: # %bb.0:
|
|
; RV64ZBB-NEXT: sltu a4, a0, a2
|
|
; RV64ZBB-NEXT: sub a3, a1, a3
|
|
; RV64ZBB-NEXT: sub a3, a3, a4
|
|
; RV64ZBB-NEXT: sub a2, a0, a2
|
|
; RV64ZBB-NEXT: beq a3, a1, .LBB17_2
|
|
; RV64ZBB-NEXT: # %bb.1:
|
|
; RV64ZBB-NEXT: sltu a0, a1, a3
|
|
; RV64ZBB-NEXT: j .LBB17_3
|
|
; RV64ZBB-NEXT: .LBB17_2:
|
|
; RV64ZBB-NEXT: sltu a0, a0, a2
|
|
; RV64ZBB-NEXT: .LBB17_3:
|
|
; RV64ZBB-NEXT: neg a1, a0
|
|
; RV64ZBB-NEXT: xor a2, a2, a1
|
|
; RV64ZBB-NEXT: xor a3, a3, a1
|
|
; RV64ZBB-NEXT: sltu a1, a2, a1
|
|
; RV64ZBB-NEXT: add a3, a3, a0
|
|
; RV64ZBB-NEXT: sub a1, a3, a1
|
|
; RV64ZBB-NEXT: add a0, a2, a0
|
|
; RV64ZBB-NEXT: ret
|
|
%min = call i128 @llvm.umin.i128(i128 %a, i128 %b)
|
|
%max = call i128 @llvm.umax.i128(i128 %a, i128 %b)
|
|
%sub = sub i128 %max, %min
|
|
ret i128 %sub
|
|
}
|
|
|
|
;
|
|
; select(icmp(a,b),sub(a,b),sub(b,a)) -> abdu(a,b)
|
|
;
|
|
|
|
define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
|
|
; RV32I-LABEL: abd_cmp_i8:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: zext.b a1, a1
|
|
; RV32I-NEXT: zext.b a0, a0
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: srai a1, a0, 31
|
|
; RV32I-NEXT: xor a0, a0, a1
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_cmp_i8:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: zext.b a1, a1
|
|
; RV64I-NEXT: zext.b a0, a0
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: srai a1, a0, 63
|
|
; RV64I-NEXT: xor a0, a0, a1
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; ZBB-LABEL: abd_cmp_i8:
|
|
; ZBB: # %bb.0:
|
|
; ZBB-NEXT: zext.b a1, a1
|
|
; ZBB-NEXT: zext.b a0, a0
|
|
; ZBB-NEXT: minu a2, a0, a1
|
|
; ZBB-NEXT: maxu a0, a0, a1
|
|
; ZBB-NEXT: sub a0, a0, a2
|
|
; ZBB-NEXT: ret
|
|
%cmp = icmp ugt i8 %a, %b
|
|
%ab = sub i8 %a, %b
|
|
%ba = sub i8 %b, %a
|
|
%sel = select i1 %cmp, i8 %ab, i8 %ba
|
|
ret i8 %sel
|
|
}
|
|
|
|
define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
|
|
; RV32I-LABEL: abd_cmp_i16:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: lui a2, 16
|
|
; RV32I-NEXT: addi a2, a2, -1
|
|
; RV32I-NEXT: and a1, a1, a2
|
|
; RV32I-NEXT: and a0, a0, a2
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: srai a1, a0, 31
|
|
; RV32I-NEXT: xor a0, a0, a1
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_cmp_i16:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: lui a2, 16
|
|
; RV64I-NEXT: addi a2, a2, -1
|
|
; RV64I-NEXT: and a1, a1, a2
|
|
; RV64I-NEXT: and a0, a0, a2
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: srai a1, a0, 63
|
|
; RV64I-NEXT: xor a0, a0, a1
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; ZBB-LABEL: abd_cmp_i16:
|
|
; ZBB: # %bb.0:
|
|
; ZBB-NEXT: zext.h a1, a1
|
|
; ZBB-NEXT: zext.h a0, a0
|
|
; ZBB-NEXT: minu a2, a0, a1
|
|
; ZBB-NEXT: maxu a0, a0, a1
|
|
; ZBB-NEXT: sub a0, a0, a2
|
|
; ZBB-NEXT: ret
|
|
%cmp = icmp uge i16 %a, %b
|
|
%ab = sub i16 %a, %b
|
|
%ba = sub i16 %b, %a
|
|
%sel = select i1 %cmp, i16 %ab, i16 %ba
|
|
ret i16 %sel
|
|
}
|
|
|
|
define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
|
|
; RV32I-LABEL: abd_cmp_i32:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: bltu a1, a0, .LBB20_2
|
|
; RV32I-NEXT: # %bb.1:
|
|
; RV32I-NEXT: sub a0, a1, a0
|
|
; RV32I-NEXT: ret
|
|
; RV32I-NEXT: .LBB20_2:
|
|
; RV32I-NEXT: sub a0, a0, a1
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_cmp_i32:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: slli a1, a1, 32
|
|
; RV64I-NEXT: slli a0, a0, 32
|
|
; RV64I-NEXT: srli a1, a1, 32
|
|
; RV64I-NEXT: srli a0, a0, 32
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: srai a1, a0, 63
|
|
; RV64I-NEXT: xor a0, a0, a1
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; RV32ZBB-LABEL: abd_cmp_i32:
|
|
; RV32ZBB: # %bb.0:
|
|
; RV32ZBB-NEXT: minu a2, a0, a1
|
|
; RV32ZBB-NEXT: maxu a0, a0, a1
|
|
; RV32ZBB-NEXT: sub a0, a0, a2
|
|
; RV32ZBB-NEXT: ret
|
|
;
|
|
; RV64ZBB-LABEL: abd_cmp_i32:
|
|
; RV64ZBB: # %bb.0:
|
|
; RV64ZBB-NEXT: slli a1, a1, 32
|
|
; RV64ZBB-NEXT: slli a0, a0, 32
|
|
; RV64ZBB-NEXT: srli a1, a1, 32
|
|
; RV64ZBB-NEXT: srli a0, a0, 32
|
|
; RV64ZBB-NEXT: minu a2, a0, a1
|
|
; RV64ZBB-NEXT: maxu a0, a0, a1
|
|
; RV64ZBB-NEXT: sub a0, a0, a2
|
|
; RV64ZBB-NEXT: ret
|
|
%cmp = icmp ult i32 %a, %b
|
|
%ab = sub i32 %a, %b
|
|
%ba = sub i32 %b, %a
|
|
%sel = select i1 %cmp, i32 %ba, i32 %ab
|
|
ret i32 %sel
|
|
}
|
|
|
|
define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
|
|
; RV32I-LABEL: abd_cmp_i64:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: sltu a4, a0, a2
|
|
; RV32I-NEXT: sub a3, a1, a3
|
|
; RV32I-NEXT: sub a3, a3, a4
|
|
; RV32I-NEXT: sub a2, a0, a2
|
|
; RV32I-NEXT: beq a3, a1, .LBB21_2
|
|
; RV32I-NEXT: # %bb.1:
|
|
; RV32I-NEXT: sltu a0, a1, a3
|
|
; RV32I-NEXT: j .LBB21_3
|
|
; RV32I-NEXT: .LBB21_2:
|
|
; RV32I-NEXT: sltu a0, a0, a2
|
|
; RV32I-NEXT: .LBB21_3:
|
|
; RV32I-NEXT: neg a1, a0
|
|
; RV32I-NEXT: xor a2, a2, a1
|
|
; RV32I-NEXT: xor a3, a3, a1
|
|
; RV32I-NEXT: sltu a1, a2, a1
|
|
; RV32I-NEXT: add a3, a3, a0
|
|
; RV32I-NEXT: sub a1, a3, a1
|
|
; RV32I-NEXT: add a0, a2, a0
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_cmp_i64:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: bltu a1, a0, .LBB21_2
|
|
; RV64I-NEXT: # %bb.1:
|
|
; RV64I-NEXT: sub a0, a1, a0
|
|
; RV64I-NEXT: ret
|
|
; RV64I-NEXT: .LBB21_2:
|
|
; RV64I-NEXT: sub a0, a0, a1
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; RV32ZBB-LABEL: abd_cmp_i64:
|
|
; RV32ZBB: # %bb.0:
|
|
; RV32ZBB-NEXT: sltu a4, a0, a2
|
|
; RV32ZBB-NEXT: sub a3, a1, a3
|
|
; RV32ZBB-NEXT: sub a3, a3, a4
|
|
; RV32ZBB-NEXT: sub a2, a0, a2
|
|
; RV32ZBB-NEXT: beq a3, a1, .LBB21_2
|
|
; RV32ZBB-NEXT: # %bb.1:
|
|
; RV32ZBB-NEXT: sltu a0, a1, a3
|
|
; RV32ZBB-NEXT: j .LBB21_3
|
|
; RV32ZBB-NEXT: .LBB21_2:
|
|
; RV32ZBB-NEXT: sltu a0, a0, a2
|
|
; RV32ZBB-NEXT: .LBB21_3:
|
|
; RV32ZBB-NEXT: neg a1, a0
|
|
; RV32ZBB-NEXT: xor a2, a2, a1
|
|
; RV32ZBB-NEXT: xor a3, a3, a1
|
|
; RV32ZBB-NEXT: sltu a1, a2, a1
|
|
; RV32ZBB-NEXT: add a3, a3, a0
|
|
; RV32ZBB-NEXT: sub a1, a3, a1
|
|
; RV32ZBB-NEXT: add a0, a2, a0
|
|
; RV32ZBB-NEXT: ret
|
|
;
|
|
; RV64ZBB-LABEL: abd_cmp_i64:
|
|
; RV64ZBB: # %bb.0:
|
|
; RV64ZBB-NEXT: minu a2, a0, a1
|
|
; RV64ZBB-NEXT: maxu a0, a0, a1
|
|
; RV64ZBB-NEXT: sub a0, a0, a2
|
|
; RV64ZBB-NEXT: ret
|
|
%cmp = icmp uge i64 %a, %b
|
|
%ab = sub i64 %a, %b
|
|
%ba = sub i64 %b, %a
|
|
%sel = select i1 %cmp, i64 %ab, i64 %ba
|
|
ret i64 %sel
|
|
}
|
|
|
|
define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
|
|
; RV32I-LABEL: abd_cmp_i128:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: lw a3, 0(a2)
|
|
; RV32I-NEXT: lw a5, 4(a2)
|
|
; RV32I-NEXT: lw a6, 8(a2)
|
|
; RV32I-NEXT: lw a7, 12(a2)
|
|
; RV32I-NEXT: lw a2, 8(a1)
|
|
; RV32I-NEXT: lw a4, 12(a1)
|
|
; RV32I-NEXT: lw t0, 0(a1)
|
|
; RV32I-NEXT: lw a1, 4(a1)
|
|
; RV32I-NEXT: sltu t1, a2, a6
|
|
; RV32I-NEXT: sub a7, a4, a7
|
|
; RV32I-NEXT: sltu t2, t0, a3
|
|
; RV32I-NEXT: sub a7, a7, t1
|
|
; RV32I-NEXT: mv t1, t2
|
|
; RV32I-NEXT: beq a1, a5, .LBB22_2
|
|
; RV32I-NEXT: # %bb.1:
|
|
; RV32I-NEXT: sltu t1, a1, a5
|
|
; RV32I-NEXT: .LBB22_2:
|
|
; RV32I-NEXT: sub t3, a2, a6
|
|
; RV32I-NEXT: sltu a6, t3, t1
|
|
; RV32I-NEXT: sub a6, a7, a6
|
|
; RV32I-NEXT: sub a7, t3, t1
|
|
; RV32I-NEXT: beq a6, a4, .LBB22_4
|
|
; RV32I-NEXT: # %bb.3:
|
|
; RV32I-NEXT: sltu t1, a4, a6
|
|
; RV32I-NEXT: j .LBB22_5
|
|
; RV32I-NEXT: .LBB22_4:
|
|
; RV32I-NEXT: sltu t1, a2, a7
|
|
; RV32I-NEXT: .LBB22_5:
|
|
; RV32I-NEXT: sub a5, a1, a5
|
|
; RV32I-NEXT: sub a5, a5, t2
|
|
; RV32I-NEXT: sub a3, t0, a3
|
|
; RV32I-NEXT: beq a5, a1, .LBB22_7
|
|
; RV32I-NEXT: # %bb.6:
|
|
; RV32I-NEXT: sltu a1, a1, a5
|
|
; RV32I-NEXT: j .LBB22_8
|
|
; RV32I-NEXT: .LBB22_7:
|
|
; RV32I-NEXT: sltu a1, t0, a3
|
|
; RV32I-NEXT: .LBB22_8:
|
|
; RV32I-NEXT: xor a4, a6, a4
|
|
; RV32I-NEXT: xor a2, a7, a2
|
|
; RV32I-NEXT: or a2, a2, a4
|
|
; RV32I-NEXT: beqz a2, .LBB22_10
|
|
; RV32I-NEXT: # %bb.9:
|
|
; RV32I-NEXT: mv a1, t1
|
|
; RV32I-NEXT: .LBB22_10:
|
|
; RV32I-NEXT: neg t0, a1
|
|
; RV32I-NEXT: xor a2, a7, t0
|
|
; RV32I-NEXT: xor a6, a6, t0
|
|
; RV32I-NEXT: xor a4, a3, t0
|
|
; RV32I-NEXT: sltu a3, a2, t0
|
|
; RV32I-NEXT: add a7, a6, a1
|
|
; RV32I-NEXT: sltu a6, a4, t0
|
|
; RV32I-NEXT: sub a3, a7, a3
|
|
; RV32I-NEXT: xor t1, a5, t0
|
|
; RV32I-NEXT: mv a7, a6
|
|
; RV32I-NEXT: beqz a5, .LBB22_12
|
|
; RV32I-NEXT: # %bb.11:
|
|
; RV32I-NEXT: sltu a7, t1, t0
|
|
; RV32I-NEXT: .LBB22_12:
|
|
; RV32I-NEXT: add a2, a2, a1
|
|
; RV32I-NEXT: add t1, t1, a1
|
|
; RV32I-NEXT: add a1, a4, a1
|
|
; RV32I-NEXT: sltu a4, a2, a7
|
|
; RV32I-NEXT: sub a2, a2, a7
|
|
; RV32I-NEXT: sub a5, t1, a6
|
|
; RV32I-NEXT: sub a3, a3, a4
|
|
; RV32I-NEXT: sw a1, 0(a0)
|
|
; RV32I-NEXT: sw a5, 4(a0)
|
|
; RV32I-NEXT: sw a2, 8(a0)
|
|
; RV32I-NEXT: sw a3, 12(a0)
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: abd_cmp_i128:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: sltu a4, a0, a2
|
|
; RV64I-NEXT: sub a3, a1, a3
|
|
; RV64I-NEXT: sub a3, a3, a4
|
|
; RV64I-NEXT: sub a2, a0, a2
|
|
; RV64I-NEXT: beq a3, a1, .LBB22_2
|
|
; RV64I-NEXT: # %bb.1:
|
|
; RV64I-NEXT: sltu a0, a1, a3
|
|
; RV64I-NEXT: j .LBB22_3
|
|
; RV64I-NEXT: .LBB22_2:
|
|
; RV64I-NEXT: sltu a0, a0, a2
|
|
; RV64I-NEXT: .LBB22_3:
|
|
; RV64I-NEXT: neg a1, a0
|
|
; RV64I-NEXT: xor a2, a2, a1
|
|
; RV64I-NEXT: xor a3, a3, a1
|
|
; RV64I-NEXT: sltu a1, a2, a1
|
|
; RV64I-NEXT: add a3, a3, a0
|
|
; RV64I-NEXT: sub a1, a3, a1
|
|
; RV64I-NEXT: add a0, a2, a0
|
|
; RV64I-NEXT: ret
|
|
;
|
|
; RV32ZBB-LABEL: abd_cmp_i128:
|
|
; RV32ZBB: # %bb.0:
|
|
; RV32ZBB-NEXT: lw a3, 0(a2)
|
|
; RV32ZBB-NEXT: lw a5, 4(a2)
|
|
; RV32ZBB-NEXT: lw a6, 8(a2)
|
|
; RV32ZBB-NEXT: lw a7, 12(a2)
|
|
; RV32ZBB-NEXT: lw a2, 8(a1)
|
|
; RV32ZBB-NEXT: lw a4, 12(a1)
|
|
; RV32ZBB-NEXT: lw t0, 0(a1)
|
|
; RV32ZBB-NEXT: lw a1, 4(a1)
|
|
; RV32ZBB-NEXT: sltu t1, a2, a6
|
|
; RV32ZBB-NEXT: sub a7, a4, a7
|
|
; RV32ZBB-NEXT: sltu t2, t0, a3
|
|
; RV32ZBB-NEXT: sub a7, a7, t1
|
|
; RV32ZBB-NEXT: mv t1, t2
|
|
; RV32ZBB-NEXT: beq a1, a5, .LBB22_2
|
|
; RV32ZBB-NEXT: # %bb.1:
|
|
; RV32ZBB-NEXT: sltu t1, a1, a5
|
|
; RV32ZBB-NEXT: .LBB22_2:
|
|
; RV32ZBB-NEXT: sub t3, a2, a6
|
|
; RV32ZBB-NEXT: sltu a6, t3, t1
|
|
; RV32ZBB-NEXT: sub a6, a7, a6
|
|
; RV32ZBB-NEXT: sub a7, t3, t1
|
|
; RV32ZBB-NEXT: beq a6, a4, .LBB22_4
|
|
; RV32ZBB-NEXT: # %bb.3:
|
|
; RV32ZBB-NEXT: sltu t1, a4, a6
|
|
; RV32ZBB-NEXT: j .LBB22_5
|
|
; RV32ZBB-NEXT: .LBB22_4:
|
|
; RV32ZBB-NEXT: sltu t1, a2, a7
|
|
; RV32ZBB-NEXT: .LBB22_5:
|
|
; RV32ZBB-NEXT: sub a5, a1, a5
|
|
; RV32ZBB-NEXT: sub a5, a5, t2
|
|
; RV32ZBB-NEXT: sub a3, t0, a3
|
|
; RV32ZBB-NEXT: beq a5, a1, .LBB22_7
|
|
; RV32ZBB-NEXT: # %bb.6:
|
|
; RV32ZBB-NEXT: sltu a1, a1, a5
|
|
; RV32ZBB-NEXT: j .LBB22_8
|
|
; RV32ZBB-NEXT: .LBB22_7:
|
|
; RV32ZBB-NEXT: sltu a1, t0, a3
|
|
; RV32ZBB-NEXT: .LBB22_8:
|
|
; RV32ZBB-NEXT: xor a4, a6, a4
|
|
; RV32ZBB-NEXT: xor a2, a7, a2
|
|
; RV32ZBB-NEXT: or a2, a2, a4
|
|
; RV32ZBB-NEXT: beqz a2, .LBB22_10
|
|
; RV32ZBB-NEXT: # %bb.9:
|
|
; RV32ZBB-NEXT: mv a1, t1
|
|
; RV32ZBB-NEXT: .LBB22_10:
|
|
; RV32ZBB-NEXT: neg t0, a1
|
|
; RV32ZBB-NEXT: xor a2, a7, t0
|
|
; RV32ZBB-NEXT: xor a6, a6, t0
|
|
; RV32ZBB-NEXT: xor a4, a3, t0
|
|
; RV32ZBB-NEXT: sltu a3, a2, t0
|
|
; RV32ZBB-NEXT: add a7, a6, a1
|
|
; RV32ZBB-NEXT: sltu a6, a4, t0
|
|
; RV32ZBB-NEXT: sub a3, a7, a3
|
|
; RV32ZBB-NEXT: xor t1, a5, t0
|
|
; RV32ZBB-NEXT: mv a7, a6
|
|
; RV32ZBB-NEXT: beqz a5, .LBB22_12
|
|
; RV32ZBB-NEXT: # %bb.11:
|
|
; RV32ZBB-NEXT: sltu a7, t1, t0
|
|
; RV32ZBB-NEXT: .LBB22_12:
|
|
; RV32ZBB-NEXT: add a2, a2, a1
|
|
; RV32ZBB-NEXT: add t1, t1, a1
|
|
; RV32ZBB-NEXT: add a1, a4, a1
|
|
; RV32ZBB-NEXT: sltu a4, a2, a7
|
|
; RV32ZBB-NEXT: sub a2, a2, a7
|
|
; RV32ZBB-NEXT: sub a5, t1, a6
|
|
; RV32ZBB-NEXT: sub a3, a3, a4
|
|
; RV32ZBB-NEXT: sw a1, 0(a0)
|
|
; RV32ZBB-NEXT: sw a5, 4(a0)
|
|
; RV32ZBB-NEXT: sw a2, 8(a0)
|
|
; RV32ZBB-NEXT: sw a3, 12(a0)
|
|
; RV32ZBB-NEXT: ret
|
|
;
|
|
; RV64ZBB-LABEL: abd_cmp_i128:
|
|
; RV64ZBB: # %bb.0:
|
|
; RV64ZBB-NEXT: sltu a4, a0, a2
|
|
; RV64ZBB-NEXT: sub a3, a1, a3
|
|
; RV64ZBB-NEXT: sub a3, a3, a4
|
|
; RV64ZBB-NEXT: sub a2, a0, a2
|
|
; RV64ZBB-NEXT: beq a3, a1, .LBB22_2
|
|
; RV64ZBB-NEXT: # %bb.1:
|
|
; RV64ZBB-NEXT: sltu a0, a1, a3
|
|
; RV64ZBB-NEXT: j .LBB22_3
|
|
; RV64ZBB-NEXT: .LBB22_2:
|
|
; RV64ZBB-NEXT: sltu a0, a0, a2
|
|
; RV64ZBB-NEXT: .LBB22_3:
|
|
; RV64ZBB-NEXT: neg a1, a0
|
|
; RV64ZBB-NEXT: xor a2, a2, a1
|
|
; RV64ZBB-NEXT: xor a3, a3, a1
|
|
; RV64ZBB-NEXT: sltu a1, a2, a1
|
|
; RV64ZBB-NEXT: add a3, a3, a0
|
|
; RV64ZBB-NEXT: sub a1, a3, a1
|
|
; RV64ZBB-NEXT: add a0, a2, a0
|
|
; RV64ZBB-NEXT: ret
|
|
%cmp = icmp uge i128 %a, %b
|
|
%ab = sub i128 %a, %b
|
|
%ba = sub i128 %b, %a
|
|
%sel = select i1 %cmp, i128 %ab, i128 %ba
|
|
ret i128 %sel
|
|
}
|
|
|
|
;
|
|
; sub(select(icmp(a,b),a,b),select(icmp(a,b),b,a)) -> abdu(a,b)
|
|
;

define i8 @abd_select_i8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: abd_select_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: zext.b a1, a1
; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_select_i8:
; RV64I: # %bb.0:
; RV64I-NEXT: zext.b a1, a1
; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; ZBB-LABEL: abd_select_i8:
; ZBB: # %bb.0:
; ZBB-NEXT: zext.b a1, a1
; ZBB-NEXT: zext.b a0, a0
; ZBB-NEXT: minu a2, a0, a1
; ZBB-NEXT: maxu a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
; ZBB-NEXT: ret
  %cmp = icmp ult i8 %a, %b
  %ab = select i1 %cmp, i8 %a, i8 %b
  %ba = select i1 %cmp, i8 %b, i8 %a
  %sub = sub i8 %ba, %ab
  ret i8 %sub
}

define i16 @abd_select_i16(i16 %a, i16 %b) nounwind {
; RV32I-LABEL: abd_select_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a2, 16
; RV32I-NEXT: addi a2, a2, -1
; RV32I-NEXT: and a1, a1, a2
; RV32I-NEXT: and a0, a0, a2
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_select_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a2, 16
; RV64I-NEXT: addi a2, a2, -1
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; ZBB-LABEL: abd_select_i16:
; ZBB: # %bb.0:
; ZBB-NEXT: zext.h a1, a1
; ZBB-NEXT: zext.h a0, a0
; ZBB-NEXT: minu a2, a0, a1
; ZBB-NEXT: maxu a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
; ZBB-NEXT: ret
  %cmp = icmp ule i16 %a, %b
  %ab = select i1 %cmp, i16 %a, i16 %b
  %ba = select i1 %cmp, i16 %b, i16 %a
  %sub = sub i16 %ba, %ab
  ret i16 %sub
}

define i32 @abd_select_i32(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: abd_select_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: bltu a1, a0, .LBB25_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB25_2:
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_select_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a1, a1, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_select_i32:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: minu a2, a0, a1
; RV32ZBB-NEXT: maxu a0, a0, a1
; RV32ZBB-NEXT: sub a0, a0, a2
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_select_i32:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: slli a1, a1, 32
; RV64ZBB-NEXT: slli a0, a0, 32
; RV64ZBB-NEXT: srli a1, a1, 32
; RV64ZBB-NEXT: srli a0, a0, 32
; RV64ZBB-NEXT: minu a2, a0, a1
; RV64ZBB-NEXT: maxu a0, a0, a1
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
  %cmp = icmp ugt i32 %a, %b
  %ab = select i1 %cmp, i32 %a, i32 %b
  %ba = select i1 %cmp, i32 %b, i32 %a
  %sub = sub i32 %ab, %ba
  ret i32 %sub
}

define i64 @abd_select_i64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: abd_select_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: sltu a4, a0, a2
; RV32I-NEXT: sub a3, a1, a3
; RV32I-NEXT: sub a3, a3, a4
; RV32I-NEXT: sub a2, a0, a2
; RV32I-NEXT: beq a3, a1, .LBB26_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sltu a0, a1, a3
; RV32I-NEXT: j .LBB26_3
; RV32I-NEXT: .LBB26_2:
; RV32I-NEXT: sltu a0, a0, a2
; RV32I-NEXT: .LBB26_3:
; RV32I-NEXT: neg a1, a0
; RV32I-NEXT: xor a2, a2, a1
; RV32I-NEXT: xor a3, a3, a1
; RV32I-NEXT: sltu a1, a2, a1
; RV32I-NEXT: add a3, a3, a0
; RV32I-NEXT: sub a1, a3, a1
; RV32I-NEXT: add a0, a2, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_select_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: bltu a1, a0, .LBB26_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: sub a0, a1, a0
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB26_2:
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_select_i64:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sltu a4, a0, a2
; RV32ZBB-NEXT: sub a3, a1, a3
; RV32ZBB-NEXT: sub a3, a3, a4
; RV32ZBB-NEXT: sub a2, a0, a2
; RV32ZBB-NEXT: beq a3, a1, .LBB26_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: sltu a0, a1, a3
; RV32ZBB-NEXT: j .LBB26_3
; RV32ZBB-NEXT: .LBB26_2:
; RV32ZBB-NEXT: sltu a0, a0, a2
; RV32ZBB-NEXT: .LBB26_3:
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: xor a2, a2, a1
; RV32ZBB-NEXT: xor a3, a3, a1
; RV32ZBB-NEXT: sltu a1, a2, a1
; RV32ZBB-NEXT: add a3, a3, a0
; RV32ZBB-NEXT: sub a1, a3, a1
; RV32ZBB-NEXT: add a0, a2, a0
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_select_i64:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: minu a2, a0, a1
; RV64ZBB-NEXT: maxu a0, a0, a1
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
  %cmp = icmp uge i64 %a, %b
  %ab = select i1 %cmp, i64 %a, i64 %b
  %ba = select i1 %cmp, i64 %b, i64 %a
  %sub = sub i64 %ab, %ba
  ret i64 %sub
}

define i128 @abd_select_i128(i128 %a, i128 %b) nounwind {
; RV32I-LABEL: abd_select_i128:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a3, 0(a2)
; RV32I-NEXT: lw a5, 4(a2)
; RV32I-NEXT: lw a6, 8(a2)
; RV32I-NEXT: lw a7, 12(a2)
; RV32I-NEXT: lw a2, 8(a1)
; RV32I-NEXT: lw a4, 12(a1)
; RV32I-NEXT: lw t0, 0(a1)
; RV32I-NEXT: lw a1, 4(a1)
; RV32I-NEXT: sltu t1, a2, a6
; RV32I-NEXT: sub a7, a4, a7
; RV32I-NEXT: sltu t2, t0, a3
; RV32I-NEXT: sub a7, a7, t1
; RV32I-NEXT: mv t1, t2
; RV32I-NEXT: beq a1, a5, .LBB27_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sltu t1, a1, a5
; RV32I-NEXT: .LBB27_2:
; RV32I-NEXT: sub t3, a2, a6
; RV32I-NEXT: sltu a6, t3, t1
; RV32I-NEXT: sub a6, a7, a6
; RV32I-NEXT: sub a7, t3, t1
; RV32I-NEXT: beq a6, a4, .LBB27_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: sltu t1, a4, a6
; RV32I-NEXT: j .LBB27_5
; RV32I-NEXT: .LBB27_4:
; RV32I-NEXT: sltu t1, a2, a7
; RV32I-NEXT: .LBB27_5:
; RV32I-NEXT: sub a5, a1, a5
; RV32I-NEXT: sub a5, a5, t2
; RV32I-NEXT: sub a3, t0, a3
; RV32I-NEXT: beq a5, a1, .LBB27_7
; RV32I-NEXT: # %bb.6:
; RV32I-NEXT: sltu a1, a1, a5
; RV32I-NEXT: j .LBB27_8
; RV32I-NEXT: .LBB27_7:
; RV32I-NEXT: sltu a1, t0, a3
; RV32I-NEXT: .LBB27_8:
; RV32I-NEXT: xor a4, a6, a4
; RV32I-NEXT: xor a2, a7, a2
; RV32I-NEXT: or a2, a2, a4
; RV32I-NEXT: beqz a2, .LBB27_10
; RV32I-NEXT: # %bb.9:
; RV32I-NEXT: mv a1, t1
; RV32I-NEXT: .LBB27_10:
; RV32I-NEXT: neg t0, a1
; RV32I-NEXT: xor a2, a7, t0
; RV32I-NEXT: xor a6, a6, t0
; RV32I-NEXT: xor a4, a3, t0
; RV32I-NEXT: sltu a3, a2, t0
; RV32I-NEXT: add a7, a6, a1
; RV32I-NEXT: sltu a6, a4, t0
; RV32I-NEXT: sub a3, a7, a3
; RV32I-NEXT: xor t1, a5, t0
; RV32I-NEXT: mv a7, a6
; RV32I-NEXT: beqz a5, .LBB27_12
; RV32I-NEXT: # %bb.11:
; RV32I-NEXT: sltu a7, t1, t0
; RV32I-NEXT: .LBB27_12:
; RV32I-NEXT: add a2, a2, a1
; RV32I-NEXT: add t1, t1, a1
; RV32I-NEXT: add a1, a4, a1
; RV32I-NEXT: sltu a4, a2, a7
; RV32I-NEXT: sub a2, a2, a7
; RV32I-NEXT: sub a5, t1, a6
; RV32I-NEXT: sub a3, a3, a4
; RV32I-NEXT: sw a1, 0(a0)
; RV32I-NEXT: sw a5, 4(a0)
; RV32I-NEXT: sw a2, 8(a0)
; RV32I-NEXT: sw a3, 12(a0)
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_select_i128:
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a4, a0, a2
; RV64I-NEXT: sub a3, a1, a3
; RV64I-NEXT: sub a3, a3, a4
; RV64I-NEXT: sub a2, a0, a2
; RV64I-NEXT: beq a3, a1, .LBB27_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: sltu a0, a1, a3
; RV64I-NEXT: j .LBB27_3
; RV64I-NEXT: .LBB27_2:
; RV64I-NEXT: sltu a0, a0, a2
; RV64I-NEXT: .LBB27_3:
; RV64I-NEXT: neg a1, a0
; RV64I-NEXT: xor a2, a2, a1
; RV64I-NEXT: xor a3, a3, a1
; RV64I-NEXT: sltu a1, a2, a1
; RV64I-NEXT: add a3, a3, a0
; RV64I-NEXT: sub a1, a3, a1
; RV64I-NEXT: add a0, a2, a0
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_select_i128:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: lw a3, 0(a2)
; RV32ZBB-NEXT: lw a5, 4(a2)
; RV32ZBB-NEXT: lw a6, 8(a2)
; RV32ZBB-NEXT: lw a7, 12(a2)
; RV32ZBB-NEXT: lw a2, 8(a1)
; RV32ZBB-NEXT: lw a4, 12(a1)
; RV32ZBB-NEXT: lw t0, 0(a1)
; RV32ZBB-NEXT: lw a1, 4(a1)
; RV32ZBB-NEXT: sltu t1, a2, a6
; RV32ZBB-NEXT: sub a7, a4, a7
; RV32ZBB-NEXT: sltu t2, t0, a3
; RV32ZBB-NEXT: sub a7, a7, t1
; RV32ZBB-NEXT: mv t1, t2
; RV32ZBB-NEXT: beq a1, a5, .LBB27_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: sltu t1, a1, a5
; RV32ZBB-NEXT: .LBB27_2:
; RV32ZBB-NEXT: sub t3, a2, a6
; RV32ZBB-NEXT: sltu a6, t3, t1
; RV32ZBB-NEXT: sub a6, a7, a6
; RV32ZBB-NEXT: sub a7, t3, t1
; RV32ZBB-NEXT: beq a6, a4, .LBB27_4
; RV32ZBB-NEXT: # %bb.3:
; RV32ZBB-NEXT: sltu t1, a4, a6
; RV32ZBB-NEXT: j .LBB27_5
; RV32ZBB-NEXT: .LBB27_4:
; RV32ZBB-NEXT: sltu t1, a2, a7
; RV32ZBB-NEXT: .LBB27_5:
; RV32ZBB-NEXT: sub a5, a1, a5
; RV32ZBB-NEXT: sub a5, a5, t2
; RV32ZBB-NEXT: sub a3, t0, a3
; RV32ZBB-NEXT: beq a5, a1, .LBB27_7
; RV32ZBB-NEXT: # %bb.6:
; RV32ZBB-NEXT: sltu a1, a1, a5
; RV32ZBB-NEXT: j .LBB27_8
; RV32ZBB-NEXT: .LBB27_7:
; RV32ZBB-NEXT: sltu a1, t0, a3
; RV32ZBB-NEXT: .LBB27_8:
; RV32ZBB-NEXT: xor a4, a6, a4
; RV32ZBB-NEXT: xor a2, a7, a2
; RV32ZBB-NEXT: or a2, a2, a4
; RV32ZBB-NEXT: beqz a2, .LBB27_10
; RV32ZBB-NEXT: # %bb.9:
; RV32ZBB-NEXT: mv a1, t1
; RV32ZBB-NEXT: .LBB27_10:
; RV32ZBB-NEXT: neg t0, a1
; RV32ZBB-NEXT: xor a2, a7, t0
; RV32ZBB-NEXT: xor a6, a6, t0
; RV32ZBB-NEXT: xor a4, a3, t0
; RV32ZBB-NEXT: sltu a3, a2, t0
; RV32ZBB-NEXT: add a7, a6, a1
; RV32ZBB-NEXT: sltu a6, a4, t0
; RV32ZBB-NEXT: sub a3, a7, a3
; RV32ZBB-NEXT: xor t1, a5, t0
; RV32ZBB-NEXT: mv a7, a6
; RV32ZBB-NEXT: beqz a5, .LBB27_12
; RV32ZBB-NEXT: # %bb.11:
; RV32ZBB-NEXT: sltu a7, t1, t0
; RV32ZBB-NEXT: .LBB27_12:
; RV32ZBB-NEXT: add a2, a2, a1
; RV32ZBB-NEXT: add t1, t1, a1
; RV32ZBB-NEXT: add a1, a4, a1
; RV32ZBB-NEXT: sltu a4, a2, a7
; RV32ZBB-NEXT: sub a2, a2, a7
; RV32ZBB-NEXT: sub a5, t1, a6
; RV32ZBB-NEXT: sub a3, a3, a4
; RV32ZBB-NEXT: sw a1, 0(a0)
; RV32ZBB-NEXT: sw a5, 4(a0)
; RV32ZBB-NEXT: sw a2, 8(a0)
; RV32ZBB-NEXT: sw a3, 12(a0)
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_select_i128:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sltu a4, a0, a2
; RV64ZBB-NEXT: sub a3, a1, a3
; RV64ZBB-NEXT: sub a3, a3, a4
; RV64ZBB-NEXT: sub a2, a0, a2
; RV64ZBB-NEXT: beq a3, a1, .LBB27_2
; RV64ZBB-NEXT: # %bb.1:
; RV64ZBB-NEXT: sltu a0, a1, a3
; RV64ZBB-NEXT: j .LBB27_3
; RV64ZBB-NEXT: .LBB27_2:
; RV64ZBB-NEXT: sltu a0, a0, a2
; RV64ZBB-NEXT: .LBB27_3:
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: xor a2, a2, a1
; RV64ZBB-NEXT: xor a3, a3, a1
; RV64ZBB-NEXT: sltu a1, a2, a1
; RV64ZBB-NEXT: add a3, a3, a0
; RV64ZBB-NEXT: sub a1, a3, a1
; RV64ZBB-NEXT: add a0, a2, a0
; RV64ZBB-NEXT: ret
  %cmp = icmp ult i128 %a, %b
  %ab = select i1 %cmp, i128 %a, i128 %b
  %ba = select i1 %cmp, i128 %b, i128 %a
  %sub = sub i128 %ba, %ab
  ret i128 %sub
}

declare i8 @llvm.abs.i8(i8, i1)
declare i16 @llvm.abs.i16(i16, i1)
declare i32 @llvm.abs.i32(i32, i1)
declare i64 @llvm.abs.i64(i64, i1)
declare i128 @llvm.abs.i128(i128, i1)

declare i8 @llvm.umax.i8(i8, i8)
declare i16 @llvm.umax.i16(i16, i16)
declare i32 @llvm.umax.i32(i32, i32)
declare i64 @llvm.umax.i64(i64, i64)

declare i8 @llvm.umin.i8(i8, i8)
declare i16 @llvm.umin.i16(i16, i16)
declare i32 @llvm.umin.i32(i32, i32)
declare i64 @llvm.umin.i64(i64, i64)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK: {{.*}}
; NOZBB: {{.*}}