Alex Bradbury 3d2650bdeb
[RISCV] Use addi rather than addiw for immediates materialised by lui+addi(w) pairs when possible (#141663)
The logic in RISCVMatInt would previously produce lui+addiw on RV64
whenever a 32-bit integer must be materialised and the Hi20 and Lo12
parts are non-zero. However, sometimes addi can be used equivalently
(whenever the sign extension behaviour of addiw would be a no-op). This
patch moves to using addiw only when necessary. Although there is
absolutely no advantage in terms of compressibility or performance, this
has the following advantages:
* It's more consistent with logic used elsewhere in the backend. For
instance, RISCVOptWInstrs will try to convert addiw to addi on the basis
it reduces test diffs vs RV32.
* This matches the lowering GCC does in its codegen path. Unlike LLVM,
GCC seems to have different expansion logic for the assembler vs
codegen. For codegen it will use lui+addi if possible, but expanding
`li` in the assembler will always produce lui+addiw as LLVM did prior
to this commit. As someone who has been looking at a lot of gcc vs clang
diffs lately, reducing unnecessary divergence is of at least some value.
* As the diff for fold-mem-offset.ll shows, we can fold memory offsets
in more cases when addi is used. Memory offset folding could be taught
to recognise when the addiw could be replaced with an addi, but that
seems unnecessary when we can simply change the logic in RISCVMatInt.

As pointed out by @topperc during review, making this change without
modifying RISCVOptWInstrs risks introducing some cases where we fail to
remove a sext.w that we removed before. I've incorporated a patch based
on a suggestion from Craig that avoids it, and also adds appropriate
RISCVOptWInstrs test cases.

The initial patch description noted that the main motivation was to
avoid unnecessary differences both for RV32/RV64 and when comparing GCC,
but noted that very occasionally we see a benefit from memory offset
folding kicking in when it didn't before. Looking at the dynamic
instruction count difference for SPEC benchmarks targeting rva22u64
shows we actually get a meaningful
~4.3% reduction in dynamic icount for 519.lbm_r. Looking at the data
more closely, the codegen difference is in `LBM_performStreamCollideTRT`
which as a function accounts for ~98% of dynamically executed
instructions and the codegen diffs appear to be a knock-on effect of the
address merging reducing register pressure right from function entry
(for instance, we get a big reduction in dynamically executed loads in
that function).

Below is the icount data (rva22u64 -O3, no LTO):
```
Benchmark                Baseline            This PR   Diff (%)
============================================================
500.perlbench_r         174116601991    174115795810     -0.00%
502.gcc_r               218903280858    218903215788     -0.00%
505.mcf_r               131208029185    131207692803     -0.00%
508.namd_r              217497594322    217497594297     -0.00%
510.parest_r            289314486153    289313577652     -0.00%
511.povray_r             30640531048     30640765701      0.00%
519.lbm_r                95897914862     91712688050     -4.36%
520.omnetpp_r           134641549722    134867015683      0.17%
523.xalancbmk_r         281462762992    281432092673     -0.01%
525.x264_r              379776121941    379535558210     -0.06%
526.blender_r           659736022025    659738387343      0.00%
531.deepsjeng_r         349122867552    349122867481     -0.00%
538.imagick_r           238558760552    238558753269     -0.00%
541.leela_r             406578560612    406385135260     -0.05%
544.nab_r               400997131674    400996765827     -0.00%
557.xz_r                130079522194    129945515709     -0.10%

```

The instcounting setup I use doesn't have good support for drilling down
into functions from outside the linked executable (e.g. libc). The
difference in omnetpp all seems to come from there, and does not reflect
any degradation in codegen quality.

I can confirm with the current version of the PR there is no change in
the number of static sext.w across all the SPEC 2017 benchmarks
(rva22u64 O3)

Co-authored-by: Craig Topper <craig.topper@sifive.com>
2025-06-02 22:24:50 +01:00

2078 lines
54 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --extra_scrub
; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefixes=CHECK,RV64I
; RUN: llc -mtriple=riscv64 -mattr=+m,+xtheadba -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefixes=CHECK,RV64XTHEADBA
define signext i16 @th_addsl_1(i64 %0, ptr %1) {
; RV64I-LABEL: th_addsl_1:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 1
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: lh a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: th_addsl_1:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 1
; RV64XTHEADBA-NEXT: lh a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%3 = getelementptr inbounds i16, ptr %1, i64 %0
%4 = load i16, ptr %3
ret i16 %4
}
define signext i32 @th_addsl_2(i64 %0, ptr %1) {
; RV64I-LABEL: th_addsl_2:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 2
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: th_addsl_2:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 2
; RV64XTHEADBA-NEXT: lw a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%3 = getelementptr inbounds i32, ptr %1, i64 %0
%4 = load i32, ptr %3
ret i32 %4
}
define i64 @th_addsl_3(i64 %0, ptr %1) {
; RV64I-LABEL: th_addsl_3:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 3
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ld a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: th_addsl_3:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 3
; RV64XTHEADBA-NEXT: ld a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%3 = getelementptr inbounds i64, ptr %1, i64 %0
%4 = load i64, ptr %3
ret i64 %4
}
; Type legalization inserts a sext_inreg after the first add. That add will be
; selected as th.addsl which does not sign extend. SimplifyDemandedBits is unable
; to remove the sext_inreg because it has multiple uses. The ashr will use the
; sext_inreg to become sraiw. This leaves the sext_inreg only used by the shl.
; If the shl is selected as sllw, we don't need the sext_inreg.
define i64 @th_addsl_2_extra_sext(i32 %x, i32 %y, i32 %z) {
; RV64I-LABEL: th_addsl_2_extra_sext:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: sllw a1, a2, a0
; RV64I-NEXT: sraiw a0, a0, 2
; RV64I-NEXT: mul a0, a1, a0
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: th_addsl_2_extra_sext:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 2
; RV64XTHEADBA-NEXT: sllw a1, a2, a0
; RV64XTHEADBA-NEXT: sraiw a0, a0, 2
; RV64XTHEADBA-NEXT: mul a0, a1, a0
; RV64XTHEADBA-NEXT: ret
%a = shl i32 %x, 2
%b = add i32 %a, %y
%c = shl i32 %z, %b
%d = ashr i32 %b, 2
%e = sext i32 %c to i64
%f = sext i32 %d to i64
%g = mul i64 %e, %f
ret i64 %g
}
define i64 @addmul6(i64 %a, i64 %b) {
; RV64I-LABEL: addmul6:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a0, 1
; RV64I-NEXT: slli a0, a0, 3
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: addmul6:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 1
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 6
%d = add i64 %c, %b
ret i64 %d
}
define i64 @disjointormul6(i64 %a, i64 %b) {
; RV64I-LABEL: disjointormul6:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a0, 1
; RV64I-NEXT: slli a0, a0, 3
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: disjointormul6:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 1
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 6
%d = or disjoint i64 %c, %b
ret i64 %d
}
define i64 @addmul10(i64 %a, i64 %b) {
; RV64I-LABEL: addmul10:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a0, 1
; RV64I-NEXT: slli a0, a0, 3
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: addmul10:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 10
%d = add i64 %c, %b
ret i64 %d
}
define i64 @addmul12(i64 %a, i64 %b) {
; RV64I-LABEL: addmul12:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a0, 2
; RV64I-NEXT: slli a0, a0, 4
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: addmul12:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 1
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 2
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 12
%d = add i64 %c, %b
ret i64 %d
}
define i64 @addmul18(i64 %a, i64 %b) {
; RV64I-LABEL: addmul18:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a0, 1
; RV64I-NEXT: slli a0, a0, 4
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: addmul18:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 3
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 18
%d = add i64 %c, %b
ret i64 %d
}
define i64 @addmul20(i64 %a, i64 %b) {
; RV64I-LABEL: addmul20:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a0, 2
; RV64I-NEXT: slli a0, a0, 4
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: addmul20:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 2
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 20
%d = add i64 %c, %b
ret i64 %d
}
define i64 @addmul22(i64 %a, i64 %b) {
; CHECK-LABEL: addmul22:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 22
; CHECK-NEXT: mul a0, a0, a2
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: ret
%c = mul i64 %a, 22
%d = add i64 %c, %b
ret i64 %d
}
define i64 @addmul24(i64 %a, i64 %b) {
; RV64I-LABEL: addmul24:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a0, 3
; RV64I-NEXT: slli a0, a0, 5
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: addmul24:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 1
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 3
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 24
%d = add i64 %c, %b
ret i64 %d
}
define i64 @addmul36(i64 %a, i64 %b) {
; RV64I-LABEL: addmul36:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a0, 2
; RV64I-NEXT: slli a0, a0, 5
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: addmul36:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 3
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 2
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 36
%d = add i64 %c, %b
ret i64 %d
}
define i64 @addmul40(i64 %a, i64 %b) {
; RV64I-LABEL: addmul40:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a0, 3
; RV64I-NEXT: slli a0, a0, 5
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: addmul40:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 3
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 40
%d = add i64 %c, %b
ret i64 %d
}
define i64 @addmul72(i64 %a, i64 %b) {
; RV64I-LABEL: addmul72:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a0, 3
; RV64I-NEXT: slli a0, a0, 6
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: addmul72:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 3
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 3
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 72
%d = add i64 %c, %b
ret i64 %d
}
define i64 @mul50(i64 %a) {
; RV64I-LABEL: mul50:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 50
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul50:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: slli a0, a0, 1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 50
ret i64 %c
}
define i64 @addmul50(i64 %a, i64 %b) {
; RV64I-LABEL: addmul50:
; RV64I: # %bb.0:
; RV64I-NEXT: li a2, 50
; RV64I-NEXT: mul a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: addmul50:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 50
%d = add i64 %c, %b
ret i64 %d
}
define i64 @mul100(i64 %a) {
; RV64I-LABEL: mul100:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 100
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul100:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: slli a0, a0, 2
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 100
ret i64 %c
}
define i64 @addmul100(i64 %a, i64 %b) {
; RV64I-LABEL: addmul100:
; RV64I: # %bb.0:
; RV64I-NEXT: li a2, 100
; RV64I-NEXT: mul a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: addmul100:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 2
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 100
%d = add i64 %c, %b
ret i64 %d
}
define i64 @mul162(i64 %a) {
; RV64I-LABEL: mul162:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 162
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul162:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 3
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 3
; RV64XTHEADBA-NEXT: slli a0, a0, 1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 162
ret i64 %c
}
define i64 @addmul162(i64 %a, i64 %b) {
; RV64I-LABEL: addmul162:
; RV64I: # %bb.0:
; RV64I-NEXT: li a2, 162
; RV64I-NEXT: mul a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: addmul162:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 3
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 3
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 162
%d = add i64 %c, %b
ret i64 %d
}
define i64 @mul180(i64 %a) {
; RV64I-LABEL: mul180:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 180
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul180:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 3
; RV64XTHEADBA-NEXT: slli a0, a0, 2
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 180
ret i64 %c
}
define i64 @addmul180(i64 %a, i64 %b) {
; RV64I-LABEL: addmul180:
; RV64I: # %bb.0:
; RV64I-NEXT: li a2, 180
; RV64I-NEXT: mul a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: addmul180:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 3
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 2
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 180
%d = add i64 %c, %b
ret i64 %d
}
define i64 @add255mul180(i64 %a) {
; RV64I-LABEL: add255mul180:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 180
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: addi a0, a0, 255
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: add255mul180:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 3
; RV64XTHEADBA-NEXT: slli a0, a0, 2
; RV64XTHEADBA-NEXT: addi a0, a0, 255
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 180
%d = add i64 %c, 255
ret i64 %d
}
define i64 @mul200(i64 %a) {
; RV64I-LABEL: mul200:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 200
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul200:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: slli a0, a0, 3
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 200
ret i64 %c
}
define i64 @addmul200(i64 %a, i64 %b) {
; RV64I-LABEL: addmul200:
; RV64I: # %bb.0:
; RV64I-NEXT: li a2, 200
; RV64I-NEXT: mul a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: addmul200:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 3
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 200
%d = add i64 %c, %b
ret i64 %d
}
define i64 @addmul4096(i64 %a, i64 %b) {
; CHECK-LABEL: addmul4096:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: ret
%c = mul i64 %a, 4096
%d = add i64 %c, %b
ret i64 %d
}
define i64 @addmul4230(i64 %a, i64 %b) {
; CHECK-LABEL: addmul4230:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: addi a2, a2, 134
; CHECK-NEXT: mul a0, a0, a2
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: ret
%c = mul i64 %a, 4230
%d = add i64 %c, %b
ret i64 %d
}
define i64 @mul96(i64 %a) {
; RV64I-LABEL: mul96:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 5
; RV64I-NEXT: slli a0, a0, 7
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul96:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 1
; RV64XTHEADBA-NEXT: slli a0, a0, 5
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 96
ret i64 %c
}
define i64 @mul119(i64 %a) {
; RV64I-LABEL: mul119:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 119
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul119:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a1, a0, a0, 3
; RV64XTHEADBA-NEXT: slli a0, a0, 7
; RV64XTHEADBA-NEXT: sub a0, a0, a1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 119
ret i64 %c
}
define i64 @mul123(i64 %a) {
; RV64I-LABEL: mul123:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 123
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul123:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a1, a0, a0, 2
; RV64XTHEADBA-NEXT: slli a0, a0, 7
; RV64XTHEADBA-NEXT: sub a0, a0, a1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 123
ret i64 %c
}
define i64 @mul125(i64 %a) {
; RV64I-LABEL: mul125:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 125
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul125:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a1, a0, a0, 1
; RV64XTHEADBA-NEXT: slli a0, a0, 7
; RV64XTHEADBA-NEXT: sub a0, a0, a1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 125
ret i64 %c
}
define i64 @mul131(i64 %a) {
; RV64I-LABEL: mul131:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 131
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul131:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a1, a0, a0, 1
; RV64XTHEADBA-NEXT: slli a0, a0, 7
; RV64XTHEADBA-NEXT: add a0, a0, a1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 131
ret i64 %c
}
define i64 @mul133(i64 %a) {
; RV64I-LABEL: mul133:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 133
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul133:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a1, a0, a0, 2
; RV64XTHEADBA-NEXT: slli a0, a0, 7
; RV64XTHEADBA-NEXT: add a0, a0, a1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 133
ret i64 %c
}
define i64 @mul137(i64 %a) {
; RV64I-LABEL: mul137:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 137
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul137:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a1, a0, a0, 3
; RV64XTHEADBA-NEXT: slli a0, a0, 7
; RV64XTHEADBA-NEXT: add a0, a0, a1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 137
ret i64 %c
}
define i64 @mul160(i64 %a) {
; RV64I-LABEL: mul160:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 5
; RV64I-NEXT: slli a0, a0, 7
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul160:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: slli a0, a0, 5
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 160
ret i64 %c
}
define i64 @mul288(i64 %a) {
; RV64I-LABEL: mul288:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 5
; RV64I-NEXT: slli a0, a0, 8
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul288:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 3
; RV64XTHEADBA-NEXT: slli a0, a0, 5
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 288
ret i64 %c
}
define i64 @sh1add_imm(i64 %0) {
; CHECK-LABEL: sh1add_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: addi a0, a0, 5
; CHECK-NEXT: ret
%a = shl i64 %0, 1
%b = add i64 %a, 5
ret i64 %b
}
define i64 @sh2add_imm(i64 %0) {
; CHECK-LABEL: sh2add_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -6
; CHECK-NEXT: ret
%a = shl i64 %0, 2
%b = add i64 %a, -6
ret i64 %b
}
define i64 @sh3add_imm(i64 %0) {
; CHECK-LABEL: sh3add_imm:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: addi a0, a0, 7
; CHECK-NEXT: ret
%a = shl i64 %0, 3
%b = add i64 %a, 7
ret i64 %b
}
define i64 @mul258(i64 %a) {
; RV64I-LABEL: mul258:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 1
; RV64I-NEXT: slli a0, a0, 8
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul258:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: slli a1, a0, 8
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 258
ret i64 %c
}
define i64 @mul260(i64 %a) {
; RV64I-LABEL: mul260:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 2
; RV64I-NEXT: slli a0, a0, 8
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul260:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: slli a1, a0, 8
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 2
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 260
ret i64 %c
}
define i64 @mul264(i64 %a) {
; RV64I-LABEL: mul264:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 3
; RV64I-NEXT: slli a0, a0, 8
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul264:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: slli a1, a0, 8
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 3
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 264
ret i64 %c
}
define i64 @mul11(i64 %a) {
; RV64I-LABEL: mul11:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 11
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul11:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a1, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 11
ret i64 %c
}
define i64 @mul19(i64 %a) {
; RV64I-LABEL: mul19:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 19
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul19:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a1, a0, a0, 3
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 19
ret i64 %c
}
define i64 @mul13(i64 %a) {
; RV64I-LABEL: mul13:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 13
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul13:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a1, a0, a0, 1
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 2
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 13
ret i64 %c
}
define i64 @mul21(i64 %a) {
; RV64I-LABEL: mul21:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 21
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul21:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a1, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 2
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 21
ret i64 %c
}
define i64 @mul37(i64 %a) {
; RV64I-LABEL: mul37:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 37
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul37:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a1, a0, a0, 3
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 2
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 37
ret i64 %c
}
define i64 @mul25(i64 %a) {
; RV64I-LABEL: mul25:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 25
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul25:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 25
ret i64 %c
}
define i64 @mul41(i64 %a) {
; RV64I-LABEL: mul41:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 41
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul41:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a1, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 3
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 41
ret i64 %c
}
define i64 @mul73(i64 %a) {
; RV64I-LABEL: mul73:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 73
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul73:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a1, a0, a0, 3
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 3
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 73
ret i64 %c
}
define i64 @mul27(i64 %a) {
; RV64I-LABEL: mul27:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 27
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul27:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 1
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 3
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 27
ret i64 %c
}
define i64 @mul45(i64 %a) {
; RV64I-LABEL: mul45:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 45
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul45:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 3
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 45
ret i64 %c
}
define i64 @mul81(i64 %a) {
; RV64I-LABEL: mul81:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 81
; RV64I-NEXT: mul a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul81:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 3
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 3
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 81
ret i64 %c
}
define i64 @mul4098(i64 %a) {
; RV64I-LABEL: mul4098:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 1
; RV64I-NEXT: slli a0, a0, 12
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul4098:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: slli a1, a0, 12
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 1
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 4098
ret i64 %c
}
define i64 @mul4100(i64 %a) {
; RV64I-LABEL: mul4100:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 2
; RV64I-NEXT: slli a0, a0, 12
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul4100:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: slli a1, a0, 12
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 2
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 4100
ret i64 %c
}
define i64 @mul4104(i64 %a) {
; RV64I-LABEL: mul4104:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 3
; RV64I-NEXT: slli a0, a0, 12
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul4104:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: slli a1, a0, 12
; RV64XTHEADBA-NEXT: th.addsl a0, a1, a0, 3
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, 4104
ret i64 %c
}
define signext i32 @mulw192(i32 signext %a) {
; RV64I-LABEL: mulw192:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 6
; RV64I-NEXT: slli a0, a0, 8
; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mulw192:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 1
; RV64XTHEADBA-NEXT: slliw a0, a0, 6
; RV64XTHEADBA-NEXT: ret
%c = mul i32 %a, 192
ret i32 %c
}
define signext i32 @mulw320(i32 signext %a) {
; RV64I-LABEL: mulw320:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 6
; RV64I-NEXT: slli a0, a0, 8
; RV64I-NEXT: addw a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mulw320:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: slliw a0, a0, 6
; RV64XTHEADBA-NEXT: ret
%c = mul i32 %a, 320
ret i32 %c
}
define signext i32 @mulw576(i32 signext %a) {
; RV64I-LABEL: mulw576:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 6
; RV64I-NEXT: slli a0, a0, 9
; RV64I-NEXT: addw a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mulw576:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 3
; RV64XTHEADBA-NEXT: slliw a0, a0, 6
; RV64XTHEADBA-NEXT: ret
%c = mul i32 %a, 576
ret i32 %c
}
define i64 @add4104(i64 %a) {
; RV64I-LABEL: add4104:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a1, 1
; RV64I-NEXT: addi a1, a1, 8
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: add4104:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: li a1, 1026
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 2
; RV64XTHEADBA-NEXT: ret
%c = add i64 %a, 4104
ret i64 %c
}
define i64 @add4104_2(i64 %a) {
; RV64I-LABEL: add4104_2:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a1, 1
; RV64I-NEXT: addi a1, a1, 8
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: add4104_2:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: li a1, 1026
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 2
; RV64XTHEADBA-NEXT: ret
%c = or disjoint i64 %a, 4104
ret i64 %c
}
define i64 @add8208(i64 %a) {
; RV64I-LABEL: add8208:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a1, 2
; RV64I-NEXT: addi a1, a1, 16
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: add8208:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: li a1, 1026
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 3
; RV64XTHEADBA-NEXT: ret
%c = add i64 %a, 8208
ret i64 %c
}
; Make sure we prefer LUI for the 8192 instead of using sh3add.
define signext i32 @add8192_i32(i32 signext %a) {
; CHECK-LABEL: add8192_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, 2
; CHECK-NEXT: addw a0, a0, a1
; CHECK-NEXT: ret
%c = add i32 %a, 8192
ret i32 %c
}
; Make sure we prefer LUI for the 8192 instead of using sh3add.
define i64 @add8192(i64 %a) {
; CHECK-LABEL: add8192:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, 2
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: ret
%c = add i64 %a, 8192
ret i64 %c
}
define signext i32 @addshl32_5_6(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: addshl32_5_6:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: slli a1, a1, 6
; CHECK-NEXT: addw a0, a0, a1
; CHECK-NEXT: ret
%c = shl i32 %a, 5
%d = shl i32 %b, 6
%e = add i32 %c, %d
ret i32 %e
}
define i64 @addshl64_5_6(i64 %a, i64 %b) {
; CHECK-LABEL: addshl64_5_6:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: slli a1, a1, 6
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: ret
%c = shl i64 %a, 5
%d = shl i64 %b, 6
%e = add i64 %c, %d
ret i64 %e
}
define signext i32 @addshl32_5_7(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: addshl32_5_7:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: slli a1, a1, 7
; CHECK-NEXT: addw a0, a0, a1
; CHECK-NEXT: ret
%c = shl i32 %a, 5
%d = shl i32 %b, 7
%e = add i32 %c, %d
ret i32 %e
}
define i64 @addshl64_5_7(i64 %a, i64 %b) {
; CHECK-LABEL: addshl64_5_7:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: slli a1, a1, 7
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: ret
%c = shl i64 %a, 5
%d = shl i64 %b, 7
%e = add i64 %c, %d
ret i64 %e
}
define signext i32 @addshl32_5_8(i32 signext %a, i32 signext %b) {
; CHECK-LABEL: addshl32_5_8:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: slli a1, a1, 8
; CHECK-NEXT: addw a0, a0, a1
; CHECK-NEXT: ret
%c = shl i32 %a, 5
%d = shl i32 %b, 8
%e = add i32 %c, %d
ret i32 %e
}
define i64 @addshl64_5_8(i64 %a, i64 %b) {
; CHECK-LABEL: addshl64_5_8:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: slli a1, a1, 8
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: ret
%c = shl i64 %a, 5
%d = shl i64 %b, 8
%e = add i64 %c, %d
ret i64 %e
}
define i64 @sh6_sh3_add1(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
; RV64I-LABEL: sh6_sh3_add1:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: slli a2, a2, 3
; RV64I-NEXT: slli a1, a1, 6
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: sh6_sh3_add1:
; RV64XTHEADBA: # %bb.0: # %entry
; RV64XTHEADBA-NEXT: slli a1, a1, 6
; RV64XTHEADBA-NEXT: th.addsl a1, a1, a2, 3
; RV64XTHEADBA-NEXT: add a0, a1, a0
; RV64XTHEADBA-NEXT: ret
entry:
%shl = shl i64 %z, 3
%shl1 = shl i64 %y, 6
%add = add nsw i64 %shl1, %shl
%add2 = add nsw i64 %add, %x
ret i64 %add2
}
; Same computation as sh6_sh3_add1 but associated as ((y<<6) + x) + (z<<3);
; checks the th.addsl fold still triggers on the final add.
define i64 @sh6_sh3_add2(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
; RV64I-LABEL: sh6_sh3_add2:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: slli a2, a2, 3
; RV64I-NEXT: slli a1, a1, 6
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: sh6_sh3_add2:
; RV64XTHEADBA: # %bb.0: # %entry
; RV64XTHEADBA-NEXT: slli a1, a1, 6
; RV64XTHEADBA-NEXT: add a0, a1, a0
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a2, 3
; RV64XTHEADBA-NEXT: ret
entry:
%shl = shl i64 %z, 3
%shl1 = shl i64 %y, 6
%add = add nsw i64 %shl1, %x
%add2 = add nsw i64 %add, %shl
ret i64 %add2
}
; Association 3: x + ((y<<6) + (z<<3)); th.addsl forms the inner shifted add.
define i64 @sh6_sh3_add3(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
; RV64I-LABEL: sh6_sh3_add3:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: slli a2, a2, 3
; RV64I-NEXT: slli a1, a1, 6
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: sh6_sh3_add3:
; RV64XTHEADBA: # %bb.0: # %entry
; RV64XTHEADBA-NEXT: slli a1, a1, 6
; RV64XTHEADBA-NEXT: th.addsl a1, a1, a2, 3
; RV64XTHEADBA-NEXT: add a0, a0, a1
; RV64XTHEADBA-NEXT: ret
entry:
%shl = shl i64 %z, 3
%shl1 = shl i64 %y, 6
%add = add nsw i64 %shl1, %shl
%add2 = add nsw i64 %x, %add
ret i64 %add2
}
; Association 4: (x + (z<<3)) + (y<<6); the shift-by-3 add becomes th.addsl.
define i64 @sh6_sh3_add4(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
; RV64I-LABEL: sh6_sh3_add4:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: slli a2, a2, 3
; RV64I-NEXT: slli a1, a1, 6
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: sh6_sh3_add4:
; RV64XTHEADBA: # %bb.0: # %entry
; RV64XTHEADBA-NEXT: slli a1, a1, 6
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a2, 3
; RV64XTHEADBA-NEXT: add a0, a0, a1
; RV64XTHEADBA-NEXT: ret
entry:
%shl = shl i64 %z, 3
%shl1 = shl i64 %y, 6
%add = add nsw i64 %x, %shl
%add2 = add nsw i64 %add, %shl1
ret i64 %add2
}
; (zext (i32 lshr 1)) used as an i16 index: shift amounts match (1 and 1), so
; srliw feeds a re-shift; both run lines produce the same code here.
define signext i16 @srliw_1_sh1add(ptr %0, i32 signext %1) {
; CHECK-LABEL: srliw_1_sh1add:
; CHECK: # %bb.0:
; CHECK-NEXT: srliw a1, a1, 1
; CHECK-NEXT: slli a1, a1, 1
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: lh a0, 0(a0)
; CHECK-NEXT: ret
%3 = lshr i32 %1, 1
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i16, ptr %0, i64 %4
%6 = load i16, ptr %5, align 2
ret i16 %6
}
; (zext (i32 lshr 2)) indexing i32: srliw then slli 2 then add for both configs.
define signext i32 @srliw_2_sh2add(ptr %0, i32 signext %1) {
; CHECK-LABEL: srliw_2_sh2add:
; CHECK: # %bb.0:
; CHECK-NEXT: srliw a1, a1, 2
; CHECK-NEXT: slli a1, a1, 2
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: lw a0, 0(a0)
; CHECK-NEXT: ret
%3 = lshr i32 %1, 2
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i32, ptr %0, i64 %4
%6 = load i32, ptr %5, align 4
ret i32 %6
}
; (zext (i32 lshr 3)) indexing i64: srliw/slli/add sequence, common to both
; run lines (single CHECK prefix).
define i64 @srliw_3_sh3add(ptr %0, i32 signext %1) {
; CHECK-LABEL: srliw_3_sh3add:
; CHECK: # %bb.0:
; CHECK-NEXT: srliw a1, a1, 3
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: ld a0, 0(a0)
; CHECK-NEXT: ret
%3 = lshr i32 %1, 3
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i64, ptr %0, i64 %4
%6 = load i64, ptr %5, align 8
ret i64 %6
}
; (zext (i32 lshr 1)) indexing i32: the scale-by-4 add becomes th.addsl with
; XTheadBa, replacing slli+add.
define signext i32 @srliw_1_sh2add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_1_sh2add:
; RV64I: # %bb.0:
; RV64I-NEXT: srliw a1, a1, 1
; RV64I-NEXT: slli a1, a1, 2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: srliw_1_sh2add:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: srliw a1, a1, 1
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 2
; RV64XTHEADBA-NEXT: lw a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%3 = lshr i32 %1, 1
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i32, ptr %0, i64 %4
%6 = load i32, ptr %5, align 4
ret i32 %6
}
; (zext (i32 lshr 1)) indexing i64: scale-by-8 add folds to th.addsl ..., 3.
define i64 @srliw_1_sh3add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_1_sh3add:
; RV64I: # %bb.0:
; RV64I-NEXT: srliw a1, a1, 1
; RV64I-NEXT: slli a1, a1, 3
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ld a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: srliw_1_sh3add:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: srliw a1, a1, 1
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 3
; RV64XTHEADBA-NEXT: ld a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%3 = lshr i32 %1, 1
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i64, ptr %0, i64 %4
%6 = load i64, ptr %5, align 8
ret i64 %6
}
; (zext (i32 lshr 2)) indexing i64: th.addsl ..., 3 replaces slli+add.
define i64 @srliw_2_sh3add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_2_sh3add:
; RV64I: # %bb.0:
; RV64I-NEXT: srliw a1, a1, 2
; RV64I-NEXT: slli a1, a1, 3
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ld a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: srliw_2_sh3add:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: srliw a1, a1, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 3
; RV64XTHEADBA-NEXT: ld a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%3 = lshr i32 %1, 2
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i64, ptr %0, i64 %4
%6 = load i64, ptr %5, align 8
ret i64 %6
}
; (zext (i32 lshr 2)) indexing i16: th.addsl ..., 1 replaces slli+add.
define signext i16 @srliw_2_sh1add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_2_sh1add:
; RV64I: # %bb.0:
; RV64I-NEXT: srliw a1, a1, 2
; RV64I-NEXT: slli a1, a1, 1
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lh a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: srliw_2_sh1add:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: srliw a1, a1, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 1
; RV64XTHEADBA-NEXT: lh a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%3 = lshr i32 %1, 2
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i16, ptr %0, i64 %4
%6 = load i16, ptr %5, align 2
ret i16 %6
}
; (zext (i32 lshr 3)) indexing i32: th.addsl ..., 2 replaces slli+add.
define signext i32 @srliw_3_sh2add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_3_sh2add:
; RV64I: # %bb.0:
; RV64I-NEXT: srliw a1, a1, 3
; RV64I-NEXT: slli a1, a1, 2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: srliw_3_sh2add:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: srliw a1, a1, 3
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 2
; RV64XTHEADBA-NEXT: lw a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%3 = lshr i32 %1, 3
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i32, ptr %0, i64 %4
%6 = load i32, ptr %5, align 4
ret i32 %6
}
; (zext (i32 lshr 4)) indexing i64: th.addsl ..., 3 replaces slli+add.
define i64 @srliw_4_sh3add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_4_sh3add:
; RV64I: # %bb.0:
; RV64I-NEXT: srliw a1, a1, 4
; RV64I-NEXT: slli a1, a1, 3
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ld a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: srliw_4_sh3add:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: srliw a1, a1, 4
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 3
; RV64XTHEADBA-NEXT: ld a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%3 = lshr i32 %1, 4
%4 = zext i32 %3 to i64
%5 = getelementptr inbounds i64, ptr %0, i64 %4
%6 = load i64, ptr %5, align 8
ret i64 %6
}
; i64 (lshr 1) indexing i32. Base RV64I folds (x>>1)<<2 into (x<<1)&-4;
; XTheadBa instead keeps the srli and uses th.addsl for the scaled add.
define signext i32 @srli_1_sh2add(ptr %0, i64 %1) {
; RV64I-LABEL: srli_1_sh2add:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 1
; RV64I-NEXT: andi a1, a1, -4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: srli_1_sh2add:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: srli a1, a1, 1
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 2
; RV64XTHEADBA-NEXT: lw a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%3 = lshr i64 %1, 1
%4 = getelementptr inbounds i32, ptr %0, i64 %3
%5 = load i32, ptr %4, align 4
ret i32 %5
}
; i64 (lshr 2) indexing i64. RV64I combines (x>>2)<<3 into (x<<1)&-8;
; XTheadBa keeps srli and scales via th.addsl ..., 3.
define i64 @srli_2_sh3add(ptr %0, i64 %1) {
; RV64I-LABEL: srli_2_sh3add:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 1
; RV64I-NEXT: andi a1, a1, -8
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ld a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: srli_2_sh3add:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: srli a1, a1, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 3
; RV64XTHEADBA-NEXT: ld a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%3 = lshr i64 %1, 2
%4 = getelementptr inbounds i64, ptr %0, i64 %3
%5 = load i64, ptr %4, align 8
ret i64 %5
}
; i64 (lshr 2) indexing i16. RV64I folds (x>>2)<<1 into (x>>1)&-2;
; XTheadBa keeps srli 2 and uses th.addsl ..., 1.
define signext i16 @srli_2_sh1add(ptr %0, i64 %1) {
; RV64I-LABEL: srli_2_sh1add:
; RV64I: # %bb.0:
; RV64I-NEXT: srli a1, a1, 1
; RV64I-NEXT: andi a1, a1, -2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lh a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: srli_2_sh1add:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: srli a1, a1, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 1
; RV64XTHEADBA-NEXT: lh a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%3 = lshr i64 %1, 2
%4 = getelementptr inbounds i16, ptr %0, i64 %3
%5 = load i16, ptr %4, align 2
ret i16 %5
}
; i64 (lshr 3) indexing i32. RV64I folds (x>>3)<<2 into (x>>1)&-4;
; XTheadBa keeps srli 3 and uses th.addsl ..., 2.
define signext i32 @srli_3_sh2add(ptr %0, i64 %1) {
; RV64I-LABEL: srli_3_sh2add:
; RV64I: # %bb.0:
; RV64I-NEXT: srli a1, a1, 1
; RV64I-NEXT: andi a1, a1, -4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: srli_3_sh2add:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: srli a1, a1, 3
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 2
; RV64XTHEADBA-NEXT: lw a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%3 = lshr i64 %1, 3
%4 = getelementptr inbounds i32, ptr %0, i64 %3
%5 = load i32, ptr %4, align 4
ret i32 %5
}
; i64 (lshr 4) indexing i64. RV64I folds (x>>4)<<3 into (x>>1)&-8;
; XTheadBa keeps srli 4 and uses th.addsl ..., 3.
define i64 @srli_4_sh3add(ptr %0, i64 %1) {
; RV64I-LABEL: srli_4_sh3add:
; RV64I: # %bb.0:
; RV64I-NEXT: srli a1, a1, 1
; RV64I-NEXT: andi a1, a1, -8
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ld a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: srli_4_sh3add:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: srli a1, a1, 4
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 3
; RV64XTHEADBA-NEXT: ld a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%3 = lshr i64 %1, 4
%4 = getelementptr inbounds i64, ptr %0, i64 %3
%5 = load i64, ptr %4, align 8
ret i64 %5
}
; 2-D index into [2 x i8]: outer index scaled by 2 (th.addsl ..., 1 with
; XTheadBa), inner byte index added directly.
define i8 @array_index_sh1_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh1_sh0:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 1
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: array_index_sh1_sh0:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 1
; RV64XTHEADBA-NEXT: add a0, a0, a2
; RV64XTHEADBA-NEXT: lbu a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%a = getelementptr inbounds [2 x i8], ptr %p, i64 %idx1, i64 %idx2
%b = load i8, ptr %a, align 1
ret i8 %b
}
; 2-D index into [2 x i16]: both scaled adds (x4 outer, x2 inner) become
; th.addsl with XTheadBa.
define i16 @array_index_sh1_sh1(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh1_sh1:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slli a2, a2, 1
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: lh a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: array_index_sh1_sh1:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 2
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a2, 1
; RV64XTHEADBA-NEXT: lh a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%a = getelementptr inbounds [2 x i16], ptr %p, i64 %idx1, i64 %idx2
%b = load i16, ptr %a, align 2
ret i16 %b
}
; 2-D index into [2 x i32]: x8 outer and x4 inner scales both fit th.addsl.
define i32 @array_index_sh1_sh2(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh1_sh2:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 3
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slli a2, a2, 2
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: array_index_sh1_sh2:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 3
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a2, 2
; RV64XTHEADBA-NEXT: lw a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%a = getelementptr inbounds [2 x i32], ptr %p, i64 %idx1, i64 %idx2
%b = load i32, ptr %a, align 4
ret i32 %b
}
; 2-D index into [2 x i64]: outer x16 exceeds th.addsl's max shift of 3, so it
; stays slli+add; only the inner x8 becomes th.addsl.
define i64 @array_index_sh1_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh1_sh3:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slli a2, a2, 3
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: ld a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: array_index_sh1_sh3:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: slli a1, a1, 4
; RV64XTHEADBA-NEXT: add a0, a0, a1
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a2, 3
; RV64XTHEADBA-NEXT: ld a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%a = getelementptr inbounds [2 x i64], ptr %p, i64 %idx1, i64 %idx2
%b = load i64, ptr %a, align 8
ret i64 %b
}
; 2-D index into [4 x i8]: outer x4 scale via th.addsl ..., 2, inner byte
; index added directly.
define i8 @array_index_sh2_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh2_sh0:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 2
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: array_index_sh2_sh0:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 2
; RV64XTHEADBA-NEXT: add a0, a0, a2
; RV64XTHEADBA-NEXT: lbu a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%a = getelementptr inbounds [4 x i8], ptr %p, i64 %idx1, i64 %idx2
%b = load i8, ptr %a, align 1
ret i8 %b
}
; 2-D index into [4 x i16]: x8 outer and x2 inner both expressed as th.addsl.
define i16 @array_index_sh2_sh1(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh2_sh1:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 3
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slli a2, a2, 1
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: lh a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: array_index_sh2_sh1:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 3
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a2, 1
; RV64XTHEADBA-NEXT: lh a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%a = getelementptr inbounds [4 x i16], ptr %p, i64 %idx1, i64 %idx2
%b = load i16, ptr %a, align 2
ret i16 %b
}
; 2-D index into [4 x i32]: outer x16 is out of th.addsl range (slli+add);
; inner x4 uses th.addsl ..., 2.
define i32 @array_index_sh2_sh2(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh2_sh2:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slli a2, a2, 2
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: array_index_sh2_sh2:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: slli a1, a1, 4
; RV64XTHEADBA-NEXT: add a0, a0, a1
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a2, 2
; RV64XTHEADBA-NEXT: lw a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%a = getelementptr inbounds [4 x i32], ptr %p, i64 %idx1, i64 %idx2
%b = load i32, ptr %a, align 4
ret i32 %b
}
; 2-D index into [4 x i64]: outer x32 stays slli+add; inner x8 folds to
; th.addsl ..., 3.
define i64 @array_index_sh2_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh2_sh3:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 5
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slli a2, a2, 3
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: ld a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: array_index_sh2_sh3:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: slli a1, a1, 5
; RV64XTHEADBA-NEXT: add a0, a0, a1
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a2, 3
; RV64XTHEADBA-NEXT: ld a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%a = getelementptr inbounds [4 x i64], ptr %p, i64 %idx1, i64 %idx2
%b = load i64, ptr %a, align 8
ret i64 %b
}
; 2-D index into [8 x i8]: outer x8 via th.addsl ..., 3, inner byte index
; added directly.
define i8 @array_index_sh3_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh3_sh0:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 3
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: array_index_sh3_sh0:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 3
; RV64XTHEADBA-NEXT: add a0, a0, a2
; RV64XTHEADBA-NEXT: lbu a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%a = getelementptr inbounds [8 x i8], ptr %p, i64 %idx1, i64 %idx2
%b = load i8, ptr %a, align 1
ret i8 %b
}
; 2-D index into [8 x i16]: outer x16 exceeds th.addsl range; inner x2 folds
; to th.addsl ..., 1.
define i16 @array_index_sh3_sh1(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh3_sh1:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slli a2, a2, 1
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: lh a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: array_index_sh3_sh1:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: slli a1, a1, 4
; RV64XTHEADBA-NEXT: add a0, a0, a1
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a2, 1
; RV64XTHEADBA-NEXT: lh a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%a = getelementptr inbounds [8 x i16], ptr %p, i64 %idx1, i64 %idx2
%b = load i16, ptr %a, align 2
ret i16 %b
}
; 2-D index into [8 x i32]: outer x32 stays slli+add; inner x4 folds to
; th.addsl ..., 2.
define i32 @array_index_sh3_sh2(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh3_sh2:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 5
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slli a2, a2, 2
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: array_index_sh3_sh2:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: slli a1, a1, 5
; RV64XTHEADBA-NEXT: add a0, a0, a1
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a2, 2
; RV64XTHEADBA-NEXT: lw a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%a = getelementptr inbounds [8 x i32], ptr %p, i64 %idx1, i64 %idx2
%b = load i32, ptr %a, align 4
ret i32 %b
}
; 2-D index into [8 x i64]: outer x64 stays slli+add; inner x8 folds to
; th.addsl ..., 3.
define i64 @array_index_sh3_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh3_sh3:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 6
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slli a2, a2, 3
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: ld a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: array_index_sh3_sh3:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: slli a1, a1, 6
; RV64XTHEADBA-NEXT: add a0, a0, a1
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a2, 3
; RV64XTHEADBA-NEXT: ld a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%a = getelementptr inbounds [8 x i64], ptr %p, i64 %idx1, i64 %idx2
%b = load i64, ptr %a, align 8
ret i64 %b
}
; Similar to above, but with a lshr on one of the indices. This requires
; special handling during isel to form a shift pair.
; Like array_index_sh3_sh3 but the outer index is (idx1 >> 58): isel must form
; an srli/slli shift pair for the masked outer index while still using
; th.addsl for the inner x8 scale on XTheadBa.
define i64 @array_index_lshr_sh3_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_lshr_sh3_sh3:
; RV64I: # %bb.0:
; RV64I-NEXT: srli a1, a1, 58
; RV64I-NEXT: slli a2, a2, 3
; RV64I-NEXT: slli a1, a1, 6
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ld a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: array_index_lshr_sh3_sh3:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: srli a1, a1, 58
; RV64XTHEADBA-NEXT: slli a1, a1, 6
; RV64XTHEADBA-NEXT: add a0, a0, a1
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a2, 3
; RV64XTHEADBA-NEXT: ld a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%shr = lshr i64 %idx1, 58
%a = getelementptr inbounds [8 x i64], ptr %p, i64 %shr, i64 %idx2
%b = load i64, ptr %a, align 8
ret i64 %b
}
; 2-D index into [16 x i8]: outer x16 is beyond th.addsl's shift range, so
; both configurations emit the same slli/add/add sequence (single CHECK).
define i8 @array_index_sh4_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; CHECK-LABEL: array_index_sh4_sh0:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: lbu a0, 0(a0)
; CHECK-NEXT: ret
%a = getelementptr inbounds [16 x i8], ptr %p, i64 %idx1, i64 %idx2
%b = load i8, ptr %a, align 1
ret i8 %b
}
; 2-D index into [16 x i16]: outer x32 stays slli+add; inner x2 folds to
; th.addsl ..., 1.
define i16 @array_index_sh4_sh1(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh4_sh1:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 5
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slli a2, a2, 1
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: lh a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: array_index_sh4_sh1:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: slli a1, a1, 5
; RV64XTHEADBA-NEXT: add a0, a0, a1
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a2, 1
; RV64XTHEADBA-NEXT: lh a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%a = getelementptr inbounds [16 x i16], ptr %p, i64 %idx1, i64 %idx2
%b = load i16, ptr %a, align 2
ret i16 %b
}
; 2-D index into [16 x i32]: outer x64 stays slli+add; inner x4 folds to
; th.addsl ..., 2.
define i32 @array_index_sh4_sh2(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh4_sh2:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 6
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slli a2, a2, 2
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: array_index_sh4_sh2:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: slli a1, a1, 6
; RV64XTHEADBA-NEXT: add a0, a0, a1
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a2, 2
; RV64XTHEADBA-NEXT: lw a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%a = getelementptr inbounds [16 x i32], ptr %p, i64 %idx1, i64 %idx2
%b = load i32, ptr %a, align 4
ret i32 %b
}
; 2-D index into [16 x i64]: outer x128 stays slli+add; inner x8 folds to
; th.addsl ..., 3.
define i64 @array_index_sh4_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh4_sh3:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 7
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slli a2, a2, 3
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: ld a0, 0(a0)
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: array_index_sh4_sh3:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: slli a1, a1, 7
; RV64XTHEADBA-NEXT: add a0, a0, a1
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a2, 3
; RV64XTHEADBA-NEXT: ld a0, 0(a0)
; RV64XTHEADBA-NEXT: ret
%a = getelementptr inbounds [16 x i64], ptr %p, i64 %idx1, i64 %idx2
%b = load i64, ptr %a, align 8
ret i64 %b
}
; mul by -1 lowers to a single neg.
define i64 @mul_neg1(i64 %a) {
; CHECK-LABEL: mul_neg1:
; CHECK: # %bb.0:
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
%c = mul i64 %a, -1
ret i64 %c
}
; mul by -2 lowers to shift-left-1 then negate.
define i64 @mul_neg2(i64 %a) {
; CHECK-LABEL: mul_neg2:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
%c = mul i64 %a, -2
ret i64 %c
}
; mul by -3: XTheadBa computes a*3 with one th.addsl then negates, saving an
; instruction over the base shift/neg/sub sequence.
define i64 @mul_neg3(i64 %a) {
; RV64I-LABEL: mul_neg3:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 1
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul_neg3:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 1
; RV64XTHEADBA-NEXT: neg a0, a0
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, -3
ret i64 %c
}
; mul by -4 lowers to shift-left-2 then negate.
define i64 @mul_neg4(i64 %a) {
; CHECK-LABEL: mul_neg4:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
%c = mul i64 %a, -4
ret i64 %c
}
; mul by -5: XTheadBa computes a*5 with th.addsl (a + a<<2) then negates.
define i64 @mul_neg5(i64 %a) {
; RV64I-LABEL: mul_neg5:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 2
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul_neg5:
; RV64XTHEADBA: # %bb.0:
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a0, 2
; RV64XTHEADBA-NEXT: neg a0, a0
; RV64XTHEADBA-NEXT: ret
%c = mul i64 %a, -5
ret i64 %c
}
; mul by -6: no profitable shift-add decomposition is chosen; a real mul with
; a materialised -6 is emitted for both configurations.
define i64 @mul_neg6(i64 %a) {
; CHECK-LABEL: mul_neg6:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, -6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: ret
%c = mul i64 %a, -6
ret i64 %c
}
; mul by -7 lowers to a - (a<<3).
define i64 @mul_neg7(i64 %a) {
; CHECK-LABEL: mul_neg7:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a1, a0, 3
; CHECK-NEXT: sub a0, a0, a1
; CHECK-NEXT: ret
%c = mul i64 %a, -7
ret i64 %c
}
; mul by -8 lowers to shift-left-3 then negate.
define i64 @mul_neg8(i64 %a) {
; CHECK-LABEL: mul_neg8:
; CHECK: # %bb.0:
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
%c = mul i64 %a, -8
ret i64 %c
}
; (ashr 32 then lshr 6) scaled by 8 as a GEP: the trailing slli+add folds to
; th.addsl ..., 3 with XTheadBa while the srai/srli pair is preserved.
define ptr @srai_srli_sh3add(ptr %0, i64 %1) nounwind {
; RV64I-LABEL: srai_srli_sh3add:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: srai a1, a1, 32
; RV64I-NEXT: srli a1, a1, 6
; RV64I-NEXT: slli a1, a1, 3
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: srai_srli_sh3add:
; RV64XTHEADBA: # %bb.0: # %entry
; RV64XTHEADBA-NEXT: srai a1, a1, 32
; RV64XTHEADBA-NEXT: srli a1, a1, 6
; RV64XTHEADBA-NEXT: th.addsl a0, a0, a1, 3
; RV64XTHEADBA-NEXT: ret
entry:
%2 = ashr i64 %1, 32
%3 = lshr i64 %2, 6
%4 = getelementptr i64, ptr %0, i64 %3
ret ptr %4
}
; As srai_srli_sh3add but scaled by 16 (i128 element): shift of 4 exceeds
; th.addsl's range, so both configs emit srai/srli/slli/add (single CHECK).
define ptr @srai_srli_slli(ptr %0, i64 %1) nounwind {
; CHECK-LABEL: srai_srli_slli:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: srai a1, a1, 32
; CHECK-NEXT: srli a1, a1, 6
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: ret
entry:
%2 = ashr i64 %1, 32
%3 = lshr i64 %2, 6
%4 = getelementptr i128, ptr %0, i64 %3
ret ptr %4
}
; Negative to make sure the peephole added for srai_srli_slli and
; srai_srli_sh3add doesn't break this.
; Negative test: (ashr 8) & -8 must stay srai+andi; the srai/srli peephole
; must not rewrite it.
define i64 @srai_andi(i64 %x) nounwind {
; CHECK-LABEL: srai_andi:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: srai a0, a0, 8
; CHECK-NEXT: andi a0, a0, -8
; CHECK-NEXT: ret
entry:
%y = ashr i64 %x, 8
%z = and i64 %y, -8
ret i64 %z
}
; Negative to make sure the peephole added for srai_srli_slli and
; srai_srli_sh3add doesn't break this.
; Negative test: (ashr 8) & -8192 uses srai plus a lui-materialised mask and
; must not be disturbed by the srai/srli peephole.
define i64 @srai_lui_and(i64 %x) nounwind {
; CHECK-LABEL: srai_lui_and:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: srai a0, a0, 8
; CHECK-NEXT: lui a1, 1048574
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: ret
entry:
%y = ashr i64 %x, 8
%z = and i64 %y, -8192
ret i64 %z
}