
The motivation for this change is simply to reduce test duplication. As can be seen in the (massive) test delta, we have many tests whose output differs only due to the use of addi on rv32 vs. addiw on rv64 when the high bits are don't-care. As an aside, we don't need to worry about the non-zero immediate restriction on the compressed variants because we're not directly forming the compressed variants. If we happen to get a zero immediate for the ADDI, then either a later optimization will strip the useless instruction, or the encoder is responsible for not compressing it.
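For illustration, a hypothetical before/after sketch of the kind of divergence this removes (the register and immediate below are made up, not taken from the actual delta). Before, the two targets needed separate check lines for the same IR:

; RV32I-NEXT:    addi a0, a0, 1
; RV64I-NEXT:    addiw a0, a0, 1

After the change, rv64 also selects addi when only the low 32 bits are demanded, so the rv32 and rv64 check lines match and can be merged under a shared prefix.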
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64I

define void @sext_shl_trunc_same_size(i16 %x, i32 %y, ptr %res) {
; RV32I-LABEL: sext_shl_trunc_same_size:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    sh a0, 0(a2)
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sext_shl_trunc_same_size:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    sh a0, 0(a2)
; RV64I-NEXT:    ret
  %conv = sext i16 %x to i32
  %shl = shl i32 %conv, %y
  %t = trunc i32 %shl to i16
  store i16 %t, ptr %res
  ret void
}

define void @zext_shl_trunc_same_size(i16 %x, i32 %y, ptr %res) {
; RV32I-LABEL: zext_shl_trunc_same_size:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    sh a0, 0(a2)
; RV32I-NEXT:    ret
;
; RV64I-LABEL: zext_shl_trunc_same_size:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    sh a0, 0(a2)
; RV64I-NEXT:    ret
  %conv = zext i16 %x to i32
  %shl = shl i32 %conv, %y
  %t = trunc i32 %shl to i16
  store i16 %t, ptr %res
  ret void
}

define void @sext_shl_trunc_smaller(i16 %x, i32 %y, ptr %res) {
; RV32I-LABEL: sext_shl_trunc_smaller:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    sb a0, 0(a2)
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sext_shl_trunc_smaller:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    sb a0, 0(a2)
; RV64I-NEXT:    ret
  %conv = sext i16 %x to i32
  %shl = shl i32 %conv, %y
  %t = trunc i32 %shl to i8
  store i8 %t, ptr %res
  ret void
}

define void @zext_shl_trunc_smaller(i16 %x, i32 %y, ptr %res) {
; RV32I-LABEL: zext_shl_trunc_smaller:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    sb a0, 0(a2)
; RV32I-NEXT:    ret
;
; RV64I-LABEL: zext_shl_trunc_smaller:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    sb a0, 0(a2)
; RV64I-NEXT:    ret
  %conv = zext i16 %x to i32
  %shl = shl i32 %conv, %y
  %t = trunc i32 %shl to i8
  store i8 %t, ptr %res
  ret void
}

; negative test - demanding 1 high-bit too many to change the extend

define signext i17 @sext_shl_trunc_larger(i16 %x, i32 %y) {
; RV32I-LABEL: sext_shl_trunc_larger:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srai a0, a0, 16
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    slli a0, a0, 15
; RV32I-NEXT:    srai a0, a0, 15
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sext_shl_trunc_larger:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srai a0, a0, 48
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 47
; RV64I-NEXT:    srai a0, a0, 47
; RV64I-NEXT:    ret
  %conv = sext i16 %x to i32
  %shl = shl i32 %conv, %y
  %t = trunc i32 %shl to i17
  ret i17 %t
}

; negative test - demanding 1 high-bit too many to change the extend

define zeroext i17 @zext_shl_trunc_larger(i16 %x, i32 %y) {
; RV32I-LABEL: zext_shl_trunc_larger:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a0, a0, 16
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    slli a0, a0, 15
; RV32I-NEXT:    srli a0, a0, 15
; RV32I-NEXT:    ret
;
; RV64I-LABEL: zext_shl_trunc_larger:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a0, a0, 48
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 47
; RV64I-NEXT:    srli a0, a0, 47
; RV64I-NEXT:    ret
  %conv = zext i16 %x to i32
  %shl = shl i32 %conv, %y
  %t = trunc i32 %shl to i17
  ret i17 %t
}

define i32 @sext_shl_mask(i16 %x, i32 %y) {
; RV32I-LABEL: sext_shl_mask:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a0, a0, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sext_shl_mask:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a0, a0, 48
; RV64I-NEXT:    ret
  %conv = sext i16 %x to i32
  %shl = shl i32 %conv, %y
  %t = and i32 %shl, 65535
  ret i32 %t
}

define i32 @zext_shl_mask(i16 %x, i32 %y) {
; RV32I-LABEL: zext_shl_mask:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a0, a0, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: zext_shl_mask:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a0, a0, 48
; RV64I-NEXT:    ret
  %conv = zext i16 %x to i32
  %shl = shl i32 %conv, %y
  %t = and i32 %shl, 65535
  ret i32 %t
}

; negative test - demanding a bit that could change with sext

define i32 @sext_shl_mask_higher(i16 %x, i32 %y) {
; RV32I-LABEL: sext_shl_mask_higher:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srai a0, a0, 16
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    lui a1, 16
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sext_shl_mask_higher:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srai a0, a0, 48
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    lui a1, 16
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    ret
  %conv = sext i16 %x to i32
  %shl = shl i32 %conv, %y
  %t = and i32 %shl, 65536
  ret i32 %t
}

; negative test - demanding a bit that could change with zext

define i32 @zext_shl_mask_higher(i16 %x, i32 %y) {
; RV32I-LABEL: zext_shl_mask_higher:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a0, a0, 16
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    lui a1, 16
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: zext_shl_mask_higher:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a0, a0, 48
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    lui a1, 16
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    ret
  %conv = zext i16 %x to i32
  %shl = shl i32 %conv, %y
  %t = and i32 %shl, 65536
  ret i32 %t
}

; May need some, but not all of the bits set by the 'or'.

define i32 @set_shl_mask(i32 %x, i32 %y) {
; RV32I-LABEL: set_shl_mask:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a2, 16
; RV32I-NEXT:    addi a3, a2, 1
; RV32I-NEXT:    or a0, a0, a3
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    and a0, a0, a2
; RV32I-NEXT:    ret
;
; RV64I-LABEL: set_shl_mask:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a2, 16
; RV64I-NEXT:    addi a3, a2, 1
; RV64I-NEXT:    or a0, a0, a3
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    and a0, a0, a2
; RV64I-NEXT:    ret
  %z = or i32 %x, 196609
  %s = shl i32 %z, %y
  %r = and i32 %s, 65536
  ret i32 %r
}