
; When performing a 64-bit sra of a negative value with a shift amount in the
; range [32, 63], the hi half is created with a move of -1, and the exact flag
; is preserved on the reduced 32-bit shift.
; Alive2 verification (hi-half move of -1): https://alive2.llvm.org/ce/z/kXd7Ac
; Alive2 verification (exact flag):         https://alive2.llvm.org/ce/z/L86tXf
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -stop-after=finalize-isel -o - %s | FileCheck %s
;; Test that reduction of:
;;
;;   DST = ashr i64 X, Y
;;
;; where Y is in the range [63-32] to:
;;
;;   DST = [ashr i32 HI(X), (Y & 0x1F), ashr i32 HI(X), 31]
;;
;; preserves flags
|
; The `or %shift_amt, 32` pins the shift amount into [32, 63], which lets ISel
; reduce the 64-bit ashr to two 32-bit shifts of the hi half. The first
; V_ASHRREV_I32_e64 must carry the `exact` flag inherited from `ashr exact`.
define i64 @ashr_exact(i64 %arg0, i64 %shift_amt) {
; CHECK-LABEL: name: ashr_exact
; CHECK: bb.0 (%ir-block.0):
; CHECK-NEXT: liveins: $vgpr1, $vgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr2
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; CHECK-NEXT: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; CHECK-NEXT: [[DEF2:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[DEF]]
; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub1
; CHECK-NEXT: [[DEF3:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; CHECK-NEXT: [[DEF4:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; CHECK-NEXT: [[DEF5:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
; CHECK-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[DEF3]]
; CHECK-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, killed [[COPY4]], %subreg.sub1
; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0
; CHECK-NEXT: [[V_ASHRREV_I32_e64_:%[0-9]+]]:vgpr_32 = exact V_ASHRREV_I32_e64 killed [[COPY5]], [[COPY3]], implicit $exec
; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 31
; CHECK-NEXT: [[V_ASHRREV_I32_e64_1:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 killed [[S_MOV_B32_]], [[COPY3]], implicit $exec
; CHECK-NEXT: $vgpr0 = COPY [[V_ASHRREV_I32_e64_]]
; CHECK-NEXT: $vgpr1 = COPY [[V_ASHRREV_I32_e64_1]]
; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
  %or = or i64 %shift_amt, 32
  %ashr = ashr exact i64 %arg0, %or
  ret i64 %ashr
}
|
|
|