[AMDGPU] Convert more 64-bit lshr to 32-bit if shift amt>=32 (#138204)

Convert a vector 64-bit lshr to a 32-bit shift when the shift amount is
known to be >= 32. Also convert a scalar 64-bit lshr to a 32-bit shift
when the shift amount is variable but known to be >= 32.
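
As a rough sketch of the scalar variable-amount case (hand-written LLVM IR,
not taken from the test files; the @srl_amt_known_ge_32* names are
hypothetical), the combine reduces the 64-bit shift to a single 32-bit shift
of the high half:

define i64 @srl_amt_known_ge_32(i64 %arg0, i64 %shift_amt) {
  %or  = or i64 %shift_amt, 32        ; shift amount is now known to be >= 32
  %srl = lshr i64 %arg0, %or
  ret i64 %srl
}

; Roughly equivalent, assuming the effective shift amount stays in [32, 63]:
define i64 @srl_amt_known_ge_32_expanded(i64 %arg0, i64 %shift_amt) {
  %hi64 = lshr i64 %arg0, 32          ; hi_32(arg0)
  %hi   = trunc i64 %hi64 to i32
  %amt  = trunc i64 %shift_amt to i32
  %amt5 = and i32 %amt, 31            ; clamp; equals (amount - 32) in that range
  %lo   = lshr i32 %hi, %amt5
  %res  = zext i32 %lo to i64         ; high 32 bits of the result are zero
  ret i64 %res
}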

---------

Signed-off-by: John Lu <John.Lu@amd.com>
LU-JOHN 2025-06-13 03:03:06 -05:00 committed by GitHub
parent 02b6ed0bf1
commit c4caf00bfb
3 changed files with 196 additions and 161 deletions
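
For the vector cases handled in performSrlCombine below, the high 32-bit word
of each element is extracted, shifted as a narrow vector, and re-interleaved
with zeros. A hand-written IR sketch of the <2 x i64> case (hypothetical
function names; the lane layout assumes little-endian element ordering):

define <2 x i64> @srl_v2_known_ge_32(<2 x i64> %arg0, <2 x i64> %shift_amt) {
  %or  = or <2 x i64> %shift_amt, splat (i64 32)
  %srl = lshr <2 x i64> %arg0, %or
  ret <2 x i64> %srl
}

; Roughly equivalent, assuming each effective shift amount stays in [32, 63]:
define <2 x i64> @srl_v2_known_ge_32_expanded(<2 x i64> %arg0, <2 x i64> %shift_amt) {
  %split = bitcast <2 x i64> %arg0 to <4 x i32>   ; lo0, hi0, lo1, hi1
  %hi    = shufflevector <4 x i32> %split, <4 x i32> poison, <2 x i32> <i32 1, i32 3>
  %amt32 = trunc <2 x i64> %shift_amt to <2 x i32>
  %amt   = and <2 x i32> %amt32, splat (i32 31)   ; clamp, as the AND in the combine does
  %lo    = lshr <2 x i32> %hi, %amt
  ; rebuild the wide result with zeroed high words
  %pair  = shufflevector <2 x i32> %lo, <2 x i32> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  %res   = bitcast <4 x i32> %pair to <2 x i64>
  ret <2 x i64> %res
}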


@@ -4097,7 +4097,7 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
if (VT.getScalarType() != MVT::i64)
return SDValue();
// i64 (shl x, C) -> (build_pair 0, (shl x, C -32))
// i64 (shl x, C) -> (build_pair 0, (shl x, C - 32))
// On some subtargets, 64-bit shift is a quarter rate instruction. In the
// common case, splitting this into a move and a 32-bit shift is faster and
@@ -4117,12 +4117,12 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
ShiftAmt = DAG.getConstant(RHSVal - TargetScalarType.getSizeInBits(), SL,
TargetType);
} else {
SDValue truncShiftAmt = DAG.getNode(ISD::TRUNCATE, SL, TargetType, RHS);
SDValue TruncShiftAmt = DAG.getNode(ISD::TRUNCATE, SL, TargetType, RHS);
const SDValue ShiftMask =
DAG.getConstant(TargetScalarType.getSizeInBits() - 1, SL, TargetType);
// This AND instruction will clamp out of bounds shift values.
// It will also be removed during later instruction selection.
ShiftAmt = DAG.getNode(ISD::AND, SL, TargetType, truncShiftAmt, ShiftMask);
ShiftAmt = DAG.getNode(ISD::AND, SL, TargetType, TruncShiftAmt, ShiftMask);
}
SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, LHS);
@@ -4181,50 +4181,105 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (!RHS)
return SDValue();
SDValue RHS = N->getOperand(1);
ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
EVT VT = N->getValueType(0);
SDValue LHS = N->getOperand(0);
unsigned ShiftAmt = RHS->getZExtValue();
SelectionDAG &DAG = DCI.DAG;
SDLoc SL(N);
unsigned RHSVal;
// fold (srl (and x, c1 << c2), c2) -> (and (srl(x, c2), c1)
// this improves the ability to match BFE patterns in isel.
if (LHS.getOpcode() == ISD::AND) {
if (auto *Mask = dyn_cast<ConstantSDNode>(LHS.getOperand(1))) {
unsigned MaskIdx, MaskLen;
if (Mask->getAPIntValue().isShiftedMask(MaskIdx, MaskLen) &&
MaskIdx == ShiftAmt) {
return DAG.getNode(
ISD::AND, SL, VT,
DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0), N->getOperand(1)),
DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1), N->getOperand(1)));
if (CRHS) {
RHSVal = CRHS->getZExtValue();
// fold (srl (and x, c1 << c2), c2) -> (and (srl(x, c2), c1)
// this improves the ability to match BFE patterns in isel.
if (LHS.getOpcode() == ISD::AND) {
if (auto *Mask = dyn_cast<ConstantSDNode>(LHS.getOperand(1))) {
unsigned MaskIdx, MaskLen;
if (Mask->getAPIntValue().isShiftedMask(MaskIdx, MaskLen) &&
MaskIdx == RHSVal) {
return DAG.getNode(ISD::AND, SL, VT,
DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0),
N->getOperand(1)),
DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1),
N->getOperand(1)));
}
}
}
}
if (VT != MVT::i64)
if (VT.getScalarType() != MVT::i64)
return SDValue();
if (ShiftAmt < 32)
// for C >= 32
// i64 (srl x, C) -> (build_pair (srl hi_32(x), C -32), 0)
// On some subtargets, 64-bit shift is a quarter rate instruction. In the
// common case, splitting this into a move and a 32-bit shift is faster and
// the same code size.
KnownBits Known = DAG.computeKnownBits(RHS);
EVT ElementType = VT.getScalarType();
EVT TargetScalarType = ElementType.getHalfSizedIntegerVT(*DAG.getContext());
EVT TargetType = VT.isVector() ? VT.changeVectorElementType(TargetScalarType)
: TargetScalarType;
if (Known.getMinValue().getZExtValue() < TargetScalarType.getSizeInBits())
return SDValue();
// srl i64:x, C for C >= 32
// =>
// build_pair (srl hi_32(x), C - 32), 0
SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
SDValue ShiftAmt;
if (CRHS) {
ShiftAmt = DAG.getConstant(RHSVal - TargetScalarType.getSizeInBits(), SL,
TargetType);
} else {
SDValue TruncShiftAmt = DAG.getNode(ISD::TRUNCATE, SL, TargetType, RHS);
const SDValue ShiftMask =
DAG.getConstant(TargetScalarType.getSizeInBits() - 1, SL, TargetType);
// This AND instruction will clamp out of bounds shift values.
// It will also be removed during later instruction selection.
ShiftAmt = DAG.getNode(ISD::AND, SL, TargetType, TruncShiftAmt, ShiftMask);
}
SDValue Hi = getHiHalf64(LHS, DAG);
const SDValue Zero = DAG.getConstant(0, SL, TargetScalarType);
EVT ConcatType;
SDValue Hi;
SDLoc LHSSL(LHS);
// Bitcast LHS into ConcatType so hi-half of source can be extracted into Hi
if (VT.isVector()) {
unsigned NElts = TargetType.getVectorNumElements();
ConcatType = TargetType.getDoubleNumVectorElementsVT(*DAG.getContext());
SDValue SplitLHS = DAG.getNode(ISD::BITCAST, LHSSL, ConcatType, LHS);
SmallVector<SDValue, 8> HiOps(NElts);
SmallVector<SDValue, 16> HiAndLoOps;
SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
DAG.ExtractVectorElements(SplitLHS, HiAndLoOps, /*Start=*/0, NElts * 2);
for (unsigned I = 0; I != NElts; ++I)
HiOps[I] = HiAndLoOps[2 * I + 1];
Hi = DAG.getNode(ISD::BUILD_VECTOR, LHSSL, TargetType, HiOps);
} else {
const SDValue One = DAG.getConstant(1, LHSSL, TargetScalarType);
ConcatType = EVT::getVectorVT(*DAG.getContext(), TargetType, 2);
SDValue SplitLHS = DAG.getNode(ISD::BITCAST, LHSSL, ConcatType, LHS);
Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, LHSSL, TargetType, SplitLHS, One);
}
SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
SDValue NewShift = DAG.getNode(ISD::SRL, SL, TargetType, Hi, ShiftAmt);
return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
SDValue Vec;
if (VT.isVector()) {
unsigned NElts = TargetType.getVectorNumElements();
SmallVector<SDValue, 8> LoOps;
SmallVector<SDValue, 16> HiAndLoOps(NElts * 2, Zero);
DAG.ExtractVectorElements(NewShift, LoOps, 0, NElts);
for (unsigned I = 0; I != NElts; ++I)
HiAndLoOps[2 * I] = LoOps[I];
Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, ConcatType, HiAndLoOps);
} else {
Vec = DAG.getBuildVector(ConcatType, SL, {NewShift, Zero});
}
return DAG.getNode(ISD::BITCAST, SL, VT, Vec);
}
SDValue AMDGPUTargetLowering::performTruncateCombine(
@@ -5209,21 +5264,18 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
break;
}
case ISD::SHL: {
case ISD::SHL:
case ISD::SRL: {
// Range metadata can be invalidated when loads are converted to legal types
// (e.g. v2i64 -> v4i32).
// Try to convert vector shl before type legalization so that range metadata
// can be utilized.
// Try to convert vector shl/srl before type legalization so that range
// metadata can be utilized.
if (!(N->getValueType(0).isVector() &&
DCI.getDAGCombineLevel() == BeforeLegalizeTypes) &&
DCI.getDAGCombineLevel() < AfterLegalizeDAG)
break;
return performShlCombine(N, DCI);
}
case ISD::SRL: {
if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
break;
if (N->getOpcode() == ISD::SHL)
return performShlCombine(N, DCI);
return performSrlCombine(N, DCI);
}
case ISD::SRA: {


@@ -1945,16 +1945,14 @@ define <2 x i64> @lshr_mad_i64_vec(<2 x i64> %arg0) #0 {
; CI-LABEL: lshr_mad_i64_vec:
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v6, v3
; CI-NEXT: v_mov_b32_e32 v3, v1
; CI-NEXT: v_mov_b32_e32 v1, 0
; CI-NEXT: s_mov_b32 s4, 0xffff1c18
; CI-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v3, s4, v[0:1]
; CI-NEXT: v_mov_b32_e32 v3, v1
; CI-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, s4, v[0:1]
; CI-NEXT: s_mov_b32 s4, 0xffff1118
; CI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, s4, v[2:3]
; CI-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v3, s4, v[2:3]
; CI-NEXT: v_sub_i32_e32 v1, vcc, v5, v1
; CI-NEXT: v_sub_i32_e32 v3, vcc, v7, v3
; CI-NEXT: v_mov_b32_e32 v0, v4
; CI-NEXT: v_mov_b32_e32 v1, v5
; CI-NEXT: v_mov_b32_e32 v2, v6
; CI-NEXT: s_setpc_b64 s[30:31]
;
; SI-LABEL: lshr_mad_i64_vec:
@@ -1977,44 +1975,28 @@ define <2 x i64> @lshr_mad_i64_vec(<2 x i64> %arg0) #0 {
; GFX9-LABEL: lshr_mad_i64_vec:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v6, v3
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_mov_b32 s4, 0xffff1c18
; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v3, s4, v[0:1]
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, s4, v[0:1]
; GFX9-NEXT: s_mov_b32 s4, 0xffff1118
; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, s4, v[2:3]
; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v3, s4, v[2:3]
; GFX9-NEXT: v_sub_u32_e32 v1, v5, v1
; GFX9-NEXT: v_sub_u32_e32 v3, v7, v3
; GFX9-NEXT: v_mov_b32_e32 v0, v4
; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: v_mov_b32_e32 v2, v6
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX1100-LABEL: lshr_mad_i64_vec:
; GFX1100: ; %bb.0:
; GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1100-NEXT: v_mov_b32_e32 v8, v3
; GFX1100-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v1, 0
; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1100-NEXT: v_mad_u64_u32 v[4:5], null, 0xffff1c18, v6, v[0:1]
; GFX1100-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v0, v4
; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1100-NEXT: v_mad_u64_u32 v[6:7], null, 0xffff1118, v8, v[2:3]
; GFX1100-NEXT: v_dual_mov_b32 v1, v5 :: v_dual_mov_b32 v2, v6
; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1100-NEXT: v_mov_b32_e32 v3, v7
; GFX1100-NEXT: s_setpc_b64 s[30:31]
;
; GFX1150-LABEL: lshr_mad_i64_vec:
; GFX1150: ; %bb.0:
; GFX1150-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1150-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v5, v1
; GFX1150-NEXT: v_mov_b32_e32 v1, 0
; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1150-NEXT: v_mov_b32_e32 v3, v1
; GFX1150-NEXT: v_mad_u64_u32 v[0:1], null, 0xffff1c18, v5, v[0:1]
; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1150-NEXT: v_mad_u64_u32 v[2:3], null, 0xffff1118, v4, v[2:3]
; GFX1150-NEXT: s_setpc_b64 s[30:31]
; GFX11-LABEL: lshr_mad_i64_vec:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mad_u64_u32 v[4:5], null, 0xffff1c18, v1, v[0:1]
; GFX11-NEXT: v_mad_u64_u32 v[6:7], null, 0xffff1118, v3, v[2:3]
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_sub_nc_u32_e32 v1, v5, v1
; GFX11-NEXT: v_mov_b32_e32 v0, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_sub_nc_u32_e32 v3, v7, v3
; GFX11-NEXT: v_mov_b32_e32 v2, v6
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX12-LABEL: lshr_mad_i64_vec:
; GFX12: ; %bb.0:
@@ -2023,13 +2005,14 @@ define <2 x i64> @lshr_mad_i64_vec(<2 x i64> %arg0) #0 {
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v5, v1
; GFX12-NEXT: v_mov_b32_e32 v1, 0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX12-NEXT: v_mov_b32_e32 v3, v1
; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, 0xffff1c18, v5, v[0:1]
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX12-NEXT: v_mad_co_u64_u32 v[2:3], null, 0xffff1118, v4, v[2:3]
; GFX12-NEXT: v_mad_co_u64_u32 v[4:5], null, 0xffff1c18, v1, v[0:1]
; GFX12-NEXT: v_mad_co_u64_u32 v[6:7], null, 0xffff1118, v3, v[2:3]
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX12-NEXT: v_sub_nc_u32_e32 v1, v5, v1
; GFX12-NEXT: v_mov_b32_e32 v0, v4
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-NEXT: v_sub_nc_u32_e32 v3, v7, v3
; GFX12-NEXT: v_mov_b32_e32 v2, v6
; GFX12-NEXT: s_setpc_b64 s[30:31]
%lsh = lshr <2 x i64> %arg0, <i64 32, i64 32>
%mul = mul <2 x i64> %lsh, <i64 s0xffffffffffff1c18, i64 s0xffffffffffff1118>


@@ -17,9 +17,9 @@ define i64 @srl_metadata(i64 %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: srl_metadata:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_load_dword v2, v[2:3]
; CHECK-NEXT: flat_load_dword v0, v[2:3]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_lshrrev_b64 v[0:1], v2, v[0:1]
; CHECK-NEXT: v_lshrrev_b32_e32 v0, v0, v1
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%shift.amt = load i64, ptr %arg1.ptr, !range !0, !noundef !{}
@@ -30,9 +30,9 @@ define i64 @srl_metadata(i64 %arg0, ptr %arg1.ptr) {
define amdgpu_ps i64 @srl_metadata_sgpr_return(i64 inreg %arg0, ptr addrspace(1) inreg %arg1.ptr) {
; CHECK-LABEL: srl_metadata_sgpr_return:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_load_dword s2, s[2:3], 0x0
; CHECK-NEXT: s_load_dword s0, s[2:3], 0x0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_lshr_b64 s[0:1], s[0:1], s2
; CHECK-NEXT: s_lshr_b32 s0, s1, s0
; CHECK-NEXT: s_mov_b32 s1, 0
; CHECK-NEXT: ; return to shader part epilog
%shift.amt = load i64, ptr addrspace(1) %arg1.ptr, !range !0, !noundef !{}
@@ -45,9 +45,9 @@ define i64 @srl_exact_metadata(i64 %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: srl_exact_metadata:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_load_dword v2, v[2:3]
; CHECK-NEXT: flat_load_dword v0, v[2:3]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_lshrrev_b64 v[0:1], v2, v[0:1]
; CHECK-NEXT: v_lshrrev_b32_e32 v0, v0, v1
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%shift.amt = load i64, ptr %arg1.ptr, !range !0, !noundef !{}
@@ -59,9 +59,9 @@ define i64 @srl_metadata_two_ranges(i64 %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: srl_metadata_two_ranges:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_load_dword v2, v[2:3]
; CHECK-NEXT: flat_load_dword v0, v[2:3]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_lshrrev_b64 v[0:1], v2, v[0:1]
; CHECK-NEXT: v_lshrrev_b32_e32 v0, v0, v1
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%shift.amt = load i64, ptr %arg1.ptr, !range !1, !noundef !{}
@@ -106,8 +106,10 @@ define <2 x i64> @srl_v2_metadata(<2 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_lshrrev_b64 v[0:1], v4, v[0:1]
; CHECK-NEXT: v_lshrrev_b64 v[2:3], v6, v[2:3]
; CHECK-NEXT: v_lshrrev_b32_e32 v0, v4, v1
; CHECK-NEXT: v_lshrrev_b32_e32 v2, v6, v3
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: v_mov_b32_e32 v3, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%shift.amt = load <2 x i64>, ptr %arg1.ptr, !range !0, !noundef !{}
%srl = lshr <2 x i64> %arg0, %shift.amt
@@ -121,8 +123,10 @@ define <2 x i64> @srl_exact_v2_metadata(<2 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_lshrrev_b64 v[0:1], v4, v[0:1]
; CHECK-NEXT: v_lshrrev_b64 v[2:3], v6, v[2:3]
; CHECK-NEXT: v_lshrrev_b32_e32 v0, v4, v1
; CHECK-NEXT: v_lshrrev_b32_e32 v2, v6, v3
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: v_mov_b32_e32 v3, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%shift.amt = load <2 x i64>, ptr %arg1.ptr, !range !0, !noundef !{}
%srl = lshr exact <2 x i64> %arg0, %shift.amt
@@ -133,12 +137,15 @@ define <3 x i64> @srl_v3_metadata(<3 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: srl_v3_metadata:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_load_dword v12, v[6:7] offset:16
; CHECK-NEXT: flat_load_dword v0, v[6:7] offset:16
; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[6:7]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_lshrrev_b64 v[4:5], v12, v[4:5]
; CHECK-NEXT: v_lshrrev_b64 v[0:1], v8, v[0:1]
; CHECK-NEXT: v_lshrrev_b64 v[2:3], v10, v[2:3]
; CHECK-NEXT: v_lshrrev_b32_e32 v4, v0, v5
; CHECK-NEXT: v_lshrrev_b32_e32 v0, v8, v1
; CHECK-NEXT: v_lshrrev_b32_e32 v2, v10, v3
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: v_mov_b32_e32 v3, 0
; CHECK-NEXT: v_mov_b32_e32 v5, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%shift.amt = load <3 x i64>, ptr %arg1.ptr, !range !0, !noundef !{}
%srl = lshr <3 x i64> %arg0, %shift.amt
@@ -153,11 +160,15 @@ define <4 x i64> @srl_v4_metadata(<4 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_load_dwordx4 v[13:16], v[8:9] offset:16
; CHECK-NEXT: ; kill: killed $vgpr8 killed $vgpr9
; CHECK-NEXT: v_lshrrev_b64 v[0:1], v10, v[0:1]
; CHECK-NEXT: v_lshrrev_b64 v[2:3], v12, v[2:3]
; CHECK-NEXT: v_lshrrev_b32_e32 v0, v10, v1
; CHECK-NEXT: v_lshrrev_b32_e32 v2, v12, v3
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_lshrrev_b64 v[4:5], v13, v[4:5]
; CHECK-NEXT: v_lshrrev_b64 v[6:7], v15, v[6:7]
; CHECK-NEXT: v_lshrrev_b32_e32 v4, v13, v5
; CHECK-NEXT: v_lshrrev_b32_e32 v6, v15, v7
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: v_mov_b32_e32 v3, 0
; CHECK-NEXT: v_mov_b32_e32 v5, 0
; CHECK-NEXT: v_mov_b32_e32 v7, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%shift.amt = load <4 x i64>, ptr %arg1.ptr, !range !0, !noundef !{}
%srl = lshr <4 x i64> %arg0, %shift.amt
@@ -337,8 +348,7 @@ define i64 @srl_or32(i64 %arg0, i64 %shift_amt) {
; CHECK-LABEL: srl_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_or_b32_e32 v2, 32, v2
; CHECK-NEXT: v_lshrrev_b64 v[0:1], v2, v[0:1]
; CHECK-NEXT: v_lshrrev_b32_e32 v0, v2, v1
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or i64 %shift_amt, 32
@@ -350,10 +360,10 @@ define <2 x i64> @srl_v2_or32(<2 x i64> %arg0, <2 x i64> %shift_amt) {
; CHECK-LABEL: srl_v2_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_or_b32_e32 v5, 32, v6
; CHECK-NEXT: v_or_b32_e32 v4, 32, v4
; CHECK-NEXT: v_lshrrev_b64 v[0:1], v4, v[0:1]
; CHECK-NEXT: v_lshrrev_b64 v[2:3], v5, v[2:3]
; CHECK-NEXT: v_lshrrev_b32_e32 v0, v4, v1
; CHECK-NEXT: v_lshrrev_b32_e32 v2, v6, v3
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: v_mov_b32_e32 v3, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or <2 x i64> %shift_amt, splat (i64 32)
%srl = lshr <2 x i64> %arg0, %or
@@ -364,12 +374,12 @@ define <3 x i64> @srl_v3_or32(<3 x i64> %arg0, <3 x i64> %shift_amt) {
; CHECK-LABEL: srl_v3_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_or_b32_e32 v7, 32, v10
; CHECK-NEXT: v_or_b32_e32 v8, 32, v8
; CHECK-NEXT: v_or_b32_e32 v6, 32, v6
; CHECK-NEXT: v_lshrrev_b64 v[0:1], v6, v[0:1]
; CHECK-NEXT: v_lshrrev_b64 v[2:3], v8, v[2:3]
; CHECK-NEXT: v_lshrrev_b64 v[4:5], v7, v[4:5]
; CHECK-NEXT: v_lshrrev_b32_e32 v0, v6, v1
; CHECK-NEXT: v_lshrrev_b32_e32 v2, v8, v3
; CHECK-NEXT: v_lshrrev_b32_e32 v4, v10, v5
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: v_mov_b32_e32 v3, 0
; CHECK-NEXT: v_mov_b32_e32 v5, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or <3 x i64> %shift_amt, splat (i64 32)
%srl = lshr <3 x i64> %arg0, %or
@@ -380,14 +390,14 @@ define <4 x i64> @srl_v4_or32(<4 x i64> %arg0, <4 x i64> %shift_amt) {
; CHECK-LABEL: srl_v4_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_or_b32_e32 v9, 32, v14
; CHECK-NEXT: v_or_b32_e32 v11, 32, v12
; CHECK-NEXT: v_or_b32_e32 v10, 32, v10
; CHECK-NEXT: v_or_b32_e32 v8, 32, v8
; CHECK-NEXT: v_lshrrev_b64 v[0:1], v8, v[0:1]
; CHECK-NEXT: v_lshrrev_b64 v[2:3], v10, v[2:3]
; CHECK-NEXT: v_lshrrev_b64 v[4:5], v11, v[4:5]
; CHECK-NEXT: v_lshrrev_b64 v[6:7], v9, v[6:7]
; CHECK-NEXT: v_lshrrev_b32_e32 v0, v8, v1
; CHECK-NEXT: v_lshrrev_b32_e32 v2, v10, v3
; CHECK-NEXT: v_lshrrev_b32_e32 v4, v12, v5
; CHECK-NEXT: v_lshrrev_b32_e32 v6, v14, v7
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: v_mov_b32_e32 v3, 0
; CHECK-NEXT: v_mov_b32_e32 v5, 0
; CHECK-NEXT: v_mov_b32_e32 v7, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or <4 x i64> %shift_amt, splat (i64 32)
%srl = lshr <4 x i64> %arg0, %or
@@ -400,8 +410,7 @@ define i64 @srl_or32_sgpr(i64 inreg %arg0, i64 inreg %shift_amt) {
; CHECK-LABEL: srl_or32_sgpr:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_or_b32 s4, s18, 32
; CHECK-NEXT: s_lshr_b64 s[4:5], s[16:17], s4
; CHECK-NEXT: s_lshr_b32 s4, s17, s18
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
@@ -413,8 +422,7 @@ define i64 @srl_or32_sgpr(i64 inreg %arg0, i64 inreg %shift_amt) {
define amdgpu_ps i64 @srl_or32_sgpr_return(i64 inreg %arg0, i64 inreg %shift_amt) {
; CHECK-LABEL: srl_or32_sgpr_return:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_or_b32 s2, s2, 32
; CHECK-NEXT: s_lshr_b64 s[0:1], s[0:1], s2
; CHECK-NEXT: s_lshr_b32 s0, s1, s2
; CHECK-NEXT: s_mov_b32 s1, 0
; CHECK-NEXT: ; return to shader part epilog
%or = or i64 %shift_amt, 32
@@ -426,14 +434,12 @@ define <2 x i64> @srl_v2_or32_sgpr(<2 x i64> inreg %arg0, <2 x i64> inreg %shift
; CHECK-LABEL: srl_v2_or32_sgpr:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_or_b32 s6, s22, 32
; CHECK-NEXT: s_or_b32 s4, s20, 32
; CHECK-NEXT: s_lshr_b64 s[4:5], s[16:17], s4
; CHECK-NEXT: s_lshr_b64 s[6:7], s[18:19], s6
; CHECK-NEXT: s_lshr_b32 s4, s17, s20
; CHECK-NEXT: s_lshr_b32 s5, s19, s22
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: v_mov_b32_e32 v1, s5
; CHECK-NEXT: v_mov_b32_e32 v2, s6
; CHECK-NEXT: v_mov_b32_e32 v3, s7
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: v_mov_b32_e32 v2, s5
; CHECK-NEXT: v_mov_b32_e32 v3, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or <2 x i64> %shift_amt, splat (i64 32)
%srl = lshr <2 x i64> %arg0, %or
@@ -444,18 +450,15 @@ define <3 x i64> @srl_v3_or32_sgpr(<3 x i64> inreg %arg0, <3 x i64> inreg %shift
; CHECK-LABEL: srl_v3_or32_sgpr:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_or_b32 s8, s26, 32
; CHECK-NEXT: s_or_b32 s6, s24, 32
; CHECK-NEXT: s_or_b32 s4, s22, 32
; CHECK-NEXT: s_lshr_b64 s[4:5], s[16:17], s4
; CHECK-NEXT: s_lshr_b64 s[6:7], s[18:19], s6
; CHECK-NEXT: s_lshr_b64 s[8:9], s[20:21], s8
; CHECK-NEXT: s_lshr_b32 s4, s17, s22
; CHECK-NEXT: s_lshr_b32 s5, s19, s24
; CHECK-NEXT: s_lshr_b32 s6, s21, s26
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: v_mov_b32_e32 v1, s5
; CHECK-NEXT: v_mov_b32_e32 v2, s6
; CHECK-NEXT: v_mov_b32_e32 v3, s7
; CHECK-NEXT: v_mov_b32_e32 v4, s8
; CHECK-NEXT: v_mov_b32_e32 v5, s9
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: v_mov_b32_e32 v2, s5
; CHECK-NEXT: v_mov_b32_e32 v3, 0
; CHECK-NEXT: v_mov_b32_e32 v4, s6
; CHECK-NEXT: v_mov_b32_e32 v5, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or <3 x i64> %shift_amt, splat (i64 32)
%srl = lshr <3 x i64> %arg0, %or
@@ -466,20 +469,17 @@ define <4 x i64> @srl_v4_or32_sgpr(<4 x i64> inreg %arg0, <4 x i64> inreg %shift
; CHECK-LABEL: srl_v4_or32_sgpr:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_or_b32_e32 v0, 32, v0
; CHECK-NEXT: s_or_b32 s8, s28, 32
; CHECK-NEXT: s_or_b32 s6, s26, 32
; CHECK-NEXT: s_or_b32 s4, s24, 32
; CHECK-NEXT: s_lshr_b64 s[4:5], s[16:17], s4
; CHECK-NEXT: s_lshr_b64 s[6:7], s[18:19], s6
; CHECK-NEXT: s_lshr_b64 s[8:9], s[20:21], s8
; CHECK-NEXT: v_lshrrev_b64 v[6:7], v0, s[22:23]
; CHECK-NEXT: s_lshr_b32 s4, s17, s24
; CHECK-NEXT: s_lshr_b32 s5, s19, s26
; CHECK-NEXT: s_lshr_b32 s6, s21, s28
; CHECK-NEXT: v_lshrrev_b32_e64 v6, v0, s23
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: v_mov_b32_e32 v1, s5
; CHECK-NEXT: v_mov_b32_e32 v2, s6
; CHECK-NEXT: v_mov_b32_e32 v3, s7
; CHECK-NEXT: v_mov_b32_e32 v4, s8
; CHECK-NEXT: v_mov_b32_e32 v5, s9
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: v_mov_b32_e32 v2, s5
; CHECK-NEXT: v_mov_b32_e32 v3, 0
; CHECK-NEXT: v_mov_b32_e32 v4, s6
; CHECK-NEXT: v_mov_b32_e32 v5, 0
; CHECK-NEXT: v_mov_b32_e32 v7, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or <4 x i64> %shift_amt, splat (i64 32)
%srl = lshr <4 x i64> %arg0, %or