[AMDGPU][GlobalISel] Combine for breaking s64 and/or into two s32 insts (#151731)

When either operand is all ones in its high or low 32 bits, splitting
the 64-bit operation opens up further combine opportunities (see the
sketch below): at least one of the two new 32-bit instructions is
either removed entirely or becomes a simple copy.
Mirko Brkušanin 2025-08-20 17:32:29 +02:00 committed by GitHub
parent 2cfba9678d
commit 80f3b376b3
12 changed files with 429 additions and 79 deletions
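
For illustration, a hedged sketch of the rewrite in generic MIR (register names invented for this example). An s64 G_AND whose constant operand has all ones in the high 32 bits

  %x:_(s64) = COPY $sgpr0_sgpr1
  %c:_(s64) = G_CONSTANT i64 -4294967296   ; 0xFFFFFFFF00000000
  %r:_(s64) = G_AND %x, %c

is split into

  %xlo:_(s32), %xhi:_(s32) = G_UNMERGE_VALUES %x(s64)
  %clo:_(s32), %chi:_(s32) = G_UNMERGE_VALUES %c(s64)
  %lo:_(s32) = G_AND %xlo, %clo   ; %clo is 0, so this folds to the constant 0
  %hi:_(s32) = G_AND %xhi, %chi   ; %chi is -1, so this folds to a copy of %xhi
  %r:_(s64) = G_MERGE_VALUES %lo(s32), %hi(s32)

which is what the test_and_mask_hi_rhs checks in the new MIR test verify.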

@@ -151,6 +151,25 @@ def zext_of_shift_amount_combines : GICombineGroup<[
canonicalize_zext_lshr, canonicalize_zext_ashr, canonicalize_zext_shl
]>;
// (and/or i64:x, i64:y) -> i64:(merge (and/or lo_32(x), lo_32(y)), (and/or hi_32(x), hi_32(y)))
// when either x or y is all ones in low or high parts
class combine_binop_s64_with_s32_mask<Instruction opcode> : GICombineRule<
(defs root:$dst),
(match (opcode $dst, i64:$x, i64:$y):$dst,
[{ return Helper.matchConstantIs32BitMask(${x}.getReg()) ||
Helper.matchConstantIs32BitMask(${y}.getReg()); }]),
(apply (G_UNMERGE_VALUES i32:$x_lo, i32:$x_hi, $x),
(G_UNMERGE_VALUES i32:$y_lo, i32:$y_hi, $y),
(opcode i32:$lo, $x_lo, $y_lo),
(opcode i32:$hi, $x_hi, $y_hi),
(G_MERGE_VALUES $dst, $lo, $hi))>;
def combine_or_s64_with_s32_mask : combine_binop_s64_with_s32_mask<G_OR>;
def combine_and_s64_with_s32_mask : combine_binop_s64_with_s32_mask<G_AND>;
def binop_s64_with_s32_mask_combines : GICombineGroup<[
combine_or_s64_with_s32_mask, combine_and_s64_with_s32_mask
]>;
let Predicates = [Has16BitInsts, NotHasMed3_16] in {
// For gfx8, expand f16-fmed3-as-f32 into a min/max f16 sequence. This
// saves one instruction compared to the promotion.
@@ -180,7 +199,8 @@ def gfx8_combines : GICombineGroup<[expand_promoted_fmed3]>;
def AMDGPUPreLegalizerCombiner: GICombiner<
"AMDGPUPreLegalizerCombinerImpl",
[all_combines, combine_fmul_with_select_to_fldexp, clamp_i64_to_i16,
foldable_fneg, combine_shuffle_vector_to_build_vector]> {
foldable_fneg, combine_shuffle_vector_to_build_vector,
binop_s64_with_s32_mask_combines]> {
let CombineAllMethodName = "tryCombineAllImpl";
}
@@ -188,7 +208,8 @@ def AMDGPUPostLegalizerCombiner: GICombiner<
"AMDGPUPostLegalizerCombinerImpl",
[all_combines, gfx6gfx7_combines, gfx8_combines, combine_fmul_with_select_to_fldexp,
uchar_to_float, cvt_f32_ubyteN, remove_fcanonicalize, foldable_fneg,
rcp_sqrt_to_rsq, fdiv_by_sqrt_to_rsq_f16, sign_extension_in_reg, smulu64]> {
rcp_sqrt_to_rsq, fdiv_by_sqrt_to_rsq_f16, sign_extension_in_reg, smulu64,
binop_s64_with_s32_mask_combines]> {
let CombineAllMethodName = "tryCombineAllImpl";
}

@@ -516,3 +516,18 @@ bool AMDGPUCombinerHelper::matchCombineFmulWithSelectToFldexp(
return true;
}
bool AMDGPUCombinerHelper::matchConstantIs32BitMask(Register Reg) const {
auto Res = getIConstantVRegValWithLookThrough(Reg, MRI);
if (!Res)
return false;
const uint64_t Val = Res->Value.getZExtValue();
unsigned MaskIdx = 0;
unsigned MaskLen = 0;
if (!isShiftedMask_64(Val, MaskIdx, MaskLen))
return false;
// Check if low 32 bits or high 32 bits are all ones.
return MaskLen >= 32 && ((MaskIdx == 0) || (MaskIdx == 64 - MaskLen));
}
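
For reference, a minimal standalone sketch (not part of the patch) of how llvm::isShiftedMask_64 classifies the constants exercised by the MIR tests below; the helper name is32BitMask is invented for this example:

#include "llvm/Support/MathExtras.h"
#include <cstdint>
#include <cstdio>

// Returns true if Val is a contiguous run of ones whose low 32 or high 32
// bits are all set, mirroring the predicate above.
static bool is32BitMask(uint64_t Val) {
  unsigned MaskIdx = 0, MaskLen = 0;
  if (!llvm::isShiftedMask_64(Val, MaskIdx, MaskLen))
    return false;
  return MaskLen >= 32 && (MaskIdx == 0 || MaskIdx == 64 - MaskLen);
}

int main() {
  std::printf("%d\n", is32BitMask(0xFFFFFFFF00000000ULL)); // 1: high 32 bits all ones
  std::printf("%d\n", is32BitMask(0x00000000FFFFFFFFULL)); // 1: low 32 bits all ones
  std::printf("%d\n", is32BitMask(0xFFFFFFFFFFFF0000ULL)); // 1: 48-bit mask covering the high half
  std::printf("%d\n", is32BitMask(0x0000000FFFFFFFFFULL)); // 1: 36-bit mask covering the low half
  std::printf("%d\n", is32BitMask(0xFFFF000000000000ULL)); // 0: only 16 ones, neither half fully set
  return 0;
}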

@@ -43,6 +43,8 @@ public:
bool matchCombineFmulWithSelectToFldexp(
MachineInstr &MI, MachineInstr &Sel,
std::function<void(MachineIRBuilder &)> &MatchInfo) const;
bool matchConstantIs32BitMask(Register Reg) const;
};
} // namespace llvm

@@ -0,0 +1,321 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=amdgcn -mcpu=tahiti -run-pass=amdgpu-prelegalizer-combiner %s -o - | FileCheck %s
---
name: test_and_mask_hi_rhs
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-LABEL: name: test_and_mask_hi_rhs
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[UV1]](s32)
; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
%0:_(s64) = COPY $sgpr0_sgpr1
%1:_(s64) = G_CONSTANT i64 -4294967296
%2:_(s64) = G_AND %0, %1
$sgpr0_sgpr1 = COPY %2(s64)
SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
...
---
name: test_and_mask_hi_lhs
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-LABEL: name: test_and_mask_hi_lhs
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[UV1]](s32)
; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
%0:_(s64) = COPY $sgpr0_sgpr1
%1:_(s64) = G_CONSTANT i64 -4294967296
%2:_(s64) = G_AND %1, %0
$sgpr0_sgpr1 = COPY %2(s64)
SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
...
---
name: test_and_mask_hi_48bit_mask_rhs
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-LABEL: name: test_and_mask_hi_48bit_mask_rhs
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -65536
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C]]
; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND]](s32), [[UV1]](s32)
; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
%0:_(s64) = COPY $sgpr0_sgpr1
%1:_(s64) = G_CONSTANT i64 -65536
%2:_(s64) = G_AND %0, %1
$sgpr0_sgpr1 = COPY %2(s64)
SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
...
---
name: test_and_mask_hi_16bit_mask_rhs
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-LABEL: name: test_and_mask_hi_16bit_mask_rhs
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -281474976710656
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[AND]](s64)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
%0:_(s64) = COPY $sgpr0_sgpr1
%1:_(s64) = G_CONSTANT i64 -281474976710656
%2:_(s64) = G_AND %0, %1
$sgpr0_sgpr1 = COPY %2(s64)
SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
...
---
name: test_and_mask_lo_rhs
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-LABEL: name: test_and_mask_lo_rhs
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s32)
; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[ZEXT]](s64)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
%0:_(s64) = COPY $sgpr0_sgpr1
%1:_(s64) = G_CONSTANT i64 4294967295
%2:_(s64) = G_AND %0, %1
$sgpr0_sgpr1 = COPY %2(s64)
SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
...
---
name: test_and_mask_lo_lhs
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-LABEL: name: test_and_mask_lo_lhs
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s32)
; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[ZEXT]](s64)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
%0:_(s64) = COPY $sgpr0_sgpr1
%1:_(s64) = G_CONSTANT i64 4294967295
%2:_(s64) = G_AND %1, %0
$sgpr0_sgpr1 = COPY %2(s64)
SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
...
---
name: test_and_mask_lo_36bit_mask_rhs
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-LABEL: name: test_and_mask_lo_36bit_mask_rhs
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C]]
; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[AND]](s32)
; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
%0:_(s64) = COPY $sgpr0_sgpr1
%1:_(s64) = G_CONSTANT i64 68719476735
%2:_(s64) = G_AND %0, %1
$sgpr0_sgpr1 = COPY %2(s64)
SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
...
---
name: test_and_mask_hi_with_merge_unmerge
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0, $sgpr1, $sgpr2
; CHECK-LABEL: name: test_and_mask_hi_with_merge_unmerge
; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr1
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: $sgpr0 = COPY [[C]](s32)
; CHECK-NEXT: $sgpr1 = COPY [[COPY]](s32)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%0:_(s32) = COPY $sgpr0
%1:_(s32) = COPY $sgpr1
%2:_(s64) = G_MERGE_VALUES %0(s32), %1(s32)
%3:_(s64) = G_CONSTANT i64 -4294967296
%4:_(s64) = G_AND %2, %3
%5:_(s32), %6:_(s32) = G_UNMERGE_VALUES %4(s64)
$sgpr0 = COPY %5(s32)
$sgpr1 = COPY %6(s32)
SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
...
---
name: negative_and_test_incorrect_types
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5
; CHECK-LABEL: name: negative_and_test_incorrect_types
; CHECK: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -4294967296
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s128) = G_AND [[COPY]], [[C]]
; CHECK-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[AND]](s128)
%0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(s64) = COPY $vgpr4_vgpr5
%2:_(s128) = G_CONSTANT i128 -4294967296
%3:_(s128) = G_AND %0, %2
$vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
...
---
name: test_or_mask_hi_rhs
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-LABEL: name: test_or_mask_hi_rhs
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[TRUNC]](s32), [[C]](s32)
; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
%0:_(s64) = COPY $sgpr0_sgpr1
%1:_(s64) = G_CONSTANT i64 -4294967296
%2:_(s64) = G_OR %0, %1
$sgpr0_sgpr1 = COPY %2(s64)
SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
...
---
name: test_or_mask_hi_lhs
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-LABEL: name: test_or_mask_hi_lhs
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[TRUNC]](s32), [[C]](s32)
; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
%0:_(s64) = COPY $sgpr0_sgpr1
%1:_(s64) = G_CONSTANT i64 -4294967296
%2:_(s64) = G_OR %1, %0
$sgpr0_sgpr1 = COPY %2(s64)
SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
...
---
name: test_or_mask_lo_rhs
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-LABEL: name: test_or_mask_lo_rhs
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[UV1]](s32)
; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
%0:_(s64) = COPY $sgpr0_sgpr1
%1:_(s64) = G_CONSTANT i64 4294967295
%2:_(s64) = G_OR %0, %1
$sgpr0_sgpr1 = COPY %2(s64)
SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
...
---
name: test_or_mask_lo_lhs
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-LABEL: name: test_or_mask_lo_lhs
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[UV1]](s32)
; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
%0:_(s64) = COPY $sgpr0_sgpr1
%1:_(s64) = G_CONSTANT i64 4294967295
%2:_(s64) = G_OR %1, %0
$sgpr0_sgpr1 = COPY %2(s64)
SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
...
---
name: test_or_mask_hi_with_merge_unmerge
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0, $sgpr1, $sgpr2
; CHECK-LABEL: name: test_or_mask_hi_with_merge_unmerge
; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; CHECK-NEXT: $sgpr0 = COPY [[COPY]](s32)
; CHECK-NEXT: $sgpr1 = COPY [[C]](s32)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%0:_(s32) = COPY $sgpr0
%1:_(s32) = COPY $sgpr1
%2:_(s64) = G_MERGE_VALUES %0(s32), %1(s32)
%3:_(s64) = G_CONSTANT i64 -4294967296
%4:_(s64) = G_OR %2, %3
%5:_(s32), %6:_(s32) = G_UNMERGE_VALUES %4(s64)
$sgpr0 = COPY %5(s32)
$sgpr1 = COPY %6(s32)
SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
...
---
name: negative_or_test_incorrect_types
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5
; CHECK-LABEL: name: negative_or_test_incorrect_types
; CHECK: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -4294967296
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s128) = G_OR [[COPY]], [[C]]
; CHECK-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[OR]](s128)
%0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(s64) = COPY $vgpr4_vgpr5
%2:_(s128) = G_CONSTANT i128 -4294967296
%3:_(s128) = G_OR %0, %2
$vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
...

@@ -227,54 +227,52 @@ exit:
define amdgpu_cs void @single_lane_execution_attribute(i32 inreg %.userdata0, <3 x i32> inreg %.WorkgroupId, <3 x i32> %.LocalInvocationId) #0 {
; GFX10-LABEL: single_lane_execution_attribute:
; GFX10: ; %bb.0: ; %.entry
; GFX10-NEXT: s_mov_b32 s6, 0
; GFX10-NEXT: s_getpc_b64 s[4:5]
; GFX10-NEXT: s_mov_b32 s7, -1
; GFX10-NEXT: s_mov_b32 s2, s1
; GFX10-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
; GFX10-NEXT: s_mov_b32 s1, 0
; GFX10-NEXT: s_getpc_b64 s[12:13]
; GFX10-NEXT: s_mov_b32 s12, 0
; GFX10-NEXT: s_mov_b32 s2, s0
; GFX10-NEXT: s_mov_b32 s3, s12
; GFX10-NEXT: v_mbcnt_lo_u32_b32 v1, -1, 0
; GFX10-NEXT: s_or_b64 s[12:13], s[4:5], s[0:1]
; GFX10-NEXT: s_load_dwordx8 s[4:11], s[12:13], 0x0
; GFX10-NEXT: s_or_b64 s[2:3], s[12:13], s[2:3]
; GFX10-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x0
; GFX10-NEXT: v_mbcnt_hi_u32_b32 v1, -1, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 2, v1
; GFX10-NEXT: v_and_b32_e32 v3, 1, v1
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3
; GFX10-NEXT: s_xor_b32 s3, vcc_lo, exec_lo
; GFX10-NEXT: s_xor_b32 s2, vcc_lo, exec_lo
; GFX10-NEXT: s_and_b32 vcc_lo, s2, exec_lo
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_load_dword v2, v2, s[4:7], 0 offen
; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s3
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 0, v2
; GFX10-NEXT: s_cbranch_vccnz .LBB4_4
; GFX10-NEXT: ; %bb.1: ; %.preheader.preheader
; GFX10-NEXT: s_mov_b32 s3, 0
; GFX10-NEXT: s_mov_b32 s2, 0
; GFX10-NEXT: .LBB4_2: ; %.preheader
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: v_mov_b32_e32 v3, s1
; GFX10-NEXT: v_mov_b32_e32 v3, s12
; GFX10-NEXT: v_add_nc_u32_e32 v1, -1, v1
; GFX10-NEXT: s_add_i32 s1, s1, 4
; GFX10-NEXT: s_add_i32 s12, s12, 4
; GFX10-NEXT: buffer_load_dword v3, v3, s[4:7], 0 offen
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_readfirstlane_b32 s12, v3
; GFX10-NEXT: s_add_i32 s3, s12, s3
; GFX10-NEXT: v_readfirstlane_b32 s3, v3
; GFX10-NEXT: s_add_i32 s2, s3, s2
; GFX10-NEXT: s_cbranch_vccnz .LBB4_2
; GFX10-NEXT: ; %bb.3: ; %.preheader._crit_edge
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, s3, v2
; GFX10-NEXT: s_or_b32 s1, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s1
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, s2, v2
; GFX10-NEXT: s_or_b32 s2, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX10-NEXT: s_branch .LBB4_6
; GFX10-NEXT: .LBB4_4:
; GFX10-NEXT: s_mov_b32 s1, exec_lo
; GFX10-NEXT: s_mov_b32 s2, exec_lo
; GFX10-NEXT: ; implicit-def: $vgpr1
; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s1
; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s2
; GFX10-NEXT: s_cbranch_vccz .LBB4_6
; GFX10-NEXT: ; %bb.5: ; %.19
; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s0
; GFX10-NEXT: v_or_b32_e32 v1, 2, v1
; GFX10-NEXT: .LBB4_6: ; %.22
; GFX10-NEXT: v_add_lshl_u32 v0, v0, s2, 2
; GFX10-NEXT: v_add_lshl_u32 v0, v0, s1, 2
; GFX10-NEXT: buffer_store_dword v1, v0, s[8:11], 0 offen
; GFX10-NEXT: s_endpgm
.entry:

@@ -14,9 +14,9 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p1) :: (load (s64), addrspace 1)
; CHECK-NEXT: %k:_(s64) = G_CONSTANT i64 4294967295
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[LOAD]], %k
; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[AND]](s64)
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LOAD]](s64)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s32)
; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[ZEXT]](s64)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = G_LOAD %0 :: (load (s64), align 8, addrspace 1)
%k:_(s64) = G_CONSTANT i64 4294967295

@@ -194,10 +194,8 @@ declare i32 @llvm.amdgcn.readfirstlane(i32)
define amdgpu_ps i64 @s_sdiv_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-LABEL: s_sdiv_i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_mov_b32 s6, 0
; CHECK-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
; CHECK-NEXT: s_mov_b32 s7, -1
; CHECK-NEXT: s_and_b64 s[0:1], s[0:1], s[6:7]
; CHECK-NEXT: s_mov_b32 s0, 0
; CHECK-NEXT: v_cmp_ne_u64_e64 vcc, s[0:1], 0
; CHECK-NEXT: s_mov_b32 s0, 1
; CHECK-NEXT: s_cbranch_vccz .LBB1_2
@@ -218,7 +216,6 @@ define amdgpu_ps i64 @s_sdiv_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1
; CHECK-NEXT: v_rcp_iflag_f32_e32 v0, v0
; CHECK-NEXT: s_subb_u32 s5, 0, s11
; CHECK-NEXT: s_xor_b64 s[6:7], s[6:7], s[8:9]
; CHECK-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
; CHECK-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
; CHECK-NEXT: v_trunc_f32_e32 v2, v1
@@ -327,9 +324,10 @@ define amdgpu_ps i64 @s_sdiv_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; CHECK-NEXT: v_xor_b32_e32 v0, s6, v0
; CHECK-NEXT: s_xor_b64 s[0:1], s[6:7], s[8:9]
; CHECK-NEXT: v_xor_b32_e32 v0, s0, v0
; CHECK-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0
; CHECK-NEXT: s_mov_b32 s0, 0
; CHECK-NEXT: v_subrev_i32_e32 v0, vcc, s6, v0
; CHECK-NEXT: s_branch .LBB1_3
; CHECK-NEXT: .LBB1_2:
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1

@@ -188,12 +188,10 @@ declare i32 @llvm.amdgcn.readfirstlane(i32)
define amdgpu_ps i64 @s_srem_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-LABEL: s_srem_i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_mov_b32 s6, 0
; CHECK-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
; CHECK-NEXT: s_mov_b32 s7, -1
; CHECK-NEXT: s_and_b64 s[0:1], s[0:1], s[6:7]
; CHECK-NEXT: s_mov_b32 s0, 0
; CHECK-NEXT: v_cmp_ne_u64_e64 vcc, s[0:1], 0
; CHECK-NEXT: s_mov_b32 s7, 1
; CHECK-NEXT: s_mov_b32 s0, 1
; CHECK-NEXT: s_cbranch_vccz .LBB1_2
; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: s_ashr_i32 s6, s3, 31
@@ -212,7 +210,6 @@ define amdgpu_ps i64 @s_srem_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1
; CHECK-NEXT: v_rcp_iflag_f32_e32 v0, v0
; CHECK-NEXT: s_subb_u32 s5, 0, s9
; CHECK-NEXT: s_mov_b32 s7, 0
; CHECK-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
; CHECK-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
; CHECK-NEXT: v_trunc_f32_e32 v2, v1
@@ -273,43 +270,43 @@ define amdgpu_ps i64 @s_srem_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_addc_u32_e32 v1, vcc, v4, v1, vcc
; CHECK-NEXT: v_mul_lo_u32 v2, s11, v0
; CHECK-NEXT: v_mul_lo_u32 v3, s10, v1
; CHECK-NEXT: v_mul_hi_u32 v5, s10, v0
; CHECK-NEXT: v_mul_hi_u32 v4, s10, v0
; CHECK-NEXT: v_mul_hi_u32 v0, s11, v0
; CHECK-NEXT: v_mul_hi_u32 v6, s11, v1
; CHECK-NEXT: v_mul_hi_u32 v5, s11, v1
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v3
; CHECK-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v5
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v4
; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; CHECK-NEXT: v_mul_lo_u32 v5, s11, v1
; CHECK-NEXT: v_mul_lo_u32 v4, s11, v1
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v3, v2
; CHECK-NEXT: v_mul_hi_u32 v3, s10, v1
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v5, v0
; CHECK-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v4, v0
; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v3
; CHECK-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v3, vcc, v5, v3
; CHECK-NEXT: v_add_i32_e32 v5, vcc, v0, v2
; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s8, v5, 0
; CHECK-NEXT: v_add_i32_e32 v3, vcc, v4, v3
; CHECK-NEXT: v_add_i32_e32 v4, vcc, v0, v2
; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s8, v4, 0
; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v3, v2
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v6, v2
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v5, v2
; CHECK-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s8, v2, v[1:2]
; CHECK-NEXT: v_mov_b32_e32 v3, s11
; CHECK-NEXT: v_mov_b32_e32 v5, s11
; CHECK-NEXT: v_sub_i32_e32 v0, vcc, s10, v0
; CHECK-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s9, v5, v[1:2]
; CHECK-NEXT: v_mov_b32_e32 v4, s9
; CHECK-NEXT: v_subb_u32_e64 v2, s[0:1], v3, v1, vcc
; CHECK-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s9, v4, v[1:2]
; CHECK-NEXT: v_mov_b32_e32 v3, s9
; CHECK-NEXT: v_subb_u32_e64 v2, s[0:1], v5, v1, vcc
; CHECK-NEXT: v_sub_i32_e64 v1, s[0:1], s11, v1
; CHECK-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
; CHECK-NEXT: v_cmp_le_u32_e64 s[0:1], s9, v2
; CHECK-NEXT: v_cndmask_b32_e64 v3, 0, -1, s[0:1]
; CHECK-NEXT: v_subrev_i32_e32 v3, vcc, s8, v0
; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1]
; CHECK-NEXT: v_cmp_le_u32_e64 s[0:1], s8, v0
; CHECK-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
; CHECK-NEXT: v_cndmask_b32_e64 v5, 0, -1, s[0:1]
; CHECK-NEXT: v_cmp_eq_u32_e64 s[0:1], s9, v2
; CHECK-NEXT: v_subb_u32_e32 v1, vcc, v1, v4, vcc
; CHECK-NEXT: v_cndmask_b32_e64 v2, v3, v5, s[0:1]
; CHECK-NEXT: v_subrev_i32_e32 v3, vcc, s8, v0
; CHECK-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
; CHECK-NEXT: v_cmp_le_u32_e32 vcc, s9, v1
; CHECK-NEXT: v_cndmask_b32_e64 v2, v4, v5, s[0:1]
; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
; CHECK-NEXT: v_cmp_le_u32_e32 vcc, s8, v3
; CHECK-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc
@@ -322,11 +319,12 @@ define amdgpu_ps i64 @s_srem_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
; CHECK-NEXT: v_xor_b32_e32 v0, s6, v0
; CHECK-NEXT: v_subrev_i32_e32 v0, vcc, s6, v0
; CHECK-NEXT: s_mov_b32 s0, 0
; CHECK-NEXT: s_branch .LBB1_3
; CHECK-NEXT: .LBB1_2:
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
; CHECK-NEXT: .LBB1_3: ; %Flow
; CHECK-NEXT: s_xor_b32 s0, s7, 1
; CHECK-NEXT: s_xor_b32 s0, s0, 1
; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cbranch_scc1 .LBB1_5
; CHECK-NEXT: ; %bb.4:

@@ -187,11 +187,9 @@ define amdgpu_ps i64 @s_udiv_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-LABEL: s_udiv_i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_or_b64 s[4:5], s[0:1], s[2:3]
; CHECK-NEXT: s_mov_b32 s6, 0
; CHECK-NEXT: s_mov_b32 s7, -1
; CHECK-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: v_cmp_ne_u64_e64 vcc, s[4:5], 0
; CHECK-NEXT: s_mov_b32 s6, 1
; CHECK-NEXT: s_mov_b32 s4, 1
; CHECK-NEXT: v_cvt_f32_u32_e32 v2, s2
; CHECK-NEXT: s_cbranch_vccz .LBB1_2
; CHECK-NEXT: ; %bb.1:
@@ -199,7 +197,6 @@ define amdgpu_ps i64 @s_udiv_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_cvt_f32_u32_e32 v1, s3
; CHECK-NEXT: s_sub_u32 s4, 0, s2
; CHECK-NEXT: v_mov_b32_e32 v3, s1
; CHECK-NEXT: s_mov_b32 s6, 0
; CHECK-NEXT: v_madmk_f32 v1, v1, 0x4f800000, v2
; CHECK-NEXT: s_subb_u32 s5, 0, s3
; CHECK-NEXT: v_rcp_iflag_f32_e32 v1, v1
@@ -318,11 +315,12 @@ define amdgpu_ps i64 @s_udiv_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_cndmask_b32_e32 v0, v9, v5, vcc
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: s_branch .LBB1_3
; CHECK-NEXT: .LBB1_2:
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
; CHECK-NEXT: .LBB1_3: ; %Flow
; CHECK-NEXT: s_xor_b32 s1, s6, 1
; CHECK-NEXT: s_xor_b32 s1, s4, 1
; CHECK-NEXT: s_cmp_lg_u32 s1, 0
; CHECK-NEXT: s_cbranch_scc1 .LBB1_5
; CHECK-NEXT: ; %bb.4:

@@ -184,18 +184,15 @@ define amdgpu_ps i64 @s_urem_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-LABEL: s_urem_i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_or_b64 s[4:5], s[0:1], s[2:3]
; CHECK-NEXT: s_mov_b32 s6, 0
; CHECK-NEXT: s_mov_b32 s7, -1
; CHECK-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: v_cmp_ne_u64_e64 vcc, s[4:5], 0
; CHECK-NEXT: s_mov_b32 s6, 1
; CHECK-NEXT: s_mov_b32 s4, 1
; CHECK-NEXT: v_cvt_f32_u32_e32 v2, s2
; CHECK-NEXT: s_cbranch_vccz .LBB1_2
; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: v_mov_b32_e32 v0, s3
; CHECK-NEXT: v_cvt_f32_u32_e32 v1, s3
; CHECK-NEXT: s_sub_u32 s4, 0, s2
; CHECK-NEXT: s_mov_b32 s6, 0
; CHECK-NEXT: v_mov_b32_e32 v3, s1
; CHECK-NEXT: v_madmk_f32 v1, v1, 0x4f800000, v2
; CHECK-NEXT: s_subb_u32 s5, 0, s3
@@ -314,11 +311,12 @@ define amdgpu_ps i64 @s_urem_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_cndmask_b32_e32 v0, v3, v6, vcc
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: s_branch .LBB1_3
; CHECK-NEXT: .LBB1_2:
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
; CHECK-NEXT: .LBB1_3: ; %Flow
; CHECK-NEXT: s_xor_b32 s1, s6, 1
; CHECK-NEXT: s_xor_b32 s1, s4, 1
; CHECK-NEXT: s_cmp_lg_u32 s1, 0
; CHECK-NEXT: s_cbranch_scc1 .LBB1_5
; CHECK-NEXT: ; %bb.4:

@@ -223,9 +223,10 @@ define i128 @fptosi_f64_to_i128(double %x) {
; GISEL-NEXT: v_or3_b32 v8, v1, v2, 1
; GISEL-NEXT: v_mov_b32_e32 v0, 0x433
; GISEL-NEXT: v_mov_b32_e32 v1, 0
; GISEL-NEXT: v_and_b32_e32 v2, 0xfffff, v5
; GISEL-NEXT: v_mov_b32_e32 v2, 0xfffff
; GISEL-NEXT: s_mov_b32 s6, 0x100000
; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
; GISEL-NEXT: v_or_b32_e32 v5, 0x100000, v2
; GISEL-NEXT: v_and_or_b32 v5, v5, v2, s6
; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7]
@@ -587,9 +588,10 @@ define i128 @fptoui_f64_to_i128(double %x) {
; GISEL-NEXT: v_or3_b32 v8, v1, v2, 1
; GISEL-NEXT: v_mov_b32_e32 v0, 0x433
; GISEL-NEXT: v_mov_b32_e32 v1, 0
; GISEL-NEXT: v_and_b32_e32 v2, 0xfffff, v5
; GISEL-NEXT: v_mov_b32_e32 v2, 0xfffff
; GISEL-NEXT: s_mov_b32 s6, 0x100000
; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1]
; GISEL-NEXT: v_or_b32_e32 v5, 0x100000, v2
; GISEL-NEXT: v_and_or_b32 v5, v5, v2, s6
; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc
; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7]

@@ -797,12 +797,11 @@ define double @sitofp_i128_to_f64(i128 %x) {
; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
; GISEL-NEXT: .LBB2_13: ; %Flow4
; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
; GISEL-NEXT: v_and_b32_e32 v1, 0x80000000, v6
; GISEL-NEXT: v_mov_b32_e32 v2, 0x3ff00000
; GISEL-NEXT: v_mov_b32_e32 v3, 0xfffff
; GISEL-NEXT: v_and_b32_e32 v1, 0x80000000, v6
; GISEL-NEXT: v_lshl_add_u32 v2, v8, 20, v2
; GISEL-NEXT: v_and_or_b32 v1, v10, v3, v1
; GISEL-NEXT: v_or3_b32 v1, v1, v2, 0
; GISEL-NEXT: v_and_b32_e32 v3, 0xfffff, v10
; GISEL-NEXT: v_or3_b32 v1, v3, v1, v2
; GISEL-NEXT: .LBB2_14: ; %Flow5
; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
; GISEL-NEXT: s_setpc_b64 s[30:31]
@@ -1081,8 +1080,8 @@ define double @uitofp_i128_to_f64(i128 %x) {
; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
; GISEL-NEXT: v_mov_b32_e32 v0, 0x3ff00000
; GISEL-NEXT: v_lshl_add_u32 v0, v7, 20, v0
; GISEL-NEXT: v_and_b32_e32 v1, 0xfffff, v9
; GISEL-NEXT: v_or3_b32 v5, v1, v0, 0
; GISEL-NEXT: v_mov_b32_e32 v1, 0xfffff
; GISEL-NEXT: v_and_or_b32 v5, v9, v1, v0
; GISEL-NEXT: .LBB3_14: ; %Flow5
; GISEL-NEXT: s_or_b64 exec, exec, s[6:7]
; GISEL-NEXT: v_mov_b32_e32 v0, v4