Ruiling, Song 0487db1f13
MachineScheduler: Improve instruction clustering (#137784)
The existing way of managing clustered nodes was to add weak edges
between neighbouring cluster nodes, forming a sort of ordered queue.
This is later recorded as `NextClusterPred` or `NextClusterSucc` in
`ScheduleDAGMI`.

But the instructions may not actually be picked in the exact order of
the queue. For example, suppose we have a queue of cluster nodes A B C.
During scheduling, node B might be picked first; then it is very likely
that we only cluster B and C for top-down scheduling (leaving A alone).

Another issue is:
```
   if (!ReorderWhileClustering && SUa->NodeNum > SUb->NodeNum)
      std::swap(SUa, SUb);
   if (!DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)))
```
may break the cluster queue.

For example, suppose we want to cluster the nodes (in `MemOpRecords`
order): 1 3 2. Normally, 1 (SUa) becomes a pred of 3 (SUb). But when it
comes to the pair (3, 2), since 3 (SUa) > 2 (SUb), we reorder the two
nodes, making 2 a pred of 3. Now both 1 and 2 are preds of 3, but there
is no edge between 1 and 2. Thus we get a broken cluster chain.

To fix both issues, this change introduces an unordered set, which can
help improve clustering in some hard cases.

One key reason the change causes so many test check changes is: As the
cluster candidates are not ordered now, the candidates might be picked
in different order from before.

The most affected targets are: AMDGPU, AArch64, RISCV.

For RISCV, most of the changes appear to be minor instruction
reordering; I don't see any obvious regressions.

For AArch64, some combining of ldr into ldp was affected, with two
cases regressed and two improved. The deeper reason is that the machine
scheduler cannot cluster those loads well either before or after the
change, and the load-combine algorithm that runs later is also not
smart enough.

For AMDGPU, some cases use more v_dual instructions while others
regressed; it seems less critical. The test `v_vselect_v32bf16`
appears to get more buffer_load instructions combined into clauses.
2025-06-05 15:28:04 +08:00

261 lines
6.9 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=riscv32 | FileCheck %s --check-prefix=RV32I
; RUN: llc < %s -mtriple=riscv64 | FileCheck %s --check-prefix=RV64I
; Unsigned 3-way compare of two zero-extended i8 values. Both targets lower
; this to (x > y) - (x < y) using two sltu ops; the zeroext ABI attribute
; means no extra extension instructions are needed.
define i8 @ucmp.8.8(i8 zeroext %x, i8 zeroext %y) nounwind {
; RV32I-LABEL: ucmp.8.8:
; RV32I: # %bb.0:
; RV32I-NEXT: sltu a2, a0, a1
; RV32I-NEXT: sltu a0, a1, a0
; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: ucmp.8.8:
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a2, a0, a1
; RV64I-NEXT: sltu a0, a1, a0
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
  %1 = call i8 @llvm.ucmp(i8 %x, i8 %y)
  ret i8 %1
}
; Same (x > y) - (x < y) lowering as the i8 case, but with i16 sources;
; zeroext args again avoid any explicit widening.
define i8 @ucmp.8.16(i16 zeroext %x, i16 zeroext %y) nounwind {
; RV32I-LABEL: ucmp.8.16:
; RV32I: # %bb.0:
; RV32I-NEXT: sltu a2, a0, a1
; RV32I-NEXT: sltu a0, a1, a0
; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: ucmp.8.16:
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a2, a0, a1
; RV64I-NEXT: sltu a0, a1, a0
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
  %1 = call i8 @llvm.ucmp(i16 %x, i16 %y)
  ret i8 %1
}
; i32 sources without extension attributes: RV32 can compare the registers
; directly, while RV64 must first make the upper 32 bits consistent via
; sext.w on both operands before the full-width sltu compares.
define i8 @ucmp.8.32(i32 %x, i32 %y) nounwind {
; RV32I-LABEL: ucmp.8.32:
; RV32I: # %bb.0:
; RV32I-NEXT: sltu a2, a0, a1
; RV32I-NEXT: sltu a0, a1, a0
; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: ucmp.8.32:
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a1, a1
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sltu a2, a0, a1
; RV64I-NEXT: sltu a0, a1, a0
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
  ret i8 %1
}
; i64 sources: RV64 compares natively; RV32 splits the value into hi/lo
; word pairs, branching on whether the high words are equal — if they
; differ the high words decide the result, otherwise the low words do.
define i8 @ucmp.8.64(i64 %x, i64 %y) nounwind {
; RV32I-LABEL: ucmp.8.64:
; RV32I: # %bb.0:
; RV32I-NEXT: beq a1, a3, .LBB3_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sltu a4, a1, a3
; RV32I-NEXT: sltu a0, a3, a1
; RV32I-NEXT: sub a0, a0, a4
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB3_2:
; RV32I-NEXT: sltu a4, a0, a2
; RV32I-NEXT: sltu a0, a2, a0
; RV32I-NEXT: sub a0, a0, a4
; RV32I-NEXT: ret
;
; RV64I-LABEL: ucmp.8.64:
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a2, a0, a1
; RV64I-NEXT: sltu a0, a1, a0
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
  %1 = call i8 @llvm.ucmp(i64 %x, i64 %y)
  ret i8 %1
}
; i128 sources are passed indirectly on RV32 (a0/a1 point to the values),
; so the RV32 lowering loads four 32-bit words per operand and chains
; word-wise compares with branches. This is the kind of multi-load block
; whose load ordering is sensitive to scheduler clustering decisions.
; RV64 receives the i128 in register pairs and uses the same hi/lo-half
; branching pattern as the RV32 i64 case.
define i8 @ucmp.8.128(i128 %x, i128 %y) nounwind {
; RV32I-LABEL: ucmp.8.128:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a2, 4(a1)
; RV32I-NEXT: lw a4, 8(a1)
; RV32I-NEXT: lw a5, 12(a1)
; RV32I-NEXT: lw a3, 4(a0)
; RV32I-NEXT: lw a6, 12(a0)
; RV32I-NEXT: lw a7, 8(a0)
; RV32I-NEXT: beq a6, a5, .LBB4_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sltu t2, a6, a5
; RV32I-NEXT: j .LBB4_3
; RV32I-NEXT: .LBB4_2:
; RV32I-NEXT: sltu t2, a7, a4
; RV32I-NEXT: .LBB4_3:
; RV32I-NEXT: lw a1, 0(a1)
; RV32I-NEXT: lw t0, 0(a0)
; RV32I-NEXT: beq a3, a2, .LBB4_5
; RV32I-NEXT: # %bb.4:
; RV32I-NEXT: sltu a0, a3, a2
; RV32I-NEXT: j .LBB4_6
; RV32I-NEXT: .LBB4_5:
; RV32I-NEXT: sltu a0, t0, a1
; RV32I-NEXT: .LBB4_6:
; RV32I-NEXT: xor t1, a6, a5
; RV32I-NEXT: xor t3, a7, a4
; RV32I-NEXT: or t1, t3, t1
; RV32I-NEXT: beqz t1, .LBB4_8
; RV32I-NEXT: # %bb.7:
; RV32I-NEXT: mv a0, t2
; RV32I-NEXT: .LBB4_8:
; RV32I-NEXT: beq a6, a5, .LBB4_11
; RV32I-NEXT: # %bb.9:
; RV32I-NEXT: sltu a4, a5, a6
; RV32I-NEXT: bne a3, a2, .LBB4_12
; RV32I-NEXT: .LBB4_10:
; RV32I-NEXT: sltu a1, a1, t0
; RV32I-NEXT: bnez t1, .LBB4_13
; RV32I-NEXT: j .LBB4_14
; RV32I-NEXT: .LBB4_11:
; RV32I-NEXT: sltu a4, a4, a7
; RV32I-NEXT: beq a3, a2, .LBB4_10
; RV32I-NEXT: .LBB4_12:
; RV32I-NEXT: sltu a1, a2, a3
; RV32I-NEXT: beqz t1, .LBB4_14
; RV32I-NEXT: .LBB4_13:
; RV32I-NEXT: mv a1, a4
; RV32I-NEXT: .LBB4_14:
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: ucmp.8.128:
; RV64I: # %bb.0:
; RV64I-NEXT: beq a1, a3, .LBB4_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: sltu a4, a1, a3
; RV64I-NEXT: sltu a0, a3, a1
; RV64I-NEXT: sub a0, a0, a4
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: sltu a4, a0, a2
; RV64I-NEXT: sltu a0, a2, a0
; RV64I-NEXT: sub a0, a0, a4
; RV64I-NEXT: ret
  %1 = call i8 @llvm.ucmp(i128 %x, i128 %y)
  ret i8 %1
}
; Same lowering as ucmp.8.32 — only the result width differs (i32 vs i8),
; which changes nothing in the emitted code since the result already fits
; in a register. RV64 needs sext.w on both unattributed i32 args.
define i32 @ucmp.32.32(i32 %x, i32 %y) nounwind {
; RV32I-LABEL: ucmp.32.32:
; RV32I: # %bb.0:
; RV32I-NEXT: sltu a2, a0, a1
; RV32I-NEXT: sltu a0, a1, a0
; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: ucmp.32.32:
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a1, a1
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sltu a2, a0, a1
; RV64I-NEXT: sltu a0, a1, a0
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
  %1 = call i32 @llvm.ucmp(i32 %x, i32 %y)
  ret i32 %1
}
; With signext args the RV64 registers are already in canonical sign-extended
; form, so the sext.w pair seen in ucmp.32.32 is elided.
define i32 @ucmp.32.32_sext(i32 signext %x, i32 signext %y) nounwind {
; RV32I-LABEL: ucmp.32.32_sext:
; RV32I: # %bb.0:
; RV32I-NEXT: sltu a2, a0, a1
; RV32I-NEXT: sltu a0, a1, a0
; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: ucmp.32.32_sext:
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a2, a0, a1
; RV64I-NEXT: sltu a0, a1, a0
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
  %1 = call i32 @llvm.ucmp(i32 %x, i32 %y)
  ret i32 %1
}
; With zeroext args the upper bits of both RV64 registers are known-equal
; (zero), so the full-width sltu compares are valid without extension.
define i32 @ucmp.32.32_zext(i32 zeroext %x, i32 zeroext %y) nounwind {
; RV32I-LABEL: ucmp.32.32_zext:
; RV32I: # %bb.0:
; RV32I-NEXT: sltu a2, a0, a1
; RV32I-NEXT: sltu a0, a1, a0
; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: ucmp.32.32_zext:
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a2, a0, a1
; RV64I-NEXT: sltu a0, a1, a0
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
  %1 = call i32 @llvm.ucmp(i32 %x, i32 %y)
  ret i32 %1
}
; i64 compare producing an i32 result: identical codegen to ucmp.8.64
; (RV32 branches on high-word equality; RV64 compares natively), since the
; narrower result type needs no extra truncation in a register.
define i32 @ucmp.32.64(i64 %x, i64 %y) nounwind {
; RV32I-LABEL: ucmp.32.64:
; RV32I: # %bb.0:
; RV32I-NEXT: beq a1, a3, .LBB8_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sltu a4, a1, a3
; RV32I-NEXT: sltu a0, a3, a1
; RV32I-NEXT: sub a0, a0, a4
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB8_2:
; RV32I-NEXT: sltu a4, a0, a2
; RV32I-NEXT: sltu a0, a2, a0
; RV32I-NEXT: sub a0, a0, a4
; RV32I-NEXT: ret
;
; RV64I-LABEL: ucmp.32.64:
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a2, a0, a1
; RV64I-NEXT: sltu a0, a1, a0
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
  %1 = call i32 @llvm.ucmp(i64 %x, i64 %y)
  ret i32 %1
}
; i64 result on RV32 requires materializing the high half: the low word is
; computed as in ucmp.32.64 (here with the two paths merging before the
; sub), then srai by 31 sign-extends it into a1 for the i64 return pair.
define i64 @ucmp.64.64(i64 %x, i64 %y) nounwind {
; RV32I-LABEL: ucmp.64.64:
; RV32I: # %bb.0:
; RV32I-NEXT: beq a1, a3, .LBB9_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sltu a4, a1, a3
; RV32I-NEXT: sltu a0, a3, a1
; RV32I-NEXT: j .LBB9_3
; RV32I-NEXT: .LBB9_2:
; RV32I-NEXT: sltu a4, a0, a2
; RV32I-NEXT: sltu a0, a2, a0
; RV32I-NEXT: .LBB9_3:
; RV32I-NEXT: sub a0, a0, a4
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: ret
;
; RV64I-LABEL: ucmp.64.64:
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a2, a0, a1
; RV64I-NEXT: sltu a0, a1, a0
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
  %1 = call i64 @llvm.ucmp(i64 %x, i64 %y)
  ret i64 %1
}