[CodeGen] Use range-based for loops (NFC) (#138488)

This is a reland of #138434 except that:

- the bits for llvm/lib/CodeGen/RenameIndependentSubregs.cpp
  have been dropped because they caused a test failure under asan, and

- the bits for llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp have
  been improved with structured bindings (see the sketch after this
  list).
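
For illustration only, here is a minimal standalone sketch (not code from
this patch; the container and variable names are hypothetical) of the
rewrite pattern: an index-based loop over a vector of pairs becomes a
range-based loop whose elements are unpacked with structured bindings.

#include <cstdio>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<int, char>> Deps = {{1, 'a'}, {2, 'b'}};

  // Index-based form, analogous to what the patch removes.
  for (unsigned i = 0, e = Deps.size(); i != e; ++i)
    std::printf("%d %c\n", Deps[i].first, Deps[i].second);

  // Range-based form with structured bindings, analogous to what the
  // patch adds: each pair is unpacked into named variables, so there is
  // no index and no .first/.second access.
  for (const auto &[Id, Tag] : Deps)
    std::printf("%d %c\n", Id, Tag);

  return 0;
}

The same shape applies to the containers touched here; for plain register
lists, iterating by value (for (Register Reg : SrcVRegs)) is equally
idiomatic, since Register is a small value type.
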
Kazu Hirata, 2025-05-05 10:08:49 -07:00 (committed by GitHub)
parent f81193ddfd
commit cdc9a4b5f8
6 changed files with 18 additions and 25 deletions

llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp

@@ -1114,8 +1114,8 @@ void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
     MaskTy = LLT::scalar(PtrTy.getSizeInBits());
   else {
     // Ensure that the type will fit the mask value.
-    for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
-      if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
+    for (const SwitchCG::BitTestCase &Case : B.Cases) {
+      if (!isUIntN(SwitchOpTy.getSizeInBits(), Case.Mask)) {
         // Switch table case range are encoded into series of masks.
         // Just use pointer type, it's guaranteed to fit.
         MaskTy = LLT::scalar(PtrTy.getSizeInBits());

llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp

@@ -5498,9 +5498,8 @@ LegalizerHelper::fewerElementsBitcast(MachineInstr &MI, unsigned int TypeIdx,
 
   // Build new smaller bitcast instructions
   // Not supporting Leftover types for now but will have to
-  for (unsigned i = 0; i < SrcVRegs.size(); i++)
-    BitcastVRegs.push_back(
-        MIRBuilder.buildBitcast(NarrowTy, SrcVRegs[i]).getReg(0));
+  for (Register Reg : SrcVRegs)
+    BitcastVRegs.push_back(MIRBuilder.buildBitcast(NarrowTy, Reg).getReg(0));
 
   MIRBuilder.buildMergeLikeInstr(DstReg, BitcastVRegs);
   MI.eraseFromParent();
@@ -7379,9 +7378,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerTRUNC(MachineInstr &MI) {
       InterTy = SplitSrcTy.changeElementSize(DstTy.getScalarSizeInBits() * 2);
     else
       InterTy = SplitSrcTy.changeElementSize(DstTy.getScalarSizeInBits());
-    for (unsigned I = 0; I < SplitSrcs.size(); ++I) {
-      SplitSrcs[I] = MIRBuilder.buildTrunc(InterTy, SplitSrcs[I]).getReg(0);
-    }
+    for (Register &Src : SplitSrcs)
+      Src = MIRBuilder.buildTrunc(InterTy, Src).getReg(0);
 
     // Combine the new truncates into one vector
     auto Merge = MIRBuilder.buildMergeLikeInstr(

llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp

@@ -2588,8 +2588,7 @@ void InstrRefBasedLDV::placeMLocPHIs(
   auto CollectPHIsForLoc = [&](LocIdx L) {
     // Collect the set of defs.
     SmallPtrSet<MachineBasicBlock *, 32> DefBlocks;
-    for (unsigned int I = 0; I < OrderToBB.size(); ++I) {
-      MachineBasicBlock *MBB = OrderToBB[I];
+    for (MachineBasicBlock *MBB : OrderToBB) {
       const auto &TransferFunc = MLocTransfer[MBB->getNumber()];
       if (TransferFunc.contains(L))
         DefBlocks.insert(MBB);
@@ -3800,8 +3799,7 @@ bool InstrRefBasedLDV::ExtendRanges(MachineFunction &MF,
   // To mirror old LiveDebugValues, enumerate variables in RPOT order. Otherwise
   // the order is unimportant, it just has to be stable.
   unsigned VarAssignCount = 0;
-  for (unsigned int I = 0; I < OrderToBB.size(); ++I) {
-    auto *MBB = OrderToBB[I];
+  for (MachineBasicBlock *MBB : OrderToBB) {
     auto *VTracker = &vlocs[MBB->getNumber()];
     // Collect each variable with a DBG_VALUE in this block.
     for (auto &idx : VTracker->Vars) {

llvm/lib/CodeGen/MachineCSE.cpp

@@ -325,9 +325,8 @@ bool MachineCSEImpl::hasLivePhysRegDefUses(const MachineInstr *MI,
   }
 
   // Finally, add all defs to PhysRefs as well.
-  for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i)
-    for (MCRegAliasIterator AI(PhysDefs[i].second, TRI, true); AI.isValid();
-         ++AI)
+  for (const auto &Def : PhysDefs)
+    for (MCRegAliasIterator AI(Def.second, TRI, true); AI.isValid(); ++AI)
       PhysRefs.insert(*AI);
 
   return !PhysRefs.empty();
@@ -348,9 +347,8 @@ bool MachineCSEImpl::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
     if (MBB->pred_size() != 1 || *MBB->pred_begin() != CSMBB)
       return false;
 
-    for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i) {
-      if (MRI->isAllocatable(PhysDefs[i].second) ||
-          MRI->isReserved(PhysDefs[i].second))
+    for (const auto &PhysDef : PhysDefs) {
+      if (MRI->isAllocatable(PhysDef.second) || MRI->isReserved(PhysDef.second))
         // Avoid extending live range of physical registers if they are
         //allocatable or reserved.
         return false;

llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp

@@ -354,8 +354,8 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
       DelDeps.push_back(std::make_pair(SuccSU, D));
     }
   }
-  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
-    RemovePred(DelDeps[i].first, DelDeps[i].second);
+  for (const auto &[Del, Dep] : DelDeps)
+    RemovePred(Del, Dep);
 
   ++NumDups;
   return NewSU;
@@ -389,9 +389,8 @@ void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
       DelDeps.push_back(std::make_pair(SuccSU, Succ));
     }
   }
-  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
-    RemovePred(DelDeps[i].first, DelDeps[i].second);
-  }
+  for (const auto &[Del, Dep] : DelDeps)
+    RemovePred(Del, Dep);
   SDep FromDep(SU, SDep::Data, Reg);
   FromDep.setLatency(SU->Latency);
   AddPred(CopyFromSU, FromDep);

llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp

@@ -3161,8 +3161,8 @@ void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
   if (!TLI.isTypeLegal(VT)) {
     UsePtrType = true;
   } else {
-    for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
-      if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
+    for (const BitTestCase &Case : B.Cases)
+      if (!isUIntN(VT.getSizeInBits(), Case.Mask)) {
         // Switch table case range are encoded into series of masks.
         // Just use pointer type, it's guaranteed to fit.
         UsePtrType = true;