[CodeGen] Use range-based for loops (NFC) (#138488)
This is a reland of #138434 except that:

- the bits for llvm/lib/CodeGen/RenameIndependentSubregs.cpp have been dropped
  because they caused a test failure under asan, and
- the bits for llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp have been
  improved with structured bindings.
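For readers unfamiliar with the pattern, here is a minimal standalone sketch (not LLVM code) of the two rewrites this commit performs: an index-based loop replaced by a range-based for loop, and, for containers of pairs, a range-based loop with structured bindings. The names (DelDeps, removePred) only mirror the shapes seen in the hunks below and are purely illustrative.

// Minimal standalone sketch (not LLVM code); names are illustrative only.
#include <cstdio>
#include <utility>
#include <vector>

// Stand-in for the RemovePred-style calls made in the diff below.
static void removePred(int node, int dep) { std::printf("%d <- %d\n", node, dep); }

int main() {
  std::vector<std::pair<int, int>> DelDeps = {{1, 10}, {2, 20}};

  // Before: index-based loop over the container.
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    removePred(DelDeps[i].first, DelDeps[i].second);

  // After: range-based for loop with structured bindings; behavior is
  // unchanged (NFC), but the pair elements get descriptive names.
  for (const auto &[Node, Dep] : DelDeps)
    removePred(Node, Dep);
  return 0;
}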
parent f81193ddfd
commit cdc9a4b5f8
@@ -1114,8 +1114,8 @@ void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
     MaskTy = LLT::scalar(PtrTy.getSizeInBits());
   else {
     // Ensure that the type will fit the mask value.
-    for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
-      if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
+    for (const SwitchCG::BitTestCase &Case : B.Cases) {
+      if (!isUIntN(SwitchOpTy.getSizeInBits(), Case.Mask)) {
         // Switch table case range are encoded into series of masks.
         // Just use pointer type, it's guaranteed to fit.
         MaskTy = LLT::scalar(PtrTy.getSizeInBits());
@@ -5498,9 +5498,8 @@ LegalizerHelper::fewerElementsBitcast(MachineInstr &MI, unsigned int TypeIdx,
 
   // Build new smaller bitcast instructions
   // Not supporting Leftover types for now but will have to
-  for (unsigned i = 0; i < SrcVRegs.size(); i++)
-    BitcastVRegs.push_back(
-        MIRBuilder.buildBitcast(NarrowTy, SrcVRegs[i]).getReg(0));
+  for (Register Reg : SrcVRegs)
+    BitcastVRegs.push_back(MIRBuilder.buildBitcast(NarrowTy, Reg).getReg(0));
 
   MIRBuilder.buildMergeLikeInstr(DstReg, BitcastVRegs);
   MI.eraseFromParent();
@@ -7379,9 +7378,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerTRUNC(MachineInstr &MI) {
     InterTy = SplitSrcTy.changeElementSize(DstTy.getScalarSizeInBits() * 2);
   else
     InterTy = SplitSrcTy.changeElementSize(DstTy.getScalarSizeInBits());
-  for (unsigned I = 0; I < SplitSrcs.size(); ++I) {
-    SplitSrcs[I] = MIRBuilder.buildTrunc(InterTy, SplitSrcs[I]).getReg(0);
-  }
+  for (Register &Src : SplitSrcs)
+    Src = MIRBuilder.buildTrunc(InterTy, Src).getReg(0);
 
   // Combine the new truncates into one vector
   auto Merge = MIRBuilder.buildMergeLikeInstr(
@@ -2588,8 +2588,7 @@ void InstrRefBasedLDV::placeMLocPHIs(
   auto CollectPHIsForLoc = [&](LocIdx L) {
     // Collect the set of defs.
     SmallPtrSet<MachineBasicBlock *, 32> DefBlocks;
-    for (unsigned int I = 0; I < OrderToBB.size(); ++I) {
-      MachineBasicBlock *MBB = OrderToBB[I];
+    for (MachineBasicBlock *MBB : OrderToBB) {
       const auto &TransferFunc = MLocTransfer[MBB->getNumber()];
       if (TransferFunc.contains(L))
         DefBlocks.insert(MBB);
@@ -3800,8 +3799,7 @@ bool InstrRefBasedLDV::ExtendRanges(MachineFunction &MF,
   // To mirror old LiveDebugValues, enumerate variables in RPOT order. Otherwise
   // the order is unimportant, it just has to be stable.
   unsigned VarAssignCount = 0;
-  for (unsigned int I = 0; I < OrderToBB.size(); ++I) {
-    auto *MBB = OrderToBB[I];
+  for (MachineBasicBlock *MBB : OrderToBB) {
     auto *VTracker = &vlocs[MBB->getNumber()];
     // Collect each variable with a DBG_VALUE in this block.
     for (auto &idx : VTracker->Vars) {
@@ -325,9 +325,8 @@ bool MachineCSEImpl::hasLivePhysRegDefUses(const MachineInstr *MI,
   }
 
   // Finally, add all defs to PhysRefs as well.
-  for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i)
-    for (MCRegAliasIterator AI(PhysDefs[i].second, TRI, true); AI.isValid();
-         ++AI)
+  for (const auto &Def : PhysDefs)
+    for (MCRegAliasIterator AI(Def.second, TRI, true); AI.isValid(); ++AI)
       PhysRefs.insert(*AI);
 
   return !PhysRefs.empty();
@@ -348,9 +347,8 @@ bool MachineCSEImpl::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
   if (MBB->pred_size() != 1 || *MBB->pred_begin() != CSMBB)
     return false;
 
-  for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i) {
-    if (MRI->isAllocatable(PhysDefs[i].second) ||
-        MRI->isReserved(PhysDefs[i].second))
+  for (const auto &PhysDef : PhysDefs) {
+    if (MRI->isAllocatable(PhysDef.second) || MRI->isReserved(PhysDef.second))
       // Avoid extending live range of physical registers if they are
       //allocatable or reserved.
       return false;
@@ -354,8 +354,8 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
       DelDeps.push_back(std::make_pair(SuccSU, D));
     }
   }
-  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
-    RemovePred(DelDeps[i].first, DelDeps[i].second);
+  for (const auto &[Del, Dep] : DelDeps)
+    RemovePred(Del, Dep);
 
   ++NumDups;
   return NewSU;
@@ -389,9 +389,8 @@ void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
       DelDeps.push_back(std::make_pair(SuccSU, Succ));
     }
   }
-  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
-    RemovePred(DelDeps[i].first, DelDeps[i].second);
-  }
+  for (const auto &[Del, Dep] : DelDeps)
+    RemovePred(Del, Dep);
   SDep FromDep(SU, SDep::Data, Reg);
   FromDep.setLatency(SU->Latency);
   AddPred(CopyFromSU, FromDep);
@@ -3161,8 +3161,8 @@ void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
   if (!TLI.isTypeLegal(VT)) {
     UsePtrType = true;
   } else {
-    for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
-      if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
+    for (const BitTestCase &Case : B.Cases)
+      if (!isUIntN(VT.getSizeInBits(), Case.Mask)) {
         // Switch table case range are encoded into series of masks.
         // Just use pointer type, it's guaranteed to fit.
         UsePtrType = true;