From 7896c9f436a4eda5ec15e882a7505ba482a2fcd0 Mon Sep 17 00:00:00 2001
From: Chris Lattner
Date: Thu, 3 Dec 2009 00:50:42 +0000
Subject: [PATCH] improve portability to avoid conflicting with std::next in
 c++'0x. Patch by Howard Hinnant!

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@90365 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/CodeGen/DAGISelHeader.h          |  2 +-
 .../llvm/CodeGen/LinkAllCodegenComponents.h   |  1 +
 include/llvm/CodeGen/SelectionDAGNodes.h      |  2 +-
 include/llvm/LinkAllVMCore.h                  |  1 +
 lib/Analysis/ScalarEvolutionExpander.cpp      |  4 +--
 lib/CodeGen/BranchFolding.cpp                 |  8 ++---
 lib/CodeGen/CodePlacementOpt.cpp              |  9 +++---
 lib/CodeGen/LowerSubregs.cpp                  |  2 +-
 lib/CodeGen/MachineBasicBlock.cpp             |  7 ++--
 lib/CodeGen/MachineFunction.cpp               |  4 +--
 lib/CodeGen/MachineLoopInfo.cpp               |  6 ++--
 lib/CodeGen/PHIElimination.cpp                |  6 ++--
 lib/CodeGen/PreAllocSplitting.cpp             |  4 +--
 lib/CodeGen/PrologEpilogInserter.cpp          |  2 +-
 lib/CodeGen/RegisterScavenging.cpp            |  2 +-
 lib/CodeGen/SelectionDAG/FastISel.cpp         |  2 +-
 lib/CodeGen/SelectionDAG/LegalizeDAG.cpp      |  2 +-
 .../SelectionDAG/LegalizeVectorOps.cpp        |  2 +-
 lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp |  2 +-
 lib/CodeGen/SimpleRegisterCoalescing.cpp      | 32 +++++++++++--------
 lib/CodeGen/Spiller.cpp                       |  4 +--
 lib/CodeGen/StackSlotColoring.cpp             |  2 +-
 lib/CodeGen/TargetInstrInfoImpl.cpp           |  2 +-
 lib/CodeGen/TwoAddressInstructionPass.cpp     |  8 ++---
 lib/CodeGen/VirtRegRewriter.cpp               | 28 ++++++++--------
 lib/Target/ARM/ARMConstantIslandPass.cpp      | 22 ++++++-------
 lib/Target/ARM/ARMExpandPseudoInsts.cpp       |  2 +-
 lib/Target/ARM/ARMLoadStoreOptimizer.cpp      |  6 ++--
 lib/Target/ARM/NEONMoveFix.cpp                |  2 +-
 lib/Target/ARM/NEONPreAllocPass.cpp           |  2 +-
 lib/Target/ARM/Thumb1RegisterInfo.cpp         |  2 +-
 lib/Target/ARM/Thumb2SizeReduction.cpp        |  2 +-
 lib/Target/CBackend/CBackend.cpp              |  4 +--
 lib/Target/MSP430/MSP430InstrInfo.cpp         |  4 +--
 lib/Target/MSP430/MSP430RegisterInfo.cpp      |  6 ++--
 lib/Target/SystemZ/SystemZInstrInfo.cpp       |  4 +--
 lib/Target/SystemZ/SystemZRegisterInfo.cpp    |  2 +-
 lib/Target/TargetData.cpp                     |  1 +
 lib/Target/X86/X86COFFMachineModuleInfo.h     |  1 +
 lib/Target/X86/X86FloatingPoint.cpp           |  2 +-
 lib/Target/X86/X86InstrInfo.cpp               |  4 +--
 lib/Target/X86/X86RegisterInfo.cpp            |  6 ++--
 .../Scalar/SimplifyHalfPowrLibCalls.cpp       |  2 +-
 lib/Transforms/Utils/LowerSwitch.cpp          |  2 +-
 lib/VMCore/BasicBlock.cpp                     |  2 +-
 45 files changed, 116 insertions(+), 106 deletions(-)

diff --git a/include/llvm/CodeGen/DAGISelHeader.h b/include/llvm/CodeGen/DAGISelHeader.h
index 624f18aba61..6a2b166c99a 100644
--- a/include/llvm/CodeGen/DAGISelHeader.h
+++ b/include/llvm/CodeGen/DAGISelHeader.h
@@ -93,7 +93,7 @@ void SelectRoot(SelectionDAG &DAG) {
   // a reference to the root node, preventing it from being deleted,
   // and tracking any changes of the root.
   HandleSDNode Dummy(CurDAG->getRoot());
-  ISelPosition = next(SelectionDAG::allnodes_iterator(CurDAG->getRoot().getNode()));
+  ISelPosition = llvm::next(SelectionDAG::allnodes_iterator(CurDAG->getRoot().getNode()));
   // The AllNodes list is now topological-sorted. Visit the
   // nodes by starting at the end of the list (the root of the
diff --git a/include/llvm/CodeGen/LinkAllCodegenComponents.h b/include/llvm/CodeGen/LinkAllCodegenComponents.h
index 4d2d0eec52d..5608c999e12 100644
--- a/include/llvm/CodeGen/LinkAllCodegenComponents.h
+++ b/include/llvm/CodeGen/LinkAllCodegenComponents.h
@@ -19,6 +19,7 @@
 #include "llvm/CodeGen/SchedulerRegistry.h"
 #include "llvm/CodeGen/GCs.h"
 #include "llvm/Target/TargetMachine.h"
+#include
 namespace {
   struct ForceCodegenLinking {
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
index 79dbd73e97f..580986a1814 100644
--- a/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1095,7 +1095,7 @@ public:
   /// hasOneUse - Return true if there is exactly one use of this node.
   ///
   bool hasOneUse() const {
-    return !use_empty() && next(use_begin()) == use_end();
+    return !use_empty() && llvm::next(use_begin()) == use_end();
   }
   /// use_size - Return the number of uses of this node. This method takes
diff --git a/include/llvm/LinkAllVMCore.h b/include/llvm/LinkAllVMCore.h
index 0ee18d57a04..2145bf88c37 100644
--- a/include/llvm/LinkAllVMCore.h
+++ b/include/llvm/LinkAllVMCore.h
@@ -35,6 +35,7 @@
 #include "llvm/Support/Mangler.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/SlowOperationInformer.h"
+#include
 namespace {
   struct ForceVMCoreLinking {
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index d674ee847f1..bcccb04a1e7 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -628,7 +628,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
     BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
     BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
     BasicBlock::iterator NewInsertPt =
-      next(BasicBlock::iterator(cast(V)));
+      llvm::next(BasicBlock::iterator(cast(V)));
     while (isa(NewInsertPt)) ++NewInsertPt;
     V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0, NewInsertPt);
@@ -844,7 +844,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
     if (L && S->hasComputableLoopEvolution(L))
       InsertPt = L->getHeader()->getFirstNonPHI();
     while (isInsertedInstruction(InsertPt))
-      InsertPt = next(BasicBlock::iterator(InsertPt));
+      InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
     break;
   }
diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp
index 8a62eb20bbb..9fd95fde635 100644
--- a/lib/CodeGen/BranchFolding.cpp
+++ b/lib/CodeGen/BranchFolding.cpp
@@ -427,7 +427,7 @@ static unsigned EstimateRuntime(MachineBasicBlock::iterator I,
 static void FixTail(MachineBasicBlock *CurMBB, MachineBasicBlock *SuccBB,
                     const TargetInstrInfo *TII) {
   MachineFunction *MF = CurMBB->getParent();
-  MachineFunction::iterator I = next(MachineFunction::iterator(CurMBB));
+  MachineFunction::iterator I = llvm::next(MachineFunction::iterator(CurMBB));
   MachineBasicBlock *TBB = 0, *FBB = 0;
   SmallVector Cond;
   if (I != MF->end() &&
@@ -805,7 +805,7 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
   // a compile-time infinite loop repeatedly doing and undoing the same
   // transformations.)
-  for (MachineFunction::iterator I = next(MF.begin()), E = MF.end();
+  for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
        I != E; ++I) {
     if (I->pred_size() >= 2 && I->pred_size() < TailMergeThreshold) {
       SmallPtrSet UniquePreds;
@@ -833,7 +833,7 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
         continue;
       // This is the QBB case described above
       if (!FBB)
-        FBB = next(MachineFunction::iterator(PBB));
+        FBB = llvm::next(MachineFunction::iterator(PBB));
     }
     // Failing case: the only way IBB can be reached from PBB is via
     // exception handling. Happens for landing pads. Would be nice
@@ -1239,7 +1239,7 @@ ReoptimizeBlock:
     // B elsewhere
     // next:
     if (CurFallsThru) {
-      MachineBasicBlock *NextBB = next(MachineFunction::iterator(MBB));
+      MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
       CurCond.clear();
       TII->InsertBranch(*MBB, NextBB, 0, CurCond);
     }
diff --git a/lib/CodeGen/CodePlacementOpt.cpp b/lib/CodeGen/CodePlacementOpt.cpp
index e9844d84c17..ff71f6b8067 100644
--- a/lib/CodeGen/CodePlacementOpt.cpp
+++ b/lib/CodeGen/CodePlacementOpt.cpp
@@ -182,7 +182,7 @@ bool CodePlacementOpt::EliminateUnconditionalJumpsToTop(MachineFunction &MF,
   // Move it and all the blocks that can reach it via fallthrough edges
   // exclusively, to keep existing fallthrough edges intact.
   MachineFunction::iterator Begin = Pred;
-  MachineFunction::iterator End = next(Begin);
+  MachineFunction::iterator End = llvm::next(Begin);
   while (Begin != MF.begin()) {
     MachineFunction::iterator Prior = prior(Begin);
     if (Prior == MF.begin())
@@ -255,7 +255,8 @@ bool CodePlacementOpt::MoveDiscontiguousLoopBlocks(MachineFunction &MF,
   // to the top of the loop to avoid loosing that fallthrough. Otherwise append
   // them to the bottom, even if it previously had a fallthrough, on the theory
   // that it's worth an extra branch to keep the loop contiguous.
-  MachineFunction::iterator InsertPt = next(MachineFunction::iterator(BotMBB));
+  MachineFunction::iterator InsertPt =
+    llvm::next(MachineFunction::iterator(BotMBB));
   bool InsertAtTop = false;
   if (TopMBB != MF.begin() &&
       !HasFallthrough(prior(MachineFunction::iterator(TopMBB))) &&
@@ -268,7 +269,7 @@
   // with the loop header.
   SmallPtrSet ContiguousBlocks;
   for (MachineFunction::iterator I = TopMBB,
-       E = next(MachineFunction::iterator(BotMBB)); I != E; ++I)
+       E = llvm::next(MachineFunction::iterator(BotMBB)); I != E; ++I)
     ContiguousBlocks.insert(I);
   // Find non-contigous blocks and fix them.
@@ -301,7 +302,7 @@
       // Process this block and all loop blocks contiguous with it, to keep
       // them in their relative order.
       MachineFunction::iterator Begin = BB;
-      MachineFunction::iterator End = next(MachineFunction::iterator(BB));
+      MachineFunction::iterator End = llvm::next(MachineFunction::iterator(BB));
       for (; End != MF.end(); ++End) {
         if (!L->contains(End)) break;
         if (!HasAnalyzableTerminator(End)) break;
diff --git a/lib/CodeGen/LowerSubregs.cpp b/lib/CodeGen/LowerSubregs.cpp
index 30636a8edbb..80eb6cdcba6 100644
--- a/lib/CodeGen/LowerSubregs.cpp
+++ b/lib/CodeGen/LowerSubregs.cpp
@@ -312,7 +312,7 @@ bool LowerSubregsInstructionPass::runOnMachineFunction(MachineFunction &MF) {
        mbbi != mbbe; ++mbbi) {
     for (MachineBasicBlock::iterator mi = mbbi->begin(), me = mbbi->end();
          mi != me;) {
-      MachineBasicBlock::iterator nmi = next(mi);
+      MachineBasicBlock::iterator nmi = llvm::next(mi);
       MachineInstr *MI = mi;
       if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
         MadeChange |= LowerExtract(MI);
diff --git a/lib/CodeGen/MachineBasicBlock.cpp b/lib/CodeGen/MachineBasicBlock.cpp
index e55e3694bcc..5ef25ca4eed 100644
--- a/lib/CodeGen/MachineBasicBlock.cpp
+++ b/lib/CodeGen/MachineBasicBlock.cpp
@@ -290,7 +290,7 @@ void MachineBasicBlock::updateTerminator() {
     } else {
       // The block has a fallthrough conditional branch.
       MachineBasicBlock *MBBA = *succ_begin();
-      MachineBasicBlock *MBBB = *next(succ_begin());
+      MachineBasicBlock *MBBB = *llvm::next(succ_begin());
       if (MBBA == TBB) std::swap(MBBB, MBBA);
       if (isLayoutSuccessor(TBB)) {
         if (TII->ReverseBranchCondition(Cond)) {
@@ -359,7 +359,7 @@ bool MachineBasicBlock::isSuccessor(const MachineBasicBlock *MBB) const {
 bool MachineBasicBlock::isLayoutSuccessor(const MachineBasicBlock *MBB) const {
   MachineFunction::const_iterator I(this);
-  return next(I) == MachineFunction::const_iterator(MBB);
+  return llvm::next(I) == MachineFunction::const_iterator(MBB);
 }
 bool MachineBasicBlock::canFallThrough() {
@@ -461,7 +461,8 @@ bool MachineBasicBlock::CorrectExtraCFGEdges(MachineBasicBlock *DestA,
   bool MadeChange = false;
   bool AddedFallThrough = false;
-  MachineFunction::iterator FallThru = next(MachineFunction::iterator(this));
+  MachineFunction::iterator FallThru =
+    llvm::next(MachineFunction::iterator(this));
   // If this block ends with a conditional branch that falls through to its
   // successor, set DestB as the successor.
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index d20f4464e50..dd6fd7ea594 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -328,7 +328,7 @@ void MachineFunction::print(raw_ostream &OS) const {
       if (I->second)
         OS << " in reg%" << I->second;
-      if (next(I) != E)
+      if (llvm::next(I) != E)
         OS << ", ";
     }
     OS << '\n';
@@ -342,7 +342,7 @@ void MachineFunction::print(raw_ostream &OS) const {
       else
         OS << "%physreg" << *I;
-      if (next(I) != E)
+      if (llvm::next(I) != E)
         OS << " ";
     }
     OS << '\n';
diff --git a/lib/CodeGen/MachineLoopInfo.cpp b/lib/CodeGen/MachineLoopInfo.cpp
index db77d192ccb..63f4f18e4d5 100644
--- a/lib/CodeGen/MachineLoopInfo.cpp
+++ b/lib/CodeGen/MachineLoopInfo.cpp
@@ -62,11 +62,11 @@ MachineBasicBlock *MachineLoop::getBottomBlock() {
   MachineBasicBlock *BotMBB = getHeader();
   MachineFunction::iterator End = BotMBB->getParent()->end();
   if (BotMBB != prior(End)) {
-    MachineBasicBlock *NextMBB = next(MachineFunction::iterator(BotMBB));
+    MachineBasicBlock *NextMBB = llvm::next(MachineFunction::iterator(BotMBB));
     while (contains(NextMBB)) {
       BotMBB = NextMBB;
-      if (BotMBB == next(MachineFunction::iterator(BotMBB))) break;
-      NextMBB = next(MachineFunction::iterator(BotMBB));
+      if (BotMBB == llvm::next(MachineFunction::iterator(BotMBB))) break;
+      NextMBB = llvm::next(MachineFunction::iterator(BotMBB));
     }
   }
   return BotMBB;
diff --git a/lib/CodeGen/PHIElimination.cpp b/lib/CodeGen/PHIElimination.cpp
index 68a1c240c85..c62d17958d0 100644
--- a/lib/CodeGen/PHIElimination.cpp
+++ b/lib/CodeGen/PHIElimination.cpp
@@ -301,8 +301,8 @@ void llvm::PHIElimination::LowerAtomicPHINode(
   // Check that no other terminators use values.
 #ifndef NDEBUG
-  for (MachineBasicBlock::iterator TI = next(Term); TI != opBlock.end();
-       ++TI) {
+  for (MachineBasicBlock::iterator TI = llvm::next(Term);
+       TI != opBlock.end(); ++TI) {
     assert(!TI->readsRegister(SrcReg) &&
            "Terminator instructions cannot use virtual registers unless"
            "they are the first terminator in a block!");
@@ -377,7 +377,7 @@ MachineBasicBlock *PHIElimination::SplitCriticalEdge(MachineBasicBlock *A,
   ++NumSplits;
   MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
-  MF->insert(next(MachineFunction::iterator(A)), NMBB);
+  MF->insert(llvm::next(MachineFunction::iterator(A)), NMBB);
   DEBUG(errs() << "PHIElimination splitting critical edge:"
         " BB#" << A->getNumber()
         << " -- BB#" << NMBB->getNumber()
diff --git a/lib/CodeGen/PreAllocSplitting.cpp b/lib/CodeGen/PreAllocSplitting.cpp
index 8f623452e27..afd7b882c70 100644
--- a/lib/CodeGen/PreAllocSplitting.cpp
+++ b/lib/CodeGen/PreAllocSplitting.cpp
@@ -876,7 +876,7 @@ bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
   if (!ValNo->isDefAccurate() || DefMI->getParent() == BarrierMBB)
     KillPt = findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB);
   else
-    KillPt = next(MachineBasicBlock::iterator(DefMI));
+    KillPt = llvm::next(MachineBasicBlock::iterator(DefMI));
   if (KillPt == DefMI->getParent()->end())
     return false;
@@ -1118,7 +1118,7 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
         return false; // No gap to insert spill.
       }
     } else {
-      SpillPt = next(MachineBasicBlock::iterator(DefMI));
+      SpillPt = llvm::next(MachineBasicBlock::iterator(DefMI));
       if (SpillPt == DefMBB->end()) {
         DEBUG(errs() << "FAILED (could not find a suitable spill point).\n");
         return false; // No gap to insert spill.
diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp
index 8905f757a07..4d9f9f8ccd2 100644
--- a/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/lib/CodeGen/PrologEpilogInserter.cpp
@@ -674,7 +674,7 @@ void PEI::replaceFrameIndices(MachineFunction &Fn) {
         if (PrevI == BB->end())
           I = BB->begin(); // The replaced instr was the first in the block.
         else
-          I = next(PrevI);
+          I = llvm::next(PrevI);
         continue;
       }
diff --git a/lib/CodeGen/RegisterScavenging.cpp b/lib/CodeGen/RegisterScavenging.cpp
index 94680ed2992..67bf209c732 100644
--- a/lib/CodeGen/RegisterScavenging.cpp
+++ b/lib/CodeGen/RegisterScavenging.cpp
@@ -125,7 +125,7 @@ void RegScavenger::forward() {
     Tracking = true;
   } else {
     assert(MBBI != MBB->end() && "Already at the end of the basic block!");
-    MBBI = next(MBBI);
+    MBBI = llvm::next(MBBI);
   }
   MachineInstr *MI = MBBI;
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 5eb9ca1ebe0..c6c14016d28 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -541,7 +541,7 @@ FastISel::SelectInstruction(Instruction *I) {
 void FastISel::FastEmitBranch(MachineBasicBlock *MSucc) {
   MachineFunction::iterator NextMBB =
-    next(MachineFunction::iterator(MBB));
+    llvm::next(MachineFunction::iterator(MBB));
   if (MBB->isLayoutSuccessor(MSucc)) {
     // The unconditional fall-through case, which needs no instructions.
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 273dbf0d561..25eb6378f15 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -232,7 +232,7 @@ void SelectionDAGLegalize::LegalizeDAG() {
   // node is only legalized after all of its operands are legalized.
   DAG.AssignTopologicalOrder();
   for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
-       E = prior(DAG.allnodes_end()); I != next(E); ++I)
+       E = prior(DAG.allnodes_end()); I != llvm::next(E); ++I)
     LegalizeOp(SDValue(I, 0));
   // Finally, it's possible the root changed. Get the new root.
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 785c2adb394..2dc7a547ce3 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -79,7 +79,7 @@ bool VectorLegalizer::Run() {
   // node is only legalized after all of its operands are legalized.
   DAG.AssignTopologicalOrder();
   for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
-       E = prior(DAG.allnodes_end()); I != next(E); ++I)
+       E = prior(DAG.allnodes_end()); I != llvm::next(E); ++I)
     LegalizeOp(SDValue(I, 0));
   // Finally, it's possible the root changed. Get the new root.
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index c39437f9864..acbb5c9bd26 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -789,7 +789,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn,
         SDB->setCurDebugLoc(FastIS->getCurDebugLoc());
         bool HadTailCall = false;
-        SelectBasicBlock(LLVMBB, BI, next(BI), HadTailCall);
+        SelectBasicBlock(LLVMBB, BI, llvm::next(BI), HadTailCall);
         // If the call was emitted as a tail call, we're done with the block.
         if (HadTailCall) {
diff --git a/lib/CodeGen/SimpleRegisterCoalescing.cpp b/lib/CodeGen/SimpleRegisterCoalescing.cpp
index cb2d22b6c99..94f17cec9b4 100644
--- a/lib/CodeGen/SimpleRegisterCoalescing.cpp
+++ b/lib/CodeGen/SimpleRegisterCoalescing.cpp
@@ -130,7 +130,8 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
   // See PR3149:
   //  172     %ECX = MOV32rr %reg1039
   //  180     INLINEASM , 10, %EAX, 14, %ECX, 9, %EAX,
+  //            sbbl $3,$0>, 10, %EAX, 14, %ECX, 9,
+  //            %EAX,
   //  36, , 1, %reg0, 0, 9, %ECX, 36, , 1, %reg0, 0
   //  188     %EAX = MOV32rr %EAX
   //  196     %ECX = MOV32rr %ECX
@@ -281,12 +282,12 @@ TransferImplicitOps(MachineInstr *MI, MachineInstr *NewMI) {
   }
 }
-/// RemoveCopyByCommutingDef - We found a non-trivially-coalescable copy with IntA
-/// being the source and IntB being the dest, thus this defines a value number
-/// in IntB. If the source value number (in IntA) is defined by a commutable
-/// instruction and its other operand is coalesced to the copy dest register,
-/// see if we can transform the copy into a noop by commuting the definition. For
-/// example,
+/// RemoveCopyByCommutingDef - We found a non-trivially-coalescable copy with
+/// IntA being the source and IntB being the dest, thus this defines a value
+/// number in IntB. If the source value number (in IntA) is defined by a
+/// commutable instruction and its other operand is coalesced to the copy dest
+/// register, see if we can transform the copy into a noop by commuting the
+/// definition. For example,
 ///
 ///  A3 = op A2 B0
 ///    ...
@@ -508,7 +509,8 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
     if (BHasSubRegs) {
       for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
         LiveInterval &SRLI = li_->getInterval(*SR);
-        SRLI.MergeInClobberRange(*li_, AI->start, End, li_->getVNInfoAllocator());
+        SRLI.MergeInClobberRange(*li_, AI->start, End,
+                                 li_->getVNInfoAllocator());
       }
     }
   }
@@ -708,7 +710,8 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
     checkForDeadDef = true;
   }
-  MachineBasicBlock::iterator MII = next(MachineBasicBlock::iterator(CopyMI));
+  MachineBasicBlock::iterator MII =
+    llvm::next(MachineBasicBlock::iterator(CopyMI));
   tii_->reMaterialize(*MBB, MII, DstReg, DstSubIdx, DefMI, tri_);
   MachineInstr *NewMI = prior(MII);
@@ -1611,9 +1614,9 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
       }
     }
   } else {
-    // If the virtual register live interval is long but it has low use desity,
-    // do not join them, instead mark the physical register as its allocation
-    // preference.
+    // If the virtual register live interval is long but it has low use
+    // density, do not join them, instead mark the physical register as its
+    // allocation preference.
     LiveInterval &JoinVInt = SrcIsPhys ? DstInt : SrcInt;
     unsigned JoinVReg = SrcIsPhys ? DstReg : SrcReg;
     unsigned JoinPReg = SrcIsPhys ? SrcReg : DstReg;
@@ -2739,7 +2742,8 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
     joinIntervals();
     DEBUG({
         errs() << "********** INTERVALS POST JOINING **********\n";
-        for (LiveIntervals::iterator I = li_->begin(), E = li_->end(); I != E; ++I){
+        for (LiveIntervals::iterator I = li_->begin(), E = li_->end();
+             I != E; ++I){
           I->second->print(errs(), tri_);
           errs() << "\n";
         }
@@ -2780,7 +2784,7 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
           DoDelete = true;
         }
         if (!DoDelete)
-          mii = next(mii);
+          mii = llvm::next(mii);
         else {
           li_->RemoveMachineInstrFromMaps(MI);
           mii = mbbi->erase(mii);
diff --git a/lib/CodeGen/Spiller.cpp b/lib/CodeGen/Spiller.cpp
index 237d0b5f465..74662155654 100644
--- a/lib/CodeGen/Spiller.cpp
+++ b/lib/CodeGen/Spiller.cpp
@@ -140,9 +140,9 @@ protected:
       // Insert store if necessary.
       if (hasDef) {
-        tii->storeRegToStackSlot(*mi->getParent(), next(miItr), newVReg, true,
+        tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), newVReg, true,
                                  ss, trc);
-        MachineInstr *storeInstr(next(miItr));
+        MachineInstr *storeInstr(llvm::next(miItr));
         SlotIndex storeIndex =
          lis->InsertMachineInstrInMaps(storeInstr).getDefIndex();
         SlotIndex beginIndex = storeIndex.getPrevIndex();
diff --git a/lib/CodeGen/StackSlotColoring.cpp b/lib/CodeGen/StackSlotColoring.cpp
index c299192b222..fd25a37c6c6 100644
--- a/lib/CodeGen/StackSlotColoring.cpp
+++ b/lib/CodeGen/StackSlotColoring.cpp
@@ -668,7 +668,7 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
     if (DCELimit != -1 && (int)NumDead >= DCELimit)
       break;
-    MachineBasicBlock::iterator NextMI = next(I);
+    MachineBasicBlock::iterator NextMI = llvm::next(I);
     if (NextMI == MBB->end()) continue;
     int FirstSS, SecondSS;
diff --git a/lib/CodeGen/TargetInstrInfoImpl.cpp b/lib/CodeGen/TargetInstrInfoImpl.cpp
index 102e2a34a97..393e315a321 100644
--- a/lib/CodeGen/TargetInstrInfoImpl.cpp
+++ b/lib/CodeGen/TargetInstrInfoImpl.cpp
@@ -329,7 +329,7 @@ TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(const MachineInstr *
       return false;
     // For the def, it should be the only def of that register.
-    if (MO.isDef() && (next(MRI.def_begin(Reg)) != MRI.def_end() ||
+    if (MO.isDef() && (llvm::next(MRI.def_begin(Reg)) != MRI.def_end() ||
                        MRI.isLiveIn(Reg)))
       return false;
diff --git a/lib/CodeGen/TwoAddressInstructionPass.cpp b/lib/CodeGen/TwoAddressInstructionPass.cpp
index 5fa690bc428..98b95acdbc8 100644
--- a/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -211,7 +211,7 @@ bool TwoAddressInstructionPass::Sink3AddrInstruction(MachineBasicBlock *MBB,
     ++KillPos;
   unsigned NumVisited = 0;
-  for (MachineBasicBlock::iterator I = next(OldPos); I != KillPos; ++I) {
+  for (MachineBasicBlock::iterator I = llvm::next(OldPos); I != KillPos; ++I) {
     MachineInstr *OtherMI = I;
     if (NumVisited > 30)  // FIXME: Arbitrary limit to reduce compile time cost.
       return false;
@@ -412,7 +412,7 @@ static bool isKilled(MachineInstr &MI, unsigned Reg,
     MachineRegisterInfo::def_iterator Begin = MRI->def_begin(Reg);
     // If there are multiple defs, we can't do a simple analysis, so just
    // go with what the kill flag says.
-    if (next(Begin) != MRI->def_end())
+    if (llvm::next(Begin) != MRI->def_end())
       return true;
     DefMI = &*Begin;
     bool IsSrcPhys, IsDstPhys;
@@ -643,7 +643,7 @@ TwoAddressInstructionPass::ConvertInstTo3Addr(MachineBasicBlock::iterator &mi,
   if (!Sunk) {
     DistanceMap.insert(std::make_pair(NewMI, Dist));
     mi = NewMI;
-    nmi = next(mi);
+    nmi = llvm::next(mi);
   }
   return true;
 }
@@ -923,7 +923,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
     Processed.clear();
     for (MachineBasicBlock::iterator mi = mbbi->begin(), me = mbbi->end();
          mi != me; ) {
-      MachineBasicBlock::iterator nmi = next(mi);
+      MachineBasicBlock::iterator nmi = llvm::next(mi);
       const TargetInstrDesc &TID = mi->getDesc();
       bool FirstTied = true;
diff --git a/lib/CodeGen/VirtRegRewriter.cpp b/lib/CodeGen/VirtRegRewriter.cpp
index 10c806677c9..054c3b631b9 100644
--- a/lib/CodeGen/VirtRegRewriter.cpp
+++ b/lib/CodeGen/VirtRegRewriter.cpp
@@ -754,7 +754,7 @@ void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
     }
     // Skip over the same register.
-    std::multimap::iterator NI = next(I);
+    std::multimap::iterator NI = llvm::next(I);
     while (NI != E && NI->first == Reg) {
       ++I;
       ++NI;
@@ -1133,7 +1133,7 @@ private:
                           std::vector &KillOps,
                           VirtRegMap &VRM) {
-    MachineBasicBlock::iterator NextMII = next(MII);
+    MachineBasicBlock::iterator NextMII = llvm::next(MII);
     if (NextMII == MBB.end())
       return false;
@@ -1186,7 +1186,7 @@ private:
     // Unfold next instructions that fold the same SS.
     do {
       MachineInstr &NextMI = *NextMII;
-      NextMII = next(NextMII);
+      NextMII = llvm::next(NextMII);
       NewMIs.clear();
       if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
         llvm_unreachable("Unable unfold the load / store folding instruction!");
@@ -1463,8 +1463,8 @@ private:
                         std::vector &KillOps,
                         VirtRegMap &VRM) {
-    MachineBasicBlock::iterator oldNextMII = next(MII);
-    TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
+    MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
+    TII->storeRegToStackSlot(MBB, llvm::next(MII), PhysReg, true, StackSlot, RC);
     MachineInstr *StoreMI = prior(oldNextMII);
     VRM.addSpillSlotUse(StackSlot, StoreMI);
     DEBUG(errs() << "Store:\t" << *StoreMI);
@@ -1626,14 +1626,14 @@ private:
     DistanceMap.clear();
     for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
          MII != E; ) {
-      MachineBasicBlock::iterator NextMII = next(MII);
+      MachineBasicBlock::iterator NextMII = llvm::next(MII);
       VirtRegMap::MI2VirtMapTy::const_iterator I, End;
       bool Erased = false;
       bool BackTracked = false;
       if (OptimizeByUnfold(MBB, MII, MaybeDeadStores, Spills,
                            RegKills, KillOps, VRM))
-        NextMII = next(MII);
+        NextMII = llvm::next(MII);
       MachineInstr &MI = *MII;
@@ -1657,7 +1657,7 @@ private:
           // Back-schedule reloads and remats.
           MachineBasicBlock::iterator InsertLoc =
-            ComputeReloadLoc(next(MII), MBB.begin(), PhysReg, TRI, false,
+            ComputeReloadLoc(llvm::next(MII), MBB.begin(), PhysReg, TRI, false,
                              SS, TII, MF);
           TII->loadRegFromStackSlot(MBB, InsertLoc, PhysReg, SS, RC);
@@ -1667,7 +1667,7 @@ private:
           ++NumPSpills;
           DistanceMap.insert(std::make_pair(LoadMI, Dist++));
         }
-        NextMII = next(MII);
+        NextMII = llvm::next(MII);
       }
       // Insert restores here if asked to.
@@ -1785,14 +1785,14 @@ private:
         const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
         unsigned Phys = VRM.getPhys(VirtReg);
         int StackSlot = VRM.getStackSlot(VirtReg);
-        MachineBasicBlock::iterator oldNextMII = next(MII);
-        TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
+        MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
+        TII->storeRegToStackSlot(MBB, llvm::next(MII), Phys, isKill, StackSlot, RC);
        MachineInstr *StoreMI = prior(oldNextMII);
        VRM.addSpillSlotUse(StackSlot, StoreMI);
        DEBUG(errs() << "Store:\t" << *StoreMI);
        VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
      }
-     NextMII = next(MII);
+     NextMII = llvm::next(MII);
    }
   /// ReusedOperands - Keep track of operand reuse in case we need to undo
@@ -2265,7 +2265,7 @@ private:
             if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
                                     Spills, RegKills, KillOps, TRI, VRM)) {
-              NextMII = next(MII);
+              NextMII = llvm::next(MII);
               BackTracked = true;
               goto ProcessNextInst;
             }
@@ -2381,7 +2381,7 @@ private:
         MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
         SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
                             LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
-        NextMII = next(MII);
+        NextMII = llvm::next(MII);
         // Check to see if this is a noop copy. If so, eliminate the
         // instruction before considering the dest reg to be changed.
diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp
index e59a315a483..acd30d2897b 100644
--- a/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -418,10 +418,10 @@ void ARMConstantIslands::DoInitialPlacement(MachineFunction &MF,
 static bool BBHasFallthrough(MachineBasicBlock *MBB) {
   // Get the next machine basic block in the function.
   MachineFunction::iterator MBBI = MBB;
-  if (next(MBBI) == MBB->getParent()->end())  // Can't fall off end of function.
+  if (llvm::next(MBBI) == MBB->getParent()->end())  // Can't fall off end of function.
     return false;
-  MachineBasicBlock *NextBB = next(MBBI);
+  MachineBasicBlock *NextBB = llvm::next(MBBI);
   for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
        E = MBB->succ_end(); I != E; ++I)
     if (*I == NextBB)
@@ -760,7 +760,7 @@ MachineBasicBlock *ARMConstantIslands::SplitBlockBeforeInstr(MachineInstr *MI) {
                     CompareMBBNumbers);
   MachineBasicBlock* WaterBB = *IP;
   if (WaterBB == OrigBB)
-    WaterList.insert(next(IP), NewBB);
+    WaterList.insert(llvm::next(IP), NewBB);
   else
     WaterList.insert(IP, OrigBB);
   NewWaterList.insert(OrigBB);
@@ -887,7 +887,7 @@ static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
 void ARMConstantIslands::AdjustBBOffsetsAfter(MachineBasicBlock *BB,
                                               int delta) {
-  MachineFunction::iterator MBBI = BB; MBBI = next(MBBI);
+  MachineFunction::iterator MBBI = BB; MBBI = llvm::next(MBBI);
   for(unsigned i = BB->getNumber()+1, e = BB->getParent()->getNumBlockIDs();
       i < e; ++i) {
     BBOffsets[i] += delta;
@@ -929,7 +929,7 @@ void ARMConstantIslands::AdjustBBOffsetsAfter(MachineBasicBlock *BB,
       if (delta==0)
         return;
     }
-    MBBI = next(MBBI);
+    MBBI = llvm::next(MBBI);
   }
 }
@@ -1096,7 +1096,7 @@ void ARMConstantIslands::CreateNewWater(unsigned CPUserIndex,
     DEBUG(errs() << "Split at end of block\n");
     if (&UserMBB->back() == UserMI)
       assert(BBHasFallthrough(UserMBB) && "Expected a fallthrough BB!");
-    NewMBB = next(MachineFunction::iterator(UserMBB));
+    NewMBB = llvm::next(MachineFunction::iterator(UserMBB));
    // Add an unconditional branch from UserMBB to fallthrough block.
    // Record it for branch lengthening; this new branch will not get out of
    // range, but if the preceding conditional branch is out of range, the
@@ -1144,7 +1144,7 @@ void ARMConstantIslands::CreateNewWater(unsigned CPUserIndex,
   for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
        Offset < BaseInsertOffset;
        Offset += TII->GetInstSizeInBytes(MI),
-       MI = next(MI)) {
+       MI = llvm::next(MI)) {
     if (CPUIndex < CPUsers.size() && CPUsers[CPUIndex].MI == MI) {
       CPUser &U = CPUsers[CPUIndex];
       if (!OffsetIsInRange(Offset, EndInsertOffset,
@@ -1204,7 +1204,7 @@ bool ARMConstantIslands::HandleConstantPoolUser(MachineFunction &MF,
       NewWaterList.insert(NewIsland);
     }
     // The new CPE goes before the following block (NewMBB).
-    NewMBB = next(MachineFunction::iterator(WaterBB));
+    NewMBB = llvm::next(MachineFunction::iterator(WaterBB));
   } else {
     // No water found.
@@ -1406,7 +1406,7 @@ ARMConstantIslands::FixUpConditionalBr(MachineFunction &MF, ImmBranch &Br) {
   NumCBrFixed++;
   if (BMI != MI) {
-    if (next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
+    if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
         BMI->getOpcode() == Br.UncondBr) {
       // Last MI in the BB is an unconditional branch. Can we simply invert the
       // condition and swap destinations:
@@ -1433,12 +1433,12 @@ ARMConstantIslands::FixUpConditionalBr(MachineFunction &MF, ImmBranch &Br) {
     // branch to the destination.
     int delta = TII->GetInstSizeInBytes(&MBB->back());
     BBSizes[MBB->getNumber()] -= delta;
-    MachineBasicBlock* SplitBB = next(MachineFunction::iterator(MBB));
+    MachineBasicBlock* SplitBB = llvm::next(MachineFunction::iterator(MBB));
     AdjustBBOffsetsAfter(SplitBB, -delta);
     MBB->back().eraseFromParent();
     // BBOffsets[SplitBB] is wrong temporarily, fixed below
   }
-  MachineBasicBlock *NextBB = next(MachineFunction::iterator(MBB));
+  MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
   DEBUG(errs() << " Insert B to BB#" << DestBB->getNumber()
                << " also invert condition and change dest. to BB#"
diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index c929c54d489..1b8727d9848 100644
--- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -48,7 +48,7 @@ bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
   MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
   while (MBBI != E) {
     MachineInstr &MI = *MBBI;
-    MachineBasicBlock::iterator NMBBI = next(MBBI);
+    MachineBasicBlock::iterator NMBBI = llvm::next(MBBI);
     unsigned Opcode = MI.getOpcode();
     switch (Opcode) {
diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 304d0ef6624..22bd80e594e 100644
--- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -449,7 +449,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
   }
   if (MBBI != MBB.end()) {
-    MachineBasicBlock::iterator NextMBBI = next(MBBI);
+    MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
     if ((Mode == ARM_AM::ia || Mode == ARM_AM::ib) &&
         isMatchingIncrement(NextMBBI, Base, Bytes, 0, Pred, PredReg)) {
       MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
@@ -494,7 +494,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
   }
   if (MBBI != MBB.end()) {
-    MachineBasicBlock::iterator NextMBBI = next(MBBI);
+    MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
     if (Mode == ARM_AM::ia &&
         isMatchingIncrement(NextMBBI, Base, Bytes, 0, Pred, PredReg)) {
       MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::ia, true, Offset));
@@ -604,7 +604,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
   }
   if (!DoMerge && MBBI != MBB.end()) {
-    MachineBasicBlock::iterator NextMBBI = next(MBBI);
+    MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
     if (!isAM5 &&
         isMatchingDecrement(NextMBBI, Base, Bytes, Limit, Pred, PredReg)) {
       DoMerge = true;
diff --git a/lib/Target/ARM/NEONMoveFix.cpp b/lib/Target/ARM/NEONMoveFix.cpp
index 50abcf464e0..3c0414d2b64 100644
--- a/lib/Target/ARM/NEONMoveFix.cpp
+++ b/lib/Target/ARM/NEONMoveFix.cpp
@@ -51,7 +51,7 @@ bool NEONMoveFixPass::InsertMoves(MachineBasicBlock &MBB) {
   MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
   MachineBasicBlock::iterator NextMII;
   for (; MII != E; MII = NextMII) {
-    NextMII = next(MII);
+    NextMII = llvm::next(MII);
     MachineInstr *MI = &*MII;
     if (MI->getOpcode() == ARM::VMOVD &&
diff --git a/lib/Target/ARM/NEONPreAllocPass.cpp b/lib/Target/ARM/NEONPreAllocPass.cpp
index 206677b0a85..d9942c8c840 100644
--- a/lib/Target/ARM/NEONPreAllocPass.cpp
+++ b/lib/Target/ARM/NEONPreAllocPass.cpp
@@ -338,7 +338,7 @@ bool NEONPreAllocPass::PreAllocNEONRegisters(MachineBasicBlock &MBB) {
     if (!isNEONMultiRegOp(MI->getOpcode(), FirstOpnd, NumRegs, Offset, Stride))
       continue;
-    MachineBasicBlock::iterator NextI = next(MBBI);
+    MachineBasicBlock::iterator NextI = llvm::next(MBBI);
     for (unsigned R = 0; R < NumRegs; ++R) {
       MachineOperand &MO = MI->getOperand(FirstOpnd + R);
       assert(MO.isReg() && MO.getSubReg() == 0 && "unexpected operand");
diff --git a/lib/Target/ARM/Thumb1RegisterInfo.cpp b/lib/Target/ARM/Thumb1RegisterInfo.cpp
index 37adf37af4c..9f3816a862c 100644
--- a/lib/Target/ARM/Thumb1RegisterInfo.cpp
+++ b/lib/Target/ARM/Thumb1RegisterInfo.cpp
@@ -528,7 +528,7 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
         MI.getOperand(i+1).ChangeToImmediate(Mask);
       }
       Offset = (Offset - Mask * Scale);
-      MachineBasicBlock::iterator NII = next(II);
+      MachineBasicBlock::iterator NII = llvm::next(II);
       emitThumbRegPlusImmediate(MBB, NII, DestReg, DestReg, Offset, TII,
                                 *this, dl);
     } else {
diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp
index b2fd7b334d8..35359aa549a 100644
--- a/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -649,7 +649,7 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
   MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
   MachineBasicBlock::iterator NextMII;
   for (; MII != E; MII = NextMII) {
-    NextMII = next(MII);
+    NextMII = llvm::next(MII);
     MachineInstr *MI = &*MII;
     LiveCPSR = UpdateCPSRUse(*MI, LiveCPSR);
diff --git a/lib/Target/CBackend/CBackend.cpp b/lib/Target/CBackend/CBackend.cpp
index 9e4fe278c30..a52ca79fdb6 100644
--- a/lib/Target/CBackend/CBackend.cpp
+++ b/lib/Target/CBackend/CBackend.cpp
@@ -2573,7 +2573,7 @@ void CWriter::visitSwitchInst(SwitchInst &SI) {
     BasicBlock *Succ = cast(SI.getOperand(i+1));
     printPHICopiesForSuccessor (SI.getParent(), Succ, 2);
     printBranchToBlock(SI.getParent(), Succ, 2);
-    if (Function::iterator(Succ) == next(Function::iterator(SI.getParent())))
+    if (Function::iterator(Succ) == llvm::next(Function::iterator(SI.getParent())))
      Out << " break;\n";
   }
   Out << " }\n";
@@ -2593,7 +2593,7 @@ bool CWriter::isGotoCodeNecessary(BasicBlock *From, BasicBlock *To) {
   /// FIXME: This should be reenabled, but loop reordering safe!!
   return true;
-  if (next(Function::iterator(From)) != Function::iterator(To))
+  if (llvm::next(Function::iterator(From)) != Function::iterator(To))
     return true; // Not the direct successor, we need a goto.
   //isa(From->getTerminator())
diff --git a/lib/Target/MSP430/MSP430InstrInfo.cpp b/lib/Target/MSP430/MSP430InstrInfo.cpp
index b2f09c78831..ee7001f9705 100644
--- a/lib/Target/MSP430/MSP430InstrInfo.cpp
+++ b/lib/Target/MSP430/MSP430InstrInfo.cpp
@@ -270,8 +270,8 @@ bool MSP430InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
     }
     // If the block has any instructions after a JMP, delete them.
-    while (next(I) != MBB.end())
-      next(I)->eraseFromParent();
+    while (llvm::next(I) != MBB.end())
+      llvm::next(I)->eraseFromParent();
     Cond.clear();
     FBB = 0;
diff --git a/lib/Target/MSP430/MSP430RegisterInfo.cpp b/lib/Target/MSP430/MSP430RegisterInfo.cpp
index 92baad9ac10..717844070fd 100644
--- a/lib/Target/MSP430/MSP430RegisterInfo.cpp
+++ b/lib/Target/MSP430/MSP430RegisterInfo.cpp
@@ -193,10 +193,10 @@ MSP430RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
     // We need to materialize the offset via add instruction.
     unsigned DstReg = MI.getOperand(0).getReg();
     if (Offset < 0)
-      BuildMI(MBB, next(II), dl, TII.get(MSP430::SUB16ri), DstReg)
+      BuildMI(MBB, llvm::next(II), dl, TII.get(MSP430::SUB16ri), DstReg)
         .addReg(DstReg).addImm(-Offset);
     else
-      BuildMI(MBB, next(II), dl, TII.get(MSP430::ADD16ri), DstReg)
+      BuildMI(MBB, llvm::next(II), dl, TII.get(MSP430::ADD16ri), DstReg)
         .addReg(DstReg).addImm(Offset);
     return 0;
@@ -251,7 +251,7 @@ void MSP430RegisterInfo::emitPrologue(MachineFunction &MF) const {
       .addReg(MSP430::SPW);
     // Mark the FramePtr as live-in in every block except the entry.
-    for (MachineFunction::iterator I = next(MF.begin()), E = MF.end();
+    for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
          I != E; ++I)
       I->addLiveIn(MSP430::FPW);
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp
index d82d928d77e..d711d815eb9 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -454,8 +454,8 @@ bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
     }
     // If the block has any instructions after a JMP, delete them.
-    while (next(I) != MBB.end())
-      next(I)->eraseFromParent();
+    while (llvm::next(I) != MBB.end())
+      llvm::next(I)->eraseFromParent();
     Cond.clear();
     FBB = 0;
diff --git a/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/lib/Target/SystemZ/SystemZRegisterInfo.cpp
index 4d1c01f8b98..1318195ad00 100644
--- a/lib/Target/SystemZ/SystemZRegisterInfo.cpp
+++ b/lib/Target/SystemZ/SystemZRegisterInfo.cpp
@@ -247,7 +247,7 @@ void SystemZRegisterInfo::emitPrologue(MachineFunction &MF) const {
       .addReg(SystemZ::R15D);
     // Mark the FramePtr as live-in in every block except the entry.
-    for (MachineFunction::iterator I = next(MF.begin()), E = MF.end();
+    for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
          I != E; ++I)
       I->addLiveIn(SystemZ::R11D);
diff --git a/lib/Target/TargetData.cpp b/lib/Target/TargetData.cpp
index ec00f9e2e9a..a4b58a070cf 100644
--- a/lib/Target/TargetData.cpp
+++ b/lib/Target/TargetData.cpp
@@ -29,6 +29,7 @@
 #include "llvm/ADT/DenseMap.h"
 #include
 #include
+#include
 using namespace llvm;
 // Handle the Pass registration stuff necessary to use TargetData's.
diff --git a/lib/Target/X86/X86COFFMachineModuleInfo.h b/lib/Target/X86/X86COFFMachineModuleInfo.h
index afd552563d9..1bd31b85203 100644
--- a/lib/Target/X86/X86COFFMachineModuleInfo.h
+++ b/lib/Target/X86/X86COFFMachineModuleInfo.h
@@ -16,6 +16,7 @@
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/ADT/StringSet.h"
+#include "X86MachinefunctionInfo.h"
 namespace llvm {
   class X86MachineFunctionInfo;
diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp
index a2fe9b095de..044bd4be322 100644
--- a/lib/Target/X86/X86FloatingPoint.cpp
+++ b/lib/Target/X86/X86FloatingPoint.cpp
@@ -289,7 +289,7 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
         while (Start != BB.begin() && prior(Start) != PrevI) --Start;
         errs() << "Inserted instructions:\n\t";
         Start->print(errs(), &MF.getTarget());
-        while (++Start != next(I)) {}
+        while (++Start != llvm::next(I)) {}
       }
       dumpStack();
     );
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index a37013d25bd..d503da3d565 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -1587,8 +1587,8 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
       }
       // If the block has any instructions after a JMP, delete them.
-      while (next(I) != MBB.end())
-        next(I)->eraseFromParent();
+      while (llvm::next(I) != MBB.end())
+        llvm::next(I)->eraseFromParent();
       Cond.clear();
       FBB = 0;
       // Delete the JMP if it's equivalent to a fall-through.
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 9ef50c94e70..d96aafda603 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -723,7 +723,7 @@ void mergeSPUpdatesDown(MachineBasicBlock &MBB,
   if (MBBI == MBB.end()) return;
-  MachineBasicBlock::iterator NI = next(MBBI);
+  MachineBasicBlock::iterator NI = llvm::next(MBBI);
   if (NI == MBB.end()) return;
   unsigned Opc = NI->getOpcode();
@@ -757,7 +757,7 @@ static int mergeSPUpdates(MachineBasicBlock &MBB,
     return 0;
   MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
-  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : next(MBBI);
+  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI);
   unsigned Opc = PI->getOpcode();
   int Offset = 0;
@@ -983,7 +983,7 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
   }
   // Mark the FramePtr as live-in in every block except the entry.
-  for (MachineFunction::iterator I = next(MF.begin()), E = MF.end();
+  for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
        I != E; ++I)
     I->addLiveIn(FramePtr);
diff --git a/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp b/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp
index 13077fe642a..5acd6aa5e12 100644
--- a/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp
+++ b/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp
@@ -88,7 +88,7 @@ InlineHalfPowrs(const std::vector &HalfPowrs,
       if (!isa(Body->getTerminator()))
         break;
-      Instruction *NextInst = next(BasicBlock::iterator(Call));
+      Instruction *NextInst = llvm::next(BasicBlock::iterator(Call));
       // Inline the call, taking care of what code ends up where.
       NewBlock = SplitBlock(NextInst->getParent(), NextInst, this);
diff --git a/lib/Transforms/Utils/LowerSwitch.cpp b/lib/Transforms/Utils/LowerSwitch.cpp
index 8c18b591a49..743bb6e99bc 100644
--- a/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/lib/Transforms/Utils/LowerSwitch.cpp
@@ -244,7 +244,7 @@ unsigned LowerSwitch::Clusterify(CaseVector& Cases, SwitchInst *SI) {
   // Merge case into clusters
   if (Cases.size()>=2)
-    for (CaseItr I=Cases.begin(), J=next(Cases.begin()); J!=Cases.end(); ) {
+    for (CaseItr I=Cases.begin(), J=llvm::next(Cases.begin()); J!=Cases.end(); ) {
       int64_t nextValue = cast(J->Low)->getSExtValue();
       int64_t currentValue = cast(I->High)->getSExtValue();
       BasicBlock* nextBB = J->BB;
diff --git a/lib/VMCore/BasicBlock.cpp b/lib/VMCore/BasicBlock.cpp
index 23d0557dc74..c7f7f53e230 100644
--- a/lib/VMCore/BasicBlock.cpp
+++ b/lib/VMCore/BasicBlock.cpp
@@ -262,7 +262,7 @@ BasicBlock *BasicBlock::splitBasicBlock(iterator I, const Twine &BBName) {
   assert(I != InstList.end() &&
          "Trying to get me to create degenerate basic block!");
-  BasicBlock *InsertBefore = next(Function::iterator(this))
+  BasicBlock *InsertBefore = llvm::next(Function::iterator(this))
                                .getNodePtrUnchecked();
   BasicBlock *New = BasicBlock::Create(getContext(), BBName, getParent(), InsertBefore);
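
Editor's note (not part of the patch): below is a minimal, hypothetical sketch of the kind of ambiguity this commit guards against. "mylib::next" stands in for llvm::next (defined in llvm/ADT/STLExtras.h); the exact failure mode depends on the standard library, since it relies on the container's iterator being a class type associated with namespace std, so that argument-dependent lookup also finds std::next (new in C++0x/C++11).

    // next_ambiguity.cpp -- compile with: g++ -std=c++11 next_ambiguity.cpp
    #include <iterator>   // declares std::next in C++11
    #include <iostream>
    #include <vector>

    namespace mylib {
      // Same shape as llvm::next from llvm/ADT/STLExtras.h.
      template <typename ItTy>
      inline ItTy next(ItTy it) { return ++it; }
    }

    using namespace mylib;

    int main() {
      std::vector<int> v;
      v.push_back(1); v.push_back(2); v.push_back(3);
      std::vector<int>::iterator I = v.begin();

      // Unqualified call: ordinary lookup finds mylib::next, and ADL on a
      // std:: vector iterator typically also finds std::next under C++11,
      // so this line is usually rejected as ambiguous:
      //   std::vector<int>::iterator J = next(I);   // error: ambiguous
      // Qualifying the call, as this patch does with llvm::next throughout
      // LLVM, removes the ambiguity:
      std::vector<int>::iterator J = mylib::next(I);

      std::cout << *J << '\n';   // prints 2
      return 0;
    }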