Use pre-increment instead of post-increment when the result is not used.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@106542 91177308-0d34-0410-b5e6-96231b3b80d8
parent 30f30e4386
commit fe60104ac9
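Editor's note: the rationale for this cleanup is that post-increment on a class type (an iterator, or a counter wrapper such as the statistics bumped throughout this diff) must construct and return a copy of the old value, which is pure overhead when that value is discarded; pre-increment simply mutates in place. The sketch below is illustrative only (the Counter class and its members are hypothetical, not part of this commit), but it shows what the two spellings cost for a non-trivial type.

#include <cstdio>

// Hypothetical counter type, standing in for an iterator or statistic-like
// class: pre-increment mutates in place, post-increment must copy first.
class Counter {
  unsigned Value = 0;
public:
  Counter &operator++() {      // pre-increment: no temporary
    ++Value;
    return *this;
  }
  Counter operator++(int) {    // post-increment: must return the old value
    Counter Old = *this;       // extra copy, wasted if the result is unused
    ++Value;
    return Old;
  }
  unsigned get() const { return Value; }
};

int main() {
  Counter NumWidgets;
  ++NumWidgets;  // preferred when the previous value is not needed
  NumWidgets++;  // legal, but constructs and discards a temporary Counter
  std::printf("NumWidgets = %u\n", NumWidgets.get());
  return 0;
}

For plain integer statistics the compiler emits identical code either way, so for most of the counters below the change is a style and consistency cleanup rather than an optimization.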
@@ -456,7 +456,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
   // Okay, we have a cache entry. If we know it is not dirty, just return it
   // with no computation.
   if (!CacheP.second) {
-    NumCacheNonLocal++;
+    ++NumCacheNonLocal;
     return Cache;
   }
 
@@ -478,7 +478,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
     BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
     for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
       DirtyBlocks.push_back(*PI);
-    NumUncacheNonLocal++;
+    ++NumUncacheNonLocal;
   }
 
   // isReadonlyCall - If this is a read-only call, we can be more aggressive.
@@ -303,8 +303,8 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
       RetVal = IfConvertSimple(BBI, Kind);
       DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
       if (RetVal) {
-        if (isFalse) NumSimpleFalse++;
-        else NumSimple++;
+        if (isFalse) ++NumSimpleFalse;
+        else ++NumSimple;
       }
       break;
     }
@@ -330,11 +330,11 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
       DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
       if (RetVal) {
         if (isFalse) {
-          if (isRev) NumTriangleFRev++;
-          else NumTriangleFalse++;
+          if (isRev) ++NumTriangleFRev;
+          else ++NumTriangleFalse;
         } else {
-          if (isRev) NumTriangleRev++;
-          else NumTriangle++;
+          if (isRev) ++NumTriangleRev;
+          else ++NumTriangle;
         }
       }
       break;
@@ -346,7 +346,7 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
             << BBI.FalseBB->getNumber() << ") ");
       RetVal = IfConvertDiamond(BBI, Kind, NumDups, NumDups2);
       DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
-      if (RetVal) NumDiamonds++;
+      if (RetVal) ++NumDiamonds;
       break;
     }
     }
@@ -1317,7 +1317,7 @@ void IfConverter::PredicateBlock(BBInfo &BBI,
   BBI.IsAnalyzed = false;
   BBI.NonPredSize = 0;
 
-  NumIfConvBBs++;
+  ++NumIfConvBBs;
 }
 
 /// CopyAndPredicateBlock - Copy and predicate instructions from source BB to
@@ -1373,7 +1373,7 @@ void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
   ToBBI.ClobbersPred |= FromBBI.ClobbersPred;
   ToBBI.IsAnalyzed = false;
 
-  NumDupBBs++;
+  ++NumDupBBs;
 }
 
 /// MergeBlocks - Move all instructions from FromBB to the end of ToBB.
@@ -836,7 +836,7 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) {
   if (IntervalSSMap.count(CurrLI->reg))
     IntervalSSMap[NewVReg] = IntervalSSMap[CurrLI->reg];
 
-  NumRenumbers++;
+  ++NumRenumbers;
 }
 
 bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
@@ -1192,7 +1192,7 @@ unsigned PreAllocSplitting::getNumberOfNonSpills(
     int StoreFrameIndex;
     unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
     if (StoreVReg != Reg || StoreFrameIndex != FrameIndex)
-      NonSpills++;
+      ++NonSpills;
 
     int DefIdx = (*UI)->findRegisterDefOperandIdx(Reg);
     if (DefIdx != -1 && (*UI)->isRegTiedToUseOperand(DefIdx))
@@ -1255,7 +1255,7 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
       (*LI)->removeValNo(CurrVN);
       DefMI->eraseFromParent();
       VNUseCount.erase(CurrVN);
-      NumDeadSpills++;
+      ++NumDeadSpills;
       changed = true;
       continue;
     }
@@ -1328,7 +1328,7 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
       if (VI->second.erase(use))
        VI->second.insert(NewMI);
 
-      NumDeadSpills++;
+      ++NumDeadSpills;
       changed = true;
       continue;
     }
@@ -1350,7 +1350,7 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
       LIs->RemoveMachineInstrFromMaps(DefMI);
       (*LI)->removeValNo(CurrVN);
       DefMI->eraseFromParent();
-      NumDeadSpills++;
+      ++NumDeadSpills;
       changed = true;
     }
   }
@@ -350,7 +350,7 @@ void ARMCodeEmitter::emitInstruction(const MachineInstr &MI) {
 
   MCE.processDebugLoc(MI.getDebugLoc(), true);
 
-  NumEmitted++; // Keep track of the # of mi's emitted
+  ++NumEmitted; // Keep track of the # of mi's emitted
   switch (MI.getDesc().TSFlags & ARMII::FormMask) {
   default: {
     llvm_unreachable("Unhandled instruction encoding format!");
@@ -407,7 +407,7 @@ void ARMConstantIslands::DoInitialPlacement(MachineFunction &MF,
     std::vector<CPEntry> CPEs;
     CPEs.push_back(CPEntry(CPEMI, i));
     CPEntries.push_back(CPEs);
-    NumCPEs++;
+    ++NumCPEs;
     DEBUG(errs() << "Moved CPI#" << i << " to end of function as #" << i
                  << "\n");
   }
@@ -725,7 +725,7 @@ MachineBasicBlock *ARMConstantIslands::SplitBlockBeforeInstr(MachineInstr *MI) {
   // correspond to anything in the source.
   unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B;
   BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB);
-  NumSplit++;
+  ++NumSplit;
 
   // Update the CFG. All succs of OrigBB are now succs of NewBB.
   while (!OrigBB->succ_empty()) {
@@ -948,7 +948,7 @@ bool ARMConstantIslands::DecrementOldEntry(unsigned CPI, MachineInstr *CPEMI) {
   if (--CPE->RefCount == 0) {
     RemoveDeadCPEMI(CPEMI);
     CPE->CPEMI = NULL;
-    NumCPEs--;
+    --NumCPEs;
     return true;
   }
   return false;
@@ -1249,7 +1249,7 @@ bool ARMConstantIslands::HandleConstantPoolUser(MachineFunction &MF,
   U.CPEMI = BuildMI(NewIsland, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
                 .addImm(ID).addConstantPoolIndex(CPI).addImm(Size);
   CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
-  NumCPEs++;
+  ++NumCPEs;
 
   BBOffsets[NewIsland->getNumber()] = BBOffsets[NewMBB->getNumber()];
   // Compensate for .align 2 in thumb mode.
@@ -1372,7 +1372,7 @@ ARMConstantIslands::FixUpUnconditionalBr(MachineFunction &MF, ImmBranch &Br) {
   BBSizes[MBB->getNumber()] += 2;
   AdjustBBOffsetsAfter(MBB, 2);
   HasFarJump = true;
-  NumUBrFixed++;
+  ++NumUBrFixed;
 
   DEBUG(errs() << " Changed B to long jump " << *MI);
 
@@ -1405,7 +1405,7 @@ ARMConstantIslands::FixUpConditionalBr(MachineFunction &MF, ImmBranch &Br) {
   MachineInstr *BMI = &MBB->back();
   bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
 
-  NumCBrFixed++;
+  ++NumCBrFixed;
   if (BMI != MI) {
     if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
         BMI->getOpcode() == Br.UncondBr) {
@@ -131,30 +131,30 @@ namespace {
 static int getLoadStoreMultipleOpcode(int Opcode) {
   switch (Opcode) {
   case ARM::LDR:
-    NumLDMGened++;
+    ++NumLDMGened;
     return ARM::LDM;
   case ARM::STR:
-    NumSTMGened++;
+    ++NumSTMGened;
     return ARM::STM;
   case ARM::t2LDRi8:
   case ARM::t2LDRi12:
-    NumLDMGened++;
+    ++NumLDMGened;
     return ARM::t2LDM;
   case ARM::t2STRi8:
   case ARM::t2STRi12:
-    NumSTMGened++;
+    ++NumSTMGened;
     return ARM::t2STM;
   case ARM::VLDRS:
-    NumVLDMGened++;
+    ++NumVLDMGened;
     return ARM::VLDMS;
   case ARM::VSTRS:
-    NumVSTMGened++;
+    ++NumVSTMGened;
     return ARM::VSTMS;
   case ARM::VLDRD:
-    NumVLDMGened++;
+    ++NumVLDMGened;
     return ARM::VLDMD;
   case ARM::VSTRD:
-    NumVSTMGened++;
+    ++NumVSTMGened;
     return ARM::VSTMD;
   default: llvm_unreachable("Unhandled opcode!");
   }
@@ -319,7 +319,7 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
 
   // Try to do the merge.
   MachineBasicBlock::iterator Loc = memOps[insertAfter].MBBI;
-  Loc++;
+  ++Loc;
   if (!MergeOps(MBB, Loc, Offset, Base, BaseKill, Opcode,
                 Pred, PredReg, Scratch, dl, Regs))
     return;
@@ -1082,7 +1082,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
         CurrPred = Pred;
         CurrPredReg = PredReg;
         MemOps.push_back(MemOpQueueEntry(Offset, Reg, isKill, Position, MBBI));
-        NumMemOps++;
+        ++NumMemOps;
         Advance = true;
       } else {
         if (Clobber) {
@@ -1096,7 +1096,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
           if (Offset > MemOps.back().Offset) {
             MemOps.push_back(MemOpQueueEntry(Offset, Reg, isKill,
                                              Position, MBBI));
-            NumMemOps++;
+            ++NumMemOps;
             Advance = true;
           } else {
             for (MemOpQueueIter I = MemOps.begin(), E = MemOps.end();
@@ -1104,7 +1104,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
              if (Offset < I->Offset) {
                MemOps.insert(I, MemOpQueueEntry(Offset, Reg, isKill,
                                                 Position, MBBI));
-                NumMemOps++;
+                ++NumMemOps;
                Advance = true;
                break;
              } else if (Offset == I->Offset) {
@@ -138,7 +138,7 @@ bool Emitter<CodeEmitter>::runOnMachineFunction(MachineFunction &MF) {
       // MOVPC32r is basically a call plus a pop instruction.
       if (Desc.getOpcode() == X86::MOVPC32r)
         emitInstruction(*I, &II->get(X86::POP32r));
-      NumEmitted++; // Keep track of the # of mi's emitted
+      ++NumEmitted; // Keep track of the # of mi's emitted
     }
   }
   } while (MCE.finishFunction(MF));
@@ -133,7 +133,7 @@ namespace {
 
       // Emit an fxch to update the runtime processors version of the state.
       BuildMI(*MBB, I, dl, TII->get(X86::XCH_F)).addReg(STReg);
-      NumFXCH++;
+      ++NumFXCH;
     }
 
     void duplicateToTop(unsigned RegNo, unsigned AsReg, MachineInstr *I) {
@@ -1021,7 +1021,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
       // StackTop can be 1 if a FpSET_ST0_* was before this. Exchange them.
       if (StackTop == 1) {
         BuildMI(*MBB, I, dl, TII->get(X86::XCH_F)).addReg(X86::ST1);
-        NumFXCH++;
+        ++NumFXCH;
         StackTop = 0;
         break;
       }
@@ -1058,7 +1058,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
       // StackTop can be 1 if a FpSET_ST0_* was before this. Exchange them.
       if (StackTop == 1) {
         BuildMI(*MBB, I, dl, TII->get(X86::XCH_F)).addReg(X86::ST1);
-        NumFXCH++;
+        ++NumFXCH;
         StackTop = 0;
         break;
       }
@@ -28,7 +28,7 @@ namespace {
     Hello() : FunctionPass(&ID) {}
 
     virtual bool runOnFunction(Function &F) {
-      HelloCounter++;
+      ++HelloCounter;
       errs() << "Hello: ";
      errs().write_escaped(F.getName()) << '\n';
      return false;
@@ -46,7 +46,7 @@ namespace {
     Hello2() : FunctionPass(&ID) {}
 
     virtual bool runOnFunction(Function &F) {
-      HelloCounter++;
+      ++HelloCounter;
       errs() << "Hello: ";
      errs().write_escaped(F.getName()) << '\n';
      return false;
@@ -107,12 +107,12 @@ CallGraphNode *SRETPromotion::PromoteReturn(CallGraphNode *CGN) {
   // Check if it is ok to perform this promotion.
   if (isSafeToUpdateAllCallers(F) == false) {
     DEBUG(dbgs() << "SretPromotion: Not all callers can be updated\n");
-    NumRejectedSRETUses++;
+    ++NumRejectedSRETUses;
     return 0;
   }
 
   DEBUG(dbgs() << "SretPromotion: sret argument will be promoted\n");
-  NumSRET++;
+  ++NumSRET;
   // [1] Replace use of sret parameter
   AllocaInst *TheAlloca = new AllocaInst(STy, NULL, "mrv",
                                          F->getEntryBlock().begin());
@@ -143,7 +143,7 @@ bool OptimalEdgeProfiler::runOnModule(Module &M) {
     ProfileInfo::Edge edge = ProfileInfo::getEdge(0,entry);
     if (!std::binary_search(MST.begin(), MST.end(), edge)) {
       printEdgeCounter(edge,entry,i);
-      IncrementCounterInBlock(entry, i, Counters); NumEdgesInserted++;
+      IncrementCounterInBlock(entry, i, Counters); ++NumEdgesInserted;
       Initializer[i++] = (Zero);
     } else{
       Initializer[i++] = (Uncounted);
@@ -166,7 +166,7 @@ bool OptimalEdgeProfiler::runOnModule(Module &M) {
       ProfileInfo::Edge edge = ProfileInfo::getEdge(BB,0);
       if (!std::binary_search(MST.begin(), MST.end(), edge)) {
         printEdgeCounter(edge,BB,i);
-        IncrementCounterInBlock(BB, i, Counters); NumEdgesInserted++;
+        IncrementCounterInBlock(BB, i, Counters); ++NumEdgesInserted;
         Initializer[i++] = (Zero);
       } else{
         Initializer[i++] = (Uncounted);
@@ -189,11 +189,11 @@ bool OptimalEdgeProfiler::runOnModule(Module &M) {
         if (TI->getNumSuccessors() == 1) {
           // Insert counter at the start of the block
           printEdgeCounter(edge,BB,i);
-          IncrementCounterInBlock(BB, i, Counters); NumEdgesInserted++;
+          IncrementCounterInBlock(BB, i, Counters); ++NumEdgesInserted;
         } else {
           // Insert counter at the start of the block
           printEdgeCounter(edge,Succ,i);
-          IncrementCounterInBlock(Succ, i, Counters); NumEdgesInserted++;
+          IncrementCounterInBlock(Succ, i, Counters); ++NumEdgesInserted;
         }
         Initializer[i++] = (Zero);
       } else {
@@ -83,7 +83,7 @@ bool ADCE::runOnFunction(Function& F) {
 
   for (SmallVector<Instruction*, 1024>::iterator I = worklist.begin(),
        E = worklist.end(); I != E; ++I) {
-    NumRemoved++;
+    ++NumRemoved;
     (*I)->eraseFromParent();
   }
 
@@ -218,7 +218,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
         isElidable(DepStore)) {
       // Delete the store and now-dead instructions that feed it.
       DeleteDeadInstruction(DepStore);
-      NumFastStores++;
+      ++NumFastStores;
       MadeChange = true;
 
       // DeleteDeadInstruction can delete the current instruction in loop
@@ -249,7 +249,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
        BBI = BB.begin();
      else if (BBI != BB.begin()) // Revisit this instruction if possible.
        --BBI;
-      NumFastStores++;
+      ++NumFastStores;
      MadeChange = true;
      continue;
    }
@@ -270,7 +270,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
        BBI = BB.begin();
      else if (BBI != BB.begin()) // Revisit this instruction if possible.
        --BBI;
-      NumFastStores++;
+      ++NumFastStores;
      MadeChange = true;
      continue;
    }
@@ -303,7 +303,7 @@ bool DSE::handleFreeWithNonTrivialDependency(Instruction *F, MemDepResult Dep) {
 
   // DCE instructions only used to calculate that store
   DeleteDeadInstruction(Dependency);
-  NumFastStores++;
+  ++NumFastStores;
   return true;
 }
 
@@ -349,9 +349,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
       if (deadPointers.count(pointerOperand)) {
         // DCE instructions only used to calculate that store.
         Instruction *Dead = BBI;
-        BBI++;
+        ++BBI;
         DeleteDeadInstruction(Dead, &deadPointers);
-        NumFastStores++;
+        ++NumFastStores;
         MadeChange = true;
         continue;
       }
@@ -371,9 +371,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
       // However, if this load is unused and not volatile, we can go ahead and
       // remove it, and not have to worry about it making our pointer undead!
       if (L->use_empty() && !L->isVolatile()) {
-        BBI++;
+        ++BBI;
         DeleteDeadInstruction(L, &deadPointers);
-        NumFastOther++;
+        ++NumFastOther;
         MadeChange = true;
         continue;
       }
@@ -391,9 +391,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
 
      // Dead alloca's can be DCE'd when we reach them
      if (A->use_empty()) {
-        BBI++;
+        ++BBI;
        DeleteDeadInstruction(A, &deadPointers);
-        NumFastOther++;
+        ++NumFastOther;
        MadeChange = true;
      }
 
@@ -426,9 +426,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
                                getPointerSize(*I));
 
        if (A == AliasAnalysis::ModRef)
-          modRef++;
+          ++modRef;
        else
-          other++;
+          ++other;
 
        if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
          dead.push_back(*I);
@@ -442,9 +442,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
    } else if (isInstructionTriviallyDead(BBI)) {
      // For any non-memory-affecting non-terminators, DCE them as we reach them
      Instruction *Inst = BBI;
-      BBI++;
+      ++BBI;
      DeleteDeadInstruction(Inst, &deadPointers);
-      NumFastOther++;
+      ++NumFastOther;
      MadeChange = true;
      continue;
    }
@@ -497,7 +497,7 @@ bool DSE::RemoveUndeadPointers(Value *killPointer, uint64_t killPointerSize,
      // Remove it!
      ++BBI;
      DeleteDeadInstruction(S, &deadPointers);
-      NumFastStores++;
+      ++NumFastStores;
      MadeChange = true;
 
      continue;
@@ -1501,7 +1501,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
       MD->invalidateCachedPointerInfo(V);
     VN.erase(LI);
     toErase.push_back(LI);
-    NumGVNLoad++;
+    ++NumGVNLoad;
     return true;
   }
 
@@ -1724,7 +1724,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
       MD->invalidateCachedPointerInfo(V);
     VN.erase(LI);
     toErase.push_back(LI);
-    NumPRELoad++;
+    ++NumPRELoad;
     return true;
   }
 
@@ -1785,7 +1785,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
       MD->invalidateCachedPointerInfo(AvailVal);
     VN.erase(L);
     toErase.push_back(L);
-    NumGVNLoad++;
+    ++NumGVNLoad;
     return true;
   }
 
@@ -1831,7 +1831,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
       MD->invalidateCachedPointerInfo(StoredVal);
     VN.erase(L);
     toErase.push_back(L);
-    NumGVNLoad++;
+    ++NumGVNLoad;
     return true;
   }
 
@@ -1861,7 +1861,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
       MD->invalidateCachedPointerInfo(DepLI);
     VN.erase(L);
     toErase.push_back(L);
-    NumGVNLoad++;
+    ++NumGVNLoad;
     return true;
   }
 
@@ -1872,7 +1872,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
     L->replaceAllUsesWith(UndefValue::get(L->getType()));
     VN.erase(L);
     toErase.push_back(L);
-    NumGVNLoad++;
+    ++NumGVNLoad;
     return true;
   }
 
@@ -1883,7 +1883,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
     L->replaceAllUsesWith(UndefValue::get(L->getType()));
     VN.erase(L);
     toErase.push_back(L);
-    NumGVNLoad++;
+    ++NumGVNLoad;
     return true;
   }
 }
@@ -2015,7 +2015,7 @@ bool GVN::runOnFunction(Function& F) {
     BasicBlock *BB = FI;
     ++FI;
     bool removedBlock = MergeBlockIntoPredecessor(BB, this);
-    if (removedBlock) NumGVNBlocks++;
+    if (removedBlock) ++NumGVNBlocks;
 
     Changed |= removedBlock;
   }
@@ -2142,12 +2142,12 @@ bool GVN::performPRE(Function &F) {
         localAvail[*PI]->table.find(ValNo);
       if (predV == localAvail[*PI]->table.end()) {
         PREPred = *PI;
-        NumWithout++;
+        ++NumWithout;
       } else if (predV->second == CurInst) {
         NumWithout = 2;
       } else {
         predMap[*PI] = predV->second;
-        NumWith++;
+        ++NumWith;
       }
     }
 
@@ -2202,7 +2202,7 @@ bool GVN::performPRE(Function &F) {
     PREInstr->setName(CurInst->getName() + ".pre");
     predMap[PREPred] = PREInstr;
     VN.add(PREInstr, ValNo);
-    NumGVNPRE++;
+    ++NumGVNPRE;
 
     // Update the availability map to include the new instruction.
     localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));
@@ -83,7 +83,7 @@ bool LoopDeletion::IsLoopDead(Loop* L,
       if (!L->makeLoopInvariant(I, Changed, Preheader->getTerminator()))
         return false;
 
-      BI++;
+      ++BI;
     }
 
   // Make sure that no instructions in the block have potential side-effects.
@@ -176,7 +176,7 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) {
   BasicBlock::iterator BI = exitBlock->begin();
   while (PHINode* P = dyn_cast<PHINode>(BI)) {
     P->replaceUsesOfWith(exitingBlock, preheader);
-    BI++;
+    ++BI;
   }
 
   // Update the dominator tree and remove the instructions and blocks that will
@@ -226,7 +226,7 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) {
   LPM.deleteLoopFromQueue(L);
   Changed = true;
 
-  NumDeleted++;
+  ++NumDeleted;
 
   return Changed;
 }
@@ -649,7 +649,7 @@ bool LoopIndexSplit::updateLoopIterationSpace() {
       }
     }
   }
-  NumRestrictBounds++;
+  ++NumRestrictBounds;
   return true;
 }
 
@@ -1159,7 +1159,7 @@ bool LoopIndexSplit::splitLoop() {
                                  B_SplitCondition, B_IndVar, B_IndVarIncrement,
                                  BLoop, EVOpNum);
 
-  NumIndexSplit++;
+  ++NumIndexSplit;
   return true;
 }
 
@@ -147,7 +147,7 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
      continue; // PHI nodes don't count.
    if (isa<DbgInfoIntrinsic>(OI))
      continue; // Debug intrinsics don't count as size.
-    Size++;
+    ++Size;
  }
 
  if (Size > MAX_HEADER_SIZE)
@@ -263,7 +263,7 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
 
   preserveCanonicalLoopForm(LPM);
 
-  NumRotated++;
+  ++NumRotated;
   return true;
 }
 
@@ -632,7 +632,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
   // Remove the memcpy
   MD.removeInstruction(cpy);
   cpy->eraseFromParent();
-  NumMemCpyInstr++;
+  ++NumMemCpyInstr;
 
   return true;
 }
@@ -710,7 +710,7 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
     if (MD.getDependency(C) == dep) {
       MD.removeInstruction(M);
       M->eraseFromParent();
-      NumMemCpyInstr++;
+      ++NumMemCpyInstr;
       return true;
     }
 
@@ -926,7 +926,7 @@ void SROA::DoScalarReplacement(AllocaInst *AI,
   DeleteDeadInstructions();
   AI->eraseFromParent();
 
-  NumReplaced++;
+  ++NumReplaced;
 }
 
 /// DeleteDeadInstructions - Erase instructions on the DeadInstrs list,
@@ -192,7 +192,7 @@ ReprocessLoop:
   if (!Preheader) {
     Preheader = InsertPreheaderForLoop(L);
     if (Preheader) {
-      NumInserted++;
+      ++NumInserted;
       Changed = true;
     }
   }
@@ -215,7 +215,7 @@ ReprocessLoop:
       // allowed.
      if (!L->contains(*PI)) {
        if (RewriteLoopExitBlock(L, ExitBlock)) {
-          NumInserted++;
+          ++NumInserted;
          Changed = true;
        }
        break;
@@ -244,7 +244,7 @@ ReprocessLoop:
     // loop header.
     LoopLatch = InsertUniqueBackedgeBlock(L, Preheader);
     if (LoopLatch) {
-      NumInserted++;
+      ++NumInserted;
       Changed = true;
     }
   }
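A few of the hunks above bump iterators rather than counters (Loc++, BBI++, BI++); for those, pre-increment avoids copying the iterator on every step, and it matches the conventional loop form the surrounding code already uses. A standalone sketch, using std::list rather than any LLVM type, purely for illustration:

#include <cstdio>
#include <list>

int main() {
  std::list<int> Values = {1, 2, 3, 4};

  // Conventional iterator loop: ++I increments in place, while I++ would be
  // required to materialize a copy of the iterator's previous value.
  for (std::list<int>::iterator I = Values.begin(), E = Values.end();
       I != E; ++I)
    std::printf("%d\n", *I);

  return 0;
}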