Use range-based for loops in ASan, TSan and MSan

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@209834 91177308-0d34-0410-b5e6-96231b3b80d8
Alexey Samsonov 2014-05-29 18:40:48 +00:00
parent fa68048322
commit d4d04199ac
3 changed files with 81 additions and 97 deletions
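The patch mechanically replaces index- and iterator-based loops with C++11 range-based for loops in the ASan, MSan and TSan instrumentation passes. As a standalone illustration of the pattern (plain C++ with made-up data, not code from the patch):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> Values = {1, 2, 3};

      // Old style: explicit index into the container.
      for (std::size_t i = 0, n = Values.size(); i != n; i++)
        printf("%d\n", Values[i]);

      // New style: range-based loop over the same elements.
      for (int V : Values)
        printf("%d\n", V);
      return 0;
    }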

lib/Transforms/Instrumentation/AddressSanitizer.cpp

@@ -225,8 +225,7 @@ class SetOfDynamicallyInitializedGlobals {
M.getNamedMetadata("llvm.asan.dynamically_initialized_globals");
if (!DynamicGlobals)
return;
- for (int i = 0, n = DynamicGlobals->getNumOperands(); i < n; ++i) {
-   MDNode *MDN = DynamicGlobals->getOperand(i);
+ for (const auto MDN : DynamicGlobals->operands()) {
assert(MDN->getNumOperands() == 1);
Value *VG = MDN->getOperand(0);
// The optimizer may optimize away a global entirely, in which case we
@@ -1009,10 +1008,9 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
SmallVector<GlobalVariable *, 16> GlobalsToChange;
- for (Module::GlobalListType::iterator G = M.global_begin(),
-      E = M.global_end(); G != E; ++G) {
-   if (ShouldInstrumentGlobal(G))
-     GlobalsToChange.push_back(G);
+ for (auto &G : M.globals()) {
+   if (ShouldInstrumentGlobal(&G))
+     GlobalsToChange.push_back(&G);
}
Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
@@ -1305,8 +1303,8 @@ bool AddressSanitizer::InjectCoverage(Function &F,
(unsigned)ClCoverageBlockThreshold < AllBlocks.size()) {
InjectCoverageAtBlock(F, F.getEntryBlock());
} else {
- for (size_t i = 0, n = AllBlocks.size(); i < n; i++)
-   InjectCoverageAtBlock(F, *AllBlocks[i]);
+ for (auto BB : AllBlocks)
+   InjectCoverageAtBlock(F, *BB);
}
return true;
}
@@ -1339,29 +1337,28 @@ bool AddressSanitizer::runOnFunction(Function &F) {
unsigned Alignment;
// Fill the set of memory operations to instrument.
- for (Function::iterator FI = F.begin(), FE = F.end();
-      FI != FE; ++FI) {
-   AllBlocks.push_back(FI);
+ for (auto &BB : F) {
+   AllBlocks.push_back(&BB);
TempsToInstrument.clear();
int NumInsnsPerBB = 0;
- for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
-      BI != BE; ++BI) {
-   if (LooksLikeCodeInBug11395(BI)) return false;
-   if (Value *Addr = isInterestingMemoryAccess(BI, &IsWrite, &Alignment)) {
+ for (auto &Inst : BB) {
+   if (LooksLikeCodeInBug11395(&Inst)) return false;
+   if (Value *Addr =
+           isInterestingMemoryAccess(&Inst, &IsWrite, &Alignment)) {
if (ClOpt && ClOptSameTemp) {
if (!TempsToInstrument.insert(Addr))
continue; // We've seen this temp in the current BB.
}
} else if (ClInvalidPointerPairs &&
- isInterestingPointerComparisonOrSubtraction(BI)) {
-   PointerComparisonsOrSubtracts.push_back(BI);
+ isInterestingPointerComparisonOrSubtraction(&Inst)) {
+   PointerComparisonsOrSubtracts.push_back(&Inst);
continue;
- } else if (isa<MemIntrinsic>(BI)) {
+ } else if (isa<MemIntrinsic>(Inst)) {
// ok, take it.
} else {
- if (isa<AllocaInst>(BI))
+ if (isa<AllocaInst>(Inst))
NumAllocas++;
- CallSite CS(BI);
+ CallSite CS(&Inst);
if (CS) {
// A call inside BB.
TempsToInstrument.clear();
@@ -1370,7 +1367,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
}
continue;
}
- ToInstrument.push_back(BI);
+ ToInstrument.push_back(&Inst);
NumInsnsPerBB++;
if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB)
break;
@@ -1395,8 +1392,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
// Instrument.
int NumInstrumented = 0;
- for (size_t i = 0, n = ToInstrument.size(); i != n; i++) {
-   Instruction *Inst = ToInstrument[i];
+ for (auto Inst : ToInstrument) {
if (ClDebugMin < 0 || ClDebugMax < 0 ||
(NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
if (isInterestingMemoryAccess(Inst, &IsWrite, &Alignment))
@@ -1412,14 +1408,13 @@ bool AddressSanitizer::runOnFunction(Function &F) {
// We must unpoison the stack before every NoReturn call (throw, _exit, etc).
// See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
- for (size_t i = 0, n = NoReturnCalls.size(); i != n; i++) {
-   Instruction *CI = NoReturnCalls[i];
+ for (auto CI : NoReturnCalls) {
IRBuilder<> IRB(CI);
IRB.CreateCall(AsanHandleNoReturnFunc);
}
- for (size_t i = 0, n = PointerComparisonsOrSubtracts.size(); i != n; i++) {
-   instrumentPointerComparisonOrSubtraction(PointerComparisonsOrSubtracts[i]);
+ for (auto Inst : PointerComparisonsOrSubtracts) {
+   instrumentPointerComparisonOrSubtraction(Inst);
NumInstrumented++;
}
@@ -1532,12 +1527,10 @@ void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined(
}
static DebugLoc getFunctionEntryDebugLocation(Function &F) {
- BasicBlock::iterator I = F.getEntryBlock().begin(),
-                      E = F.getEntryBlock().end();
- for (; I != E; ++I)
-   if (!isa<AllocaInst>(I))
-     break;
- return I->getDebugLoc();
+ for (const auto &Inst : F.getEntryBlock())
+   if (!isa<AllocaInst>(Inst))
+     return Inst.getDebugLoc();
+ return DebugLoc();
}
void FunctionStackPoisoner::poisonStack() {
@@ -1551,8 +1544,7 @@ void FunctionStackPoisoner::poisonStack() {
SmallVector<ASanStackVariableDescription, 16> SVD;
SVD.reserve(AllocaVec.size());
- for (size_t i = 0, n = AllocaVec.size(); i < n; i++) {
-   AllocaInst *AI = AllocaVec[i];
+ for (AllocaInst *AI : AllocaVec) {
ASanStackVariableDescription D = { AI->getName().data(),
getAllocaSizeInBytes(AI),
AI->getAlignment(), AI, 0};
@@ -1607,8 +1599,7 @@ void FunctionStackPoisoner::poisonStack() {
// Insert poison calls for lifetime intrinsics for alloca.
bool HavePoisonedAllocas = false;
- for (size_t i = 0, n = AllocaPoisonCallVec.size(); i < n; i++) {
-   const AllocaPoisonCall &APC = AllocaPoisonCallVec[i];
+ for (const auto &APC : AllocaPoisonCallVec) {
assert(APC.InsBefore);
assert(APC.AI);
IRBuilder<> IRB(APC.InsBefore);
@@ -1617,11 +1608,10 @@ void FunctionStackPoisoner::poisonStack() {
}
// Replace Alloca instructions with base+offset.
- for (size_t i = 0, n = SVD.size(); i < n; i++) {
-   AllocaInst *AI = SVD[i].AI;
+ for (const auto &Desc : SVD) {
+   AllocaInst *AI = Desc.AI;
Value *NewAllocaPtr = IRB.CreateIntToPtr(
- IRB.CreateAdd(LocalStackBase,
-               ConstantInt::get(IntptrTy, SVD[i].Offset)),
+ IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
AI->getType());
replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB);
AI->replaceAllUsesWith(NewAllocaPtr);
@@ -1654,8 +1644,7 @@ void FunctionStackPoisoner::poisonStack() {
poisonRedZones(L.ShadowBytes, IRB, ShadowBase, true);
// (Un)poison the stack before all ret instructions.
- for (size_t i = 0, n = RetVec.size(); i < n; i++) {
-   Instruction *Ret = RetVec[i];
+ for (auto Ret : RetVec) {
IRBuilder<> IRBRet(Ret);
// Mark the current frame as retired.
IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
@@ -1709,8 +1698,8 @@ void FunctionStackPoisoner::poisonStack() {
}
// We are done. Remove the old unused alloca instructions.
- for (size_t i = 0, n = AllocaVec.size(); i < n; i++)
-   AllocaVec[i]->eraseFromParent();
+ for (auto AI : AllocaVec)
+   AI->eraseFromParent();
}
void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
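A detail worth noting in the AddressSanitizer hunks above: the old Function::iterator and BasicBlock::iterator values converted implicitly to BasicBlock* and Instruction*, whereas a range-based loop yields the element itself, so the new code binds by reference and takes the address explicitly (AllBlocks.push_back(&BB), ToInstrument.push_back(&Inst)). A minimal standalone sketch of the same idiom, using an invented Node type rather than LLVM classes:

    #include <list>
    #include <vector>

    struct Node { int Id; };

    int main() {
      std::list<Node> Blocks = {{1}, {2}, {3}};
      std::vector<Node *> Worklist;

      // Bind each element by reference and store its address,
      // mirroring the &BB / &Inst pattern in the patch.
      for (auto &N : Blocks)
        Worklist.push_back(&N);

      return Worklist.size() == Blocks.size() ? 0 : 1;
    }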

lib/Transforms/Instrumentation/MemorySanitizer.cpp

@@ -599,26 +599,26 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
void materializeStores(bool InstrumentWithCalls) {
- for (size_t i = 0, n = StoreList.size(); i < n; i++) {
-   StoreInst &I = *dyn_cast<StoreInst>(StoreList[i]);
+ for (auto Inst : StoreList) {
+   StoreInst &SI = *dyn_cast<StoreInst>(Inst);
- IRBuilder<> IRB(&I);
- Value *Val = I.getValueOperand();
- Value *Addr = I.getPointerOperand();
- Value *Shadow = I.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
+ IRBuilder<> IRB(&SI);
+ Value *Val = SI.getValueOperand();
+ Value *Addr = SI.getPointerOperand();
+ Value *Shadow = SI.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
StoreInst *NewSI =
- IRB.CreateAlignedStore(Shadow, ShadowPtr, I.getAlignment());
+ IRB.CreateAlignedStore(Shadow, ShadowPtr, SI.getAlignment());
DEBUG(dbgs() << " STORE: " << *NewSI << "\n");
(void)NewSI;
- if (ClCheckAccessAddress) insertShadowCheck(Addr, &I);
+ if (ClCheckAccessAddress) insertShadowCheck(Addr, &SI);
- if (I.isAtomic()) I.setOrdering(addReleaseOrdering(I.getOrdering()));
+ if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering()));
if (MS.TrackOrigins) {
- unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
+ unsigned Alignment = std::max(kMinOriginAlignment, SI.getAlignment());
storeOrigin(IRB, Addr, Shadow, getOrigin(Val), Alignment,
InstrumentWithCalls);
}
@@ -662,18 +662,17 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
void materializeChecks(bool InstrumentWithCalls) {
- for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
-   Instruction *OrigIns = InstrumentationList[i].OrigIns;
-   Value *Shadow = InstrumentationList[i].Shadow;
-   Value *Origin = InstrumentationList[i].Origin;
+ for (const auto &ShadowData : InstrumentationList) {
+   Instruction *OrigIns = ShadowData.OrigIns;
+   Value *Shadow = ShadowData.Shadow;
+   Value *Origin = ShadowData.Origin;
materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
}
DEBUG(dbgs() << "DONE:\n" << F);
}
void materializeIndirectCalls() {
- for (size_t i = 0, n = IndirectCallList.size(); i < n; i++) {
-   CallSite CS = IndirectCallList[i];
+ for (auto &CS : IndirectCallList) {
Instruction *I = CS.getInstruction();
BasicBlock *B = I->getParent();
IRBuilder<> IRB(I);
@@ -732,8 +731,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// Finalize PHI nodes.
- for (size_t i = 0, n = ShadowPHINodes.size(); i < n; i++) {
-   PHINode *PN = ShadowPHINodes[i];
+ for (PHINode *PN : ShadowPHINodes) {
PHINode *PNS = cast<PHINode>(getShadow(PN));
PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
size_t NumValues = PN->getNumIncomingValues();
@@ -950,22 +948,21 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Function *F = A->getParent();
IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
unsigned ArgOffset = 0;
- for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
-      AI != AE; ++AI) {
-   if (!AI->getType()->isSized()) {
+ for (auto &FArg : F->args()) {
+   if (!FArg.getType()->isSized()) {
DEBUG(dbgs() << "Arg is not sized\n");
continue;
}
- unsigned Size = AI->hasByValAttr()
-   ? MS.DL->getTypeAllocSize(AI->getType()->getPointerElementType())
-   : MS.DL->getTypeAllocSize(AI->getType());
- if (A == AI) {
-   Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
-   if (AI->hasByValAttr()) {
+ unsigned Size = FArg.hasByValAttr()
+   ? MS.DL->getTypeAllocSize(FArg.getType()->getPointerElementType())
+   : MS.DL->getTypeAllocSize(FArg.getType());
+ if (A == &FArg) {
+   Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
+   if (FArg.hasByValAttr()) {
// ByVal pointer itself has clean shadow. We copy the actual
// argument shadow to the underlying memory.
// Figure out maximal valid memcpy alignment.
- unsigned ArgAlign = AI->getParamAlignment();
+ unsigned ArgAlign = FArg.getParamAlignment();
if (ArgAlign == 0) {
Type *EltType = A->getType()->getPointerElementType();
ArgAlign = MS.DL->getABITypeAlignment(EltType);
@@ -980,10 +977,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
} else {
*ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
}
- DEBUG(dbgs() << " ARG: " << *AI << " ==> " <<
+ DEBUG(dbgs() << " ARG: " << FArg << " ==> " <<
**ShadowPtr << "\n");
if (MS.TrackOrigins) {
- Value* OriginPtr = getOriginPtrForArgument(AI, EntryIRB, ArgOffset);
+ Value *OriginPtr =
+     getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
}
}

lib/Transforms/Instrumentation/ThreadSanitizer.cpp

@@ -333,20 +333,17 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
bool HasCalls = false;
// Traverse all instructions, collect loads/stores/returns, check for calls.
- for (Function::iterator FI = F.begin(), FE = F.end();
-      FI != FE; ++FI) {
-   BasicBlock &BB = *FI;
-   for (BasicBlock::iterator BI = BB.begin(), BE = BB.end();
-        BI != BE; ++BI) {
-     if (isAtomic(BI))
-       AtomicAccesses.push_back(BI);
-     else if (isa<LoadInst>(BI) || isa<StoreInst>(BI))
-       LocalLoadsAndStores.push_back(BI);
-     else if (isa<ReturnInst>(BI))
-       RetVec.push_back(BI);
-     else if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
-       if (isa<MemIntrinsic>(BI))
-         MemIntrinCalls.push_back(BI);
+ for (auto &BB : F) {
+   for (auto &Inst : BB) {
+     if (isAtomic(&Inst))
+       AtomicAccesses.push_back(&Inst);
+     else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
+       LocalLoadsAndStores.push_back(&Inst);
+     else if (isa<ReturnInst>(Inst))
+       RetVec.push_back(&Inst);
+     else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
+       if (isa<MemIntrinsic>(Inst))
+         MemIntrinCalls.push_back(&Inst);
HasCalls = true;
chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
}
@@ -360,19 +357,19 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
// Instrument memory accesses.
if (ClInstrumentMemoryAccesses && F.hasFnAttribute(Attribute::SanitizeThread))
- for (size_t i = 0, n = AllLoadsAndStores.size(); i < n; ++i) {
-   Res |= instrumentLoadOrStore(AllLoadsAndStores[i]);
+ for (auto Inst : AllLoadsAndStores) {
+   Res |= instrumentLoadOrStore(Inst);
}
// Instrument atomic memory accesses.
if (ClInstrumentAtomics)
- for (size_t i = 0, n = AtomicAccesses.size(); i < n; ++i) {
-   Res |= instrumentAtomic(AtomicAccesses[i]);
+ for (auto Inst : AtomicAccesses) {
+   Res |= instrumentAtomic(Inst);
}
if (ClInstrumentMemIntrinsics)
- for (size_t i = 0, n = MemIntrinCalls.size(); i < n; ++i) {
-   Res |= instrumentMemIntrinsic(MemIntrinCalls[i]);
+ for (auto Inst : MemIntrinCalls) {
+   Res |= instrumentMemIntrinsic(Inst);
}
// Instrument function entry/exit points if there were instrumented accesses.
@@ -382,8 +379,8 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
IRB.getInt32(0));
IRB.CreateCall(TsanFuncEntry, ReturnAddress);
- for (size_t i = 0, n = RetVec.size(); i < n; ++i) {
-   IRBuilder<> IRBRet(RetVec[i]);
+ for (auto RetInst : RetVec) {
+   IRBuilder<> IRBRet(RetInst);
IRBRet.CreateCall(TsanFuncExit);
}
Res = true;