Use range-based for loops in ASan, TSan and MSan

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@209834 91177308-0d34-0410-b5e6-96231b3b80d8
Alexey Samsonov 2014-05-29 18:40:48 +00:00
parent fa68048322
commit d4d04199ac
3 changed files with 81 additions and 97 deletions
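
Every change below is the same mechanical rewrite: an explicit iterator or index loop over an LLVM container becomes a range-based for loop over the container's range interface (M.globals(), F.args(), DynamicGlobals->operands(), or a SmallVector directly), which drops the separate element re-load inside the loop body. A minimal standalone sketch of the idiom — using a plain std::vector stand-in rather than the actual sanitizer types, so the names here are illustrative only:

    #include <cstdio>
    #include <vector>

    struct Instruction { int Opcode; };  // stand-in for llvm::Instruction

    int main() {
      Instruction I1{1}, I2{2};
      std::vector<Instruction *> ToInstrument = {&I1, &I2};

      // Before: index loop, with an explicit re-load of the element.
      for (size_t i = 0, n = ToInstrument.size(); i != n; i++) {
        Instruction *Inst = ToInstrument[i];
        std::printf("opcode %d\n", Inst->Opcode);
      }

      // After: range-based for binds each element directly.
      for (auto Inst : ToInstrument)
        std::printf("opcode %d\n", Inst->Opcode);
    }

One detail visible throughout the diff: iterating a container of pointers (like ToInstrument above) binds "auto Inst" to the pointer itself, while iterating an intrusive list such as a Function's basic blocks binds "auto &BB" to the object, so call sites that previously passed an iterator now pass &BB or &Inst.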

lib/Transforms/Instrumentation/AddressSanitizer.cpp

@@ -225,8 +225,7 @@ class SetOfDynamicallyInitializedGlobals {
         M.getNamedMetadata("llvm.asan.dynamically_initialized_globals");
     if (!DynamicGlobals)
       return;
-    for (int i = 0, n = DynamicGlobals->getNumOperands(); i < n; ++i) {
-      MDNode *MDN = DynamicGlobals->getOperand(i);
+    for (const auto MDN : DynamicGlobals->operands()) {
       assert(MDN->getNumOperands() == 1);
       Value *VG = MDN->getOperand(0);
       // The optimizer may optimize away a global entirely, in which case we
@@ -1009,10 +1008,9 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
   SmallVector<GlobalVariable *, 16> GlobalsToChange;

-  for (Module::GlobalListType::iterator G = M.global_begin(),
-       E = M.global_end(); G != E; ++G) {
-    if (ShouldInstrumentGlobal(G))
-      GlobalsToChange.push_back(G);
+  for (auto &G : M.globals()) {
+    if (ShouldInstrumentGlobal(&G))
+      GlobalsToChange.push_back(&G);
   }

   Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
@@ -1305,8 +1303,8 @@ bool AddressSanitizer::InjectCoverage(Function &F,
       (unsigned)ClCoverageBlockThreshold < AllBlocks.size()) {
     InjectCoverageAtBlock(F, F.getEntryBlock());
   } else {
-    for (size_t i = 0, n = AllBlocks.size(); i < n; i++)
-      InjectCoverageAtBlock(F, *AllBlocks[i]);
+    for (auto BB : AllBlocks)
+      InjectCoverageAtBlock(F, *BB);
   }
   return true;
 }
@@ -1339,29 +1337,28 @@ bool AddressSanitizer::runOnFunction(Function &F) {
   unsigned Alignment;

   // Fill the set of memory operations to instrument.
-  for (Function::iterator FI = F.begin(), FE = F.end();
-       FI != FE; ++FI) {
-    AllBlocks.push_back(FI);
+  for (auto &BB : F) {
+    AllBlocks.push_back(&BB);
     TempsToInstrument.clear();
     int NumInsnsPerBB = 0;
-    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
-         BI != BE; ++BI) {
-      if (LooksLikeCodeInBug11395(BI)) return false;
-      if (Value *Addr = isInterestingMemoryAccess(BI, &IsWrite, &Alignment)) {
+    for (auto &Inst : BB) {
+      if (LooksLikeCodeInBug11395(&Inst)) return false;
+      if (Value *Addr =
+              isInterestingMemoryAccess(&Inst, &IsWrite, &Alignment)) {
         if (ClOpt && ClOptSameTemp) {
           if (!TempsToInstrument.insert(Addr))
             continue; // We've seen this temp in the current BB.
         }
       } else if (ClInvalidPointerPairs &&
-                 isInterestingPointerComparisonOrSubtraction(BI)) {
-        PointerComparisonsOrSubtracts.push_back(BI);
+                 isInterestingPointerComparisonOrSubtraction(&Inst)) {
+        PointerComparisonsOrSubtracts.push_back(&Inst);
         continue;
-      } else if (isa<MemIntrinsic>(BI)) {
+      } else if (isa<MemIntrinsic>(Inst)) {
         // ok, take it.
       } else {
-        if (isa<AllocaInst>(BI))
+        if (isa<AllocaInst>(Inst))
           NumAllocas++;
-        CallSite CS(BI);
+        CallSite CS(&Inst);
         if (CS) {
           // A call inside BB.
           TempsToInstrument.clear();
@@ -1370,7 +1367,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
         }
         continue;
       }
-      ToInstrument.push_back(BI);
+      ToInstrument.push_back(&Inst);
       NumInsnsPerBB++;
       if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB)
         break;
@@ -1395,8 +1392,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
   // Instrument.
   int NumInstrumented = 0;
-  for (size_t i = 0, n = ToInstrument.size(); i != n; i++) {
-    Instruction *Inst = ToInstrument[i];
+  for (auto Inst : ToInstrument) {
     if (ClDebugMin < 0 || ClDebugMax < 0 ||
         (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
       if (isInterestingMemoryAccess(Inst, &IsWrite, &Alignment))
@@ -1412,14 +1408,13 @@ bool AddressSanitizer::runOnFunction(Function &F) {
   // We must unpoison the stack before every NoReturn call (throw, _exit, etc).
   // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
-  for (size_t i = 0, n = NoReturnCalls.size(); i != n; i++) {
-    Instruction *CI = NoReturnCalls[i];
+  for (auto CI : NoReturnCalls) {
     IRBuilder<> IRB(CI);
     IRB.CreateCall(AsanHandleNoReturnFunc);
   }

-  for (size_t i = 0, n = PointerComparisonsOrSubtracts.size(); i != n; i++) {
-    instrumentPointerComparisonOrSubtraction(PointerComparisonsOrSubtracts[i]);
+  for (auto Inst : PointerComparisonsOrSubtracts) {
+    instrumentPointerComparisonOrSubtraction(Inst);
     NumInstrumented++;
   }
@@ -1532,12 +1527,10 @@ void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined(
 }

 static DebugLoc getFunctionEntryDebugLocation(Function &F) {
-  BasicBlock::iterator I = F.getEntryBlock().begin(),
-                       E = F.getEntryBlock().end();
-  for (; I != E; ++I)
-    if (!isa<AllocaInst>(I))
-      break;
-  return I->getDebugLoc();
+  for (const auto &Inst : F.getEntryBlock())
+    if (!isa<AllocaInst>(Inst))
+      return Inst.getDebugLoc();
+  return DebugLoc();
 }

 void FunctionStackPoisoner::poisonStack() {
@@ -1551,8 +1544,7 @@ void FunctionStackPoisoner::poisonStack() {
   SmallVector<ASanStackVariableDescription, 16> SVD;
   SVD.reserve(AllocaVec.size());
-  for (size_t i = 0, n = AllocaVec.size(); i < n; i++) {
-    AllocaInst *AI = AllocaVec[i];
+  for (AllocaInst *AI : AllocaVec) {
     ASanStackVariableDescription D = { AI->getName().data(),
                                        getAllocaSizeInBytes(AI),
                                        AI->getAlignment(), AI, 0};
@@ -1607,8 +1599,7 @@ void FunctionStackPoisoner::poisonStack() {
   // Insert poison calls for lifetime intrinsics for alloca.
   bool HavePoisonedAllocas = false;
-  for (size_t i = 0, n = AllocaPoisonCallVec.size(); i < n; i++) {
-    const AllocaPoisonCall &APC = AllocaPoisonCallVec[i];
+  for (const auto &APC : AllocaPoisonCallVec) {
     assert(APC.InsBefore);
     assert(APC.AI);
     IRBuilder<> IRB(APC.InsBefore);
@@ -1617,11 +1608,10 @@ void FunctionStackPoisoner::poisonStack() {
   }

   // Replace Alloca instructions with base+offset.
-  for (size_t i = 0, n = SVD.size(); i < n; i++) {
-    AllocaInst *AI = SVD[i].AI;
+  for (const auto &Desc : SVD) {
+    AllocaInst *AI = Desc.AI;
     Value *NewAllocaPtr = IRB.CreateIntToPtr(
-        IRB.CreateAdd(LocalStackBase,
-                      ConstantInt::get(IntptrTy, SVD[i].Offset)),
+        IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
         AI->getType());
     replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB);
     AI->replaceAllUsesWith(NewAllocaPtr);
@@ -1654,8 +1644,7 @@ void FunctionStackPoisoner::poisonStack() {
   poisonRedZones(L.ShadowBytes, IRB, ShadowBase, true);

   // (Un)poison the stack before all ret instructions.
-  for (size_t i = 0, n = RetVec.size(); i < n; i++) {
-    Instruction *Ret = RetVec[i];
+  for (auto Ret : RetVec) {
     IRBuilder<> IRBRet(Ret);
     // Mark the current frame as retired.
     IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
@@ -1709,8 +1698,8 @@ void FunctionStackPoisoner::poisonStack() {
   }

   // We are done. Remove the old unused alloca instructions.
-  for (size_t i = 0, n = AllocaVec.size(); i < n; i++)
-    AllocaVec[i]->eraseFromParent();
+  for (auto AI : AllocaVec)
+    AI->eraseFromParent();
 }

 void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,

lib/Transforms/Instrumentation/MemorySanitizer.cpp

@@ -599,26 +599,26 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   }

   void materializeStores(bool InstrumentWithCalls) {
-    for (size_t i = 0, n = StoreList.size(); i < n; i++) {
-      StoreInst &I = *dyn_cast<StoreInst>(StoreList[i]);
-      IRBuilder<> IRB(&I);
-      Value *Val = I.getValueOperand();
-      Value *Addr = I.getPointerOperand();
-      Value *Shadow = I.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
+    for (auto Inst : StoreList) {
+      StoreInst &SI = *dyn_cast<StoreInst>(Inst);
+      IRBuilder<> IRB(&SI);
+      Value *Val = SI.getValueOperand();
+      Value *Addr = SI.getPointerOperand();
+      Value *Shadow = SI.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
       Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

       StoreInst *NewSI =
-          IRB.CreateAlignedStore(Shadow, ShadowPtr, I.getAlignment());
+          IRB.CreateAlignedStore(Shadow, ShadowPtr, SI.getAlignment());
       DEBUG(dbgs() << " STORE: " << *NewSI << "\n");
       (void)NewSI;

-      if (ClCheckAccessAddress) insertShadowCheck(Addr, &I);
+      if (ClCheckAccessAddress) insertShadowCheck(Addr, &SI);

-      if (I.isAtomic()) I.setOrdering(addReleaseOrdering(I.getOrdering()));
+      if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering()));

       if (MS.TrackOrigins) {
-        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
+        unsigned Alignment = std::max(kMinOriginAlignment, SI.getAlignment());
         storeOrigin(IRB, Addr, Shadow, getOrigin(Val), Alignment,
                     InstrumentWithCalls);
       }
@@ -662,18 +662,17 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   }

   void materializeChecks(bool InstrumentWithCalls) {
-    for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
-      Instruction *OrigIns = InstrumentationList[i].OrigIns;
-      Value *Shadow = InstrumentationList[i].Shadow;
-      Value *Origin = InstrumentationList[i].Origin;
+    for (const auto &ShadowData : InstrumentationList) {
+      Instruction *OrigIns = ShadowData.OrigIns;
+      Value *Shadow = ShadowData.Shadow;
+      Value *Origin = ShadowData.Origin;
       materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
     }
     DEBUG(dbgs() << "DONE:\n" << F);
   }

   void materializeIndirectCalls() {
-    for (size_t i = 0, n = IndirectCallList.size(); i < n; i++) {
-      CallSite CS = IndirectCallList[i];
+    for (auto &CS : IndirectCallList) {
       Instruction *I = CS.getInstruction();
       BasicBlock *B = I->getParent();
       IRBuilder<> IRB(I);
@@ -732,8 +731,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {

     // Finalize PHI nodes.
-    for (size_t i = 0, n = ShadowPHINodes.size(); i < n; i++) {
-      PHINode *PN = ShadowPHINodes[i];
+    for (PHINode *PN : ShadowPHINodes) {
       PHINode *PNS = cast<PHINode>(getShadow(PN));
       PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
       size_t NumValues = PN->getNumIncomingValues();
@@ -950,22 +948,21 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       Function *F = A->getParent();
       IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
       unsigned ArgOffset = 0;
-      for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
-           AI != AE; ++AI) {
-        if (!AI->getType()->isSized()) {
+      for (auto &FArg : F->args()) {
+        if (!FArg.getType()->isSized()) {
           DEBUG(dbgs() << "Arg is not sized\n");
           continue;
         }
-        unsigned Size = AI->hasByValAttr()
-          ? MS.DL->getTypeAllocSize(AI->getType()->getPointerElementType())
-          : MS.DL->getTypeAllocSize(AI->getType());
-        if (A == AI) {
-          Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
-          if (AI->hasByValAttr()) {
+        unsigned Size = FArg.hasByValAttr()
+          ? MS.DL->getTypeAllocSize(FArg.getType()->getPointerElementType())
+          : MS.DL->getTypeAllocSize(FArg.getType());
+        if (A == &FArg) {
+          Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
+          if (FArg.hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            // Figure out maximal valid memcpy alignment.
-            unsigned ArgAlign = AI->getParamAlignment();
+            unsigned ArgAlign = FArg.getParamAlignment();
            if (ArgAlign == 0) {
              Type *EltType = A->getType()->getPointerElementType();
              ArgAlign = MS.DL->getABITypeAlignment(EltType);
@@ -980,10 +977,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
          } else {
            *ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
          }
-          DEBUG(dbgs() << " ARG: " << *AI << " ==> " <<
+          DEBUG(dbgs() << " ARG: " << FArg << " ==> " <<
                **ShadowPtr << "\n");
          if (MS.TrackOrigins) {
-            Value* OriginPtr = getOriginPtrForArgument(AI, EntryIRB, ArgOffset);
+            Value *OriginPtr =
+                getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          }
        }

lib/Transforms/Instrumentation/ThreadSanitizer.cpp

@@ -333,20 +333,17 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
   bool HasCalls = false;

   // Traverse all instructions, collect loads/stores/returns, check for calls.
-  for (Function::iterator FI = F.begin(), FE = F.end();
-       FI != FE; ++FI) {
-    BasicBlock &BB = *FI;
-    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end();
-         BI != BE; ++BI) {
-      if (isAtomic(BI))
-        AtomicAccesses.push_back(BI);
-      else if (isa<LoadInst>(BI) || isa<StoreInst>(BI))
-        LocalLoadsAndStores.push_back(BI);
-      else if (isa<ReturnInst>(BI))
-        RetVec.push_back(BI);
-      else if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
-        if (isa<MemIntrinsic>(BI))
-          MemIntrinCalls.push_back(BI);
+  for (auto &BB : F) {
+    for (auto &Inst : BB) {
+      if (isAtomic(&Inst))
+        AtomicAccesses.push_back(&Inst);
+      else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
+        LocalLoadsAndStores.push_back(&Inst);
+      else if (isa<ReturnInst>(Inst))
+        RetVec.push_back(&Inst);
+      else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
+        if (isa<MemIntrinsic>(Inst))
+          MemIntrinCalls.push_back(&Inst);
         HasCalls = true;
         chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
       }
@@ -360,19 +357,19 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
   // Instrument memory accesses.
   if (ClInstrumentMemoryAccesses && F.hasFnAttribute(Attribute::SanitizeThread))
-    for (size_t i = 0, n = AllLoadsAndStores.size(); i < n; ++i) {
-      Res |= instrumentLoadOrStore(AllLoadsAndStores[i]);
+    for (auto Inst : AllLoadsAndStores) {
+      Res |= instrumentLoadOrStore(Inst);
     }

   // Instrument atomic memory accesses.
   if (ClInstrumentAtomics)
-    for (size_t i = 0, n = AtomicAccesses.size(); i < n; ++i) {
-      Res |= instrumentAtomic(AtomicAccesses[i]);
+    for (auto Inst : AtomicAccesses) {
+      Res |= instrumentAtomic(Inst);
     }

   if (ClInstrumentMemIntrinsics)
-    for (size_t i = 0, n = MemIntrinCalls.size(); i < n; ++i) {
-      Res |= instrumentMemIntrinsic(MemIntrinCalls[i]);
+    for (auto Inst : MemIntrinCalls) {
+      Res |= instrumentMemIntrinsic(Inst);
     }

   // Instrument function entry/exit points if there were instrumented accesses.
@@ -382,8 +379,8 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
         Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
         IRB.getInt32(0));
     IRB.CreateCall(TsanFuncEntry, ReturnAddress);
-    for (size_t i = 0, n = RetVec.size(); i < n; ++i) {
-      IRBuilder<> IRBRet(RetVec[i]);
+    for (auto RetInst : RetVec) {
+      IRBuilder<> IRBRet(RetInst);
       IRBRet.CreateCall(TsanFuncExit);
     }
     Res = true;