[asan] properly instrument memory accesses that have small alignment (smaller than min(8, size)) by making two checks instead of one. This may slow down some cases, e.g. long long on 32-bit or wide loads produced after loop unrolling. The benefit is higher sensitivity.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@209508 91177308-0d34-0410-b5e6-96231b3b80d8
Kostya Serebryany 2014-05-23 11:52:07 +00:00
parent 4f22c980f4
commit f2938bf8da
4 changed files with 44 additions and 18 deletions
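For orientation before the diff: the heart of the change is a new fast-path predicate in AddressSanitizer::instrumentMop. Below is a minimal standalone sketch of that predicate; the helper name canUseSingleCheck and the main() driver are ours, for illustration only.

#include <cassert>
#include <cstdint>

// Restatement of the new fast-path test in instrumentMop: an access of
// TypeSize bits keeps the single inlined shadow check only if its size is a
// power of two up to 16 bytes AND its alignment covers a shadow granule
// (Granularity = 1 << Mapping.Scale, 8 bytes by default) or the whole access.
// Alignment == 0 means "ABI alignment", which is always sufficient here.
bool canUseSingleCheck(uint32_t TypeSize, unsigned Alignment,
                       unsigned Granularity) {
  bool SizeIsOk = TypeSize == 8 || TypeSize == 16 || TypeSize == 32 ||
                  TypeSize == 64 || TypeSize == 128;
  bool AlignmentIsOk =
      Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8;
  return SizeIsOk && AlignmentIsOk;
}

int main() {
  // "long long on 32-bit": an i64 store with 4-byte alignment now fails the
  // test (4 < min(8, 8)) and gets two 1-byte checks instead of one.
  assert(!canUseSingleCheck(64, 4, 8));
  // A naturally aligned i64 keeps the single check, as before.
  assert(canUseSingleCheck(64, 8, 8));
  // Unusual sizes (e.g. i40) already took the two-check path before.
  assert(!canUseSingleCheck(40, 8, 8));
  return 0;
}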


@@ -623,26 +623,31 @@ void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
 }
 
 // If I is an interesting memory access, return the PointerOperand
-// and set IsWrite. Otherwise return NULL.
-static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite) {
+// and set IsWrite/Alignment. Otherwise return NULL.
+static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
+                                        unsigned *Alignment) {
   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
     if (!ClInstrumentReads) return nullptr;
     *IsWrite = false;
+    *Alignment = LI->getAlignment();
     return LI->getPointerOperand();
   }
   if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
     if (!ClInstrumentWrites) return nullptr;
     *IsWrite = true;
+    *Alignment = SI->getAlignment();
     return SI->getPointerOperand();
   }
   if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
     if (!ClInstrumentAtomics) return nullptr;
     *IsWrite = true;
+    *Alignment = 0;
     return RMW->getPointerOperand();
   }
   if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
     if (!ClInstrumentAtomics) return nullptr;
     *IsWrite = true;
+    *Alignment = 0;
     return XCHG->getPointerOperand();
   }
   return nullptr;
@@ -692,7 +697,8 @@ AddressSanitizer::instrumentPointerComparisonOrSubtraction(Instruction *I) {
 void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
   bool IsWrite = false;
-  Value *Addr = isInterestingMemoryAccess(I, &IsWrite);
+  unsigned Alignment = 0;
+  Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &Alignment);
   assert(Addr);
   if (ClOpt && ClOptGlobals) {
     if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) {
@@ -727,11 +733,14 @@ void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
   else
     NumInstrumentedReads++;
 
-  // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check.
-  if (TypeSize == 8 || TypeSize == 16 ||
-      TypeSize == 32 || TypeSize == 64 || TypeSize == 128)
+  unsigned Granularity = 1 << Mapping.Scale;
+  // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
+  // if the data is properly aligned.
+  if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
+       TypeSize == 128) &&
+      (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
     return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls);
-  // Instrument unusual size (but still multiple of 8).
+  // Instrument unusual size or unusual alignment.
   // We can not do it with a single check, so we do 1-byte check for the first
   // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
   // to report the actual access size.
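The two-check slow path referenced by the comment above already existed for unusual sizes; this commit additionally routes under-aligned accesses to it. As a sketch of what that path does, factored into a hypothetical helper (the real logic stays inline in instrumentMop, and instrumentAddress is the pass's existing shadow-check emitter):

#include <cstdint>

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Illustrative helper, not the commit's code. InstrumentByte stands in for
// the pass's instrumentAddress(I, I, Addr, 8, IsWrite, Size, UseCalls),
// which emits a 1-byte shadow check that reports failures with the real
// access size via __asan_report_{load,store}_n(addr, size).
template <typename InstrumentByteFn>
static void instrumentWithTwoChecks(Instruction *I, Value *Addr,
                                    Type *IntptrTy, uint32_t TypeSize,
                                    InstrumentByteFn InstrumentByte) {
  IRBuilder<> IRB(I);
  // Real access size in bytes; passed through so the report shows it.
  Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
  // Address of the last byte of the access.
  Value *LastByte = IRB.CreateIntToPtr(
      IRB.CreateAdd(IRB.CreatePointerCast(Addr, IntptrTy),
                    ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
      Addr->getType());
  InstrumentByte(Addr, Size);      // 1-byte check at the first byte
  InstrumentByte(LastByte, Size);  // 1-byte check at the last byte
}

Checking only the first and last byte is enough because any out-of-bounds or use-after-free access of the region must touch one of its end bytes' shadow.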
@@ -1328,6 +1337,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
   SmallVector<Instruction*, 16> PointerComparisonsOrSubtracts;
   int NumAllocas = 0;
   bool IsWrite;
+  unsigned Alignment;
 
   // Fill the set of memory operations to instrument.
   for (Function::iterator FI = F.begin(), FE = F.end();
@@ -1338,7 +1348,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
     for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
          BI != BE; ++BI) {
       if (LooksLikeCodeInBug11395(BI)) return false;
-      if (Value *Addr = isInterestingMemoryAccess(BI, &IsWrite)) {
+      if (Value *Addr = isInterestingMemoryAccess(BI, &IsWrite, &Alignment)) {
         if (ClOpt && ClOptSameTemp) {
           if (!TempsToInstrument.insert(Addr))
             continue;  // We've seen this temp in the current BB.
@@ -1390,7 +1400,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
     Instruction *Inst = ToInstrument[i];
     if (ClDebugMin < 0 || ClDebugMax < 0 ||
         (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
-      if (isInterestingMemoryAccess(Inst, &IsWrite))
+      if (isInterestingMemoryAccess(Inst, &IsWrite, &Alignment))
        instrumentMop(Inst, UseCalls);
      else
        instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));


@@ -34,7 +34,7 @@ define i32 @test_load(i32* %a) sanitize_address {
 entry:
-  %tmp1 = load i32* %a
+  %tmp1 = load i32* %a, align 4
   ret i32 %tmp1
 }
@@ -66,7 +66,7 @@ define void @test_store(i32* %a) sanitize_address {
 ;
 entry:
-  store i32 42, i32* %a
+  store i32 42, i32* %a, align 4
   ret void
 }
@@ -115,6 +115,18 @@ define void @i40test(i40* %a, i40* %b) nounwind uwtable sanitize_address {
 ; CHECK: __asan_report_store_n{{.*}}, i64 5)
 ; CHECK: ret void
 
+define void @i64test_align1(i64* %b) nounwind uwtable sanitize_address {
+entry:
+  store i64 0, i64* %b, align 1
+  ret void
+}
+
+; CHECK-LABEL: i64test_align1
+; CHECK: __asan_report_store_n{{.*}}, i64 8)
+; CHECK: __asan_report_store_n{{.*}}, i64 8)
+; CHECK: ret void
+
 define void @i80test(i80* %a, i80* %b) nounwind uwtable sanitize_address {
 entry:
   %t = load i80* %a


@@ -20,10 +20,10 @@ entry:
 ; CHECK-CUSTOM-PREFIX: call void @__foo_load8
 ; CHECK-CUSTOM-PREFIX: call void @__foo_loadN
 ; CHECK-INLINE-NOT: call void @__asan_load
-  %tmp1 = load i32* %a
-  %tmp2 = load i64* %b
-  %tmp3 = load i512* %c
-  %tmp4 = load i80* %d
+  %tmp1 = load i32* %a, align 4
+  %tmp2 = load i64* %b, align 8
+  %tmp3 = load i512* %c, align 32
+  %tmp4 = load i80* %d, align 8
   ret void
 }


@@ -6,7 +6,7 @@ entry:
   %tmp1 = load i32* %a, align 4
   ret i32 %tmp1
 }
-; CHECK: @read_4_bytes
+; CHECK-LABEL: @read_4_bytes
 ; CHECK-NOT: ret
 ; CHECK: lshr {{.*}} 3
 ; Check for ASAN's Offset for 64-bit (7fff8000)
@@ -19,8 +19,10 @@ entry:
   ret void
 }
-; CHECK: @example_atomicrmw
+; CHECK-LABEL: @example_atomicrmw
 ; CHECK: lshr {{.*}} 3
 ; CHECK: __asan_report_store8
+; CHECK-NOT: __asan_report
 ; CHECK: atomicrmw
 ; CHECK: ret
@@ -30,7 +32,9 @@ entry:
   ret void
 }
-; CHECK: @example_cmpxchg
+; CHECK-LABEL: @example_cmpxchg
 ; CHECK: lshr {{.*}} 3
 ; CHECK: __asan_report_store8
+; CHECK-NOT: __asan_report
 ; CHECK: cmpxchg
 ; CHECK: ret