asan: optimization experiments

The experiments can be used to evaluate potential optimizations that remove
instrumentation (i.e. to assess false negatives). Instead of completely
removing some instrumentation, you set Exp to a non-zero value (a mask of the
optimization experiments that want to remove instrumentation of this
instruction). If Exp is non-zero, this pass emits special calls into the
runtime (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These
calls make the runtime terminate the program in a special way (with a
different exit status). You then run the new compiler on a buggy corpus,
collect the special terminations (ideally, you don't see them at all -- no
false negatives) and make a decision about the optimization.

The exact runtime reaction to experiments is not implemented in this patch;
it will be defined and implemented in a subsequent patch.
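
For illustration only -- this patch adds only the compiler side, so the sketch
below is a hypothetical, standalone stand-in for the runtime reaction, not the
actual compiler-rt implementation. The signature mirrors the call emitted for
a 1-byte experimental load (address plus i32 experiment mask); the message
text and the exit status 23 are assumptions.

// Hypothetical standalone sketch (NOT compiler-rt code): record the
// experiment mask and terminate with a distinguishable exit status so a
// corpus run can count would-be false negatives.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

extern "C" void __asan_report_exp_load1(std::uintptr_t Addr, std::uint32_t Exp) {
  // Exp is the mask of optimization experiments that wanted to remove the
  // shadow check guarding this 1-byte load.
  std::fprintf(stderr,
               "AddressSanitizer: experiment mask %u would have missed a "
               "1-byte load at 0x%zx\n",
               static_cast<unsigned>(Exp), static_cast<std::size_t>(Addr));
  std::_Exit(23); // distinct exit status so experimental reports can be counted
}

A corpus run would then force experiments (e.g. with the -asan-force-experiment
flag added below), look for processes exiting with that special status, and
treat each occurrence as a false negative of the candidate optimization.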

http://reviews.llvm.org/D8198



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@232502 91177308-0d34-0410-b5e6-96231b3b80d8
Dmitry Vyukov 2015-03-17 16:59:19 +00:00
parent 78158ce3a4
commit 6e4a97dfce
3 changed files with 340 additions and 60 deletions


@@ -83,8 +83,6 @@ static const char *const kAsanModuleCtorName = "asan.module_ctor";
static const char *const kAsanModuleDtorName = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
static const char *const kAsanReportErrorTemplate = "__asan_report_";
static const char *const kAsanReportLoadN = "__asan_report_load_n";
static const char *const kAsanReportStoreN = "__asan_report_store_n";
static const char *const kAsanRegisterGlobalsName = "__asan_register_globals";
static const char *const kAsanUnregisterGlobalsName =
"__asan_unregister_globals";
@@ -216,6 +214,11 @@ static cl::opt<bool> ClDynamicAllocaStack(
cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
cl::init(true));
static cl::opt<uint32_t> ClForceExperiment(
"asan-force-experiment",
cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
cl::init(0));
// Debug flags.
static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
cl::init(0));
@@ -412,12 +415,16 @@ struct AddressSanitizer : public FunctionPass {
void instrumentPointerComparisonOrSubtraction(Instruction *I);
void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
Value *Addr, uint32_t TypeSize, bool IsWrite,
Value *SizeArgument, bool UseCalls);
Value *SizeArgument, bool UseCalls, uint32_t Exp);
void instrumentUnusualSizeOrAlignment(Instruction *I, Value *Addr,
uint32_t TypeSize, bool IsWrite,
Value *SizeArgument, bool UseCalls,
uint32_t Exp);
Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
Value *ShadowValue, uint32_t TypeSize);
Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
bool IsWrite, size_t AccessSizeIndex,
Value *SizeArgument);
Value *SizeArgument, uint32_t Exp);
void instrumentMemIntrinsic(MemIntrinsic *MI);
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
bool runOnFunction(Function &F) override;
@@ -445,11 +452,12 @@ struct AddressSanitizer : public FunctionPass {
Function *AsanInitFunction;
Function *AsanHandleNoReturnFunc;
Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
// This array is indexed by AccessIsWrite and log2(AccessSize).
Function *AsanErrorCallback[2][kNumberOfAccessSizes];
Function *AsanMemoryAccessCallback[2][kNumberOfAccessSizes];
// This array is indexed by AccessIsWrite.
Function *AsanErrorCallbackSized[2], *AsanMemoryAccessCallbackSized[2];
// This array is indexed by AccessIsWrite, Experiment and log2(AccessSize).
Function *AsanErrorCallback[2][2][kNumberOfAccessSizes];
Function *AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];
// This array is indexed by AccessIsWrite and Experiment.
Function *AsanErrorCallbackSized[2][2];
Function *AsanMemoryAccessCallbackSized[2][2];
Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
InlineAsm *EmptyAsm;
GlobalsMetadata GlobalsMD;
@@ -904,6 +912,19 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment);
assert(Addr);
// Optimization experiments.
// The experiments can be used to evaluate potential optimizations that remove
// instrumentation (i.e. assess false negatives). Instead of completely
// removing some instrumentation, you set Exp to a non-zero value (a mask of
// optimization experiments that want to remove instrumentation of this
// instruction). If Exp is non-zero, this pass emits special calls into the
// runtime (e.g. __asan_report_exp_load1 instead of __asan_report_load1).
// These calls make the runtime terminate the program in a special way (with a
// different exit status). Then you run the new compiler on a buggy corpus,
// collect the special terminations (ideally, you don't see them at all -- no
// false negatives) and make the decision on the optimization.
uint32_t Exp = ClForceExperiment;
if (ClOpt && ClOptGlobals) {
// If initialization order checking is disabled, a simple access to a
// dynamically initialized global is always valid.
@@ -935,23 +956,10 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
TypeSize == 128) &&
(Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls);
// Instrument unusual size or unusual alignment.
// We can not do it with a single check, so we do 1-byte check for the first
// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
// to report the actual access size.
IRBuilder<> IRB(I);
Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
if (UseCalls) {
IRB.CreateCall2(AsanMemoryAccessCallbackSized[IsWrite], AddrLong, Size);
} else {
Value *LastByte = IRB.CreateIntToPtr(
IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
Addr->getType());
instrumentAddress(I, I, Addr, 8, IsWrite, Size, false);
instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false);
}
return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls,
Exp);
instrumentUnusualSizeOrAlignment(I, Addr, TypeSize, IsWrite, nullptr,
UseCalls, Exp);
}
// Validate the result of Module::getOrInsertFunction called for an interface
@@ -969,12 +977,26 @@ static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
Value *Addr, bool IsWrite,
size_t AccessSizeIndex,
Value *SizeArgument) {
Value *SizeArgument,
uint32_t Exp) {
IRBuilder<> IRB(InsertBefore);
CallInst *Call =
SizeArgument
? IRB.CreateCall2(AsanErrorCallbackSized[IsWrite], Addr, SizeArgument)
: IRB.CreateCall(AsanErrorCallback[IsWrite][AccessSizeIndex], Addr);
Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
CallInst *Call = nullptr;
if (SizeArgument) {
if (Exp == 0)
Call = IRB.CreateCall2(AsanErrorCallbackSized[IsWrite][0], Addr,
SizeArgument);
else
Call = IRB.CreateCall3(AsanErrorCallbackSized[IsWrite][1], Addr,
SizeArgument, ExpVal);
} else {
if (Exp == 0)
Call =
IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
else
Call = IRB.CreateCall2(AsanErrorCallback[IsWrite][1][AccessSizeIndex],
Addr, ExpVal);
}
// We don't do Call->setDoesNotReturn() because the BB already has
// UnreachableInst at the end.
@@ -1004,14 +1026,19 @@ Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
Instruction *InsertBefore, Value *Addr,
uint32_t TypeSize, bool IsWrite,
Value *SizeArgument, bool UseCalls) {
Value *SizeArgument, bool UseCalls,
uint32_t Exp) {
IRBuilder<> IRB(InsertBefore);
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
if (UseCalls) {
IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][AccessSizeIndex],
AddrLong);
if (Exp == 0)
IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
AddrLong);
else
IRB.CreateCall2(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp));
return;
}
@@ -1046,10 +1073,36 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
}
Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
AccessSizeIndex, SizeArgument);
AccessSizeIndex, SizeArgument, Exp);
Crash->setDebugLoc(OrigIns->getDebugLoc());
}
// Instrument unusual size or unusual alignment.
// We can not do it with a single check, so we do 1-byte check for the first
// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
// to report the actual access size.
void AddressSanitizer::instrumentUnusualSizeOrAlignment(
Instruction *I, Value *Addr, uint32_t TypeSize, bool IsWrite,
Value *SizeArgument, bool UseCalls, uint32_t Exp) {
IRBuilder<> IRB(I);
Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
if (UseCalls) {
if (Exp == 0)
IRB.CreateCall2(AsanMemoryAccessCallbackSized[IsWrite][0], AddrLong,
Size);
else
IRB.CreateCall3(AsanMemoryAccessCallbackSized[IsWrite][1], AddrLong, Size,
ConstantInt::get(IRB.getInt32Ty(), Exp));
} else {
Value *LastByte = IRB.CreateIntToPtr(
IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
Addr->getType());
instrumentAddress(I, I, Addr, 8, IsWrite, Size, false, Exp);
instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false, Exp);
}
}
void AddressSanitizerModule::poisonOneInitializer(Function &GlobalInit,
GlobalValue *ModuleName) {
// Set up the arguments to our poison/unpoison functions.
@@ -1342,33 +1395,34 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
void AddressSanitizer::initializeCallbacks(Module &M) {
IRBuilder<> IRB(*C);
// Create __asan_report* callbacks.
for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
AccessSizeIndex++) {
// IsWrite and TypeSize are encoded in the function name.
std::string Suffix =
(AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex);
AsanErrorCallback[AccessIsWrite][AccessSizeIndex] =
checkInterfaceFunction(
M.getOrInsertFunction(kAsanReportErrorTemplate + Suffix,
IRB.getVoidTy(), IntptrTy, nullptr));
AsanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
checkInterfaceFunction(
M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + Suffix,
IRB.getVoidTy(), IntptrTy, nullptr));
// IsWrite, TypeSize and Exp are encoded in the function name.
for (int Exp = 0; Exp < 2; Exp++) {
for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
const std::string TypeStr = AccessIsWrite ? "store" : "load";
const std::string ExpStr = Exp ? "exp_" : "";
const Type *ExpType = Exp ? Type::getInt32Ty(*C) : nullptr;
AsanErrorCallbackSized[AccessIsWrite][Exp] =
checkInterfaceFunction(M.getOrInsertFunction(
kAsanReportErrorTemplate + ExpStr + TypeStr + "_n",
IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr));
AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] =
checkInterfaceFunction(M.getOrInsertFunction(
ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N",
IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr));
for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
AccessSizeIndex++) {
const std::string Suffix = TypeStr + itostr(1 << AccessSizeIndex);
AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
checkInterfaceFunction(M.getOrInsertFunction(
kAsanReportErrorTemplate + ExpStr + Suffix, IRB.getVoidTy(),
IntptrTy, ExpType, nullptr));
AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
checkInterfaceFunction(M.getOrInsertFunction(
ClMemoryAccessCallbackPrefix + ExpStr + Suffix, IRB.getVoidTy(),
IntptrTy, ExpType, nullptr));
}
}
}
AsanErrorCallbackSized[0] = checkInterfaceFunction(M.getOrInsertFunction(
kAsanReportLoadN, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
AsanErrorCallbackSized[1] = checkInterfaceFunction(M.getOrInsertFunction(
kAsanReportStoreN, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
AsanMemoryAccessCallbackSized[0] = checkInterfaceFunction(
M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "loadN",
IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
AsanMemoryAccessCallbackSized[1] = checkInterfaceFunction(
M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "storeN",
IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
AsanMemmove = checkInterfaceFunction(M.getOrInsertFunction(
ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),


@@ -0,0 +1,113 @@
; Test optimization experiments.
; -asan-force-experiment flag turns all memory accesses into experiments.
; RUN: opt < %s -asan -asan-module -asan-force-experiment=42 -asan-instrumentation-with-call-threshold=0 -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
define void @load1(i8* %p) sanitize_address {
entry:
%t = load i8, i8* %p, align 1
ret void
; CHECK-LABEL: define void @load1
; CHECK: __asan_exp_load1{{.*}} i32 42
; CHECK: ret void
}
define void @load2(i16* %p) sanitize_address {
entry:
%t = load i16, i16* %p, align 2
ret void
; CHECK-LABEL: define void @load2
; CHECK: __asan_exp_load2{{.*}} i32 42
; CHECK: ret void
}
define void @load4(i32* %p) sanitize_address {
entry:
%t = load i32, i32* %p, align 4
ret void
; CHECK-LABEL: define void @load4
; CHECK: __asan_exp_load4{{.*}} i32 42
; CHECK: ret void
}
define void @load8(i64* %p) sanitize_address {
entry:
%t = load i64, i64* %p, align 8
ret void
; CHECK-LABEL: define void @load8
; CHECK: __asan_exp_load8{{.*}} i32 42
; CHECK: ret void
}
define void @load16(i128* %p) sanitize_address {
entry:
%t = load i128, i128* %p, align 16
ret void
; CHECK-LABEL: define void @load16
; CHECK: __asan_exp_load16{{.*}} i32 42
; CHECK: ret void
}
define void @loadN(i48* %p) sanitize_address {
entry:
%t = load i48, i48* %p, align 1
ret void
; CHECK-LABEL: define void @loadN
; CHECK: __asan_exp_loadN{{.*}} i32 42
; CHECK: ret void
}
define void @store1(i8* %p) sanitize_address {
entry:
store i8 1, i8* %p, align 1
ret void
; CHECK-LABEL: define void @store1
; CHECK: __asan_exp_store1{{.*}} i32 42
; CHECK: ret void
}
define void @store2(i16* %p) sanitize_address {
entry:
store i16 1, i16* %p, align 2
ret void
; CHECK-LABEL: define void @store2
; CHECK: __asan_exp_store2{{.*}} i32 42
; CHECK: ret void
}
define void @store4(i32* %p) sanitize_address {
entry:
store i32 1, i32* %p, align 4
ret void
; CHECK-LABEL: define void @store4
; CHECK: __asan_exp_store4{{.*}} i32 42
; CHECK: ret void
}
define void @store8(i64* %p) sanitize_address {
entry:
store i64 1, i64* %p, align 8
ret void
; CHECK-LABEL: define void @store8
; CHECK: __asan_exp_store8{{.*}} i32 42
; CHECK: ret void
}
define void @store16(i128* %p) sanitize_address {
entry:
store i128 1, i128* %p, align 16
ret void
; CHECK-LABEL: define void @store16
; CHECK: __asan_exp_store16{{.*}} i32 42
; CHECK: ret void
}
define void @storeN(i48* %p) sanitize_address {
entry:
store i48 1, i48* %p, align 1
ret void
; CHECK-LABEL: define void @storeN
; CHECK: __asan_exp_storeN{{.*}} i32 42
; CHECK: ret void
}


@@ -0,0 +1,113 @@
; Test optimization experiments.
; -asan-force-experiment flag turns all memory accesses into experiments.
; RUN: opt < %s -asan -asan-module -asan-force-experiment=42 -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
define void @load1(i8* %p) sanitize_address {
entry:
%t = load i8, i8* %p, align 1
ret void
; CHECK-LABEL: define void @load1
; CHECK: __asan_report_exp_load1{{.*}} i32 42
; CHECK: ret void
}
define void @load2(i16* %p) sanitize_address {
entry:
%t = load i16, i16* %p, align 2
ret void
; CHECK-LABEL: define void @load2
; CHECK: __asan_report_exp_load2{{.*}} i32 42
; CHECK: ret void
}
define void @load4(i32* %p) sanitize_address {
entry:
%t = load i32, i32* %p, align 4
ret void
; CHECK-LABEL: define void @load4
; CHECK: __asan_report_exp_load4{{.*}} i32 42
; CHECK: ret void
}
define void @load8(i64* %p) sanitize_address {
entry:
%t = load i64, i64* %p, align 8
ret void
; CHECK-LABEL: define void @load8
; CHECK: __asan_report_exp_load8{{.*}} i32 42
; CHECK: ret void
}
define void @load16(i128* %p) sanitize_address {
entry:
%t = load i128, i128* %p, align 16
ret void
; CHECK-LABEL: define void @load16
; CHECK: __asan_report_exp_load16{{.*}} i32 42
; CHECK: ret void
}
define void @loadN(i48* %p) sanitize_address {
entry:
%t = load i48, i48* %p, align 1
ret void
; CHECK-LABEL: define void @loadN
; CHECK: __asan_report_exp_load_n{{.*}} i32 42
; CHECK: ret void
}
define void @store1(i8* %p) sanitize_address {
entry:
store i8 1, i8* %p, align 1
ret void
; CHECK-LABEL: define void @store1
; CHECK: __asan_report_exp_store1{{.*}} i32 42
; CHECK: ret void
}
define void @store2(i16* %p) sanitize_address {
entry:
store i16 1, i16* %p, align 2
ret void
; CHECK-LABEL: define void @store2
; CHECK: __asan_report_exp_store2{{.*}} i32 42
; CHECK: ret void
}
define void @store4(i32* %p) sanitize_address {
entry:
store i32 1, i32* %p, align 4
ret void
; CHECK-LABEL: define void @store4
; CHECK: __asan_report_exp_store4{{.*}} i32 42
; CHECK: ret void
}
define void @store8(i64* %p) sanitize_address {
entry:
store i64 1, i64* %p, align 8
ret void
; CHECK-LABEL: define void @store8
; CHECK: __asan_report_exp_store8{{.*}} i32 42
; CHECK: ret void
}
define void @store16(i128* %p) sanitize_address {
entry:
store i128 1, i128* %p, align 16
ret void
; CHECK-LABEL: define void @store16
; CHECK: __asan_report_exp_store16{{.*}} i32 42
; CHECK: ret void
}
define void @storeN(i48* %p) sanitize_address {
entry:
store i48 1, i48* %p, align 1
ret void
; CHECK-LABEL: define void @storeN
; CHECK: __asan_report_exp_store_n{{.*}} i32 42
; CHECK: ret void
}