[tsan] make sure memset/memcpy/memmove are not inlined in tsan mode
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@178230 91177308-0d34-0410-b5e6-96231b3b80d8
parent 21fb0193b2
commit f464481db0
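
In effect, the pass rewrites memory intrinsics into ordinary libc calls that the TSan runtime can intercept. A minimal before/after sketch at the IR level, based on the tests added below (assuming a 64-bit target, so the size operand is i64):

; before the tsan pass
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)

; after the tsan pass: a plain call the runtime can intercept (the result is unused)
call i8* @memcpy(i8* %x, i8* %y, i64 16)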
@@ -30,6 +30,7 @@
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Metadata.h"
@@ -56,6 +57,9 @@ static cl::opt<bool> ClInstrumentFuncEntryExit(
 static cl::opt<bool> ClInstrumentAtomics(
     "tsan-instrument-atomics", cl::init(true),
     cl::desc("Instrument atomics"), cl::Hidden);
+static cl::opt<bool> ClInstrumentMemIntrinsics(
+    "tsan-instrument-memintrinsics", cl::init(true),
+    cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
+
 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
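
Like the neighboring options, the new flag is hidden and defaults to true; it can be flipped off when running the pass through opt, e.g. (the exact invocation is an assumption about the usual opt workflow, not part of this commit):

opt -tsan -tsan-instrument-memintrinsics=0 -S input.ll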
@@ -86,12 +90,14 @@ struct ThreadSanitizer : public FunctionPass {
   void initializeCallbacks(Module &M);
   bool instrumentLoadOrStore(Instruction *I);
   bool instrumentAtomic(Instruction *I);
+  bool instrumentMemIntrinsic(Instruction *I);
   void chooseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local,
                                       SmallVectorImpl<Instruction*> &All);
   bool addrPointsToConstantData(Value *Addr);
   int getMemoryAccessFuncIndex(Value *Addr);
+
   DataLayout *TD;
   Type *IntptrTy;
   SmallString<64> BlacklistFile;
   OwningPtr<BlackList> BL;
   IntegerType *OrdTy;
@@ -110,6 +116,7 @@ struct ThreadSanitizer : public FunctionPass {
   Function *TsanAtomicSignalFence;
   Function *TsanVptrUpdate;
   Function *TsanVptrLoad;
+  Function *MemmoveFn, *MemcpyFn, *MemsetFn;
 };
 }  // namespace

@@ -204,6 +211,16 @@ void ThreadSanitizer::initializeCallbacks(Module &M) {
       "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, NULL));
   TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction(
       "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, NULL));
+
+  MemmoveFn = checkInterfaceFunction(M.getOrInsertFunction(
+    "memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+    IRB.getInt8PtrTy(), IntptrTy, NULL));
+  MemcpyFn = checkInterfaceFunction(M.getOrInsertFunction(
+    "memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+    IntptrTy, NULL));
+  MemsetFn = checkInterfaceFunction(M.getOrInsertFunction(
+    "memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
+    IntptrTy, NULL));
 }

 bool ThreadSanitizer::doInitialization(Module &M) {
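
Once initializeCallbacks has run, the module effectively carries declarations like the following (a sketch assuming a 64-bit target where IntptrTy is i64; the real declarations are produced by getOrInsertFunction above). Note that memset's fill value is declared i32 to match libc's int parameter:

declare i8* @memmove(i8*, i8*, i64)
declare i8* @memcpy(i8*, i8*, i64)
declare i8* @memset(i8*, i32, i64)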
@@ -214,6 +231,7 @@ bool ThreadSanitizer::doInitialization(Module &M) {

   // Always insert a call to __tsan_init into the module's CTORs.
   IRBuilder<> IRB(M.getContext());
+  IntptrTy = IRB.getIntPtrTy(TD);
   Value *TsanInit = M.getOrInsertFunction("__tsan_init",
                                           IRB.getVoidTy(), NULL);
   appendToGlobalCtors(M, cast<Function>(TsanInit), 0);
@@ -313,6 +331,7 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
   SmallVector<Instruction*, 8> AllLoadsAndStores;
   SmallVector<Instruction*, 8> LocalLoadsAndStores;
   SmallVector<Instruction*, 8> AtomicAccesses;
+  SmallVector<Instruction*, 8> MemIntrinCalls;
   bool Res = false;
   bool HasCalls = false;

@@ -329,6 +348,8 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
       else if (isa<ReturnInst>(BI))
         RetVec.push_back(BI);
       else if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
+        if (isa<MemIntrinsic>(BI))
+          MemIntrinCalls.push_back(BI);
         HasCalls = true;
         chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
       }
@@ -352,6 +373,11 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
       Res |= instrumentAtomic(AtomicAccesses[i]);
     }

+  if (ClInstrumentMemIntrinsics)
+    for (size_t i = 0, n = MemIntrinCalls.size(); i < n; ++i) {
+      Res |= instrumentMemIntrinsic(MemIntrinCalls[i]);
+    }
+
   // Instrument function entry/exit points if there were instrumented accesses.
   if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
     IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
@@ -433,6 +459,32 @@ static ConstantInt *createFailOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
   return IRB->getInt32(v);
 }

+// If a memset intrinsic gets inlined by the code gen, we will miss races on it.
+// So, we either need to ensure the intrinsic is not inlined, or instrument it.
+// We do not instrument memset/memmove/memcpy intrinsics (too complicated),
+// instead we simply replace them with regular function calls, which are then
+// intercepted by the run-time.
+// Since tsan is running after everyone else, the calls should not be
+// replaced back with intrinsics. If that becomes wrong at some point,
+// we will need to call e.g. __tsan_memset to avoid the intrinsics.
+bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
+  IRBuilder<> IRB(I);
+  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
+    IRB.CreateCall3(MemsetFn,
+      IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
+      IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
+      IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
+    I->eraseFromParent();
+  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
+    IRB.CreateCall3(isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
+      IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
+      IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
+      IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
+    I->eraseFromParent();
+  }
+  return false;
+}
+
 // Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
 // standards. For background see C++11 standard. A slightly older, publically
 // available draft of the standard (not entirely up-to-date, but close enough
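
Concretely, the memset branch zero-extends the i8 fill value to i32 and casts the length to IntptrTy before emitting the call; as an IR-level sketch mirroring the test below (64-bit target assumed, constants folded by the cast):

; before
tail call void @llvm.memset.p0i8.i64(i8* %x, i8 77, i64 16, i32 4, i1 false)

; after
call i8* @memset(i8* %x, i32 77, i64 16)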
@@ -20,3 +20,36 @@ entry:
 ; CHECK: ret i32


+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
+declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+
+
+; Check that tsan converts mem intrinsics back to function calls.
+
+define void @MemCpyTest(i8* nocapture %x, i8* nocapture %y) {
+entry:
+  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
+  ret void
+; CHECK: define void @MemCpyTest
+; CHECK: call i8* @memcpy
+; CHECK: ret void
+}
+
+define void @MemMoveTest(i8* nocapture %x, i8* nocapture %y) {
+entry:
+  tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
+  ret void
+; CHECK: define void @MemMoveTest
+; CHECK: call i8* @memmove
+; CHECK: ret void
+}
+
+define void @MemSetTest(i8* nocapture %x) {
+entry:
+  tail call void @llvm.memset.p0i8.i64(i8* %x, i8 77, i64 16, i32 4, i1 false)
+  ret void
+; CHECK: define void @MemSetTest
+; CHECK: call i8* @memset
+; CHECK: ret void
+}
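
For reference, FileCheck tests like these are driven by a lit RUN line at the top of the file; the header is not part of this hunk, so the exact line is an assumption, but the standard form for this pass is:

; RUN: opt < %s -tsan -S | FileCheck %s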