[msan] Handling of atomic load/store, atomic rmw, cmpxchg.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@191287 91177308-0d34-0410-b5e6-96231b3b80d8
Evgeniy Stepanov 2013-09-24 11:20:27 +00:00
parent ae1fb8fc19
commit ecf0fcd2b1
3 changed files with 289 additions and 6 deletions

@@ -66,6 +66,31 @@
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needlessly overwriting the origin of the 4-byte region on
/// a short (i.e. 1 byte) clean store, and it is also good for performance.
///
/// Atomic handling.
///
/// Ideally, every atomic store of an application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, atomically
/// storing to two disjoint locations cannot be done without a severe slowdown.
///
/// Therefore, we implement an approximation that may err on the safe side.
/// In this implementation, every atomically accessed location in the program
/// may only change from (partially) uninitialized to fully initialized, but
/// not the other way around. We load the shadow _after_ the application load,
/// and we store the shadow _before_ the app store. Also, if the application
/// store is atomic, we always store a clean shadow. This way, if the store-load
/// pair constitutes a happens-before arc, the shadow store and load are
/// correctly ordered such that the load will observe either the value that was
/// stored, or some later value (which is always clean).
///
/// This does not work very well with Compare-And-Swap (CAS) and
/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
/// would have to store the new shadow before the app operation and load the
/// shadow after the app operation. Computers don't work this way. The current
/// implementation ignores the load aspect of CAS/RMW and always returns a clean
/// value. It implements the store part as a simple atomic store of a clean
/// shadow.
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "msan"
@@ -487,7 +512,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRBuilder<> IRB(&I);
Value *Val = I.getValueOperand();
Value *Addr = I.getPointerOperand();
Value *Shadow = getShadow(Val);
Value *Shadow = I.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
StoreInst *NewSI =
@@ -498,6 +523,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (ClCheckAccessAddress)
insertCheck(Addr, &I);
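// For an atomic store, strengthen the ordering to at least release so that
// the clean shadow store emitted before it is ordered as described in the
// atomics comment at the top of the file.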
if (I.isAtomic())
I.setOrdering(addReleaseOrdering(I.getOrdering()));
if (MS.TrackOrigins) {
unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
if (ClStoreCleanOrigin || isa<StructType>(Shadow->getType())) {
@@ -876,6 +904,38 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
}
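/// Strengthen an atomic ordering so that it includes release semantics:
/// unordered and monotonic become release, acquire becomes acq_rel;
/// release and seq_cst are left unchanged. Used for atomic stores, RMW and
/// cmpxchg instructions.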
AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
switch (a) {
case NotAtomic:
return NotAtomic;
case Unordered:
case Monotonic:
case Release:
return Release;
case Acquire:
case AcquireRelease:
return AcquireRelease;
case SequentiallyConsistent:
return SequentiallyConsistent;
}
llvm_unreachable("Unknown ordering");
}
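/// Strengthen an atomic ordering so that it includes acquire semantics:
/// unordered and monotonic become acquire, release becomes acq_rel;
/// acquire and seq_cst are left unchanged. Used for atomic loads.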
AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
switch (a) {
case NotAtomic:
return NotAtomic;
case Unordered:
case Monotonic:
case Acquire:
return Acquire;
case Release:
case AcquireRelease:
return AcquireRelease;
case SequentiallyConsistent:
return SequentiallyConsistent;
}
llvm_unreachable("Unknown ordering");
}
// ------------------- Visitors.
/// \brief Instrument LoadInst
@@ -884,7 +944,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// Optionally, checks that the load address is fully defined.
void visitLoadInst(LoadInst &I) {
assert(I.getType()->isSized() && "Load type must have size");
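// Insert the instrumentation after the load itself, so that the shadow is
// read after the application value (this is what makes the atomic-load
// handling described at the top of the file work).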
IRBuilder<> IRB(&I);
IRBuilder<> IRB(I.getNextNode());
Type *ShadowTy = getShadowTy(&I);
Value *Addr = I.getPointerOperand();
if (LoadShadow) {
@@ -898,6 +958,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (ClCheckAccessAddress)
insertCheck(I.getPointerOperand(), &I);
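// For an atomic load, strengthen the ordering to at least acquire so that
// the shadow load emitted after it is ordered as described in the atomics
// comment at the top of the file.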
if (I.isAtomic())
I.setOrdering(addAcquireOrdering(I.getOrdering()));
if (MS.TrackOrigins) {
if (LoadShadow) {
unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
@@ -917,6 +980,37 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
StoreList.push_back(&I);
}
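/// Common instrumentation for atomicrmw and cmpxchg: store a clean shadow
/// for the memory location before the operation and return a clean shadow
/// for the result (the load aspect of CAS/RMW is ignored; see the atomics
/// comment at the top of the file).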
void handleCASOrRMW(Instruction &I) {
assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
IRBuilder<> IRB(&I);
Value *Addr = I.getOperand(0);
Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);
if (ClCheckAccessAddress)
insertCheck(Addr, &I);
// Only test the conditional argument of a cmpxchg instruction.
// The other argument can potentially be uninitialized, but we cannot
// detect this situation reliably without possible false positives.
if (isa<AtomicCmpXchgInst>(I))
insertCheck(I.getOperand(1), &I);
IRB.CreateStore(getCleanShadow(&I), ShadowPtr);
setShadow(&I, getCleanShadow(&I));
}
void visitAtomicRMWInst(AtomicRMWInst &I) {
handleCASOrRMW(I);
I.setOrdering(addReleaseOrdering(I.getOrdering()));
}
void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
handleCASOrRMW(I);
I.setOrdering(addReleaseOrdering(I.getOrdering()));
}
// Vector manipulation.
void visitExtractElementInst(ExtractElementInst &I) {
insertCheck(I.getOperand(1), &I);

@@ -0,0 +1,189 @@
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; atomicrmw xchg: store clean shadow, return clean shadow
define i32 @AtomicRmwXchg(i32* %p, i32 %x) sanitize_memory {
entry:
%0 = atomicrmw xchg i32* %p, i32 %x seq_cst
ret i32 %0
}
; CHECK: @AtomicRmwXchg
; CHECK: store i32 0,
; CHECK: atomicrmw xchg {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomicrmw max: exactly the same as above
define i32 @AtomicRmwMax(i32* %p, i32 %x) sanitize_memory {
entry:
%0 = atomicrmw max i32* %p, i32 %x seq_cst
ret i32 %0
}
; CHECK: @AtomicRmwMax
; CHECK: store i32 0,
; CHECK: atomicrmw max {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
; cmpxchg: the same as above, but also check %a shadow
define i32 @Cmpxchg(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
%0 = cmpxchg i32* %p, i32 %a, i32 %b seq_cst
ret i32 %0
}
; CHECK: @Cmpxchg
; CHECK: store i32 0,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
; CHECK: cmpxchg {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
; relaxed cmpxchg: bump up to "release"
define i32 @CmpxchgMonotonic(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
%0 = cmpxchg i32* %p, i32 %a, i32 %b monotonic
ret i32 %0
}
; CHECK: @CmpxchgMonotonic
; CHECK: store i32 0,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
; CHECK: cmpxchg {{.*}} release
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic load: preserve alignment, load shadow value after app value
define i32 @AtomicLoad(i32* %p) sanitize_memory {
entry:
%0 = load atomic i32* %p seq_cst, align 16
ret i32 %0
}
; CHECK: @AtomicLoad
; CHECK: load atomic i32* {{.*}} seq_cst, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic load: preserve alignment, load shadow value after app value
define i32 @AtomicLoadAcquire(i32* %p) sanitize_memory {
entry:
%0 = load atomic i32* %p acquire, align 16
ret i32 %0
}
; CHECK: @AtomicLoadAcquire
; CHECK: load atomic i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic load monotonic: bump up to load acquire
define i32 @AtomicLoadMonotonic(i32* %p) sanitize_memory {
entry:
%0 = load atomic i32* %p monotonic, align 16
ret i32 %0
}
; CHECK: @AtomicLoadMonotonic
; CHECK: load atomic i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic load unordered: bump up to load acquire
define i32 @AtomicLoadUnordered(i32* %p) sanitize_memory {
entry:
%0 = load atomic i32* %p unordered, align 16
ret i32 %0
}
; CHECK: @AtomicLoadUnordered
; CHECK: load atomic i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic store: preserve alignment, store clean shadow value before app value
define void @AtomicStore(i32* %p, i32 %x) sanitize_memory {
entry:
store atomic i32 %x, i32* %p seq_cst, align 16
ret void
}
; CHECK: @AtomicStore
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p seq_cst, align 16
; CHECK: ret void
; atomic store: preserve alignment, store clean shadow value before app value
define void @AtomicStoreRelease(i32* %p, i32 %x) sanitize_memory {
entry:
store atomic i32 %x, i32* %p release, align 16
ret void
}
; CHECK: @AtomicStoreRelease
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void
; atomic store monotonic: bumped up to store release
define void @AtomicStoreMonotonic(i32* %p, i32 %x) sanitize_memory {
entry:
store atomic i32 %x, i32* %p monotonic, align 16
ret void
}
; CHECK: @AtomicStoreMonotonic
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void
; atomic store unordered: bumped up to store release
define void @AtomicStoreUnordered(i32* %p, i32 %x) sanitize_memory {
entry:
store atomic i32 %x, i32* %p unordered, align 16
ret void
}
; CHECK: @AtomicStoreUnordered
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void

@@ -442,8 +442,8 @@ define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
}
; CHECK: @ShadowLoadAlignmentLarge
; CHECK: load i32* {{.*}} align 64
; CHECK: load volatile i32* {{.*}} align 64
; CHECK: load i32* {{.*}} align 64
; CHECK: ret i32
define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
@@ -453,14 +453,14 @@ define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
}
; CHECK: @ShadowLoadAlignmentSmall
; CHECK: load i32* {{.*}} align 2
; CHECK: load volatile i32* {{.*}} align 2
; CHECK: load i32* {{.*}} align 2
; CHECK: ret i32
; CHECK-ORIGINS: @ShadowLoadAlignmentSmall
; CHECK-ORIGINS: load volatile i32* {{.*}} align 2
; CHECK-ORIGINS: load i32* {{.*}} align 2
; CHECK-ORIGINS: load i32* {{.*}} align 4
; CHECK-ORIGINS: load volatile i32* {{.*}} align 2
; CHECK-ORIGINS: ret i32
@@ -600,8 +600,8 @@ define <8 x i8*> @VectorOfPointers(<8 x i8*>* %p) nounwind uwtable sanitize_memo
}
; CHECK: @VectorOfPointers
; CHECK: load <8 x i64>*
; CHECK: load <8 x i8*>*
; CHECK: load <8 x i64>*
; CHECK: store <8 x i64> {{.*}} @__msan_retval_tls
; CHECK: ret <8 x i8*>