; Test MemorySanitizer instrumentation of atomic operations.
; Address checks are disabled so only shadow propagation is verified.
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; atomicrmw xchg: store clean shadow, return clean shadow

define i32 @AtomicRmwXchg(i32* %p, i32 %x) sanitize_memory {
entry:
  %0 = atomicrmw xchg i32* %p, i32 %x seq_cst
  ret i32 %0
}

; CHECK: @AtomicRmwXchg
; CHECK: store i32 0,
; CHECK: atomicrmw xchg {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomicrmw max: exactly the same as above

define i32 @AtomicRmwMax(i32* %p, i32 %x) sanitize_memory {
entry:
  %0 = atomicrmw max i32* %p, i32 %x seq_cst
  ret i32 %0
}

; CHECK: @AtomicRmwMax
; CHECK: store i32 0,
; CHECK: atomicrmw max {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
; cmpxchg: the same as above, but also check %a shadow

define i32 @Cmpxchg(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
  %pair = cmpxchg i32* %p, i32 %a, i32 %b seq_cst seq_cst
  %0 = extractvalue { i32, i1 } %pair, 0
  ret i32 %0
}

; CHECK: @Cmpxchg
; CHECK: store { i32, i1 } zeroinitializer,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
; CHECK: cmpxchg {{.*}} seq_cst seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
; relaxed cmpxchg: bump up to "release monotonic"

define i32 @CmpxchgMonotonic(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
  %pair = cmpxchg i32* %p, i32 %a, i32 %b monotonic monotonic
  %0 = extractvalue { i32, i1 } %pair, 0
  ret i32 %0
}

; CHECK: @CmpxchgMonotonic
; CHECK: store { i32, i1 } zeroinitializer,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
; CHECK: cmpxchg {{.*}} release monotonic
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic load: preserve alignment, load shadow value after app value

define i32 @AtomicLoad(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32* %p seq_cst, align 16
  ret i32 %0
}

; CHECK: @AtomicLoad
; CHECK: load atomic i32* {{.*}} seq_cst, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic load: preserve alignment, load shadow value after app value

define i32 @AtomicLoadAcquire(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32* %p acquire, align 16
  ret i32 %0
}

; CHECK: @AtomicLoadAcquire
; CHECK: load atomic i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic load monotonic: bump up to load acquire

define i32 @AtomicLoadMonotonic(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32* %p monotonic, align 16
  ret i32 %0
}

; CHECK: @AtomicLoadMonotonic
; CHECK: load atomic i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic load unordered: bump up to load acquire

define i32 @AtomicLoadUnordered(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32* %p unordered, align 16
  ret i32 %0
}

; CHECK: @AtomicLoadUnordered
; CHECK: load atomic i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic store: preserve alignment, store clean shadow value before app value

define void @AtomicStore(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p seq_cst, align 16
  ret void
}

; CHECK: @AtomicStore
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p seq_cst, align 16
; CHECK: ret void
; atomic store: preserve alignment, store clean shadow value before app value

define void @AtomicStoreRelease(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p release, align 16
  ret void
}

; CHECK: @AtomicStoreRelease
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void
; atomic store monotonic: bumped up to store release

define void @AtomicStoreMonotonic(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p monotonic, align 16
  ret void
}

; CHECK: @AtomicStoreMonotonic
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void
; atomic store unordered: bumped up to store release

define void @AtomicStoreUnordered(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p unordered, align 16
  ret void
}

; CHECK: @AtomicStoreUnordered
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void