mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2024-12-15 04:30:12 +00:00
7c9c6ed761
Essentially the same as the GEP change in r230786. A similar migration script can be used to update test cases, though a few more test case improvements/changes were required this time around: (r229269-r229278) import fileinput import sys import re pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)") for line in sys.stdin: sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line)) Reviewers: rafael, dexonsmith, grosser Differential Revision: http://reviews.llvm.org/D7649 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@230794 91177308-0d34-0410-b5e6-96231b3b80d8
194 lines
5.2 KiB
LLVM
194 lines
5.2 KiB
LLVM
; Test MemorySanitizer instrumentation of atomic instructions
; (atomicrmw, cmpxchg, atomic load/store) at several origin-tracking levels.
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck %s
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
|
; atomicrmw xchg: store clean shadow, return clean shadow

define i32 @AtomicRmwXchg(i32* %p, i32 %x) sanitize_memory {
entry:
  %0 = atomicrmw xchg i32* %p, i32 %x seq_cst
  ret i32 %0
}

; CHECK: @AtomicRmwXchg
; CHECK: store i32 0,
; CHECK: atomicrmw xchg {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32

|
; atomicrmw max: exactly the same as above

define i32 @AtomicRmwMax(i32* %p, i32 %x) sanitize_memory {
entry:
  %0 = atomicrmw max i32* %p, i32 %x seq_cst
  ret i32 %0
}

; CHECK: @AtomicRmwMax
; CHECK: store i32 0,
; CHECK: atomicrmw max {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32

|
; cmpxchg: the same as above, but also check %a shadow

define i32 @Cmpxchg(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
  %pair = cmpxchg i32* %p, i32 %a, i32 %b seq_cst seq_cst
  %0 = extractvalue { i32, i1 } %pair, 0
  ret i32 %0
}

; CHECK: @Cmpxchg
; CHECK: store { i32, i1 } zeroinitializer,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
; CHECK: cmpxchg {{.*}} seq_cst seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32

|
; relaxed cmpxchg: bump up to "release monotonic"

define i32 @CmpxchgMonotonic(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
  %pair = cmpxchg i32* %p, i32 %a, i32 %b monotonic monotonic
  %0 = extractvalue { i32, i1 } %pair, 0
  ret i32 %0
}

; CHECK: @CmpxchgMonotonic
; CHECK: store { i32, i1 } zeroinitializer,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
; CHECK: cmpxchg {{.*}} release monotonic
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32

|
; atomic load: preserve alignment, load shadow value after app value

define i32 @AtomicLoad(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32, i32* %p seq_cst, align 16
  ret i32 %0
}

; CHECK: @AtomicLoad
; CHECK: load atomic i32, i32* {{.*}} seq_cst, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32

|
; atomic load: preserve alignment, load shadow value after app value

define i32 @AtomicLoadAcquire(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32, i32* %p acquire, align 16
  ret i32 %0
}

; CHECK: @AtomicLoadAcquire
; CHECK: load atomic i32, i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32

|
; atomic load monotonic: bump up to load acquire

define i32 @AtomicLoadMonotonic(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32, i32* %p monotonic, align 16
  ret i32 %0
}

; CHECK: @AtomicLoadMonotonic
; CHECK: load atomic i32, i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32

|
; atomic load unordered: bump up to load acquire

define i32 @AtomicLoadUnordered(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32, i32* %p unordered, align 16
  ret i32 %0
}

; CHECK: @AtomicLoadUnordered
; CHECK: load atomic i32, i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32

|
; atomic store: preserve alignment, store clean shadow value before app value

define void @AtomicStore(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p seq_cst, align 16
  ret void
}

; CHECK: @AtomicStore
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p seq_cst, align 16
; CHECK: ret void

|
; atomic store: preserve alignment, store clean shadow value before app value

define void @AtomicStoreRelease(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p release, align 16
  ret void
}

; CHECK: @AtomicStoreRelease
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void

|
; atomic store monotonic: bumped up to store release

define void @AtomicStoreMonotonic(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p monotonic, align 16
  ret void
}

; CHECK: @AtomicStoreMonotonic
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void

|
; atomic store unordered: bumped up to store release

define void @AtomicStoreUnordered(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p unordered, align 16
  ret void
}

; CHECK: @AtomicStoreUnordered
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void