Commit 2076af0184:
Do not instrument a read if a write to the same temp follows in the same BB. Also add stats printing. On SPEC CPU2006 this optimization saves roughly 4% of instrumented reads (which is 3% of all instrumented accesses): Writes: 161216, Reads: 446458, Reads-before-write: 18295.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@154418 91177308-0d34-0410-b5e6-96231b3b80d8
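The rationale, presumably, is that when a read of some address is followed by a write to the same address in the same basic block, with nothing in between that could synchronize, any race on the read is also a race on the write, so reporting the write alone is enough. Below is a minimal C++ sketch of such filtering; it is not the actual ThreadSanitizer.cpp code, and the function name, parameters, and the assumption that accesses are batched per basic block (restarting at calls) are illustrative only.

// Sketch only (hypothetical helper, not the actual ThreadSanitizer.cpp code):
// walk the loads/stores of one basic block in reverse, remember every address
// that is written, and skip any earlier read of such an address.
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// `LocalAccesses` holds the loads and stores of one basic block in program
// order (assumed to be flushed at calls); survivors go to `ToInstrument`.
static void chooseAccessesToInstrument(
    const SmallVectorImpl<Instruction *> &LocalAccesses,
    SmallVectorImpl<Instruction *> &ToInstrument) {
  SmallPtrSet<Value *, 8> WriteTargets;
  // Reverse order: by the time a load is visited, WriteTargets already holds
  // every address that is stored to later in the same block.
  for (auto It = LocalAccesses.rbegin(), E = LocalAccesses.rend(); It != E; ++It) {
    Instruction *I = *It;
    if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      WriteTargets.insert(SI->getPointerOperand());
      ToInstrument.push_back(SI);
    } else {
      LoadInst *LI = cast<LoadInst>(I);
      if (WriteTargets.count(LI->getPointerOperand()))
        continue; // Read-before-write of the same address: drop the read.
      ToInstrument.push_back(LI);
    }
  }
}

If the per-block batch indeed ends at every call, that would also explain the second test below: the read in @IncrementMeWithCallInBetween stays instrumented because a call separates it from the write.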
; RUN: opt < %s -tsan -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

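; The load of %ptr is immediately followed by a store to %ptr in the same
; basic block, so only the write should be instrumented.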
define void @IncrementMe(i32* nocapture %ptr) nounwind uwtable {
entry:
  %0 = load i32* %ptr, align 4
  %inc = add nsw i32 %0, 1
  store i32 %inc, i32* %ptr, align 4
  ret void
}
; CHECK: define void @IncrementMe
; CHECK-NOT: __tsan_read
; CHECK: __tsan_write
; CHECK: ret void

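; A call between the load and the store keeps the read instrumented: the
; callee may synchronize, so skipping the read would not be safe here.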
define void @IncrementMeWithCallInBetween(i32* nocapture %ptr) nounwind uwtable {
entry:
  %0 = load i32* %ptr, align 4
  %inc = add nsw i32 %0, 1
  call void @foo()
  store i32 %inc, i32* %ptr, align 4
  ret void
}

; CHECK: define void @IncrementMeWithCallInBetween
; CHECK: __tsan_read
; CHECK: __tsan_write
; CHECK: ret void

declare void @foo()