; RUN: llc < %s -march=xcore | FileCheck %s
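; Each fence below (acquire, release, acq_rel and seq_cst) is expected to
; lower to a single #MEMBARRIER pseudo-instruction.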
; CHECK-LABEL: atomic_fence
; CHECK: #MEMBARRIER
; CHECK: #MEMBARRIER
; CHECK: #MEMBARRIER
; CHECK: #MEMBARRIER
; CHECK: retsp 0
define void @atomic_fence() nounwind {
entry:
fence acquire
fence release
fence acq_rel
fence seq_cst
ret void
}
@pool = external global i64
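; Naturally aligned atomic loads and stores of i32/i16/i8 should lower to
; plain ldw/ld16s/ld8u and stw/st16/st8, with #MEMBARRIER pseudos supplying
; the acquire/release/seq_cst ordering; monotonic accesses need no barriers.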
define void @atomicloadstore() nounwind {
entry:
; CHECK-LABEL: atomicloadstore
; CHECK: ldw r[[R0:[0-9]+]], dp[pool]
; CHECK-NEXT: ldaw r[[R1:[0-9]+]], dp[pool]
; CHECK-NEXT: #MEMBARRIER
; CHECK-NEXT: ldc r[[R2:[0-9]+]], 0
%0 = load atomic i32, i32* bitcast (i64* @pool to i32*) acquire, align 4
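; r[[R1]] (the address of pool) and r[[R2]] (constant 0) serve as the
; base/index pair for the 16-bit and 8-bit accesses that follow.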
; CHECK-NEXT: ld16s r3, r[[R1]][r[[R2]]]
; CHECK-NEXT: #MEMBARRIER
%1 = load atomic i16, i16* bitcast (i64* @pool to i16*) acquire, align 2
; CHECK-NEXT: ld8u r11, r[[R1]][r[[R2]]]
; CHECK-NEXT: #MEMBARRIER
%2 = load atomic i8, i8* bitcast (i64* @pool to i8*) acquire, align 1
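; The seq_cst loads follow the same pattern as the acquire loads above:
; each load is followed by a trailing #MEMBARRIER.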
; CHECK-NEXT: ldw r4, dp[pool]
; CHECK-NEXT: #MEMBARRIER
%3 = load atomic i32, i32* bitcast (i64* @pool to i32*) seq_cst, align 4
; CHECK-NEXT: ld16s r5, r[[R1]][r[[R2]]]
; CHECK-NEXT: #MEMBARRIER
%4 = load atomic i16, i16* bitcast (i64* @pool to i16*) seq_cst, align 2
; CHECK-NEXT: ld8u r6, r[[R1]][r[[R2]]]
; CHECK-NEXT: #MEMBARRIER
%5 = load atomic i8, i8* bitcast (i64* @pool to i8*) seq_cst, align 1
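; Release stores are preceded by a #MEMBARRIER; seq_cst stores get both a
; leading and a trailing #MEMBARRIER.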
; CHECK-NEXT: #MEMBARRIER
; CHECK-NEXT: stw r[[R0]], dp[pool]
store atomic i32 %0, i32* bitcast (i64* @pool to i32*) release, align 4
; CHECK-NEXT: #MEMBARRIER
; CHECK-NEXT: st16 r3, r[[R1]][r[[R2]]]
store atomic i16 %1, i16* bitcast (i64* @pool to i16*) release, align 2
; CHECK-NEXT: #MEMBARRIER
; CHECK-NEXT: st8 r11, r[[R1]][r[[R2]]]
store atomic i8 %2, i8* bitcast (i64* @pool to i8*) release, align 1
; CHECK-NEXT: #MEMBARRIER
; CHECK-NEXT: stw r4, dp[pool]
; CHECK-NEXT: #MEMBARRIER
store atomic i32 %3, i32* bitcast (i64* @pool to i32*) seq_cst, align 4
; CHECK-NEXT: #MEMBARRIER
; CHECK-NEXT: st16 r5, r[[R1]][r[[R2]]]
; CHECK-NEXT: #MEMBARRIER
store atomic i16 %4, i16* bitcast (i64* @pool to i16*) seq_cst, align 2
; CHECK-NEXT: #MEMBARRIER
; CHECK-NEXT: st8 r6, r[[R1]][r[[R2]]]
; CHECK-NEXT: #MEMBARRIER
store atomic i8 %5, i8* bitcast (i64* @pool to i8*) seq_cst, align 1
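; Monotonic accesses need no barriers: each load/store pair below should be
; emitted back-to-back with no #MEMBARRIER between them.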
; CHECK-NEXT: ldw r[[R0]], dp[pool]
; CHECK-NEXT: stw r[[R0]], dp[pool]
; CHECK-NEXT: ld16s r[[R0]], r[[R1]][r[[R2]]]
; CHECK-NEXT: st16 r[[R0]], r[[R1]][r[[R2]]]
; CHECK-NEXT: ld8u r[[R0]], r[[R1]][r[[R2]]]
; CHECK-NEXT: st8 r[[R0]], r[[R1]][r[[R2]]]
%6 = load atomic i32, i32* bitcast (i64* @pool to i32*) monotonic, align 4
store atomic i32 %6, i32* bitcast (i64* @pool to i32*) monotonic, align 4
%7 = load atomic i16, i16* bitcast (i64* @pool to i16*) monotonic, align 2
store atomic i16 %7, i16* bitcast (i64* @pool to i16*) monotonic, align 2
%8 = load atomic i8, i8* bitcast (i64* @pool to i8*) monotonic, align 1
store atomic i8 %8, i8* bitcast (i64* @pool to i8*) monotonic, align 1
ret void
}