llvm-6502/test/CodeGen/ARM/swift-atomics.ll
Tim Northover a10c01a6c6 ARM: relax the atomic release barrier to "dmb ishst" on Swift
Swift cores implement store barriers that are stronger than the ARM
specification but weaker than general barriers. They are, in fact, just about
enough to provide the ordering needed for atomic operations with release
semantics.

This patch makes use of that quirk.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@185527 91177308-0d34-0410-b5e6-96231b3b80d8
2013-07-03 09:20:36 +00:00
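
As a concrete illustration (a minimal sketch added here, not part of the original commit; the function name @release_store_sketch and the r0/r1 register comments are assumptions based on the AAPCS calling convention for this signature), the relaxation applies to IR like the following:

; A plain release store; under AAPCS, %p arrives in r0 and %v in r1.
define void @release_store_sketch(i32* %p, i32 %v) {
  ; Generic ARMv7 lowering keeps the full barrier:  dmb ish,   then str r1, [r0]
  ; With this patch, -mcpu=swift can relax it to:   dmb ishst, then str r1, [r0]
  store atomic i32 %v, i32* %p release, align 4
  ret void
}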

; RUN: llc -mtriple=armv7-apple-ios6.0 -mcpu=swift < %s | FileCheck %s
; RUN: llc -mtriple=armv7-apple-ios6.0 < %s | FileCheck %s --check-prefix=CHECK-STRICT-ATOMIC

; Release operations only need the store barrier provided by a "dmb ishst".
define void @test_store_release(i32* %p, i32 %v) {
; CHECK: test_store_release:
; CHECK: dmb ishst
; CHECK: str
; CHECK-STRICT-ATOMIC: dmb {{ish$}}
  store atomic i32 %v, i32* %p release, align 4
  ret void
}

; However, if sequential consistency is needed, *something* must ensure that a
; release followed by an acquire does not get reordered. In that case a
; "dmb ishst" is not adequate.
define i32 @test_seq_cst(i32* %p, i32 %v) {
; CHECK: test_seq_cst:
; CHECK: dmb ishst
; CHECK: str
; CHECK: dmb {{ish$}}
; CHECK: ldr
; CHECK: dmb {{ish$}}
; CHECK-STRICT-ATOMIC: dmb {{ish$}}
; CHECK-STRICT-ATOMIC: dmb {{ish$}}
  store atomic i32 %v, i32* %p seq_cst, align 4
  %val = load atomic i32* %p seq_cst, align 4
  ret i32 %val
}
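
; A sketch of the pattern this full barrier exists to rule out (the classic
; store-buffering litmus test; illustrative only, not exercised by the
; RUN/CHECK lines above, and the names @sb_thread1, @sb_thread2, @x and @y are
; made up). With one thread running @sb_thread1 and another running
; @sb_thread2, sequential consistency forbids both loads returning 0, which
; means each store must be ordered before the *following* load -- exactly the
; store->load ordering that a store-only "dmb ishst" cannot provide.
@x = global i32 0
@y = global i32 0

define i32 @sb_thread1() {
  store atomic i32 1, i32* @x seq_cst, align 4
  %r = load atomic i32* @y seq_cst, align 4
  ret i32 %r
}

define i32 @sb_thread2() {
  store atomic i32 1, i32* @y seq_cst, align 4
  %r = load atomic i32* @x seq_cst, align 4
  ret i32 %r
}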

; Also, pure acquire operations should definitely not have an ishst barrier.
define i32 @test_acq(i32* %addr) {
; CHECK: test_acq:
; CHECK: ldr
; CHECK: dmb {{ish$}}
; CHECK-STRICT-ATOMIC: dmb {{ish$}}
  %val = load atomic i32* %addr acquire, align 4
  ret i32 %val
}
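
; A further sketch (added for illustration, not in the original test and not
; covered by the RUN/CHECK lines above; the function name is hypothetical):
; since the barrier choice is driven by the release ordering, an explicit
; release fence presumably goes through the same lowering, so on Swift it
; would likewise be expected to become "dmb ishst" rather than a full
; "dmb ish".
define void @fence_release_sketch() {
  fence release
  ret void
}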