SelectionDAG: skip barriers for unordered atomic operations

Unordered is strictly weaker than monotonic, so if the latter doesn't have any
barriers then the former certainly shouldn't.

rdar://problem/16548260

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@209901 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Tim Northover
2014-05-30 14:41:51 +00:00
parent 645c5b94e2
commit 3a45d4c621
2 changed files with 27 additions and 14 deletions

View File

@@ -3570,12 +3570,12 @@ static SDValue InsertFenceForAtomic(SDValue Chain, AtomicOrdering Order,
   if (Before) {
     if (Order == AcquireRelease || Order == SequentiallyConsistent)
       Order = Release;
-    else if (Order == Acquire || Order == Monotonic)
+    else if (Order == Acquire || Order == Monotonic || Order == Unordered)
       return Chain;
   } else {
     if (Order == AcquireRelease)
       Order = Acquire;
-    else if (Order == Release || Order == Monotonic)
+    else if (Order == Release || Order == Monotonic || Order == Unordered)
       return Chain;
   }
   SDValue Ops[3];

View File

@@ -5,13 +5,13 @@
 ; RUN: llc < %s -mtriple=armv4-apple-ios | FileCheck %s -check-prefix=ARMV4
 define void @test1(i32* %ptr, i32 %val1) {
-; ARM: test1
+; ARM-LABEL: test1
 ; ARM: dmb {{ish$}}
 ; ARM-NEXT: str
 ; ARM-NEXT: dmb {{ish$}}
-; THUMBONE: test1
+; THUMBONE-LABEL: test1
 ; THUMBONE: __sync_lock_test_and_set_4
-; THUMBTWO: test1
+; THUMBTWO-LABEL: test1
 ; THUMBTWO: dmb {{ish$}}
 ; THUMBTWO-NEXT: str
 ; THUMBTWO-NEXT: dmb {{ish$}}
@@ -20,12 +20,12 @@ define void @test1(i32* %ptr, i32 %val1) {
 }
 define i32 @test2(i32* %ptr) {
-; ARM: test2
+; ARM-LABEL: test2
 ; ARM: ldr
 ; ARM-NEXT: dmb {{ish$}}
-; THUMBONE: test2
+; THUMBONE-LABEL: test2
 ; THUMBONE: __sync_val_compare_and_swap_4
-; THUMBTWO: test2
+; THUMBTWO-LABEL: test2
 ; THUMBTWO: ldr
 ; THUMBTWO-NEXT: dmb {{ish$}}
 %val = load atomic i32* %ptr seq_cst, align 4
@@ -33,22 +33,35 @@ define i32 @test2(i32* %ptr) {
 }
 define void @test3(i8* %ptr1, i8* %ptr2) {
-; ARM: test3
+; ARM-LABEL: test3
+; ARM-NOT: dmb
 ; ARM: ldrb
+; ARM-NOT: dmb
 ; ARM: strb
-; THUMBTWO: test3
+; ARM-NOT: dmb
+; ARM: bx lr
+; THUMBTWO-LABEL: test3
+; THUMBTWO-NOT: dmb
 ; THUMBTWO: ldrb
+; THUMBTWO-NOT: dmb
 ; THUMBTWO: strb
-; THUMBONE: test3
+; THUMBTWO-NOT: dmb
+; THUMBTWO: bx lr
+; THUMBONE-LABEL: test3
+; THUMBONE-NOT: dmb
 ; THUMBONE: ldrb
+; THUMBONE-NOT: dmb
 ; THUMBONE: strb
+; THUMBONE-NOT: dmb
 %val = load atomic i8* %ptr1 unordered, align 1
 store atomic i8 %val, i8* %ptr2 unordered, align 1
 ret void
 }
 define void @test4(i8* %ptr1, i8* %ptr2) {
-; THUMBONE: test4
+; THUMBONE-LABEL: test4
 ; THUMBONE: ___sync_val_compare_and_swap_1
 ; THUMBONE: ___sync_lock_test_and_set_1
 %val = load atomic i8* %ptr1 seq_cst, align 1
@@ -57,14 +70,14 @@ define void @test4(i8* %ptr1, i8* %ptr2) {
 }
 define i64 @test_old_load_64bit(i64* %p) {
-; ARMV4: test_old_load_64bit
+; ARMV4-LABEL: test_old_load_64bit
 ; ARMV4: ___sync_val_compare_and_swap_8
 %1 = load atomic i64* %p seq_cst, align 8
 ret i64 %1
 }
 define void @test_old_store_64bit(i64* %p, i64 %v) {
-; ARMV4: test_old_store_64bit
+; ARMV4-LABEL: test_old_store_64bit
 ; ARMV4: ___sync_lock_test_and_set_8
 store atomic i64 %v, i64* %p seq_cst, align 8
 ret void