IR: add a second ordering operand to cmpxchg for the failure case

The syntax for "cmpxchg" should now look something like:

	cmpxchg i32* %addr, i32 42, i32 3 acquire monotonic

where the second ordering argument gives the required semantics in the case
that no exchange takes place. It should be no stronger than the first ordering
constraint and cannot be either "release" or "acq_rel" (since no store will
have taken place).
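
For illustration only (reusing the operands from the example above; the
"%ok" names are hypothetical and not taken from any test), combinations
such as these satisfy the rule, whereas e.g. "monotonic seq_cst" (failure
stronger than success) or a failure ordering of "release" would be invalid:

	%ok1 = cmpxchg i32* %addr, i32 42, i32 3 seq_cst seq_cst   ; failure equals success
	%ok2 = cmpxchg i32* %addr, i32 42, i32 3 acq_rel acquire   ; acquire is no stronger than acq_rel
	%ok3 = cmpxchg i32* %addr, i32 42, i32 3 release monotonic ; monotonic is weaker than release and carries no release semantics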

rdar://problem/15996804

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@203559 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Tim Northover
Date:   2014-03-11 10:48:52 +00:00
Commit: ca396e391e
Parent: fb411c8b8c

60 changed files with 489 additions and 288 deletions

@@ -223,69 +223,69 @@ define void @cmpxchg(i32* %ptr,i32 %cmp,i32 %new){
 entry:
 ;cmpxchg [volatile] <ty>* <pointer>, <ty> <cmp>, <ty> <new> [singlethread] <ordering>
-; CHECK: %res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic
-%res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic
+; CHECK: %res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
+%res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
-; CHECK-NEXT: %res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic
-%res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic
+; CHECK-NEXT: %res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
+%res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
-; CHECK-NEXT: %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic
-%res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic
+; CHECK-NEXT: %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
+%res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
-; CHECK-NEXT: %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic
-%res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic
+; CHECK-NEXT: %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
+%res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
-; CHECK-NEXT: %res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire
-%res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire
+; CHECK-NEXT: %res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire acquire
+%res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire acquire
-; CHECK-NEXT: %res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire
-%res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire
+; CHECK-NEXT: %res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire acquire
+%res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire acquire
-; CHECK-NEXT: %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire
-%res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire
+; CHECK-NEXT: %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
+%res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
-; CHECK-NEXT: %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire
-%res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire
+; CHECK-NEXT: %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
+%res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
-; CHECK-NEXT: %res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release
-%res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release
+; CHECK-NEXT: %res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release monotonic
+%res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release monotonic
-; CHECK-NEXT: %res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release
-%res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release
+; CHECK-NEXT: %res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic
+%res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic
-; CHECK-NEXT: %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release
-%res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release
+; CHECK-NEXT: %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
+%res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
-; CHECK-NEXT: %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release
-%res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release
+; CHECK-NEXT: %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
+%res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
-; CHECK-NEXT: %res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel
-%res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel
+; CHECK-NEXT: %res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
+%res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
-; CHECK-NEXT: %res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel
-%res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel
+; CHECK-NEXT: %res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
+%res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
-; CHECK-NEXT: %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel
-%res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel
+; CHECK-NEXT: %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
+%res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
-; CHECK-NEXT: %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel
-%res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel
+; CHECK-NEXT: %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
+%res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
-; CHECK-NEXT: %res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst
-%res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst
+; CHECK-NEXT: %res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+%res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
-; CHECK-NEXT: %res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst
-%res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst
+; CHECK-NEXT: %res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+%res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
-; CHECK-NEXT: %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst
-%res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst
+; CHECK-NEXT: %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
+%res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
-; CHECK-NEXT: %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst
-%res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst
+; CHECK-NEXT: %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
+%res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
 ret void
 }