This commit adds a weak variant of the cmpxchg operation, as described in C++11. A cmpxchg instruction with this modifier is permitted to fail to store, even if the comparison indicated it should.

As a result, cmpxchg instructions must return a flag indicating success in addition to their original iN value loaded. Thus, for uniformity *all* cmpxchg instructions now return "{ iN, i1 }". The second flag is 1 when the store succeeded.

At the DAG level, a new ATOMIC_CMP_SWAP_WITH_SUCCESS node has been added as the natural representation for the new cmpxchg instructions. It is a strong cmpxchg. By default this gets Expanded to the existing ATOMIC_CMP_SWAP during Legalization, so existing backends should see no change in behaviour. If they wish to deal with the enhanced node instead, they can call setOperationAction on it. Beware: as a node with 2 results, it cannot be selected from TableGen.

Currently, no use is made of the extra information provided in this patch. Test updates are almost entirely adapting the input IR to the new scheme.

Summary for out of tree users:
------------------------------
+ Legacy Bitcode files are upgraded during read.
+ Legacy assembly IR files will be invalid.
+ Front-ends must adapt to different type for "cmpxchg".
+ Backends should be unaffected by default.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@210903 91177308-0d34-0410-b5e6-96231b3b80d8
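As a minimal sketch of the updated syntax (an illustrative fragment, not part of the test below; %ptr, %expected and %new are placeholder values), a weak cmpxchg and the extraction of both results look like this:

  ; Illustrative only: a weak cmpxchg yields the { i32, i1 } pair.
  %pair = cmpxchg weak i32* %ptr, i32 %expected, i32 %new seq_cst monotonic
  %loaded  = extractvalue { i32, i1 } %pair, 0  ; the i32 value read from memory
  %success = extractvalue { i32, i1 } %pair, 1  ; 1 if the store actually happened

A strong cmpxchg is written the same way without the weak marker, as in the test_cmpxchg_fail_order functions at the end of this file.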
; RUN: llc < %s -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=thumbv6-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-T1
; RUN: llc < %s -mtriple=thumbv6-apple-ios -verify-machineinstrs -mcpu=cortex-m0 | FileCheck %s --check-prefix=CHECK-T1

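; @func exercises every 32-bit atomicrmw operation (add, sub, and, or, xor, min,
; max, umin, umax). On ARMv7/Thumb2 these lower to ldrex/strex loops; on the
; Thumb1 runs (prefix CHECK-T1) they are expanded to __sync_* libcalls.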
define void @func(i32 %argc, i8** %argv) nounwind {
entry:
  %argc.addr = alloca i32 ; <i32*> [#uses=1]
  %argv.addr = alloca i8** ; <i8***> [#uses=1]
  %val1 = alloca i32 ; <i32*> [#uses=2]
  %val2 = alloca i32 ; <i32*> [#uses=15]
  %andt = alloca i32 ; <i32*> [#uses=2]
  %ort = alloca i32 ; <i32*> [#uses=2]
  %xort = alloca i32 ; <i32*> [#uses=2]
  %old = alloca i32 ; <i32*> [#uses=18]
  %temp = alloca i32 ; <i32*> [#uses=2]
  store i32 %argc, i32* %argc.addr
  store i8** %argv, i8*** %argv.addr
  store i32 0, i32* %val1
  store i32 31, i32* %val2
  store i32 3855, i32* %andt
  store i32 3855, i32* %ort
  store i32 3855, i32* %xort
  store i32 4, i32* %temp
  %tmp = load i32* %temp
  ; CHECK: ldrex
  ; CHECK: add
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_add_4
  %0 = atomicrmw add i32* %val1, i32 %tmp monotonic
  store i32 %0, i32* %old
  ; CHECK: ldrex
  ; CHECK: sub
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_sub_4
  %1 = atomicrmw sub i32* %val2, i32 30 monotonic
  store i32 %1, i32* %old
  ; CHECK: ldrex
  ; CHECK: add
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_add_4
  %2 = atomicrmw add i32* %val2, i32 1 monotonic
  store i32 %2, i32* %old
  ; CHECK: ldrex
  ; CHECK: sub
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_sub_4
  %3 = atomicrmw sub i32* %val2, i32 1 monotonic
  store i32 %3, i32* %old
  ; CHECK: ldrex
  ; CHECK: and
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_and_4
  %4 = atomicrmw and i32* %andt, i32 4080 monotonic
  store i32 %4, i32* %old
  ; CHECK: ldrex
  ; CHECK: or
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_or_4
  %5 = atomicrmw or i32* %ort, i32 4080 monotonic
  store i32 %5, i32* %old
  ; CHECK: ldrex
  ; CHECK: eor
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_xor_4
  %6 = atomicrmw xor i32* %xort, i32 4080 monotonic
  store i32 %6, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_min_4
  %7 = atomicrmw min i32* %val2, i32 16 monotonic
  store i32 %7, i32* %old
  %neg = sub i32 0, 1
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_min_4
  %8 = atomicrmw min i32* %val2, i32 %neg monotonic
  store i32 %8, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_max_4
  %9 = atomicrmw max i32* %val2, i32 1 monotonic
  store i32 %9, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_max_4
  %10 = atomicrmw max i32* %val2, i32 0 monotonic
  store i32 %10, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_4
  %11 = atomicrmw umin i32* %val2, i32 16 monotonic
  store i32 %11, i32* %old
  %uneg = sub i32 0, 1
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_4
  %12 = atomicrmw umin i32* %val2, i32 %uneg monotonic
  store i32 %12, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_4
  %13 = atomicrmw umax i32* %val2, i32 1 monotonic
  store i32 %13, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_4
  %14 = atomicrmw umax i32* %val2, i32 0 monotonic
  store i32 %14, i32* %old

  ret void
}

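; @func2 covers the same umin/umax patterns on i16 values, using the _2-suffixed
; libcalls on Thumb1.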
define void @func2() nounwind {
entry:
  %val = alloca i16
  %old = alloca i16
  store i16 31, i16* %val
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_2
  %0 = atomicrmw umin i16* %val, i16 16 monotonic
  store i16 %0, i16* %old
  %uneg = sub i16 0, 1
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_2
  %1 = atomicrmw umin i16* %val, i16 %uneg monotonic
  store i16 %1, i16* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_2
  %2 = atomicrmw umax i16* %val, i16 1 monotonic
  store i16 %2, i16* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_2
  %3 = atomicrmw umax i16* %val, i16 0 monotonic
  store i16 %3, i16* %old
  ret void
}

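; @func3 covers the same umin/umax patterns on i8 values, using the _1-suffixed
; libcalls on Thumb1.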
define void @func3() nounwind {
entry:
  %val = alloca i8
  %old = alloca i8
  store i8 31, i8* %val
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_1
  %0 = atomicrmw umin i8* %val, i8 16 monotonic
  store i8 %0, i8* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_1
  %uneg = sub i8 0, 1
  %1 = atomicrmw umin i8* %val, i8 %uneg monotonic
  store i8 %1, i8* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_1
  %2 = atomicrmw umax i8* %val, i8 1 monotonic
  store i8 %2, i8* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_1
  %3 = atomicrmw umax i8* %val, i8 0 monotonic
  store i8 %3, i8* %old
  ret void
}

; CHECK: func4
; This function should not need to use callee-saved registers.
; rdar://problem/12203728
; CHECK-NOT: r4
define i32 @func4(i32* %p) nounwind optsize ssp {
entry:
  %0 = atomicrmw add i32* %p, i32 1 monotonic
  ret i32 %0
}

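; A strong cmpxchg with seq_cst success ordering and monotonic failure ordering:
; a dmb ish barrier is expected both before and after the ldrex/strex loop.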
define i32 @test_cmpxchg_fail_order(i32 *%addr, i32 %desired, i32 %new) {
; CHECK-LABEL: test_cmpxchg_fail_order:

  %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic
  %oldval = extractvalue { i32, i1 } %pair, 0
; CHECK: dmb ish
; CHECK: [[LOOP_BB:\.?LBB[0-9]+_1]]:
; CHECK: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
; CHECK: cmp [[OLDVAL]], r1
; CHECK: bxne lr
; CHECK: strex [[SUCCESS:r[0-9]+]], r2, [r[[ADDR]]]
; CHECK: cmp [[SUCCESS]], #0
; CHECK: bne [[LOOP_BB]]
; CHECK: dmb ish
; CHECK: bx lr

  ret i32 %oldval
}

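; With acquire/acquire ordering, no barrier is expected before the loop; a single
; dmb ish is expected after the loop exit block.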
define i32 @test_cmpxchg_fail_order1(i32 *%addr, i32 %desired, i32 %new) {
; CHECK-LABEL: test_cmpxchg_fail_order1:

  %pair = cmpxchg i32* %addr, i32 %desired, i32 %new acquire acquire
  %oldval = extractvalue { i32, i1 } %pair, 0
; CHECK-NOT: dmb ish
; CHECK: [[LOOP_BB:\.?LBB[0-9]+_1]]:
; CHECK: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
; CHECK: cmp [[OLDVAL]], r1
; CHECK: bne [[END_BB:\.?LBB[0-9]+_[0-9]+]]
; CHECK: strex [[SUCCESS:r[0-9]+]], r2, [r[[ADDR]]]
; CHECK: cmp [[SUCCESS]], #0
; CHECK: bne [[LOOP_BB]]
; CHECK: [[END_BB]]:
; CHECK: dmb ish
; CHECK: bx lr

  ret i32 %oldval
}