llvm-6502/test/CodeGen/X86/atomic32.ll
Michael Liao b118a073d7 Re-work X86 code generation of atomic ops with spin-loop
- Rewrite/merge pseudo-atomic instruction emitters to address the
  following issue:
  * Reduce one unnecessary load in spin-loop

    previously, the spin-loop looked like:

        thisMBB:
        newMBB:
          ld  t1 = [bitinstr.addr]
          op  t2 = t1, [bitinstr.val]
          not t3 = t2  (if Invert)
          mov EAX = t1
          lcs dest = [bitinstr.addr], t3  [EAX is implicit]
          bz  newMBB
          fallthrough -->nextMBB

    the 'ld' at the beginning of newMBB should be hoisted out of the
    loop, as lcs (CMPXCHG on x86) already loads the current memory
    value into EAX. The loop is refined as:

        thisMBB:
          EAX = LOAD [MI.addr]
        mainMBB:
          t1 = OP [MI.val], EAX
          LCMPXCHG [MI.addr], t1, [EAX is implicitly used & defined]
          JNE mainMBB
        sinkMBB:
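
    For example, "atomicrmw nand i32* @sc32, i32 %x acquire" now
    compiles to a loop like the following sketch (registers and label
    names are illustrative; %x is assumed to arrive in EDI on x86-64):

          movl     sc32, %eax    # thisMBB: EAX = LOAD [MI.addr]
        .LBB0_1:                 # mainMBB:
          movl     %eax, %ecx
          andl     %edi, %ecx    # t1 = OP [MI.val], EAX
          notl     %ecx
          lock
          cmpxchgl %ecx, sc32    # implicitly uses & redefines EAX
          jne      .LBB0_1
                                 # sinkMBB: old value is in EAX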

  * Remove immopc: so far, all pseudo-atomic instructions have an
    all-register form only, so there is no immediate operand.

  * Remove unnecessary attributes/modifiers from the pseudo-atomic
    instruction .td definitions

  * Fix issues in PR13458

- Add comprehensive tests of atomic ops on various data types.
  NOTE: Some of them are turned off due to missing functionality.

- Revise existing tests to match the newly generated spin-loop.



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@164281 91177308-0d34-0410-b5e6-96231b3b80d8
2012-09-20 03:06:15 +00:00

; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 | FileCheck %s --check-prefix X64
; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 | FileCheck %s --check-prefix X32

@sc32 = external global i32

define void @atomic_fetch_add32() nounwind {
; X64: atomic_fetch_add32
; X32: atomic_fetch_add32
entry:
; 32-bit
%t1 = atomicrmw add i32* @sc32, i32 1 acquire
; X64: lock
; X64: incl
; X32: lock
; X32: incl
%t2 = atomicrmw add i32* @sc32, i32 3 acquire
; X64: lock
; X64: addl $3
; X32: lock
; X32: addl $3
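; The old value of %t3 feeds %t4 below, so lock xaddl, which returns
; the previous memory value, is used.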
%t3 = atomicrmw add i32* @sc32, i32 5 acquire
; X64: lock
; X64: xaddl
; X32: lock
; X32: xaddl
%t4 = atomicrmw add i32* @sc32, i32 %t3 acquire
; X64: lock
; X64: addl
; X32: lock
; X32: addl
ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_sub32() nounwind {
; X64: atomic_fetch_sub32
; X32: atomic_fetch_sub32
%t1 = atomicrmw sub i32* @sc32, i32 1 acquire
; X64: lock
; X64: decl
; X32: lock
; X32: decl
%t2 = atomicrmw sub i32* @sc32, i32 3 acquire
; X64: lock
; X64: subl $3
; X32: lock
; X32: subl $3
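; A sub whose result is used is lowered to lock xaddl with the negated
; operand.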
%t3 = atomicrmw sub i32* @sc32, i32 5 acquire
; X64: lock
; X64: xaddl
; X32: lock
; X32: xaddl
%t4 = atomicrmw sub i32* @sc32, i32 %t3 acquire
; X64: lock
; X64: subl
; X32: lock
; X32: subl
ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_and32() nounwind {
; X64: atomic_fetch_and32
; X32: atomic_fetch_and32
%t1 = atomicrmw and i32* @sc32, i32 3 acquire
; X64: lock
; X64: andl $3
; X32: lock
; X32: andl $3
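; x86 has no and-with-fetch instruction, so when the result (%t2, used
; by %t3) is needed, a cmpxchgl spin-loop is generated.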
%t2 = atomicrmw and i32* @sc32, i32 5 acquire
; X64: andl
; X64: lock
; X64: cmpxchgl
; X32: andl
; X32: lock
; X32: cmpxchgl
%t3 = atomicrmw and i32* @sc32, i32 %t2 acquire
; X64: lock
; X64: andl
; X32: lock
; X32: andl
ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_or32() nounwind {
; X64: atomic_fetch_or32
; X32: atomic_fetch_or32
%t1 = atomicrmw or i32* @sc32, i32 3 acquire
; X64: lock
; X64: orl $3
; X32: lock
; X32: orl $3
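; Like and, an or whose result is used must take the cmpxchgl spin-loop.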
%t2 = atomicrmw or i32* @sc32, i32 5 acquire
; X64: orl
; X64: lock
; X64: cmpxchgl
; X32: orl
; X32: lock
; X32: cmpxchgl
%t3 = atomicrmw or i32* @sc32, i32 %t2 acquire
; X64: lock
; X64: orl
; X32: lock
; X32: orl
ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_xor32() nounwind {
; X64: atomic_fetch_xor32
; X32: atomic_fetch_xor32
%t1 = atomicrmw xor i32* @sc32, i32 3 acquire
; X64: lock
; X64: xorl $3
; X32: lock
; X32: xorl $3
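; Like and/or, a xor whose result is used must take the cmpxchgl
; spin-loop.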
%t2 = atomicrmw xor i32* @sc32, i32 5 acquire
; X64: xorl
; X64: lock
; X64: cmpxchgl
; X32: xorl
; X32: lock
; X32: cmpxchgl
%t3 = atomicrmw xor i32* @sc32, i32 %t2 acquire
; X64: lock
; X64: xorl
; X32: lock
; X32: xorl
ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_nand32(i32 %x) nounwind {
; X64: atomic_fetch_nand32
; X32: atomic_fetch_nand32
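; nand always takes the spin-loop: and + not, then lock cmpxchgl until
; the exchange succeeds.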
%t1 = atomicrmw nand i32* @sc32, i32 %x acquire
; X64: andl
; X64: notl
; X64: lock
; X64: cmpxchgl
; X32: andl
; X32: notl
; X32: lock
; X32: cmpxchgl
ret void
; X64: ret
; X32: ret
}
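
; The four min/max variants below compute the candidate value with
; cmpl + cmov and retry with lock cmpxchgl until no other writer
; intervenes.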
define void @atomic_fetch_max32(i32 %x) nounwind {
%t1 = atomicrmw max i32* @sc32, i32 %x acquire
; X64: cmpl
; X64: cmov
; X64: lock
; X64: cmpxchgl
; X32: cmpl
; X32: cmov
; X32: lock
; X32: cmpxchgl
ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_min32(i32 %x) nounwind {
%t1 = atomicrmw min i32* @sc32, i32 %x acquire
; X64: cmpl
; X64: cmov
; X64: lock
; X64: cmpxchgl
; X32: cmpl
; X32: cmov
; X32: lock
; X32: cmpxchgl
ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_umax32(i32 %x) nounwind {
%t1 = atomicrmw umax i32* @sc32, i32 %x acquire
; X64: cmpl
; X64: cmov
; X64: lock
; X64: cmpxchgl
; X32: cmpl
; X32: cmov
; X32: lock
; X32: cmpxchgl
ret void
; X64: ret
; X32: ret
}

define void @atomic_fetch_umin32(i32 %x) nounwind {
%t1 = atomicrmw umin i32* @sc32, i32 %x acquire
; X64: cmpl
; X64: cmov
; X64: lock
; X64: cmpxchgl
; X32: cmpl
; X32: cmov
; X32: lock
; X32: cmpxchgl
ret void
; X64: ret
; X32: ret
}
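
; cmpxchg maps directly onto lock cmpxchgl; no loop is required.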
define void @atomic_fetch_cmpxchg32() nounwind {
%t1 = cmpxchg i32* @sc32, i32 0, i32 1 acquire
; X64: lock
; X64: cmpxchgl
; X32: lock
; X32: cmpxchgl
ret void
; X64: ret
; X32: ret
}
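
; On x86, a release store needs no fence or lock prefix; a plain movl
; suffices.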
define void @atomic_fetch_store32(i32 %x) nounwind {
store atomic i32 %x, i32* @sc32 release, align 4
; X64-NOT: lock
; X64: movl
; X32-NOT: lock
; X32: movl
ret void
; X64: ret
; X32: ret
}
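
; xchgl with a memory operand is implicitly locked, so no explicit
; lock prefix is emitted.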
define void @atomic_fetch_swap32(i32 %x) nounwind {
%t1 = atomicrmw xchg i32* @sc32, i32 %x acquire
; X64-NOT: lock
; X64: xchgl
; X32-NOT: lock
; X32: xchgl
ret void
; X64: ret
; X32: ret
}