llvm-6502/test/CodeGen/X86/atomic-ops-ancient-64.ll
Saleem Abdulrasool 5335b49f96 X86: correct 64-bit atomics on 32-bit
We would emit a libcall for a 64-bit atomic operation on x86 after SVN r212119.  This
was due to the misuse of hasCmpxchg16b to indicate whether cmpxchg8b was supported
on a 32-bit target.  The two features were added at different times, and the
resulting mismatch left the boundary condition mishandled.
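
For contrast, here is a minimal sketch of the case hasCmpxchg16b actually governs: 128-bit atomics on a 64-bit target.  The RUN line, the @sketch_add128 name, and the CHECK lines are illustrative, not taken from this commit, and assume i128 atomicrmw support with cmpxchg16b available (-mattr=+cx16).

; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+cx16 %s -o - | FileCheck %s
define i128 @sketch_add128(i128* %addr, i128 %inc) {
; CHECK-LABEL: sketch_add128:
; CHECK: cmpxchg16b
  %old = atomicrmw add i128* %addr, i128 %inc seq_cst
  ret i128 %old
}

Deciding whether a 32-bit target can inline a 64-bit atomic instead needs the cmpxchg8b feature; the two are independent.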

This fixes the boundary case so that the cmpxchg8b instruction is emitted for
64-bit atomic operations on x86, at the cost of restoring a long-standing codegen
bug: we emit cmpxchg8b on all x86 targets, even those whose CPU does not support
the instruction (pre-Pentium CPUs).  Although that bug should be fixed, it was
present prior to both SVN r212119 and this change, so this does not really
introduce a regression.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@212956 91177308-0d34-0410-b5e6-96231b3b80d8
2014-07-14 16:28:13 +00:00
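
As a minimal sketch of the lowering this change restores (not part of this commit): on a 32-bit x86 target, a 64-bit atomicrmw now expands inline to a cmpxchg8b loop rather than a __sync_* libcall.  The @sketch_add name and the CHECK lines below are illustrative.

; RUN: llc -mtriple=i386-linux-gnu %s -o - | FileCheck %s
define i64 @sketch_add(i64* %addr, i64 %inc) {
; CHECK-LABEL: sketch_add:
; CHECK-NOT: calll __sync_fetch_and_add_8
; CHECK: cmpxchg8b
  %old = atomicrmw add i64* %addr, i64 %inc seq_cst
  ret i64 %old
}

The CHECK-NOT line asserts that no libcall is emitted before the cmpxchg8b; the surrounding compare-exchange loop is deliberately left unchecked, since its exact shape is codegen-dependent.

The test file itself, now XFAILed, follows: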

; RUN: llc -mtriple=i386-linux-gnu %s -o - | FileCheck %s
; XFAIL: *
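; This test is XFAILed: as of this commit, 64-bit atomics expand inline to
; cmpxchg8b on all 32-bit x86 targets (including pre-Pentium CPUs that lack
; the instruction), so the __sync_* libcalls the CHECK lines below expect
; are no longer emitted.
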
define i64 @test_add(i64* %addr, i64 %inc) {
; CHECK-LABEL: test_add:
; CHECK: calll __sync_fetch_and_add_8
  %old = atomicrmw add i64* %addr, i64 %inc seq_cst
  ret i64 %old
}

define i64 @test_sub(i64* %addr, i64 %inc) {
; CHECK-LABEL: test_sub:
; CHECK: calll __sync_fetch_and_sub_8
  %old = atomicrmw sub i64* %addr, i64 %inc seq_cst
  ret i64 %old
}

define i64 @test_and(i64* %andr, i64 %inc) {
; CHECK-LABEL: test_and:
; CHECK: calll __sync_fetch_and_and_8
  %old = atomicrmw and i64* %andr, i64 %inc seq_cst
  ret i64 %old
}

define i64 @test_or(i64* %orr, i64 %inc) {
; CHECK-LABEL: test_or:
; CHECK: calll __sync_fetch_and_or_8
  %old = atomicrmw or i64* %orr, i64 %inc seq_cst
  ret i64 %old
}

define i64 @test_xor(i64* %xorr, i64 %inc) {
; CHECK-LABEL: test_xor:
; CHECK: calll __sync_fetch_and_xor_8
  %old = atomicrmw xor i64* %xorr, i64 %inc seq_cst
  ret i64 %old
}

define i64 @test_nand(i64* %nandr, i64 %inc) {
; CHECK-LABEL: test_nand:
; CHECK: calll __sync_fetch_and_nand_8
  %old = atomicrmw nand i64* %nandr, i64 %inc seq_cst
  ret i64 %old
}