X86: delegate expanding atomic libcalls to generic code.
On targets without cmpxchg16b or cmpxchg8b, the borderline atomic operations were slipping through the gaps.

X86AtomicExpand.cpp was delegating to ISelLowering. Generic ISelLowering was delegating to X86ISelLowering, and X86ISelLowering was asserting. The correct behaviour is to expand to a libcall, preferably in generic ISelLowering.

This can be achieved by X86ISelLowering deciding it doesn't want the faff after all.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@212134 91177308-0d34-0410-b5e6-96231b3b80d8
commit 99ec36c684
parent 3ca3826528
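To make the fix concrete, here is a minimal standalone C++ sketch of the control flow the commit message describes. The type and function names are illustrative only, not LLVM's real APIs: the point is that after this commit the target declines to custom-lower an atomic whose width it cannot handle inline, and the generic layer falls back to a __sync_* libcall instead of hitting an assertion.

// A minimal sketch of the delegation described above; the names here are
// my own illustration, not LLVM's classes. The fix amounts to: the target
// declines atomics it cannot lower inline, and the generic layer expands
// them to a __sync_* libcall instead of asserting.
#include <cstdio>

struct X86LoweringModel {
  bool HasCmpxchg8b; // i586 and later have cmpxchg8b; plain i386 does not

  // After the commit: report "no custom lowering" for too-wide atomics so
  // the generic legalizer takes over, rather than asserting later.
  bool wantsCustomAtomic(unsigned Bits) const {
    return Bits <= 32 || HasCmpxchg8b;
  }
};

void legalizeAtomicRMW(const X86LoweringModel &TLI, unsigned Bits) {
  if (TLI.wantsCustomAtomic(Bits))
    std::puts("lower inline (e.g. a lock cmpxchg8b loop)");
  else
    std::puts("generic expansion: call __sync_fetch_and_<op>_8");
}

int main() {
  legalizeAtomicRMW({/*HasCmpxchg8b=*/false}, 64); // i386: libcall
  legalizeAtomicRMW({/*HasCmpxchg8b=*/true}, 64);  // i586+: inline
  return 0;
}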
lib/Target/X86/X86ISelLowering.cpp:

@@ -16360,6 +16360,20 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
     Results.push_back(EFLAGS.getValue(1));
     return;
   }
+  case ISD::ATOMIC_SWAP:
+  case ISD::ATOMIC_LOAD_ADD:
+  case ISD::ATOMIC_LOAD_SUB:
+  case ISD::ATOMIC_LOAD_AND:
+  case ISD::ATOMIC_LOAD_OR:
+  case ISD::ATOMIC_LOAD_XOR:
+  case ISD::ATOMIC_LOAD_NAND:
+  case ISD::ATOMIC_LOAD_MIN:
+  case ISD::ATOMIC_LOAD_MAX:
+  case ISD::ATOMIC_LOAD_UMIN:
+  case ISD::ATOMIC_LOAD_UMAX:
+    // Delegate to generic TypeLegalization. Situations we can really handle
+    // should have already been dealt with by X86AtomicExpand.cpp.
+    break;
   case ISD::ATOMIC_LOAD: {
     ReplaceATOMIC_LOAD(N, Results, DAG);
     return;
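Per the new comment, anything X86 can really handle is filtered out earlier by X86AtomicExpand.cpp; whatever reaches this break is type-legalized generically into a runtime call. A rough sketch of the resulting libcall naming, assuming only what the new test below checks (the helper function is mine, not LLVM's code):

// Illustrative only: build the __sync_<op>_<bytes> libcall name that generic
// legalization ends up calling for an atomicrmw whose width is illegal.
// The printed names match the CHECK lines in the test added below.
#include <cstdio>
#include <string>

std::string syncLibcallName(const std::string &Op, unsigned Bits) {
  // e.g. ("fetch_and_add", 64) -> "__sync_fetch_and_add_8"
  return "__sync_" + Op + "_" + std::to_string(Bits / 8);
}

int main() {
  for (const char *Op : {"fetch_and_add", "fetch_and_sub", "fetch_and_and",
                         "fetch_and_or", "fetch_and_xor", "fetch_and_nand"})
    std::printf("%s\n", syncLibcallName(Op, 64).c_str());
  return 0;
}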
test/CodeGen/X86/atomic-ops-ancient-64.ll (new file, 43 lines):

@@ -0,0 +1,43 @@
+; RUN: llc -mtriple=i386-linux-gnu %s -o - | FileCheck %s
+
+define i64 @test_add(i64* %addr, i64 %inc) {
+; CHECK-LABEL: test_add:
+; CHECK: calll __sync_fetch_and_add_8
+  %old = atomicrmw add i64* %addr, i64 %inc seq_cst
+  ret i64 %old
+}
+
+define i64 @test_sub(i64* %addr, i64 %inc) {
+; CHECK-LABEL: test_sub:
+; CHECK: calll __sync_fetch_and_sub_8
+  %old = atomicrmw sub i64* %addr, i64 %inc seq_cst
+  ret i64 %old
+}
+
+define i64 @test_and(i64* %andr, i64 %inc) {
+; CHECK-LABEL: test_and:
+; CHECK: calll __sync_fetch_and_and_8
+  %old = atomicrmw and i64* %andr, i64 %inc seq_cst
+  ret i64 %old
+}
+
+define i64 @test_or(i64* %orr, i64 %inc) {
+; CHECK-LABEL: test_or:
+; CHECK: calll __sync_fetch_and_or_8
+  %old = atomicrmw or i64* %orr, i64 %inc seq_cst
+  ret i64 %old
+}
+
+define i64 @test_xor(i64* %xorr, i64 %inc) {
+; CHECK-LABEL: test_xor:
+; CHECK: calll __sync_fetch_and_xor_8
+  %old = atomicrmw xor i64* %xorr, i64 %inc seq_cst
+  ret i64 %old
+}
+
+define i64 @test_nand(i64* %nandr, i64 %inc) {
+; CHECK-LABEL: test_nand:
+; CHECK: calll __sync_fetch_and_nand_8
+  %old = atomicrmw nand i64* %nandr, i64 %inc seq_cst
+  ret i64 %old
+}
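As a closing illustration (my example, not part of the commit): the same libcall boundary is visible from C++. Compiled for a 32-bit target without cmpxchg8b, for example with clang -m32 -march=i386, the 64-bit read-modify-write below cannot be inlined, so the compiler emits a call to __sync_fetch_and_add_8, the very symbol the test above checks for.

// My illustration, not from the commit: a 64-bit atomic add in C++. On a
// 32-bit target without cmpxchg8b this cannot be lowered inline, so the
// compiler emits a call to the __sync_fetch_and_add_8 runtime function,
// matching the CHECK line in test_add above.
#include <cstdint>
#include <cstdio>

int64_t counter = 0;

int64_t bump(int64_t inc) {
  // GCC/Clang __sync builtin; corresponds to "atomicrmw add i64" in the test.
  return __sync_fetch_and_add(&counter, inc);
}

int main() {
  std::printf("old value: %lld\n", static_cast<long long>(bump(5)));
  return 0;
}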