XCore target: Lower ATOMIC_LOAD & ATOMIC_STORE
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@201143 91177308-0d34-0410-b5e6-96231b3b80d8
parent e9d5f6e387
commit 04a573a41f
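
The approach: request fence insertion for atomic operations, which reduces every atomic load/store reaching the DAG to Monotonic ordering, then custom-lower the Monotonic form to an ordinary load/store. As a rough illustration (a standalone sketch, not part of the commit), the same split is expressible with C++11 atomics, where a stronger ordering becomes a relaxed access plus a fence:

    // Sketch of the fence-based lowering this patch relies on.
    #include <atomic>
    #include <cstdio>

    std::atomic<int> pool{0};

    int acquire_load_lowered() {
      // An acquire load becomes a Monotonic (relaxed) load followed by a
      // fence; on XCore the fence is the MEMBARRIER seen in the test below.
      int v = pool.load(std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_acquire);
      return v;
    }

    void release_store_lowered(int v) {
      // Symmetrically, a release store becomes a fence followed by a
      // relaxed store.
      std::atomic_thread_fence(std::memory_order_release);
      pool.store(v, std::memory_order_relaxed);
    }

    int main() {
      release_store_lowered(42);
      std::printf("%d\n", acquire_load_lowered());
      return 0;
    }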
lib/Target/XCore/XCoreISelLowering.cpp

@@ -159,7 +159,12 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
   setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
 
   // Atomic operations
+  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
+  // As we are always Sequential Consistent, an ATOMIC_FENCE becomes a no OP.
+  setInsertFencesForAtomic(true);
   setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
+  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
+  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
 
   // TRAMPOLINE is custom lowered.
   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
@@ -223,6 +228,8 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
   case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
+  case ISD::ATOMIC_LOAD: return LowerATOMIC_LOAD(Op, DAG);
+  case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
   default:
     llvm_unreachable("unimplemented operand");
   }
@@ -964,6 +971,67 @@ LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
   return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
 }
 
+SDValue XCoreTargetLowering::
+LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
+  AtomicSDNode *N = cast<AtomicSDNode>(Op);
+  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
+  assert(N->getOrdering() <= Monotonic &&
+         "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+  if (N->getMemoryVT() == MVT::i32) {
+    if (N->getAlignment() < 4)
+      report_fatal_error("atomic load must be aligned");
+    return DAG.getLoad(getPointerTy(), SDLoc(Op), N->getChain(),
+                       N->getBasePtr(), N->getPointerInfo(),
+                       N->isVolatile(), N->isNonTemporal(),
+                       N->isInvariant(), N->getAlignment(),
+                       N->getTBAAInfo(), N->getRanges());
+  }
+  if (N->getMemoryVT() == MVT::i16) {
+    if (N->getAlignment() < 2)
+      report_fatal_error("atomic load must be aligned");
+    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
+                          N->getBasePtr(), N->getPointerInfo(), MVT::i16,
+                          N->isVolatile(), N->isNonTemporal(),
+                          N->getAlignment(), N->getTBAAInfo());
+  }
+  if (N->getMemoryVT() == MVT::i8)
+    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
+                          N->getBasePtr(), N->getPointerInfo(), MVT::i8,
+                          N->isVolatile(), N->isNonTemporal(),
+                          N->getAlignment(), N->getTBAAInfo());
+  return SDValue();
+}
+
+SDValue XCoreTargetLowering::
+LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
+  AtomicSDNode *N = cast<AtomicSDNode>(Op);
+  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
+  assert(N->getOrdering() <= Monotonic &&
+         "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+  if (N->getMemoryVT() == MVT::i32) {
+    if (N->getAlignment() < 4)
+      report_fatal_error("atomic store must be aligned");
+    return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(),
+                        N->getBasePtr(), N->getPointerInfo(),
+                        N->isVolatile(), N->isNonTemporal(),
+                        N->getAlignment(), N->getTBAAInfo());
+  }
+  if (N->getMemoryVT() == MVT::i16) {
+    if (N->getAlignment() < 2)
+      report_fatal_error("atomic store must be aligned");
+    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
+                             N->getBasePtr(), N->getPointerInfo(), MVT::i16,
+                             N->isVolatile(), N->isNonTemporal(),
+                             N->getAlignment(), N->getTBAAInfo());
+  }
+  if (N->getMemoryVT() == MVT::i8)
+    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
+                             N->getBasePtr(), N->getPointerInfo(), MVT::i8,
+                             N->isVolatile(), N->isNonTemporal(),
+                             N->getAlignment(), N->getTBAAInfo());
+  return SDValue();
+}
+
 //===----------------------------------------------------------------------===//
 //                  Calling Convention Implementation
 //===----------------------------------------------------------------------===//
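
Note the type-dependent paths above: i32 accesses become a plain getLoad/getStore, while i16 and i8 go through getExtLoad/getTruncStore, since an aligned sub-word access is a single instruction on XCore and is therefore atomic on its own. A hedged C++ view of the same idea (my illustration, assuming an XCore target; the instruction mappings are taken from the test diff below):

    #include <atomic>
    #include <cstdio>

    std::atomic<int>   w;  // i32: getLoad/getStore         -> ldw / stw
    std::atomic<short> h;  // i16: getExtLoad/getTruncStore -> ld16s / st16
    std::atomic<char>  b;  // i8:  getExtLoad/getTruncStore -> ld8u / st8

    int main() {
      w.store(1, std::memory_order_relaxed);
      h.store(2, std::memory_order_relaxed);
      b.store(3, std::memory_order_relaxed);
      std::printf("%d %d %d\n", w.load(std::memory_order_relaxed),
                  (int)h.load(std::memory_order_relaxed),
                  (int)b.load(std::memory_order_relaxed));
      return 0;
    }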
lib/Target/XCore/XCoreISelLowering.h

@@ -172,6 +172,8 @@ namespace llvm {
    SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
 
    // Inline asm support
    std::pair<unsigned, const TargetRegisterClass*>
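
The test hunk below extends the existing atomics test. Its RUN line is outside this hunk; presumably it is the usual llc/FileCheck pattern, along the lines of:

    ; RUN: llc < %s -march=xcore | FileCheck %s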
test/CodeGen/XCore/atomic.ll

@@ -14,3 +14,79 @@ entry:
   fence seq_cst
   ret void
 }
+
+@pool = external global i64
+
+define void @atomicloadstore() nounwind {
+entry:
+; CHECK-LABEL: atomicloadstore
+
+; CHECK: ldw r0, dp[pool]
+; CHECK-NEXT: #MEMBARRIER
+  %0 = load atomic i32* bitcast (i64* @pool to i32*) acquire, align 4
+
+; CHECK-NEXT: ldaw r1, dp[pool]
+; CHECK-NEXT: ldc r2, 0
+
+; CHECK-NEXT: ld16s r3, r1[r2]
+; CHECK-NEXT: #MEMBARRIER
+  %1 = load atomic i16* bitcast (i64* @pool to i16*) acquire, align 2
+
+; CHECK-NEXT: ld8u r11, r1[r2]
+; CHECK-NEXT: #MEMBARRIER
+  %2 = load atomic i8* bitcast (i64* @pool to i8*) acquire, align 1
+
+; CHECK-NEXT: ldw r4, dp[pool]
+; CHECK-NEXT: #MEMBARRIER
+  %3 = load atomic i32* bitcast (i64* @pool to i32*) seq_cst, align 4
+
+; CHECK-NEXT: ld16s r5, r1[r2]
+; CHECK-NEXT: #MEMBARRIER
+  %4 = load atomic i16* bitcast (i64* @pool to i16*) seq_cst, align 2
+
+; CHECK-NEXT: ld8u r6, r1[r2]
+; CHECK-NEXT: #MEMBARRIER
+  %5 = load atomic i8* bitcast (i64* @pool to i8*) seq_cst, align 1
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: stw r0, dp[pool]
+  store atomic i32 %0, i32* bitcast (i64* @pool to i32*) release, align 4
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: st16 r3, r1[r2]
+  store atomic i16 %1, i16* bitcast (i64* @pool to i16*) release, align 2
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: st8 r11, r1[r2]
+  store atomic i8 %2, i8* bitcast (i64* @pool to i8*) release, align 1
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: stw r4, dp[pool]
+; CHECK-NEXT: #MEMBARRIER
+  store atomic i32 %3, i32* bitcast (i64* @pool to i32*) seq_cst, align 4
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: st16 r5, r1[r2]
+; CHECK-NEXT: #MEMBARRIER
+  store atomic i16 %4, i16* bitcast (i64* @pool to i16*) seq_cst, align 2
+
+; CHECK-NEXT: #MEMBARRIER
+; CHECK-NEXT: st8 r6, r1[r2]
+; CHECK-NEXT: #MEMBARRIER
+  store atomic i8 %5, i8* bitcast (i64* @pool to i8*) seq_cst, align 1
+
+; CHECK-NEXT: ldw r0, dp[pool]
+; CHECK-NEXT: stw r0, dp[pool]
+; CHECK-NEXT: ld16s r0, r1[r2]
+; CHECK-NEXT: st16 r0, r1[r2]
+; CHECK-NEXT: ld8u r0, r1[r2]
+; CHECK-NEXT: st8 r0, r1[r2]
+  %6 = load atomic i32* bitcast (i64* @pool to i32*) monotonic, align 4
+  store atomic i32 %6, i32* bitcast (i64* @pool to i32*) monotonic, align 4
+  %7 = load atomic i16* bitcast (i64* @pool to i16*) monotonic, align 2
+  store atomic i16 %7, i16* bitcast (i64* @pool to i16*) monotonic, align 2
+  %8 = load atomic i8* bitcast (i64* @pool to i8*) monotonic, align 1
+  store atomic i8 %8, i8* bitcast (i64* @pool to i8*) monotonic, align 1
+
+  ret void
+}