Basic x86 code generation for atomic load and store instructions.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@138478 91177308-0d34-0410-b5e6-96231b3b80d8
parent b05fdd6bab
commit 327236cd6c
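For context, here is a minimal sketch (not part of this commit) of the IR constructs the new codegen handles, built through the C++ API. The function and variable names are illustrative, and the header paths are assumed from the LLVM tree of this era:

```cpp
// Sketch: building the "load atomic" / "store atomic" IR that this patch
// gives the x86 backend direct SelectionDAG support for.
// Names (emitAtomicOps, B, Ptr, Val) are illustrative, not from the patch.
#include "llvm/Instructions.h"
#include "llvm/Support/IRBuilder.h"
using namespace llvm;

void emitAtomicOps(IRBuilder<> &B, Value *Ptr, Value *Val) {
  // %v = load atomic i32* %Ptr acquire, align 4
  LoadInst *LI = B.CreateLoad(Ptr, "v");
  LI->setAlignment(4);
  LI->setAtomic(Acquire);

  // store atomic i32 %Val, i32* %Ptr release, align 4
  StoreInst *SI = B.CreateStore(Val, Ptr);
  SI->setAlignment(4);
  SI->setAtomic(Release);
}
```

The SelectionDAGBuilder changes below translate such loads and stores into the new ISD::ATOMIC_LOAD and ISD::ATOMIC_STORE nodes.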
@@ -597,22 +597,22 @@ namespace ISD {
// two integer constants: an AtomicOrdering and a SynchronizationScope.
ATOMIC_FENCE,

// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
// This corresponds to "load atomic" instruction.
ATOMIC_LOAD,

// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val)
// This corresponds to "store atomic" instruction.
ATOMIC_STORE,

// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
// this corresponds to the atomic.lcs intrinsic.
// cmp is compared to *ptr, and if equal, swap is stored in *ptr.
// the return is always the original value in *ptr
// This corresponds to the cmpxchg instruction.
ATOMIC_CMP_SWAP,

// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
// this corresponds to the atomic.swap intrinsic.
// amt is stored to *ptr atomically.
// the return is always the original value in *ptr
ATOMIC_SWAP,

// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
// this corresponds to the atomic.load.[OpName] intrinsic.
// op(*ptr, amt) is stored to *ptr atomically.
// the return is always the original value in *ptr
// These correspond to the atomicrmw instruction.
ATOMIC_SWAP,
ATOMIC_LOAD_ADD,
ATOMIC_LOAD_SUB,
ATOMIC_LOAD_AND,
@@ -598,16 +598,26 @@ public:
AtomicOrdering Ordering,
SynchronizationScope SynchScope);

/// getAtomic - Gets a node for an atomic op, produces result and chain and
/// takes 2 operands.
/// getAtomic - Gets a node for an atomic op, produces result (if relevant)
/// and chain and takes 2 operands.
SDValue getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Val, const Value* PtrVal,
unsigned Alignment, AtomicOrdering Ordering,
SynchronizationScope SynchScope);
SDValue getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Val, MachineMemOperand *MMO,
AtomicOrdering Ordering,
SynchronizationScope SynchScope);

/// getAtomic - Gets a node for an atomic op, produces result and chain and
/// takes 1 operand.
SDValue getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT, EVT VT,
SDValue Chain, SDValue Ptr, const Value* PtrVal,
unsigned Alignment,
AtomicOrdering Ordering,
SynchronizationScope SynchScope);
SDValue getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Val,
MachineMemOperand *MMO,
SDValue getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT, EVT VT,
SDValue Chain, SDValue Ptr, MachineMemOperand *MMO,
AtomicOrdering Ordering,
SynchronizationScope SynchScope);
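As a usage sketch (assumed, not taken verbatim from the patch): ATOMIC_LOAD carries one memory operand and produces a value plus an output chain, while ATOMIC_STORE takes the value to store and produces only a chain, so callers read the two results differently. All parameter names below are illustrative.

```cpp
// Sketch only: how a caller (e.g. the SelectionDAGBuilder) would use the
// getAtomic overloads declared above. Not code from the patch.
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static void buildAtomicNodes(SelectionDAG &DAG, DebugLoc dl, EVT MemVT,
                             SDValue Chain, SDValue Ptr, SDValue Val,
                             const Value *PtrVal, unsigned Align,
                             AtomicOrdering Ordering,
                             SynchronizationScope Scope) {
  // ATOMIC_LOAD: one memory operand, two results (value, out-chain).
  SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, /*VT=*/MemVT,
                            Chain, Ptr, PtrVal, Align, Ordering, Scope);
  SDValue Loaded   = L;              // result 0: the loaded value
  SDValue OutChain = L.getValue(1);  // result 1: the chain
  (void)Loaded; (void)OutChain;

  // ATOMIC_STORE: value operand, single result (the chain only).
  SDValue StChain = DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT,
                                  Chain, Ptr, Val, PtrVal, Align,
                                  Ordering, Scope);
  (void)StChain;
}
```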
@@ -976,6 +976,8 @@ public:
N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
N->getOpcode() == ISD::ATOMIC_LOAD ||
N->getOpcode() == ISD::ATOMIC_STORE ||
N->isTargetMemoryOpcode();
}
};

@@ -1025,6 +1027,14 @@ public:
InitAtomic(Ordering, SynchScope);
InitOperands(Ops, Chain, Ptr, Val);
}
AtomicSDNode(unsigned Opc, DebugLoc dl, SDVTList VTL, EVT MemVT,
SDValue Chain, SDValue Ptr,
MachineMemOperand *MMO,
AtomicOrdering Ordering, SynchronizationScope SynchScope)
: MemSDNode(Opc, dl, VTL, MemVT, MMO) {
InitAtomic(Ordering, SynchScope);
InitOperands(Ops, Chain, Ptr);
}

const SDValue &getBasePtr() const { return getOperand(1); }
const SDValue &getVal() const { return getOperand(2); }

@@ -1048,7 +1058,9 @@ public:
N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
N->getOpcode() == ISD::ATOMIC_LOAD_UMAX;
N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
N->getOpcode() == ISD::ATOMIC_LOAD ||
N->getOpcode() == ISD::ATOMIC_STORE;
}
};
@@ -214,6 +214,12 @@ def SDTAtomic3 : SDTypeProfile<1, 3, [
def SDTAtomic2 : SDTypeProfile<1, 2, [
SDTCisSameAs<0,2>, SDTCisInt<0>, SDTCisPtrTy<1>
]>;
def SDTAtomicStore : SDTypeProfile<0, 2, [
SDTCisPtrTy<0>, SDTCisInt<1>
]>;
def SDTAtomicLoad : SDTypeProfile<1, 1, [
SDTCisInt<0>, SDTCisPtrTy<1>
]>;

def SDTConvertOp : SDTypeProfile<1, 5, [ //cvtss, su, us, uu, ff, fs, fu, sf, su
SDTCisVT<2, OtherVT>, SDTCisVT<3, OtherVT>, SDTCisPtrTy<4>, SDTCisPtrTy<5>

@@ -427,6 +433,10 @@ def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load : SDNode<"ISD::ATOMIC_LOAD", SDTAtomicLoad,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_store : SDNode<"ISD::ATOMIC_STORE", SDTAtomicStore,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;

// Do not use ld, st directly. Use load, extload, sextload, zextload, store,
// and truncst (see below).

@@ -844,6 +854,28 @@ defm atomic_load_min : binary_atomic_op<atomic_load_min>;
defm atomic_load_max : binary_atomic_op<atomic_load_max>;
defm atomic_load_umin : binary_atomic_op<atomic_load_umin>;
defm atomic_load_umax : binary_atomic_op<atomic_load_umax>;
defm atomic_store : binary_atomic_op<atomic_store>;

def atomic_load_8 :
PatFrag<(ops node:$ptr),
(atomic_load node:$ptr), [{
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
def atomic_load_16 :
PatFrag<(ops node:$ptr),
(atomic_load node:$ptr), [{
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def atomic_load_32 :
PatFrag<(ops node:$ptr),
(atomic_load node:$ptr), [{
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def atomic_load_64 :
PatFrag<(ops node:$ptr),
(atomic_load node:$ptr), [{
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

//===----------------------------------------------------------------------===//
// Selection DAG CONVERT_RNDSAT patterns
@@ -819,6 +819,11 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Action = TLI.getOperationAction(Node->getOpcode(), InnerType);
break;
}
case ISD::ATOMIC_STORE: {
Action = TLI.getOperationAction(Node->getOpcode(),
Node->getOperand(2).getValueType());
break;
}
case ISD::SELECT_CC:
case ISD::SETCC:
case ISD::BR_CC: {
@@ -432,7 +432,9 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
case ISD::ATOMIC_LOAD_MIN:
case ISD::ATOMIC_LOAD_MAX:
case ISD::ATOMIC_LOAD_UMIN:
case ISD::ATOMIC_LOAD_UMAX: {
case ISD::ATOMIC_LOAD_UMAX:
case ISD::ATOMIC_LOAD:
case ISD::ATOMIC_STORE: {
const AtomicSDNode *AT = cast<AtomicSDNode>(N);
ID.AddInteger(AT->getMemoryVT().getRawBits());
ID.AddInteger(AT->getRawSubclassData());
@@ -3904,12 +3906,14 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
Opcode == ISD::ATOMIC_LOAD_MAX ||
Opcode == ISD::ATOMIC_LOAD_UMIN ||
Opcode == ISD::ATOMIC_LOAD_UMAX ||
Opcode == ISD::ATOMIC_SWAP) &&
Opcode == ISD::ATOMIC_SWAP ||
Opcode == ISD::ATOMIC_STORE) &&
"Invalid Atomic Op");

EVT VT = Val.getValueType();

SDVTList VTs = getVTList(VT, MVT::Other);
SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
getVTList(VT, MVT::Other);
FoldingSetNodeID ID;
ID.AddInteger(MemVT.getRawBits());
SDValue Ops[] = {Chain, Ptr, Val};
@@ -3927,6 +3931,55 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
return SDValue(N, 0);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
EVT VT, SDValue Chain,
SDValue Ptr,
const Value* PtrVal,
unsigned Alignment,
AtomicOrdering Ordering,
SynchronizationScope SynchScope) {
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(MemVT);

MachineFunction &MF = getMachineFunction();
unsigned Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

// For now, atomics are considered to be volatile always.
Flags |= MachineMemOperand::MOVolatile;

MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
MemVT.getStoreSize(), Alignment);

return getAtomic(Opcode, dl, MemVT, VT, Chain, Ptr, MMO,
Ordering, SynchScope);
}

SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
EVT VT, SDValue Chain,
SDValue Ptr,
MachineMemOperand *MMO,
AtomicOrdering Ordering,
SynchronizationScope SynchScope) {
assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");

SDVTList VTs = getVTList(VT, MVT::Other);
FoldingSetNodeID ID;
ID.AddInteger(MemVT.getRawBits());
SDValue Ops[] = {Chain, Ptr};
AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
void* IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<AtomicSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain,
Ptr, MMO, Ordering, SynchScope);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
}

/// getMergeValues - Create a MERGE_VALUES node from the given operands.
SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
DebugLoc dl) {
@@ -5795,6 +5848,8 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::ATOMIC_LOAD_MAX: return "AtomicLoadMax";
case ISD::ATOMIC_LOAD_UMIN: return "AtomicLoadUMin";
case ISD::ATOMIC_LOAD_UMAX: return "AtomicLoadUMax";
case ISD::ATOMIC_LOAD: return "AtomicLoad";
case ISD::ATOMIC_STORE: return "AtomicStore";
case ISD::PCMARKER: return "PCMarker";
case ISD::READCYCLECOUNTER: return "ReadCycleCounter";
case ISD::SRCVALUE: return "SrcValue";
@@ -3149,6 +3149,9 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
}

void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
if (I.isAtomic())
return visitAtomicLoad(I);

const Value *SV = I.getOperand(0);
SDValue Ptr = getValue(SV);

@@ -3226,6 +3229,9 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
}

void SelectionDAGBuilder::visitStore(const StoreInst &I) {
if (I.isAtomic())
return visitAtomicStore(I);

const Value *SrcV = I.getOperand(0);
const Value *PtrV = I.getOperand(1);

@@ -3277,6 +3283,7 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
}

static SDValue InsertFenceForAtomic(SDValue Chain, AtomicOrdering Order,
SynchronizationScope Scope,
bool Before, DebugLoc dl,
SelectionDAG &DAG,
const TargetLowering &TLI) {
@@ -3294,19 +3301,21 @@ static SDValue InsertFenceForAtomic(SDValue Chain, AtomicOrdering Order,
}
SDValue Ops[3];
Ops[0] = Chain;
Ops[1] = DAG.getConstant(SequentiallyConsistent, TLI.getPointerTy());
Ops[2] = DAG.getConstant(Order, TLI.getPointerTy());
Ops[1] = DAG.getConstant(Order, TLI.getPointerTy());
Ops[2] = DAG.getConstant(Scope, TLI.getPointerTy());
return DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops, 3);
}

void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
DebugLoc dl = getCurDebugLoc();
AtomicOrdering Order = I.getOrdering();
SynchronizationScope Scope = I.getSynchScope();

SDValue InChain = getRoot();

if (TLI.getInsertFencesForAtomic())
InChain = InsertFenceForAtomic(InChain, Order, true, dl, DAG, TLI);
InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
DAG, TLI);

SDValue L =
DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl,
@@ -3316,12 +3325,14 @@ void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
getValue(I.getCompareOperand()),
getValue(I.getNewValOperand()),
MachinePointerInfo(I.getPointerOperand()), 0 /* Alignment */,
I.getOrdering(), I.getSynchScope());
TLI.getInsertFencesForAtomic() ? Monotonic : Order,
Scope);

SDValue OutChain = L.getValue(1);

if (TLI.getInsertFencesForAtomic())
OutChain = InsertFenceForAtomic(OutChain, Order, false, dl, DAG, TLI);
OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
DAG, TLI);

setValue(&I, L);
DAG.setRoot(OutChain);

@@ -3345,11 +3356,13 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
}
AtomicOrdering Order = I.getOrdering();
SynchronizationScope Scope = I.getSynchScope();

SDValue InChain = getRoot();

if (TLI.getInsertFencesForAtomic())
InChain = InsertFenceForAtomic(InChain, Order, true, dl, DAG, TLI);
InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
DAG, TLI);

SDValue L =
DAG.getAtomic(NT, dl,

@@ -3359,12 +3372,13 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
getValue(I.getValOperand()),
I.getPointerOperand(), 0 /* Alignment */,
TLI.getInsertFencesForAtomic() ? Monotonic : Order,
I.getSynchScope());
Scope);

SDValue OutChain = L.getValue(1);

if (TLI.getInsertFencesForAtomic())
OutChain = InsertFenceForAtomic(OutChain, Order, false, dl, DAG, TLI);
OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
DAG, TLI);

setValue(&I, L);
DAG.setRoot(OutChain);
@@ -3379,6 +3393,65 @@ void SelectionDAGBuilder::visitFence(const FenceInst &I) {
DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops, 3));
}

void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
DebugLoc dl = getCurDebugLoc();
AtomicOrdering Order = I.getOrdering();
SynchronizationScope Scope = I.getSynchScope();

SDValue InChain = getRoot();

if (TLI.getInsertFencesForAtomic())
InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
DAG, TLI);

EVT VT = EVT::getEVT(I.getType());

SDValue L =
DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
getValue(I.getPointerOperand()),
I.getPointerOperand(), I.getAlignment(),
TLI.getInsertFencesForAtomic() ? Monotonic : Order,
Scope);

SDValue OutChain = L.getValue(1);

if (TLI.getInsertFencesForAtomic())
OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
DAG, TLI);

setValue(&I, L);
DAG.setRoot(OutChain);
}

void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
DebugLoc dl = getCurDebugLoc();

AtomicOrdering Order = I.getOrdering();
SynchronizationScope Scope = I.getSynchScope();

SDValue InChain = getRoot();

if (TLI.getInsertFencesForAtomic())
InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
DAG, TLI);

SDValue OutChain =
DAG.getAtomic(ISD::ATOMIC_STORE, dl,
getValue(I.getValueOperand()).getValueType().getSimpleVT(),
InChain,
getValue(I.getPointerOperand()),
getValue(I.getValueOperand()),
I.getPointerOperand(), I.getAlignment(),
TLI.getInsertFencesForAtomic() ? Monotonic : Order,
Scope);

if (TLI.getInsertFencesForAtomic())
OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
DAG, TLI);

DAG.setRoot(OutChain);
}

/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
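The builder code above supports two strategies: a target can ask for explicit ATOMIC_FENCE nodes around the operation (which is then emitted with Monotonic ordering), or keep the original ordering on the node and legalize it itself, as x86 does below. A hedged sketch of that choice follows; MyTargetLowering is an invented class, and setInsertFencesForAtomic is assumed to be the setter counterpart of the getInsertFencesForAtomic hook used above.

```cpp
// Sketch, not from the patch: how a hypothetical target picks a strategy
// for atomic loads/stores in its lowering setup.
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

class MyTargetLowering : public TargetLowering {  // hypothetical target
public:
  explicit MyTargetLowering(TargetMachine &TM);
  void configureAtomics();
};

void MyTargetLowering::configureAtomics() {
  // Option 1: have SelectionDAGBuilder bracket atomic ops with explicit
  // ATOMIC_FENCE nodes and emit the memory operation itself as Monotonic.
  setInsertFencesForAtomic(true);

  // Option 2 (what x86 does below): keep the original ordering on the node
  // and custom-lower the cases that need extra care.
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
}
```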
@@ -526,7 +526,9 @@ private:
void visitPHI(const PHINode &I);
void visitCall(const CallInst &I);
bool visitMemCmpCall(const CallInst &I);

void visitAtomicLoad(const LoadInst &I);
void visitAtomicStore(const StoreInst &I);

void visitInlineAsm(ImmutableCallSite CS);
const char *visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
@@ -464,6 +464,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
MVT VT = IntVTs[i];
setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom);
setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
}

if (!Subtarget->is64Bit()) {
@@ -9999,6 +10000,26 @@ SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const {
cast<AtomicSDNode>(Node)->getSynchScope());
}

static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
SDNode *Node = Op.getNode();
DebugLoc dl = Node->getDebugLoc();

// Convert seq_cst store -> xchg
if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent) {
SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
cast<AtomicSDNode>(Node)->getMemoryVT(),
Node->getOperand(0),
Node->getOperand(1), Node->getOperand(2),
cast<AtomicSDNode>(Node)->getSrcValue(),
cast<AtomicSDNode>(Node)->getAlignment(),
cast<AtomicSDNode>(Node)->getOrdering(),
cast<AtomicSDNode>(Node)->getSynchScope());
return Swap.getValue(1);
}
// Other atomic stores have a simple pattern.
return Op;
}

static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getNode()->getValueType(0);

@@ -10035,6 +10056,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op,DAG);
case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG);
case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
@@ -1691,3 +1691,17 @@ def : Pat<(and GR64:$src1, i64immSExt8:$src2),
(AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
(AND64ri32 GR64:$src1, i64immSExt32:$src2)>;

def : Pat<(atomic_load_8 addr:$src), (MOV8rm addr:$src)>;
def : Pat<(atomic_load_16 addr:$src), (MOV16rm addr:$src)>;
def : Pat<(atomic_load_32 addr:$src), (MOV32rm addr:$src)>;
def : Pat<(atomic_load_64 addr:$src), (MOV64rm addr:$src)>;

def : Pat<(atomic_store_8 addr:$ptr, GR8:$val),
(MOV8mr addr:$ptr, GR8:$val)>;
def : Pat<(atomic_store_16 addr:$ptr, GR16:$val),
(MOV16mr addr:$ptr, GR16:$val)>;
def : Pat<(atomic_store_32 addr:$ptr, GR32:$val),
(MOV32mr addr:$ptr, GR32:$val)>;
def : Pat<(atomic_store_64 addr:$ptr, GR64:$val),
(MOV64mr addr:$ptr, GR64:$val)>;
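Taken together with LowerATOMIC_STORE above, these patterns give simple x86 code for source-level atomics. A rough, hand-written illustration follows; the expected instructions in the comments are paraphrased from the patterns and lowering in this patch, not taken from a test.

```cpp
// Rough illustration of what the new x86 lowering produces.
#include <atomic>

std::atomic<int> g;

int load_acquire() { return g.load(std::memory_order_acquire); }
// -> a plain MOV from memory (atomic_load_32 pattern -> MOV32rm)

void store_release(int v) { g.store(v, std::memory_order_release); }
// -> a plain MOV to memory (atomic_store_32 pattern -> MOV32mr)

void store_seq_cst(int v) { g.store(v); }
// -> an XCHG: LowerATOMIC_STORE rewrites a SequentiallyConsistent
//    ATOMIC_STORE into ATOMIC_SWAP, which provides the required barrier
```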