Pass MemOperand through for 64-bit atomics on 32-bit, incidentally making the
case where the memop is a pointer deref work. Fix cmp-and-swap regression.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@57027 91177308-0d34-0410-b5e6-96231b3b80d8
commit 1b54c7fe9b
parent 94e98af210
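
For context, a hedged sketch of the kind of source this change affects (not part
of the commit): a 64-bit atomic compare-and-swap on 32-bit x86, where the memory
operand is reached through a pointer dereference. The struct and function names
are illustrative; any i64 atomic through a deref exercises the same path.

// Illustrative user code, not from this commit. On a 32-bit x86 target the
// 64-bit compare-and-swap below is custom-lowered (ultimately to CMPXCHG8B);
// its memory operand is a pointer deref (&n->value), the case the commit
// message says now works once the MemOperand is passed through.
struct Counter { long long value; };

long long cas64(Counter *n, long long expected, long long desired) {
  return __sync_val_compare_and_swap(&n->value, expected, desired);
}
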
lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -6212,20 +6212,33 @@ void SelectionDAGLegalize::ExpandOp(SDValue Op, SDValue &Lo, SDValue &Hi){
     break;
   }

+  case ISD::ATOMIC_CMP_SWAP_64: {
+    // This operation does not need a loop.
+    SDValue Tmp = TLI.LowerOperation(Op, DAG);
+    assert(Tmp.getNode() && "Node must be custom expanded!");
+    ExpandOp(Tmp.getValue(0), Lo, Hi);
+    AddLegalizedOperand(SDValue(Node, 1), // Remember we legalized the chain.
+                        LegalizeOp(Tmp.getValue(1)));
+    break;
+  }
+
   case ISD::ATOMIC_LOAD_ADD_64:
   case ISD::ATOMIC_LOAD_SUB_64:
   case ISD::ATOMIC_LOAD_AND_64:
   case ISD::ATOMIC_LOAD_OR_64:
   case ISD::ATOMIC_LOAD_XOR_64:
   case ISD::ATOMIC_LOAD_NAND_64:
-  case ISD::ATOMIC_SWAP_64:
-  case ISD::ATOMIC_CMP_SWAP_64: {
+  case ISD::ATOMIC_SWAP_64: {
     // These operations require a loop to be generated. We can't do that yet,
     // so substitute a target-dependent pseudo and expand that later.
     SDValue In2Lo, In2Hi, In2;
     ExpandOp(Op.getOperand(2), In2Lo, In2Hi);
     In2 = DAG.getNode(ISD::BUILD_PAIR, VT, In2Lo, In2Hi);
-    SDValue Result = TLI.LowerOperation(
-      DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0), Op.getOperand(1), In2),
-      DAG);
+    AtomicSDNode* Anode = cast<AtomicSDNode>(Node);
+    SDValue Replace =
+      DAG.getAtomic(Op.getOpcode(), Op.getOperand(0), Op.getOperand(1), In2,
+                    Anode->getSrcValue(), Anode->getAlignment());
+    SDValue Result = TLI.LowerOperation(Replace, DAG);
     ExpandOp(Result.getValue(0), Lo, Hi);
     // Remember that we legalized the chain.
     AddLegalizedOperand(SDValue(Node,1), LegalizeOp(Result.getValue(1)));

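The split above separates cmp-and-swap, which maps directly onto CMPXCHG8B and
needs no loop, from the read-modify-write ops, which need a compare-exchange
retry loop that the legalizer cannot emit itself; hence the target pseudo that
is expanded later (see EmitAtomicBit6432WithCustomInserter further down). A
minimal sketch of that loop, with the __sync builtin standing in for the
generated code; the function name is illustrative:

// Sketch only: the retry loop an ATOMADD6432-style pseudo eventually
// expands to on 32-bit x86.
long long atomic_add64(volatile long long *p, long long v) {
  long long old, desired;
  do {
    old = *p;            // read the current value (the EDX:EAX pair)
    desired = old + v;   // compute the updated 64-bit value
  } while (!__sync_bool_compare_and_swap(p, old, desired)); // CMPXCHG8B
  return old;
}
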
lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -1209,14 +1209,15 @@ SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
   SDValue Tmp0, Tmp1, Tmp2, Tmp3;
   if (!SelectAddr(In1, In1, Tmp0, Tmp1, Tmp2, Tmp3))
     return NULL;
+  SDValue LSI = Node->getOperand(4);    // MemOperand
   AddToISelQueue(Tmp0);
   AddToISelQueue(Tmp1);
   AddToISelQueue(Tmp2);
   AddToISelQueue(Tmp3);
   AddToISelQueue(In2L);
   AddToISelQueue(In2H);
+  // For now, don't select the MemOperand object, we don't know how.
   AddToISelQueue(Chain);
-  SDValue LSI = CurDAG->getMemOperand(cast<MemSDNode>(In1)->getMemOperand());
   const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, In2L, In2H, LSI, Chain };
   return CurDAG->getTargetNode(Opc, MVT::i32, MVT::i32, MVT::Other, Ops, 8);
 }

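Read together with the lowering hunk that follows, this is the whole path the
MemOperand now travels. A fragment-level sketch, reusing only names from the
patch (not standalone code; the variable name MemOp is illustrative):

// In X86TargetLowering::LowerATOMIC_BINARY_64: wrap the atomic node's
// memory info and append it as a plain operand of the pseudo node.
SDValue LSI = DAG.getMemOperand(cast<MemSDNode>(Node)->getMemOperand());
SDValue Ops[] = { Chain, In1, In2L, In2H, LSI };  // LSI is operand 4
// In X86DAGToDAGISel::SelectAtomic64: read it back by index and forward
// it into the target node without selecting it.
SDValue MemOp = Node->getOperand(4);              // the MemOperand
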
lib/Target/X86/X86ISelLowering.cpp
@@ -6026,9 +6026,12 @@ SDValue X86TargetLowering::LowerATOMIC_BINARY_64(SDValue Op,
   assert(Node->getOperand(2).getNode()->getOpcode()==ISD::BUILD_PAIR);
   SDValue In2L = Node->getOperand(2).getNode()->getOperand(0);
   SDValue In2H = Node->getOperand(2).getNode()->getOperand(1);
-  SDValue Ops[] = { Chain, In1, In2L, In2H };
+  // This is a generalized SDNode, not an AtomicSDNode, so it doesn't
+  // have a MemOperand. Pass the info through as a normal operand.
+  SDValue LSI = DAG.getMemOperand(cast<MemSDNode>(Node)->getMemOperand());
+  SDValue Ops[] = { Chain, In1, In2L, In2H, LSI };
   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
-  SDValue Result = DAG.getNode(NewOp, Tys, Ops, 4);
+  SDValue Result = DAG.getNode(NewOp, Tys, Ops, 5);
   SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)};
   SDValue ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OpsF, 2);
   SDValue Vals[2] = { ResultVal, Result.getValue(2) };

@@ -6415,7 +6418,7 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
   return nextMBB;
 }

-// private utility function
+// private utility function: 64 bit atomics on 32 bit host.
 MachineBasicBlock *
 X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
                                                        MachineBasicBlock *MBB,

lib/Target/X86/X86InstrInfo.td
@@ -2752,6 +2752,7 @@ def ATOMNAND8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
 let Constraints = "$val1 = $dst1, $val2 = $dst2",
     Defs = [EFLAGS, EAX, EBX, ECX, EDX],
     Uses = [EAX, EBX, ECX, EDX],
+    mayLoad = 1, mayStore = 1,
     usesCustomDAGSchedInserter = 1 in {
 def ATOMAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                     (ins i64mem:$ptr, GR32:$val1, GR32:$val2),

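One reason the new mayLoad/mayStore flags matter (an inference from how these
flags are used generally, not something the commit states): they mark the
pseudo as reading and writing memory even though it carries no pattern, so the
backend will not move other memory accesses across it. An illustrative
user-level view, assuming the builtin lowers through an ATOMSWAP6432-style
pseudo on 32-bit x86:

// Illustrative only. With mayLoad/mayStore set on the 64-bit atomic pseudo,
// the store to 'ready' below may not be reordered across the atomic swap.
volatile long long shared;
volatile int ready;

void publish(long long v) {
  __sync_lock_test_and_set(&shared, v);  // 64-bit atomic exchange
  ready = 1;                             // stays ordered after the swap
}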