Remove unused MEMBARRIER DAG node; it's been replaced by ATOMIC_FENCE.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@179939 91177308-0d34-0410-b5e6-96231b3b80d8
parent 972b26b1d7
commit 6265d5c91a
@@ -602,14 +602,6 @@ namespace ISD {
     /// specifier.
     PREFETCH,
 
-    /// OUTCHAIN = MEMBARRIER(INCHAIN, load-load, load-store, store-load,
-    ///                       store-store, device)
-    /// This corresponds to the memory.barrier intrinsic.
-    /// it takes an input chain, 4 operands to specify the type of barrier, an
-    /// operand specifying if the barrier applies to device and uncached memory
-    /// and produces an output chain.
-    MEMBARRIER,
-
     /// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
     /// This corresponds to the fence instruction. It takes an input chain, and
     /// two integer constants: an AtomicOrdering and a SynchronizationScope.
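
Not part of the commit, for context: the comment kept above fully specifies the surviving node, so a fence can be built with the same generic helpers used elsewhere in this diff. A minimal sketch; the placeholder names (Op, dl, Ordering, Scope) and the choice of MVT::i32 for the two constants are illustrative assumptions, not code from the tree:

    // Sketch: OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
    SDValue Chain = Op.getOperand(0);                     // INCHAIN
    SDValue Fence =
        DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Chain,
                    DAG.getConstant(Ordering, MVT::i32),  // AtomicOrdering value
                    DAG.getConstant(Scope, MVT::i32));    // SynchronizationScope value
    // The node's only result is an output chain; later memory nodes chain on Fence.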
@@ -414,9 +414,6 @@ def prefetch : SDNode<"ISD::PREFETCH" , SDTPrefetch,
 def readcyclecounter : SDNode<"ISD::READCYCLECOUNTER", SDTIntLeaf,
                               [SDNPHasChain, SDNPSideEffect]>;
 
-def membarrier : SDNode<"ISD::MEMBARRIER" , SDTMemBarrier,
-                        [SDNPHasChain, SDNPSideEffect]>;
-
 def atomic_fence : SDNode<"ISD::ATOMIC_FENCE" , SDTAtomicFence,
                           [SDNPHasChain, SDNPSideEffect]>;
 

@@ -243,7 +243,6 @@ namespace {
     SDValue visitCONCAT_VECTORS(SDNode *N);
     SDValue visitEXTRACT_SUBVECTOR(SDNode *N);
     SDValue visitVECTOR_SHUFFLE(SDNode *N);
-    SDValue visitMEMBARRIER(SDNode *N);
 
     SDValue XformToShuffleWithZero(SDNode *N);
     SDValue ReassociateOps(unsigned Opc, DebugLoc DL, SDValue LHS, SDValue RHS);

@@ -1165,7 +1164,6 @@ SDValue DAGCombiner::visit(SDNode *N) {
   case ISD::CONCAT_VECTORS: return visitCONCAT_VECTORS(N);
   case ISD::EXTRACT_SUBVECTOR: return visitEXTRACT_SUBVECTOR(N);
   case ISD::VECTOR_SHUFFLE: return visitVECTOR_SHUFFLE(N);
-  case ISD::MEMBARRIER: return visitMEMBARRIER(N);
   }
   return SDValue();
 }

@@ -9311,59 +9309,6 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
   return SDValue();
 }
 
-SDValue DAGCombiner::visitMEMBARRIER(SDNode* N) {
-  if (!TLI.getShouldFoldAtomicFences())
-    return SDValue();
-
-  SDValue atomic = N->getOperand(0);
-  switch (atomic.getOpcode()) {
-    case ISD::ATOMIC_CMP_SWAP:
-    case ISD::ATOMIC_SWAP:
-    case ISD::ATOMIC_LOAD_ADD:
-    case ISD::ATOMIC_LOAD_SUB:
-    case ISD::ATOMIC_LOAD_AND:
-    case ISD::ATOMIC_LOAD_OR:
-    case ISD::ATOMIC_LOAD_XOR:
-    case ISD::ATOMIC_LOAD_NAND:
-    case ISD::ATOMIC_LOAD_MIN:
-    case ISD::ATOMIC_LOAD_MAX:
-    case ISD::ATOMIC_LOAD_UMIN:
-    case ISD::ATOMIC_LOAD_UMAX:
-      break;
-    default:
-      return SDValue();
-  }
-
-  SDValue fence = atomic.getOperand(0);
-  if (fence.getOpcode() != ISD::MEMBARRIER)
-    return SDValue();
-
-  switch (atomic.getOpcode()) {
-    case ISD::ATOMIC_CMP_SWAP:
-      return SDValue(DAG.UpdateNodeOperands(atomic.getNode(),
-                                            fence.getOperand(0),
-                                            atomic.getOperand(1), atomic.getOperand(2),
-                                            atomic.getOperand(3)), atomic.getResNo());
-    case ISD::ATOMIC_SWAP:
-    case ISD::ATOMIC_LOAD_ADD:
-    case ISD::ATOMIC_LOAD_SUB:
-    case ISD::ATOMIC_LOAD_AND:
-    case ISD::ATOMIC_LOAD_OR:
-    case ISD::ATOMIC_LOAD_XOR:
-    case ISD::ATOMIC_LOAD_NAND:
-    case ISD::ATOMIC_LOAD_MIN:
-    case ISD::ATOMIC_LOAD_MAX:
-    case ISD::ATOMIC_LOAD_UMIN:
-    case ISD::ATOMIC_LOAD_UMAX:
-      return SDValue(DAG.UpdateNodeOperands(atomic.getNode(),
-                                            fence.getOperand(0),
-                                            atomic.getOperand(1), atomic.getOperand(2)),
-                     atomic.getResNo());
-    default:
-      return SDValue();
-  }
-}
-
 /// XformToShuffleWithZero - Returns a vector_shuffle if it able to transform
 /// an AND to a vector_shuffle with the destination vector and a zero vector.
 /// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. ==>

@@ -2759,8 +2759,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
     Results.push_back(DAG.getConstant(0, MVT::i32));
     Results.push_back(Node->getOperand(0));
     break;
-  case ISD::ATOMIC_FENCE:
-  case ISD::MEMBARRIER: {
+  case ISD::ATOMIC_FENCE: {
     // If the target didn't lower this, lower it to '__sync_synchronize()' call
     // FIXME: handle "fence singlethread" more efficiently.
     TargetLowering::ArgListTy Args;
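
Not part of the commit, for context: whether this libcall fallback is ever reached is decided per target through setOperationAction, which is exactly what the target hunks below adjust. An illustrative sketch of the two options (both forms appear verbatim further down):

    // In a target's TargetLowering constructor, pick one:
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);  // lower fences in target code
    // ...or...
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);  // fall back to the __sync_synchronize() call above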
@@ -777,7 +777,6 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
                     Res = PromoteIntOp_CONVERT_RNDSAT(N); break;
   case ISD::INSERT_VECTOR_ELT:
                     Res = PromoteIntOp_INSERT_VECTOR_ELT(N, OpNo);break;
-  case ISD::MEMBARRIER: Res = PromoteIntOp_MEMBARRIER(N); break;
   case ISD::SCALAR_TO_VECTOR:
                     Res = PromoteIntOp_SCALAR_TO_VECTOR(N); break;
   case ISD::VSELECT:

@@ -961,17 +960,6 @@ SDValue DAGTypeLegalizer::PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N,
                                   N->getOperand(1), Idx), 0);
 }
 
-SDValue DAGTypeLegalizer::PromoteIntOp_MEMBARRIER(SDNode *N) {
-  SDValue NewOps[6];
-  DebugLoc dl = N->getDebugLoc();
-  NewOps[0] = N->getOperand(0);
-  for (unsigned i = 1; i < array_lengthof(NewOps); ++i) {
-    SDValue Flag = GetPromotedInteger(N->getOperand(i));
-    NewOps[i] = DAG.getZeroExtendInReg(Flag, dl, MVT::i1);
-  }
-  return SDValue(DAG.UpdateNodeOperands(N, NewOps, array_lengthof(NewOps)), 0);
-}
-
 SDValue DAGTypeLegalizer::PromoteIntOp_SCALAR_TO_VECTOR(SDNode *N) {
   // Integer SCALAR_TO_VECTOR operands are implicitly truncated, so just promote
   // the operand in place.

@@ -270,7 +270,6 @@ private:
   SDValue PromoteIntOp_EXTRACT_ELEMENT(SDNode *N);
   SDValue PromoteIntOp_EXTRACT_VECTOR_ELT(SDNode *N);
   SDValue PromoteIntOp_CONCAT_VECTORS(SDNode *N);
-  SDValue PromoteIntOp_MEMBARRIER(SDNode *N);
   SDValue PromoteIntOp_SCALAR_TO_VECTOR(SDNode *N);
   SDValue PromoteIntOp_SELECT(SDNode *N, unsigned OpNo);
   SDValue PromoteIntOp_SELECT_CC(SDNode *N, unsigned OpNo);

@@ -54,7 +54,6 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
   case ISD::DELETED_NODE: return "<<Deleted Node!>>";
 #endif
   case ISD::PREFETCH: return "Prefetch";
-  case ISD::MEMBARRIER: return "MemBarrier";
   case ISD::ATOMIC_FENCE: return "AtomicFence";
   case ISD::ATOMIC_CMP_SWAP: return "AtomicCmpSwap";
   case ISD::ATOMIC_SWAP: return "AtomicSwap";

@@ -729,7 +729,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
       (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
     // membarrier needs custom lowering; the rest are legal and handled
     // normally.
-    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
     setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
     // Custom lowering for 64-bit ops
     setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);

@@ -747,7 +746,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
     setInsertFencesForAtomic(true);
   } else {
     // Set them all for expansion, which will force libcalls.
-    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
     setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
     setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
     setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);

@@ -2484,35 +2482,6 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
   }
 }
 
-static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG,
-                               const ARMSubtarget *Subtarget) {
-  DebugLoc dl = Op.getDebugLoc();
-  if (!Subtarget->hasDataBarrier()) {
-    // Some ARMv6 cpus can support data barriers with an mcr instruction.
-    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
-    // here.
-    assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
-           "Unexpected ISD::MEMBARRIER encountered. Should be libcall!");
-    return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
-                       DAG.getConstant(0, MVT::i32));
-  }
-
-  SDValue Op5 = Op.getOperand(5);
-  bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0;
-  unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
-  unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
-  bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0);
-
-  ARM_MB::MemBOpt DMBOpt;
-  if (isDeviceBarrier)
-    DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY;
-  else
-    DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH;
-  return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
-                     DAG.getConstant(DMBOpt, MVT::i32));
-}
-
-
 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) {
   // FIXME: handle "fence singlethread" more efficiently.

@@ -5637,7 +5606,6 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   case ISD::BR_CC: return LowerBR_CC(Op, DAG);
   case ISD::BR_JT: return LowerBR_JT(Op, DAG);
   case ISD::VASTART: return LowerVASTART(Op, DAG);
-  case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget);
   case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget);
   case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget);
   case ISD::SINT_TO_FP:
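
Not part of the commit, for context: on ARM the surviving LowerATOMIC_FENCE only has to choose a barrier form, since the ordering and scope operands replace the five flag operands the deleted LowerMEMBARRIER decoded. A hedged sketch assembled from pieces visible in this hunk (the real in-tree function may differ in detail):

    // Sketch of a custom ATOMIC_FENCE lowering on ARM, reusing only node kinds
    // that appear in the deleted code above (ARMISD::MEMBARRIER_MCR,
    // ARMISD::MEMBARRIER, ARM_MB::ISH).
    static SDValue SketchLowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
                                           const ARMSubtarget *Subtarget) {
      DebugLoc dl = Op.getDebugLoc();
      if (!Subtarget->hasDataBarrier())
        // Pre-DMB cores fall back to the mcr-based barrier.
        return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other,
                           Op.getOperand(0), DAG.getConstant(0, MVT::i32));
      // DMB-capable cores: a full inner-shareable barrier is always sufficient.
      return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
                         DAG.getConstant(ARM_MB::ISH, MVT::i32));
    }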
@@ -1002,14 +1002,6 @@ HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
   return FrameAddr;
 }
 
-
-SDValue HexagonTargetLowering::LowerMEMBARRIER(SDValue Op,
-                                               SelectionDAG& DAG) const {
-  DebugLoc dl = Op.getDebugLoc();
-  return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
-}
-
-
 SDValue HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op,
                                                  SelectionDAG& DAG) const {
   DebugLoc dl = Op.getDebugLoc();

@@ -1377,7 +1369,6 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
   setOperationAction(ISD::BR_CC, MVT::i32, Expand);
   setOperationAction(ISD::BR_CC, MVT::i64, Expand);
 
-  setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
   setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
 
   setOperationAction(ISD::FSIN , MVT::f64, Expand);

@@ -1529,7 +1520,6 @@ HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
   case ISD::GlobalTLSAddress:
     llvm_unreachable("TLS not implemented for Hexagon.");
-  case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG);
   case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
   case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
   case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);

@@ -122,7 +122,6 @@ namespace llvm {
 
     SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
-    SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG& DAG) const;
     SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const;
     SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
 

@@ -724,8 +724,7 @@ let usesCustomInserter=1 in {
                  [(set GPR:$dst, (atomic_load_nand_32 GPR:$ptr, GPR:$val))]>;
 
   def MEMBARRIER : MBlazePseudo<(outs), (ins),
-                                "# memory barrier",
-                                [(membarrier (i32 imm), (i32 imm), (i32 imm), (i32 imm), (i32 imm))]>;
+                                "# memory barrier", []>;
 }
 
 //===----------------------------------------------------------------------===//

@@ -53,7 +53,6 @@ Mips16TargetLowering::Mips16TargetLowering(MipsTargetMachine &TM)
   if (Mips16HardFloat)
     setMips16HardFloatLibCalls();
 
-  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
   setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
   setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);

@@ -738,7 +738,6 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
   case ISD::FRAMEADDR: return lowerFRAMEADDR(Op, DAG);
   case ISD::RETURNADDR: return lowerRETURNADDR(Op, DAG);
   case ISD::EH_RETURN: return lowerEH_RETURN(Op, DAG);
-  case ISD::MEMBARRIER: return lowerMEMBARRIER(Op, DAG);
   case ISD::ATOMIC_FENCE: return lowerATOMIC_FENCE(Op, DAG);
   case ISD::SHL_PARTS: return lowerShiftLeftParts(Op, DAG);
   case ISD::SRA_PARTS: return lowerShiftRightParts(Op, DAG, true);

@@ -1824,15 +1823,6 @@ SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
                      Chain.getValue(1));
 }
 
-// TODO: set SType according to the desired memory barrier behavior.
-SDValue
-MipsTargetLowering::lowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const {
-  unsigned SType = 0;
-  DebugLoc DL = Op.getDebugLoc();
-  return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0),
-                     DAG.getConstant(SType, MVT::i32));
-}
-
 SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op,
                                               SelectionDAG &DAG) const {
   // FIXME: Need pseudo-fence for 'singlethread' fences

@@ -343,7 +343,6 @@ namespace llvm {
     SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
     SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
     SDValue lowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
-    SDValue lowerMEMBARRIER(SDValue Op, SelectionDAG& DAG) const;
     SDValue lowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const;
     SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG& DAG) const;
     SDValue lowerShiftRightParts(SDValue Op, SelectionDAG& DAG,

@@ -91,7 +91,6 @@ MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
   setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
   setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
   setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
-  setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
   setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
   setOperationAction(ISD::LOAD, MVT::i32, Custom);
   setOperationAction(ISD::STORE, MVT::i32, Custom);

@@ -1964,14 +1964,6 @@ def : Pat<(f64 (extloadf32 xaddr:$src)),
 def : Pat<(f64 (fextend f32:$src)),
           (COPY_TO_REGCLASS $src, F8RC)>;
 
-// Memory barriers
-def : Pat<(membarrier (i32 imm /*ll*/),
-                      (i32 imm /*ls*/),
-                      (i32 imm /*sl*/),
-                      (i32 imm /*ss*/),
-                      (i32 imm /*device*/)),
-          (SYNC)>;
-
 def : Pat<(atomic_fence (imm), (imm)), (SYNC)>;
 
 // Additional FNMSUB patterns: -a*c + b == -(a*c - b)

@@ -1221,7 +1221,6 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
 
   // FIXME: There are instructions available for ATOMIC_FENCE
   // on SparcV8 and later.
-  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
   setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
 
   setOperationAction(ISD::FSIN , MVT::f64, Expand);

@@ -526,7 +526,6 @@ void X86TargetLowering::resetOperationActions() {
   if (Subtarget->hasSSE1())
     setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
 
-  setOperationAction(ISD::MEMBARRIER , MVT::Other, Custom);
   setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
 
   // On X86 and X86-64, atomic operations are lowered to locked instructions.

@@ -12124,50 +12123,6 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
   }
 }
 
-static SDValue LowerMEMBARRIER(SDValue Op, const X86Subtarget *Subtarget,
-                               SelectionDAG &DAG) {
-  DebugLoc dl = Op.getDebugLoc();
-
-  // Go ahead and emit the fence on x86-64 even if we asked for no-sse2.
-  // There isn't any reason to disable it if the target processor supports it.
-  if (!Subtarget->hasSSE2() && !Subtarget->is64Bit()) {
-    SDValue Chain = Op.getOperand(0);
-    SDValue Zero = DAG.getConstant(0, MVT::i32);
-    SDValue Ops[] = {
-      DAG.getRegister(X86::ESP, MVT::i32), // Base
-      DAG.getTargetConstant(1, MVT::i8),   // Scale
-      DAG.getRegister(0, MVT::i32),        // Index
-      DAG.getTargetConstant(0, MVT::i32),  // Disp
-      DAG.getRegister(0, MVT::i32),        // Segment.
-      Zero,
-      Chain
-    };
-    SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
-    return SDValue(Res, 0);
-  }
-
-  unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
-  if (!isDev)
-    return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
-
-  unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
-  unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
-  unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
-  unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
-
-  // def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
-  if (!Op1 && !Op2 && !Op3 && Op4)
-    return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0));
-
-  // def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
-  if (Op1 && !Op2 && !Op3 && !Op4)
-    return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0));
-
-  // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)),
-  //           (MFENCE)>;
-  return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
-}
-
 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
                                  SelectionDAG &DAG) {
   DebugLoc dl = Op.getDebugLoc();

@@ -12399,7 +12354,6 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   switch (Op.getOpcode()) {
   default: llvm_unreachable("Should not custom lower this!");
   case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
-  case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, Subtarget, DAG);
   case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
   case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op, Subtarget, DAG);
   case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
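
Not part of the commit, for context: the surviving X86 LowerATOMIC_FENCE covers the same ground as the deleted LowerMEMBARRIER above. A hedged sketch reusing only opcodes visible in the deleted code (X86ISD::MFENCE, X86::OR32mrLocked); the real function also inspects the ordering and scope operands, which is elided here:

    static SDValue SketchLowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
                                           SelectionDAG &DAG) {
      DebugLoc dl = Op.getDebugLoc();
      // With SSE2 (or on x86-64) a sequentially consistent fence becomes MFENCE.
      if (Subtarget->hasSSE2() || Subtarget->is64Bit())
        return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
      // Pre-SSE2 32-bit: a locked OR of a stack location, as the deleted code did.
      SDValue Chain = Op.getOperand(0);
      SDValue Zero = DAG.getConstant(0, MVT::i32);
      SDValue Ops[] = {
        DAG.getRegister(X86::ESP, MVT::i32), // Base
        DAG.getTargetConstant(1, MVT::i8),   // Scale
        DAG.getRegister(0, MVT::i32),        // Index
        DAG.getTargetConstant(0, MVT::i32),  // Disp
        DAG.getRegister(0, MVT::i32),        // Segment
        Zero,
        Chain
      };
      SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
      return SDValue(Res, 0);
    }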