Pulling out previous patch, must've run the tests in the wrong directory.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@109005 91177308-0d34-0410-b5e6-96231b3b80d8
Eric Christopher
2010-07-21 09:23:56 +00:00
parent 87f41370a8
commit dab4dac2a0
3 changed files with 3 additions and 40 deletions

lib/Target/X86/X86ISelLowering.cpp

@@ -343,9 +343,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
   if (Subtarget->hasSSE1())
     setOperationAction(ISD::PREFETCH      , MVT::Other, Legal);
 
-  // We may not have a libcall for MEMBARRIER so we should lower this.
-  setOperationAction(ISD::MEMBARRIER      , MVT::Other, Custom);
-
+  if (!Subtarget->hasSSE2())
+    setOperationAction(ISD::MEMBARRIER    , MVT::Other, Expand);
   // On X86 and X86-64, atomic operations are lowered to locked instructions.
   // Locked instructions, in turn, have implicit fence semantics (all memory
   // operations are flushed before issuing the locked instruction, and they
@@ -7509,16 +7508,6 @@ SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
   return Sum;
 }
 
-SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{
-  DebugLoc dl = Op.getDebugLoc();
-
-  if (!Subtarget->hasSSE2())
-    return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
-                       DAG.getConstant(0, MVT::i32));
-
-  return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
-}
-
 SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
   EVT T = Op.getValueType();
   DebugLoc dl = Op.getDebugLoc();
@@ -7608,7 +7597,6 @@ SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const {
 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   switch (Op.getOpcode()) {
   default: llvm_unreachable("Should not custom lower this!");
-  case ISD::MEMBARRIER:         return LowerMEMBARRIER(Op,DAG);
   case ISD::ATOMIC_CMP_SWAP:    return LowerCMP_SWAP(Op,DAG);
   case ISD::ATOMIC_LOAD_SUB:    return LowerLOAD_SUB(Op,DAG);
   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);

lib/Target/X86/X86ISelLowering.h

@@ -265,10 +265,7 @@ namespace llvm {
       ATOMXOR64_DAG,
       ATOMAND64_DAG,
       ATOMNAND64_DAG,
-      ATOMSWAP64_DAG,
-
-      // Memory barrier
-      MEMBARRIER
+      ATOMSWAP64_DAG
 
       // WARNING: Do not add anything in the end unless you want the node to
       // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
@@ -718,7 +715,6 @@ namespace llvm {
     SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
-    SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const;
 
     virtual SDValue
       LowerFormalArguments(SDValue Chain,

lib/Target/X86/X86InstrInfo.td

@@ -80,14 +80,6 @@ def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
 
 def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;
 
-def SDT_X86MEMBARRIER : SDTypeProfile<0, 0, []>;
-def SDT_X86MEMBARRIERNoSSE : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-
-def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER,
-                            [SDNPHasChain]>;
-def X86MemBarrierNoSSE : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIERNoSSE,
-                            [SDNPHasChain]>;
-
 def X86bsf     : SDNode<"X86ISD::BSF",      SDTUnaryArithWithFlags>;
 def X86bsr     : SDNode<"X86ISD::BSR",      SDTUnaryArithWithFlags>;
 def X86shld    : SDNode<"X86ISD::SHLD",     SDTIntShiftDOp>;
@@ -3914,19 +3906,6 @@ def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
 // Atomic support
 //
 
-// Memory barriers
-let hasSideEffects = 1 in {
-def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
-                       "#MEMBARRIER",
-                       [(X86MemBarrier)]>, Requires<[HasSSE2]>;
-
-let Uses = [ESP], isCodeGenOnly = 1 in
-def Int_MemBarrierNoSSE : I<0x0B, Pseudo, (outs), (ins GR32:$zero),
-                            "lock\n\t"
-                            "or{l}\t{$zero, (%esp)|(%esp), $zero}",
-                            [(X86MemBarrierNoSSE GR32:$zero)]>, LOCK;
-}
-
 // Atomic swap. These are just normal xchg instructions. But since a memory
 // operand is referenced, the atomicity is ensured.
 let Constraints = "$val = $dst" in {