From 5bf1b4eacdd87b0753279a378e93126c0b053144 Mon Sep 17 00:00:00 2001
From: Bill Wendling
Date: Wed, 20 Aug 2008 00:28:16 +0000
Subject: [PATCH] Revert r55018 and apply the correct "fix" for the 64-bit
 sub_and_fetch atomic. Just expand it like the other X-bit sub_and_fetches.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@55023 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/TargetSelectionDAG.td   | 31 +++----------------------------
 lib/Target/X86/X86ISelLowering.cpp |  2 ++
 lib/Target/X86/X86Instr64bit.td    |  7 -------
 lib/Target/X86/X86InstrInfo.td     | 16 ----------------
 4 files changed, 5 insertions(+), 51 deletions(-)

diff --git a/lib/Target/TargetSelectionDAG.td b/lib/Target/TargetSelectionDAG.td
index eaca86a232c..5dba0bc9c05 100644
--- a/lib/Target/TargetSelectionDAG.td
+++ b/lib/Target/TargetSelectionDAG.td
@@ -358,10 +358,10 @@ def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , STDAtomic3,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2,
-                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_swap     : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_load_or  : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
@@ -815,32 +815,6 @@ def atomic_load_add_64 : PatFrag<(ops node:$ptr, node:$inc),
   return false;
 }]>;
 
-def atomic_load_sub_8 : PatFrag<(ops node:$ptr, node:$dec),
-                    (atomic_load_sub node:$ptr, node:$dec), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-    return V->getValueType(0) == MVT::i8;
-  return false;
-}]>;
-def atomic_load_sub_16 : PatFrag<(ops node:$ptr, node:$dec),
-                    (atomic_load_sub node:$ptr, node:$dec), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-    return V->getValueType(0) == MVT::i16;
-  return false;
-}]>;
-def atomic_load_sub_32 : PatFrag<(ops node:$ptr, node:$dec),
-                    (atomic_load_sub node:$ptr, node:$dec), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-    return V->getValueType(0) == MVT::i32;
-  return false;
-}]>;
-def atomic_load_sub_64 : PatFrag<(ops node:$ptr, node:$dec),
-                    (atomic_load_sub node:$ptr, node:$dec), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-    return V->getValueType(0) == MVT::i64;
-  return false;
-}]>;
-
-
 def atomic_swap_8 : PatFrag<(ops node:$ptr, node:$inc),
                     (atomic_swap node:$ptr, node:$inc), [{
   if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
@@ -867,6 +841,7 @@ def atomic_swap_64 : PatFrag<(ops node:$ptr, node:$inc),
 }]>;
 
 
+
 // setcc convenience fragments.
 def setoeq : PatFrag<(ops node:$lhs, node:$rhs),
                      (setcc node:$lhs, node:$rhs, SETOEQ)>;
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 7cf74bf79ea..2fb9a2e651e 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -297,9 +297,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
   setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i16, Custom);
   setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i32, Custom);
   setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i64, Custom);
+  setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i8, Expand);
   setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i16, Expand);
   setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i32, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i64, Expand);
 
   // Use the default ISD::DBG_STOPPOINT, ISD::DECLARE expansion.
   setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td
index 81abc291fc1..e49a548b766 100644
--- a/lib/Target/X86/X86Instr64bit.td
+++ b/lib/Target/X86/X86Instr64bit.td
@@ -1143,13 +1143,6 @@ def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
                   "lock\n\txadd\t$val, $ptr",
                   [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                 TB, LOCK;
-
-let Defs = [EFLAGS] in
-def LXSUB64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
-                  "lock\n\txadd\t$val, $ptr",
-                  [(set GR64:$dst, (atomic_load_sub_64 addr:$ptr, GR64:$val))]>,
-                TB, LOCK;
-
 def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
                   "xchg\t$val, $ptr",
                   [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 7b5ee91e9be..37a5fed51c2 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -2634,22 +2634,6 @@ def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
                 TB, LOCK;
 }
 
-// Atomic exchange and subtract
-let Constraints = "$val = $dst", Defs = [EFLAGS] in {
-def LXSUB32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
-               "lock\n\txadd{l}\t{$val, $ptr|$ptr, $val}",
-               [(set GR32:$dst, (atomic_load_sub_32 addr:$ptr, GR32:$val))]>,
-                TB, LOCK;
-def LXSUB16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
-               "lock\n\txadd{w}\t{$val, $ptr|$ptr, $val}",
-               [(set GR16:$dst, (atomic_load_sub_16 addr:$ptr, GR16:$val))]>,
-                TB, OpSize, LOCK;
-def LXSUB8  : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
-               "lock\n\txadd{b}\t{$val, $ptr|$ptr, $val}",
-               [(set GR8:$dst, (atomic_load_sub_8 addr:$ptr, GR8:$val))]>,
-                TB, LOCK;
-}
-
 // Atomic exchange, and, or, xor
 let Constraints = "$val = $dst", Defs = [EFLAGS],
                   usesCustomDAGSchedInserter = 1 in {
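
Note (illustration only, not part of the patch): the reason plain expansion suffices
is that an atomic subtract is an atomic add of the negated operand, so ATOMIC_LOAD_SUB
can simply be marked Expand for every width (as i16 and i32 already were) and fall
back on the existing atomic_load_add_* / LOCK XADD support, making the dedicated
LXSUB* definitions removed above unnecessary. Below is a minimal standalone C++
sketch of that equivalence, using std::atomic rather than LLVM's DAG nodes; the
helper names sub_and_fetch and sub_via_add are hypothetical and exist only for
this example.

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // sub_and_fetch written directly: fetch_sub returns the old value, so
    // subtract once more to obtain the post-operation value.
    int64_t sub_and_fetch(std::atomic<int64_t> &x, int64_t v) {
      return x.fetch_sub(v) - v;
    }

    // The same read-modify-write expressed as an atomic add of the negation,
    // the form an xadd-based add path can serve without a subtract pattern.
    // (Assumes v != INT64_MIN so the negation is well defined.)
    int64_t sub_via_add(std::atomic<int64_t> &x, int64_t v) {
      int64_t neg = -v;
      return x.fetch_add(neg) + neg;
    }

    int main() {
      std::atomic<int64_t> a{100}, b{100};
      assert(sub_and_fetch(a, 7) == 93);  // 100 - 7
      assert(sub_via_add(b, 7) == 93);    // identical result via add of -7
      return 0;
    }

Both helpers perform the same operation; the only machine-level requirement is an
atomic add, which the retained LXADD8/16/32/64 patterns already provide.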