diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h index 4cbe3b02e49..ea35e69a486 100644 --- a/include/llvm/CodeGen/SelectionDAG.h +++ b/include/llvm/CodeGen/SelectionDAG.h @@ -464,13 +464,13 @@ public: /// getAtomic - Gets a node for an atomic op, produces result and chain and /// takes 3 operands - SDValue getAtomic(unsigned Opcode, SDValue Chain, SDValue Ptr, + SDValue getAtomic(unsigned Opcode, MVT MemVT, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, const Value* PtrVal, unsigned Alignment=0); /// getAtomic - Gets a node for an atomic op, produces result and chain and /// takes 2 operands. - SDValue getAtomic(unsigned Opcode, SDValue Chain, SDValue Ptr, + SDValue getAtomic(unsigned Opcode, MVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, const Value* PtrVal, unsigned Alignment = 0); diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h index 2fabe16cf83..74693dc8bfe 100644 --- a/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/include/llvm/CodeGen/SelectionDAGNodes.h @@ -628,64 +628,28 @@ namespace ISD { // this corresponds to the atomic.lcs intrinsic. // cmp is compared to *ptr, and if equal, swap is stored in *ptr. // the return is always the original value in *ptr - ATOMIC_CMP_SWAP_8, - ATOMIC_CMP_SWAP_16, - ATOMIC_CMP_SWAP_32, - ATOMIC_CMP_SWAP_64, + ATOMIC_CMP_SWAP, // Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) // this corresponds to the atomic.swap intrinsic. // amt is stored to *ptr atomically. // the return is always the original value in *ptr - ATOMIC_SWAP_8, - ATOMIC_SWAP_16, - ATOMIC_SWAP_32, - ATOMIC_SWAP_64, + ATOMIC_SWAP, // Val, OUTCHAIN = ATOMIC_L[OpName]S(INCHAIN, ptr, amt) // this corresponds to the atomic.[OpName] intrinsic. // op(*ptr, amt) is stored to *ptr atomically. // the return is always the original value in *ptr - ATOMIC_LOAD_ADD_8, - ATOMIC_LOAD_SUB_8, - ATOMIC_LOAD_AND_8, - ATOMIC_LOAD_OR_8, - ATOMIC_LOAD_XOR_8, - ATOMIC_LOAD_NAND_8, - ATOMIC_LOAD_MIN_8, - ATOMIC_LOAD_MAX_8, - ATOMIC_LOAD_UMIN_8, - ATOMIC_LOAD_UMAX_8, - ATOMIC_LOAD_ADD_16, - ATOMIC_LOAD_SUB_16, - ATOMIC_LOAD_AND_16, - ATOMIC_LOAD_OR_16, - ATOMIC_LOAD_XOR_16, - ATOMIC_LOAD_NAND_16, - ATOMIC_LOAD_MIN_16, - ATOMIC_LOAD_MAX_16, - ATOMIC_LOAD_UMIN_16, - ATOMIC_LOAD_UMAX_16, - ATOMIC_LOAD_ADD_32, - ATOMIC_LOAD_SUB_32, - ATOMIC_LOAD_AND_32, - ATOMIC_LOAD_OR_32, - ATOMIC_LOAD_XOR_32, - ATOMIC_LOAD_NAND_32, - ATOMIC_LOAD_MIN_32, - ATOMIC_LOAD_MAX_32, - ATOMIC_LOAD_UMIN_32, - ATOMIC_LOAD_UMAX_32, - ATOMIC_LOAD_ADD_64, - ATOMIC_LOAD_SUB_64, - ATOMIC_LOAD_AND_64, - ATOMIC_LOAD_OR_64, - ATOMIC_LOAD_XOR_64, - ATOMIC_LOAD_NAND_64, - ATOMIC_LOAD_MIN_64, - ATOMIC_LOAD_MAX_64, - ATOMIC_LOAD_UMIN_64, - ATOMIC_LOAD_UMAX_64, + ATOMIC_LOAD_ADD, + ATOMIC_LOAD_SUB, + ATOMIC_LOAD_AND, + ATOMIC_LOAD_OR, + ATOMIC_LOAD_XOR, + ATOMIC_LOAD_NAND, + ATOMIC_LOAD_MIN, + ATOMIC_LOAD_MAX, + ATOMIC_LOAD_UMIN, + ATOMIC_LOAD_UMAX, // BUILTIN_OP_END - This must be the last enum value in this list. BUILTIN_OP_END @@ -1615,58 +1579,18 @@ public: // with either an intrinsic or a target opcode. 
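The header changes above collapse the four width variants of every atomic opcode into a single opcode per operation and thread the access width through the new MemVT parameter of getAtomic. As a hedged sketch only (Chain, Ptr and PtrVal stand for values a caller would already have; none of these names come from the patch), building a 32-bit atomic add against the new signature looks roughly like this:

    // Minimal sketch, assuming Chain, Ptr and PtrVal are already in hand.
    SDValue Incr = DAG.getConstant(1, MVT::i32);
    SDValue Res  = DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, MVT::i32,  // MemVT carries the width
                                 Chain, Ptr, Incr, PtrVal, /*Alignment=*/4);
    SDValue OldVal   = Res;               // result 0: the original value at *Ptr
    SDValue OutChain = Res.getValue(1);   // result 1: the output chain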
return N->getOpcode() == ISD::LOAD || N->getOpcode() == ISD::STORE || - N->getOpcode() == ISD::ATOMIC_CMP_SWAP_8 || - N->getOpcode() == ISD::ATOMIC_SWAP_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_ADD_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_SUB_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_AND_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_OR_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_XOR_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_NAND_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_MIN_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_MAX_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_8 || - - N->getOpcode() == ISD::ATOMIC_CMP_SWAP_16 || - N->getOpcode() == ISD::ATOMIC_SWAP_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_ADD_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_SUB_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_AND_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_OR_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_XOR_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_NAND_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_MIN_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_MAX_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_16 || - - N->getOpcode() == ISD::ATOMIC_CMP_SWAP_32 || - N->getOpcode() == ISD::ATOMIC_SWAP_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_ADD_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_SUB_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_AND_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_OR_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_XOR_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_NAND_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_MIN_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_MAX_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_32 || - - N->getOpcode() == ISD::ATOMIC_CMP_SWAP_64 || - N->getOpcode() == ISD::ATOMIC_SWAP_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_ADD_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_SUB_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_AND_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_OR_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_XOR_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_NAND_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_MIN_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_MAX_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_64 || - + N->getOpcode() == ISD::ATOMIC_CMP_SWAP || + N->getOpcode() == ISD::ATOMIC_SWAP || + N->getOpcode() == ISD::ATOMIC_LOAD_ADD || + N->getOpcode() == ISD::ATOMIC_LOAD_SUB || + N->getOpcode() == ISD::ATOMIC_LOAD_AND || + N->getOpcode() == ISD::ATOMIC_LOAD_OR || + N->getOpcode() == ISD::ATOMIC_LOAD_XOR || + N->getOpcode() == ISD::ATOMIC_LOAD_NAND || + N->getOpcode() == ISD::ATOMIC_LOAD_MIN || + N->getOpcode() == ISD::ATOMIC_LOAD_MAX || + N->getOpcode() == ISD::ATOMIC_LOAD_UMIN || + N->getOpcode() == ISD::ATOMIC_LOAD_UMAX || N->getOpcode() == ISD::INTRINSIC_W_CHAIN || N->getOpcode() == ISD::INTRINSIC_VOID || N->isTargetOpcode(); @@ -1688,10 +1612,11 @@ class AtomicSDNode : public MemSDNode { // Swp: swap value // SrcVal: address to update as a Value (used for MemOperand) // Align: alignment of memory - AtomicSDNode(unsigned Opc, SDVTList VTL, SDValue Chain, SDValue Ptr, + AtomicSDNode(unsigned Opc, SDVTList VTL, MVT MemVT, + SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, const Value* SrcVal, unsigned Align=0) - : MemSDNode(Opc, VTL, Cmp.getValueType(), SrcVal, /*SVOffset=*/0, + : MemSDNode(Opc, VTL, MemVT, SrcVal, /*SVOffset=*/0, Align, /*isVolatile=*/true) { Ops[0] = Chain; Ops[1] = Ptr; @@ -1699,9 +1624,10 @@ class 
AtomicSDNode : public MemSDNode { Ops[3] = Swp; InitOperands(Ops, 4); } - AtomicSDNode(unsigned Opc, SDVTList VTL, SDValue Chain, SDValue Ptr, + AtomicSDNode(unsigned Opc, SDVTList VTL, MVT MemVT, + SDValue Chain, SDValue Ptr, SDValue Val, const Value* SrcVal, unsigned Align=0) - : MemSDNode(Opc, VTL, Val.getValueType(), SrcVal, /*SVOffset=*/0, + : MemSDNode(Opc, VTL, MemVT, SrcVal, /*SVOffset=*/0, Align, /*isVolatile=*/true) { Ops[0] = Chain; Ops[1] = Ptr; @@ -1714,63 +1640,24 @@ class AtomicSDNode : public MemSDNode { bool isCompareAndSwap() const { unsigned Op = getOpcode(); - return Op == ISD::ATOMIC_CMP_SWAP_8 || - Op == ISD::ATOMIC_CMP_SWAP_16 || - Op == ISD::ATOMIC_CMP_SWAP_32 || - Op == ISD::ATOMIC_CMP_SWAP_64; + return Op == ISD::ATOMIC_CMP_SWAP; } // Methods to support isa and dyn_cast static bool classof(const AtomicSDNode *) { return true; } static bool classof(const SDNode *N) { - return N->getOpcode() == ISD::ATOMIC_CMP_SWAP_8 || - N->getOpcode() == ISD::ATOMIC_SWAP_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_ADD_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_SUB_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_AND_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_OR_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_XOR_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_NAND_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_MIN_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_MAX_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_8 || - N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_8 || - N->getOpcode() == ISD::ATOMIC_CMP_SWAP_16 || - N->getOpcode() == ISD::ATOMIC_SWAP_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_ADD_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_SUB_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_AND_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_OR_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_XOR_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_NAND_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_MIN_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_MAX_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_16 || - N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_16 || - N->getOpcode() == ISD::ATOMIC_CMP_SWAP_32 || - N->getOpcode() == ISD::ATOMIC_SWAP_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_ADD_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_SUB_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_AND_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_OR_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_XOR_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_NAND_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_MIN_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_MAX_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_32 || - N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_32 || - N->getOpcode() == ISD::ATOMIC_CMP_SWAP_64 || - N->getOpcode() == ISD::ATOMIC_SWAP_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_ADD_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_SUB_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_AND_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_OR_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_XOR_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_NAND_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_MIN_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_MAX_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_UMIN_64 || - N->getOpcode() == ISD::ATOMIC_LOAD_UMAX_64; + return N->getOpcode() == ISD::ATOMIC_CMP_SWAP || + N->getOpcode() == ISD::ATOMIC_SWAP || + N->getOpcode() == ISD::ATOMIC_LOAD_ADD || + N->getOpcode() == ISD::ATOMIC_LOAD_SUB || + N->getOpcode() == ISD::ATOMIC_LOAD_AND || + N->getOpcode() == ISD::ATOMIC_LOAD_OR || + N->getOpcode() == ISD::ATOMIC_LOAD_XOR || + N->getOpcode() == ISD::ATOMIC_LOAD_NAND || + N->getOpcode() == ISD::ATOMIC_LOAD_MIN || + 
N->getOpcode() == ISD::ATOMIC_LOAD_MAX || + N->getOpcode() == ISD::ATOMIC_LOAD_UMIN || + N->getOpcode() == ISD::ATOMIC_LOAD_UMAX; } }; diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h index 2c655881eff..1e14a11d594 100644 --- a/include/llvm/Target/TargetLowering.h +++ b/include/llvm/Target/TargetLowering.h @@ -1492,7 +1492,7 @@ private: MVT TransformToType[MVT::LAST_VALUETYPE]; // Defines the capacity of the TargetLowering::OpActions table - static const int OpActionsCapacity = 222; + static const int OpActionsCapacity = 184; /// OpActions - For each operation and each value type, keep a LegalizeAction /// that indicates how instruction selection should deal with the operation. diff --git a/include/llvm/Target/TargetSelectionDAG.td b/include/llvm/Target/TargetSelectionDAG.td index c936f7af4c6..5d1b37d1c71 100644 --- a/include/llvm/Target/TargetSelectionDAG.td +++ b/include/llvm/Target/TargetSelectionDAG.td @@ -363,101 +363,29 @@ def prefetch : SDNode<"ISD::PREFETCH" , STDPrefetch, def membarrier : SDNode<"ISD::MEMBARRIER" , STDMemBarrier, [SDNPHasChain, SDNPSideEffect]>; -def atomic_cmp_swap_8 : SDNode<"ISD::ATOMIC_CMP_SWAP_8" , STDAtomic3, +def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , STDAtomic3, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_add_8 : SDNode<"ISD::ATOMIC_LOAD_ADD_8" , STDAtomic2, +def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_swap_8 : SDNode<"ISD::ATOMIC_SWAP_8", STDAtomic2, +def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_sub_8 : SDNode<"ISD::ATOMIC_LOAD_SUB_8" , STDAtomic2, +def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_and_8 : SDNode<"ISD::ATOMIC_LOAD_AND_8" , STDAtomic2, +def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_or_8 : SDNode<"ISD::ATOMIC_LOAD_OR_8" , STDAtomic2, +def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_xor_8 : SDNode<"ISD::ATOMIC_LOAD_XOR_8" , STDAtomic2, +def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_nand_8: SDNode<"ISD::ATOMIC_LOAD_NAND_8", STDAtomic2, +def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_min_8 : SDNode<"ISD::ATOMIC_LOAD_MIN_8", STDAtomic2, +def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_max_8 : SDNode<"ISD::ATOMIC_LOAD_MAX_8", STDAtomic2, +def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_umin_8 : SDNode<"ISD::ATOMIC_LOAD_UMIN_8", STDAtomic2, +def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", STDAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_umax_8 : SDNode<"ISD::ATOMIC_LOAD_UMAX_8", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_cmp_swap_16 : SDNode<"ISD::ATOMIC_CMP_SWAP_16" , STDAtomic3, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; 
-def atomic_load_add_16 : SDNode<"ISD::ATOMIC_LOAD_ADD_16" , STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_swap_16 : SDNode<"ISD::ATOMIC_SWAP_16", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_sub_16 : SDNode<"ISD::ATOMIC_LOAD_SUB_16" , STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_and_16 : SDNode<"ISD::ATOMIC_LOAD_AND_16" , STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_or_16 : SDNode<"ISD::ATOMIC_LOAD_OR_16" , STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_xor_16 : SDNode<"ISD::ATOMIC_LOAD_XOR_16" , STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_nand_16: SDNode<"ISD::ATOMIC_LOAD_NAND_16", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_min_16 : SDNode<"ISD::ATOMIC_LOAD_MIN_16", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_max_16 : SDNode<"ISD::ATOMIC_LOAD_MAX_16", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_umin_16 : SDNode<"ISD::ATOMIC_LOAD_UMIN_16", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_umax_16 : SDNode<"ISD::ATOMIC_LOAD_UMAX_16", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_cmp_swap_32 : SDNode<"ISD::ATOMIC_CMP_SWAP_32" , STDAtomic3, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_add_32 : SDNode<"ISD::ATOMIC_LOAD_ADD_32" , STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_swap_32 : SDNode<"ISD::ATOMIC_SWAP_32", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_sub_32 : SDNode<"ISD::ATOMIC_LOAD_SUB_32" , STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_and_32 : SDNode<"ISD::ATOMIC_LOAD_AND_32" , STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_or_32 : SDNode<"ISD::ATOMIC_LOAD_OR_32" , STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_xor_32 : SDNode<"ISD::ATOMIC_LOAD_XOR_32" , STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_nand_32: SDNode<"ISD::ATOMIC_LOAD_NAND_32", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_min_32 : SDNode<"ISD::ATOMIC_LOAD_MIN_32", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_max_32 : SDNode<"ISD::ATOMIC_LOAD_MAX_32", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_umin_32 : SDNode<"ISD::ATOMIC_LOAD_UMIN_32", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_umax_32 : SDNode<"ISD::ATOMIC_LOAD_UMAX_32", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_cmp_swap_64 : SDNode<"ISD::ATOMIC_CMP_SWAP_64" , STDAtomic3, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_add_64 : SDNode<"ISD::ATOMIC_LOAD_ADD_64" , STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_swap_64 : SDNode<"ISD::ATOMIC_SWAP_64", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; -def atomic_load_sub_64 : SDNode<"ISD::ATOMIC_LOAD_SUB_64" , STDAtomic2, - 
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_and_64 : SDNode<"ISD::ATOMIC_LOAD_AND_64" , STDAtomic2,
-                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_or_64 : SDNode<"ISD::ATOMIC_LOAD_OR_64" , STDAtomic2,
-                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_xor_64 : SDNode<"ISD::ATOMIC_LOAD_XOR_64" , STDAtomic2,
-                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_nand_64: SDNode<"ISD::ATOMIC_LOAD_NAND_64", STDAtomic2,
-                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_min_64 : SDNode<"ISD::ATOMIC_LOAD_MIN_64", STDAtomic2,
-                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_max_64 : SDNode<"ISD::ATOMIC_LOAD_MAX_64", STDAtomic2,
-                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_umin_64 : SDNode<"ISD::ATOMIC_LOAD_UMIN_64", STDAtomic2,
-                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_umax_64 : SDNode<"ISD::ATOMIC_LOAD_UMAX_64", STDAtomic2,
+def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", STDAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 
 // Do not use ld, st directly. Use load, extload, sextload, zextload, store,
@@ -794,6 +722,58 @@ def setle : PatFrag<(ops node:$lhs, node:$rhs),
 def setne : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETNE)>;
 
+def atomic_cmp_swap_8 :
+  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
+          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
+  return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def atomic_cmp_swap_16 :
+  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
+          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
+  return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def atomic_cmp_swap_32 :
+  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
+          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
+  return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+def atomic_cmp_swap_64 :
+  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
+          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
+  return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
+}]>;
+
+multiclass binary_atomic_op<SDNode atomic_op> {
+  def _8 : PatFrag<(ops node:$ptr, node:$val),
+                   (atomic_op node:$ptr, node:$val), [{
+    return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
+  }]>;
+  def _16 : PatFrag<(ops node:$ptr, node:$val),
+                    (atomic_op node:$ptr, node:$val), [{
+    return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
+  }]>;
+  def _32 : PatFrag<(ops node:$ptr, node:$val),
+                    (atomic_op node:$ptr, node:$val), [{
+    return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
+  }]>;
+  def _64 : PatFrag<(ops node:$ptr, node:$val),
+                    (atomic_op node:$ptr, node:$val), [{
+    return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
+  }]>;
+}
+
+defm atomic_load_add : binary_atomic_op<atomic_load_add>;
+defm atomic_swap : binary_atomic_op<atomic_swap>;
+defm atomic_load_sub : binary_atomic_op<atomic_load_sub>;
+defm atomic_load_and : binary_atomic_op<atomic_load_and>;
+defm atomic_load_or : binary_atomic_op<atomic_load_or>;
+defm atomic_load_xor : binary_atomic_op<atomic_load_xor>;
+defm atomic_load_nand : binary_atomic_op<atomic_load_nand>;
+defm atomic_load_min : binary_atomic_op<atomic_load_min>;
+defm atomic_load_max : binary_atomic_op<atomic_load_max>;
+defm atomic_load_umin : binary_atomic_op<atomic_load_umin>;
+defm atomic_load_umax : binary_atomic_op<atomic_load_umax>;
+
 //===----------------------------------------------------------------------===//
 // Selection DAG CONVERT_RNDSAT patterns
diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp
index 8cff5b55585..c162d0e3dcb 100644
--- a/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ 
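The width-specific selectors used by target .td files survive as the PatFrags defined above: since the opcode no longer encodes the size, each fragment's predicate reads it back from the node. As a hedged sketch (the helper name is illustrative, not code from the patch), the test that atomic_load_add_32 performs during instruction selection is equivalent to:

    // Illustrative only; the real check lives in the PatFrag predicate above.
    static bool isAtomicLoadAdd32(SDNode *N) {
      return N->getOpcode() == ISD::ATOMIC_LOAD_ADD &&
             cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
    }

Because the PatFrags keep the old names, existing target patterns written against atomic_load_add_32 and friends continue to match without modification.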
-413,6 +413,7 @@ MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() { while (!BB->empty()) BB->remove(BB->begin()); + // Then re-insert them according to the given schedule. for (unsigned i = 0, e = Sequence.size(); i != e; i++) { SUnit *SU = Sequence[i]; if (!SU) { diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index 4a2af06966d..d79f6bb1e1b 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -1403,10 +1403,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { break; } - case ISD::ATOMIC_CMP_SWAP_8: - case ISD::ATOMIC_CMP_SWAP_16: - case ISD::ATOMIC_CMP_SWAP_32: - case ISD::ATOMIC_CMP_SWAP_64: { + case ISD::ATOMIC_CMP_SWAP: { unsigned int num_operands = 4; assert(Node->getNumOperands() == num_operands && "Invalid Atomic node!"); SDValue Ops[4]; @@ -1426,50 +1423,17 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1)); return Result.getValue(Op.getResNo()); } - case ISD::ATOMIC_LOAD_ADD_8: - case ISD::ATOMIC_LOAD_SUB_8: - case ISD::ATOMIC_LOAD_AND_8: - case ISD::ATOMIC_LOAD_OR_8: - case ISD::ATOMIC_LOAD_XOR_8: - case ISD::ATOMIC_LOAD_NAND_8: - case ISD::ATOMIC_LOAD_MIN_8: - case ISD::ATOMIC_LOAD_MAX_8: - case ISD::ATOMIC_LOAD_UMIN_8: - case ISD::ATOMIC_LOAD_UMAX_8: - case ISD::ATOMIC_SWAP_8: - case ISD::ATOMIC_LOAD_ADD_16: - case ISD::ATOMIC_LOAD_SUB_16: - case ISD::ATOMIC_LOAD_AND_16: - case ISD::ATOMIC_LOAD_OR_16: - case ISD::ATOMIC_LOAD_XOR_16: - case ISD::ATOMIC_LOAD_NAND_16: - case ISD::ATOMIC_LOAD_MIN_16: - case ISD::ATOMIC_LOAD_MAX_16: - case ISD::ATOMIC_LOAD_UMIN_16: - case ISD::ATOMIC_LOAD_UMAX_16: - case ISD::ATOMIC_SWAP_16: - case ISD::ATOMIC_LOAD_ADD_32: - case ISD::ATOMIC_LOAD_SUB_32: - case ISD::ATOMIC_LOAD_AND_32: - case ISD::ATOMIC_LOAD_OR_32: - case ISD::ATOMIC_LOAD_XOR_32: - case ISD::ATOMIC_LOAD_NAND_32: - case ISD::ATOMIC_LOAD_MIN_32: - case ISD::ATOMIC_LOAD_MAX_32: - case ISD::ATOMIC_LOAD_UMIN_32: - case ISD::ATOMIC_LOAD_UMAX_32: - case ISD::ATOMIC_SWAP_32: - case ISD::ATOMIC_LOAD_ADD_64: - case ISD::ATOMIC_LOAD_SUB_64: - case ISD::ATOMIC_LOAD_AND_64: - case ISD::ATOMIC_LOAD_OR_64: - case ISD::ATOMIC_LOAD_XOR_64: - case ISD::ATOMIC_LOAD_NAND_64: - case ISD::ATOMIC_LOAD_MIN_64: - case ISD::ATOMIC_LOAD_MAX_64: - case ISD::ATOMIC_LOAD_UMIN_64: - case ISD::ATOMIC_LOAD_UMAX_64: - case ISD::ATOMIC_SWAP_64: { + case ISD::ATOMIC_LOAD_ADD: + case ISD::ATOMIC_LOAD_SUB: + case ISD::ATOMIC_LOAD_AND: + case ISD::ATOMIC_LOAD_OR: + case ISD::ATOMIC_LOAD_XOR: + case ISD::ATOMIC_LOAD_NAND: + case ISD::ATOMIC_LOAD_MIN: + case ISD::ATOMIC_LOAD_MAX: + case ISD::ATOMIC_LOAD_UMIN: + case ISD::ATOMIC_LOAD_UMAX: + case ISD::ATOMIC_SWAP: { unsigned int num_operands = 3; assert(Node->getNumOperands() == num_operands && "Invalid Atomic node!"); SDValue Ops[3]; @@ -4718,14 +4682,12 @@ SDValue SelectionDAGLegalize::PromoteOp(SDValue Op) { break; } - case ISD::ATOMIC_CMP_SWAP_8: - case ISD::ATOMIC_CMP_SWAP_16: - case ISD::ATOMIC_CMP_SWAP_32: - case ISD::ATOMIC_CMP_SWAP_64: { + case ISD::ATOMIC_CMP_SWAP: { AtomicSDNode* AtomNode = cast(Node); Tmp2 = PromoteOp(Node->getOperand(2)); Tmp3 = PromoteOp(Node->getOperand(3)); - Result = DAG.getAtomic(Node->getOpcode(), AtomNode->getChain(), + Result = DAG.getAtomic(Node->getOpcode(), AtomNode->getMemoryVT(), + AtomNode->getChain(), AtomNode->getBasePtr(), Tmp2, Tmp3, AtomNode->getSrcValue(), AtomNode->getAlignment()); @@ -4733,53 +4695,21 @@ SDValue 
SelectionDAGLegalize::PromoteOp(SDValue Op) { AddLegalizedOperand(Op.getValue(1), LegalizeOp(Result.getValue(1))); break; } - case ISD::ATOMIC_LOAD_ADD_8: - case ISD::ATOMIC_LOAD_SUB_8: - case ISD::ATOMIC_LOAD_AND_8: - case ISD::ATOMIC_LOAD_OR_8: - case ISD::ATOMIC_LOAD_XOR_8: - case ISD::ATOMIC_LOAD_NAND_8: - case ISD::ATOMIC_LOAD_MIN_8: - case ISD::ATOMIC_LOAD_MAX_8: - case ISD::ATOMIC_LOAD_UMIN_8: - case ISD::ATOMIC_LOAD_UMAX_8: - case ISD::ATOMIC_SWAP_8: - case ISD::ATOMIC_LOAD_ADD_16: - case ISD::ATOMIC_LOAD_SUB_16: - case ISD::ATOMIC_LOAD_AND_16: - case ISD::ATOMIC_LOAD_OR_16: - case ISD::ATOMIC_LOAD_XOR_16: - case ISD::ATOMIC_LOAD_NAND_16: - case ISD::ATOMIC_LOAD_MIN_16: - case ISD::ATOMIC_LOAD_MAX_16: - case ISD::ATOMIC_LOAD_UMIN_16: - case ISD::ATOMIC_LOAD_UMAX_16: - case ISD::ATOMIC_SWAP_16: - case ISD::ATOMIC_LOAD_ADD_32: - case ISD::ATOMIC_LOAD_SUB_32: - case ISD::ATOMIC_LOAD_AND_32: - case ISD::ATOMIC_LOAD_OR_32: - case ISD::ATOMIC_LOAD_XOR_32: - case ISD::ATOMIC_LOAD_NAND_32: - case ISD::ATOMIC_LOAD_MIN_32: - case ISD::ATOMIC_LOAD_MAX_32: - case ISD::ATOMIC_LOAD_UMIN_32: - case ISD::ATOMIC_LOAD_UMAX_32: - case ISD::ATOMIC_SWAP_32: - case ISD::ATOMIC_LOAD_ADD_64: - case ISD::ATOMIC_LOAD_SUB_64: - case ISD::ATOMIC_LOAD_AND_64: - case ISD::ATOMIC_LOAD_OR_64: - case ISD::ATOMIC_LOAD_XOR_64: - case ISD::ATOMIC_LOAD_NAND_64: - case ISD::ATOMIC_LOAD_MIN_64: - case ISD::ATOMIC_LOAD_MAX_64: - case ISD::ATOMIC_LOAD_UMIN_64: - case ISD::ATOMIC_LOAD_UMAX_64: - case ISD::ATOMIC_SWAP_64: { + case ISD::ATOMIC_LOAD_ADD: + case ISD::ATOMIC_LOAD_SUB: + case ISD::ATOMIC_LOAD_AND: + case ISD::ATOMIC_LOAD_OR: + case ISD::ATOMIC_LOAD_XOR: + case ISD::ATOMIC_LOAD_NAND: + case ISD::ATOMIC_LOAD_MIN: + case ISD::ATOMIC_LOAD_MAX: + case ISD::ATOMIC_LOAD_UMIN: + case ISD::ATOMIC_LOAD_UMAX: + case ISD::ATOMIC_SWAP: { AtomicSDNode* AtomNode = cast(Node); Tmp2 = PromoteOp(Node->getOperand(2)); - Result = DAG.getAtomic(Node->getOpcode(), AtomNode->getChain(), + Result = DAG.getAtomic(Node->getOpcode(), AtomNode->getMemoryVT(), + AtomNode->getChain(), AtomNode->getBasePtr(), Tmp2, AtomNode->getSrcValue(), AtomNode->getAlignment()); @@ -6769,7 +6699,7 @@ void SelectionDAGLegalize::ExpandOp(SDValue Op, SDValue &Lo, SDValue &Hi){ break; } - case ISD::ATOMIC_CMP_SWAP_64: { + case ISD::ATOMIC_CMP_SWAP: { // This operation does not need a loop. SDValue Tmp = TLI.LowerOperation(Op, DAG); assert(Tmp.getNode() && "Node must be custom expanded!"); @@ -6779,13 +6709,13 @@ void SelectionDAGLegalize::ExpandOp(SDValue Op, SDValue &Lo, SDValue &Hi){ break; } - case ISD::ATOMIC_LOAD_ADD_64: - case ISD::ATOMIC_LOAD_SUB_64: - case ISD::ATOMIC_LOAD_AND_64: - case ISD::ATOMIC_LOAD_OR_64: - case ISD::ATOMIC_LOAD_XOR_64: - case ISD::ATOMIC_LOAD_NAND_64: - case ISD::ATOMIC_SWAP_64: { + case ISD::ATOMIC_LOAD_ADD: + case ISD::ATOMIC_LOAD_SUB: + case ISD::ATOMIC_LOAD_AND: + case ISD::ATOMIC_LOAD_OR: + case ISD::ATOMIC_LOAD_XOR: + case ISD::ATOMIC_LOAD_NAND: + case ISD::ATOMIC_SWAP: { // These operations require a loop to be generated. We can't do that yet, // so substitute a target-dependent pseudo and expand that later. 
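The pseudo-based path described in the comment above is only reached when a target marks the 64-bit forms as Custom. As a hedged reminder of how that is wired up (this mirrors the X86 hunk later in this patch rather than introducing anything new):

    // On a 32-bit subtarget the i64 atomics cannot be selected directly, so
    // route them to LowerOperation, which substitutes the target pseudo.
    if (!Subtarget->is64Bit()) {
      setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
      setOperationAction(ISD::ATOMIC_SWAP,     MVT::i64, Custom);
    }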
SDValue In2Lo, In2Hi, In2; @@ -6793,7 +6723,8 @@ void SelectionDAGLegalize::ExpandOp(SDValue Op, SDValue &Lo, SDValue &Hi){ In2 = DAG.getNode(ISD::BUILD_PAIR, VT, In2Lo, In2Hi); AtomicSDNode* Anode = cast(Node); SDValue Replace = - DAG.getAtomic(Op.getOpcode(), Op.getOperand(0), Op.getOperand(1), In2, + DAG.getAtomic(Op.getOpcode(), Anode->getMemoryVT(), + Op.getOperand(0), Op.getOperand(1), In2, Anode->getSrcValue(), Anode->getAlignment()); SDValue Result = TLI.LowerOperation(Replace, DAG); ExpandOp(Result.getValue(0), Lo, Hi); @@ -8318,54 +8249,18 @@ SDValue SelectionDAGLegalize::WidenVectorOp(SDValue Op, MVT WidenVT) { Node->getOperand(2)); break; } - case ISD::ATOMIC_CMP_SWAP_8: - case ISD::ATOMIC_CMP_SWAP_16: - case ISD::ATOMIC_CMP_SWAP_32: - case ISD::ATOMIC_CMP_SWAP_64: - case ISD::ATOMIC_LOAD_ADD_8: - case ISD::ATOMIC_LOAD_SUB_8: - case ISD::ATOMIC_LOAD_AND_8: - case ISD::ATOMIC_LOAD_OR_8: - case ISD::ATOMIC_LOAD_XOR_8: - case ISD::ATOMIC_LOAD_NAND_8: - case ISD::ATOMIC_LOAD_MIN_8: - case ISD::ATOMIC_LOAD_MAX_8: - case ISD::ATOMIC_LOAD_UMIN_8: - case ISD::ATOMIC_LOAD_UMAX_8: - case ISD::ATOMIC_SWAP_8: - case ISD::ATOMIC_LOAD_ADD_16: - case ISD::ATOMIC_LOAD_SUB_16: - case ISD::ATOMIC_LOAD_AND_16: - case ISD::ATOMIC_LOAD_OR_16: - case ISD::ATOMIC_LOAD_XOR_16: - case ISD::ATOMIC_LOAD_NAND_16: - case ISD::ATOMIC_LOAD_MIN_16: - case ISD::ATOMIC_LOAD_MAX_16: - case ISD::ATOMIC_LOAD_UMIN_16: - case ISD::ATOMIC_LOAD_UMAX_16: - case ISD::ATOMIC_SWAP_16: - case ISD::ATOMIC_LOAD_ADD_32: - case ISD::ATOMIC_LOAD_SUB_32: - case ISD::ATOMIC_LOAD_AND_32: - case ISD::ATOMIC_LOAD_OR_32: - case ISD::ATOMIC_LOAD_XOR_32: - case ISD::ATOMIC_LOAD_NAND_32: - case ISD::ATOMIC_LOAD_MIN_32: - case ISD::ATOMIC_LOAD_MAX_32: - case ISD::ATOMIC_LOAD_UMIN_32: - case ISD::ATOMIC_LOAD_UMAX_32: - case ISD::ATOMIC_SWAP_32: - case ISD::ATOMIC_LOAD_ADD_64: - case ISD::ATOMIC_LOAD_SUB_64: - case ISD::ATOMIC_LOAD_AND_64: - case ISD::ATOMIC_LOAD_OR_64: - case ISD::ATOMIC_LOAD_XOR_64: - case ISD::ATOMIC_LOAD_NAND_64: - case ISD::ATOMIC_LOAD_MIN_64: - case ISD::ATOMIC_LOAD_MAX_64: - case ISD::ATOMIC_LOAD_UMIN_64: - case ISD::ATOMIC_LOAD_UMAX_64: - case ISD::ATOMIC_SWAP_64: { + case ISD::ATOMIC_CMP_SWAP: + case ISD::ATOMIC_LOAD_ADD: + case ISD::ATOMIC_LOAD_SUB: + case ISD::ATOMIC_LOAD_AND: + case ISD::ATOMIC_LOAD_OR: + case ISD::ATOMIC_LOAD_XOR: + case ISD::ATOMIC_LOAD_NAND: + case ISD::ATOMIC_LOAD_MIN: + case ISD::ATOMIC_LOAD_MAX: + case ISD::ATOMIC_LOAD_UMIN: + case ISD::ATOMIC_LOAD_UMAX: + case ISD::ATOMIC_SWAP: { // For now, we assume that using vectors for these operations don't make // much sense so we just split it. 
We return an empty result SDValue X, Y; diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp index 5f236778807..7ae53eb7a9d 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp @@ -98,56 +98,20 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) { case ISD::SMULO: case ISD::UMULO: Result = PromoteIntRes_XMULO(N, ResNo); break; - case ISD::ATOMIC_LOAD_ADD_8: - case ISD::ATOMIC_LOAD_SUB_8: - case ISD::ATOMIC_LOAD_AND_8: - case ISD::ATOMIC_LOAD_OR_8: - case ISD::ATOMIC_LOAD_XOR_8: - case ISD::ATOMIC_LOAD_NAND_8: - case ISD::ATOMIC_LOAD_MIN_8: - case ISD::ATOMIC_LOAD_MAX_8: - case ISD::ATOMIC_LOAD_UMIN_8: - case ISD::ATOMIC_LOAD_UMAX_8: - case ISD::ATOMIC_SWAP_8: - case ISD::ATOMIC_LOAD_ADD_16: - case ISD::ATOMIC_LOAD_SUB_16: - case ISD::ATOMIC_LOAD_AND_16: - case ISD::ATOMIC_LOAD_OR_16: - case ISD::ATOMIC_LOAD_XOR_16: - case ISD::ATOMIC_LOAD_NAND_16: - case ISD::ATOMIC_LOAD_MIN_16: - case ISD::ATOMIC_LOAD_MAX_16: - case ISD::ATOMIC_LOAD_UMIN_16: - case ISD::ATOMIC_LOAD_UMAX_16: - case ISD::ATOMIC_SWAP_16: - case ISD::ATOMIC_LOAD_ADD_32: - case ISD::ATOMIC_LOAD_SUB_32: - case ISD::ATOMIC_LOAD_AND_32: - case ISD::ATOMIC_LOAD_OR_32: - case ISD::ATOMIC_LOAD_XOR_32: - case ISD::ATOMIC_LOAD_NAND_32: - case ISD::ATOMIC_LOAD_MIN_32: - case ISD::ATOMIC_LOAD_MAX_32: - case ISD::ATOMIC_LOAD_UMIN_32: - case ISD::ATOMIC_LOAD_UMAX_32: - case ISD::ATOMIC_SWAP_32: - case ISD::ATOMIC_LOAD_ADD_64: - case ISD::ATOMIC_LOAD_SUB_64: - case ISD::ATOMIC_LOAD_AND_64: - case ISD::ATOMIC_LOAD_OR_64: - case ISD::ATOMIC_LOAD_XOR_64: - case ISD::ATOMIC_LOAD_NAND_64: - case ISD::ATOMIC_LOAD_MIN_64: - case ISD::ATOMIC_LOAD_MAX_64: - case ISD::ATOMIC_LOAD_UMIN_64: - case ISD::ATOMIC_LOAD_UMAX_64: - case ISD::ATOMIC_SWAP_64: + case ISD::ATOMIC_LOAD_ADD: + case ISD::ATOMIC_LOAD_SUB: + case ISD::ATOMIC_LOAD_AND: + case ISD::ATOMIC_LOAD_OR: + case ISD::ATOMIC_LOAD_XOR: + case ISD::ATOMIC_LOAD_NAND: + case ISD::ATOMIC_LOAD_MIN: + case ISD::ATOMIC_LOAD_MAX: + case ISD::ATOMIC_LOAD_UMIN: + case ISD::ATOMIC_LOAD_UMAX: + case ISD::ATOMIC_SWAP: Result = PromoteIntRes_Atomic1(cast(N)); break; - case ISD::ATOMIC_CMP_SWAP_8: - case ISD::ATOMIC_CMP_SWAP_16: - case ISD::ATOMIC_CMP_SWAP_32: - case ISD::ATOMIC_CMP_SWAP_64: + case ISD::ATOMIC_CMP_SWAP: Result = PromoteIntRes_Atomic2(cast(N)); break; } @@ -170,7 +134,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_AssertZext(SDNode *N) { SDValue DAGTypeLegalizer::PromoteIntRes_Atomic1(AtomicSDNode *N) { SDValue Op2 = GetPromotedInteger(N->getOperand(2)); - SDValue Res = DAG.getAtomic(N->getOpcode(), N->getChain(), N->getBasePtr(), + SDValue Res = DAG.getAtomic(N->getOpcode(), N->getMemoryVT(), + N->getChain(), N->getBasePtr(), Op2, N->getSrcValue(), N->getAlignment()); // Legalized the chain result - switch anything that used the old chain to // use the new one. @@ -181,7 +146,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_Atomic1(AtomicSDNode *N) { SDValue DAGTypeLegalizer::PromoteIntRes_Atomic2(AtomicSDNode *N) { SDValue Op2 = GetPromotedInteger(N->getOperand(2)); SDValue Op3 = GetPromotedInteger(N->getOperand(3)); - SDValue Res = DAG.getAtomic(N->getOpcode(), N->getChain(), N->getBasePtr(), + SDValue Res = DAG.getAtomic(N->getOpcode(), N->getMemoryVT(), + N->getChain(), N->getBasePtr(), Op2, Op3, N->getSrcValue(), N->getAlignment()); // Legalized the chain result - switch anything that used the old chain to // use the new one. 
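Wherever a switch used to spell out the _8/_16/_32/_64 opcode variants, the width is now a property of the node itself. A minimal sketch of the idiom the legalizers above rely on (the helper is illustrative, not code from the patch):

    // Access width of any of the merged atomic nodes, in bits.
    static unsigned getAtomicAccessBits(const SDNode *N) {
      return cast<AtomicSDNode>(N)->getMemoryVT().getSizeInBits();  // 8, 16, 32 or 64
    }

Note that integer promotion widens the value operands but leaves getMemoryVT() untouched, which is also why getAtomic now derives the default alignment from MemVT rather than from the (possibly promoted) value type.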
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 33eddae7f90..5da693cae88 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -442,54 +442,18 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) { ID.AddInteger(ST->getRawFlags()); break; } - case ISD::ATOMIC_CMP_SWAP_8: - case ISD::ATOMIC_SWAP_8: - case ISD::ATOMIC_LOAD_ADD_8: - case ISD::ATOMIC_LOAD_SUB_8: - case ISD::ATOMIC_LOAD_AND_8: - case ISD::ATOMIC_LOAD_OR_8: - case ISD::ATOMIC_LOAD_XOR_8: - case ISD::ATOMIC_LOAD_NAND_8: - case ISD::ATOMIC_LOAD_MIN_8: - case ISD::ATOMIC_LOAD_MAX_8: - case ISD::ATOMIC_LOAD_UMIN_8: - case ISD::ATOMIC_LOAD_UMAX_8: - case ISD::ATOMIC_CMP_SWAP_16: - case ISD::ATOMIC_SWAP_16: - case ISD::ATOMIC_LOAD_ADD_16: - case ISD::ATOMIC_LOAD_SUB_16: - case ISD::ATOMIC_LOAD_AND_16: - case ISD::ATOMIC_LOAD_OR_16: - case ISD::ATOMIC_LOAD_XOR_16: - case ISD::ATOMIC_LOAD_NAND_16: - case ISD::ATOMIC_LOAD_MIN_16: - case ISD::ATOMIC_LOAD_MAX_16: - case ISD::ATOMIC_LOAD_UMIN_16: - case ISD::ATOMIC_LOAD_UMAX_16: - case ISD::ATOMIC_CMP_SWAP_32: - case ISD::ATOMIC_SWAP_32: - case ISD::ATOMIC_LOAD_ADD_32: - case ISD::ATOMIC_LOAD_SUB_32: - case ISD::ATOMIC_LOAD_AND_32: - case ISD::ATOMIC_LOAD_OR_32: - case ISD::ATOMIC_LOAD_XOR_32: - case ISD::ATOMIC_LOAD_NAND_32: - case ISD::ATOMIC_LOAD_MIN_32: - case ISD::ATOMIC_LOAD_MAX_32: - case ISD::ATOMIC_LOAD_UMIN_32: - case ISD::ATOMIC_LOAD_UMAX_32: - case ISD::ATOMIC_CMP_SWAP_64: - case ISD::ATOMIC_SWAP_64: - case ISD::ATOMIC_LOAD_ADD_64: - case ISD::ATOMIC_LOAD_SUB_64: - case ISD::ATOMIC_LOAD_AND_64: - case ISD::ATOMIC_LOAD_OR_64: - case ISD::ATOMIC_LOAD_XOR_64: - case ISD::ATOMIC_LOAD_NAND_64: - case ISD::ATOMIC_LOAD_MIN_64: - case ISD::ATOMIC_LOAD_MAX_64: - case ISD::ATOMIC_LOAD_UMIN_64: - case ISD::ATOMIC_LOAD_UMAX_64: { + case ISD::ATOMIC_CMP_SWAP: + case ISD::ATOMIC_SWAP: + case ISD::ATOMIC_LOAD_ADD: + case ISD::ATOMIC_LOAD_SUB: + case ISD::ATOMIC_LOAD_AND: + case ISD::ATOMIC_LOAD_OR: + case ISD::ATOMIC_LOAD_XOR: + case ISD::ATOMIC_LOAD_NAND: + case ISD::ATOMIC_LOAD_MIN: + case ISD::ATOMIC_LOAD_MAX: + case ISD::ATOMIC_LOAD_UMIN: + case ISD::ATOMIC_LOAD_UMAX: { const AtomicSDNode *AT = cast(N); ID.AddInteger(AT->getRawFlags()); break; @@ -3287,20 +3251,18 @@ SDValue SelectionDAG::getMemset(SDValue Chain, SDValue Dst, return CallResult.second; } -SDValue SelectionDAG::getAtomic(unsigned Opcode, SDValue Chain, +SDValue SelectionDAG::getAtomic(unsigned Opcode, MVT MemVT, + SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, const Value* PtrVal, unsigned Alignment) { - assert((Opcode == ISD::ATOMIC_CMP_SWAP_8 || - Opcode == ISD::ATOMIC_CMP_SWAP_16 || - Opcode == ISD::ATOMIC_CMP_SWAP_32 || - Opcode == ISD::ATOMIC_CMP_SWAP_64) && "Invalid Atomic Op"); + assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op"); assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); MVT VT = Cmp.getValueType(); if (Alignment == 0) // Ensure that codegen never sees alignment 0 - Alignment = getMVTAlignment(VT); + Alignment = getMVTAlignment(MemVT); SDVTList VTs = getVTList(VT, MVT::Other); FoldingSetNodeID ID; @@ -3310,65 +3272,35 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, SDValue Chain, if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); SDNode* N = NodeAllocator.Allocate(); - new (N) AtomicSDNode(Opcode, VTs, Chain, Ptr, Cmp, Swp, PtrVal, Alignment); + new (N) AtomicSDNode(Opcode, VTs, MemVT, + Chain, Ptr, Cmp, Swp, 
PtrVal, Alignment); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); return SDValue(N, 0); } -SDValue SelectionDAG::getAtomic(unsigned Opcode, SDValue Chain, +SDValue SelectionDAG::getAtomic(unsigned Opcode, MVT MemVT, + SDValue Chain, SDValue Ptr, SDValue Val, const Value* PtrVal, unsigned Alignment) { - assert((Opcode == ISD::ATOMIC_LOAD_ADD_8 || - Opcode == ISD::ATOMIC_LOAD_SUB_8 || - Opcode == ISD::ATOMIC_LOAD_AND_8 || - Opcode == ISD::ATOMIC_LOAD_OR_8 || - Opcode == ISD::ATOMIC_LOAD_XOR_8 || - Opcode == ISD::ATOMIC_LOAD_NAND_8 || - Opcode == ISD::ATOMIC_LOAD_MIN_8 || - Opcode == ISD::ATOMIC_LOAD_MAX_8 || - Opcode == ISD::ATOMIC_LOAD_UMIN_8 || - Opcode == ISD::ATOMIC_LOAD_UMAX_8 || - Opcode == ISD::ATOMIC_SWAP_8 || - Opcode == ISD::ATOMIC_LOAD_ADD_16 || - Opcode == ISD::ATOMIC_LOAD_SUB_16 || - Opcode == ISD::ATOMIC_LOAD_AND_16 || - Opcode == ISD::ATOMIC_LOAD_OR_16 || - Opcode == ISD::ATOMIC_LOAD_XOR_16 || - Opcode == ISD::ATOMIC_LOAD_NAND_16 || - Opcode == ISD::ATOMIC_LOAD_MIN_16 || - Opcode == ISD::ATOMIC_LOAD_MAX_16 || - Opcode == ISD::ATOMIC_LOAD_UMIN_16 || - Opcode == ISD::ATOMIC_LOAD_UMAX_16 || - Opcode == ISD::ATOMIC_SWAP_16 || - Opcode == ISD::ATOMIC_LOAD_ADD_32 || - Opcode == ISD::ATOMIC_LOAD_SUB_32 || - Opcode == ISD::ATOMIC_LOAD_AND_32 || - Opcode == ISD::ATOMIC_LOAD_OR_32 || - Opcode == ISD::ATOMIC_LOAD_XOR_32 || - Opcode == ISD::ATOMIC_LOAD_NAND_32 || - Opcode == ISD::ATOMIC_LOAD_MIN_32 || - Opcode == ISD::ATOMIC_LOAD_MAX_32 || - Opcode == ISD::ATOMIC_LOAD_UMIN_32 || - Opcode == ISD::ATOMIC_LOAD_UMAX_32 || - Opcode == ISD::ATOMIC_SWAP_32 || - Opcode == ISD::ATOMIC_LOAD_ADD_64 || - Opcode == ISD::ATOMIC_LOAD_SUB_64 || - Opcode == ISD::ATOMIC_LOAD_AND_64 || - Opcode == ISD::ATOMIC_LOAD_OR_64 || - Opcode == ISD::ATOMIC_LOAD_XOR_64 || - Opcode == ISD::ATOMIC_LOAD_NAND_64 || - Opcode == ISD::ATOMIC_LOAD_MIN_64 || - Opcode == ISD::ATOMIC_LOAD_MAX_64 || - Opcode == ISD::ATOMIC_LOAD_UMIN_64 || - Opcode == ISD::ATOMIC_LOAD_UMAX_64 || - Opcode == ISD::ATOMIC_SWAP_64) && "Invalid Atomic Op"); + assert((Opcode == ISD::ATOMIC_LOAD_ADD || + Opcode == ISD::ATOMIC_LOAD_SUB || + Opcode == ISD::ATOMIC_LOAD_AND || + Opcode == ISD::ATOMIC_LOAD_OR || + Opcode == ISD::ATOMIC_LOAD_XOR || + Opcode == ISD::ATOMIC_LOAD_NAND || + Opcode == ISD::ATOMIC_LOAD_MIN || + Opcode == ISD::ATOMIC_LOAD_MAX || + Opcode == ISD::ATOMIC_LOAD_UMIN || + Opcode == ISD::ATOMIC_LOAD_UMAX || + Opcode == ISD::ATOMIC_SWAP) && + "Invalid Atomic Op"); MVT VT = Val.getValueType(); if (Alignment == 0) // Ensure that codegen never sees alignment 0 - Alignment = getMVTAlignment(VT); + Alignment = getMVTAlignment(MemVT); SDVTList VTs = getVTList(VT, MVT::Other); FoldingSetNodeID ID; @@ -3378,7 +3310,8 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, SDValue Chain, if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); SDNode* N = NodeAllocator.Allocate(); - new (N) AtomicSDNode(Opcode, VTs, Chain, Ptr, Val, PtrVal, Alignment); + new (N) AtomicSDNode(Opcode, VTs, MemVT, + Chain, Ptr, Val, PtrVal, Alignment); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); return SDValue(N, 0); @@ -5060,54 +4993,18 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const { #endif case ISD::PREFETCH: return "Prefetch"; case ISD::MEMBARRIER: return "MemBarrier"; - case ISD::ATOMIC_CMP_SWAP_8: return "AtomicCmpSwap8"; - case ISD::ATOMIC_SWAP_8: return "AtomicSwap8"; - case ISD::ATOMIC_LOAD_ADD_8: return "AtomicLoadAdd8"; - case ISD::ATOMIC_LOAD_SUB_8: return "AtomicLoadSub8"; - case ISD::ATOMIC_LOAD_AND_8: return 
"AtomicLoadAnd8"; - case ISD::ATOMIC_LOAD_OR_8: return "AtomicLoadOr8"; - case ISD::ATOMIC_LOAD_XOR_8: return "AtomicLoadXor8"; - case ISD::ATOMIC_LOAD_NAND_8: return "AtomicLoadNand8"; - case ISD::ATOMIC_LOAD_MIN_8: return "AtomicLoadMin8"; - case ISD::ATOMIC_LOAD_MAX_8: return "AtomicLoadMax8"; - case ISD::ATOMIC_LOAD_UMIN_8: return "AtomicLoadUMin8"; - case ISD::ATOMIC_LOAD_UMAX_8: return "AtomicLoadUMax8"; - case ISD::ATOMIC_CMP_SWAP_16: return "AtomicCmpSwap16"; - case ISD::ATOMIC_SWAP_16: return "AtomicSwap16"; - case ISD::ATOMIC_LOAD_ADD_16: return "AtomicLoadAdd16"; - case ISD::ATOMIC_LOAD_SUB_16: return "AtomicLoadSub16"; - case ISD::ATOMIC_LOAD_AND_16: return "AtomicLoadAnd16"; - case ISD::ATOMIC_LOAD_OR_16: return "AtomicLoadOr16"; - case ISD::ATOMIC_LOAD_XOR_16: return "AtomicLoadXor16"; - case ISD::ATOMIC_LOAD_NAND_16: return "AtomicLoadNand16"; - case ISD::ATOMIC_LOAD_MIN_16: return "AtomicLoadMin16"; - case ISD::ATOMIC_LOAD_MAX_16: return "AtomicLoadMax16"; - case ISD::ATOMIC_LOAD_UMIN_16: return "AtomicLoadUMin16"; - case ISD::ATOMIC_LOAD_UMAX_16: return "AtomicLoadUMax16"; - case ISD::ATOMIC_CMP_SWAP_32: return "AtomicCmpSwap32"; - case ISD::ATOMIC_SWAP_32: return "AtomicSwap32"; - case ISD::ATOMIC_LOAD_ADD_32: return "AtomicLoadAdd32"; - case ISD::ATOMIC_LOAD_SUB_32: return "AtomicLoadSub32"; - case ISD::ATOMIC_LOAD_AND_32: return "AtomicLoadAnd32"; - case ISD::ATOMIC_LOAD_OR_32: return "AtomicLoadOr32"; - case ISD::ATOMIC_LOAD_XOR_32: return "AtomicLoadXor32"; - case ISD::ATOMIC_LOAD_NAND_32: return "AtomicLoadNand32"; - case ISD::ATOMIC_LOAD_MIN_32: return "AtomicLoadMin32"; - case ISD::ATOMIC_LOAD_MAX_32: return "AtomicLoadMax32"; - case ISD::ATOMIC_LOAD_UMIN_32: return "AtomicLoadUMin32"; - case ISD::ATOMIC_LOAD_UMAX_32: return "AtomicLoadUMax32"; - case ISD::ATOMIC_CMP_SWAP_64: return "AtomicCmpSwap64"; - case ISD::ATOMIC_SWAP_64: return "AtomicSwap64"; - case ISD::ATOMIC_LOAD_ADD_64: return "AtomicLoadAdd64"; - case ISD::ATOMIC_LOAD_SUB_64: return "AtomicLoadSub64"; - case ISD::ATOMIC_LOAD_AND_64: return "AtomicLoadAnd64"; - case ISD::ATOMIC_LOAD_OR_64: return "AtomicLoadOr64"; - case ISD::ATOMIC_LOAD_XOR_64: return "AtomicLoadXor64"; - case ISD::ATOMIC_LOAD_NAND_64: return "AtomicLoadNand64"; - case ISD::ATOMIC_LOAD_MIN_64: return "AtomicLoadMin64"; - case ISD::ATOMIC_LOAD_MAX_64: return "AtomicLoadMax64"; - case ISD::ATOMIC_LOAD_UMIN_64: return "AtomicLoadUMin64"; - case ISD::ATOMIC_LOAD_UMAX_64: return "AtomicLoadUMax64"; + case ISD::ATOMIC_CMP_SWAP: return "AtomicCmpSwap"; + case ISD::ATOMIC_SWAP: return "AtomicSwap"; + case ISD::ATOMIC_LOAD_ADD: return "AtomicLoadAdd"; + case ISD::ATOMIC_LOAD_SUB: return "AtomicLoadSub"; + case ISD::ATOMIC_LOAD_AND: return "AtomicLoadAnd"; + case ISD::ATOMIC_LOAD_OR: return "AtomicLoadOr"; + case ISD::ATOMIC_LOAD_XOR: return "AtomicLoadXor"; + case ISD::ATOMIC_LOAD_NAND: return "AtomicLoadNand"; + case ISD::ATOMIC_LOAD_MIN: return "AtomicLoadMin"; + case ISD::ATOMIC_LOAD_MAX: return "AtomicLoadMax"; + case ISD::ATOMIC_LOAD_UMIN: return "AtomicLoadUMin"; + case ISD::ATOMIC_LOAD_UMAX: return "AtomicLoadUMax"; case ISD::PCMARKER: return "PCMarker"; case ISD::READCYCLECOUNTER: return "ReadCycleCounter"; case ISD::SRCVALUE: return "SrcValue"; diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp index 853e0549f75..920cca4ea4d 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp @@ -2959,10 +2959,12 @@ 
getF32Constant(SelectionDAG &DAG, unsigned Flt) { const char * SelectionDAGLowering::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) { SDValue Root = getRoot(); - SDValue L = DAG.getAtomic(Op, Root, - getValue(I.getOperand(1)), - getValue(I.getOperand(2)), - I.getOperand(1)); + SDValue L = + DAG.getAtomic(Op, getValue(I.getOperand(2)).getValueType().getSimpleVT(), + Root, + getValue(I.getOperand(1)), + getValue(I.getOperand(2)), + I.getOperand(1)); setValue(&I, L); DAG.setRoot(L.getValue(1)); return 0; @@ -4145,198 +4147,40 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { } case Intrinsic::atomic_cmp_swap: { SDValue Root = getRoot(); - SDValue L; - switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { - case MVT::i8: - L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_8, Root, - getValue(I.getOperand(1)), - getValue(I.getOperand(2)), - getValue(I.getOperand(3)), - I.getOperand(1)); - break; - case MVT::i16: - L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_16, Root, - getValue(I.getOperand(1)), - getValue(I.getOperand(2)), - getValue(I.getOperand(3)), - I.getOperand(1)); - break; - case MVT::i32: - L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_32, Root, - getValue(I.getOperand(1)), - getValue(I.getOperand(2)), - getValue(I.getOperand(3)), - I.getOperand(1)); - break; - case MVT::i64: - L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_64, Root, - getValue(I.getOperand(1)), - getValue(I.getOperand(2)), - getValue(I.getOperand(3)), - I.getOperand(1)); - break; - default: - assert(0 && "Invalid atomic type"); - abort(); - } + SDValue L = + DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, + getValue(I.getOperand(2)).getValueType().getSimpleVT(), + Root, + getValue(I.getOperand(1)), + getValue(I.getOperand(2)), + getValue(I.getOperand(3)), + I.getOperand(1)); setValue(&I, L); DAG.setRoot(L.getValue(1)); return 0; } case Intrinsic::atomic_load_add: - switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { - case MVT::i8: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_8); - case MVT::i16: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_16); - case MVT::i32: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_32); - case MVT::i64: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_64); - default: - assert(0 && "Invalid atomic type"); - abort(); - } + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD); case Intrinsic::atomic_load_sub: - switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { - case MVT::i8: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_8); - case MVT::i16: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_16); - case MVT::i32: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_32); - case MVT::i64: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_64); - default: - assert(0 && "Invalid atomic type"); - abort(); - } + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB); case Intrinsic::atomic_load_or: - switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { - case MVT::i8: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_8); - case MVT::i16: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_16); - case MVT::i32: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_32); - case MVT::i64: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_64); - default: - assert(0 && "Invalid atomic type"); - abort(); - } + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR); case Intrinsic::atomic_load_xor: - switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { - case MVT::i8: - return 
implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_8); - case MVT::i16: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_16); - case MVT::i32: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_32); - case MVT::i64: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_64); - default: - assert(0 && "Invalid atomic type"); - abort(); - } + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR); case Intrinsic::atomic_load_and: - switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { - case MVT::i8: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_8); - case MVT::i16: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_16); - case MVT::i32: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_32); - case MVT::i64: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_64); - default: - assert(0 && "Invalid atomic type"); - abort(); - } + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND); case Intrinsic::atomic_load_nand: - switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { - case MVT::i8: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_8); - case MVT::i16: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_16); - case MVT::i32: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_32); - case MVT::i64: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_64); - default: - assert(0 && "Invalid atomic type"); - abort(); - } + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND); case Intrinsic::atomic_load_max: - switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { - case MVT::i8: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_8); - case MVT::i16: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_16); - case MVT::i32: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_32); - case MVT::i64: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_64); - default: - assert(0 && "Invalid atomic type"); - abort(); - } + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX); case Intrinsic::atomic_load_min: - switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { - case MVT::i8: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_8); - case MVT::i16: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_16); - case MVT::i32: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_32); - case MVT::i64: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_64); - default: - assert(0 && "Invalid atomic type"); - abort(); - } + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN); case Intrinsic::atomic_load_umin: - switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { - case MVT::i8: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_8); - case MVT::i16: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_16); - case MVT::i32: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_32); - case MVT::i64: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_64); - default: - assert(0 && "Invalid atomic type"); - abort(); - } + return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN); case Intrinsic::atomic_load_umax: - switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { - case MVT::i8: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_8); - case MVT::i16: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_16); - case MVT::i32: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_32); - case MVT::i64: - return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_64); - default: - assert(0 && "Invalid atomic type"); - abort(); - } + return 
implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX); case Intrinsic::atomic_swap: - switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { - case MVT::i8: - return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP_8); - case MVT::i16: - return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP_16); - case MVT::i32: - return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP_32); - case MVT::i64: - return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP_64); - default: - assert(0 && "Invalid atomic type"); - abort(); - } + return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP); } } diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 96ad2eb97c1..398170e4959 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -306,24 +306,24 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand); // Expand certain atomics - setOperationAction(ISD::ATOMIC_CMP_SWAP_8 , MVT::i8, Custom); - setOperationAction(ISD::ATOMIC_CMP_SWAP_16, MVT::i16, Custom); - setOperationAction(ISD::ATOMIC_CMP_SWAP_32, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_CMP_SWAP_64, MVT::i64, Custom); + setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Custom); + setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, Custom); + setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); + setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); - setOperationAction(ISD::ATOMIC_LOAD_SUB_8 , MVT::i8, Custom); - setOperationAction(ISD::ATOMIC_LOAD_SUB_16, MVT::i16, Custom); - setOperationAction(ISD::ATOMIC_LOAD_SUB_32, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_LOAD_SUB_64, MVT::i64, Custom); + setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i8, Custom); + setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i16, Custom); + setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom); + setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom); if (!Subtarget->is64Bit()) { - setOperationAction(ISD::ATOMIC_LOAD_ADD_64, MVT::i64, Custom); - setOperationAction(ISD::ATOMIC_LOAD_SUB_64, MVT::i64, Custom); - setOperationAction(ISD::ATOMIC_LOAD_AND_64, MVT::i64, Custom); - setOperationAction(ISD::ATOMIC_LOAD_OR_64, MVT::i64, Custom); - setOperationAction(ISD::ATOMIC_LOAD_XOR_64, MVT::i64, Custom); - setOperationAction(ISD::ATOMIC_LOAD_NAND_64, MVT::i64, Custom); - setOperationAction(ISD::ATOMIC_SWAP_64, MVT::i64, Custom); + setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom); + setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom); + setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom); + setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom); + setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom); + setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom); + setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom); } // Use the default ISD::DBG_STOPPOINT, ISD::DECLARE expansion. @@ -6313,13 +6313,8 @@ SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) { MVT T = Node->getValueType(0); SDValue negOp = DAG.getNode(ISD::SUB, T, DAG.getConstant(0, T), Node->getOperand(2)); - return DAG.getAtomic((Op.getOpcode()==ISD::ATOMIC_LOAD_SUB_8 ? - ISD::ATOMIC_LOAD_ADD_8 : - Op.getOpcode()==ISD::ATOMIC_LOAD_SUB_16 ? - ISD::ATOMIC_LOAD_ADD_16 : - Op.getOpcode()==ISD::ATOMIC_LOAD_SUB_32 ? 
-                        ISD::ATOMIC_LOAD_ADD_32 :
-                        ISD::ATOMIC_LOAD_ADD_64),
+  return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD,
+                       cast<AtomicSDNode>(Node)->getMemoryVT(),
                        Node->getOperand(0), Node->getOperand(1),
                        negOp,
                        cast<AtomicSDNode>(Node)->getSrcValue(),
@@ -6331,14 +6326,8 @@ SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
   switch (Op.getOpcode()) {
   default: assert(0 && "Should not custom lower this!");
-  case ISD::ATOMIC_CMP_SWAP_8:
-  case ISD::ATOMIC_CMP_SWAP_16:
-  case ISD::ATOMIC_CMP_SWAP_32:
-  case ISD::ATOMIC_CMP_SWAP_64: return LowerCMP_SWAP(Op,DAG);
-  case ISD::ATOMIC_LOAD_SUB_8:
-  case ISD::ATOMIC_LOAD_SUB_16:
-  case ISD::ATOMIC_LOAD_SUB_32: return LowerLOAD_SUB(Op,DAG);
-  case ISD::ATOMIC_LOAD_SUB_64: return LowerLOAD_SUB(Op,DAG);
+  case ISD::ATOMIC_CMP_SWAP:    return LowerCMP_SWAP(Op,DAG);
+  case ISD::ATOMIC_LOAD_SUB:    return LowerLOAD_SUB(Op,DAG);
   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
@@ -6445,7 +6434,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
     Results.push_back(edx.getValue(1));
     return;
   }
-  case ISD::ATOMIC_CMP_SWAP_64: {
+  case ISD::ATOMIC_CMP_SWAP: {
     MVT T = N->getValueType(0);
     assert (T == MVT::i64 && "Only know how to expand i64 Cmp and Swap");
     SDValue cpInL, cpInH;
@@ -6479,25 +6468,25 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
     Results.push_back(cpOutH.getValue(1));
     return;
   }
-  case ISD::ATOMIC_LOAD_ADD_64:
+  case ISD::ATOMIC_LOAD_ADD:
     ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMADD64_DAG);
     return;
-  case ISD::ATOMIC_LOAD_AND_64:
+  case ISD::ATOMIC_LOAD_AND:
     ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMAND64_DAG);
     return;
-  case ISD::ATOMIC_LOAD_NAND_64:
+  case ISD::ATOMIC_LOAD_NAND:
     ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMNAND64_DAG);
     return;
-  case ISD::ATOMIC_LOAD_OR_64:
+  case ISD::ATOMIC_LOAD_OR:
     ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMOR64_DAG);
     return;
-  case ISD::ATOMIC_LOAD_SUB_64:
+  case ISD::ATOMIC_LOAD_SUB:
     ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSUB64_DAG);
     return;
-  case ISD::ATOMIC_LOAD_XOR_64:
+  case ISD::ATOMIC_LOAD_XOR:
     ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMXOR64_DAG);
     return;
-  case ISD::ATOMIC_SWAP_64:
+  case ISD::ATOMIC_SWAP:
     ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSWAP64_DAG);
     return;
   }
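With the _64 suffix gone, code that only handles one width has to check it explicitly; the ReplaceNodeResults case above does so through the node's result type. A hedged sketch of the equivalent guard written against the memory type (illustrative only, not code from the patch):

    // Only the i64 compare-and-swap reaches ReplaceNodeResults on 32-bit x86;
    // the narrower widths are legal there and are matched by ordinary patterns.
    AtomicSDNode *A = cast<AtomicSDNode>(N);
    assert(A->getMemoryVT() == MVT::i64 &&
           "only the illegal i64 form should be replaced here");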