diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index c71b3c0cad1..1bd1e00fcd0 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -1424,6 +1424,20 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
                                    N0.getOperand(0).getOperand(1),
                                    N0.getOperand(1)));
 
+  if (N1.getOpcode() == ISD::AND) {
+    SDValue AndOp0 = N1.getOperand(0);
+    ConstantSDNode *AndOp1 = dyn_cast<ConstantSDNode>(N1->getOperand(1));
+    unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0);
+    unsigned DestBits = VT.getScalarType().getSizeInBits();
+
+    // (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x))
+    // and similar xforms where the inner op is either ~0 or 0.
+    if (NumSignBits == DestBits && AndOp1 && AndOp1->isOne()) {
+      DebugLoc DL = N->getDebugLoc();
+      return DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), AndOp0);
+    }
+  }
+
   return SDValue();
 }
 
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index ad9ef77a9db..ab57353ba1e 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1021,7 +1021,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
   setTargetDAGCombine(ISD::OR);
   setTargetDAGCombine(ISD::STORE);
   setTargetDAGCombine(ISD::ZERO_EXTEND);
-  setTargetDAGCombine(ISD::ADD);
   if (Subtarget->is64Bit())
     setTargetDAGCombine(ISD::MUL);
 
@@ -9980,6 +9979,16 @@ void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
   }
 }
 
+unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
+                                                         unsigned Depth) const {
+  // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
+  if (Op.getOpcode() == X86ISD::SETCC_CARRY)
+    return Op.getValueType().getScalarType().getSizeInBits();
+
+  // Fallback case.
+  return 1;
+}
+
 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
 /// node is a GlobalAddress + offset.
 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
@@ -10454,27 +10463,6 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
-/// PerformAddCombine - Optimize ADD when combined with X86 opcodes.
-static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
-                                 TargetLowering::DAGCombinerInfo &DCI) {
-  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
-    return SDValue();
-
-  EVT VT = N->getValueType(0);
-  SDValue Op1 = N->getOperand(1);
-  if (Op1->getOpcode() == ISD::AND) {
-    SDValue AndOp0 = Op1->getOperand(0);
-    ConstantSDNode *AndOp1 = dyn_cast<ConstantSDNode>(Op1->getOperand(1));
-    // (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x))
-    if (AndOp0->getOpcode() == X86ISD::SETCC_CARRY &&
-        AndOp1 && AndOp1->getZExtValue() == 1) {
-      DebugLoc DL = N->getDebugLoc();
-      return DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), AndOp0);
-    }
-  }
-
-  return SDValue();
-}
 
 /// PerformMulCombine - Optimize a single multiply with constant into two
 /// in order to implement it with two cheaper instructions, e.g.
@@ -10961,7 +10949,6 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
     return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, *this);
   case ISD::SELECT:         return PerformSELECTCombine(N, DAG, Subtarget);
   case X86ISD::CMOV:        return PerformCMOVCombine(N, DAG, DCI);
-  case ISD::ADD:            return PerformAddCombine(N, DAG, DCI);
   case ISD::MUL:            return PerformMulCombine(N, DAG, DCI);
   case ISD::SHL:
   case ISD::SRA:
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 6e6f25c93a9..9bb5f64b56d 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -523,6 +523,11 @@ namespace llvm {
                                                 const SelectionDAG &DAG,
                                                 unsigned Depth = 0) const;
 
+    // ComputeNumSignBitsForTargetNode - Determine the number of bits in the
+    // operation that are sign bits.
+    virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
+                                                     unsigned Depth) const;
+
     virtual bool isGAPlusOffset(SDNode *N,
                                 const GlobalValue* &GA,
                                 int64_t &Offset) const;