Add a fold for ADD that exchanges it with a constant shift if possible, so that the shift may be more easily folded into other operations.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@26286 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Nate Begeman
2006-02-18 02:43:25 +00:00
parent db41024a85
commit 003a272319

View File

@@ -459,6 +459,24 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, uint64_t DemandedMask,
CountTrailingZeros_64(~KnownZero2)); CountTrailingZeros_64(~KnownZero2));
KnownZero = (1ULL << KnownZeroOut) - 1; KnownZero = (1ULL << KnownZeroOut) - 1;
KnownOne = 0; KnownOne = 0;
SDOperand SH = Op.getOperand(0);
// fold (add (shl x, c1), (shl c2, c1)) -> (shl (add x, c2), c1)
if (KnownZero && SH.getOpcode() == ISD::SHL && SH.Val->hasOneUse() &&
Op.Val->hasOneUse()) {
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(SH.getOperand(1))) {
MVT::ValueType VT = Op.getValueType();
unsigned ShiftAmt = SA->getValue();
uint64_t AddAmt = AA->getValue();
uint64_t AddShr = AddAmt >> ShiftAmt;
if (AddAmt == (AddShr << ShiftAmt)) {
SDOperand ADD = TLO.DAG.getNode(ISD::ADD, VT, SH.getOperand(0),
TLO.DAG.getConstant(AddShr, VT));
SDOperand SHL = TLO.DAG.getNode(ISD::SHL, VT, ADD,SH.getOperand(1));
return TLO.CombineTo(Op, SHL);
}
}
}
} }
break; break;
case ISD::CTTZ: case ISD::CTTZ:
@@ -577,7 +595,7 @@ void TargetLowering::ComputeMaskedBits(SDOperand Op, uint64_t Mask,
KnownOne <<= SA->getValue(); KnownOne <<= SA->getValue();
KnownZero |= (1ULL << SA->getValue())-1; // low bits known zero. KnownZero |= (1ULL << SA->getValue())-1; // low bits known zero.
} }
break; return;
case ISD::SRL: case ISD::SRL:
// (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
@@ -585,12 +603,12 @@ void TargetLowering::ComputeMaskedBits(SDOperand Op, uint64_t Mask,
HighBits <<= MVT::getSizeInBits(Op.getValueType())-SA->getValue(); HighBits <<= MVT::getSizeInBits(Op.getValueType())-SA->getValue();
Mask <<= SA->getValue(); Mask <<= SA->getValue();
ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?"); assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero >>= SA->getValue(); KnownZero >>= SA->getValue();
KnownOne >>= SA->getValue(); KnownOne >>= SA->getValue();
KnownZero |= HighBits; // high bits known zero. KnownZero |= HighBits; // high bits known zero.
} }
break; return;
case ISD::SRA: case ISD::SRA:
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
uint64_t HighBits = (1ULL << SA->getValue())-1; uint64_t HighBits = (1ULL << SA->getValue())-1;
@@ -611,7 +629,7 @@ void TargetLowering::ComputeMaskedBits(SDOperand Op, uint64_t Mask,
KnownOne |= HighBits; KnownOne |= HighBits;
} }
} }
break; return;
case ISD::CTTZ: case ISD::CTTZ:
case ISD::CTLZ: case ISD::CTLZ:
case ISD::CTPOP: { case ISD::CTPOP: {
@@ -666,12 +684,12 @@ void TargetLowering::ComputeMaskedBits(SDOperand Op, uint64_t Mask,
// We know that the top bits of C-X are clear if X contains less bits // We know that the top bits of C-X are clear if X contains less bits
// than C (i.e. no wrap-around can happen). For example, 20-X is // than C (i.e. no wrap-around can happen). For example, 20-X is
// positive if we can prove that X is >= 0 and < 16. // positive if we can prove that X is >= 0 and < 16.
break; return;
default: default:
// Allow the target to implement this method for its nodes. // Allow the target to implement this method for its nodes.
if (Op.getOpcode() >= ISD::BUILTIN_OP_END) if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
computeMaskedBitsForTargetNode(Op, Mask, KnownZero, KnownOne); computeMaskedBitsForTargetNode(Op, Mask, KnownZero, KnownOne);
break; return;
} }
} }