Revert an earlier change to SIGN_EXTEND_INREG for vectors. The VTSDNode
really does need to be a vector type, because
TargetLowering::getOperationAction for SIGN_EXTEND_INREG uses that type,
and it needs to be able to distinguish between vectors and scalars.

Also, fix some more issues with legalization of vector casts.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@93043 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Dan Gohman
Date:   2010-01-09 02:13:55 +00:00
Commit: d199636039 (parent 70644e92d8)

9 changed files with 156 additions and 67 deletions
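For illustration only (this sketch is not part of the commit): with the VTSDNode carrying the full vector type, a combine can ask the target whether the vector form of SIGN_EXTEND_INREG is legal separately from the scalar form. The helper name below is hypothetical; the SelectionDAG and TargetLowering calls mirror the ones used in the DAGCombiner hunk that follows.

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

// Hypothetical helper: form a SIGN_EXTEND_INREG whose VTSDNode type matches
// the (possibly vector) operand type, but only if the target says that exact
// form is legal.
static SDValue formSextInReg(SelectionDAG &DAG, const TargetLowering &TLI,
                             SDValue Op, unsigned FromBits, DebugLoc dl) {
  EVT VT = Op.getValueType();
  // Scalar in-register type, e.g. i16 when sign-extending from 16 bits.
  EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), FromBits);
  // If the operand is a vector, the VTSDNode must be the matching vector
  // type (e.g. v8i16 for a v8i32 operand), so getOperationAction can
  // distinguish the vector form from the scalar one.
  if (VT.isVector())
    ExtVT = EVT::getVectorVT(*DAG.getContext(), ExtVT,
                             VT.getVectorNumElements());
  if (!TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, ExtVT))
    return SDValue(); // Target has no legal form for this in-reg type.
  return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Op,
                     DAG.getValueType(ExtVT));
}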


@@ -2577,10 +2577,14 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
   // sext_inreg.
   if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
     unsigned LowBits = OpSizeInBits - (unsigned)N1C->getZExtValue();
-    EVT EVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
-    if ((!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, EVT)))
+    EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
+    if (VT.isVector())
+      ExtVT = EVT::getVectorVT(*DAG.getContext(),
+                               ExtVT, VT.getVectorNumElements());
+    if ((!LegalOperations ||
+         TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, ExtVT)))
       return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT,
-                         N0.getOperand(0), DAG.getValueType(EVT));
+                         N0.getOperand(0), DAG.getValueType(ExtVT));
   }
 
   // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
@@ -3064,9 +3068,9 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
     // See if the value being truncated is already sign extended. If so, just
     // eliminate the trunc/sext pair.
     SDValue Op = N0.getOperand(0);
-    unsigned OpBits = Op.getValueType().getSizeInBits();
-    unsigned MidBits = N0.getValueType().getSizeInBits();
-    unsigned DestBits = VT.getSizeInBits();
+    unsigned OpBits = Op.getValueType().getScalarType().getSizeInBits();
+    unsigned MidBits = N0.getValueType().getScalarType().getSizeInBits();
+    unsigned DestBits = VT.getScalarType().getSizeInBits();
     unsigned NumSignBits = DAG.ComputeNumSignBits(Op);
 
     if (OpBits == DestBits) {
@@ -3089,12 +3093,12 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
       // fold (sext (truncate x)) -> (sextinreg x).
       if (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG,
                                                    N0.getValueType())) {
-        if (Op.getValueType().bitsLT(VT))
+        if (OpBits < DestBits)
           Op = DAG.getNode(ISD::ANY_EXTEND, N0.getDebugLoc(), VT, Op);
-        else if (Op.getValueType().bitsGT(VT))
+        else if (OpBits > DestBits)
           Op = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), VT, Op);
         return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT, Op,
-                           DAG.getValueType(N0.getValueType().getScalarType()));
+                           DAG.getValueType(N0.getValueType()));
       }
     }
@@ -3547,7 +3551,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
   if (VT.isVector())
     return SDValue();
 
-  // Special case: SIGN_EXTEND_INREG is basically truncating to EVT then
+  // Special case: SIGN_EXTEND_INREG is basically truncating to ExtVT then
   // extended to VT.
   if (Opc == ISD::SIGN_EXTEND_INREG) {
     ExtType = ISD::SEXTLOAD;
@@ -3621,7 +3625,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
   EVT VT = N->getValueType(0);
   EVT EVT = cast<VTSDNode>(N1)->getVT();
   unsigned VTBits = VT.getScalarType().getSizeInBits();
-  unsigned EVTBits = EVT.getSizeInBits();
+  unsigned EVTBits = EVT.getScalarType().getSizeInBits();
 
   // fold (sext_in_reg c1) -> c1
   if (isa<ConstantSDNode>(N0) || N0.getOpcode() == ISD::UNDEF)


@@ -2293,12 +2293,10 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
     EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
     EVT VT = Node->getValueType(0);
     EVT ShiftAmountTy = TLI.getShiftAmountTy();
-    if (VT.isVector()) {
+    if (VT.isVector())
       ShiftAmountTy = VT;
-      VT = VT.getVectorElementType();
-    }
-    unsigned BitsDiff = VT.getSizeInBits() -
-                        ExtraVT.getSizeInBits();
+    unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
+                        ExtraVT.getScalarType().getSizeInBits();
     SDValue ShiftCst = DAG.getConstant(BitsDiff, ShiftAmountTy);
     Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0),
                        Node->getOperand(0), ShiftCst);


@@ -509,6 +509,7 @@ private:
   void ScalarizeVectorResult(SDNode *N, unsigned OpNo);
   SDValue ScalarizeVecRes_BinOp(SDNode *N);
   SDValue ScalarizeVecRes_UnaryOp(SDNode *N);
+  SDValue ScalarizeVecRes_InregOp(SDNode *N);
 
   SDValue ScalarizeVecRes_BIT_CONVERT(SDNode *N);
   SDValue ScalarizeVecRes_CONVERT_RNDSAT(SDNode *N);
@@ -550,6 +551,7 @@ private:
   void SplitVectorResult(SDNode *N, unsigned OpNo);
   void SplitVecRes_BinOp(SDNode *N, SDValue &Lo, SDValue &Hi);
   void SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo, SDValue &Hi);
+  void SplitVecRes_InregOp(SDNode *N, SDValue &Lo, SDValue &Hi);
 
   void SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo, SDValue &Hi);
   void SplitVecRes_BUILD_PAIR(SDNode *N, SDValue &Lo, SDValue &Hi);
@@ -615,6 +617,7 @@ private:
   SDValue WidenVecRes_Convert(SDNode *N);
   SDValue WidenVecRes_Shift(SDNode *N);
   SDValue WidenVecRes_Unary(SDNode *N);
+  SDValue WidenVecRes_InregOp(SDNode *N);
 
   // Widen Vector Operand.
   bool WidenVectorOperand(SDNode *N, unsigned ResNo);


@@ -179,9 +179,12 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
   case ISD::FRINT:
   case ISD::FNEARBYINT:
   case ISD::FFLOOR:
-  case ISD::SIGN_EXTEND_INREG:
     QueryType = Node->getValueType(0);
     break;
+  case ISD::SIGN_EXTEND_INREG:
+  case ISD::FP_ROUND_INREG:
+    QueryType = cast<VTSDNode>(Node->getOperand(1))->getVT();
+    break;
   case ISD::SINT_TO_FP:
   case ISD::UINT_TO_FP:
     QueryType = Node->getOperand(0).getValueType();


@@ -50,11 +50,12 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::BUILD_VECTOR: R = N->getOperand(0); break;
   case ISD::CONVERT_RNDSAT: R = ScalarizeVecRes_CONVERT_RNDSAT(N); break;
   case ISD::EXTRACT_SUBVECTOR: R = ScalarizeVecRes_EXTRACT_SUBVECTOR(N); break;
+  case ISD::FP_ROUND_INREG: R = ScalarizeVecRes_InregOp(N); break;
   case ISD::FPOWI: R = ScalarizeVecRes_FPOWI(N); break;
   case ISD::INSERT_VECTOR_ELT: R = ScalarizeVecRes_INSERT_VECTOR_ELT(N); break;
   case ISD::LOAD: R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(N));break;
   case ISD::SCALAR_TO_VECTOR: R = ScalarizeVecRes_SCALAR_TO_VECTOR(N); break;
-  case ISD::SIGN_EXTEND_INREG: R = ScalarizeVecRes_SIGN_EXTEND_INREG(N); break;
+  case ISD::SIGN_EXTEND_INREG: R = ScalarizeVecRes_InregOp(N); break;
   case ISD::SELECT: R = ScalarizeVecRes_SELECT(N); break;
   case ISD::SELECT_CC: R = ScalarizeVecRes_SELECT_CC(N); break;
   case ISD::SETCC: R = ScalarizeVecRes_SETCC(N); break;
@@ -186,6 +187,14 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_UnaryOp(SDNode *N) {
   return DAG.getNode(N->getOpcode(), N->getDebugLoc(), DestVT, Op);
 }
 
+SDValue DAGTypeLegalizer::ScalarizeVecRes_InregOp(SDNode *N) {
+  EVT EltVT = N->getValueType(0).getVectorElementType();
+  EVT ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT().getVectorElementType();
+  SDValue LHS = GetScalarizedVector(N->getOperand(0));
+  return DAG.getNode(N->getOpcode(), N->getDebugLoc(), EltVT,
+                     LHS, DAG.getValueType(ExtVT));
+}
+
 SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode *N) {
   // If the operand is wider than the vector element type then it is implicitly
   // truncated. Make that explicit here.
@@ -196,13 +205,6 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode *N) {
   return InOp;
 }
 
-SDValue DAGTypeLegalizer::ScalarizeVecRes_SIGN_EXTEND_INREG(SDNode *N) {
-  EVT EltVT = N->getValueType(0).getVectorElementType();
-  SDValue LHS = GetScalarizedVector(N->getOperand(0));
-  return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), EltVT,
-                     LHS, N->getOperand(1));
-}
-
 SDValue DAGTypeLegalizer::ScalarizeVecRes_SELECT(SDNode *N) {
   SDValue LHS = GetScalarizedVector(N->getOperand(1));
   return DAG.getNode(ISD::SELECT, N->getDebugLoc(),
@@ -406,10 +408,11 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::CONCAT_VECTORS: SplitVecRes_CONCAT_VECTORS(N, Lo, Hi); break;
   case ISD::CONVERT_RNDSAT: SplitVecRes_CONVERT_RNDSAT(N, Lo, Hi); break;
   case ISD::EXTRACT_SUBVECTOR: SplitVecRes_EXTRACT_SUBVECTOR(N, Lo, Hi); break;
+  case ISD::FP_ROUND_INREG: SplitVecRes_InregOp(N, Lo, Hi); break;
   case ISD::FPOWI: SplitVecRes_FPOWI(N, Lo, Hi); break;
   case ISD::INSERT_VECTOR_ELT: SplitVecRes_INSERT_VECTOR_ELT(N, Lo, Hi); break;
   case ISD::SCALAR_TO_VECTOR: SplitVecRes_SCALAR_TO_VECTOR(N, Lo, Hi); break;
-  case ISD::SIGN_EXTEND_INREG: SplitVecRes_SIGN_EXTEND_INREG(N, Lo, Hi); break;
+  case ISD::SIGN_EXTEND_INREG: SplitVecRes_InregOp(N, Lo, Hi); break;
   case ISD::LOAD:
     SplitVecRes_LOAD(cast<LoadSDNode>(N), Lo, Hi);
     break;
@@ -654,6 +657,21 @@ void DAGTypeLegalizer::SplitVecRes_FPOWI(SDNode *N, SDValue &Lo,
   Hi = DAG.getNode(ISD::FPOWI, dl, Hi.getValueType(), Hi, N->getOperand(1));
 }
 
+void DAGTypeLegalizer::SplitVecRes_InregOp(SDNode *N, SDValue &Lo,
+                                           SDValue &Hi) {
+  SDValue LHSLo, LHSHi;
+  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
+  DebugLoc dl = N->getDebugLoc();
+
+  EVT LoVT, HiVT;
+  GetSplitDestVTs(cast<VTSDNode>(N->getOperand(1))->getVT(), LoVT, HiVT);
+
+  Lo = DAG.getNode(N->getOpcode(), dl, LHSLo.getValueType(), LHSLo,
+                   DAG.getValueType(LoVT));
+  Hi = DAG.getNode(N->getOpcode(), dl, LHSHi.getValueType(), LHSHi,
+                   DAG.getValueType(HiVT));
+}
+
 void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
                                                      SDValue &Hi) {
   SDValue Vec = N->getOperand(0);
@@ -709,18 +727,6 @@ void DAGTypeLegalizer::SplitVecRes_SCALAR_TO_VECTOR(SDNode *N, SDValue &Lo,
   Hi = DAG.getUNDEF(HiVT);
 }
 
-void DAGTypeLegalizer::SplitVecRes_SIGN_EXTEND_INREG(SDNode *N, SDValue &Lo,
-                                                     SDValue &Hi) {
-  SDValue LHSLo, LHSHi;
-  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
-  DebugLoc dl = N->getDebugLoc();
-
-  Lo = DAG.getNode(N->getOpcode(), dl, LHSLo.getValueType(), LHSLo,
-                   N->getOperand(1));
-  Hi = DAG.getNode(N->getOpcode(), dl, LHSHi.getValueType(), LHSHi,
-                   N->getOperand(1));
-}
-
 void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
                                         SDValue &Hi) {
   assert(ISD::isUNINDEXEDLoad(LD) && "Indexed load during type legalization!");
@@ -1159,10 +1165,11 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::CONCAT_VECTORS: Res = WidenVecRes_CONCAT_VECTORS(N); break;
   case ISD::CONVERT_RNDSAT: Res = WidenVecRes_CONVERT_RNDSAT(N); break;
   case ISD::EXTRACT_SUBVECTOR: Res = WidenVecRes_EXTRACT_SUBVECTOR(N); break;
+  case ISD::FP_ROUND_INREG: Res = WidenVecRes_InregOp(N); break;
   case ISD::INSERT_VECTOR_ELT: Res = WidenVecRes_INSERT_VECTOR_ELT(N); break;
   case ISD::LOAD: Res = WidenVecRes_LOAD(N); break;
   case ISD::SCALAR_TO_VECTOR: Res = WidenVecRes_SCALAR_TO_VECTOR(N); break;
-  case ISD::SIGN_EXTEND_INREG: Res = WidenVecRes_SIGN_EXTEND_INREG(N); break;
+  case ISD::SIGN_EXTEND_INREG: Res = WidenVecRes_InregOp(N); break;
   case ISD::SELECT: Res = WidenVecRes_SELECT(N); break;
   case ISD::SELECT_CC: Res = WidenVecRes_SELECT_CC(N); break;
   case ISD::UNDEF: Res = WidenVecRes_UNDEF(N); break;
@@ -1331,6 +1338,17 @@ SDValue DAGTypeLegalizer::WidenVecRes_Unary(SDNode *N) {
   return DAG.getNode(N->getOpcode(), N->getDebugLoc(), WidenVT, InOp);
 }
 
+SDValue DAGTypeLegalizer::WidenVecRes_InregOp(SDNode *N) {
+  EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+  EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
+                               cast<VTSDNode>(N->getOperand(1))->getVT()
+                                 .getVectorElementType(),
+                               WidenVT.getVectorNumElements());
+  SDValue WidenLHS = GetWidenedVector(N->getOperand(0));
+  return DAG.getNode(N->getOpcode(), N->getDebugLoc(),
+                     WidenVT, WidenLHS, DAG.getValueType(ExtVT));
+}
+
 SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
   SDValue InOp = N->getOperand(0);
   EVT InVT = InOp.getValueType();
@@ -1713,13 +1731,6 @@ SDValue DAGTypeLegalizer::WidenVecRes_SCALAR_TO_VECTOR(SDNode *N) {
                      WidenVT, N->getOperand(0));
 }
 
-SDValue DAGTypeLegalizer::WidenVecRes_SIGN_EXTEND_INREG(SDNode *N) {
-  EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
-  SDValue WidenLHS = GetWidenedVector(N->getOperand(0));
-  return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(),
-                     WidenVT, WidenLHS, N->getOperand(1));
-}
-
 SDValue DAGTypeLegalizer::WidenVecRes_SELECT(SDNode *N) {
   EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
   unsigned WidenNumElts = WidenVT.getVectorNumElements();


@@ -1741,7 +1741,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
     return;
   case ISD::SIGN_EXTEND_INREG: {
     EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
-    unsigned EBits = EVT.getSizeInBits();
+    unsigned EBits = EVT.getScalarType().getSizeInBits();
 
     // Sign extension. Compute the demanded bits in the result that are not
     // present in the input.
@@ -1786,7 +1786,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
     if (ISD::isZEXTLoad(Op.getNode())) {
       LoadSDNode *LD = cast<LoadSDNode>(Op);
       EVT VT = LD->getMemoryVT();
-      unsigned MemBits = VT.getSizeInBits();
+      unsigned MemBits = VT.getScalarType().getSizeInBits();
       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits) & Mask;
     }
     return;
@@ -2025,7 +2025,8 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
   case ISD::SIGN_EXTEND_INREG:
     // Max of the input and what this extends.
-    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
+    Tmp =
+      cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
     Tmp = VTBits-Tmp+1;
 
     Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
@@ -2169,10 +2170,10 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
       switch (ExtType) {
       default: break;
       case ISD::SEXTLOAD: // '17' bits known
-        Tmp = LD->getMemoryVT().getSizeInBits();
+        Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
         return VTBits-Tmp+1;
       case ISD::ZEXTLOAD: // '16' bits known
-        Tmp = LD->getMemoryVT().getSizeInBits();
+        Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
         return VTBits-Tmp;
       }
     }
@@ -2664,6 +2665,12 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
     assert(VT == N1.getValueType() && "Not an inreg round!");
     assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
            "Cannot FP_ROUND_INREG integer types");
+    assert(EVT.isVector() == VT.isVector() &&
+           "FP_ROUND_INREG type should be vector iff the operand "
+           "type is vector!");
+    assert((!EVT.isVector() ||
+            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
+           "Vector element counts must match in FP_ROUND_INREG");
     assert(EVT.bitsLE(VT) && "Not rounding down!");
     if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding.
     break;
@@ -2693,15 +2700,18 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
     assert(VT == N1.getValueType() && "Not an inreg extend!");
     assert(VT.isInteger() && EVT.isInteger() &&
            "Cannot *_EXTEND_INREG FP types");
-    assert(!EVT.isVector() &&
-           "SIGN_EXTEND_INREG type should be the vector element type rather "
-           "than the vector type!");
-    assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
+    assert(EVT.isVector() == VT.isVector() &&
+           "SIGN_EXTEND_INREG type should be vector iff the operand "
+           "type is vector!");
+    assert((!EVT.isVector() ||
+            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
+           "Vector element counts must match in SIGN_EXTEND_INREG");
+    assert(EVT.bitsLE(VT) && "Not extending!");
     if (EVT == VT) return N1; // Not actually extending
 
     if (N1C) {
       APInt Val = N1C->getAPIntValue();
-      unsigned FromBits = EVT.getSizeInBits();
+      unsigned FromBits = EVT.getScalarType().getSizeInBits();
       Val <<= Val.getBitWidth()-FromBits;
       Val = Val.ashr(Val.getBitWidth()-FromBits);
       return getConstant(Val, VT);
@@ -4109,7 +4119,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
     if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
       // If the and is only masking out bits that cannot effect the shift,
       // eliminate the and.
-      unsigned NumBits = VT.getSizeInBits()*2;
+      unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
       if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
         return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
     }
@@ -5946,6 +5956,13 @@ SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                                 getShiftAmountOperand(Operands[1])));
       break;
+    case ISD::SIGN_EXTEND_INREG:
+    case ISD::FP_ROUND_INREG: {
+      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
+      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
+                                Operands[0],
+                                getValueType(ExtVT)));
+    }
     }
   }


@@ -1272,19 +1272,21 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
     // Sign extension. Compute the demanded bits in the result that are not
     // present in the input.
-    APInt NewBits = APInt::getHighBitsSet(BitWidth,
-                                          BitWidth - EVT.getSizeInBits()) &
-                    NewMask;
+    APInt NewBits =
+      APInt::getHighBitsSet(BitWidth,
+                            BitWidth - EVT.getScalarType().getSizeInBits()) &
+      NewMask;
 
     // If none of the extended bits are demanded, eliminate the sextinreg.
     if (NewBits == 0)
       return TLO.CombineTo(Op, Op.getOperand(0));
 
-    APInt InSignBit = APInt::getSignBit(EVT.getSizeInBits());
+    APInt InSignBit = APInt::getSignBit(EVT.getScalarType().getSizeInBits());
     InSignBit.zext(BitWidth);
-    APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth,
-                                                   EVT.getSizeInBits()) &
-                              NewMask;
+    APInt InputDemandedBits =
+      APInt::getLowBitsSet(BitWidth,
+                           EVT.getScalarType().getSizeInBits()) &
+      NewMask;
 
     // Since the sign extended bits are demanded, we know that the sign
     // bit is demanded.
@@ -1313,7 +1315,8 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
     break;
   }
   case ISD::ZERO_EXTEND: {
-    unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits();
+    unsigned OperandBitWidth =
+      Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
     APInt InMask = NewMask;
     InMask.trunc(OperandBitWidth);
@@ -1336,7 +1339,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
   }
   case ISD::SIGN_EXTEND: {
     EVT InVT = Op.getOperand(0).getValueType();
-    unsigned InBits = InVT.getSizeInBits();
+    unsigned InBits = InVT.getScalarType().getSizeInBits();
     APInt InMask = APInt::getLowBitsSet(BitWidth, InBits);
     APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits);
     APInt NewBits = ~InMask & NewMask;
@@ -1376,7 +1379,8 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
     break;
   }
   case ISD::ANY_EXTEND: {
-    unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits();
+    unsigned OperandBitWidth =
+      Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
     APInt InMask = NewMask;
     InMask.trunc(OperandBitWidth);
     if (SimplifyDemandedBits(Op.getOperand(0), InMask,


@@ -0,0 +1,48 @@
+; RUN: llc < %s -march=x86-64
+; RUN: llc < %s -march=x86-64 -disable-mmx
+
+define <8 x i32> @a(<8 x i16> %a) nounwind {
+  %c = sext <8 x i16> %a to <8 x i32>
+  ret <8 x i32> %c
+}
+
+define <3 x i32> @b(<3 x i16> %a) nounwind {
+  %c = sext <3 x i16> %a to <3 x i32>
+  ret <3 x i32> %c
+}
+
+define <1 x i32> @c(<1 x i16> %a) nounwind {
+  %c = sext <1 x i16> %a to <1 x i32>
+  ret <1 x i32> %c
+}
+
+define <8 x i32> @d(<8 x i16> %a) nounwind {
+  %c = zext <8 x i16> %a to <8 x i32>
+  ret <8 x i32> %c
+}
+
+define <3 x i32> @e(<3 x i16> %a) nounwind {
+  %c = zext <3 x i16> %a to <3 x i32>
+  ret <3 x i32> %c
+}
+
+define <1 x i32> @f(<1 x i16> %a) nounwind {
+  %c = zext <1 x i16> %a to <1 x i32>
+  ret <1 x i32> %c
+}
+
+; TODO: Legalize doesn't yet handle this.
+;define <8 x i16> @g(<8 x i32> %a) nounwind {
+;  %c = trunc <8 x i32> %a to <8 x i16>
+;  ret <8 x i16> %c
+;}
+
+define <3 x i16> @h(<3 x i32> %a) nounwind {
+  %c = trunc <3 x i32> %a to <3 x i16>
+  ret <3 x i16> %c
+}
+
+define <1 x i16> @i(<1 x i32> %a) nounwind {
+  %c = trunc <1 x i32> %a to <1 x i16>
+  ret <1 x i16> %c
+}


@@ -1,4 +1,5 @@
 ; RUN: llc < %s -march=x86-64
+; RUN: llc < %s -march=x86-64 -disable-mmx
 
 define <8 x i32> @a(<8 x i32> %a) nounwind {
   %b = trunc <8 x i32> %a to <8 x i16>