
Introduce the BuildVectorSDNode class, which encapsulates the ISD::BUILD_VECTOR node.

The class also consolidates the code for detecting constant splats that is
shared between the PowerPC and CellSPU backends (and might be useful for other
backends). Also introduces SelectionDAG::getBUILD_VECTOR() for generating new
BUILD_VECTOR nodes.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@65296 91177308-0d34-0410-b5e6-96231b3b80d8
Scott Michel 2009-02-22 23:36:09 +00:00
parent ca4286295f
commit 4214a5531c
14 changed files with 397 additions and 318 deletions
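Before the per-file diffs, here is a minimal usage sketch of the API the commit message describes. It is illustrative only, not part of the patch, and assumes a SelectionDAG &DAG and a DebugLoc dl are already in scope:

    // Build a splat of the i32 constant 1 with the new convenience helper.
    SDValue Elt = DAG.getConstant(1, MVT::i32);
    SDValue Vec = DAG.getBUILD_VECTOR(MVT::v4i32, dl, Elt, Elt, Elt, Elt);

    // BUILD_VECTOR nodes are now BuildVectorSDNodes, so splat detection is a query.
    if (BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Vec.getNode())) {
      if (BVN->isConstantSplat()) {
        uint64_t SplatBits = BVN->getSplatBits();   // 0x1 for this vector
        unsigned SplatSize = BVN->getSplatSize();   // 4: 0x00000001 only repeats at 32 bits
        bool HasUndefs     = BVN->hasAnyUndefBits(); // false: no undef elements
        // ... emit a target-specific splat sequence ...
      }
    }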

@ -403,6 +403,14 @@ public:
(unsigned)Ops.size() - (InFlag.getNode() == 0 ? 1 : 0));
}
/// getBUILD_VECTOR - Return a new BUILD_VECTOR node
SDValue getBUILD_VECTOR(MVT vecVT, DebugLoc dl, SDValue E1);
SDValue getBUILD_VECTOR(MVT vecVT, DebugLoc dl, SDValue E1, SDValue E2);
SDValue getBUILD_VECTOR(MVT vecVT, DebugLoc dl, SDValue E1, SDValue E2,
SDValue E3, SDValue E4);
SDValue getBUILD_VECTOR(MVT vecVT, DebugLoc dl, const SDValue *Elts,
unsigned NumElts);
/// getUNDEF - Return an UNDEF node. UNDEF does not have a useful DebugLoc.
SDValue getUNDEF(MVT VT) {
return getNode(ISD::UNDEF, DebugLoc::getUnknownLoc(), VT);

@ -1929,6 +1929,73 @@ public:
}
};
/// BuildVectorSDNode - A container for ISD::BUILD_VECTOR. This is used to
/// encapsulate common BUILD_VECTOR code and operations such as constant splat
/// testing.
class BuildVectorSDNode : public SDNode {
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
// Constant splat state:
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
//! Has the splat already been computed?
bool computedSplat;
//! Is this vector a constant splat?
bool isSplatVector;
//! Splat has undefined bits in it
bool hasUndefSplatBitsFlag;
//! The splat value
uint64_t SplatBits;
//! The undefined part of the splat
uint64_t SplatUndef;
//! The splat's size (1, 2, 4 or 8 bytes)
unsigned SplatSize;
protected:
friend class SelectionDAG;
//! Arbitrary element ISD::BUILD_VECTOR constructor
explicit BuildVectorSDNode(MVT vecVT, DebugLoc dl, const SDValue *Elts,
unsigned NumElts);
public:
//! Constant splat predicate.
/*!
Determine if this ISD::BUILD_VECTOR is a constant splat. The results are
cached to prevent recomputation.
@param MinSplatBits: minimum number of bits in the constant splat, defaults
to 0 for 'don't care', but normally one of [8, 16, 32, 64].
@return true if the splat has the required minimum number of bits and the
splat really is a constant splat (accounting for undef bits).
*/
bool isConstantSplat(int MinSplatBits = 0);
//! Get the splat bits
uint64_t getSplatBits() const {
assert(computedSplat && "BuildVectorSDNode: compute splat bits first!");
return SplatBits;
}
uint64_t getSplatUndef() const {
assert(computedSplat && "BuildVectorSDNode: compute splat bits first!");
return SplatUndef;
}
unsigned getSplatSize() const {
assert(computedSplat && "BuildVectorSDNode: compute splat bits first!");
return SplatSize;
}
bool hasAnyUndefBits() const {
assert(computedSplat && "BuildVectorSDNode: compute splat bits first!");
return hasUndefSplatBitsFlag;
}
static bool classof(const BuildVectorSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::BUILD_VECTOR;
}
};
/// SrcValueSDNode - An SDNode that holds an arbitrary LLVM IR Value. This is
/// used when the SelectionDAG needs to make a simple reference to something
/// in the LLVM IR representation.

@ -2386,8 +2386,7 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
// Produce a vector of zeros.
SDValue El = DAG.getConstant(0, VT.getVectorElementType());
std::vector<SDValue> Ops(VT.getVectorNumElements(), El);
return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
&Ops[0], Ops.size());
return DAG.getBUILD_VECTOR(VT, N->getDebugLoc(), &Ops[0], Ops.size());
}
}
@ -3858,8 +3857,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) {
}
MVT VT = MVT::getVectorVT(DstEltVT,
BV->getValueType(0).getVectorNumElements());
return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
&Ops[0], Ops.size());
return DAG.getBUILD_VECTOR(VT, BV->getDebugLoc(), &Ops[0], Ops.size());
}
// Otherwise, we're growing or shrinking the elements. To avoid having to
@ -3915,8 +3913,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) {
}
MVT VT = MVT::getVectorVT(DstEltVT, Ops.size());
return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
&Ops[0], Ops.size());
return DAG.getBUILD_VECTOR(VT, BV->getDebugLoc(), &Ops[0], Ops.size());
}
// Finally, this must be the case where we are shrinking elements: each input
@ -3950,8 +3947,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) {
std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
}
return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
&Ops[0], Ops.size());
return DAG.getBUILD_VECTOR(VT, BV->getDebugLoc(), &Ops[0], Ops.size());
}
SDValue DAGCombiner::visitFADD(SDNode *N) {
@ -5084,8 +5080,8 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
InVec.getNode()->op_end());
if (Elt < Ops.size())
Ops[Elt] = InVal;
return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
InVec.getValueType(), &Ops[0], Ops.size());
return DAG.getBUILD_VECTOR(InVec.getValueType(), N->getDebugLoc(),
&Ops[0], Ops.size());
}
return SDValue();
@ -5270,13 +5266,13 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
// Use an undef build_vector as input for the second operand.
std::vector<SDValue> UnOps(NumInScalars,
DAG.getUNDEF(EltType));
Ops[1] = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
&UnOps[0], UnOps.size());
Ops[1] = DAG.getBUILD_VECTOR(VT, N->getDebugLoc(),
&UnOps[0], UnOps.size());
AddToWorkList(Ops[1].getNode());
}
Ops[2] = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), BuildVecVT,
&BuildVecIndices[0], BuildVecIndices.size());
Ops[2] = DAG.getBUILD_VECTOR(BuildVecVT, N->getDebugLoc(),
&BuildVecIndices[0], BuildVecIndices.size());
return DAG.getNode(ISD::VECTOR_SHUFFLE, N->getDebugLoc(), VT, Ops, 3);
}
@ -5419,9 +5415,8 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
}
}
ShufMask = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
ShufMask.getValueType(),
&MappedOps[0], MappedOps.size());
ShufMask = DAG.getBUILD_VECTOR(ShufMask.getValueType(), N->getDebugLoc(),
&MappedOps[0], MappedOps.size());
AddToWorkList(ShufMask.getNode());
return DAG.getNode(ISD::VECTOR_SHUFFLE, N->getDebugLoc(),
N->getValueType(0), N0,
@ -5471,10 +5466,10 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
Ops.push_back(LHS);
AddToWorkList(LHS.getNode());
std::vector<SDValue> ZeroOps(NumElts, DAG.getConstant(0, EVT));
Ops.push_back(DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
VT, &ZeroOps[0], ZeroOps.size()));
Ops.push_back(DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
MaskVT, &IdxOps[0], IdxOps.size()));
Ops.push_back(DAG.getBUILD_VECTOR(VT, N->getDebugLoc(),
&ZeroOps[0], ZeroOps.size()));
Ops.push_back(DAG.getBUILD_VECTOR(MaskVT, N->getDebugLoc(),
&IdxOps[0], IdxOps.size()));
SDValue Result = DAG.getNode(ISD::VECTOR_SHUFFLE, N->getDebugLoc(),
VT, &Ops[0], Ops.size());
@ -5543,8 +5538,7 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
if (Ops.size() == LHS.getNumOperands()) {
MVT VT = LHS.getValueType();
return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
&Ops[0], Ops.size());
return DAG.getBUILD_VECTOR(VT, N->getDebugLoc(), &Ops[0], Ops.size());
}
}

@ -353,8 +353,7 @@ SDNode *SelectionDAGLegalize::isShuffleLegal(MVT VT, SDValue Mask) const {
}
}
}
Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getDebugLoc(),
NVT, &Ops[0], Ops.size());
Mask = DAG.getBUILD_VECTOR(NVT, Mask.getDebugLoc(), &Ops[0], Ops.size());
}
VT = NVT;
break;
@ -931,7 +930,7 @@ SDValue SelectionDAGLegalize::UnrollVectorOp(SDValue Op) {
}
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Scalars[0], Scalars.size());
return DAG.getBUILD_VECTOR(VT, dl, &Scalars[0], Scalars.size());
}
/// GetFPLibCall - Return the right libcall for the given floating point type.
@ -1290,7 +1289,6 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
unsigned Line = DSP->getLine();
unsigned Col = DSP->getColumn();
const Function *F = DAG.getMachineFunction().getFunction();
if (!F->hasFnAttr(Attribute::OptimizeForSize)) {
@ -1676,8 +1674,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
else
ShufOps.push_back(DAG.getConstant(NumElts, ShufMaskEltVT));
}
SDValue ShufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, ShufMaskVT,
&ShufOps[0], ShufOps.size());
SDValue ShufMask = DAG.getBUILD_VECTOR(ShufMaskVT, dl,
&ShufOps[0], ShufOps.size());
Result = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, Tmp1.getValueType(),
Tmp1, ScVec, ShufMask);
@ -1756,7 +1754,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
DAG.getConstant(Idx - NumElems, PtrVT)));
}
}
Result = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
Result = DAG.getBUILD_VECTOR(VT, dl, &Ops[0], Ops.size());
break;
}
case TargetLowering::Promote: {
@ -1808,8 +1806,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
DAG.getConstant(j, PtrVT)));
}
}
return LegalizeOp(DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0),
&Ops[0], Ops.size()));
return LegalizeOp(DAG.getBUILD_VECTOR(Node->getValueType(0), dl,
&Ops[0], Ops.size()));
}
case ISD::CALLSEQ_START: {
@ -3162,7 +3160,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
APInt::getAllOnesValue(EltVT.getSizeInBits()),
EltVT), DAG.getConstant(0, EltVT));
}
Result = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], NumElems);
Result = DAG.getBUILD_VECTOR(VT, dl, &Ops[0], NumElems);
break;
}
}
@ -5557,8 +5555,8 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
SDValue Zero = DAG.getConstant(0, MaskVT.getVectorElementType());
std::vector<SDValue> ZeroVec(NumElems, Zero);
SDValue SplatMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
&ZeroVec[0], ZeroVec.size());
SDValue SplatMask = DAG.getBUILD_VECTOR(MaskVT, dl,
&ZeroVec[0], ZeroVec.size());
// If the target supports VECTOR_SHUFFLE and this shuffle mask, use it.
if (isShuffleLegal(Node->getValueType(0), SplatMask)) {
@ -5610,8 +5608,8 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
else
MaskVec[Val2Elts[i]] = DAG.getUNDEF(MaskEltVT);
SDValue ShuffleMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
&MaskVec[0], MaskVec.size());
SDValue ShuffleMask = DAG.getBUILD_VECTOR(MaskVT, dl,
&MaskVec[0], MaskVec.size());
// If the target supports SCALAR_TO_VECTOR and this shuffle mask, use it.
if (TLI.isOperationLegalOrCustom(ISD::SCALAR_TO_VECTOR,
@ -5957,7 +5955,7 @@ ExpandIntToFP(bool isSigned, MVT DestTy, SDValue Source, DebugLoc dl) {
SDValue Scalar = ScalarizeVectorOp(Source);
SDValue Result = LegalizeINT_TO_FP(SDValue(), isSigned,
DestEltTy, Scalar, dl);
return DAG.getNode(ISD::BUILD_VECTOR, dl, DestTy, Result);
return DAG.getBUILD_VECTOR(DestTy, dl, Result);
}
SDValue Lo, Hi;
SplitVectorOp(Source, Lo, Hi);
@ -7572,7 +7570,7 @@ void SelectionDAGLegalize::SplitVectorOp(SDValue Op, SDValue &Lo,
Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewEltVT, InVec,
DAG.getConstant(Idx, PtrVT)));
}
Lo = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT_Lo, &Ops[0], Ops.size());
Lo = DAG.getBUILD_VECTOR(NewVT_Lo, dl, &Ops[0], Ops.size());
Ops.clear();
for (unsigned i = NewNumElts_Lo; i != NumElements; ++i) {
@ -7590,17 +7588,17 @@ void SelectionDAGLegalize::SplitVectorOp(SDValue Op, SDValue &Lo,
Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewEltVT, InVec,
DAG.getConstant(Idx, PtrVT)));
}
Hi = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT_Hi, &Ops[0], Ops.size());
Hi = DAG.getBUILD_VECTOR(NewVT_Hi, dl, &Ops[0], Ops.size());
break;
}
case ISD::BUILD_VECTOR: {
SmallVector<SDValue, 8> LoOps(Node->op_begin(),
Node->op_begin()+NewNumElts_Lo);
Lo = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT_Lo, &LoOps[0], LoOps.size());
Lo = DAG.getBUILD_VECTOR(NewVT_Lo, dl, &LoOps[0], LoOps.size());
SmallVector<SDValue, 8> HiOps(Node->op_begin()+NewNumElts_Lo,
Node->op_end());
Hi = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT_Hi, &HiOps[0], HiOps.size());
Hi = DAG.getBUILD_VECTOR(NewVT_Hi, dl, &HiOps[0], HiOps.size());
break;
}
case ISD::CONCAT_VECTORS: {
@ -8066,8 +8064,7 @@ SDValue SelectionDAGLegalize::WidenVectorOp(SDValue Op, MVT WidenVT) {
for (unsigned i = NumElts; i < NewNumElts; ++i) {
NewOps.push_back(DAG.getUNDEF(EVT));
}
Result = DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT,
&NewOps[0], NewOps.size());
Result = DAG.getBUILD_VECTOR(WidenVT, dl, &NewOps[0], NewOps.size());
break;
}
case ISD::INSERT_VECTOR_ELT: {
@ -8104,9 +8101,8 @@ SDValue SelectionDAGLegalize::WidenVectorOp(SDValue Op, MVT WidenVT) {
NewOps.push_back(DAG.getUNDEF(PVT));
}
SDValue Tmp3 = DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::getVectorVT(PVT, NewOps.size()),
&NewOps[0], NewOps.size());
SDValue Tmp3 = DAG.getBUILD_VECTOR(MVT::getVectorVT(PVT, NewOps.size()), dl,
&NewOps[0], NewOps.size());
Result = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, WidenVT, Tmp1, Tmp2, Tmp3);
break;
@ -8152,7 +8148,7 @@ SDValue SelectionDAGLegalize::WidenVectorOp(SDValue Op, MVT WidenVT) {
Ops[i] = UndefVal;
MVT NewInVT = MVT::getVectorVT(InVT, NewNumElts);
Result = DAG.getNode(ISD::BUILD_VECTOR, dl, NewInVT, &Ops[0], NewNumElts);
Result = DAG.getBUILD_VECTOR(NewInVT, dl, &Ops[0], NewNumElts);
Result = DAG.getNode(ISD::BIT_CONVERT, dl, WidenVT, Result);
}
break;

@ -810,9 +810,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_VECTOR(SDNode *N) {
NewElts.push_back(JoinIntegers(Lo, Hi));
}
SDValue NewVec = DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::getVectorVT(NewVT, NewElts.size()),
&NewElts[0], NewElts.size());
SDValue NewVec = DAG.getBUILD_VECTOR(MVT::getVectorVT(NewVT, NewElts.size()),
dl, &NewElts[0], NewElts.size());
// Convert the new vector to the old vector type.
return DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, NewVec);

@ -246,7 +246,7 @@ SDValue DAGTypeLegalizer::ExpandOp_BIT_CONVERT(SDNode *N) {
if (TLI.isBigEndian())
std::swap(Parts[0], Parts[1]);
SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, Parts, 2);
SDValue Vec = DAG.getBUILD_VECTOR(NVT, dl, Parts, 2);
return DAG.getNode(ISD::BIT_CONVERT, dl, N->getValueType(0), Vec);
}
}
@ -277,9 +277,8 @@ SDValue DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) {
NewElts.push_back(Hi);
}
SDValue NewVec = DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::getVectorVT(NewVT, NewElts.size()),
&NewElts[0], NewElts.size());
SDValue NewVec = DAG.getBUILD_VECTOR(MVT::getVectorVT(NewVT, NewElts.size()),
dl, &NewElts[0], NewElts.size());
// Convert the new vector to the old vector type.
return DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, NewVec);
@ -335,7 +334,7 @@ SDValue DAGTypeLegalizer::ExpandOp_SCALAR_TO_VECTOR(SDNode *N) {
SDValue UndefVal = DAG.getUNDEF(Ops[0].getValueType());
for (unsigned i = 1; i < NumElts; ++i)
Ops[i] = UndefVal;
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], NumElts);
return DAG.getBUILD_VECTOR(VT, dl, &Ops[0], NumElts);
}
SDValue DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) {

@ -309,8 +309,8 @@ SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(SDNode *N) {
SmallVector<SDValue, 8> Ops(N->getNumOperands());
for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
Ops[i] = GetScalarizedVector(N->getOperand(i));
return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), N->getValueType(0),
&Ops[0], Ops.size());
return DAG.getBUILD_VECTOR(N->getValueType(0), N->getDebugLoc(),
&Ops[0], Ops.size());
}
/// ScalarizeVecOp_EXTRACT_VECTOR_ELT - If the input is a vector that needs to
@ -501,10 +501,10 @@ void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo,
GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
unsigned LoNumElts = LoVT.getVectorNumElements();
SmallVector<SDValue, 8> LoOps(N->op_begin(), N->op_begin()+LoNumElts);
Lo = DAG.getNode(ISD::BUILD_VECTOR, dl, LoVT, &LoOps[0], LoOps.size());
Lo = DAG.getBUILD_VECTOR(LoVT, dl, &LoOps[0], LoOps.size());
SmallVector<SDValue, 8> HiOps(N->op_begin()+LoNumElts, N->op_end());
Hi = DAG.getNode(ISD::BUILD_VECTOR, dl, HiVT, &HiOps[0], HiOps.size());
Hi = DAG.getBUILD_VECTOR(HiVT, dl, &HiOps[0], HiOps.size());
}
void DAGTypeLegalizer::SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo,
@ -805,15 +805,14 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(SDNode *N, SDValue &Lo,
}
// Construct the Lo/Hi output using a BUILD_VECTOR.
Output = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, &Ops[0], Ops.size());
Output = DAG.getBUILD_VECTOR(NewVT, dl, &Ops[0], Ops.size());
} else if (InputUsed[0] == -1U) {
// No input vectors were used! The result is undefined.
Output = DAG.getUNDEF(NewVT);
} else {
// At least one input vector was used. Create a new shuffle vector.
SDValue NewMask = DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::getVectorVT(IdxVT, Ops.size()),
&Ops[0], Ops.size());
SDValue NewMask = DAG.getBUILD_VECTOR(MVT::getVectorVT(IdxVT, Ops.size()),
dl, &Ops[0], Ops.size());
SDValue Op0 = Inputs[InputUsed[0]];
// If only one input was used, use an undefined vector for the other.
SDValue Op1 = InputUsed[1] == -1U ?
@ -1080,8 +1079,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_VECTOR_SHUFFLE(SDNode *N, unsigned OpNo) {
}
return DAG.UpdateNodeOperands(SDValue(N,0),
N->getOperand(0), N->getOperand(1),
DAG.getNode(ISD::BUILD_VECTOR, dl,
VecVT, &Ops[0], Ops.size()));
DAG.getBUILD_VECTOR(VecVT, dl,
&Ops[0], Ops.size()));
}
// Continuing is pointless - failure is certain.
@ -1246,7 +1245,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
for (; i < WidenNumElts; ++i)
Ops[i] = UndefVal;
return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, &Ops[0], WidenNumElts);
return DAG.getBUILD_VECTOR(WidenVT, dl, &Ops[0], WidenNumElts);
}
SDValue DAGTypeLegalizer::WidenVecRes_Shift(SDNode *N) {
@ -1344,8 +1343,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl,
NewInVT, &Ops[0], NewNumElts);
else
NewVec = DAG.getNode(ISD::BUILD_VECTOR, dl,
NewInVT, &Ops[0], NewNumElts);
NewVec = DAG.getBUILD_VECTOR(NewInVT, dl, &Ops[0], NewNumElts);
return DAG.getNode(ISD::BIT_CONVERT, dl, WidenVT, NewVec);
}
}
@ -1379,7 +1377,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_BUILD_VECTOR(SDNode *N) {
for (unsigned i = NumElts; i < WidenNumElts; ++i)
NewOps.push_back(DAG.getUNDEF(EltVT));
return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, &NewOps[0], NewOps.size());
return DAG.getBUILD_VECTOR(WidenVT, dl, &NewOps[0], NewOps.size());
}
SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
@ -1425,8 +1423,8 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
MaskOps[i] = DAG.getConstant(i, PtrVT);
MaskOps[i+WidenNumElts/2] = DAG.getConstant(i+WidenNumElts, PtrVT);
}
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::getVectorVT(PtrVT, WidenNumElts),
SDValue Mask =
DAG.getBUILD_VECTOR(MVT::getVectorVT(PtrVT, WidenNumElts), dl,
&MaskOps[0], WidenNumElts);
return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, WidenVT,
GetWidenedVector(N->getOperand(0)),
@ -1451,7 +1449,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
SDValue UndefVal = DAG.getUNDEF(EltVT);
for (; Idx < WidenNumElts; ++Idx)
Ops[Idx] = UndefVal;
return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, &Ops[0], WidenNumElts);
return DAG.getBUILD_VECTOR(WidenVT, dl, &Ops[0], WidenNumElts);
}
SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
@ -1529,7 +1527,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
for (; i < WidenNumElts; ++i)
Ops[i] = UndefVal;
return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, &Ops[0], WidenNumElts);
return DAG.getBUILD_VECTOR(WidenVT, dl, &Ops[0], WidenNumElts);
}
SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
@ -1582,7 +1580,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
SDValue UndefVal = DAG.getUNDEF(EltVT);
for (; i < WidenNumElts; ++i)
Ops[i] = UndefVal;
return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, &Ops[0], WidenNumElts);
return DAG.getBUILD_VECTOR(WidenVT, dl, &Ops[0], WidenNumElts);
}
SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(SDNode *N) {
@ -1639,7 +1637,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) {
for (; i != WidenNumElts; ++i)
Ops[i] = UndefVal;
Result = DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, &Ops[0], Ops.size());
Result = DAG.getBUILD_VECTOR(WidenVT, dl, &Ops[0], Ops.size());
} else {
assert(LdVT.getVectorElementType() == WidenVT.getVectorElementType());
unsigned int LdWidth = LdVT.getSizeInBits();
@ -1735,9 +1733,8 @@ SDValue DAGTypeLegalizer::WidenVecRes_VECTOR_SHUFFLE(SDNode *N) {
}
for (unsigned i = NumElts; i < WidenNumElts; ++i)
MaskOps[i] = DAG.getUNDEF(IdxVT);
SDValue NewMask = DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::getVectorVT(IdxVT, WidenNumElts),
&MaskOps[0], WidenNumElts);
SDValue NewMask = DAG.getBUILD_VECTOR(MVT::getVectorVT(IdxVT, WidenNumElts),
dl, &MaskOps[0], WidenNumElts);
return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, WidenVT, InOp1, InOp2, NewMask);
}
@ -1830,7 +1827,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) {
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, InOp,
DAG.getIntPtrConstant(i)));
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], NumElts);
return DAG.getBUILD_VECTOR(VT, dl, &Ops[0], NumElts);
}
SDValue DAGTypeLegalizer::WidenVecOp_BIT_CONVERT(SDNode *N) {
@ -1889,7 +1886,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_CONCAT_VECTORS(SDNode *N) {
Ops[Idx++] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
DAG.getIntPtrConstant(j));
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], NumElts);
return DAG.getBUILD_VECTOR(VT, dl, &Ops[0], NumElts);
}
SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
@ -2179,5 +2176,5 @@ SDValue DAGTypeLegalizer::ModifyToType(SDValue InOp, MVT NVT) {
SDValue UndefVal = DAG.getUNDEF(EltVT);
for ( ; Idx < WidenNumElts; ++Idx)
Ops[Idx] = UndefVal;
return DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, &Ops[0], WidenNumElts);
return DAG.getBUILD_VECTOR(NVT, dl, &Ops[0], WidenNumElts);
}

@ -847,7 +847,7 @@ SDValue SelectionDAG::getNOT(DebugLoc DL, SDValue Val, MVT VT) {
SDValue NegOneElt =
getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), EltVT);
std::vector<SDValue> NegOnes(VT.getVectorNumElements(), NegOneElt);
NegOne = getNode(ISD::BUILD_VECTOR, DL, VT, &NegOnes[0], NegOnes.size());
NegOne = getBUILD_VECTOR(VT, DL, &NegOnes[0], NegOnes.size());
} else {
NegOne = getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
}
@ -893,8 +893,8 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, MVT VT, bool isT) {
if (VT.isVector()) {
SmallVector<SDValue, 8> Ops;
Ops.assign(VT.getVectorNumElements(), Result);
Result = getNode(ISD::BUILD_VECTOR, DebugLoc::getUnknownLoc(),
VT, &Ops[0], Ops.size());
Result = getBUILD_VECTOR(VT, DebugLoc::getUnknownLoc(),
&Ops[0], Ops.size());
}
return Result;
}
@ -937,9 +937,8 @@ SDValue SelectionDAG::getConstantFP(const ConstantFP& V, MVT VT, bool isTarget){
if (VT.isVector()) {
SmallVector<SDValue, 8> Ops;
Ops.assign(VT.getVectorNumElements(), Result);
// FIXME DebugLoc info might be appropriate here
Result = getNode(ISD::BUILD_VECTOR, DebugLoc::getUnknownLoc(),
VT, &Ops[0], Ops.size());
Result = getBUILD_VECTOR(VT, DebugLoc::getUnknownLoc(),
&Ops[0], Ops.size());
}
return Result;
}
@ -1078,6 +1077,39 @@ SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
return SDValue(N, 0);
}
SDValue SelectionDAG::getBUILD_VECTOR(MVT vecVT, DebugLoc dl, SDValue E1) {
return getBUILD_VECTOR(vecVT, dl, &E1, 1);
}
SDValue SelectionDAG::getBUILD_VECTOR(MVT vecVT, DebugLoc dl, SDValue E1,
SDValue E2) {
SDValue Ops[2] = { E1, E2 };
return getBUILD_VECTOR(vecVT, dl, &Ops[0], 2);
}
SDValue SelectionDAG::getBUILD_VECTOR(MVT vecVT, DebugLoc dl, SDValue E1,
SDValue E2, SDValue E3, SDValue E4) {
SDValue Ops[4] = { E1, E2, E3, E4 };
return getBUILD_VECTOR(vecVT, dl, &Ops[0], 4);
}
SDValue SelectionDAG::getBUILD_VECTOR(MVT vecVT, DebugLoc dl,
const SDValue *Elts, unsigned NumElts) {
FoldingSetNodeID ID;
void *IP = 0;
SDNode *N = 0;
AddNodeIDNode(ID, ISD::BUILD_VECTOR, getVTList(vecVT), Elts, NumElts);
if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)) == 0) {
N = NodeAllocator.Allocate<BuildVectorSDNode>();
new (N) BuildVectorSDNode(vecVT, dl, Elts, NumElts);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
}
return SDValue(N, 0);
}
SDValue SelectionDAG::getArgFlags(ISD::ArgFlagsTy Flags) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::ARG_FLAGS, getVTList(MVT::Other), 0, 0);
@ -2409,7 +2441,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
N2.getOpcode() == ISD::BUILD_VECTOR) {
SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), N1.getNode()->op_end());
Elts.insert(Elts.end(), N2.getNode()->op_begin(), N2.getNode()->op_end());
return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
return getBUILD_VECTOR(VT, DL, &Elts[0], Elts.size());
}
break;
case ISD::AND:
@ -2763,7 +2795,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), N1.getNode()->op_end());
Elts.insert(Elts.end(), N2.getNode()->op_begin(), N2.getNode()->op_end());
Elts.insert(Elts.end(), N3.getNode()->op_begin(), N3.getNode()->op_end());
return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
return getBUILD_VECTOR(VT, DL, &Elts[0], Elts.size());
}
break;
case ISD::SETCC: {
@ -4822,6 +4854,111 @@ MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs,
assert(isVolatile() == vol && "Volatile representation error!");
}
BuildVectorSDNode::BuildVectorSDNode(MVT vecVT, DebugLoc dl,
const SDValue *Elts, unsigned NumElts)
: SDNode(ISD::BUILD_VECTOR, dl, getSDVTList(vecVT), Elts, NumElts),
computedSplat(false), isSplatVector(false), hasUndefSplatBitsFlag(false),
SplatBits(0LL), SplatUndef(0LL), SplatSize(0)
{ }
bool BuildVectorSDNode::isConstantSplat(int MinSplatBits) {
unsigned int nOps = getNumOperands();
assert(nOps > 0 && "isConstantSplat has 0-size build vector");
// Return early if we already know the answer:
if (computedSplat)
return isSplatVector;
// The vector's used (non-undef) bits
uint64_t VectorBits[2] = { 0, 0 };
// The vector's undefined bits
uint64_t UndefBits[2] = { 0, 0 };
// Assume that this isn't a constant splat.
isSplatVector = false;
// Gather the constant and undefined bits
unsigned EltBitSize = getOperand(0).getValueType().getSizeInBits();
for (unsigned i = 0; i < nOps; ++i) {
SDValue OpVal = getOperand(i);
unsigned PartNo = i >= nOps/2; // In the upper 128 bits?
unsigned SlotNo = nOps/2 - (i & (nOps/2-1))-1;// Which subpiece of the uint64_t.
uint64_t EltBits = 0;
if (OpVal.getOpcode() == ISD::UNDEF) {
uint64_t EltUndefBits = ~0U >> (32-EltBitSize);
UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize);
continue;
} else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
EltBits = CN->getZExtValue();
if (EltBitSize <= 32)
EltBits &= (~0U >> (32-EltBitSize));
} else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
const APFloat &apf = CN->getValueAPF();
if (OpVal.getValueType() == MVT::f32)
EltBits = FloatToBits(apf.convertToFloat());
else
EltBits = DoubleToBits(apf.convertToDouble());
} else {
// Nonconstant element -> not a splat.
computedSplat = true;
return isSplatVector;
}
VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize);
}
if ((VectorBits[0] & ~UndefBits[1]) != (VectorBits[1] & ~UndefBits[0])) {
// Can't be a splat if two pieces don't match.
computedSplat = true;
return isSplatVector;
}
// Don't let undefs prevent splats from matching. See if the top 64-bits
// are the same as the lower 64-bits, ignoring undefs.
uint64_t Bits64 = VectorBits[0] | VectorBits[1];
uint64_t Undef64 = UndefBits[0] & UndefBits[1];
uint32_t Bits32 = uint32_t(Bits64) | uint32_t(Bits64 >> 32);
uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32);
uint16_t Bits16 = uint16_t(Bits32) | uint16_t(Bits32 >> 16);
uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16);
bool splat64 =
(VectorBits[0] & ~UndefBits[1]) == (VectorBits[1] & ~UndefBits[0]);
bool splat32 = (Bits64 & (~Undef64 >> 32)) == ((Bits64 >> 32) & ~Undef64);
bool splat16 = (Bits32 & (~Undef32 >> 16)) == ((Bits32 >> 16) & ~Undef32);
bool splat8 =
(Bits16 & (uint16_t(~Undef16) >> 8)) == ((Bits16 >> 8) & ~Undef16);
hasUndefSplatBitsFlag = ((UndefBits[0] | UndefBits[1]) != 0);
if (splat64 && (MinSplatBits >= 64 || !splat32)) {
SplatBits = VectorBits[0];
SplatUndef = UndefBits[0];
SplatSize = 8;
isSplatVector = true;
} else if (splat32 && (MinSplatBits >= 32 || !splat16)) {
SplatBits = Bits32;
SplatUndef = Undef32;
SplatSize = 4;
isSplatVector = true;
} else if (splat16 && (MinSplatBits >= 16 || !splat8)) {
SplatBits = Bits16;
SplatUndef = Undef16;
SplatSize = 2;
isSplatVector = true;
} else if (splat8) {
SplatBits = uint8_t(Bits16) | uint8_t(Bits16 >> 8);
SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8);
SplatSize = 1;
isSplatVector = true;
}
computedSplat = true;
return isSplatVector;
}
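// Illustrative walk-through (not part of the patch): for a v4i32 build vector
// whose four operands are the constant 0x01010101, both VectorBits halves are
// 0x0101010101010101, so splat64/splat32/splat16/splat8 all hold; with the
// default MinSplatBits of 0 the smallest repetition wins and the node reports
// SplatBits == 0x01, SplatUndef == 0, SplatSize == 1.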
/// getMemOperand - Return a MachineMemOperand object describing the memory
/// reference performed by this memory reference.
MachineMemOperand MemSDNode::getMemOperand() const {

@ -533,7 +533,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
assert(ValueVT.getVectorElementType() == PartVT &&
ValueVT.getVectorNumElements() == 1 &&
"Only trivial scalar-to-vector conversions should get here!");
return DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val);
return DAG.getBUILD_VECTOR(ValueVT, dl, Val);
}
if (PartVT.isInteger() &&
@ -935,8 +935,8 @@ SDValue SelectionDAGLowering::getValue(const Value *V) {
}
// Create a BUILD_VECTOR node.
return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
VT, &Ops[0], Ops.size());
return NodeMap[V] = DAG.getBUILD_VECTOR(VT, getCurDebugLoc(),
&Ops[0], Ops.size());
}
// If this is a static alloca, generate it as the frameindex instead of
@ -2470,9 +2470,8 @@ void SelectionDAGLowering::visitShuffleVector(User &I) {
MaskEltVT));
}
}
Mask = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
Mask.getValueType(),
&MappedOps[0], MappedOps.size());
Mask = DAG.getBUILD_VECTOR(Mask.getValueType(), getCurDebugLoc(),
&MappedOps[0], MappedOps.size());
setValue(&I, DAG.getNode(ISD::VECTOR_SHUFFLE, getCurDebugLoc(),
VT, Src1, Src2, Mask));
@ -2570,9 +2569,8 @@ void SelectionDAGLowering::visitShuffleVector(User &I) {
}
}
}
Mask = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
Mask.getValueType(),
&MappedOps[0], MappedOps.size());
Mask = DAG.getBUILD_VECTOR(Mask.getValueType(), getCurDebugLoc(),
&MappedOps[0], MappedOps.size());
setValue(&I, DAG.getNode(ISD::VECTOR_SHUFFLE, getCurDebugLoc(),
VT, Src1, Src2, Mask));
return;
@ -2601,8 +2599,7 @@ void SelectionDAGLowering::visitShuffleVector(User &I) {
DAG.getConstant(Idx - SrcNumElts, PtrVT)));
}
}
setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
VT, &Ops[0], Ops.size()));
setValue(&I, DAG.getBUILD_VECTOR(VT, getCurDebugLoc(), &Ops[0], Ops.size()));
}
void SelectionDAGLowering::visitInsertValue(InsertValueInst &I) {

@ -705,7 +705,7 @@ SPUDAGToDAGISel::Select(SDValue Op) {
/*NOTREACHED*/
break;
case MVT::i32:
shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
shufMask = CurDAG->getBUILD_VECTOR(MVT::v4i32, dl,
CurDAG->getConstant(0x80808080, MVT::i32),
CurDAG->getConstant(0x00010203, MVT::i32),
CurDAG->getConstant(0x80808080, MVT::i32),
@ -713,7 +713,7 @@ SPUDAGToDAGISel::Select(SDValue Op) {
break;
case MVT::i16:
shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
shufMask = CurDAG->getBUILD_VECTOR(MVT::v4i32, dl,
CurDAG->getConstant(0x80808080, MVT::i32),
CurDAG->getConstant(0x80800203, MVT::i32),
CurDAG->getConstant(0x80808080, MVT::i32),
@ -721,7 +721,7 @@ SPUDAGToDAGISel::Select(SDValue Op) {
break;
case MVT::i8:
shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
shufMask = CurDAG->getBUILD_VECTOR(MVT::v4i32, dl,
CurDAG->getConstant(0x80808080, MVT::i32),
CurDAG->getConstant(0x80808003, MVT::i32),
CurDAG->getConstant(0x80808080, MVT::i32),

@ -920,7 +920,7 @@ LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble());
SDValue T = DAG.getConstant(dbits, MVT::i64);
SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
SDValue Tvec = DAG.getBUILD_VECTOR(MVT::v2i64, dl, T, T);
return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Tvec));
}
@ -1620,8 +1620,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
// NOTE: pretend the constant is an integer. LLVM won't load FP constants
SDValue T = DAG.getConstant(Value32, MVT::i32);
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,
DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::v4i32, T, T, T, T));
DAG.getBUILD_VECTOR(MVT::v4i32, dl, T, T, T, T));
break;
}
case MVT::v2f64: {
@ -1631,7 +1630,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
// NOTE: pretend the constant is an integer. LLVM won't load FP constants
SDValue T = DAG.getConstant(f64val, MVT::i64);
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
DAG.getBUILD_VECTOR(MVT::v2i64, dl, T, T));
break;
}
case MVT::v16i8: {
@ -1641,7 +1640,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
for (int i = 0; i < 8; ++i)
Ops[i] = DAG.getConstant(Value16, MVT::i16);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, Ops, 8));
DAG.getBUILD_VECTOR(MVT::v8i16, dl, Ops, 8));
}
case MVT::v8i16: {
unsigned short Value16;
@ -1652,17 +1651,17 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
SDValue T = DAG.getConstant(Value16, VT.getVectorElementType());
SDValue Ops[8];
for (int i = 0; i < 8; ++i) Ops[i] = T;
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops, 8);
return DAG.getBUILD_VECTOR(VT, dl, Ops, 8);
}
case MVT::v4i32: {
unsigned int Value = SplatBits;
SDValue T = DAG.getConstant(Value, VT.getVectorElementType());
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T, T, T);
return DAG.getBUILD_VECTOR(VT, dl, T, T, T, T);
}
case MVT::v2i32: {
unsigned int Value = SplatBits;
SDValue T = DAG.getConstant(Value, VT.getVectorElementType());
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T);
return DAG.getBUILD_VECTOR(VT, dl, T, T);
}
case MVT::v2i64: {
return SPU::LowerSplat_v2i64(VT, DAG, SplatBits, dl);
@ -1682,8 +1681,8 @@ SPU::LowerSplat_v2i64(MVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
// Magic constant that can be matched by IL, ILA, et. al.
SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
return DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
Val, Val, Val, Val));
DAG.getBUILD_VECTOR(MVT::v4i32, dl,
Val, Val, Val, Val));
} else {
SDValue LO32;
SDValue HI32;
@ -1703,16 +1702,16 @@ SPU::LowerSplat_v2i64(MVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
if (!lower_special) {
SDValue LO32C = DAG.getConstant(lower, MVT::i32);
LO32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
LO32C, LO32C, LO32C, LO32C));
DAG.getBUILD_VECTOR(MVT::v4i32, dl,
LO32C, LO32C, LO32C, LO32C));
}
// Create upper vector if not a special pattern
if (!upper_special) {
SDValue HI32C = DAG.getConstant(upper, MVT::i32);
HI32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
HI32C, HI32C, HI32C, HI32C));
DAG.getBUILD_VECTOR(MVT::v4i32, dl,
HI32C, HI32C, HI32C, HI32C));
}
// If either upper or lower are special, then the two input operands are
@ -1725,8 +1724,8 @@ SPU::LowerSplat_v2i64(MVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
// Unhappy situation... both upper and lower are special, so punt with
// a target constant:
SDValue Zero = DAG.getConstant(0, MVT::i32);
HI32 = LO32 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Zero, Zero,
Zero, Zero);
HI32 = LO32 = DAG.getBUILD_VECTOR(MVT::v4i32, dl, Zero, Zero,
Zero, Zero);
}
for (int i = 0; i < 4; ++i) {
@ -1756,8 +1755,8 @@ SPU::LowerSplat_v2i64(MVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
}
return DAG.getNode(SPUISD::SHUFB, dl, OpVT, HI32, LO32,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
&ShufBytes[0], ShufBytes.size()));
DAG.getBUILD_VECTOR(MVT::v4i32, dl,
&ShufBytes[0], ShufBytes.size()));
}
}
@ -1886,8 +1885,8 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
}
}
SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
&ResultMask[0], ResultMask.size());
SDValue VPermMask = DAG.getBUILD_VECTOR(MVT::v16i8, dl,
&ResultMask[0], ResultMask.size());
return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V1, V2, VPermMask);
}
}
@ -1921,8 +1920,8 @@ static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
for (size_t j = 0; j < n_copies; ++j)
ConstVecValues.push_back(CValue);
return DAG.getNode(ISD::BUILD_VECTOR, dl, Op.getValueType(),
&ConstVecValues[0], ConstVecValues.size());
return DAG.getBUILD_VECTOR(Op.getValueType(), dl,
&ConstVecValues[0], ConstVecValues.size());
} else {
// Otherwise, copy the value from one register to another:
switch (Op0.getValueType().getSimpleVT()) {
@ -2022,9 +2021,9 @@ static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
ShufMask[i] = DAG.getConstant(bits, MVT::i32);
}
SDValue ShufMaskVec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
&ShufMask[0],
sizeof(ShufMask) / sizeof(ShufMask[0]));
SDValue ShufMaskVec =
DAG.getBUILD_VECTOR(MVT::v4i32, dl,
&ShufMask[0], sizeof(ShufMask)/sizeof(ShufMask[0]));
retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
DAG.getNode(SPUISD::SHUFB, dl, N.getValueType(),
@ -2067,29 +2066,29 @@ static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
/*NOTREACHED*/
case MVT::i8: {
SDValue factor = DAG.getConstant(0x00000000, MVT::i32);
replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, factor, factor,
factor, factor);
replicate = DAG.getBUILD_VECTOR(MVT::v4i32, dl, factor, factor,
factor, factor);
break;
}
case MVT::i16: {
SDValue factor = DAG.getConstant(0x00010001, MVT::i32);
replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, factor, factor,
factor, factor);
replicate = DAG.getBUILD_VECTOR(MVT::v4i32, dl, factor, factor,
factor, factor);
break;
}
case MVT::i32:
case MVT::f32: {
SDValue factor = DAG.getConstant(0x00010203, MVT::i32);
replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, factor, factor,
factor, factor);
replicate = DAG.getBUILD_VECTOR(MVT::v4i32, dl, factor, factor,
factor, factor);
break;
}
case MVT::i64:
case MVT::f64: {
SDValue loFactor = DAG.getConstant(0x00010203, MVT::i32);
SDValue hiFactor = DAG.getConstant(0x04050607, MVT::i32);
replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
loFactor, hiFactor, loFactor, hiFactor);
replicate = DAG.getBUILD_VECTOR(MVT::v4i32, dl,
loFactor, hiFactor, loFactor, hiFactor);
break;
}
}
@ -2249,8 +2248,8 @@ SDValue SPU::getCarryGenerateShufMask(SelectionDAG &DAG, DebugLoc dl) {
ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, MVT::i32));
ShufBytes.push_back(DAG.getConstant(0x80808080, MVT::i32));
return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
&ShufBytes[0], ShufBytes.size());
return DAG.getBUILD_VECTOR(MVT::v4i32, dl,
&ShufBytes[0], ShufBytes.size());
}
//! Generate the borrow-generate shuffle mask
@ -2264,8 +2263,8 @@ SDValue SPU::getBorrowGenerateShufMask(SelectionDAG &DAG, DebugLoc dl) {
ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, MVT::i32));
ShufBytes.push_back(DAG.getConstant(0xc0c0c0c0, MVT::i32));
return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
&ShufBytes[0], ShufBytes.size());
return DAG.getBUILD_VECTOR(MVT::v4i32, dl,
&ShufBytes[0], ShufBytes.size());
}
//! Lower byte immediate operations for v16i8 vectors:
@ -2309,8 +2308,7 @@ LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
tcVec[i] = tc;
return DAG.getNode(Op.getNode()->getOpcode(), dl, VT, Arg,
DAG.getNode(ISD::BUILD_VECTOR, dl, VT,
tcVec, tcVecSize));
DAG.getBUILD_VECTOR(VT, dl, tcVec, tcVecSize));
}
}
@ -2663,11 +2661,11 @@ static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
unsigned maskHigh = 0x08090a0b;
unsigned maskLow = 0x0c0d0e0f;
// Use a shuffle to perform the truncation
SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
DAG.getConstant(maskHigh, MVT::i32),
DAG.getConstant(maskLow, MVT::i32),
DAG.getConstant(maskHigh, MVT::i32),
DAG.getConstant(maskLow, MVT::i32));
SDValue shufMask = DAG.getBUILD_VECTOR(MVT::v4i32, dl,
DAG.getConstant(maskHigh, MVT::i32),
DAG.getConstant(maskLow, MVT::i32),
DAG.getConstant(maskHigh, MVT::i32),
DAG.getConstant(maskLow, MVT::i32));
SDValue PromoteScalar = DAG.getNode(SPUISD::PREFSLOT2VEC, dl,

@ -3093,100 +3093,6 @@ SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) {
// Vector related lowering.
//
// If this is a vector of constants or undefs, get the bits. A bit in
// UndefBits is set if the corresponding element of the vector is an
// ISD::UNDEF value. For undefs, the corresponding VectorBits values are
// zero. Return true if this is not an array of constants, false if it is.
//
static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2],
uint64_t UndefBits[2]) {
// Start with zero'd results.
VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0;
unsigned EltBitSize = BV->getOperand(0).getValueType().getSizeInBits();
for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
SDValue OpVal = BV->getOperand(i);
unsigned PartNo = i >= e/2; // In the upper 128 bits?
unsigned SlotNo = e/2 - (i & (e/2-1))-1; // Which subpiece of the uint64_t.
uint64_t EltBits = 0;
if (OpVal.getOpcode() == ISD::UNDEF) {
uint64_t EltUndefBits = ~0U >> (32-EltBitSize);
UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize);
continue;
} else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
EltBits = CN->getZExtValue() & (~0U >> (32-EltBitSize));
} else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
assert(CN->getValueType(0) == MVT::f32 &&
"Only one legal FP vector type!");
EltBits = FloatToBits(CN->getValueAPF().convertToFloat());
} else {
// Nonconstant element.
return true;
}
VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize);
}
//printf("%llx %llx %llx %llx\n",
// VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]);
return false;
}
// If this is a splat (repetition) of a value across the whole vector, return
// the smallest size that splats it. For example, "0x01010101010101..." is a
// splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and
// SplatSize = 1 byte.
static bool isConstantSplat(const uint64_t Bits128[2],
const uint64_t Undef128[2],
unsigned &SplatBits, unsigned &SplatUndef,
unsigned &SplatSize) {
// Don't let undefs prevent splats from matching. See if the top 64-bits are
// the same as the lower 64-bits, ignoring undefs.
if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0]))
return false; // Can't be a splat if two pieces don't match.
uint64_t Bits64 = Bits128[0] | Bits128[1];
uint64_t Undef64 = Undef128[0] & Undef128[1];
// Check that the top 32-bits are the same as the lower 32-bits, ignoring
// undefs.
if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64))
return false; // Can't be a splat if two pieces don't match.
uint32_t Bits32 = uint32_t(Bits64) | uint32_t(Bits64 >> 32);
uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32);
// If the top 16-bits are different than the lower 16-bits, ignoring
// undefs, we have an i32 splat.
if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) {
SplatBits = Bits32;
SplatUndef = Undef32;
SplatSize = 4;
return true;
}
uint16_t Bits16 = uint16_t(Bits32) | uint16_t(Bits32 >> 16);
uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16);
// If the top 8-bits are different than the lower 8-bits, ignoring
// undefs, we have an i16 splat.
if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) {
SplatBits = Bits16;
SplatUndef = Undef16;
SplatSize = 2;
return true;
}
// Otherwise, we have an 8-bit splat.
SplatBits = uint8_t(Bits16) | uint8_t(Bits16 >> 8);
SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8);
SplatSize = 1;
return true;
}
/// BuildSplatI - Build a canonical splati of Val with an element size of
/// SplatSize. Cast the result to VT.
static SDValue BuildSplatI(int Val, unsigned SplatSize, MVT VT,
@ -3209,8 +3115,7 @@ static SDValue BuildSplatI(int Val, unsigned SplatSize, MVT VT,
SDValue Elt = DAG.getConstant(Val, CanonicalVT.getVectorElementType());
SmallVector<SDValue, 8> Ops;
Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT,
&Ops[0], Ops.size());
SDValue Res = DAG.getBUILD_VECTOR(CanonicalVT, dl, &Ops[0], Ops.size());
return DAG.getNode(ISD::BIT_CONVERT, dl, ReqVT, Res);
}
@ -3247,7 +3152,7 @@ static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
for (unsigned i = 0; i != 16; ++i)
Ops[i] = DAG.getConstant(i+Amt, MVT::i8);
SDValue T = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v16i8, LHS, RHS,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops,16));
DAG.getBUILD_VECTOR(MVT::v16i8, dl, Ops,16));
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T);
}
@ -3262,19 +3167,19 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
// UndefBits is set if the corresponding element of the vector is an
// ISD::UNDEF value. For undefs, the corresponding VectorBits values are
// zero.
uint64_t VectorBits[2];
uint64_t UndefBits[2];
DebugLoc dl = Op.getDebugLoc();
if (GetConstantBuildVectorBits(Op.getNode(), VectorBits, UndefBits))
return SDValue(); // Not a constant vector.
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
// If this is a splat (repetition) of a value across the whole vector, return
// the smallest size that splats it. For example, "0x01010101010101..." is a
// splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and
// SplatSize = 1 byte.
unsigned SplatBits, SplatUndef, SplatSize;
if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){
bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0;
if (BVN->isConstantSplat()) {
uint64_t SplatBits = BVN->getSplatBits();
uint64_t SplatUndef = BVN->getSplatUndef();
unsigned SplatSize = BVN->getSplatSize();
bool HasAnyUndefs = BVN->hasAnyUndefBits();
// First, handle single instruction cases.
@ -3283,7 +3188,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
// Canonicalize all zero vectors to be v4i32.
if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
SDValue Z = DAG.getConstant(0, MVT::i32);
Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z);
Z = DAG.getBUILD_VECTOR(MVT::v4i32, dl, Z, Z, Z, Z);
Op = DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Z);
}
return Op;
@ -3496,7 +3401,7 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, OpLHS.getValueType(),
OpLHS, OpRHS,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops, 16));
DAG.getBUILD_VECTOR(MVT::v16i8, dl, Ops, 16));
}
/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
@ -3619,8 +3524,8 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
MVT::i8));
}
SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
&ResultMask[0], ResultMask.size());
SDValue VPermMask = DAG.getBUILD_VECTOR(MVT::v16i8, dl,
&ResultMask[0], ResultMask.size());
return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask);
}
@ -3808,7 +3713,7 @@ SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) {
Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8);
}
return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v16i8, EvenParts, OddParts,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops, 16));
DAG.getBUILD_VECTOR(MVT::v16i8, dl, Ops, 16));
} else {
assert(0 && "Unknown mul to lower!");
abort();

@ -2769,7 +2769,7 @@ static SDValue CommuteVectorShuffle(SDValue Op, SDValue &V1,
}
std::swap(V1, V2);
Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, &MaskVec[0], NumElems);
Mask = DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], NumElems);
return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2, Mask);
}
@ -2794,7 +2794,7 @@ SDValue CommuteVectorShuffleMask(SDValue Mask, SelectionDAG &DAG, DebugLoc dl) {
else
MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, &MaskVec[0], NumElems);
return DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], NumElems);
}
@ -2945,13 +2945,13 @@ static SDValue getZeroVector(MVT VT, bool HasSSE2, SelectionDAG &DAG,
SDValue Vec;
if (VT.getSizeInBits() == 64) { // MMX
SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
Vec = DAG.getBUILD_VECTOR(MVT::v2i32, dl, Cst, Cst);
} else if (HasSSE2) { // SSE2
SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
Vec = DAG.getBUILD_VECTOR(MVT::v4i32, dl, Cst, Cst, Cst, Cst);
} else { // SSE1
SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
Vec = DAG.getBUILD_VECTOR(MVT::v4f32, dl, Cst, Cst, Cst, Cst);
}
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
}
@ -2966,9 +2966,9 @@ static SDValue getOnesVector(MVT VT, SelectionDAG &DAG, DebugLoc dl) {
SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
SDValue Vec;
if (VT.getSizeInBits() == 64) // MMX
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
Vec = DAG.getBUILD_VECTOR(MVT::v2i32, dl, Cst, Cst);
else // SSE
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
Vec = DAG.getBUILD_VECTOR(MVT::v4i32, dl, Cst, Cst, Cst, Cst);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
}
@ -2994,9 +2994,8 @@ static SDValue NormalizeMask(SDValue Mask, SelectionDAG &DAG) {
}
if (Changed)
Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getDebugLoc(),
Mask.getValueType(),
&MaskVec[0], MaskVec.size());
Mask = DAG.getBUILD_VECTOR(Mask.getValueType(), Mask.getDebugLoc(),
&MaskVec[0], MaskVec.size());
return Mask;
}
@ -3010,8 +3009,7 @@ static SDValue getMOVLMask(unsigned NumElems, SelectionDAG &DAG, DebugLoc dl) {
MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
for (unsigned i = 1; i != NumElems; ++i)
MaskVec.push_back(DAG.getConstant(i, BaseVT));
return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
&MaskVec[0], MaskVec.size());
return DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], MaskVec.size());
}
/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
@ -3025,8 +3023,7 @@ static SDValue getUnpacklMask(unsigned NumElems, SelectionDAG &DAG,
MaskVec.push_back(DAG.getConstant(i, BaseVT));
MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
&MaskVec[0], MaskVec.size());
return DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], MaskVec.size());
}
/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
@ -3041,8 +3038,7 @@ static SDValue getUnpackhMask(unsigned NumElems, SelectionDAG &DAG,
MaskVec.push_back(DAG.getConstant(i + Half, BaseVT));
MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
&MaskVec[0], MaskVec.size());
return DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], MaskVec.size());
}
/// getSwapEltZeroMask - Returns a vector_shuffle mask for a shuffle that swaps
@ -3057,8 +3053,7 @@ static SDValue getSwapEltZeroMask(unsigned NumElems, unsigned DestElt,
MaskVec.push_back(DAG.getConstant(DestElt, BaseVT));
for (unsigned i = 1; i != NumElems; ++i)
MaskVec.push_back(DAG.getConstant(i == DestElt ? 0 : i, BaseVT));
return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
&MaskVec[0], MaskVec.size());
return DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], MaskVec.size());
}
/// PromoteSplat - Promote a splat of v4f32, v8i16 or v16i8 to v4i32.
@ -3089,7 +3084,7 @@ static SDValue PromoteSplat(SDValue Op, SelectionDAG &DAG, bool HasSSE2) {
NumElems >>= 1;
}
SDValue Cst = DAG.getConstant(EltNo, MVT::i32);
Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
Mask = DAG.getBUILD_VECTOR(MVT::v4i32, dl, Cst, Cst, Cst, Cst);
}
V1 = DAG.getNode(ISD::BIT_CONVERT, dl, PVT, V1);
@ -3125,13 +3120,12 @@ static SDValue CanonicalizeMovddup(SDValue Op, SDValue V1, SDValue Mask,
unsigned NumElems = PVT.getVectorNumElements();
if (NumElems == 2) {
SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
Mask = DAG.getBUILD_VECTOR(MVT::v2i32, dl, Cst, Cst);
} else {
assert(NumElems == 4);
SDValue Cst0 = DAG.getTargetConstant(0, MVT::i32);
SDValue Cst1 = DAG.getTargetConstant(1, MVT::i32);
Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
Cst0, Cst1, Cst0, Cst1);
Mask = DAG.getBUILD_VECTOR(MVT::v4i32, dl, Cst0, Cst1, Cst0, Cst1);
}
V1 = DAG.getNode(ISD::BIT_CONVERT, dl, PVT, V1);
@ -3160,8 +3154,7 @@ static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
MaskVec.push_back(DAG.getConstant(NumElems, EVT));
else
MaskVec.push_back(DAG.getConstant(i, EVT));
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
&MaskVec[0], MaskVec.size());
SDValue Mask = DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], MaskVec.size());
return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2, Mask);
}
@ -3447,8 +3440,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
SmallVector<SDValue, 8> MaskVec;
for (unsigned i = 0; i < NumElems; i++)
MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT));
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
&MaskVec[0], MaskVec.size());
SDValue Mask = DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], MaskVec.size());
return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, Item,
DAG.getUNDEF(VT), Mask);
}
@ -3537,8 +3529,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT));
else
MaskVec.push_back(DAG.getConstant(i+NumElems, EVT));
SDValue ShufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
&MaskVec[0], MaskVec.size());
SDValue ShufMask = DAG.getBUILD_VECTOR(MaskVT, dl,
&MaskVec[0], MaskVec.size());
return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V[0], V[1], ShufMask);
}
@ -3630,7 +3622,7 @@ SDValue LowerVECTOR_SHUFFLEv8i16(SDValue V1, SDValue V2,
else
MaskVec.push_back(DAG.getConstant(1, MVT::i32));
SDValue Mask= DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, &MaskVec[0],2);
SDValue Mask= DAG.getBUILD_VECTOR(MVT::v2i32, dl, &MaskVec[0],2);
NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v2i64,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V1),
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V2), Mask);
@ -3664,8 +3656,7 @@ SDValue LowerVECTOR_SHUFFLEv8i16(SDValue V1, SDValue V2,
if (AnyOutOrder) {
for (unsigned i = 4; i != 8; ++i)
MaskVec.push_back(DAG.getConstant(i, MaskEVT));
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
&MaskVec[0], 8);
SDValue Mask = DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], 8);
NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v8i16,
NewV, NewV, Mask);
}
@ -3699,8 +3690,7 @@ SDValue LowerVECTOR_SHUFFLEv8i16(SDValue V1, SDValue V2,
}
if (AnyOutOrder) {
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl,
MaskVT, &MaskVec[0], 8);
SDValue Mask = DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], 8);
NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v8i16,
NewV, NewV, Mask);
}
@ -3789,7 +3779,7 @@ SDValue LowerVECTOR_SHUFFLEv8i16(SDValue V1, SDValue V2,
else
MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT));
}
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, &MaskVec[0], 8);
SDValue Mask = DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], 8);
V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v8i16, V1, V1, Mask);
}
@ -3876,8 +3866,7 @@ SDValue RewriteAsNarrowerShuffle(SDValue V1, SDValue V2,
V1 = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, V1);
V2 = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, V2);
return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, NewVT, V1, V2,
DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
&MaskVec[0], MaskVec.size()));
DAG.getBUILD_VECTOR(MaskVT, dl, &MaskVec[0], MaskVec.size()));
}
/// getVZextMovL - Return a zero-extending vector move low node.
@ -3954,8 +3943,7 @@ LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2,
// The second shuffle, which takes the first shuffle as both of its
// vector operands, put the elements into the right order.
V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2,
DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
&Mask1[0], Mask1.size()));
DAG.getBUILD_VECTOR(MaskVT, dl, &Mask1[0], Mask1.size()));
SmallVector<SDValue, 8> Mask2(4, DAG.getUNDEF(MaskEVT));
for (unsigned i = 0; i != 4; ++i) {
@ -3969,8 +3957,8 @@ LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2,
}
return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V1,
DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
&Mask2[0], Mask2.size()));
DAG.getBUILD_VECTOR(MaskVT, dl,
&Mask2[0], Mask2.size()));
} else if (NumLo == 3 || NumHi == 3) {
// Otherwise, we must have three elements from one vector, call it X, and
// one element from the other, call it Y. First, use a shufps to build an
@ -4001,7 +3989,7 @@ LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2,
Mask1[2] = PermMask.getOperand(HiIndex^1);
Mask1[3] = DAG.getUNDEF(MaskEVT);
V2 = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2,
DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, &Mask1[0], 4));
DAG.getBUILD_VECTOR(MaskVT, dl, &Mask1[0], 4));
if (HiIndex >= 2) {
Mask1[0] = PermMask.getOperand(0);
@ -4009,8 +3997,7 @@ LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2,
Mask1[2] = DAG.getConstant(HiIndex & 1 ? 6 : 4, MaskEVT);
Mask1[3] = DAG.getConstant(HiIndex & 1 ? 4 : 6, MaskEVT);
return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2,
DAG.getNode(ISD::BUILD_VECTOR, dl,
MaskVT, &Mask1[0], 4));
DAG.getBUILD_VECTOR(MaskVT, dl, &Mask1[0], 4));
} else {
Mask1[0] = DAG.getConstant(HiIndex & 1 ? 2 : 0, MaskEVT);
Mask1[1] = DAG.getConstant(HiIndex & 1 ? 0 : 2, MaskEVT);
@ -4025,8 +4012,7 @@ LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2,
DAG.getConstant(cast<ConstantSDNode>(Mask1[3])->getZExtValue()+4,
MaskEVT);
return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V2, V1,
DAG.getNode(ISD::BUILD_VECTOR, dl,
MaskVT, &Mask1[0], 4));
DAG.getBUILD_VECTOR(MaskVT, dl, &Mask1[0], 4));
}
}
@ -4060,10 +4046,10 @@ LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2,
}
SDValue LoShuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2,
DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
DAG.getBUILD_VECTOR(MaskVT, dl,
&LoMask[0], LoMask.size()));
SDValue HiShuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2,
DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
DAG.getBUILD_VECTOR(MaskVT, dl,
&HiMask[0], HiMask.size()));
SmallVector<SDValue, 8> MaskOps;
for (unsigned i = 0; i != 4; ++i) {
@ -4075,8 +4061,7 @@ LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2,
}
}
return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, LoShuffle, HiShuffle,
DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
&MaskOps[0], MaskOps.size()));
DAG.getBUILD_VECTOR(MaskVT, dl, &MaskOps[0], MaskOps.size()));
}
SDValue
@ -4403,8 +4388,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
push_back(DAG.getUNDEF(MaskVT.getVectorElementType()));
IdxVec.
push_back(DAG.getUNDEF(MaskVT.getVectorElementType()));
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
&IdxVec[0], IdxVec.size());
SDValue Mask = DAG.getBUILD_VECTOR(MaskVT, dl, &IdxVec[0], IdxVec.size());
SDValue Vec = Op.getOperand(0);
Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, Vec.getValueType(),
Vec, DAG.getUNDEF(Vec.getValueType()), Mask);
@ -4426,8 +4410,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
IdxVec.push_back(DAG.getConstant(1, MaskVT.getVectorElementType()));
IdxVec.
push_back(DAG.getUNDEF(MaskVT.getVectorElementType()));
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
&IdxVec[0], IdxVec.size());
SDValue Mask = DAG.getBUILD_VECTOR(MaskVT, dl, &IdxVec[0], IdxVec.size());
SDValue Vec = Op.getOperand(0);
Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, Vec.getValueType(),
Vec, DAG.getUNDEF(Vec.getValueType()),
@ -4928,13 +4911,13 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) {
MaskVec.push_back(DAG.getConstant(4, MVT::i32));
MaskVec.push_back(DAG.getConstant(1, MVT::i32));
MaskVec.push_back(DAG.getConstant(5, MVT::i32));
SDValue UnpcklMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
&MaskVec[0], MaskVec.size());
SDValue UnpcklMask = DAG.getBUILD_VECTOR(MVT::v4i32, dl,
&MaskVec[0], MaskVec.size());
SmallVector<SDValue, 4> MaskVec2;
MaskVec2.push_back(DAG.getConstant(1, MVT::i32));
MaskVec2.push_back(DAG.getConstant(0, MVT::i32));
SDValue ShufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32,
&MaskVec2[0], MaskVec2.size());
SDValue ShufMask = DAG.getBUILD_VECTOR(MVT::v2i32, dl,
&MaskVec2[0], MaskVec2.size());
SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
@ -5399,8 +5382,7 @@ SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()),
EltVT);
std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit);
SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &SignBits[0],
SignBits.size());
SDValue SignVec = DAG.getBUILD_VECTOR(VT, dl, &SignBits[0], SignBits.size());
Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec);
Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec);
}

@ -1,5 +1,5 @@
; RUN: llvm-as < %s | llc -march=x86 -mattr=sse2 -disable-mmx -o %t -f
; RUN: grep divdi3 %t | count 2
; RUN: grep divdi3 %t | grep call | count 2
; Test case for r63760 where we generate a legalization assert that an illegal
@ -12,4 +12,4 @@
define <2 x i64> @test_long_div(<2 x i64> %num, <2 x i64> %div) {
%div.r = sdiv <2 x i64> %num, %div
ret <2 x i64> %div.r
}
}