diff --git a/include/llvm/Support/PatternMatch.h b/include/llvm/Support/PatternMatch.h
index d68c265e9d3..221fa8b3ebf 100644
--- a/include/llvm/Support/PatternMatch.h
+++ b/include/llvm/Support/PatternMatch.h
@@ -98,12 +98,19 @@ struct apint_match {
       Res = &CI->getValue();
       return true;
     }
+    // FIXME: Remove this.
     if (ConstantVector *CV = dyn_cast<ConstantVector>(V))
       if (ConstantInt *CI =
           dyn_cast_or_null<ConstantInt>(CV->getSplatValue())) {
         Res = &CI->getValue();
         return true;
       }
+    if (ConstantDataVector *CV = dyn_cast<ConstantDataVector>(V))
+      if (ConstantInt *CI =
+          dyn_cast_or_null<ConstantInt>(CV->getSplatValue())) {
+        Res = &CI->getValue();
+        return true;
+      }
     return false;
   }
 };
@@ -144,9 +151,13 @@ struct cst_pred_ty : public Predicate {
   bool match(ITy *V) {
     if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
       return this->isValue(CI->getValue());
+    // FIXME: Remove this.
     if (const ConstantVector *CV = dyn_cast<ConstantVector>(V))
       if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue()))
         return this->isValue(CI->getValue());
+    if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(V))
+      if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue()))
+        return this->isValue(CI->getValue());
     return false;
   }
 };
@@ -164,12 +175,22 @@ struct api_pred_ty : public Predicate {
         Res = &CI->getValue();
         return true;
       }
+
+    // FIXME: remove.
     if (const ConstantVector *CV = dyn_cast<ConstantVector>(V))
       if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue()))
         if (this->isValue(CI->getValue())) {
           Res = &CI->getValue();
           return true;
         }
+
+    if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(V))
+      if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue()))
+        if (this->isValue(CI->getValue())) {
+          Res = &CI->getValue();
+          return true;
+        }
+
     return false;
   }
 };
@@ -611,11 +632,11 @@ struct not_match {
   }
 private:
   bool matchIfNot(Value *LHS, Value *RHS) {
-    if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS))
-      return CI->isAllOnesValue() && L.match(LHS);
-    if (ConstantVector *CV = dyn_cast<ConstantVector>(RHS))
-      return CV->isAllOnesValue() && L.match(LHS);
-    return false;
+    return (isa<ConstantInt>(RHS) || isa<ConstantDataVector>(RHS) ||
+            // FIXME: Remove CV.
+            isa<ConstantVector>(RHS)) &&
+           cast<Constant>(RHS)->isAllOnesValue() &&
+           L.match(LHS);
   }
 };
 
@@ -638,9 +659,9 @@ struct neg_match {
   }
 private:
   bool matchIfNeg(Value *LHS, Value *RHS) {
-    if (ConstantInt *C = dyn_cast<ConstantInt>(LHS))
-      return C->isZero() && L.match(RHS);
-    return false;
+    return ((isa<ConstantInt>(LHS) && cast<ConstantInt>(LHS)->isZero()) ||
+            isa<ConstantAggregateZero>(LHS)) &&
+           L.match(RHS);
   }
 };
 
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 6a49e6d98ab..fe28926f1b2 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -65,17 +65,17 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
   }
 
   // If this is a bitcast from constant vector -> vector, fold it.
-  ConstantVector *CV = dyn_cast<ConstantVector>(C);
-  if (CV == 0)
+  // FIXME: Remove ConstantVector support.
+  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
     return ConstantExpr::getBitCast(C, DestTy);
 
   // If the element types match, VMCore can fold it.
   unsigned NumDstElt = DestVTy->getNumElements();
-  unsigned NumSrcElt = CV->getNumOperands();
+  unsigned NumSrcElt = C->getType()->getVectorNumElements();
   if (NumDstElt == NumSrcElt)
     return ConstantExpr::getBitCast(C, DestTy);
 
-  Type *SrcEltTy = CV->getType()->getElementType();
+  Type *SrcEltTy = C->getType()->getVectorElementType();
   Type *DstEltTy = DestVTy->getElementType();
 
   // Otherwise, we're changing the number of elements in a vector, which
@@ -95,7 +95,6 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
       VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
     // Recursively handle this integer conversion, if possible.
     C = FoldBitCast(C, DestIVTy, TD);
-    if (!C) return ConstantExpr::getBitCast(C, DestTy);
 
     // Finally, VMCore can handle this now that #elts line up.
     return ConstantExpr::getBitCast(C, DestTy);
@@ -109,8 +108,9 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
       VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
     // Ask VMCore to do the conversion now that #elts line up.
     C = ConstantExpr::getBitCast(C, SrcIVTy);
-    CV = dyn_cast<ConstantVector>(C);
-    if (!CV)  // If VMCore wasn't able to fold it, bail out.
+    // If VMCore wasn't able to fold it, bail out.
+    if (!isa<ConstantVector>(C) &&  // FIXME: Remove ConstantVector.
+        !isa<ConstantDataVector>(C))
       return C;
   }
 
@@ -132,7 +132,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
       Constant *Elt = Zero;
       unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
       for (unsigned j = 0; j != Ratio; ++j) {
-        Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(SrcElt++));
+        Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
         if (!Src)  // Reject constantexpr elements.
           return ConstantExpr::getBitCast(C, DestTy);
 
@@ -149,28 +149,29 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
       }
       Result.push_back(Elt);
     }
-  } else {
-    // Handle: bitcast (<2 x i64> to <4 x i32>)
-    unsigned Ratio = NumDstElt/NumSrcElt;
-    unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
+    return ConstantVector::get(Result);
+  }
+
+  // Handle: bitcast (<2 x i64> to <4 x i32>)
+  unsigned Ratio = NumDstElt/NumSrcElt;
+  unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
+
+  // Loop over each source value, expanding into multiple results.
+  for (unsigned i = 0; i != NumSrcElt; ++i) {
+    Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
+    if (!Src)  // Reject constantexpr elements.
+      return ConstantExpr::getBitCast(C, DestTy);
 
-    // Loop over each source value, expanding into multiple results.
-    for (unsigned i = 0; i != NumSrcElt; ++i) {
-      Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(i));
-      if (!Src)  // Reject constantexpr elements.
-        return ConstantExpr::getBitCast(C, DestTy);
+    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
+    for (unsigned j = 0; j != Ratio; ++j) {
+      // Shift the piece of the value into the right place, depending on
+      // endianness.
+      Constant *Elt = ConstantExpr::getLShr(Src,
+                                  ConstantInt::get(Src->getType(), ShiftAmt));
+      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
 
-      unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
-      for (unsigned j = 0; j != Ratio; ++j) {
-        // Shift the piece of the value into the right place, depending on
-        // endianness.
-        Constant *Elt = ConstantExpr::getLShr(Src,
-                                    ConstantInt::get(Src->getType(), ShiftAmt));
-        ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
-
-        // Truncate and remember this piece.
-        Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
-      }
+      // Truncate and remember this piece.
+      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
     }
   }
 
@@ -311,6 +312,7 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
     // not reached.
   }
 
+  // FIXME: Remove ConstantVector
   if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
       isa<ConstantDataSequential>(C)) {
     Type *EltTy = cast<SequentialType>(C->getType())->getElementType();
@@ -1115,11 +1117,8 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
 /// available for the result. Returns null if the conversion cannot be
 /// performed, otherwise returns the Constant value resulting from the
 /// conversion.
-static Constant *ConstantFoldConvertToInt(ConstantFP *Op, bool roundTowardZero,
-                                          Type *Ty) {
-  assert(Op && "Called with NULL operand");
-  APFloat Val(Op->getValueAPF());
-
+static Constant *ConstantFoldConvertToInt(const APFloat &Val,
+                                          bool roundTowardZero, Type *Ty) {
   // All of these conversion intrinsics form an integer of at most 64bits.
   unsigned ResultWidth = cast<IntegerType>(Ty)->getBitWidth();
   assert(ResultWidth <= 64 &&
@@ -1271,24 +1270,31 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
       }
     }
 
-    if (ConstantVector *Op = dyn_cast<ConstantVector>(Operands[0])) {
+    // Support ConstantVector in case we have an Undef in the top.
+    if (isa<ConstantVector>(Operands[0]) ||
+        isa<ConstantDataVector>(Operands[0])) {
+      Constant *Op = cast<Constant>(Operands[0]);
       switch (F->getIntrinsicID()) {
       default: break;
       case Intrinsic::x86_sse_cvtss2si:
       case Intrinsic::x86_sse_cvtss2si64:
       case Intrinsic::x86_sse2_cvtsd2si:
       case Intrinsic::x86_sse2_cvtsd2si64:
-        if (ConstantFP *FPOp = dyn_cast<ConstantFP>(Op->getOperand(0)))
-          return ConstantFoldConvertToInt(FPOp, /*roundTowardZero=*/false, Ty);
+        if (ConstantFP *FPOp =
+              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
+          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
+                                          /*roundTowardZero=*/false, Ty);
       case Intrinsic::x86_sse_cvttss2si:
       case Intrinsic::x86_sse_cvttss2si64:
       case Intrinsic::x86_sse2_cvttsd2si:
       case Intrinsic::x86_sse2_cvttsd2si64:
-        if (ConstantFP *FPOp = dyn_cast<ConstantFP>(Op->getOperand(0)))
-          return ConstantFoldConvertToInt(FPOp, /*roundTowardZero=*/true, Ty);
+        if (ConstantFP *FPOp =
+              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
+          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
+                                          /*roundTowardZero=*/true, Ty);
       }
     }
-
+
     if (isa<UndefValue>(Operands[0])) {
       if (F->getIntrinsicID() == Intrinsic::bswap)
         return Operands[0];
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 21008a14678..6403f03b01c 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -89,6 +89,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
   }
   // Handle a constant vector by taking the intersection of the known bits of
   // each element.
+  // FIXME: Remove.
   if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
     KnownZero.setAllBits(); KnownOne.setAllBits();
     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
@@ -1005,30 +1006,28 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
     Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
     return ComputeNumSignBits(U->getOperand(0), TD, Depth+1) + Tmp;
 
-  case Instruction::AShr:
+  case Instruction::AShr: {
     Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
-    // ashr X, C -> adds C sign bits.
-    if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
-      Tmp += C->getZExtValue();
+    // ashr X, C -> adds C sign bits. Vectors too.
+    const APInt *ShAmt;
+    if (match(U->getOperand(1), m_APInt(ShAmt))) {
+      Tmp += ShAmt->getZExtValue();
       if (Tmp > TyBits) Tmp = TyBits;
     }
-    // vector ashr X, <C, C, C, C> -> adds C sign bits
-    if (ConstantVector *C = dyn_cast<ConstantVector>(U->getOperand(1))) {
-      if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) {
-        Tmp += CI->getZExtValue();
-        if (Tmp > TyBits) Tmp = TyBits;
-      }
-    }
     return Tmp;
-  case Instruction::Shl:
-    if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
+  }
+  case Instruction::Shl: {
+    const APInt *ShAmt;
+    if (match(U->getOperand(1), m_APInt(ShAmt))) {
       // shl destroys sign bits.
       Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
-      if (C->getZExtValue() >= TyBits ||   // Bad shift.
-          C->getZExtValue() >= Tmp) break; // Shifted all sign bits out.
-      return Tmp - C->getZExtValue();
+      Tmp2 = ShAmt->getZExtValue();
+      if (Tmp2 >= TyBits ||   // Bad shift.
+          Tmp2 >= Tmp) break; // Shifted all sign bits out.
+      return Tmp - Tmp2;
     }
     break;
+  }
   case Instruction::And:
   case Instruction::Or:
   case Instruction::Xor:  // NOT is handled here.
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index 6bfd97055c8..a7204670b1f 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -655,9 +655,12 @@ static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
   if (A->getType() == B->getType()) return false;
 
   // For now, only support constants with the same size.
-  if (TD->getTypeStoreSize(A->getType()) != TD->getTypeStoreSize(B->getType()))
+  uint64_t StoreSize = TD->getTypeStoreSize(A->getType());
+  if (StoreSize != TD->getTypeStoreSize(B->getType()) ||
+      StoreSize > 128)
     return false;
+
   // If a floating-point value and an integer value have the same encoding,
   // they can share a constant-pool entry.
   if (const ConstantFP *AFP = dyn_cast<ConstantFP>(A))
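
A minimal sketch (not part of the patch) of the behaviour the PatternMatch changes above are meant to preserve: m_APInt should still see through an i32 splat once splat constants are uniqued as ConstantDataVector rather than ConstantVector. The helper name splatMatchesAPInt is made up for illustration, and the include paths assume the tree layout this patch targets (pre llvm/IR reorganization).

// Illustrative sketch, assuming ConstantDataVector and m_APInt are available
// as in the patched tree; splatMatchesAPInt is a hypothetical helper, not
// LLVM API.
#include "llvm/ADT/APInt.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/PatternMatch.h"

using namespace llvm;
using namespace llvm::PatternMatch;

static bool splatMatchesAPInt(LLVMContext &Ctx) {
  // Build a <4 x i32> splat of 7; with this patch series it is uniqued as a
  // ConstantDataVector instead of a ConstantVector.
  Constant *Elt = ConstantInt::get(Type::getInt32Ty(Ctx), 7);
  Constant *Splat = ConstantDataVector::getSplat(4, Elt);

  // apint_match (and therefore m_APInt) looks through the splat and hands
  // back the element value, exactly as it did for ConstantVector splats.
  const APInt *Val;
  return match(Splat, m_APInt(Val)) && Val->getZExtValue() == 7;
}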