De-constify pointers to Type since they can't be modified. NFC

This was already done in most places a while ago. This just fixes the ones that crept in over time.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@243842 91177308-0d34-0410-b5e6-96231b3b80d8
Craig Topper 2015-08-01 22:20:21 +00:00
parent 6ebf387fdd
commit 84bbcfe200
30 changed files with 118 additions and 121 deletions
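
The change is mechanical throughout: as the commit message says, the Types reached through these pointers are never modified, so a const qualifier on a Type pointer buys nothing and mostly forces const_casts at container boundaries such as SmallPtrSet<Type *>. A minimal standalone sketch of the pattern, with hypothetical helper names that do not appear in this diff:

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// Before: bool isWiderThan(const Type *Ty, unsigned Bits);
// After: a plain Type* gives up nothing here, since nothing below mutates
// the type, and the pointer flows into SmallPtrSet<Type *> without casts.
static bool isWiderThan(Type *Ty, unsigned Bits) {
  return Ty->getPrimitiveSizeInBits() > Bits;
}

static bool seenBefore(Type *Ty, SmallPtrSetImpl<Type *> &Visited) {
  return !Visited.insert(Ty).second; // no const_cast needed at call sites
}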

@@ -573,7 +573,7 @@ public:
 namespace AttributeFuncs {
 /// \brief Which attributes cannot be applied to a type.
-AttrBuilder typeIncompatible(const Type *Ty);
+AttrBuilder typeIncompatible(Type *Ty);
 } // end AttributeFuncs namespace

@@ -590,7 +590,7 @@ public:
 /// formed with a vector or array of the specified element type.
 /// ConstantDataArray only works with normal float and int types that are
 /// stored densely in memory, not with things like i42 or x86_f80.
-static bool isElementTypeCompatible(const Type *Ty);
+static bool isElementTypeCompatible(Type *Ty);
 /// getElementAsInteger - If this is a sequential container of integers (of
 /// any size), return the specified element in the low bits of a uint64_t.

@@ -250,7 +250,7 @@ public:
 bool isOpaque() const { return (getSubclassData() & SCDB_HasBody) == 0; }
 /// isSized - Return true if this is a sized type.
-bool isSized(SmallPtrSetImpl<const Type*> *Visited = nullptr) const;
+bool isSized(SmallPtrSetImpl<Type*> *Visited = nullptr) const;
 /// hasName - Return true if this is a named struct that has a non-empty name.
 bool hasName() const { return SymbolTableEntry != nullptr; }

@@ -265,7 +265,7 @@ public:
 /// get the actual size for a particular target, it is reasonable to use the
 /// DataLayout subsystem to do this.
 ///
-bool isSized(SmallPtrSetImpl<const Type*> *Visited = nullptr) const {
+bool isSized(SmallPtrSetImpl<Type*> *Visited = nullptr) const {
 // If it's a primitive, it is always sized.
 if (getTypeID() == IntegerTyID || isFloatingPointTy() ||
 getTypeID() == PointerTyID ||
@@ -423,7 +423,7 @@ private:
 /// isSizedDerivedType - Derived types like structures and arrays are sized
 /// iff all of the members of the type are sized as well. Since asking for
 /// their size is relatively uncommon, move this operation out of line.
-bool isSizedDerivedType(SmallPtrSetImpl<const Type*> *Visited = nullptr) const;
+bool isSizedDerivedType(SmallPtrSetImpl<Type*> *Visited = nullptr) const;
 };
 // Printing of types.

@@ -830,11 +830,11 @@ static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
 /// \brief Check whether the access through \p Ptr has a constant stride.
 int llvm::isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
 const ValueToValueMap &StridesMap) {
-const Type *Ty = Ptr->getType();
+Type *Ty = Ptr->getType();
 assert(Ty->isPointerTy() && "Unexpected non-ptr");
 // Make sure that the pointer does not point to aggregate types.
-const PointerType *PtrTy = cast<PointerType>(Ty);
+auto *PtrTy = cast<PointerType>(Ty);
 if (PtrTy->getElementType()->isAggregateType()) {
 DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type"
 << *Ptr << "\n");

@@ -283,7 +283,7 @@ llvm::Value *llvm::getUniqueCastUse(llvm::Value *Ptr, Loop *Lp, Type *Ty) {
 /// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
 llvm::Value *llvm::getStrideFromPointer(llvm::Value *Ptr, ScalarEvolution *SE,
 Loop *Lp) {
-const PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
+auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
 if (!PtrTy || PtrTy->isAggregateType())
 return nullptr;

@@ -2905,7 +2905,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
 }
 }
-SmallPtrSet<const Type*, 4> Visited;
+SmallPtrSet<Type*, 4> Visited;
 if (!Indices.empty() && !Ty->isSized(&Visited))
 return Error(ID.Loc, "base element of getelementptr must be sized");
@@ -5817,7 +5817,7 @@ int LLParser::ParseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS) {
 Indices.push_back(Val);
 }
-SmallPtrSet<const Type*, 4> Visited;
+SmallPtrSet<Type*, 4> Visited;
 if (!Indices.empty() && !Ty->isSized(&Visited))
 return Error(Loc, "base element of getelementptr must be sized");

@@ -630,8 +630,8 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
 break;
 case Type::VectorTyID:
 // if the whole vector is 'undef' just reserve memory for the value.
-const VectorType* VTy = dyn_cast<VectorType>(C->getType());
-const Type *ElemTy = VTy->getElementType();
+auto* VTy = dyn_cast<VectorType>(C->getType());
+Type *ElemTy = VTy->getElementType();
 unsigned int elemNum = VTy->getNumElements();
 Result.AggregateVal.resize(elemNum);
 if (ElemTy->isIntegerTy())
@@ -1152,8 +1152,8 @@ void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
 break;
 }
 case Type::VectorTyID: {
-const VectorType *VT = cast<VectorType>(Ty);
-const Type *ElemT = VT->getElementType();
+auto *VT = cast<VectorType>(Ty);
+Type *ElemT = VT->getElementType();
 const unsigned numElems = VT->getNumElements();
 if (ElemT->isFloatTy()) {
 Result.AggregateVal.resize(numElems);

@@ -593,7 +593,7 @@ static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
 }
 static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
-const Type *Ty, const bool val) {
+Type *Ty, const bool val) {
 GenericValue Dest;
 if(Ty->isVectorTy()) {
 assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
@@ -788,7 +788,7 @@ void Interpreter::visitBinaryOperator(BinaryOperator &I) {
 }
 static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
-GenericValue Src3, const Type *Ty) {
+GenericValue Src3, Type *Ty) {
 GenericValue Dest;
 if(Ty->isVectorTy()) {
 assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
@@ -805,7 +805,7 @@ static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
 void Interpreter::visitSelectInst(SelectInst &I) {
 ExecutionContext &SF = ECStack.back();
-const Type * Ty = I.getOperand(0)->getType();
+Type * Ty = I.getOperand(0)->getType();
 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
 GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
@@ -1139,7 +1139,7 @@ void Interpreter::visitShl(BinaryOperator &I) {
 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
 GenericValue Dest;
-const Type *Ty = I.getType();
+Type *Ty = I.getType();
 if (Ty->isVectorTy()) {
 uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
@@ -1166,7 +1166,7 @@ void Interpreter::visitLShr(BinaryOperator &I) {
 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
 GenericValue Dest;
-const Type *Ty = I.getType();
+Type *Ty = I.getType();
 if (Ty->isVectorTy()) {
 uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
@@ -1193,7 +1193,7 @@ void Interpreter::visitAShr(BinaryOperator &I) {
 GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
 GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
 GenericValue Dest;
-const Type *Ty = I.getType();
+Type *Ty = I.getType();
 if (Ty->isVectorTy()) {
 size_t src1Size = Src1.AggregateVal.size();
@@ -1237,10 +1237,10 @@ GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy,
 GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
 ExecutionContext &SF) {
-const Type *SrcTy = SrcVal->getType();
+Type *SrcTy = SrcVal->getType();
 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
 if (SrcTy->isVectorTy()) {
-const Type *DstVecTy = DstTy->getScalarType();
+Type *DstVecTy = DstTy->getScalarType();
 unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
 unsigned size = Src.AggregateVal.size();
 // the sizes of src and dst vectors must be equal.
@@ -1248,7 +1248,7 @@ GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
 for (unsigned i = 0; i < size; i++)
 Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(DBitWidth);
 } else {
-const IntegerType *DITy = cast<IntegerType>(DstTy);
+auto *DITy = cast<IntegerType>(DstTy);
 unsigned DBitWidth = DITy->getBitWidth();
 Dest.IntVal = Src.IntVal.sext(DBitWidth);
 }
@@ -1257,10 +1257,10 @@ GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
 GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
 ExecutionContext &SF) {
-const Type *SrcTy = SrcVal->getType();
+Type *SrcTy = SrcVal->getType();
 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
 if (SrcTy->isVectorTy()) {
-const Type *DstVecTy = DstTy->getScalarType();
+Type *DstVecTy = DstTy->getScalarType();
 unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
 unsigned size = Src.AggregateVal.size();
@@ -1269,7 +1269,7 @@ GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
 for (unsigned i = 0; i < size; i++)
 Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(DBitWidth);
 } else {
-const IntegerType *DITy = cast<IntegerType>(DstTy);
+auto *DITy = cast<IntegerType>(DstTy);
 unsigned DBitWidth = DITy->getBitWidth();
 Dest.IntVal = Src.IntVal.zext(DBitWidth);
 }
@@ -1327,8 +1327,8 @@ GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
 if (SrcTy->getTypeID() == Type::VectorTyID) {
-const Type *DstVecTy = DstTy->getScalarType();
-const Type *SrcVecTy = SrcTy->getScalarType();
+Type *DstVecTy = DstTy->getScalarType();
+Type *SrcVecTy = SrcTy->getScalarType();
 uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
 unsigned size = Src.AggregateVal.size();
 // the sizes of src and dst vectors must be equal.
@@ -1365,8 +1365,8 @@ GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
 if (SrcTy->getTypeID() == Type::VectorTyID) {
-const Type *DstVecTy = DstTy->getScalarType();
-const Type *SrcVecTy = SrcTy->getScalarType();
+Type *DstVecTy = DstTy->getScalarType();
+Type *SrcVecTy = SrcTy->getScalarType();
 uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
 unsigned size = Src.AggregateVal.size();
 // the sizes of src and dst vectors must be equal
@@ -1401,7 +1401,7 @@ GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
 if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
-const Type *DstVecTy = DstTy->getScalarType();
+Type *DstVecTy = DstTy->getScalarType();
 unsigned size = Src.AggregateVal.size();
 // the sizes of src and dst vectors must be equal
 Dest.AggregateVal.resize(size);
@@ -1433,7 +1433,7 @@ GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
 GenericValue Dest, Src = getOperandValue(SrcVal, SF);
 if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
-const Type *DstVecTy = DstTy->getScalarType();
+Type *DstVecTy = DstTy->getScalarType();
 unsigned size = Src.AggregateVal.size();
 // the sizes of src and dst vectors must be equal
 Dest.AggregateVal.resize(size);
@@ -1499,8 +1499,8 @@ GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
 // scalar src bitcast to vector dst
 bool isLittleEndian = getDataLayout().isLittleEndian();
 GenericValue TempDst, TempSrc, SrcVec;
-const Type *SrcElemTy;
-const Type *DstElemTy;
+Type *SrcElemTy;
+Type *DstElemTy;
 unsigned SrcBitSize;
 unsigned DstBitSize;
 unsigned SrcNum;

@@ -1382,7 +1382,7 @@ AttrBuilder &AttrBuilder::addRawValue(uint64_t Val) {
 //===----------------------------------------------------------------------===//
 /// \brief Which attributes cannot be applied to a type.
-AttrBuilder AttributeFuncs::typeIncompatible(const Type *Ty) {
+AttrBuilder AttributeFuncs::typeIncompatible(Type *Ty) {
 AttrBuilder Incompatible;
 if (!Ty->isIntegerTy())

@@ -1997,17 +1997,17 @@ static bool isInBoundsIndices(ArrayRef<IndexTy> Idxs) {
 }
 /// \brief Test whether a given ConstantInt is in-range for a SequentialType.
-static bool isIndexInRangeOfSequentialType(const SequentialType *STy,
+static bool isIndexInRangeOfSequentialType(SequentialType *STy,
 const ConstantInt *CI) {
-if (const PointerType *PTy = dyn_cast<PointerType>(STy))
+if (auto *PTy = dyn_cast<PointerType>(STy))
 // Only handle pointers to sized types, not pointers to functions.
 return PTy->getElementType()->isSized();
 uint64_t NumElements = 0;
 // Determine the number of elements in our sequential type.
-if (const ArrayType *ATy = dyn_cast<ArrayType>(STy))
+if (auto *ATy = dyn_cast<ArrayType>(STy))
 NumElements = ATy->getNumElements();
-else if (const VectorType *VTy = dyn_cast<VectorType>(STy))
+else if (auto *VTy = dyn_cast<VectorType>(STy))
 NumElements = VTy->getNumElements();
 assert((isa<ArrayType>(STy) || NumElements > 0) &&
@@ -2178,7 +2178,7 @@ static Constant *ConstantFoldGetElementPtrImpl(Type *PointeeTy, Constant *C,
 // dimension.
 NewIdxs.resize(Idxs.size());
 uint64_t NumElements = 0;
-if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty))
+if (auto *ATy = dyn_cast<ArrayType>(Ty))
 NumElements = ATy->getNumElements();
 else
 NumElements = cast<VectorType>(Ty)->getNumElements();

@@ -797,10 +797,10 @@ Constant *ConstantAggregateZero::getElementValue(unsigned Idx) const {
 }
 unsigned ConstantAggregateZero::getNumElements() const {
-const Type *Ty = getType();
-if (const auto *AT = dyn_cast<ArrayType>(Ty))
+Type *Ty = getType();
+if (auto *AT = dyn_cast<ArrayType>(Ty))
 return AT->getNumElements();
-if (const auto *VT = dyn_cast<VectorType>(Ty))
+if (auto *VT = dyn_cast<VectorType>(Ty))
 return VT->getNumElements();
 return Ty->getStructNumElements();
 }
@@ -838,10 +838,10 @@ UndefValue *UndefValue::getElementValue(unsigned Idx) const {
 }
 unsigned UndefValue::getNumElements() const {
-const Type *Ty = getType();
-if (const auto *AT = dyn_cast<ArrayType>(Ty))
+Type *Ty = getType();
+if (auto *AT = dyn_cast<ArrayType>(Ty))
 return AT->getNumElements();
-if (const auto *VT = dyn_cast<VectorType>(Ty))
+if (auto *VT = dyn_cast<VectorType>(Ty))
 return VT->getNumElements();
 return Ty->getStructNumElements();
 }
@@ -2430,9 +2430,9 @@ StringRef ConstantDataSequential::getRawDataValues() const {
 /// formed with a vector or array of the specified element type.
 /// ConstantDataArray only works with normal float and int types that are
 /// stored densely in memory, not with things like i42 or x86_f80.
-bool ConstantDataSequential::isElementTypeCompatible(const Type *Ty) {
+bool ConstantDataSequential::isElementTypeCompatible(Type *Ty) {
 if (Ty->isFloatTy() || Ty->isDoubleTy()) return true;
-if (const IntegerType *IT = dyn_cast<IntegerType>(Ty)) {
+if (auto *IT = dyn_cast<IntegerType>(Ty)) {
 switch (IT->getBitWidth()) {
 case 8:
 case 16:

@@ -74,8 +74,8 @@ bool Type::canLosslesslyBitCastTo(Type *Ty) const {
 // Vector -> Vector conversions are always lossless if the two vector types
 // have the same size, otherwise not. Also, 64-bit vector types can be
 // converted to x86mmx.
-if (const VectorType *thisPTy = dyn_cast<VectorType>(this)) {
-if (const VectorType *thatPTy = dyn_cast<VectorType>(Ty))
+if (auto *thisPTy = dyn_cast<VectorType>(this)) {
+if (auto *thatPTy = dyn_cast<VectorType>(Ty))
 return thisPTy->getBitWidth() == thatPTy->getBitWidth();
 if (Ty->getTypeID() == Type::X86_MMXTyID &&
 thisPTy->getBitWidth() == 64)
@@ -83,7 +83,7 @@ bool Type::canLosslesslyBitCastTo(Type *Ty) const {
 }
 if (this->getTypeID() == Type::X86_MMXTyID)
-if (const VectorType *thatPTy = dyn_cast<VectorType>(Ty))
+if (auto *thatPTy = dyn_cast<VectorType>(Ty))
 if (thatPTy->getBitWidth() == 64)
 return true;
@@ -91,8 +91,8 @@ bool Type::canLosslesslyBitCastTo(Type *Ty) const {
 // remaining and ptr->ptr. Just select the lossless conversions. Everything
 // else is not lossless. Conservatively assume we can't losslessly convert
 // between pointers with different address spaces.
-if (const PointerType *PTy = dyn_cast<PointerType>(this)) {
-if (const PointerType *OtherPTy = dyn_cast<PointerType>(Ty))
+if (auto *PTy = dyn_cast<PointerType>(this)) {
+if (auto *OtherPTy = dyn_cast<PointerType>(Ty))
 return PTy->getAddressSpace() == OtherPTy->getAddressSpace();
 return false;
 }
@@ -100,14 +100,12 @@ bool Type::canLosslesslyBitCastTo(Type *Ty) const {
 }
 bool Type::isEmptyTy() const {
-const ArrayType *ATy = dyn_cast<ArrayType>(this);
-if (ATy) {
+if (auto *ATy = dyn_cast<ArrayType>(this)) {
 unsigned NumElements = ATy->getNumElements();
 return NumElements == 0 || ATy->getElementType()->isEmptyTy();
 }
-const StructType *STy = dyn_cast<StructType>(this);
-if (STy) {
+if (auto *STy = dyn_cast<StructType>(this)) {
 unsigned NumElements = STy->getNumElements();
 for (unsigned i = 0; i < NumElements; ++i)
 if (!STy->getElementType(i)->isEmptyTy())
@@ -144,7 +142,7 @@ unsigned Type::getScalarSizeInBits() const {
 /// is only valid on floating point types. If the FP type does not
 /// have a stable mantissa (e.g. ppc long double), this method returns -1.
 int Type::getFPMantissaWidth() const {
-if (const VectorType *VTy = dyn_cast<VectorType>(this))
+if (auto *VTy = dyn_cast<VectorType>(this))
 return VTy->getElementType()->getFPMantissaWidth();
 assert(isFloatingPointTy() && "Not a floating point type!");
 if (getTypeID() == HalfTyID) return 11;
@@ -159,11 +157,11 @@ int Type::getFPMantissaWidth() const {
 /// isSizedDerivedType - Derived types like structures and arrays are sized
 /// iff all of the members of the type are sized as well. Since asking for
 /// their size is relatively uncommon, move this operation out of line.
-bool Type::isSizedDerivedType(SmallPtrSetImpl<const Type*> *Visited) const {
-if (const ArrayType *ATy = dyn_cast<ArrayType>(this))
+bool Type::isSizedDerivedType(SmallPtrSetImpl<Type*> *Visited) const {
+if (auto *ATy = dyn_cast<ArrayType>(this))
 return ATy->getElementType()->isSized(Visited);
-if (const VectorType *VTy = dyn_cast<VectorType>(this))
+if (auto *VTy = dyn_cast<VectorType>(this))
 return VTy->getElementType()->isSized(Visited);
 return cast<StructType>(this)->isSized(Visited);
@@ -556,13 +554,13 @@ StructType *StructType::create(StringRef Name, Type *type, ...) {
 return Ret;
 }
-bool StructType::isSized(SmallPtrSetImpl<const Type*> *Visited) const {
+bool StructType::isSized(SmallPtrSetImpl<Type*> *Visited) const {
 if ((getSubclassData() & SCDB_IsSized) != 0)
 return true;
 if (isOpaque())
 return false;
-if (Visited && !Visited->insert(this).second)
+if (Visited && !Visited->insert(const_cast<StructType*>(this)).second)
 return false;
 // Okay, our struct is sized if all of the elements are, but if one of the
@@ -647,7 +645,7 @@ Type *CompositeType::getTypeAtIndex(unsigned Idx) {
 return cast<SequentialType>(this)->getElementType();
 }
 bool CompositeType::indexValid(const Value *V) const {
-if (const StructType *STy = dyn_cast<StructType>(this)) {
+if (auto *STy = dyn_cast<StructType>(this)) {
 // Structure indexes require (vectors of) 32-bit integer constants. In the
 // vector case all of the indices must be equal.
 if (!V->getType()->getScalarType()->isIntegerTy(32))
@@ -664,7 +662,7 @@ bool CompositeType::indexValid(const Value *V) const {
 }
 bool CompositeType::indexValid(unsigned Idx) const {
-if (const StructType *STy = dyn_cast<StructType>(this))
+if (auto *STy = dyn_cast<StructType>(this))
 return Idx < STy->getNumElements();
 // Sequential types can be indexed by any integer.
 return true;
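
One wrinkle in the Type.cpp hunks above: StructType::isSized() is a const member function, so inside it `this` has type `const StructType *`, and inserting it into the now non-const SmallPtrSetImpl<Type *> is the one place that needs an explicit const_cast, as the diff shows. A tiny standalone illustration of the same situation, using a hypothetical Node class rather than anything from this commit:

#include "llvm/ADT/SmallPtrSet.h"

struct Node {
  bool markVisited(llvm::SmallPtrSetImpl<Node *> &Seen) const {
    // Inside a const member function `this` is `const Node *`, so the
    // const must be cast away to store it in a set of non-const pointers.
    // insert().second is true only the first time this node is seen.
    return Seen.insert(const_cast<Node *>(this)).second;
  }
};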

@@ -1365,7 +1365,7 @@ void Verifier::VerifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty,
 V);
 if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
-SmallPtrSet<const Type*, 4> Visited;
+SmallPtrSet<Type*, 4> Visited;
 if (!PTy->getElementType()->isSized(&Visited)) {
 Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal) &&
 !Attrs.hasAttribute(Idx, Attribute::InAlloca),
@@ -1554,7 +1554,7 @@ void Verifier::VerifyStatepoint(ImmutableCallSite CS) {
 &CI);
 const Value *Target = CS.getArgument(2);
-const PointerType *PT = dyn_cast<PointerType>(Target->getType());
+auto *PT = dyn_cast<PointerType>(Target->getType());
 Assert(PT && PT->getElementType()->isFunctionTy(),
 "gc.statepoint callee must be of function pointer type", &CI, Target);
 FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
@@ -2674,7 +2674,7 @@ void Verifier::visitStoreInst(StoreInst &SI) {
 }
 void Verifier::visitAllocaInst(AllocaInst &AI) {
-SmallPtrSet<const Type*, 4> Visited;
+SmallPtrSet<Type*, 4> Visited;
 PointerType *PTy = AI.getType();
 Assert(PTy->getAddressSpace() == 0,
 "Allocation instruction pointer not in the generic address space!",
@@ -3469,9 +3469,8 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
 // Assert that result type matches wrapped callee.
 const Value *Target = StatepointCS.getArgument(2);
-const PointerType *PT = cast<PointerType>(Target->getType());
-const FunctionType *TargetFuncType =
-cast<FunctionType>(PT->getElementType());
+auto *PT = cast<PointerType>(Target->getType());
+auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
 Assert(CS.getType() == TargetFuncType->getReturnType(),
 "gc.result result type does not match wrapped callee", CS);
 break;

@@ -523,7 +523,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
 U = C;
 }
-if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
+if (auto *Ty = dyn_cast<PointerType>(Obj->getType()))
 if (Ty->getAddressSpace() > 255)
 // Fast instruction selection doesn't support the special
 // address spaces.

@@ -54,7 +54,7 @@ bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
 bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
-const FunctionType *FTy = F.getFunctionType();
+FunctionType *FTy = F.getFunctionType();
 LocalMemAvailable = ST.getLocalMemorySize();
@@ -63,7 +63,7 @@ bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
 // possible these arguments require the entire local memory space, so
 // we cannot use local memory in the pass.
 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
-const Type *ParamTy = FTy->getParamType(i);
+Type *ParamTy = FTy->getParamType(i);
 if (ParamTy->isPointerTy() &&
 ParamTy->getPointerAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
 LocalMemAvailable = 0;
@@ -101,7 +101,7 @@ bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
 return false;
 }
-static VectorType *arrayTypeToVecType(const Type *ArrayTy) {
+static VectorType *arrayTypeToVecType(Type *ArrayTy) {
 return VectorType::get(ArrayTy->getArrayElementType(),
 ArrayTy->getArrayNumElements());
 }

@@ -617,7 +617,7 @@ SDValue SITargetLowering::LowerFormalArguments(
 Offset, Ins[i].Flags.isSExt());
 Chains.push_back(Arg.getValue(1));
-const PointerType *ParamTy =
+auto *ParamTy =
 dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
 ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {

@@ -11797,14 +11797,14 @@ enum HABaseType {
 static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
 uint64_t &Members) {
-if (const StructType *ST = dyn_cast<StructType>(Ty)) {
+if (auto *ST = dyn_cast<StructType>(Ty)) {
 for (unsigned i = 0; i < ST->getNumElements(); ++i) {
 uint64_t SubMembers = 0;
 if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
 return false;
 Members += SubMembers;
 }
-} else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
+} else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
 uint64_t SubMembers = 0;
 if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
 return false;
@@ -11819,7 +11819,7 @@ static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
 return false;
 Members = 1;
 Base = HA_DOUBLE;
-} else if (const VectorType *VT = dyn_cast<VectorType>(Ty)) {
+} else if (auto *VT = dyn_cast<VectorType>(Ty)) {
 Members = 1;
 switch (Base) {
 case HA_FLOAT:

@@ -182,7 +182,7 @@ static bool needsFPReturnHelper(Function &F) {
 return whichFPReturnVariant(RetType) != NoFPRet;
 }
-static bool needsFPReturnHelper(const FunctionType &FT) {
+static bool needsFPReturnHelper(FunctionType &FT) {
 Type* RetType = FT.getReturnType();
 return whichFPReturnVariant(RetType) != NoFPRet;
 }
@@ -419,11 +419,11 @@ static bool fixupFPReturnAndCall(Function &F, Module *M,
 CallInst::Create(F, Params, "", &Inst );
 } else if (const CallInst *CI = dyn_cast<CallInst>(I)) {
 const Value* V = CI->getCalledValue();
-const Type* T = nullptr;
+Type* T = nullptr;
 if (V) T = V->getType();
-const PointerType *PFT=nullptr;
+PointerType *PFT = nullptr;
 if (T) PFT = dyn_cast<PointerType>(T);
-const FunctionType *FT=nullptr;
+FunctionType *FT = nullptr;
 if (PFT) FT = dyn_cast<FunctionType>(PFT->getElementType());
 Function *F_ = CI->getCalledFunction();
 if (FT && needsFPReturnHelper(*FT) &&

@@ -44,7 +44,7 @@ static bool isF128SoftLibCall(const char *CallSym) {
 /// This function returns true if Ty is fp128, {f128} or i128 which was
 /// originally a fp128.
-static bool originalTypeIsF128(const Type *Ty, const SDNode *CallNode) {
+static bool originalTypeIsF128(Type *Ty, const SDNode *CallNode) {
 if (Ty->isFP128Ty())
 return true;

@@ -355,7 +355,7 @@ void NVPTXAsmPrinter::printReturnValStr(const Function *F, raw_ostream &O) {
 if (isABI) {
 if (Ty->isFloatingPointTy() || Ty->isIntegerTy()) {
 unsigned size = 0;
-if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
+if (auto *ITy = dyn_cast<IntegerType>(Ty)) {
 size = ITy->getBitWidth();
 if (size < 32)
 size = 32;
@@ -680,7 +680,7 @@ static bool usedInOneFunc(const User *U, Function const *&oneFunc) {
 static bool canDemoteGlobalVar(const GlobalVariable *gv, Function const *&f) {
 if (!gv->hasInternalLinkage())
 return false;
-const PointerType *Pty = gv->getType();
+PointerType *Pty = gv->getType();
 if (Pty->getAddressSpace() != llvm::ADDRESS_SPACE_SHARED)
 return false;
@@ -1030,7 +1030,7 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
 const DataLayout &DL = getDataLayout();
 // GlobalVariables are always constant pointers themselves.
-const PointerType *PTy = GVar->getType();
+PointerType *PTy = GVar->getType();
 Type *ETy = PTy->getElementType();
 if (GVar->hasExternalLinkage()) {
@@ -1297,7 +1297,7 @@ void NVPTXAsmPrinter::emitPTXAddressSpace(unsigned int AddressSpace,
 }
 std::string
-NVPTXAsmPrinter::getPTXFundamentalTypeStr(const Type *Ty, bool useB4PTR) const {
+NVPTXAsmPrinter::getPTXFundamentalTypeStr(Type *Ty, bool useB4PTR) const {
 switch (Ty->getTypeID()) {
 default:
 llvm_unreachable("unexpected type");
@@ -1340,7 +1340,7 @@ void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable *GVar,
 const DataLayout &DL = getDataLayout();
 // GlobalVariables are always constant pointers themselves.
-const PointerType *PTy = GVar->getType();
+PointerType *PTy = GVar->getType();
 Type *ETy = PTy->getElementType();
 O << ".";
@@ -1387,11 +1387,11 @@ static unsigned int getOpenCLAlignment(const DataLayout &DL, Type *Ty) {
 if (Ty->isSingleValueType())
 return DL.getPrefTypeAlignment(Ty);
-const ArrayType *ATy = dyn_cast<ArrayType>(Ty);
+auto *ATy = dyn_cast<ArrayType>(Ty);
 if (ATy)
 return getOpenCLAlignment(DL, ATy->getElementType());
-const StructType *STy = dyn_cast<StructType>(Ty);
+auto *STy = dyn_cast<StructType>(Ty);
 if (STy) {
 unsigned int alignStruct = 1;
 // Go through each element of the struct and find the
@@ -1405,7 +1405,7 @@ static unsigned int getOpenCLAlignment(const DataLayout &DL, Type *Ty) {
 return alignStruct;
 }
-const FunctionType *FTy = dyn_cast<FunctionType>(Ty);
+auto *FTy = dyn_cast<FunctionType>(Ty);
 if (FTy)
 return DL.getPointerPrefAlignment();
 return DL.getPrefTypeAlignment(Ty);
@@ -1493,7 +1493,7 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
 continue;
 }
 // Just a scalar
-const PointerType *PTy = dyn_cast<PointerType>(Ty);
+auto *PTy = dyn_cast<PointerType>(Ty);
 if (isKernelFunc) {
 if (PTy) {
 // Special handling for pointer arguments to kernel
@@ -1554,7 +1554,7 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
 }
 // param has byVal attribute. So should be a pointer
-const PointerType *PTy = dyn_cast<PointerType>(Ty);
+auto *PTy = dyn_cast<PointerType>(Ty);
 assert(PTy && "Param with byval attribute should be a pointer type");
 Type *ETy = PTy->getElementType();
@@ -1798,7 +1798,7 @@ void NVPTXAsmPrinter::bufferLEByte(const Constant *CPV, int Bytes,
 switch (CPV->getType()->getTypeID()) {
 case Type::IntegerTyID: {
-const Type *ETy = CPV->getType();
+Type *ETy = CPV->getType();
 if (ETy == Type::getInt8Ty(CPV->getContext())) {
 unsigned char c = (unsigned char)cast<ConstantInt>(CPV)->getZExtValue();
 ConvertIntToBytes<>(ptr, c);
@@ -1858,7 +1858,7 @@ void NVPTXAsmPrinter::bufferLEByte(const Constant *CPV, int Bytes,
 case Type::FloatTyID:
 case Type::DoubleTyID: {
 const ConstantFP *CFP = dyn_cast<ConstantFP>(CPV);
-const Type *Ty = CFP->getType();
+Type *Ty = CFP->getType();
 if (Ty == Type::getFloatTy(CPV->getContext())) {
 float float32 = (float) CFP->getValueAPF().convertToFloat();
 ConvertFloatToBytes(ptr, float32);
@@ -1949,9 +1949,9 @@ void NVPTXAsmPrinter::bufferAggregateConstant(const Constant *CPV,
 // buildTypeNameMap - Run through symbol table looking for type names.
 //
-bool NVPTXAsmPrinter::isImageType(const Type *Ty) {
+bool NVPTXAsmPrinter::isImageType(Type *Ty) {
-std::map<const Type *, std::string>::iterator PI = TypeNameMap.find(Ty);
+std::map<Type *, std::string>::iterator PI = TypeNameMap.find(Ty);
 return PI != TypeNameMap.end() && (!PI->second.compare("struct._image1d_t") ||
 !PI->second.compare("struct._image2d_t") ||

@@ -233,7 +233,7 @@ private:
 void emitFunctionParamList(const MachineFunction &MF, raw_ostream &O);
 void setAndEmitFunctionVirtualRegisters(const MachineFunction &MF);
 void emitFunctionTempData(const MachineFunction &MF, unsigned &FrameSize);
-bool isImageType(const Type *Ty);
+bool isImageType(Type *Ty);
 void printReturnValStr(const Function *, raw_ostream &O);
 void printReturnValStr(const MachineFunction &MF, raw_ostream &O);
 bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
@@ -271,7 +271,7 @@ private:
 // Build the map between type name and ID based on module's type
 // symbol table.
-std::map<const Type *, std::string> TypeNameMap;
+std::map<Type *, std::string> TypeNameMap;
 // List of variables demoted to a function scope.
 std::map<const Function *, std::vector<const GlobalVariable *> > localDecls;
@@ -282,7 +282,7 @@ private:
 void emitPTXGlobalVariable(const GlobalVariable *GVar, raw_ostream &O);
 void emitPTXAddressSpace(unsigned int AddressSpace, raw_ostream &O) const;
-std::string getPTXFundamentalTypeStr(const Type *Ty, bool = true) const;
+std::string getPTXFundamentalTypeStr(Type *Ty, bool = true) const;
 void printScalarConstant(const Constant *CPV, raw_ostream &O);
 void printFPConstant(const ConstantFP *Fp, raw_ostream &O);
 void bufferLEByte(const Constant *CPV, int Bytes, AggBuffer *aggBuffer);

@@ -531,7 +531,7 @@ static unsigned int getCodeAddrSpace(MemSDNode *N) {
 if (!Src)
 return NVPTX::PTXLdStInstCode::GENERIC;
-if (const PointerType *PT = dyn_cast<PointerType>(Src->getType())) {
+if (auto *PT = dyn_cast<PointerType>(Src->getType())) {
 switch (PT->getAddressSpace()) {
 case llvm::ADDRESS_SPACE_LOCAL: return NVPTX::PTXLdStInstCode::LOCAL;
 case llvm::ADDRESS_SPACE_GLOBAL: return NVPTX::PTXLdStInstCode::GLOBAL;
@@ -5075,7 +5075,7 @@ bool NVPTXDAGToDAGISel::ChkMemSDNodeAddressSpace(SDNode *N,
 }
 if (!Src)
 return false;
-if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
+if (auto *PT = dyn_cast<PointerType>(Src->getType()))
 return (PT->getAddressSpace() == spN);
 return false;
 }

@@ -910,7 +910,7 @@ std::string NVPTXTargetLowering::getPrototype(
 O << "(";
 if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {
 unsigned size = 0;
-if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
+if (auto *ITy = dyn_cast<IntegerType>(retTy)) {
 size = ITy->getBitWidth();
 if (size < 32)
 size = 32;
@@ -981,7 +981,7 @@ std::string NVPTXTargetLowering::getPrototype(
 O << "_";
 continue;
 }
-const PointerType *PTy = dyn_cast<PointerType>(Ty);
+auto *PTy = dyn_cast<PointerType>(Ty);
 assert(PTy && "Param with byval attribute should be a pointer type");
 Type *ETy = PTy->getElementType();
@@ -1318,7 +1318,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
 // struct or vector
 SmallVector<EVT, 16> vtparts;
 SmallVector<uint64_t, 16> Offsets;
-const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
+auto *PTy = dyn_cast<PointerType>(Args[i].Ty);
 assert(PTy && "Type of a byval parameter should be pointer");
 ComputePTXValueVTs(*this, DAG.getDataLayout(), PTy->getElementType(),
 vtparts, &Offsets, 0);
@@ -2040,8 +2040,8 @@ bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
 "struct._image3d_t",
 "struct._sampler_t" };
-const Type *Ty = arg->getType();
-const PointerType *PTy = dyn_cast<PointerType>(Ty);
+Type *Ty = arg->getType();
+auto *PTy = dyn_cast<PointerType>(Ty);
 if (!PTy)
 return false;
@@ -2049,7 +2049,7 @@ bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
 if (!context)
 return false;
-const StructType *STy = dyn_cast<StructType>(PTy->getElementType());
+auto *STy = dyn_cast<StructType>(PTy->getElementType());
 const std::string TypeName = STy && !STy->isLiteral() ? STy->getName() : "";
 for (int i = 0, e = array_lengthof(specialTypes); i != e; ++i)

@@ -17971,7 +17971,7 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
-bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
+bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
 if (OpWidth == 64)
@@ -17996,7 +17996,7 @@ bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
 TargetLoweringBase::AtomicRMWExpansionKind
 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
-const Type *MemType = AI->getType();
+Type *MemType = AI->getType();
 // If the operand is too big, we must see if cmpxchg8/16b is available
 // and default to library calls otherwise.
@@ -18042,7 +18042,7 @@ static bool hasMFENCE(const X86Subtarget& Subtarget) {
 LoadInst *
 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
-const Type *MemType = AI->getType();
+Type *MemType = AI->getType();
 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
 // there is no benefit in turning such RMWs into loads, and it is actually
 // harmful as it introduces a mfence.

@@ -1053,7 +1053,7 @@ namespace llvm {
 LoadInst *
 lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const override;
-bool needsCmpXchgNb(const Type *MemType) const;
+bool needsCmpXchgNb(Type *MemType) const;
 /// Utility function to emit atomic-load-arith operations (and, or, xor,
 /// nand, max, min, umax, umin). It takes the corresponding instruction to

@@ -500,9 +500,9 @@ int FunctionComparator::cmpConstants(const Constant *L, const Constant *R) {
 unsigned TyLWidth = 0;
 unsigned TyRWidth = 0;
-if (const VectorType *VecTyL = dyn_cast<VectorType>(TyL))
+if (auto *VecTyL = dyn_cast<VectorType>(TyL))
 TyLWidth = VecTyL->getBitWidth();
-if (const VectorType *VecTyR = dyn_cast<VectorType>(TyR))
+if (auto *VecTyR = dyn_cast<VectorType>(TyR))
 TyRWidth = VecTyR->getBitWidth();
 if (TyLWidth != TyRWidth)

@@ -1410,7 +1410,7 @@ void AddressSanitizer::initializeCallbacks(Module &M) {
 const std::string ExpStr = Exp ? "exp_" : "";
 const std::string SuffixStr = CompileKernel ? "N" : "_n";
 const std::string EndingStr = CompileKernel ? "_noabort" : "";
-const Type *ExpType = Exp ? Type::getInt32Ty(*C) : nullptr;
+Type *ExpType = Exp ? Type::getInt32Ty(*C) : nullptr;
 // TODO(glider): for KASan builds add _noabort to error reporting
 // functions and make them actually noabort (remove the UnreachableInst).
 AsanErrorCallbackSized[AccessIsWrite][Exp] =

@@ -197,8 +197,8 @@ static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data,
 // TODO: Once we can get to the GCStrategy, this becomes
 // Optional<bool> isGCManagedPointer(const Value *V) const override {
-static bool isGCPointerType(const Type *T) {
-if (const PointerType *PT = dyn_cast<PointerType>(T))
+static bool isGCPointerType(Type *T) {
+if (auto *PT = dyn_cast<PointerType>(T))
 // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
 // GC managed heap. We know that a pointer into this heap needs to be
 // updated and that no other pointer does.

@@ -3664,7 +3664,7 @@ namespace {
 /// Return true if a table with TableSize elements of
 /// type ElementType would fit in a target-legal register.
 static bool WouldFitInRegister(const DataLayout &DL, uint64_t TableSize,
-const Type *ElementType);
+Type *ElementType);
 private:
 // Depending on the contents of the table, it can be represented in
@@ -3880,8 +3880,8 @@ Value *SwitchLookupTable::BuildLookup(Value *Index, IRBuilder<> &Builder) {
 bool SwitchLookupTable::WouldFitInRegister(const DataLayout &DL,
 uint64_t TableSize,
-const Type *ElementType) {
-const IntegerType *IT = dyn_cast<IntegerType>(ElementType);
+Type *ElementType) {
+auto *IT = dyn_cast<IntegerType>(ElementType);
 if (!IT)
 return false;
 // FIXME: If the type is wider than it needs to be, e.g. i8 but all values