Make some DataLayout pointers const.

No functionality change. Just reduces the noise of an upcoming patch.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@202087 91177308-0d34-0410-b5e6-96231b3b80d8
Rafael Espindola 2014-02-24 23:12:18 +00:00
parent 70b3b7d06f
commit ec89b9fb9e
24 changed files with 57 additions and 55 deletions
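
Every hunk below follows the same pattern: a pass or helper that only reads from the DataLayout now holds or takes a pointer-to-const. As a minimal, self-contained sketch of why that is sufficient (hypothetical stand-in type and helper, not code from this patch): a read-only client only ever calls const query methods, so the const qualifier compiles cleanly and simply documents that the layout is never modified.

#include <iostream>

// Hypothetical stand-in for llvm::DataLayout; all query methods are const.
struct DataLayout {
  unsigned PointerBits;
  unsigned getPointerSizeInBits() const { return PointerBits; }
};

// Before this patch a helper like this would take `DataLayout *DL` even
// though it never mutates the layout; the patch const-qualifies the pointer.
static bool pointerWiderThan32(const DataLayout *DL) {
  if (!DL)                               // layout info may be unavailable
    return false;
  return DL->getPointerSizeInBits() > 32;
}

int main() {
  DataLayout DL64 = {64};
  std::cout << pointerWiderThan32(&DL64) << "\n"; // prints 1
}

The same reasoning covers the call sites in the diff: results of lookups such as getAnalysisIfAvailable<DataLayout>() are only queried, never modified, so a non-const pointer converts to const DataLayout * without further changes.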

@@ -122,7 +122,7 @@ class IVUsers : public LoopPass {
 LoopInfo *LI;
 DominatorTree *DT;
 ScalarEvolution *SE;
-DataLayout *DL;
+const DataLayout *DL;
 SmallPtrSet<Instruction*,16> Processed;
 /// IVUses - A list of all tracked IV uses of induction variable expressions

@@ -26,7 +26,7 @@ namespace llvm {
 /// LazyValueInfo - This pass computes, caches, and vends lazy value constraint
 /// information.
 class LazyValueInfo : public FunctionPass {
-class DataLayout *DL;
+const DataLayout *DL;
 class TargetLibraryInfo *TLI;
 void *PImpl;
 LazyValueInfo(const LazyValueInfo&) LLVM_DELETED_FUNCTION;

@@ -323,7 +323,7 @@ namespace llvm {
 /// Current AA implementation, just a cache.
 AliasAnalysis *AA;
-DataLayout *DL;
+const DataLayout *DL;
 DominatorTree *DT;
 OwningPtr<PredIteratorCache> PredCache;
 public:

@@ -227,7 +227,7 @@ namespace llvm {
 /// The DataLayout information for the target we are targeting.
 ///
-DataLayout *DL;
+const DataLayout *DL;
 /// TLI - The target library information for the target we are targeting.
 ///

@@ -102,7 +102,7 @@ namespace {
 Module *Mod;
 AliasAnalysis *AA;
 DominatorTree *DT;
-DataLayout *DL;
+const DataLayout *DL;
 TargetLibraryInfo *TLI;
 std::string Messages;
@@ -503,7 +503,7 @@ void Lint::visitShl(BinaryOperator &I) {
 "Undefined result: Shift count out of range", &I);
 }
-static bool isZero(Value *V, DataLayout *DL) {
+static bool isZero(Value *V, const DataLayout *DL) {
 // Assume undef could be zero.
 if (isa<UndefValue>(V))
 return true;

@@ -104,7 +104,7 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
 SmallVector<MemTransferInst *, 4> aggrMemcpys;
 SmallVector<MemSetInst *, 4> aggrMemsets;
-DataLayout *DL = &getAnalysis<DataLayout>();
+const DataLayout *DL = &getAnalysis<DataLayout>();
 LLVMContext &Context = F.getParent()->getContext();
 //

@@ -109,7 +109,7 @@ namespace {
 PPCTargetMachine *TM;
 LoopInfo *LI;
 ScalarEvolution *SE;
-DataLayout *DL;
+const DataLayout *DL;
 DominatorTree *DT;
 const TargetLibraryInfo *LibInfo;
 };

@@ -84,7 +84,7 @@ namespace {
 const GlobalStatus &GS);
 bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);
-DataLayout *DL;
+const DataLayout *DL;
 TargetLibraryInfo *TLI;
 };
 }
@@ -266,7 +266,8 @@ static bool CleanupPointerRootUsers(GlobalVariable *GV,
 /// quick scan over the use list to clean up the easy and obvious cruft. This
 /// returns true if it made a change.
 static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
-DataLayout *DL, TargetLibraryInfo *TLI) {
+const DataLayout *DL,
+TargetLibraryInfo *TLI) {
 bool Changed = false;
 // Note that we need to use a weak value handle for the worklist items. When
 // we delete a constant array, we may also be holding pointer to one of its
@@ -743,7 +744,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
 /// if the loaded value is dynamically null, then we know that they cannot be
 /// reachable with a null optimize away the load.
 static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
-DataLayout *DL,
+const DataLayout *DL,
 TargetLibraryInfo *TLI) {
 bool Changed = false;
@@ -806,8 +807,8 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
 /// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
 /// instructions that are foldable.
-static void ConstantPropUsersOf(Value *V,
-DataLayout *DL, TargetLibraryInfo *TLI) {
+static void ConstantPropUsersOf(Value *V, const DataLayout *DL,
+TargetLibraryInfo *TLI) {
 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
 if (Instruction *I = dyn_cast<Instruction>(*UI++))
 if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
@@ -830,7 +831,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
 CallInst *CI,
 Type *AllocTy,
 ConstantInt *NElements,
-DataLayout *DL,
+const DataLayout *DL,
 TargetLibraryInfo *TLI) {
 DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n');
@@ -1278,7 +1279,7 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
 /// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break
 /// it up into multiple allocations of arrays of the fields.
 static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
-Value *NElems, DataLayout *DL,
+Value *NElems, const DataLayout *DL,
 const TargetLibraryInfo *TLI) {
 DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n');
 Type *MAT = getMallocAllocatedType(CI, TLI);
@@ -1470,7 +1471,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
 Type *AllocTy,
 AtomicOrdering Ordering,
 Module::global_iterator &GVI,
-DataLayout *DL,
+const DataLayout *DL,
 TargetLibraryInfo *TLI) {
 if (!DL)
 return false;
@@ -1569,7 +1570,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
 static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
 AtomicOrdering Ordering,
 Module::global_iterator &GVI,
-DataLayout *DL, TargetLibraryInfo *TLI) {
+const DataLayout *DL,
+TargetLibraryInfo *TLI) {
 // Ignore no-op GEPs and bitcasts.
 StoredOnceVal = StoredOnceVal->stripPointerCasts();

@@ -108,12 +108,12 @@ public:
 static const ComparableFunction TombstoneKey;
 static DataLayout * const LookupOnly;
-ComparableFunction(Function *Func, DataLayout *DL)
+ComparableFunction(Function *Func, const DataLayout *DL)
 : Func(Func), Hash(profileFunction(Func)), DL(DL) {}
 Function *getFunc() const { return Func; }
 unsigned getHash() const { return Hash; }
-DataLayout *getDataLayout() const { return DL; }
+const DataLayout *getDataLayout() const { return DL; }
 // Drops AssertingVH reference to the function. Outside of debug mode, this
 // does nothing.
@@ -129,7 +129,7 @@ private:
 AssertingVH<Function> Func;
 unsigned Hash;
-DataLayout *DL;
+const DataLayout *DL;
 };
 const ComparableFunction ComparableFunction::EmptyKey = ComparableFunction(0);

@@ -81,7 +81,7 @@ public:
 class LLVM_LIBRARY_VISIBILITY InstCombiner
 : public FunctionPass,
 public InstVisitor<InstCombiner, Instruction*> {
-DataLayout *DL;
+const DataLayout *DL;
 TargetLibraryInfo *TLI;
 bool MadeIRChange;
 LibCallSimplifier *Simplifier;
@@ -108,7 +108,7 @@ public:
 virtual void getAnalysisUsage(AnalysisUsage &AU) const;
-DataLayout *getDataLayout() const { return DL; }
+const DataLayout *getDataLayout() const { return DL; }
 TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }

@@ -235,7 +235,7 @@ isEliminableCastPair(
 const CastInst *CI, ///< The first cast instruction
 unsigned opcode, ///< The opcode of the second cast instruction
 Type *DstTy, ///< The target type for the second cast instruction
-DataLayout *DL ///< The target data for pointer size
+const DataLayout *DL ///< The target data for pointer size
 ) {
 Type *SrcTy = CI->getOperand(0)->getType(); // A from above

@@ -503,7 +503,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
 /// If we can't emit an optimized form for this expression, this returns null.
 ///
 static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
-DataLayout &DL = *IC.getDataLayout();
+const DataLayout &DL = *IC.getDataLayout();
 gep_type_iterator GTI = gep_type_begin(GEP);
 // Check to see if this gep only has a single variable index. If so, and if

@@ -334,7 +334,7 @@ struct AddressSanitizer : public FunctionPass {
 SmallString<64> BlacklistFile;
 LLVMContext *C;
-DataLayout *DL;
+const DataLayout *DL;
 int LongSize;
 Type *IntptrTy;
 ShadowMapping Mapping;
@@ -383,7 +383,7 @@ class AddressSanitizerModule : public ModulePass {
 SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals;
 Type *IntptrTy;
 LLVMContext *C;
-DataLayout *DL;
+const DataLayout *DL;
 ShadowMapping Mapping;
 Function *AsanPoisonGlobals;
 Function *AsanUnpoisonGlobals;

@@ -164,7 +164,7 @@ class DataFlowSanitizer : public ModulePass {
 WK_Custom
 };
-DataLayout *DL;
+const DataLayout *DL;
 Module *Mod;
 LLVMContext *Ctx;
 IntegerType *ShadowTy;

@@ -222,7 +222,7 @@ class MemorySanitizer : public FunctionPass {
 /// \brief Track origins (allocation points) of uninitialized values.
 bool TrackOrigins;
-DataLayout *DL;
+const DataLayout *DL;
 LLVMContext *C;
 Type *IntptrTy;
 Type *OriginTy;

@@ -96,7 +96,7 @@ struct ThreadSanitizer : public FunctionPass {
 bool addrPointsToConstantData(Value *Addr);
 int getMemoryAccessFuncIndex(Value *Addr);
-DataLayout *DL;
+const DataLayout *DL;
 Type *IntptrTy;
 SmallString<64> BlacklistFile;
 OwningPtr<SpecialCaseList> BL;

@@ -67,7 +67,7 @@ bool ConstantPropagation::runOnFunction(Function &F) {
 WorkList.insert(&*i);
 }
 bool Changed = false;
-DataLayout *DL = getAnalysisIfAvailable<DataLayout>();
+const DataLayout *DL = getAnalysisIfAvailable<DataLayout>();
 TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
 while (!WorkList.empty()) {

@@ -71,7 +71,7 @@ namespace {
 LoopInfo *LI;
 ScalarEvolution *SE;
 DominatorTree *DT;
-DataLayout *DL;
+const DataLayout *DL;
 TargetLibraryInfo *TLI;
 SmallVector<WeakVH, 16> DeadInsts;

@@ -76,7 +76,7 @@ namespace {
 /// revectored to the false side of the second if.
 ///
 class JumpThreading : public FunctionPass {
-DataLayout *DL;
+const DataLayout *DL;
 TargetLibraryInfo *TLI;
 LazyValueInfo *LVI;
 #ifdef NDEBUG

@@ -108,7 +108,7 @@ namespace {
 LoopInfo *LI; // Current LoopInfo
 DominatorTree *DT; // Dominator Tree for the current Loop.
-DataLayout *DL; // DataLayout for constant folding.
+const DataLayout *DL; // DataLayout for constant folding.
 TargetLibraryInfo *TLI; // TargetLibraryInfo for constant folding.
 // State that is updated as we process loops.

@@ -141,7 +141,7 @@ protected:
 AliasAnalysis *AA;
 LoopInfo *LI;
 ScalarEvolution *SE;
-DataLayout *DL;
+const DataLayout *DL;
 TargetLibraryInfo *TLI;
 DominatorTree *DT;

@@ -87,7 +87,7 @@ namespace {
 private:
 bool HasDomTree;
-DataLayout *DL;
+const DataLayout *DL;
 /// DeadInsts - Keep track of instructions we have made dead, so that
 /// we can remove them after we are done working.

@@ -214,7 +214,7 @@ namespace {
 AliasAnalysis *AA;
 DominatorTree *DT;
 ScalarEvolution *SE;
-DataLayout *DL;
+const DataLayout *DL;
 const TargetTransformInfo *TTI;
 // FIXME: const correct?

@@ -219,7 +219,7 @@ class LoopVectorizationCostModel;
 class InnerLoopVectorizer {
 public:
 InnerLoopVectorizer(Loop *OrigLoop, ScalarEvolution *SE, LoopInfo *LI,
-DominatorTree *DT, DataLayout *DL,
+DominatorTree *DT, const DataLayout *DL,
 const TargetLibraryInfo *TLI, unsigned VecWidth,
 unsigned UnrollFactor)
 : OrigLoop(OrigLoop), SE(SE), LI(LI), DT(DT), DL(DL), TLI(TLI),
@@ -379,7 +379,7 @@ protected:
 /// Dominator Tree.
 DominatorTree *DT;
 /// Data Layout.
-DataLayout *DL;
+const DataLayout *DL;
 /// Target Library Info.
 const TargetLibraryInfo *TLI;
@@ -428,7 +428,7 @@ protected:
 class InnerLoopUnroller : public InnerLoopVectorizer {
 public:
 InnerLoopUnroller(Loop *OrigLoop, ScalarEvolution *SE, LoopInfo *LI,
-DominatorTree *DT, DataLayout *DL,
+DominatorTree *DT, const DataLayout *DL,
 const TargetLibraryInfo *TLI, unsigned UnrollFactor) :
 InnerLoopVectorizer(OrigLoop, SE, LI, DT, DL, TLI, 1, UnrollFactor) { }
@@ -487,7 +487,7 @@ public:
 unsigned NumStores;
 unsigned NumPredStores;
-LoopVectorizationLegality(Loop *L, ScalarEvolution *SE, DataLayout *DL,
+LoopVectorizationLegality(Loop *L, ScalarEvolution *SE, const DataLayout *DL,
 DominatorTree *DT, TargetLibraryInfo *TLI)
 : NumLoads(0), NumStores(0), NumPredStores(0), TheLoop(L), SE(SE), DL(DL),
 DT(DT), TLI(TLI), Induction(0), WidestIndTy(0), HasFunNoNaNAttr(false),
@@ -725,7 +725,7 @@ private:
 /// Scev analysis.
 ScalarEvolution *SE;
 /// DataLayout analysis.
-DataLayout *DL;
+const DataLayout *DL;
 /// Dominators.
 DominatorTree *DT;
 /// Target Library Info.
@@ -775,7 +775,7 @@ public:
 LoopVectorizationCostModel(Loop *L, ScalarEvolution *SE, LoopInfo *LI,
 LoopVectorizationLegality *Legal,
 const TargetTransformInfo &TTI,
-DataLayout *DL, const TargetLibraryInfo *TLI)
+const DataLayout *DL, const TargetLibraryInfo *TLI)
 : TheLoop(L), SE(SE), LI(LI), Legal(Legal), TTI(TTI), DL(DL), TLI(TLI) {}
 /// Information about vectorization costs
@@ -848,7 +848,7 @@ private:
 /// Vector target information.
 const TargetTransformInfo &TTI;
 /// Target data layout information.
-DataLayout *DL;
+const DataLayout *DL;
 /// Target Library Info.
 const TargetLibraryInfo *TLI;
 };
@@ -1009,7 +1009,7 @@ struct LoopVectorize : public FunctionPass {
 }
 ScalarEvolution *SE;
-DataLayout *DL;
+const DataLayout *DL;
 LoopInfo *LI;
 TargetTransformInfo *TTI;
 DominatorTree *DT;
@@ -1283,7 +1283,7 @@ Value *InnerLoopVectorizer::getConsecutiveVector(Value* Val, int StartIdx,
 /// \brief Find the operand of the GEP that should be checked for consecutive
 /// stores. This ignores trailing indices that have no effect on the final
 /// pointer.
-static unsigned getGEPInductionOperand(DataLayout *DL,
+static unsigned getGEPInductionOperand(const DataLayout *DL,
 const GetElementPtrInst *Gep) {
 unsigned LastOperand = Gep->getNumOperands() - 1;
 unsigned GEPAllocSize = DL->getTypeAllocSize(
@@ -3298,7 +3298,7 @@ bool LoopVectorizationLegality::canVectorize() {
 return true;
 }
-static Type *convertPointerToIntegerType(DataLayout &DL, Type *Ty) {
+static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
 if (Ty->isPointerTy())
 return DL.getIntPtrType(Ty);
@@ -3310,7 +3310,7 @@ static Type *convertPointerToIntegerType(DataLayout &DL, Type *Ty) {
 return Ty;
 }
-static Type* getWiderType(DataLayout &DL, Type *Ty0, Type *Ty1) {
+static Type* getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
 Ty0 = convertPointerToIntegerType(DL, Ty0);
 Ty1 = convertPointerToIntegerType(DL, Ty1);
 if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
@@ -3508,7 +3508,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
 ///\brief Remove GEPs whose indices but the last one are loop invariant and
 /// return the induction operand of the gep pointer.
 static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE,
-DataLayout *DL, Loop *Lp) {
+const DataLayout *DL, Loop *Lp) {
 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
 if (!GEP)
 return Ptr;
@@ -3544,7 +3544,7 @@ static Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
 /// Looks for symbolic strides "a[i*stride]". Returns the symbolic stride as a
 /// pointer to the Value, or null otherwise.
 static Value *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE,
-DataLayout *DL, Loop *Lp) {
+const DataLayout *DL, Loop *Lp) {
 const PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
 if (!PtrTy || PtrTy->isAggregateType())
 return 0;
@@ -3679,7 +3679,7 @@ public:
 /// \brief Set of potential dependent memory accesses.
 typedef EquivalenceClasses<MemAccessInfo> DepCandidates;
-AccessAnalysis(DataLayout *Dl, DepCandidates &DA) :
+AccessAnalysis(const DataLayout *Dl, DepCandidates &DA) :
 DL(Dl), DepCands(DA), AreAllWritesIdentified(true),
 AreAllReadsIdentified(true), IsRTCheckNeeded(false) {}
@@ -3745,7 +3745,7 @@ private:
 /// Set of underlying objects already written to.
 SmallPtrSet<Value*, 16> WriteObjects;
-DataLayout *DL;
+const DataLayout *DL;
 /// Sets of potentially dependent accesses - members of one set share an
 /// underlying pointer. The set "CheckDeps" identfies which sets really need a
@@ -3772,7 +3772,7 @@ static bool hasComputableBounds(ScalarEvolution *SE, ValueToValueMap &Strides,
 /// \brief Check the stride of the pointer and ensure that it does not wrap in
 /// the address space.
-static int isStridedPtr(ScalarEvolution *SE, DataLayout *DL, Value *Ptr,
+static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
 const Loop *Lp, ValueToValueMap &StridesMap);
 bool AccessAnalysis::canCheckPtrAtRT(
@@ -3992,7 +3992,7 @@ public:
 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
 typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;
-MemoryDepChecker(ScalarEvolution *Se, DataLayout *Dl, const Loop *L)
+MemoryDepChecker(ScalarEvolution *Se, const DataLayout *Dl, const Loop *L)
 : SE(Se), DL(Dl), InnermostLoop(L), AccessIdx(0),
 ShouldRetryWithRuntimeCheck(false) {}
@@ -4030,7 +4030,7 @@ public:
 private:
 ScalarEvolution *SE;
-DataLayout *DL;
+const DataLayout *DL;
 const Loop *InnermostLoop;
 /// \brief Maps access locations (ptr, read/write) to program order.
@@ -4079,7 +4079,7 @@ static bool isInBoundsGep(Value *Ptr) {
 }
 /// \brief Check whether the access through \p Ptr has a constant stride.
-static int isStridedPtr(ScalarEvolution *SE, DataLayout *DL, Value *Ptr,
+static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
 const Loop *Lp, ValueToValueMap &StridesMap) {
 const Type *Ty = Ptr->getType();
 assert(Ty->isPointerTy() && "Unexpected non-ptr");