[LoopAccesses] LAA::getInfo to use const reference for stride parameter

Also includes the const-correctness fixes required elsewhere to make this work.
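For context, the pattern applied throughout is standard C++ const propagation: once the stride map is taken by const reference, every lookup must go through a const_iterator. A minimal standalone sketch of the idea (std::map<int, int> stands in for llvm::ValueToValueMap; all names here are illustrative, not from this patch):

    #include <iostream>
    #include <map>

    // Stand-in for llvm::ValueToValueMap (illustrative only).
    using StrideMap = std::map<int, int>;

    // Taking the map by const reference promises the caller it won't be
    // mutated; find() on a const map therefore returns a const_iterator.
    int lookupStride(const StrideMap &Strides, int Ptr) {
      StrideMap::const_iterator SI = Strides.find(Ptr);
      return SI != Strides.end() ? SI->second : 0;
    }

    int main() {
      StrideMap Strides{{1, 4}};
      std::cout << lookupStride(Strides, 1) << "\n"; // prints 4
    }

The same mechanical change drives the iterator-to-const_iterator switch in replaceSymbolicStrideSCEV below.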

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@230289 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Adam Nemet
Date:   2015-02-24 00:41:59 +00:00
Commit: 989a0200c2 (parent: fbdee9f0c0)
2 changed files with 30 additions and 30 deletions

--- a/include/llvm/Analysis/LoopAccessAnalysis.h
+++ b/include/llvm/Analysis/LoopAccessAnalysis.h

@@ -120,7 +120,8 @@ public:
 
     /// Insert a pointer and calculate the start and end SCEVs.
     void insert(ScalarEvolution *SE, Loop *Lp, Value *Ptr, bool WritePtr,
-                unsigned DepSetId, unsigned ASId, ValueToValueMap &Strides);
+                unsigned DepSetId, unsigned ASId,
+                const ValueToValueMap &Strides);
 
     /// \brief No run-time memory checking is necessary.
     bool empty() const { return Pointers.empty(); }
@@ -151,7 +152,7 @@ public:
 
   LoopAccessInfo(Loop *L, ScalarEvolution *SE, const DataLayout *DL,
                  const TargetLibraryInfo *TLI, AliasAnalysis *AA,
-                 DominatorTree *DT, ValueToValueMap &Strides);
+                 DominatorTree *DT, const ValueToValueMap &Strides);
 
   /// Return true we can analyze the memory accesses in the loop and there are
   /// no memory dependence cycles.
@@ -195,7 +196,7 @@ public:
 
 private:
   /// \brief Analyze the loop. Substitute symbolic strides using Strides.
-  void analyzeLoop(ValueToValueMap &Strides);
+  void analyzeLoop(const ValueToValueMap &Strides);
 
   /// \brief Check if the structure of the loop allows it to be analyzed by this
   /// pass.
@@ -235,7 +236,7 @@ Value *stripIntegerCast(Value *V);
 /// Ptr. \p PtrToStride provides the mapping between the pointer value and its
 /// stride as collected by LoopVectorizationLegality::collectStridedAccess.
 const SCEV *replaceSymbolicStrideSCEV(ScalarEvolution *SE,
-                                      ValueToValueMap &PtrToStride,
+                                      const ValueToValueMap &PtrToStride,
                                       Value *Ptr, Value *OrigPtr = nullptr);
 
 /// \brief This analysis provides dependence information for the memory accesses
@@ -263,7 +264,7 @@ public:
   /// of symbolic strides, \p Strides provides the mapping (see
   /// replaceSymbolicStrideSCEV). If there is no cached result available run
   /// the analysis.
-  const LoopAccessInfo &getInfo(Loop *L, ValueToValueMap &Strides);
+  const LoopAccessInfo &getInfo(Loop *L, const ValueToValueMap &Strides);
 
   void releaseMemory() override {
     // Invalidate the cache when the pass is freed.

--- a/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/lib/Analysis/LoopAccessAnalysis.cpp

@@ -69,14 +69,15 @@ Value *llvm::stripIntegerCast(Value *V) {
 }
 
 const SCEV *llvm::replaceSymbolicStrideSCEV(ScalarEvolution *SE,
-                                            ValueToValueMap &PtrToStride,
+                                            const ValueToValueMap &PtrToStride,
                                             Value *Ptr, Value *OrigPtr) {
   const SCEV *OrigSCEV = SE->getSCEV(Ptr);
 
   // If there is an entry in the map return the SCEV of the pointer with the
   // symbolic stride replaced by one.
-  ValueToValueMap::iterator SI = PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
+  ValueToValueMap::const_iterator SI =
+      PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
   if (SI != PtrToStride.end()) {
     Value *StrideVal = SI->second;
@@ -99,11 +100,9 @@ const SCEV *llvm::replaceSymbolicStrideSCEV(ScalarEvolution *SE,
   return SE->getSCEV(Ptr);
 }
 
-void LoopAccessInfo::RuntimePointerCheck::insert(ScalarEvolution *SE, Loop *Lp,
-                                                 Value *Ptr, bool WritePtr,
-                                                 unsigned DepSetId,
-                                                 unsigned ASId,
-                                                 ValueToValueMap &Strides) {
+void LoopAccessInfo::RuntimePointerCheck::insert(
+    ScalarEvolution *SE, Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId,
+    unsigned ASId, const ValueToValueMap &Strides) {
   // Get the stride replaced scev.
   const SCEV *Sc = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
@@ -188,9 +187,8 @@ public:
   /// \brief Check whether we can check the pointers at runtime for
   /// non-intersection.
   bool canCheckPtrAtRT(LoopAccessInfo::RuntimePointerCheck &RtCheck,
-                       unsigned &NumComparisons,
-                       ScalarEvolution *SE, Loop *TheLoop,
-                       ValueToValueMap &Strides,
+                       unsigned &NumComparisons, ScalarEvolution *SE,
+                       Loop *TheLoop, const ValueToValueMap &Strides,
                        bool ShouldCheckStride = false);
 
   /// \brief Goes over all memory accesses, checks whether a RT check is needed
@@ -239,8 +237,8 @@ private:
 } // end anonymous namespace
 
 /// \brief Check whether a pointer can participate in a runtime bounds check.
-static bool hasComputableBounds(ScalarEvolution *SE, ValueToValueMap &Strides,
-                                Value *Ptr) {
+static bool hasComputableBounds(ScalarEvolution *SE,
+                                const ValueToValueMap &Strides, Value *Ptr) {
   const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
   if (!AR)
@@ -252,12 +250,12 @@ static bool hasComputableBounds(ScalarEvolution *SE, ValueToValueMap &Strides,
 /// \brief Check the stride of the pointer and ensure that it does not wrap in
 /// the address space.
 static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
-                        const Loop *Lp, ValueToValueMap &StridesMap);
+                        const Loop *Lp, const ValueToValueMap &StridesMap);
 
 bool AccessAnalysis::canCheckPtrAtRT(
-    LoopAccessInfo::RuntimePointerCheck &RtCheck,
-    unsigned &NumComparisons, ScalarEvolution *SE, Loop *TheLoop,
-    ValueToValueMap &StridesMap, bool ShouldCheckStride) {
+    LoopAccessInfo::RuntimePointerCheck &RtCheck, unsigned &NumComparisons,
+    ScalarEvolution *SE, Loop *TheLoop, const ValueToValueMap &StridesMap,
+    bool ShouldCheckStride) {
   // Find pointers with computable bounds. We are going to use this information
   // to place a runtime bound check.
   bool CanDoRT = true;
@@ -523,7 +521,7 @@ public:
   ///
   /// Only checks sets with elements in \p CheckDeps.
   bool areDepsSafe(AccessAnalysis::DepCandidates &AccessSets,
-                   MemAccessInfoSet &CheckDeps, ValueToValueMap &Strides);
+                   MemAccessInfoSet &CheckDeps, const ValueToValueMap &Strides);
 
   /// \brief The maximum number of bytes of a vector register we can vectorize
   /// the accesses safely with.
@@ -568,7 +566,7 @@ private:
   /// Otherwise, this function returns true signaling a possible dependence.
   bool isDependent(const MemAccessInfo &A, unsigned AIdx,
                    const MemAccessInfo &B, unsigned BIdx,
-                   ValueToValueMap &Strides);
+                   const ValueToValueMap &Strides);
 
   /// \brief Check whether the data dependence could prevent store-load
   /// forwarding.
@@ -585,7 +583,7 @@ static bool isInBoundsGep(Value *Ptr) {
 
 /// \brief Check whether the access through \p Ptr has a constant stride.
 static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
-                        const Loop *Lp, ValueToValueMap &StridesMap) {
+                        const Loop *Lp, const ValueToValueMap &StridesMap) {
   const Type *Ty = Ptr->getType();
   assert(Ty->isPointerTy() && "Unexpected non-ptr");
@@ -705,7 +703,7 @@ bool MemoryDepChecker::couldPreventStoreLoadForward(unsigned Distance,
 
 bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                                    const MemAccessInfo &B, unsigned BIdx,
-                                   ValueToValueMap &Strides) {
+                                   const ValueToValueMap &Strides) {
   assert (AIdx < BIdx && "Must pass arguments in program order");
 
   Value *APtr = A.getPointer();
@@ -835,7 +833,7 @@ bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
 
 bool MemoryDepChecker::areDepsSafe(AccessAnalysis::DepCandidates &AccessSets,
                                    MemAccessInfoSet &CheckDeps,
-                                   ValueToValueMap &Strides) {
+                                   const ValueToValueMap &Strides) {
   MaxSafeDepDistBytes = -1U;
   while (!CheckDeps.empty()) {
@@ -921,7 +919,7 @@ bool LoopAccessInfo::canAnalyzeLoop() {
   return true;
 }
 
-void LoopAccessInfo::analyzeLoop(ValueToValueMap &Strides) {
+void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
 
   typedef SmallVector<Value*, 16> ValueVector;
   typedef SmallPtrSet<Value*, 16> ValueSet;
@@ -1301,7 +1299,8 @@ LoopAccessInfo::addRuntimeCheck(Instruction *Loc) const {
 LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                                const DataLayout *DL,
                                const TargetLibraryInfo *TLI, AliasAnalysis *AA,
-                               DominatorTree *DT, ValueToValueMap &Strides)
+                               DominatorTree *DT,
+                               const ValueToValueMap &Strides)
     : TheLoop(L), SE(SE), DL(DL), TLI(TLI), AA(AA), DT(DT), NumLoads(0),
       NumStores(0), MaxSafeDepDistBytes(-1U), CanVecMem(false) {
   if (canAnalyzeLoop())
@@ -1326,8 +1325,8 @@ void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
   OS << "\n";
 }
 
-const LoopAccessInfo &LoopAccessAnalysis::getInfo(Loop *L,
-                                                  ValueToValueMap &Strides) {
+const LoopAccessInfo &
+LoopAccessAnalysis::getInfo(Loop *L, const ValueToValueMap &Strides) {
   auto &LAI = LoopAccessInfoMap[L];
 
 #ifndef NDEBUG