[LoopAccesses] LAA::getInfo to use const reference for stride parameter
And other required const-correctness fixes to make this work.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@230289 91177308-0d34-0410-b5e6-96231b3b80d8
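The follow-up edits in the diff below are plain const propagation: once the outermost entry point takes the stride map by const reference, every helper it forwards the map to must accept a const reference as well. A minimal standalone sketch of that pattern, with illustrative names only (not code from this patch):

#include "llvm/ADT/DenseMap.h"
using namespace llvm;

typedef DenseMap<int, int> Map; // stand-in for ValueToValueMap

// Read-only helper; taking `Map &` here would reject the const callers below.
static unsigned readOnlyHelper(const Map &M) { return M.size(); }

// Making the outer parameter const forces const through the whole chain;
// that is the "other required const-correctness fixes" in this commit.
static unsigned analyze(const Map &M) { return readOnlyHelper(M); }
static unsigned entryPoint(const Map &M) { return analyze(M); }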
@@ -69,14 +69,15 @@ Value *llvm::stripIntegerCast(Value *V) {
 }
 
 const SCEV *llvm::replaceSymbolicStrideSCEV(ScalarEvolution *SE,
-                                            ValueToValueMap &PtrToStride,
+                                            const ValueToValueMap &PtrToStride,
                                             Value *Ptr, Value *OrigPtr) {
 
   const SCEV *OrigSCEV = SE->getSCEV(Ptr);
 
   // If there is an entry in the map return the SCEV of the pointer with the
   // symbolic stride replaced by one.
-  ValueToValueMap::iterator SI = PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
+  ValueToValueMap::const_iterator SI =
+      PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
   if (SI != PtrToStride.end()) {
     Value *StrideVal = SI->second;
 
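A note on the iterator change in the hunk above: find() on a const map returns a const_iterator, so once PtrToStride becomes a const reference the old `ValueToValueMap::iterator SI = ...` no longer compiles. A generic sketch of the same pattern on a DenseMap (illustrative, not from the patch):

#include "llvm/ADT/DenseMap.h"
using namespace llvm;

// On a const DenseMap, find() yields a const_iterator.
static int lookupOrZero(const DenseMap<int, int> &M, int Key) {
  DenseMap<int, int>::const_iterator It = M.find(Key);
  return It == M.end() ? 0 : It->second;
}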
@@ -99,11 +100,9 @@ const SCEV *llvm::replaceSymbolicStrideSCEV(ScalarEvolution *SE,
   return SE->getSCEV(Ptr);
 }
 
-void LoopAccessInfo::RuntimePointerCheck::insert(ScalarEvolution *SE, Loop *Lp,
-                                                 Value *Ptr, bool WritePtr,
-                                                 unsigned DepSetId,
-                                                 unsigned ASId,
-                                                 ValueToValueMap &Strides) {
+void LoopAccessInfo::RuntimePointerCheck::insert(
+    ScalarEvolution *SE, Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId,
+    unsigned ASId, const ValueToValueMap &Strides) {
   // Get the stride replaced scev.
   const SCEV *Sc = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
@@ -188,9 +187,8 @@ public:
   /// \brief Check whether we can check the pointers at runtime for
   /// non-intersection.
   bool canCheckPtrAtRT(LoopAccessInfo::RuntimePointerCheck &RtCheck,
-                       unsigned &NumComparisons,
-                       ScalarEvolution *SE, Loop *TheLoop,
-                       ValueToValueMap &Strides,
+                       unsigned &NumComparisons, ScalarEvolution *SE,
+                       Loop *TheLoop, const ValueToValueMap &Strides,
                        bool ShouldCheckStride = false);
 
   /// \brief Goes over all memory accesses, checks whether a RT check is needed
@@ -239,8 +237,8 @@ private:
 } // end anonymous namespace
 
 /// \brief Check whether a pointer can participate in a runtime bounds check.
-static bool hasComputableBounds(ScalarEvolution *SE, ValueToValueMap &Strides,
-                                Value *Ptr) {
+static bool hasComputableBounds(ScalarEvolution *SE,
+                                const ValueToValueMap &Strides, Value *Ptr) {
   const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
   if (!AR)
@@ -252,12 +250,12 @@ static bool hasComputableBounds(ScalarEvolution *SE, ValueToValueMap &Strides,
 /// \brief Check the stride of the pointer and ensure that it does not wrap in
 /// the address space.
 static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
-                        const Loop *Lp, ValueToValueMap &StridesMap);
+                        const Loop *Lp, const ValueToValueMap &StridesMap);
 
 bool AccessAnalysis::canCheckPtrAtRT(
-    LoopAccessInfo::RuntimePointerCheck &RtCheck,
-    unsigned &NumComparisons, ScalarEvolution *SE, Loop *TheLoop,
-    ValueToValueMap &StridesMap, bool ShouldCheckStride) {
+    LoopAccessInfo::RuntimePointerCheck &RtCheck, unsigned &NumComparisons,
+    ScalarEvolution *SE, Loop *TheLoop, const ValueToValueMap &StridesMap,
+    bool ShouldCheckStride) {
   // Find pointers with computable bounds. We are going to use this information
   // to place a runtime bound check.
   bool CanDoRT = true;
@@ -523,7 +521,7 @@ public:
   ///
   /// Only checks sets with elements in \p CheckDeps.
   bool areDepsSafe(AccessAnalysis::DepCandidates &AccessSets,
-                   MemAccessInfoSet &CheckDeps, ValueToValueMap &Strides);
+                   MemAccessInfoSet &CheckDeps, const ValueToValueMap &Strides);
 
   /// \brief The maximum number of bytes of a vector register we can vectorize
   /// the accesses safely with.
@@ -568,7 +566,7 @@ private:
   /// Otherwise, this function returns true signaling a possible dependence.
   bool isDependent(const MemAccessInfo &A, unsigned AIdx,
                    const MemAccessInfo &B, unsigned BIdx,
-                   ValueToValueMap &Strides);
+                   const ValueToValueMap &Strides);
 
   /// \brief Check whether the data dependence could prevent store-load
   /// forwarding.
@@ -585,7 +583,7 @@ static bool isInBoundsGep(Value *Ptr) {
 
 /// \brief Check whether the access through \p Ptr has a constant stride.
 static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
-                        const Loop *Lp, ValueToValueMap &StridesMap) {
+                        const Loop *Lp, const ValueToValueMap &StridesMap) {
   const Type *Ty = Ptr->getType();
   assert(Ty->isPointerTy() && "Unexpected non-ptr");
 
@@ -705,7 +703,7 @@ bool MemoryDepChecker::couldPreventStoreLoadForward(unsigned Distance,
 
 bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                                    const MemAccessInfo &B, unsigned BIdx,
-                                   ValueToValueMap &Strides) {
+                                   const ValueToValueMap &Strides) {
   assert (AIdx < BIdx && "Must pass arguments in program order");
 
   Value *APtr = A.getPointer();
@@ -835,7 +833,7 @@ bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
 
 bool MemoryDepChecker::areDepsSafe(AccessAnalysis::DepCandidates &AccessSets,
                                    MemAccessInfoSet &CheckDeps,
-                                   ValueToValueMap &Strides) {
+                                   const ValueToValueMap &Strides) {
 
   MaxSafeDepDistBytes = -1U;
   while (!CheckDeps.empty()) {
@@ -921,7 +919,7 @@ bool LoopAccessInfo::canAnalyzeLoop() {
   return true;
 }
 
-void LoopAccessInfo::analyzeLoop(ValueToValueMap &Strides) {
+void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
 
   typedef SmallVector<Value*, 16> ValueVector;
   typedef SmallPtrSet<Value*, 16> ValueSet;
@@ -1301,7 +1299,8 @@ LoopAccessInfo::addRuntimeCheck(Instruction *Loc) const {
 LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                                const DataLayout *DL,
                                const TargetLibraryInfo *TLI, AliasAnalysis *AA,
-                               DominatorTree *DT, ValueToValueMap &Strides)
+                               DominatorTree *DT,
+                               const ValueToValueMap &Strides)
     : TheLoop(L), SE(SE), DL(DL), TLI(TLI), AA(AA), DT(DT), NumLoads(0),
       NumStores(0), MaxSafeDepDistBytes(-1U), CanVecMem(false) {
   if (canAnalyzeLoop())
@@ -1326,8 +1325,8 @@ void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
   OS << "\n";
 }
 
-const LoopAccessInfo &LoopAccessAnalysis::getInfo(Loop *L,
-                                                  ValueToValueMap &Strides) {
+const LoopAccessInfo &
+LoopAccessAnalysis::getInfo(Loop *L, const ValueToValueMap &Strides) {
   auto &LAI = LoopAccessInfoMap[L];
 
 #ifndef NDEBUG
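For context, a hedged sketch of what the new getInfo signature allows at a call site; the wrapper function and the empty stride map are illustrative assumptions, not part of this commit:

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// A client with no symbolic strides can now pass a const (here empty) map
// instead of owning a mutable ValueToValueMap at every call site.
static void printLoopAccesses(LoopAccessAnalysis &LAA, Loop *L,
                              raw_ostream &OS) {
  const ValueToValueMap NoStrides;
  const LoopAccessInfo &LAI = LAA.getInfo(L, NoStrides);
  LAI.print(OS, 0);
}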