mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-01-18 13:34:04 +00:00
[LoopAccesses] Rename LoopAccessAnalysis to LoopAccessInfo
LoopAccessAnalysis will be used as the name of the pass. This is part of the patchset that converts LoopAccessAnalysis into an actual analysis pass.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@229621 91177308-0d34-0410-b5e6-96231b3b80d8
parent d7aa78535c
commit 38a9ebb065
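Context for the rename: per the commit message, the LoopAccessAnalysis name is being reserved for the analysis pass itself, while the class renamed here, LoopAccessInfo, remains the per-loop result object that clients such as the loop vectorizer construct and query. The sketch below pieces together how a client drives the renamed class, using only the constructor and accessors visible in the hunks that follow; the include path, the free helper function, and the literal VectorizerParams values are assumptions for illustration, not part of this commit.

#include "llvm/Analysis/LoopAccessAnalysis.h" // header path assumed in this sketch

using namespace llvm;

// Hypothetical helper mirroring how LoopVectorizationLegality drives the
// renamed class in the hunks below: build a LoopAccessInfo for one loop and
// ask whether its memory accesses are safe to vectorize.
static bool canVectorizeLoopMemory(Function *F, Loop *L, ScalarEvolution *SE,
                                   const DataLayout *DL,
                                   const TargetLibraryInfo *TLI,
                                   AliasAnalysis *AA, DominatorTree *DT,
                                   ValueToValueMap &Strides) {
  // Constructor signature as introduced by this rename (header hunk at -137).
  // The four VectorizerParams values are placeholders for this sketch.
  LoopAccessInfo LAI(F, L, SE, DL, TLI, AA, DT,
                     LoopAccessInfo::VectorizerParams(
                         /*MaxVectorWidth=*/64, /*VectorizationFactor=*/0,
                         /*VectorizationInterleave=*/0,
                         /*RuntimeMemoryCheckThreshold=*/8));

  // Dependence analysis; this may decide that run-time pointer checks are
  // required (PtrRtCheck.Need), in which case a client would emit them via
  // addRuntimeCheck(Loc), as InnerLoopVectorizer::createEmptyLoop() does
  // further down in the diff.
  return LAI.canVectorizeMemory(Strides);
}

Once the conversion to an actual analysis pass lands, clients would obtain this object from the pass manager rather than constructing it directly, which is why the LoopAccessAnalysis name is being freed up here.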
@@ -74,7 +74,7 @@ public:
 /// generates run-time checks to prove independence. This is done by
 /// AccessAnalysis::canCheckPtrAtRT and the checks are maintained by the
 /// RuntimePointerCheck class.
-class LoopAccessAnalysis {
+class LoopAccessInfo {
 public:
   /// \brief Collection of parameters used from the vectorizer.
   struct VectorizerParams {
@@ -137,10 +137,10 @@ public:
     SmallVector<unsigned, 2> AliasSetId;
   };

-  LoopAccessAnalysis(Function *F, Loop *L, ScalarEvolution *SE,
-                     const DataLayout *DL, const TargetLibraryInfo *TLI,
-                     AliasAnalysis *AA, DominatorTree *DT,
-                     const VectorizerParams &VectParams) :
+  LoopAccessInfo(Function *F, Loop *L, ScalarEvolution *SE,
+                 const DataLayout *DL, const TargetLibraryInfo *TLI,
+                 AliasAnalysis *AA, DominatorTree *DT,
+                 const VectorizerParams &VectParams) :
       TheFunction(F), TheLoop(L), SE(SE), DL(DL), TLI(TLI), AA(AA), DT(DT),
       NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1U),
       VectParams(VectParams) {}

@@ -73,12 +73,11 @@ const SCEV *llvm::replaceSymbolicStrideSCEV(ScalarEvolution *SE,
   return SE->getSCEV(Ptr);
 }

-void LoopAccessAnalysis::RuntimePointerCheck::insert(ScalarEvolution *SE,
-                                                     Loop *Lp, Value *Ptr,
-                                                     bool WritePtr,
-                                                     unsigned DepSetId,
-                                                     unsigned ASId,
-                                                     ValueToValueMap &Strides) {
+void LoopAccessInfo::RuntimePointerCheck::insert(ScalarEvolution *SE, Loop *Lp,
+                                                 Value *Ptr, bool WritePtr,
+                                                 unsigned DepSetId,
+                                                 unsigned ASId,
+                                                 ValueToValueMap &Strides) {
   // Get the stride replaced scev.
   const SCEV *Sc = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
@@ -128,7 +127,7 @@ public:

   /// \brief Check whether we can check the pointers at runtime for
   /// non-intersection.
-  bool canCheckPtrAtRT(LoopAccessAnalysis::RuntimePointerCheck &RtCheck,
+  bool canCheckPtrAtRT(LoopAccessInfo::RuntimePointerCheck &RtCheck,
                        unsigned &NumComparisons,
                        ScalarEvolution *SE, Loop *TheLoop,
                        ValueToValueMap &Strides,
@@ -196,7 +195,7 @@ static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
                         const Loop *Lp, ValueToValueMap &StridesMap);

 bool AccessAnalysis::canCheckPtrAtRT(
-    LoopAccessAnalysis::RuntimePointerCheck &RtCheck,
+    LoopAccessInfo::RuntimePointerCheck &RtCheck,
     unsigned &NumComparisons, ScalarEvolution *SE, Loop *TheLoop,
     ValueToValueMap &StridesMap, bool ShouldCheckStride) {
   // Find pointers with computable bounds. We are going to use this information
@@ -439,7 +438,7 @@ public:
   typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;

   MemoryDepChecker(ScalarEvolution *Se, const DataLayout *Dl, const Loop *L,
-                   const LoopAccessAnalysis::VectorizerParams &VectParams)
+                   const LoopAccessInfo::VectorizerParams &VectParams)
       : SE(Se), DL(Dl), InnermostLoop(L), AccessIdx(0),
         ShouldRetryWithRuntimeCheck(false), VectParams(VectParams) {}

@@ -497,7 +496,7 @@ private:
   bool ShouldRetryWithRuntimeCheck;

   /// \brief Vectorizer parameters used by the analysis.
-  LoopAccessAnalysis::VectorizerParams VectParams;
+  LoopAccessInfo::VectorizerParams VectParams;

   /// \brief Check whether there is a plausible dependence between the two
   /// accesses.
@@ -815,7 +814,7 @@ bool MemoryDepChecker::areDepsSafe(AccessAnalysis::DepCandidates &AccessSets,
   return true;
 }

-bool LoopAccessAnalysis::canVectorizeMemory(ValueToValueMap &Strides) {
+bool LoopAccessInfo::canVectorizeMemory(ValueToValueMap &Strides) {

   typedef SmallVector<Value*, 16> ValueVector;
   typedef SmallPtrSet<Value*, 16> ValueSet;
@@ -1069,7 +1068,7 @@ bool LoopAccessAnalysis::canVectorizeMemory(ValueToValueMap &Strides) {
   return CanVecMem;
 }

-bool LoopAccessAnalysis::blockNeedsPredication(BasicBlock *BB) {
+bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB) {
   assert(TheLoop->contains(BB) && "Unknown block used");

   // Blocks that do not dominate the latch need predication.
@@ -1077,11 +1076,11 @@ bool LoopAccessAnalysis::blockNeedsPredication(BasicBlock *BB) {
   return !DT->dominates(BB, Latch);
 }

-void LoopAccessAnalysis::emitAnalysis(VectorizationReport &Message) {
+void LoopAccessInfo::emitAnalysis(VectorizationReport &Message) {
   VectorizationReport::emitAnalysis(Message, TheFunction, TheLoop);
 }

-bool LoopAccessAnalysis::isUniform(Value *V) {
+bool LoopAccessInfo::isUniform(Value *V) {
   return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
 }

@@ -1097,7 +1096,7 @@ static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
 }

 std::pair<Instruction *, Instruction *>
-LoopAccessAnalysis::addRuntimeCheck(Instruction *Loc) {
+LoopAccessInfo::addRuntimeCheck(Instruction *Loc) {
   Instruction *tnullptr = nullptr;
   if (!PtrRtCheck.Need)
     return std::pair<Instruction *, Instruction *>(tnullptr, tnullptr);
@@ -551,8 +551,8 @@ public:
       : NumPredStores(0), TheLoop(L), SE(SE), DL(DL),
         TLI(TLI), TheFunction(F), TTI(TTI), Induction(nullptr),
         WidestIndTy(nullptr),
-        LAA(F, L, SE, DL, TLI, AA, DT,
-            LoopAccessAnalysis::VectorizerParams(
+        LAI(F, L, SE, DL, TLI, AA, DT,
+            LoopAccessInfo::VectorizerParams(
                 MaxVectorWidth, VectorizationFactor, VectorizationInterleave,
                 RuntimeMemoryCheckThreshold)),
         HasFunNoNaNAttr(false) {}
@@ -740,19 +740,19 @@ public:
   bool isUniformAfterVectorization(Instruction* I) { return Uniforms.count(I); }

   /// Returns the information that we collected about runtime memory check.
-  LoopAccessAnalysis::RuntimePointerCheck *getRuntimePointerCheck() {
-    return LAA.getRuntimePointerCheck();
+  LoopAccessInfo::RuntimePointerCheck *getRuntimePointerCheck() {
+    return LAI.getRuntimePointerCheck();
   }

-  LoopAccessAnalysis *getLAA() {
-    return &LAA;
+  LoopAccessInfo *getLAI() {
+    return &LAI;
   }

   /// This function returns the identity element (or neutral element) for
   /// the operation K.
   static Constant *getReductionIdentity(ReductionKind K, Type *Tp);

-  unsigned getMaxSafeDepDistBytes() { return LAA.getMaxSafeDepDistBytes(); }
+  unsigned getMaxSafeDepDistBytes() { return LAI.getMaxSafeDepDistBytes(); }

   bool hasStride(Value *V) { return StrideSet.count(V); }
   bool mustCheckStrides() { return !StrideSet.empty(); }
@@ -777,10 +777,10 @@ public:
     return (MaskedOp.count(I) != 0);
   }
   unsigned getNumStores() const {
-    return LAA.getNumStores();
+    return LAI.getNumStores();
   }
   unsigned getNumLoads() const {
-    return LAA.getNumLoads();
+    return LAI.getNumLoads();
   }
   unsigned getNumPredStores() const {
     return NumPredStores;
@@ -874,7 +874,7 @@ private:
   /// This set holds the variables which are known to be uniform after
   /// vectorization.
   SmallPtrSet<Instruction*, 4> Uniforms;
-  LoopAccessAnalysis LAA;
+  LoopAccessInfo LAI;
   /// Can we assume the absence of NaNs.
   bool HasFunNoNaNAttr;

@@ -1658,7 +1658,7 @@ int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
 }

 bool LoopVectorizationLegality::isUniform(Value *V) {
-  return LAA.isUniform(V);
+  return LAI.isUniform(V);
 }

 InnerLoopVectorizer::VectorParts&
@@ -2230,7 +2230,7 @@ void InnerLoopVectorizer::createEmptyLoop() {
   // faster.
   Instruction *MemRuntimeCheck;
   std::tie(FirstCheckInst, MemRuntimeCheck) =
-      Legal->getLAA()->addRuntimeCheck(LastBypassBlock->getTerminator());
+      Legal->getLAI()->addRuntimeCheck(LastBypassBlock->getTerminator());
   if (MemRuntimeCheck) {
     // Create a new block containing the memory check.
     BasicBlock *CheckBlock =
@@ -3398,7 +3398,7 @@ bool LoopVectorizationLegality::canVectorize() {
   collectLoopUniforms();

   DEBUG(dbgs() << "LV: We can vectorize this loop" <<
-        (LAA.getRuntimePointerCheck()->Need ? " (with a runtime bound check)" :
+        (LAI.getRuntimePointerCheck()->Need ? " (with a runtime bound check)" :
          "")
        <<"!\n");

@@ -3823,7 +3823,7 @@ void LoopVectorizationLegality::collectLoopUniforms() {
 }

 bool LoopVectorizationLegality::canVectorizeMemory() {
-  return LAA.canVectorizeMemory(Strides);
+  return LAI.canVectorizeMemory(Strides);
 }

 static bool hasMultipleUsesOf(Instruction *I,
@@ -4167,7 +4167,7 @@ bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
 }

 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
-  return LAA.blockNeedsPredication(BB);
+  return LAI.blockNeedsPredication(BB);
 }

 bool LoopVectorizationLegality::blockCanBePredicated(BasicBlock *BB,