From 0bba49cebc50c7bd4662a4807bcb3ee7f42cb470 Mon Sep 17 00:00:00 2001
From: Dan Gohman
Date: Tue, 7 Jul 2009 17:06:11 +0000
Subject: [PATCH] Change all SCEV* to SCEV *.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@74918 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/Analysis/IVUsers.h               |  16 +-
 include/llvm/Analysis/LoopVR.h                |   4 +-
 include/llvm/Analysis/ScalarEvolution.h       | 152 +++----
 .../llvm/Analysis/ScalarEvolutionExpander.h   |   8 +-
 .../Analysis/ScalarEvolutionExpressions.h     |  99 +++--
 lib/Analysis/IVUsers.cpp                      |  28 +-
 lib/Analysis/LoopVR.cpp                       |  14 +-
 lib/Analysis/ScalarEvolution.cpp              | 412 +++++++++---------
 lib/Analysis/ScalarEvolutionExpander.cpp      |  58 +--
 lib/Transforms/Scalar/IndVarSimplify.cpp      |  24 +-
 lib/Transforms/Scalar/LoopDeletion.cpp        |   2 +-
 lib/Transforms/Scalar/LoopStrengthReduce.cpp  | 206 ++++-----
 12 files changed, 513 insertions(+), 510 deletions(-)

diff --git a/include/llvm/Analysis/IVUsers.h b/include/llvm/Analysis/IVUsers.h
index 40396e2fcaf..e4820f0053d 100644
--- a/include/llvm/Analysis/IVUsers.h
+++ b/include/llvm/Analysis/IVUsers.h
@@ -34,7 +34,7 @@ class IVUsersOfOneStride;
 class IVStrideUse : public CallbackVH, public ilist_node<IVStrideUse> {
 public:
   IVStrideUse(IVUsersOfOneStride *parent,
-              const SCEV* offset,
+              const SCEV *offset,
               Instruction* U, Value *O)
     : CallbackVH(U), Parent(parent), Offset(offset),
       OperandValToReplace(O),
@@ -58,10 +58,10 @@ public:
   /// getOffset - Return the offset to add to a theoretical induction
   /// variable that starts at zero and counts up by the stride to compute
   /// the value for the use. This always has the same type as the stride.
-  const SCEV* getOffset() const { return Offset; }
+  const SCEV *getOffset() const { return Offset; }

   /// setOffset - Assign a new offset to this use.
-  void setOffset(const SCEV* Val) {
+  void setOffset(const SCEV *Val) {
     Offset = Val;
   }

@@ -96,7 +96,7 @@ private:
   IVUsersOfOneStride *Parent;

   /// Offset - The offset to add to the base induction expression.
-  const SCEV* Offset;
+  const SCEV *Offset;

   /// OperandValToReplace - The Value of the operand in the user instruction
   /// that this IVStrideUse is representing.
@@ -158,7 +158,7 @@ public:
   /// initial value and the operand that uses the IV.
   ilist<IVStrideUse> Users;

-  void addUser(const SCEV* Offset, Instruction *User, Value *Operand) {
+  void addUser(const SCEV *Offset, Instruction *User, Value *Operand) {
     Users.push_back(new IVStrideUse(this, Offset, User, Operand));
   }
 };

@@ -178,12 +178,12 @@ public:
   /// IVUsesByStride - A mapping from the strides in StrideOrder to the
   /// uses in IVUses.
-  std::map<const SCEV*, IVUsersOfOneStride*> IVUsesByStride;
+  std::map<const SCEV *, IVUsersOfOneStride *> IVUsesByStride;

   /// StrideOrder - An ordering of the keys in IVUsesByStride that is stable:
   /// We use this to iterate over the IVUsesByStride collection without being
   /// dependent on random ordering of pointers in the process.
-  SmallVector<const SCEV*, 16> StrideOrder;
+  SmallVector<const SCEV *, 16> StrideOrder;

 private:
   virtual void getAnalysisUsage(AnalysisUsage &AU) const;

@@ -203,7 +203,7 @@ public:
   /// getReplacementExpr - Return a SCEV expression which computes the
   /// value of the OperandValToReplace of the given IVStrideUse.
-  const SCEV* getReplacementExpr(const IVStrideUse &U) const;
+  const SCEV *getReplacementExpr(const IVStrideUse &U) const;

   void print(raw_ostream &OS, const Module* = 0) const;
   virtual void print(std::ostream &OS, const Module* = 0) const;

diff --git a/include/llvm/Analysis/LoopVR.h b/include/llvm/Analysis/LoopVR.h
index 36b62152f86..be13a80d02a 100644
--- a/include/llvm/Analysis/LoopVR.h
+++ b/include/llvm/Analysis/LoopVR.h
@@ -78,9 +78,9 @@ public:
 private:
   ConstantRange compute(Value *V);

-  ConstantRange getRange(const SCEV* S, Loop *L, ScalarEvolution &SE);
+  ConstantRange getRange(const SCEV *S, Loop *L, ScalarEvolution &SE);

-  ConstantRange getRange(const SCEV* S, const SCEV* T, ScalarEvolution &SE);
+  ConstantRange getRange(const SCEV *S, const SCEV *T, ScalarEvolution &SE);

   std::map<Value *, ConstantRange *> Map;
 };

diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index 36f881140da..b60a0854c8e 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -89,9 +89,9 @@ namespace llvm {
     /// the same value, but which uses the concrete value Conc instead of the
     /// symbolic value. If this SCEV does not use the symbolic value, it
     /// returns itself.
-    virtual const SCEV*
-    replaceSymbolicValuesWithConcrete(const SCEV* Sym,
-                                      const SCEV* Conc,
+    virtual const SCEV *
+    replaceSymbolicValuesWithConcrete(const SCEV *Sym,
+                                      const SCEV *Conc,
                                       ScalarEvolution &SE) const = 0;

     /// dominates - Return true if elements that makes up this SCEV dominates
@@ -134,9 +134,9 @@ namespace llvm {
     virtual const Type *getType() const;
     virtual bool hasComputableLoopEvolution(const Loop *L) const;
     virtual void print(raw_ostream &OS) const;
-    virtual const SCEV*
-    replaceSymbolicValuesWithConcrete(const SCEV* Sym,
-                                      const SCEV* Conc,
+    virtual const SCEV *
+    replaceSymbolicValuesWithConcrete(const SCEV *Sym,
+                                      const SCEV *Conc,
                                       ScalarEvolution &SE) const;

     virtual bool dominates(BasicBlock *BB, DominatorTree *DT) const {
@@ -184,7 +184,7 @@ namespace llvm {
     /// Scalars - This is a cache of the scalars we have analyzed so far.
     ///
-    std::map<SCEVCallbackVH, const SCEV*> Scalars;
+    std::map<SCEVCallbackVH, const SCEV *> Scalars;

     /// BackedgeTakenInfo - Information about the backedge-taken count
     /// of a loop. This currently includes an exact count and a maximum count.
     ///
     struct BackedgeTakenInfo {
       /// Exact - An expression indicating the exact backedge-taken count of
       /// the loop if it is known, or a SCEVCouldNotCompute otherwise.
-      const SCEV* Exact;
+      const SCEV *Exact;

       /// Max - An expression indicating the least maximum backedge-taken
       /// count of the loop that is known, or a SCEVCouldNotCompute.
-      const SCEV* Max;
+      const SCEV *Max;

-      /*implicit*/ BackedgeTakenInfo(const SCEV* exact) :
+      /*implicit*/ BackedgeTakenInfo(const SCEV *exact) :
         Exact(exact), Max(exact) {}

-      BackedgeTakenInfo(const SCEV* exact, const SCEV* max) :
+      BackedgeTakenInfo(const SCEV *exact, const SCEV *max) :
         Exact(exact), Max(max) {}

       /// hasAnyInfo - Test whether this BackedgeTakenInfo contains any
@@ -231,30 +231,30 @@
     /// createSCEV - We know that there is no SCEV for the specified value.
     /// Analyze the expression.
-    const SCEV* createSCEV(Value *V);
+    const SCEV *createSCEV(Value *V);

     /// createNodeForPHI - Provide the special handling we need to analyze PHI
     /// SCEVs.
-    const SCEV* createNodeForPHI(PHINode *PN);
+    const SCEV *createNodeForPHI(PHINode *PN);

     /// createNodeForGEP - Provide the special handling we need to analyze GEP
     /// SCEVs.
-    const SCEV* createNodeForGEP(User *GEP);
+    const SCEV *createNodeForGEP(User *GEP);

     /// ReplaceSymbolicValueWithConcrete - This looks up the computed SCEV value
     /// for the specified instruction and replaces any references to the
     /// symbolic value SymName with the specified value. This is used during
     /// PHI resolution.
     void ReplaceSymbolicValueWithConcrete(Instruction *I,
-                                          const SCEV* SymName,
-                                          const SCEV* NewVal);
+                                          const SCEV *SymName,
+                                          const SCEV *NewVal);

     /// getBECount - Subtract the end and start values and divide by the step,
     /// rounding up, to get the number of times the backedge is executed. Return
     /// CouldNotCompute if an intermediate computation overflows.
-    const SCEV* getBECount(const SCEV* Start,
-                           const SCEV* End,
-                           const SCEV* Step);
+    const SCEV *getBECount(const SCEV *Start,
+                           const SCEV *End,
+                           const SCEV *Step);

     /// getBackedgeTakenInfo - Return the BackedgeTakenInfo for the given
     /// loop, lazily computing new values if the loop hasn't been analyzed
@@ -292,7 +292,7 @@
     /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition
     /// of 'icmp op load X, cst', try to see if we can compute the trip count.
-    const SCEV*
+    const SCEV *
       ComputeLoadConstantCompareBackedgeTakenCount(LoadInst *LI,
                                                    Constant *RHS,
                                                    const Loop *L,
@@ -303,19 +303,19 @@
     /// try to evaluate a few iterations of the loop until the exit condition
     /// gets a value of ExitWhen (true or false). If we cannot evaluate the
     /// trip count of the loop, return CouldNotCompute.
-    const SCEV* ComputeBackedgeTakenCountExhaustively(const Loop *L,
+    const SCEV *ComputeBackedgeTakenCountExhaustively(const Loop *L,
                                                       Value *Cond,
                                                       bool ExitWhen);

     /// HowFarToZero - Return the number of times a backedge comparing the
     /// specified value to zero will execute. If not computable, return
     /// CouldNotCompute.
-    const SCEV* HowFarToZero(const SCEV *V, const Loop *L);
+    const SCEV *HowFarToZero(const SCEV *V, const Loop *L);

     /// HowFarToNonZero - Return the number of times a backedge checking the
     /// specified value for nonzero will execute. If not computable, return
     /// CouldNotCompute.
-    const SCEV* HowFarToNonZero(const SCEV *V, const Loop *L);
+    const SCEV *HowFarToNonZero(const SCEV *V, const Loop *L);

     /// HowManyLessThans - Return the number of times a backedge containing the
     /// specified less-than comparison will execute. If not computable, return
@@ -375,115 +375,115 @@
     /// getSCEV - Return a SCEV expression handle for the full generality of the
     /// specified expression.
- const SCEV* getSCEV(Value *V); + const SCEV *getSCEV(Value *V); - const SCEV* getConstant(ConstantInt *V); - const SCEV* getConstant(const APInt& Val); - const SCEV* getConstant(const Type *Ty, uint64_t V, bool isSigned = false); - const SCEV* getTruncateExpr(const SCEV* Op, const Type *Ty); - const SCEV* getZeroExtendExpr(const SCEV* Op, const Type *Ty); - const SCEV* getSignExtendExpr(const SCEV* Op, const Type *Ty); - const SCEV* getAnyExtendExpr(const SCEV* Op, const Type *Ty); - const SCEV* getAddExpr(SmallVectorImpl &Ops); - const SCEV* getAddExpr(const SCEV* LHS, const SCEV* RHS) { - SmallVector Ops; + const SCEV *getConstant(ConstantInt *V); + const SCEV *getConstant(const APInt& Val); + const SCEV *getConstant(const Type *Ty, uint64_t V, bool isSigned = false); + const SCEV *getTruncateExpr(const SCEV *Op, const Type *Ty); + const SCEV *getZeroExtendExpr(const SCEV *Op, const Type *Ty); + const SCEV *getSignExtendExpr(const SCEV *Op, const Type *Ty); + const SCEV *getAnyExtendExpr(const SCEV *Op, const Type *Ty); + const SCEV *getAddExpr(SmallVectorImpl &Ops); + const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS) { + SmallVector Ops; Ops.push_back(LHS); Ops.push_back(RHS); return getAddExpr(Ops); } - const SCEV* getAddExpr(const SCEV* Op0, const SCEV* Op1, - const SCEV* Op2) { - SmallVector Ops; + const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, + const SCEV *Op2) { + SmallVector Ops; Ops.push_back(Op0); Ops.push_back(Op1); Ops.push_back(Op2); return getAddExpr(Ops); } - const SCEV* getMulExpr(SmallVectorImpl &Ops); - const SCEV* getMulExpr(const SCEV* LHS, const SCEV* RHS) { - SmallVector Ops; + const SCEV *getMulExpr(SmallVectorImpl &Ops); + const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS) { + SmallVector Ops; Ops.push_back(LHS); Ops.push_back(RHS); return getMulExpr(Ops); } - const SCEV* getUDivExpr(const SCEV* LHS, const SCEV* RHS); - const SCEV* getAddRecExpr(const SCEV* Start, const SCEV* Step, + const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS); + const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L); - const SCEV* getAddRecExpr(SmallVectorImpl &Operands, + const SCEV *getAddRecExpr(SmallVectorImpl &Operands, const Loop *L); - const SCEV* getAddRecExpr(const SmallVectorImpl &Operands, + const SCEV *getAddRecExpr(const SmallVectorImpl &Operands, const Loop *L) { - SmallVector NewOp(Operands.begin(), Operands.end()); + SmallVector NewOp(Operands.begin(), Operands.end()); return getAddRecExpr(NewOp, L); } - const SCEV* getSMaxExpr(const SCEV* LHS, const SCEV* RHS); - const SCEV* getSMaxExpr(SmallVectorImpl &Operands); - const SCEV* getUMaxExpr(const SCEV* LHS, const SCEV* RHS); - const SCEV* getUMaxExpr(SmallVectorImpl &Operands); - const SCEV* getSMinExpr(const SCEV* LHS, const SCEV* RHS); - const SCEV* getUMinExpr(const SCEV* LHS, const SCEV* RHS); - const SCEV* getUnknown(Value *V); - const SCEV* getCouldNotCompute(); + const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS); + const SCEV *getSMaxExpr(SmallVectorImpl &Operands); + const SCEV *getUMaxExpr(const SCEV *LHS, const SCEV *RHS); + const SCEV *getUMaxExpr(SmallVectorImpl &Operands); + const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS); + const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS); + const SCEV *getUnknown(Value *V); + const SCEV *getCouldNotCompute(); /// getNegativeSCEV - Return the SCEV object corresponding to -V. 
/// - const SCEV* getNegativeSCEV(const SCEV* V); + const SCEV *getNegativeSCEV(const SCEV *V); /// getNotSCEV - Return the SCEV object corresponding to ~V. /// - const SCEV* getNotSCEV(const SCEV* V); + const SCEV *getNotSCEV(const SCEV *V); /// getMinusSCEV - Return LHS-RHS. /// - const SCEV* getMinusSCEV(const SCEV* LHS, - const SCEV* RHS); + const SCEV *getMinusSCEV(const SCEV *LHS, + const SCEV *RHS); /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion /// of the input value to the specified type. If the type must be /// extended, it is zero extended. - const SCEV* getTruncateOrZeroExtend(const SCEV* V, const Type *Ty); + const SCEV *getTruncateOrZeroExtend(const SCEV *V, const Type *Ty); /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion /// of the input value to the specified type. If the type must be /// extended, it is sign extended. - const SCEV* getTruncateOrSignExtend(const SCEV* V, const Type *Ty); + const SCEV *getTruncateOrSignExtend(const SCEV *V, const Type *Ty); /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of /// the input value to the specified type. If the type must be extended, /// it is zero extended. The conversion must not be narrowing. - const SCEV* getNoopOrZeroExtend(const SCEV* V, const Type *Ty); + const SCEV *getNoopOrZeroExtend(const SCEV *V, const Type *Ty); /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of /// the input value to the specified type. If the type must be extended, /// it is sign extended. The conversion must not be narrowing. - const SCEV* getNoopOrSignExtend(const SCEV* V, const Type *Ty); + const SCEV *getNoopOrSignExtend(const SCEV *V, const Type *Ty); /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of /// the input value to the specified type. If the type must be extended, /// it is extended with unspecified bits. The conversion must not be /// narrowing. - const SCEV* getNoopOrAnyExtend(const SCEV* V, const Type *Ty); + const SCEV *getNoopOrAnyExtend(const SCEV *V, const Type *Ty); /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the /// input value to the specified type. The conversion must not be /// widening. - const SCEV* getTruncateOrNoop(const SCEV* V, const Type *Ty); + const SCEV *getTruncateOrNoop(const SCEV *V, const Type *Ty); /// getIntegerSCEV - Given a SCEVable type, create a constant for the /// specified signed integer value and return a SCEV for the constant. - const SCEV* getIntegerSCEV(int Val, const Type *Ty); + const SCEV *getIntegerSCEV(int Val, const Type *Ty); /// getUMaxFromMismatchedTypes - Promote the operands to the wider of /// the types using zero-extension, and then perform a umax operation /// with them. - const SCEV* getUMaxFromMismatchedTypes(const SCEV* LHS, - const SCEV* RHS); + const SCEV *getUMaxFromMismatchedTypes(const SCEV *LHS, + const SCEV *RHS); /// getUMinFromMismatchedTypes - Promote the operands to the wider of /// the types using zero-extension, and then perform a umin operation /// with them. - const SCEV* getUMinFromMismatchedTypes(const SCEV* LHS, - const SCEV* RHS); + const SCEV *getUMinFromMismatchedTypes(const SCEV *LHS, + const SCEV *RHS); /// hasSCEV - Return true if the SCEV for this value has already been /// computed. @@ -491,7 +491,7 @@ namespace llvm { /// setSCEV - Insert the specified SCEV into the map of current SCEVs for /// the specified value. 
- void setSCEV(Value *V, const SCEV* H); + void setSCEV(Value *V, const SCEV *H); /// getSCEVAtScope - Return a SCEV expression handle for the specified value /// at the specified scope in the program. The L value specifies a loop @@ -503,11 +503,11 @@ namespace llvm { /// /// In the case that a relevant loop exit value cannot be computed, the /// original value V is returned. - const SCEV* getSCEVAtScope(const SCEV *S, const Loop *L); + const SCEV *getSCEVAtScope(const SCEV *S, const Loop *L); /// getSCEVAtScope - This is a convenience function which does /// getSCEVAtScope(getSCEV(V), L). - const SCEV* getSCEVAtScope(Value *V, const Loop *L); + const SCEV *getSCEVAtScope(Value *V, const Loop *L); /// isLoopGuardedByCond - Test whether entry to the loop is protected by /// a conditional between LHS and RHS. This is used to help avoid max @@ -526,12 +526,12 @@ namespace llvm { /// loop-invariant backedge-taken count (see /// hasLoopInvariantBackedgeTakenCount). /// - const SCEV* getBackedgeTakenCount(const Loop *L); + const SCEV *getBackedgeTakenCount(const Loop *L); /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except /// return the least SCEV value that is known never to be less than the /// actual backedge taken count. - const SCEV* getMaxBackedgeTakenCount(const Loop *L); + const SCEV *getMaxBackedgeTakenCount(const Loop *L); /// hasLoopInvariantBackedgeTakenCount - Return true if the specified loop /// has an analyzable loop-invariant backedge-taken count. @@ -548,15 +548,15 @@ namespace llvm { /// time, the minimum number of times S is divisible by 2. For example, /// given {4,+,8} it returns 2. If S is guaranteed to be 0, it returns the /// bitwidth of S. - uint32_t GetMinTrailingZeros(const SCEV* S); + uint32_t GetMinTrailingZeros(const SCEV *S); /// GetMinLeadingZeros - Determine the minimum number of zero bits that S is /// guaranteed to begin with (at every loop iteration). - uint32_t GetMinLeadingZeros(const SCEV* S); + uint32_t GetMinLeadingZeros(const SCEV *S); /// GetMinSignBits - Determine the minimum number of sign bits that S is /// guaranteed to begin with. - uint32_t GetMinSignBits(const SCEV* S); + uint32_t GetMinSignBits(const SCEV *S); virtual bool runOnFunction(Function &F); virtual void releaseMemory(); diff --git a/include/llvm/Analysis/ScalarEvolutionExpander.h b/include/llvm/Analysis/ScalarEvolutionExpander.h index 816d49764d1..2c8bcd742b3 100644 --- a/include/llvm/Analysis/ScalarEvolutionExpander.h +++ b/include/llvm/Analysis/ScalarEvolutionExpander.h @@ -53,7 +53,7 @@ namespace llvm { /// expandCodeFor - Insert code to directly compute the specified SCEV /// expression into the program. The inserted code is inserted into the /// specified block. - Value *expandCodeFor(const SCEV* SH, const Type *Ty, Instruction *IP) { + Value *expandCodeFor(const SCEV *SH, const Type *Ty, Instruction *IP) { Builder.SetInsertPoint(IP->getParent(), IP); return expandCodeFor(SH, Ty); } @@ -72,8 +72,8 @@ namespace llvm { /// expandAddToGEP - Expand a SCEVAddExpr with a pointer type into a GEP /// instead of using ptrtoint+arithmetic+inttoptr. - Value *expandAddToGEP(const SCEV* const *op_begin, - const SCEV* const *op_end, + Value *expandAddToGEP(const SCEV *const *op_begin, + const SCEV *const *op_end, const PointerType *PTy, const Type *Ty, Value *V); Value *expand(const SCEV *S); @@ -82,7 +82,7 @@ namespace llvm { /// expression into the program. The inserted code is inserted into the /// SCEVExpander's current insertion point. 
If a type is specified, the /// result will be expanded to have that type, with a cast if necessary. - Value *expandCodeFor(const SCEV* SH, const Type *Ty = 0); + Value *expandCodeFor(const SCEV *SH, const Type *Ty = 0); /// isInsertedInstruction - Return true if the specified instruction was /// inserted by the code rewriter. If so, the client should not modify the diff --git a/include/llvm/Analysis/ScalarEvolutionExpressions.h b/include/llvm/Analysis/ScalarEvolutionExpressions.h index c54c86556c3..d56404066b4 100644 --- a/include/llvm/Analysis/ScalarEvolutionExpressions.h +++ b/include/llvm/Analysis/ScalarEvolutionExpressions.h @@ -53,8 +53,8 @@ namespace llvm { virtual const Type *getType() const; - const SCEV* replaceSymbolicValuesWithConcrete(const SCEV* Sym, - const SCEV* Conc, + const SCEV *replaceSymbolicValuesWithConcrete(const SCEV *Sym, + const SCEV *Conc, ScalarEvolution &SE) const { return this; } @@ -77,15 +77,15 @@ namespace llvm { /// class SCEVCastExpr : public SCEV { protected: - const SCEV* Op; + const SCEV *Op; const Type *Ty; - SCEVCastExpr(unsigned SCEVTy, const SCEV* op, const Type *ty); + SCEVCastExpr(unsigned SCEVTy, const SCEV *op, const Type *ty); public: virtual void Profile(FoldingSetNodeID &ID) const; - const SCEV* getOperand() const { return Op; } + const SCEV *getOperand() const { return Op; } virtual const Type *getType() const { return Ty; } virtual bool isLoopInvariant(const Loop *L) const { @@ -114,13 +114,13 @@ namespace llvm { class SCEVTruncateExpr : public SCEVCastExpr { friend class ScalarEvolution; - SCEVTruncateExpr(const SCEV* op, const Type *ty); + SCEVTruncateExpr(const SCEV *op, const Type *ty); public: - const SCEV* replaceSymbolicValuesWithConcrete(const SCEV* Sym, - const SCEV* Conc, + const SCEV *replaceSymbolicValuesWithConcrete(const SCEV *Sym, + const SCEV *Conc, ScalarEvolution &SE) const { - const SCEV* H = Op->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); + const SCEV *H = Op->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); if (H == Op) return this; return SE.getTruncateExpr(H, Ty); @@ -142,13 +142,13 @@ namespace llvm { class SCEVZeroExtendExpr : public SCEVCastExpr { friend class ScalarEvolution; - SCEVZeroExtendExpr(const SCEV* op, const Type *ty); + SCEVZeroExtendExpr(const SCEV *op, const Type *ty); public: - const SCEV* replaceSymbolicValuesWithConcrete(const SCEV* Sym, - const SCEV* Conc, + const SCEV *replaceSymbolicValuesWithConcrete(const SCEV *Sym, + const SCEV *Conc, ScalarEvolution &SE) const { - const SCEV* H = Op->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); + const SCEV *H = Op->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); if (H == Op) return this; return SE.getZeroExtendExpr(H, Ty); @@ -170,13 +170,13 @@ namespace llvm { class SCEVSignExtendExpr : public SCEVCastExpr { friend class ScalarEvolution; - SCEVSignExtendExpr(const SCEV* op, const Type *ty); + SCEVSignExtendExpr(const SCEV *op, const Type *ty); public: - const SCEV* replaceSymbolicValuesWithConcrete(const SCEV* Sym, - const SCEV* Conc, + const SCEV *replaceSymbolicValuesWithConcrete(const SCEV *Sym, + const SCEV *Conc, ScalarEvolution &SE) const { - const SCEV* H = Op->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); + const SCEV *H = Op->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); if (H == Op) return this; return SE.getSignExtendExpr(H, Ty); @@ -198,22 +198,24 @@ namespace llvm { /// class SCEVNAryExpr : public SCEV { protected: - SmallVector Operands; + SmallVector Operands; - SCEVNAryExpr(enum SCEVTypes T, const SmallVectorImpl 
&ops) + SCEVNAryExpr(enum SCEVTypes T, const SmallVectorImpl &ops) : SCEV(T), Operands(ops.begin(), ops.end()) {} public: virtual void Profile(FoldingSetNodeID &ID) const; unsigned getNumOperands() const { return (unsigned)Operands.size(); } - const SCEV* getOperand(unsigned i) const { + const SCEV *getOperand(unsigned i) const { assert(i < Operands.size() && "Operand index out of range!"); return Operands[i]; } - const SmallVectorImpl &getOperands() const { return Operands; } - typedef SmallVectorImpl::const_iterator op_iterator; + const SmallVectorImpl &getOperands() const { + return Operands; + } + typedef SmallVectorImpl::const_iterator op_iterator; op_iterator op_begin() const { return Operands.begin(); } op_iterator op_end() const { return Operands.end(); } @@ -260,12 +262,12 @@ namespace llvm { class SCEVCommutativeExpr : public SCEVNAryExpr { protected: SCEVCommutativeExpr(enum SCEVTypes T, - const SmallVectorImpl &ops) + const SmallVectorImpl &ops) : SCEVNAryExpr(T, ops) {} public: - const SCEV* replaceSymbolicValuesWithConcrete(const SCEV* Sym, - const SCEV* Conc, + const SCEV *replaceSymbolicValuesWithConcrete(const SCEV *Sym, + const SCEV *Conc, ScalarEvolution &SE) const; virtual const char *getOperationStr() const = 0; @@ -289,7 +291,7 @@ namespace llvm { class SCEVAddExpr : public SCEVCommutativeExpr { friend class ScalarEvolution; - explicit SCEVAddExpr(const SmallVectorImpl &ops) + explicit SCEVAddExpr(const SmallVectorImpl &ops) : SCEVCommutativeExpr(scAddExpr, ops) { } @@ -309,7 +311,7 @@ namespace llvm { class SCEVMulExpr : public SCEVCommutativeExpr { friend class ScalarEvolution; - explicit SCEVMulExpr(const SmallVectorImpl &ops) + explicit SCEVMulExpr(const SmallVectorImpl &ops) : SCEVCommutativeExpr(scMulExpr, ops) { } @@ -330,16 +332,16 @@ namespace llvm { class SCEVUDivExpr : public SCEV { friend class ScalarEvolution; - const SCEV* LHS; - const SCEV* RHS; - SCEVUDivExpr(const SCEV* lhs, const SCEV* rhs) + const SCEV *LHS; + const SCEV *RHS; + SCEVUDivExpr(const SCEV *lhs, const SCEV *rhs) : SCEV(scUDivExpr), LHS(lhs), RHS(rhs) {} public: virtual void Profile(FoldingSetNodeID &ID) const; - const SCEV* getLHS() const { return LHS; } - const SCEV* getRHS() const { return RHS; } + const SCEV *getLHS() const { return LHS; } + const SCEV *getRHS() const { return RHS; } virtual bool isLoopInvariant(const Loop *L) const { return LHS->isLoopInvariant(L) && RHS->isLoopInvariant(L); @@ -350,11 +352,11 @@ namespace llvm { RHS->hasComputableLoopEvolution(L); } - const SCEV* replaceSymbolicValuesWithConcrete(const SCEV* Sym, - const SCEV* Conc, + const SCEV *replaceSymbolicValuesWithConcrete(const SCEV *Sym, + const SCEV *Conc, ScalarEvolution &SE) const { - const SCEV* L = LHS->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); - const SCEV* R = RHS->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); + const SCEV *L = LHS->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); + const SCEV *R = RHS->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); if (L == LHS && R == RHS) return this; else @@ -389,7 +391,7 @@ namespace llvm { const Loop *L; - SCEVAddRecExpr(const SmallVectorImpl &ops, const Loop *l) + SCEVAddRecExpr(const SmallVectorImpl &ops, const Loop *l) : SCEVNAryExpr(scAddRecExpr, ops), L(l) { for (size_t i = 0, e = Operands.size(); i != e; ++i) assert(Operands[i]->isLoopInvariant(l) && @@ -399,15 +401,16 @@ namespace llvm { public: virtual void Profile(FoldingSetNodeID &ID) const; - const SCEV* getStart() const { return Operands[0]; } + const SCEV *getStart() const { return 
Operands[0]; } const Loop *getLoop() const { return L; } /// getStepRecurrence - This method constructs and returns the recurrence /// indicating how much this expression steps by. If this is a polynomial /// of degree N, it returns a chrec of degree N-1. - const SCEV* getStepRecurrence(ScalarEvolution &SE) const { + const SCEV *getStepRecurrence(ScalarEvolution &SE) const { if (isAffine()) return getOperand(1); - return SE.getAddRecExpr(SmallVector(op_begin()+1,op_end()), + return SE.getAddRecExpr(SmallVector(op_begin()+1, + op_end()), getLoop()); } @@ -435,7 +438,7 @@ namespace llvm { /// evaluateAtIteration - Return the value of this chain of recurrences at /// the specified iteration number. - const SCEV* evaluateAtIteration(const SCEV* It, ScalarEvolution &SE) const; + const SCEV *evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const; /// getNumIterationsInRange - Return the number of iterations of this loop /// that produce values in the specified constant range. Another way of @@ -443,11 +446,11 @@ namespace llvm { /// value is not in the condition, thus computing the exit count. If the /// iteration count can't be computed, an instance of SCEVCouldNotCompute is /// returned. - const SCEV* getNumIterationsInRange(ConstantRange Range, + const SCEV *getNumIterationsInRange(ConstantRange Range, ScalarEvolution &SE) const; - const SCEV* replaceSymbolicValuesWithConcrete(const SCEV* Sym, - const SCEV* Conc, + const SCEV *replaceSymbolicValuesWithConcrete(const SCEV *Sym, + const SCEV *Conc, ScalarEvolution &SE) const; virtual void print(raw_ostream &OS) const; @@ -466,7 +469,7 @@ namespace llvm { class SCEVSMaxExpr : public SCEVCommutativeExpr { friend class ScalarEvolution; - explicit SCEVSMaxExpr(const SmallVectorImpl &ops) + explicit SCEVSMaxExpr(const SmallVectorImpl &ops) : SCEVCommutativeExpr(scSMaxExpr, ops) { } @@ -487,7 +490,7 @@ namespace llvm { class SCEVUMaxExpr : public SCEVCommutativeExpr { friend class ScalarEvolution; - explicit SCEVUMaxExpr(const SmallVectorImpl &ops) + explicit SCEVUMaxExpr(const SmallVectorImpl &ops) : SCEVCommutativeExpr(scUMaxExpr, ops) { } @@ -524,8 +527,8 @@ namespace llvm { return false; // not computable } - const SCEV* replaceSymbolicValuesWithConcrete(const SCEV* Sym, - const SCEV* Conc, + const SCEV *replaceSymbolicValuesWithConcrete(const SCEV *Sym, + const SCEV *Conc, ScalarEvolution &SE) const { if (&*Sym == this) return Conc; return this; diff --git a/lib/Analysis/IVUsers.cpp b/lib/Analysis/IVUsers.cpp index caeb14bef37..317c8691640 100644 --- a/lib/Analysis/IVUsers.cpp +++ b/lib/Analysis/IVUsers.cpp @@ -39,7 +39,7 @@ Pass *llvm::createIVUsersPass() { /// containsAddRecFromDifferentLoop - Determine whether expression S involves a /// subexpression that is an AddRec from a loop other than L. An outer loop /// of L is OK, but not an inner loop nor a disjoint loop. -static bool containsAddRecFromDifferentLoop(const SCEV* S, Loop *L) { +static bool containsAddRecFromDifferentLoop(const SCEV *S, Loop *L) { // This is very common, put it first. if (isa(S)) return false; @@ -80,10 +80,10 @@ static bool containsAddRecFromDifferentLoop(const SCEV* S, Loop *L) { /// a mix of loop invariant and loop variant expressions. The start cannot, /// however, contain an AddRec from a different loop, unless that loop is an /// outer loop of the current loop. 
-static bool getSCEVStartAndStride(const SCEV* &SH, Loop *L, Loop *UseLoop, - const SCEV* &Start, const SCEV* &Stride, +static bool getSCEVStartAndStride(const SCEV *&SH, Loop *L, Loop *UseLoop, + const SCEV *&Start, const SCEV *&Stride, ScalarEvolution *SE, DominatorTree *DT) { - const SCEV* TheAddRec = Start; // Initialize to zero. + const SCEV *TheAddRec = Start; // Initialize to zero. // If the outer level is an AddExpr, the operands are all start values except // for a nested AddRecExpr. @@ -109,9 +109,9 @@ static bool getSCEVStartAndStride(const SCEV* &SH, Loop *L, Loop *UseLoop, // Use getSCEVAtScope to attempt to simplify other loops out of // the picture. - const SCEV* AddRecStart = AddRec->getStart(); + const SCEV *AddRecStart = AddRec->getStart(); AddRecStart = SE->getSCEVAtScope(AddRecStart, UseLoop); - const SCEV* AddRecStride = AddRec->getStepRecurrence(*SE); + const SCEV *AddRecStride = AddRec->getStepRecurrence(*SE); // FIXME: If Start contains an SCEVAddRecExpr from a different loop, other // than an outer loop of the current loop, reject it. LSR has no concept of @@ -196,13 +196,13 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) { return true; // Instruction already handled. // Get the symbolic expression for this instruction. - const SCEV* ISE = SE->getSCEV(I); + const SCEV *ISE = SE->getSCEV(I); if (isa(ISE)) return false; // Get the start and stride for this expression. Loop *UseLoop = LI->getLoopFor(I->getParent()); - const SCEV* Start = SE->getIntegerSCEV(0, ISE->getType()); - const SCEV* Stride = Start; + const SCEV *Start = SE->getIntegerSCEV(0, ISE->getType()); + const SCEV *Stride = Start; if (!getSCEVStartAndStride(ISE, L, UseLoop, Start, Stride, SE, DT)) return false; // Non-reducible symbolic expression, bail out. @@ -254,7 +254,7 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) { if (IVUseShouldUsePostIncValue(User, I, L, LI, DT, this)) { // The value used will be incremented by the stride more than we are // expecting, so subtract this off. - const SCEV* NewStart = SE->getMinusSCEV(Start, Stride); + const SCEV *NewStart = SE->getMinusSCEV(Start, Stride); StrideUses->addUser(NewStart, User, I); StrideUses->Users.back().setIsUseOfPostIncrementedValue(true); DOUT << " USING POSTINC SCEV, START=" << *NewStart<< "\n"; @@ -295,9 +295,9 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) { /// getReplacementExpr - Return a SCEV expression which computes the /// value of the OperandValToReplace of the given IVStrideUse. -const SCEV* IVUsers::getReplacementExpr(const IVStrideUse &U) const { +const SCEV *IVUsers::getReplacementExpr(const IVStrideUse &U) const { // Start with zero. - const SCEV* RetVal = SE->getIntegerSCEV(0, U.getParent()->Stride->getType()); + const SCEV *RetVal = SE->getIntegerSCEV(0, U.getParent()->Stride->getType()); // Create the basic add recurrence. RetVal = SE->getAddRecExpr(RetVal, U.getParent()->Stride, L); // Add the offset in a separate step, because it may be loop-variant. @@ -308,7 +308,7 @@ const SCEV* IVUsers::getReplacementExpr(const IVStrideUse &U) const { RetVal = SE->getAddExpr(RetVal, U.getParent()->Stride); // Evaluate the expression out of the loop, if possible. 
if (!L->contains(U.getUser()->getParent())) { - const SCEV* ExitVal = SE->getSCEVAtScope(RetVal, L->getParentLoop()); + const SCEV *ExitVal = SE->getSCEVAtScope(RetVal, L->getParentLoop()); if (ExitVal->isLoopInvariant(L)) RetVal = ExitVal; } @@ -325,7 +325,7 @@ void IVUsers::print(raw_ostream &OS, const Module *M) const { OS << ":\n"; for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e; ++Stride) { - std::map::const_iterator SI = + std::map::const_iterator SI = IVUsesByStride.find(StrideOrder[Stride]); assert(SI != IVUsesByStride.end() && "Stride doesn't exist!"); OS << " Stride " << *SI->first->getType() << " " << *SI->first << ":\n"; diff --git a/lib/Analysis/LoopVR.cpp b/lib/Analysis/LoopVR.cpp index 3800ef53580..1c78ef9a52d 100644 --- a/lib/Analysis/LoopVR.cpp +++ b/lib/Analysis/LoopVR.cpp @@ -27,8 +27,8 @@ char LoopVR::ID = 0; static RegisterPass X("loopvr", "Loop Value Ranges", false, true); /// getRange - determine the range for a particular SCEV within a given Loop -ConstantRange LoopVR::getRange(const SCEV* S, Loop *L, ScalarEvolution &SE) { - const SCEV* T = SE.getBackedgeTakenCount(L); +ConstantRange LoopVR::getRange(const SCEV *S, Loop *L, ScalarEvolution &SE) { + const SCEV *T = SE.getBackedgeTakenCount(L); if (isa(T)) return ConstantRange(cast(S->getType())->getBitWidth(), true); @@ -37,7 +37,7 @@ ConstantRange LoopVR::getRange(const SCEV* S, Loop *L, ScalarEvolution &SE) { } /// getRange - determine the range for a particular SCEV with a given trip count -ConstantRange LoopVR::getRange(const SCEV* S, const SCEV* T, ScalarEvolution &SE){ +ConstantRange LoopVR::getRange(const SCEV *S, const SCEV *T, ScalarEvolution &SE){ if (const SCEVConstant *C = dyn_cast(S)) return ConstantRange(C->getValue()->getValue()); @@ -183,8 +183,8 @@ ConstantRange LoopVR::getRange(const SCEV* S, const SCEV* T, ScalarEvolution &SE if (!Trip) return FullSet; if (AddRec->isAffine()) { - const SCEV* StartHandle = AddRec->getStart(); - const SCEV* StepHandle = AddRec->getOperand(1); + const SCEV *StartHandle = AddRec->getStart(); + const SCEV *StepHandle = AddRec->getOperand(1); const SCEVConstant *Step = dyn_cast(StepHandle); if (!Step) return FullSet; @@ -195,7 +195,7 @@ ConstantRange LoopVR::getRange(const SCEV* S, const SCEV* T, ScalarEvolution &SE if ((TripExt * StepExt).ugt(APInt::getLowBitsSet(ExWidth, ExWidth >> 1))) return FullSet; - const SCEV* EndHandle = SE.getAddExpr(StartHandle, + const SCEV *EndHandle = SE.getAddExpr(StartHandle, SE.getMulExpr(T, StepHandle)); const SCEVConstant *Start = dyn_cast(StartHandle); const SCEVConstant *End = dyn_cast(EndHandle); @@ -255,7 +255,7 @@ ConstantRange LoopVR::compute(Value *V) { ScalarEvolution &SE = getAnalysis(); - const SCEV* S = SE.getSCEV(I); + const SCEV *S = SE.getSCEV(I); if (isa(S) || isa(S)) return ConstantRange(cast(V->getType())->getBitWidth(), false); diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp index 577315879bf..a411f2dab06 100644 --- a/lib/Analysis/ScalarEvolution.cpp +++ b/lib/Analysis/ScalarEvolution.cpp @@ -14,7 +14,7 @@ // There are several aspects to this library. First is the representation of // scalar expressions, which are represented as subclasses of the SCEV class. // These classes are used to represent certain types of subexpressions that we -// can handle. These classes are reference counted, managed by the const SCEV* +// can handle. These classes are reference counted, managed by the const SCEV * // class. 
We only create one SCEV of a particular shape, so pointer-comparisons // for equality are legal. // @@ -180,7 +180,7 @@ bool SCEVCouldNotCompute::classof(const SCEV *S) { return S->getSCEVType() == scCouldNotCompute; } -const SCEV* ScalarEvolution::getConstant(ConstantInt *V) { +const SCEV *ScalarEvolution::getConstant(ConstantInt *V) { FoldingSetNodeID ID; ID.AddInteger(scConstant); ID.AddPointer(V); @@ -192,11 +192,11 @@ const SCEV* ScalarEvolution::getConstant(ConstantInt *V) { return S; } -const SCEV* ScalarEvolution::getConstant(const APInt& Val) { +const SCEV *ScalarEvolution::getConstant(const APInt& Val) { return getConstant(ConstantInt::get(Val)); } -const SCEV* +const SCEV * ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) { return getConstant(ConstantInt::get(cast(Ty), V, isSigned)); } @@ -213,7 +213,7 @@ void SCEVConstant::print(raw_ostream &OS) const { } SCEVCastExpr::SCEVCastExpr(unsigned SCEVTy, - const SCEV* op, const Type *ty) + const SCEV *op, const Type *ty) : SCEV(SCEVTy), Op(op), Ty(ty) {} void SCEVCastExpr::Profile(FoldingSetNodeID &ID) const { @@ -226,7 +226,7 @@ bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const { return Op->dominates(BB, DT); } -SCEVTruncateExpr::SCEVTruncateExpr(const SCEV* op, const Type *ty) +SCEVTruncateExpr::SCEVTruncateExpr(const SCEV *op, const Type *ty) : SCEVCastExpr(scTruncate, op, ty) { assert((Op->getType()->isInteger() || isa(Op->getType())) && (Ty->isInteger() || isa(Ty)) && @@ -237,7 +237,7 @@ void SCEVTruncateExpr::print(raw_ostream &OS) const { OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")"; } -SCEVZeroExtendExpr::SCEVZeroExtendExpr(const SCEV* op, const Type *ty) +SCEVZeroExtendExpr::SCEVZeroExtendExpr(const SCEV *op, const Type *ty) : SCEVCastExpr(scZeroExtend, op, ty) { assert((Op->getType()->isInteger() || isa(Op->getType())) && (Ty->isInteger() || isa(Ty)) && @@ -248,7 +248,7 @@ void SCEVZeroExtendExpr::print(raw_ostream &OS) const { OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")"; } -SCEVSignExtendExpr::SCEVSignExtendExpr(const SCEV* op, const Type *ty) +SCEVSignExtendExpr::SCEVSignExtendExpr(const SCEV *op, const Type *ty) : SCEVCastExpr(scSignExtend, op, ty) { assert((Op->getType()->isInteger() || isa(Op->getType())) && (Ty->isInteger() || isa(Ty)) && @@ -274,10 +274,10 @@ SCEVCommutativeExpr::replaceSymbolicValuesWithConcrete( const SCEV *Conc, ScalarEvolution &SE) const { for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { - const SCEV* H = + const SCEV *H = getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); if (H != getOperand(i)) { - SmallVector NewOps; + SmallVector NewOps; NewOps.reserve(getNumOperands()); for (unsigned j = 0; j != i; ++j) NewOps.push_back(getOperand(j)); @@ -352,10 +352,10 @@ SCEVAddRecExpr::replaceSymbolicValuesWithConcrete(const SCEV *Sym, const SCEV *Conc, ScalarEvolution &SE) const { for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { - const SCEV* H = + const SCEV *H = getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE); if (H != getOperand(i)) { - SmallVector NewOps; + SmallVector NewOps; NewOps.reserve(getNumOperands()); for (unsigned j = 0; j != i; ++j) NewOps.push_back(getOperand(j)); @@ -558,7 +558,7 @@ namespace { /// this to depend on where the addresses of various SCEV objects happened to /// land in memory. 
/// -static void GroupByComplexity(SmallVectorImpl &Ops, +static void GroupByComplexity(SmallVectorImpl &Ops, LoopInfo *LI) { if (Ops.size() < 2) return; // Noop if (Ops.size() == 2) { @@ -601,7 +601,7 @@ static void GroupByComplexity(SmallVectorImpl &Ops, /// BinomialCoefficient - Compute BC(It, K). The result has width W. /// Assume, K > 0. -static const SCEV* BinomialCoefficient(const SCEV* It, unsigned K, +static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, const Type* ResultTy) { // Handle the simplest case efficiently. @@ -694,15 +694,15 @@ static const SCEV* BinomialCoefficient(const SCEV* It, unsigned K, // Calculate the product, at width T+W const IntegerType *CalculationTy = IntegerType::get(CalculationBits); - const SCEV* Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy); + const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy); for (unsigned i = 1; i != K; ++i) { - const SCEV* S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType())); + const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType())); Dividend = SE.getMulExpr(Dividend, SE.getTruncateOrZeroExtend(S, CalculationTy)); } // Divide by 2^T - const SCEV* DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor)); + const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor)); // Truncate the result, and divide by K! / 2^T. @@ -719,14 +719,14 @@ static const SCEV* BinomialCoefficient(const SCEV* It, unsigned K, /// /// where BC(It, k) stands for binomial coefficient. /// -const SCEV* SCEVAddRecExpr::evaluateAtIteration(const SCEV* It, +const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const { - const SCEV* Result = getStart(); + const SCEV *Result = getStart(); for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { // The computation is correct in the face of overflow provided that the // multiplication is performed _after_ the evaluation of the binomial // coefficient. - const SCEV* Coeff = BinomialCoefficient(It, i, SE, getType()); + const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType()); if (isa(Coeff)) return Coeff; @@ -739,7 +739,7 @@ const SCEV* SCEVAddRecExpr::evaluateAtIteration(const SCEV* It, // SCEV Expression folder implementations //===----------------------------------------------------------------------===// -const SCEV* ScalarEvolution::getTruncateExpr(const SCEV* Op, +const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, const Type *Ty) { assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) && "This is not a truncating conversion!"); @@ -766,7 +766,7 @@ const SCEV* ScalarEvolution::getTruncateExpr(const SCEV* Op, // If the input value is a chrec scev, truncate the chrec's operands. if (const SCEVAddRecExpr *AddRec = dyn_cast(Op)) { - SmallVector Operands; + SmallVector Operands; for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty)); return getAddRecExpr(Operands, AddRec->getLoop()); @@ -784,7 +784,7 @@ const SCEV* ScalarEvolution::getTruncateExpr(const SCEV* Op, return S; } -const SCEV* ScalarEvolution::getZeroExtendExpr(const SCEV* Op, +const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, const Type *Ty) { assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && "This is not an extending conversion!"); @@ -818,28 +818,28 @@ const SCEV* ScalarEvolution::getZeroExtendExpr(const SCEV* Op, // in infinite recursion. 
In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
-      const SCEV* MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
+      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
         // Manually compute the final value for AR, checking for
         // overflow.
-        const SCEV* Start = AR->getStart();
-        const SCEV* Step = AR->getStepRecurrence(*this);
+        const SCEV *Start = AR->getStart();
+        const SCEV *Step = AR->getStepRecurrence(*this);

         // Check whether the backedge-taken count can be losslessly casted to
         // the addrec's type. The count is always unsigned.
-        const SCEV* CastedMaxBECount =
+        const SCEV *CastedMaxBECount =
           getTruncateOrZeroExtend(MaxBECount, Start->getType());
-        const SCEV* RecastedMaxBECount =
+        const SCEV *RecastedMaxBECount =
           getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
         if (MaxBECount == RecastedMaxBECount) {
           const Type *WideTy =
             IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
           // Check whether Start+Step*MaxBECount has no unsigned overflow.
-          const SCEV* ZMul =
+          const SCEV *ZMul =
             getMulExpr(CastedMaxBECount,
                        getTruncateOrZeroExtend(Step, Start->getType()));
-          const SCEV* Add = getAddExpr(Start, ZMul);
-          const SCEV* OperandExtendedAdd =
+          const SCEV *Add = getAddExpr(Start, ZMul);
+          const SCEV *OperandExtendedAdd =
             getAddExpr(getZeroExtendExpr(Start, WideTy),
                        getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                   getZeroExtendExpr(Step, WideTy)));
@@ -851,7 +851,7 @@ const SCEV* ScalarEvolution::getZeroExtendExpr(const SCEV* Op,

           // Similar to above, only this time treat the step value as signed.
           // This covers loops that count down.
-          const SCEV* SMul =
+          const SCEV *SMul =
             getMulExpr(CastedMaxBECount,
                        getTruncateOrSignExtend(Step, Start->getType()));
           Add = getAddExpr(Start, SMul);
@@ -880,7 +880,7 @@ const SCEV* ScalarEvolution::getZeroExtendExpr(const SCEV* Op,
   return S;
 }

-const SCEV* ScalarEvolution::getSignExtendExpr(const SCEV* Op,
+const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                                const Type *Ty) {
   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
          "This is not an extending conversion!");
@@ -914,28 +914,28 @@ const SCEV* ScalarEvolution::getSignExtendExpr(const SCEV* Op,
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
-      const SCEV* MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
+      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
         // Manually compute the final value for AR, checking for
         // overflow.
-        const SCEV* Start = AR->getStart();
-        const SCEV* Step = AR->getStepRecurrence(*this);
+        const SCEV *Start = AR->getStart();
+        const SCEV *Step = AR->getStepRecurrence(*this);

         // Check whether the backedge-taken count can be losslessly casted to
         // the addrec's type. The count is always unsigned.
-        const SCEV* CastedMaxBECount =
+        const SCEV *CastedMaxBECount =
           getTruncateOrZeroExtend(MaxBECount, Start->getType());
-        const SCEV* RecastedMaxBECount =
+        const SCEV *RecastedMaxBECount =
           getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
         if (MaxBECount == RecastedMaxBECount) {
           const Type *WideTy =
             IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
           // Check whether Start+Step*MaxBECount has no signed overflow.
- const SCEV* SMul = + const SCEV *SMul = getMulExpr(CastedMaxBECount, getTruncateOrSignExtend(Step, Start->getType())); - const SCEV* Add = getAddExpr(Start, SMul); - const SCEV* OperandExtendedAdd = + const SCEV *Add = getAddExpr(Start, SMul); + const SCEV *OperandExtendedAdd = getAddExpr(getSignExtendExpr(Start, WideTy), getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy), getSignExtendExpr(Step, WideTy))); @@ -963,7 +963,7 @@ const SCEV* ScalarEvolution::getSignExtendExpr(const SCEV* Op, /// getAnyExtendExpr - Return a SCEV for the given operand extended with /// unspecified bits out to the given type. /// -const SCEV* ScalarEvolution::getAnyExtendExpr(const SCEV* Op, +const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, const Type *Ty) { assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && "This is not an extending conversion!"); @@ -978,19 +978,19 @@ const SCEV* ScalarEvolution::getAnyExtendExpr(const SCEV* Op, // Peel off a truncate cast. if (const SCEVTruncateExpr *T = dyn_cast(Op)) { - const SCEV* NewOp = T->getOperand(); + const SCEV *NewOp = T->getOperand(); if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) return getAnyExtendExpr(NewOp, Ty); return getTruncateOrNoop(NewOp, Ty); } // Next try a zext cast. If the cast is folded, use it. - const SCEV* ZExt = getZeroExtendExpr(Op, Ty); + const SCEV *ZExt = getZeroExtendExpr(Op, Ty); if (!isa(ZExt)) return ZExt; // Next try a sext cast. If the cast is folded, use it. - const SCEV* SExt = getSignExtendExpr(Op, Ty); + const SCEV *SExt = getSignExtendExpr(Op, Ty); if (!isa(SExt)) return SExt; @@ -1028,10 +1028,10 @@ const SCEV* ScalarEvolution::getAnyExtendExpr(const SCEV* Op, /// is also used as a check to avoid infinite recursion. /// static bool -CollectAddOperandsWithScales(DenseMap &M, - SmallVector &NewOps, +CollectAddOperandsWithScales(DenseMap &M, + SmallVector &NewOps, APInt &AccumulatedConstant, - const SmallVectorImpl &Ops, + const SmallVectorImpl &Ops, const APInt &Scale, ScalarEvolution &SE) { bool Interesting = false; @@ -1052,9 +1052,9 @@ CollectAddOperandsWithScales(DenseMap &M, } else { // A multiplication of a constant with some other value. Update // the map. - SmallVector MulOps(Mul->op_begin()+1, Mul->op_end()); - const SCEV* Key = SE.getMulExpr(MulOps); - std::pair::iterator, bool> Pair = + SmallVector MulOps(Mul->op_begin()+1, Mul->op_end()); + const SCEV *Key = SE.getMulExpr(MulOps); + std::pair::iterator, bool> Pair = M.insert(std::make_pair(Key, NewScale)); if (Pair.second) { NewOps.push_back(Pair.first->first); @@ -1072,7 +1072,7 @@ CollectAddOperandsWithScales(DenseMap &M, AccumulatedConstant += Scale * C->getValue()->getValue(); } else { // An ordinary operand. Update the map. - std::pair::iterator, bool> Pair = + std::pair::iterator, bool> Pair = M.insert(std::make_pair(Ops[i], Scale)); if (Pair.second) { NewOps.push_back(Pair.first->first); @@ -1098,7 +1098,7 @@ namespace { /// getAddExpr - Get a canonical add expression, or something simpler if /// possible. -const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { +const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { assert(!Ops.empty() && "Cannot get empty add!"); if (Ops.size() == 1) return Ops[0]; #ifndef NDEBUG @@ -1142,8 +1142,8 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 // Found a match, merge the two values into a multiply, and add any // remaining values to the result. 
- const SCEV* Two = getIntegerSCEV(2, Ty); - const SCEV* Mul = getMulExpr(Ops[i], Two); + const SCEV *Two = getIntegerSCEV(2, Ty); + const SCEV *Mul = getMulExpr(Ops[i], Two); if (Ops.size() == 2) return Mul; Ops.erase(Ops.begin()+i, Ops.begin()+i+2); @@ -1159,7 +1159,7 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { const SCEVTruncateExpr *Trunc = cast(Ops[Idx]); const Type *DstType = Trunc->getType(); const Type *SrcType = Trunc->getOperand()->getType(); - SmallVector LargeOps; + SmallVector LargeOps; bool Ok = true; // Check all the operands to see if they can be represented in the // source type of the truncate. @@ -1175,7 +1175,7 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { // is much more likely to be foldable here. LargeOps.push_back(getSignExtendExpr(C, SrcType)); } else if (const SCEVMulExpr *M = dyn_cast(Ops[i])) { - SmallVector LargeMulOps; + SmallVector LargeMulOps; for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { if (const SCEVTruncateExpr *T = dyn_cast(M->getOperand(j))) { @@ -1203,7 +1203,7 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { } if (Ok) { // Evaluate the expression in the larger type. - const SCEV* Fold = getAddExpr(LargeOps); + const SCEV *Fold = getAddExpr(LargeOps); // If it folds to something simple, use it. Otherwise, don't. if (isa(Fold) || isa(Fold)) return getTruncateExpr(Fold, DstType); @@ -1240,16 +1240,16 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { // operands multiplied by constant values. if (Idx < Ops.size() && isa(Ops[Idx])) { uint64_t BitWidth = getTypeSizeInBits(Ty); - DenseMap M; - SmallVector NewOps; + DenseMap M; + SmallVector NewOps; APInt AccumulatedConstant(BitWidth, 0); if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, Ops, APInt(BitWidth, 1), *this)) { // Some interesting folding opportunity is present, so its worthwhile to // re-generate the operands list. Group the operands by constant scale, // to avoid multiplying by the same constant scale multiple times. - std::map, APIntCompare> MulOpLists; - for (SmallVector::iterator I = NewOps.begin(), + std::map, APIntCompare> MulOpLists; + for (SmallVector::iterator I = NewOps.begin(), E = NewOps.end(); I != E; ++I) MulOpLists[M.find(*I)->second].push_back(*I); // Re-generate the operands list. @@ -1279,17 +1279,17 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) if (MulOpSCEV == Ops[AddOp] && !isa(Ops[AddOp])) { // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) - const SCEV* InnerMul = Mul->getOperand(MulOp == 0); + const SCEV *InnerMul = Mul->getOperand(MulOp == 0); if (Mul->getNumOperands() != 2) { // If the multiply has more than two operands, we must get the // Y*Z term. 
- SmallVector MulOps(Mul->op_begin(), Mul->op_end()); + SmallVector MulOps(Mul->op_begin(), Mul->op_end()); MulOps.erase(MulOps.begin()+MulOp); InnerMul = getMulExpr(MulOps); } - const SCEV* One = getIntegerSCEV(1, Ty); - const SCEV* AddOne = getAddExpr(InnerMul, One); - const SCEV* OuterMul = getMulExpr(AddOne, Ops[AddOp]); + const SCEV *One = getIntegerSCEV(1, Ty); + const SCEV *AddOne = getAddExpr(InnerMul, One); + const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]); if (Ops.size() == 2) return OuterMul; if (AddOp < Idx) { Ops.erase(Ops.begin()+AddOp); @@ -1313,22 +1313,22 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { OMulOp != e; ++OMulOp) if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) - const SCEV* InnerMul1 = Mul->getOperand(MulOp == 0); + const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); if (Mul->getNumOperands() != 2) { SmallVector MulOps(Mul->op_begin(), Mul->op_end()); MulOps.erase(MulOps.begin()+MulOp); InnerMul1 = getMulExpr(MulOps); } - const SCEV* InnerMul2 = OtherMul->getOperand(OMulOp == 0); + const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); if (OtherMul->getNumOperands() != 2) { SmallVector MulOps(OtherMul->op_begin(), OtherMul->op_end()); MulOps.erase(MulOps.begin()+OMulOp); InnerMul2 = getMulExpr(MulOps); } - const SCEV* InnerMulSum = getAddExpr(InnerMul1,InnerMul2); - const SCEV* OuterMul = getMulExpr(MulOpSCEV, InnerMulSum); + const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2); + const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum); if (Ops.size() == 2) return OuterMul; Ops.erase(Ops.begin()+Idx); Ops.erase(Ops.begin()+OtherMulIdx-1); @@ -1349,7 +1349,7 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { for (; Idx < Ops.size() && isa(Ops[Idx]); ++Idx) { // Scan all of the other operands to this add and add them to the vector if // they are loop invariant w.r.t. the recurrence. - SmallVector LIOps; + SmallVector LIOps; const SCEVAddRecExpr *AddRec = cast(Ops[Idx]); for (unsigned i = 0, e = Ops.size(); i != e; ++i) if (Ops[i]->isLoopInvariant(AddRec->getLoop())) { @@ -1363,11 +1363,11 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} LIOps.push_back(AddRec->getStart()); - SmallVector AddRecOps(AddRec->op_begin(), + SmallVector AddRecOps(AddRec->op_begin(), AddRec->op_end()); AddRecOps[0] = getAddExpr(LIOps); - const SCEV* NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop()); + const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop()); // If all of the other operands were loop invariant, we are done. if (Ops.size() == 1) return NewRec; @@ -1399,7 +1399,7 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { } NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i)); } - const SCEV* NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop()); + const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop()); if (Ops.size() == 2) return NewAddRec; @@ -1432,7 +1432,7 @@ const SCEV* ScalarEvolution::getAddExpr(SmallVectorImpl &Ops) { /// getMulExpr - Get a canonical multiply expression, or something simpler if /// possible. 
-const SCEV* ScalarEvolution::getMulExpr(SmallVectorImpl &Ops) { +const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl &Ops) { assert(!Ops.empty() && "Cannot get empty mul!"); #ifndef NDEBUG for (unsigned i = 1, e = Ops.size(); i != e; ++i) @@ -1513,7 +1513,7 @@ const SCEV* ScalarEvolution::getMulExpr(SmallVectorImpl &Ops) { for (; Idx < Ops.size() && isa(Ops[Idx]); ++Idx) { // Scan all of the other operands to this mul and add them to the vector if // they are loop invariant w.r.t. the recurrence. - SmallVector LIOps; + SmallVector LIOps; const SCEVAddRecExpr *AddRec = cast(Ops[Idx]); for (unsigned i = 0, e = Ops.size(); i != e; ++i) if (Ops[i]->isLoopInvariant(AddRec->getLoop())) { @@ -1525,7 +1525,7 @@ const SCEV* ScalarEvolution::getMulExpr(SmallVectorImpl &Ops) { // If we found some loop invariants, fold them into the recurrence. if (!LIOps.empty()) { // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} - SmallVector NewOps; + SmallVector NewOps; NewOps.reserve(AddRec->getNumOperands()); if (LIOps.size() == 1) { const SCEV *Scale = LIOps[0]; @@ -1533,13 +1533,13 @@ const SCEV* ScalarEvolution::getMulExpr(SmallVectorImpl &Ops) { NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i))); } else { for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { - SmallVector MulOps(LIOps.begin(), LIOps.end()); + SmallVector MulOps(LIOps.begin(), LIOps.end()); MulOps.push_back(AddRec->getOperand(i)); NewOps.push_back(getMulExpr(MulOps)); } } - const SCEV* NewRec = getAddRecExpr(NewOps, AddRec->getLoop()); + const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop()); // If all of the other operands were loop invariant, we are done. if (Ops.size() == 1) return NewRec; @@ -1563,14 +1563,14 @@ const SCEV* ScalarEvolution::getMulExpr(SmallVectorImpl &Ops) { if (AddRec->getLoop() == OtherAddRec->getLoop()) { // F * G --> {A,+,B} * {C,+,D} --> {A*C,+,F*D + G*B + B*D} const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec; - const SCEV* NewStart = getMulExpr(F->getStart(), + const SCEV *NewStart = getMulExpr(F->getStart(), G->getStart()); - const SCEV* B = F->getStepRecurrence(*this); - const SCEV* D = G->getStepRecurrence(*this); - const SCEV* NewStep = getAddExpr(getMulExpr(F, D), + const SCEV *B = F->getStepRecurrence(*this); + const SCEV *D = G->getStepRecurrence(*this); + const SCEV *NewStep = getAddExpr(getMulExpr(F, D), getMulExpr(G, B), getMulExpr(B, D)); - const SCEV* NewAddRec = getAddRecExpr(NewStart, NewStep, + const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep, F->getLoop()); if (Ops.size() == 2) return NewAddRec; @@ -1636,24 +1636,24 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), getZeroExtendExpr(Step, ExtTy), AR->getLoop())) { - SmallVector Operands; + SmallVector Operands; for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i) Operands.push_back(getUDivExpr(AR->getOperand(i), RHS)); return getAddRecExpr(Operands, AR->getLoop()); } // (A*B)/C --> A*(B/C) if safe and B/C can be folded. if (const SCEVMulExpr *M = dyn_cast(LHS)) { - SmallVector Operands; + SmallVector Operands; for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy)); if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) // Find an operand that's safely divisible. 
for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { - const SCEV* Op = M->getOperand(i); - const SCEV* Div = getUDivExpr(Op, RHSC); + const SCEV *Op = M->getOperand(i); + const SCEV *Div = getUDivExpr(Op, RHSC); if (!isa(Div) && getMulExpr(Div, RHSC) == Op) { - const SmallVectorImpl &MOperands = M->getOperands(); - Operands = SmallVector(MOperands.begin(), + const SmallVectorImpl &MOperands = M->getOperands(); + Operands = SmallVector(MOperands.begin(), MOperands.end()); Operands[i] = Div; return getMulExpr(Operands); @@ -1662,13 +1662,13 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, } // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. if (const SCEVAddRecExpr *A = dyn_cast(LHS)) { - SmallVector Operands; + SmallVector Operands; for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy)); if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { Operands.clear(); for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { - const SCEV* Op = getUDivExpr(A->getOperand(i), RHS); + const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); if (isa(Op) || getMulExpr(Op, RHS) != A->getOperand(i)) break; Operands.push_back(Op); @@ -1702,9 +1702,9 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, /// getAddRecExpr - Get an add recurrence expression for the specified loop. /// Simplify the expression as much as possible. -const SCEV* ScalarEvolution::getAddRecExpr(const SCEV* Start, - const SCEV* Step, const Loop *L) { - SmallVector Operands; +const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, + const SCEV *Step, const Loop *L) { + SmallVector Operands; Operands.push_back(Start); if (const SCEVAddRecExpr *StepChrec = dyn_cast(Step)) if (StepChrec->getLoop() == L) { @@ -1720,7 +1720,7 @@ const SCEV* ScalarEvolution::getAddRecExpr(const SCEV* Start, /// getAddRecExpr - Get an add recurrence expression for the specified loop. /// Simplify the expression as much as possible. 
const SCEV * -ScalarEvolution::getAddRecExpr(SmallVectorImpl &Operands, +ScalarEvolution::getAddRecExpr(SmallVectorImpl &Operands, const Loop *L) { if (Operands.size() == 1) return Operands[0]; #ifndef NDEBUG @@ -1739,7 +1739,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl &Operands, if (const SCEVAddRecExpr *NestedAR = dyn_cast(Operands[0])) { const Loop* NestedLoop = NestedAR->getLoop(); if (L->getLoopDepth() < NestedLoop->getLoopDepth()) { - SmallVector NestedOperands(NestedAR->op_begin(), + SmallVector NestedOperands(NestedAR->op_begin(), NestedAR->op_end()); Operands[0] = NestedAR->getStart(); // AddRecs require their operands be loop-invariant with respect to their @@ -1784,14 +1784,14 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl &Operands, const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) { - SmallVector Ops; + SmallVector Ops; Ops.push_back(LHS); Ops.push_back(RHS); return getSMaxExpr(Ops); } -const SCEV* -ScalarEvolution::getSMaxExpr(SmallVectorImpl &Ops) { +const SCEV * +ScalarEvolution::getSMaxExpr(SmallVectorImpl &Ops) { assert(!Ops.empty() && "Cannot get empty smax!"); if (Ops.size() == 1) return Ops[0]; #ifndef NDEBUG @@ -1881,14 +1881,14 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl &Ops) { const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) { - SmallVector Ops; + SmallVector Ops; Ops.push_back(LHS); Ops.push_back(RHS); return getUMaxExpr(Ops); } -const SCEV* -ScalarEvolution::getUMaxExpr(SmallVectorImpl &Ops) { +const SCEV * +ScalarEvolution::getUMaxExpr(SmallVectorImpl &Ops) { assert(!Ops.empty() && "Cannot get empty umax!"); if (Ops.size() == 1) return Ops[0]; #ifndef NDEBUG @@ -1988,7 +1988,7 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); } -const SCEV* ScalarEvolution::getUnknown(Value *V) { +const SCEV *ScalarEvolution::getUnknown(Value *V) { // Don't attempt to do anything other than create a SCEVUnknown object // here. createSCEV only calls getUnknown after checking for all other // interesting possibilities, and any other code that calls getUnknown @@ -2055,7 +2055,7 @@ const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const { return TD->getIntPtrType(); } -const SCEV* ScalarEvolution::getCouldNotCompute() { +const SCEV *ScalarEvolution::getCouldNotCompute() { return &CouldNotCompute; } @@ -2067,26 +2067,26 @@ bool ScalarEvolution::hasSCEV(Value *V) const { /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the /// expression and create a new one. -const SCEV* ScalarEvolution::getSCEV(Value *V) { +const SCEV *ScalarEvolution::getSCEV(Value *V) { assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); - std::map::iterator I = Scalars.find(V); + std::map::iterator I = Scalars.find(V); if (I != Scalars.end()) return I->second; - const SCEV* S = createSCEV(V); + const SCEV *S = createSCEV(V); Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S)); return S; } /// getIntegerSCEV - Given a SCEVable type, create a constant for the /// specified signed integer value and return a SCEV for the constant. 
-const SCEV* ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) { +const SCEV *ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) { const IntegerType *ITy = cast(getEffectiveSCEVType(Ty)); return getConstant(ConstantInt::get(ITy, Val)); } /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V /// -const SCEV* ScalarEvolution::getNegativeSCEV(const SCEV* V) { +const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) { if (const SCEVConstant *VC = dyn_cast(V)) return getConstant(cast(ConstantExpr::getNeg(VC->getValue()))); @@ -2096,13 +2096,13 @@ const SCEV* ScalarEvolution::getNegativeSCEV(const SCEV* V) { } /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V -const SCEV* ScalarEvolution::getNotSCEV(const SCEV* V) { +const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { if (const SCEVConstant *VC = dyn_cast(V)) return getConstant(cast(ConstantExpr::getNot(VC->getValue()))); const Type *Ty = V->getType(); Ty = getEffectiveSCEVType(Ty); - const SCEV* AllOnes = getConstant(ConstantInt::getAllOnesValue(Ty)); + const SCEV *AllOnes = getConstant(ConstantInt::getAllOnesValue(Ty)); return getMinusSCEV(AllOnes, V); } @@ -2117,8 +2117,8 @@ const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the /// input value to the specified type. If the type must be extended, it is zero /// extended. -const SCEV* -ScalarEvolution::getTruncateOrZeroExtend(const SCEV* V, +const SCEV * +ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, const Type *Ty) { const Type *SrcTy = V->getType(); assert((SrcTy->isInteger() || (TD && isa(SrcTy))) && @@ -2134,8 +2134,8 @@ ScalarEvolution::getTruncateOrZeroExtend(const SCEV* V, /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the /// input value to the specified type. If the type must be extended, it is sign /// extended. -const SCEV* -ScalarEvolution::getTruncateOrSignExtend(const SCEV* V, +const SCEV * +ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, const Type *Ty) { const Type *SrcTy = V->getType(); assert((SrcTy->isInteger() || (TD && isa(SrcTy))) && @@ -2151,8 +2151,8 @@ ScalarEvolution::getTruncateOrSignExtend(const SCEV* V, /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the /// input value to the specified type. If the type must be extended, it is zero /// extended. The conversion must not be narrowing. -const SCEV* -ScalarEvolution::getNoopOrZeroExtend(const SCEV* V, const Type *Ty) { +const SCEV * +ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) { const Type *SrcTy = V->getType(); assert((SrcTy->isInteger() || (TD && isa(SrcTy))) && (Ty->isInteger() || (TD && isa(Ty))) && @@ -2167,8 +2167,8 @@ ScalarEvolution::getNoopOrZeroExtend(const SCEV* V, const Type *Ty) { /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the /// input value to the specified type. If the type must be extended, it is sign /// extended. The conversion must not be narrowing. -const SCEV* -ScalarEvolution::getNoopOrSignExtend(const SCEV* V, const Type *Ty) { +const SCEV * +ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) { const Type *SrcTy = V->getType(); assert((SrcTy->isInteger() || (TD && isa(SrcTy))) && (Ty->isInteger() || (TD && isa(Ty))) && @@ -2184,8 +2184,8 @@ ScalarEvolution::getNoopOrSignExtend(const SCEV* V, const Type *Ty) { /// the input value to the specified type. If the type must be extended, /// it is extended with unspecified bits. 
The conversion must not be /// narrowing. -const SCEV* -ScalarEvolution::getNoopOrAnyExtend(const SCEV* V, const Type *Ty) { +const SCEV * +ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) { const Type *SrcTy = V->getType(); assert((SrcTy->isInteger() || (TD && isa(SrcTy))) && (Ty->isInteger() || (TD && isa(Ty))) && @@ -2199,8 +2199,8 @@ ScalarEvolution::getNoopOrAnyExtend(const SCEV* V, const Type *Ty) { /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the /// input value to the specified type. The conversion must not be widening. -const SCEV* -ScalarEvolution::getTruncateOrNoop(const SCEV* V, const Type *Ty) { +const SCEV * +ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) { const Type *SrcTy = V->getType(); assert((SrcTy->isInteger() || (TD && isa(SrcTy))) && (Ty->isInteger() || (TD && isa(Ty))) && @@ -2217,8 +2217,8 @@ ScalarEvolution::getTruncateOrNoop(const SCEV* V, const Type *Ty) { /// with them. const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS) { - const SCEV* PromotedLHS = LHS; - const SCEV* PromotedRHS = RHS; + const SCEV *PromotedLHS = LHS; + const SCEV *PromotedRHS = RHS; if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); @@ -2233,8 +2233,8 @@ const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, /// with them. const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS) { - const SCEV* PromotedLHS = LHS; - const SCEV* PromotedRHS = RHS; + const SCEV *PromotedLHS = LHS; + const SCEV *PromotedRHS = RHS; if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); @@ -2251,11 +2251,11 @@ void ScalarEvolution::ReplaceSymbolicValueWithConcrete(Instruction *I, const SCEV *SymName, const SCEV *NewVal) { - std::map::iterator SI = + std::map::iterator SI = Scalars.find(SCEVCallbackVH(I, this)); if (SI == Scalars.end()) return; - const SCEV* NV = + const SCEV *NV = SI->second->replaceSymbolicValuesWithConcrete(SymName, NewVal, *this); if (NV == SI->second) return; // No change. @@ -2271,7 +2271,7 @@ ScalarEvolution::ReplaceSymbolicValueWithConcrete(Instruction *I, /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in /// a loop header, making it a potential recurrence, or it doesn't. /// -const SCEV* ScalarEvolution::createNodeForPHI(PHINode *PN) { +const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { if (PN->getNumIncomingValues() == 2) // The loops have been canonicalized. if (const Loop *L = LI->getLoopFor(PN->getParent())) if (L->getHeader() == PN->getParent()) { @@ -2281,14 +2281,14 @@ const SCEV* ScalarEvolution::createNodeForPHI(PHINode *PN) { unsigned BackEdge = IncomingEdge^1; // While we are analyzing this PHI node, handle its value symbolically. - const SCEV* SymbolicName = getUnknown(PN); + const SCEV *SymbolicName = getUnknown(PN); assert(Scalars.find(PN) == Scalars.end() && "PHI node already processed?"); Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName)); // Using this symbolic name for the PHI, analyze the value coming around // the back-edge. - const SCEV* BEValue = getSCEV(PN->getIncomingValue(BackEdge)); + const SCEV *BEValue = getSCEV(PN->getIncomingValue(BackEdge)); // NOTE: If BEValue is loop invariant, we know that the PHI node just // has a special value for the first iteration of the loop. 
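The PHI analysis above recognizes a loop-header PHI whose back-edge value is the PHI plus a loop-invariant step, and models it as the add recurrence {Start,+,Step}. As a rough standalone sketch of what that recurrence means (plain C++ with invented names, not code from this patch), the value at iteration k is the closed form Start + k*Step:

```cpp
#include <cassert>
#include <cstdint>

// Value of the affine add recurrence {Start,+,Step} at iteration k.
// Unsigned arithmetic wraps, matching SCEV's fixed-width semantics.
uint64_t evaluateAtIteration(uint64_t Start, uint64_t Step, uint64_t k) {
  return Start + k * Step;
}

int main() {
  // Simulate the PHI: i is Start on entry; each back edge adds Step.
  uint64_t Start = 7, Step = 3, i = Start;
  for (uint64_t k = 0; k != 100; ++k) {
    assert(i == evaluateAtIteration(Start, Step, k));
    i += Step; // the value coming around the back edge
  }
  return 0;
}
```

That closed form is what lets the analysis hand back a single expression for the PHI instead of re-walking the loop body on every query.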
@@ -2308,11 +2308,11 @@ const SCEV* ScalarEvolution::createNodeForPHI(PHINode *PN) {
 if (FoundIndex != Add->getNumOperands()) {
 // Create an add with everything but the specified operand.
- SmallVector Ops;
+ SmallVector Ops;
 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
 if (i != FoundIndex)
 Ops.push_back(Add->getOperand(i));
- const SCEV* Accum = getAddExpr(Ops);
+ const SCEV *Accum = getAddExpr(Ops);
 // This is not a valid addrec if the step amount is varying each
 // loop iteration, but is not itself an addrec in this loop.
@@ -2341,13 +2341,13 @@ const SCEV* ScalarEvolution::createNodeForPHI(PHINode *PN) {
 // Because the other in-value of i (0) fits the evolution of BEValue
 // i really is an addrec evolution.
 if (AddRec->getLoop() == L && AddRec->isAffine()) {
- const SCEV* StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
+ const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
 // If StartVal = j.start - j.stride, we can use StartVal as the
 // initial step of the addrec evolution.
 if (StartVal == getMinusSCEV(AddRec->getOperand(0),
 AddRec->getOperand(1))) {
- const SCEV* PHISCEV =
+ const SCEV *PHISCEV =
 getAddRecExpr(StartVal, AddRec->getOperand(1), L);
 // Okay, for the entire analysis of this edge we assumed the PHI
@@ -2371,14 +2371,14 @@ const SCEV* ScalarEvolution::createNodeForPHI(PHINode *PN) {
 /// createNodeForGEP - Expand GEP instructions into add and multiply
 /// operations. This allows them to be analyzed by regular SCEV code.
 ///
-const SCEV* ScalarEvolution::createNodeForGEP(User *GEP) {
+const SCEV *ScalarEvolution::createNodeForGEP(User *GEP) {
 const Type *IntPtrTy = TD->getIntPtrType();
 Value *Base = GEP->getOperand(0);
 // Don't attempt to analyze GEPs over unsized objects.
 if (!cast(Base->getType())->getElementType()->isSized())
 return getUnknown(GEP);
- const SCEV* TotalOffset = getIntegerSCEV(0, IntPtrTy);
+ const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy);
 gep_type_iterator GTI = gep_type_begin(GEP);
 for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
 E = GEP->op_end();
@@ -2394,7 +2394,7 @@ const SCEV* ScalarEvolution::createNodeForGEP(User *GEP) {
 getIntegerSCEV(Offset, IntPtrTy));
 } else {
 // For an array, add the element offset, explicitly scaled.
- const SCEV* LocalOffset = getSCEV(Index);
+ const SCEV *LocalOffset = getSCEV(Index);
 if (!isa(LocalOffset->getType()))
 // Getelementptr indices are signed.
 LocalOffset = getTruncateOrSignExtend(LocalOffset,
@@ -2414,7 +2414,7 @@ const SCEV* ScalarEvolution::createNodeForGEP(User *GEP) {
 /// the minimum number of times S is divisible by 2. For example, given {4,+,8}
 /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
 uint32_t
-ScalarEvolution::GetMinTrailingZeros(const SCEV* S) {
+ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
 if (const SCEVConstant *C = dyn_cast(S))
 return C->getValue()->getValue().countTrailingZeros();
@@ -2491,7 +2491,7 @@ ScalarEvolution::GetMinTrailingZeros(const SCEV* S) {
 }
 uint32_t
-ScalarEvolution::GetMinLeadingZeros(const SCEV* S) {
+ScalarEvolution::GetMinLeadingZeros(const SCEV *S) {
 // TODO: Handle other SCEV expression types here.
 if (const SCEVConstant *C = dyn_cast(S))
 return C->getValue()->getValue().countLeadingZeros();
@@ -2517,7 +2517,7 @@ ScalarEvolution::GetMinLeadingZeros(const SCEV* S) {
 }
 uint32_t
-ScalarEvolution::GetMinSignBits(const SCEV* S) {
+ScalarEvolution::GetMinSignBits(const SCEV *S) {
 // TODO: Handle other SCEV expression types here.
if (const SCEVConstant *C = dyn_cast(S)) { @@ -2576,7 +2576,7 @@ ScalarEvolution::GetMinSignBits(const SCEV* S) { /// createSCEV - We know that there is no SCEV for the specified value. /// Analyze the expression. /// -const SCEV* ScalarEvolution::createSCEV(Value *V) { +const SCEV *ScalarEvolution::createSCEV(Value *V) { if (!isSCEVable(V->getType())) return getUnknown(V); @@ -2646,7 +2646,7 @@ const SCEV* ScalarEvolution::createSCEV(Value *V) { // In order for this transformation to be safe, the LHS must be of the // form X*(2^n) and the Or constant must be less than 2^n. if (ConstantInt *CI = dyn_cast(U->getOperand(1))) { - const SCEV* LHS = getSCEV(U->getOperand(0)); + const SCEV *LHS = getSCEV(U->getOperand(0)); const APInt &CIVal = CI->getValue(); if (GetMinTrailingZeros(LHS) >= (CIVal.getBitWidth() - CIVal.countLeadingZeros())) @@ -2676,7 +2676,7 @@ const SCEV* ScalarEvolution::createSCEV(Value *V) { if (const SCEVZeroExtendExpr *Z = dyn_cast(getSCEV(U->getOperand(0)))) { const Type *UTy = U->getType(); - const SCEV* Z0 = Z->getOperand(); + const SCEV *Z0 = Z->getOperand(); const Type *Z0Ty = Z0->getType(); unsigned Z0TySize = getTypeSizeInBits(Z0Ty); @@ -2845,14 +2845,14 @@ const SCEV* ScalarEvolution::createSCEV(Value *V) { /// loop-invariant backedge-taken count (see /// hasLoopInvariantBackedgeTakenCount). /// -const SCEV* ScalarEvolution::getBackedgeTakenCount(const Loop *L) { +const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) { return getBackedgeTakenInfo(L).Exact; } /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except /// return the least SCEV value that is known never to be less than the /// actual backedge taken count. -const SCEV* ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { +const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { return getBackedgeTakenInfo(L).Max; } @@ -2919,7 +2919,7 @@ void ScalarEvolution::forgetLoopPHIs(const Loop *L) { SmallVector Worklist; for (BasicBlock::iterator I = Header->begin(); PHINode *PN = dyn_cast(I); ++I) { - std::map::iterator It = + std::map::iterator It = Scalars.find((Value*)I); if (It != Scalars.end() && !isa(It->second)) Worklist.push_back(PN); @@ -2942,8 +2942,8 @@ ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) { L->getExitingBlocks(ExitingBlocks); // Examine all exits and pick the most conservative values. - const SCEV* BECount = getCouldNotCompute(); - const SCEV* MaxBECount = getCouldNotCompute(); + const SCEV *BECount = getCouldNotCompute(); + const SCEV *MaxBECount = getCouldNotCompute(); bool CouldNotComputeBECount = false; for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { BackedgeTakenInfo NewBTI = @@ -3052,8 +3052,8 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L, ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB); BackedgeTakenInfo BTI1 = ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB); - const SCEV* BECount = getCouldNotCompute(); - const SCEV* MaxBECount = getCouldNotCompute(); + const SCEV *BECount = getCouldNotCompute(); + const SCEV *MaxBECount = getCouldNotCompute(); if (L->contains(TBB)) { // Both conditions must be true for the loop to continue executing. // Choose the less conservative count. 
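The hunk above combines exit conditions joined by a conjunction: the loop keeps running only while both conditions hold, so it leaves at whichever limit is hit first, and the combined backedge-taken count is the smaller of the two when both are computable. A tiny self-contained check of that reasoning, with arbitrary made-up bounds rather than anything from the patch:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
  // The loop runs while both conditions hold; it stops at whichever
  // limit is reached first, so the trip count is the minimum.
  const uint64_t LimitA = 23, LimitB = 57;
  uint64_t Trips = 0;
  for (uint64_t i = 0; i != LimitA && i != LimitB; ++i)
    ++Trips;
  assert(Trips == std::min(LimitA, LimitB));
  return 0;
}
```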
@@ -3087,8 +3087,8 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L, ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB); BackedgeTakenInfo BTI1 = ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB); - const SCEV* BECount = getCouldNotCompute(); - const SCEV* MaxBECount = getCouldNotCompute(); + const SCEV *BECount = getCouldNotCompute(); + const SCEV *MaxBECount = getCouldNotCompute(); if (L->contains(FBB)) { // Both conditions must be false for the loop to continue executing. // Choose the less conservative count. @@ -3146,7 +3146,7 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, // Handle common loops like: for (X = "string"; *X; ++X) if (LoadInst *LI = dyn_cast(ExitCond->getOperand(0))) if (Constant *RHS = dyn_cast(ExitCond->getOperand(1))) { - const SCEV* ItCnt = + const SCEV *ItCnt = ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond); if (!isa(ItCnt)) { unsigned BitWidth = getTypeSizeInBits(ItCnt->getType()); @@ -3156,8 +3156,8 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, } } - const SCEV* LHS = getSCEV(ExitCond->getOperand(0)); - const SCEV* RHS = getSCEV(ExitCond->getOperand(1)); + const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); + const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); // Try to evaluate any dependencies out of the loop. LHS = getSCEVAtScope(LHS, L); @@ -3180,20 +3180,20 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, ConstantRange CompRange( ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue())); - const SCEV* Ret = AddRec->getNumIterationsInRange(CompRange, *this); + const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); if (!isa(Ret)) return Ret; } switch (Cond) { case ICmpInst::ICMP_NE: { // while (X != Y) // Convert to: while (X-Y != 0) - const SCEV* TC = HowFarToZero(getMinusSCEV(LHS, RHS), L); + const SCEV *TC = HowFarToZero(getMinusSCEV(LHS, RHS), L); if (!isa(TC)) return TC; break; } case ICmpInst::ICMP_EQ: { // Convert to: while (X-Y == 0) // while (X == Y) - const SCEV* TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L); + const SCEV *TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L); if (!isa(TC)) return TC; break; } @@ -3237,8 +3237,8 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE) { - const SCEV* InVal = SE.getConstant(C); - const SCEV* Val = AddRec->evaluateAtIteration(InVal, SE); + const SCEV *InVal = SE.getConstant(C); + const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); assert(isa(Val) && "Evaluation of SCEV at constant didn't fold correctly?"); return cast(Val)->getValue(); @@ -3317,7 +3317,7 @@ ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount( // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. // Check to see if X is a loop variant variable value now. - const SCEV* Idx = getSCEV(VarIdx); + const SCEV *Idx = getSCEV(VarIdx); Idx = getSCEVAtScope(Idx, L); // We can only recognize very limited forms of loop index expressions, in @@ -3556,7 +3556,7 @@ ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L, /// /// In the case that a relevant loop exit value cannot be computed, the /// original value V is returned. 
-const SCEV* ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { +const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { // FIXME: this should be turned into a virtual method on SCEV! if (isa(V)) return V; @@ -3573,7 +3573,7 @@ const SCEV* ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { // to see if the loop that contains it has a known backedge-taken // count. If so, we may be able to force computation of the exit // value. - const SCEV* BackedgeTakenCount = getBackedgeTakenCount(LI); + const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI); if (const SCEVConstant *BTCC = dyn_cast(BackedgeTakenCount)) { // Okay, we know how many times the containing loop executes. If @@ -3611,7 +3611,7 @@ const SCEV* ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { if (!isSCEVable(Op->getType())) return V; - const SCEV* OpV = getSCEVAtScope(getSCEV(Op), L); + const SCEV *OpV = getSCEVAtScope(getSCEV(Op), L); if (const SCEVConstant *SC = dyn_cast(OpV)) { Constant *C = SC->getValue(); if (C->getType() != Op->getType()) @@ -3658,7 +3658,7 @@ const SCEV* ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { // Avoid performing the look-up in the common case where the specified // expression has no loop-variant portions. for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { - const SCEV* OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); + const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); if (OpAtScope != Comm->getOperand(i)) { // Okay, at least one of these operands is loop variant but might be // foldable. Build a new instance of the folded commutative expression. @@ -3686,8 +3686,8 @@ const SCEV* ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { } if (const SCEVUDivExpr *Div = dyn_cast(V)) { - const SCEV* LHS = getSCEVAtScope(Div->getLHS(), L); - const SCEV* RHS = getSCEVAtScope(Div->getRHS(), L); + const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); + const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); if (LHS == Div->getLHS() && RHS == Div->getRHS()) return Div; // must be loop invariant return getUDivExpr(LHS, RHS); @@ -3699,7 +3699,7 @@ const SCEV* ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { if (!L || !AddRec->getLoop()->contains(L->getHeader())) { // To evaluate this recurrence, we need to know how many times the AddRec // loop iterates. Compute this now. - const SCEV* BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); + const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; // Then, evaluate the AddRec. 
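The code above turns a known backedge-taken count into a concrete loop-exit value by evaluating the add recurrence at that iteration. The evaluation is binomial: {C0,+,C1,+,C2} at iteration k is C0 + C1*C(k,1) + C2*C(k,2), which the affine case reduces to C0 + C1*k. A minimal numeric check of that formula, with arbitrary constants chosen for the sketch:

```cpp
#include <cassert>
#include <cstdint>

// choose(k,2); choose(k,1) is just k.
uint64_t choose2(uint64_t k) { return k * (k - 1) / 2; }

int main() {
  const uint64_t C0 = 3, C1 = 5, C2 = 2, BTC = 41;
  // Simulate {C0,+,C1,+,C2}: the value strides by Step, and Step
  // itself strides by C2 -- a quadratic recurrence.
  uint64_t X = C0, Step = C1;
  for (uint64_t k = 0; k != BTC; ++k) {
    X += Step;
    Step += C2;
  }
  // The exit value is the recurrence evaluated at the backedge-taken count.
  assert(X == C0 + C1 * BTC + C2 * choose2(BTC));
  return 0;
}
```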
@@ -3709,21 +3709,21 @@ const SCEV* ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { } if (const SCEVZeroExtendExpr *Cast = dyn_cast(V)) { - const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L); + const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); if (Op == Cast->getOperand()) return Cast; // must be loop invariant return getZeroExtendExpr(Op, Cast->getType()); } if (const SCEVSignExtendExpr *Cast = dyn_cast(V)) { - const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L); + const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); if (Op == Cast->getOperand()) return Cast; // must be loop invariant return getSignExtendExpr(Op, Cast->getType()); } if (const SCEVTruncateExpr *Cast = dyn_cast(V)) { - const SCEV* Op = getSCEVAtScope(Cast->getOperand(), L); + const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); if (Op == Cast->getOperand()) return Cast; // must be loop invariant return getTruncateExpr(Op, Cast->getType()); @@ -3735,7 +3735,7 @@ const SCEV* ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { /// getSCEVAtScope - This is a convenience function which does /// getSCEVAtScope(getSCEV(V), L). -const SCEV* ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { +const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { return getSCEVAtScope(getSCEV(V), L); } @@ -3748,7 +3748,7 @@ const SCEV* ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { /// A and B isn't important. /// /// If the equation does not have a solution, SCEVCouldNotCompute is returned. -static const SCEV* SolveLinEquationWithOverflow(const APInt &A, const APInt &B, +static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B, ScalarEvolution &SE) { uint32_t BW = A.getBitWidth(); assert(BW == B.getBitWidth() && "Bit widths must be the same."); @@ -3791,7 +3791,7 @@ static const SCEV* SolveLinEquationWithOverflow(const APInt &A, const APInt &B, /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which /// might be the same) or two SCEVCouldNotCompute objects. /// -static std::pair +static std::pair SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); const SCEVConstant *LC = dyn_cast(AddRec->getOperand(0)); @@ -3854,7 +3854,7 @@ SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { /// HowFarToZero - Return the number of times a backedge comparing the specified /// value to zero will execute. If not computable, return CouldNotCompute. -const SCEV* ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { +const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { // If the value is a constant if (const SCEVConstant *C = dyn_cast(V)) { // If the value is already zero, the branch will execute zero times. @@ -3902,7 +3902,7 @@ const SCEV* ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) { // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of // the quadratic equation to solve it. - std::pair Roots = SolveQuadraticEquation(AddRec, + std::pair Roots = SolveQuadraticEquation(AddRec, *this); const SCEVConstant *R1 = dyn_cast(Roots.first); const SCEVConstant *R2 = dyn_cast(Roots.second); @@ -3921,7 +3921,7 @@ const SCEV* ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { // We can only use this value if the chrec ends up with an exact zero // value at this index. 
When solving for "X*X != 5", for example, we // should not accept a root of 2. - const SCEV* Val = AddRec->evaluateAtIteration(R1, *this); + const SCEV *Val = AddRec->evaluateAtIteration(R1, *this); if (Val->isZero()) return R1; // We found a quadratic root! } @@ -3934,7 +3934,7 @@ const SCEV* ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { /// HowFarToNonZero - Return the number of times a backedge checking the /// specified value for nonzero will execute. If not computable, return /// CouldNotCompute -const SCEV* ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { +const SCEV *ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { // Loops that look like: while (X == 0) are very strange indeed. We don't // handle them yet except for the trivial case. This could be expanded in the // future as needed. @@ -3995,7 +3995,7 @@ ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { /// more general, since a front-end may have replicated the controlling /// expression. /// -static bool HasSameValue(const SCEV* A, const SCEV* B) { +static bool HasSameValue(const SCEV *A, const SCEV *B) { // Quick check to see if they are the same SCEV. if (A == B) return true; @@ -4148,22 +4148,22 @@ bool ScalarEvolution::isNecessaryCond(Value *CondValue, /// getBECount - Subtract the end and start values and divide by the step, /// rounding up, to get the number of times the backedge is executed. Return /// CouldNotCompute if an intermediate computation overflows. -const SCEV* ScalarEvolution::getBECount(const SCEV* Start, - const SCEV* End, - const SCEV* Step) { +const SCEV *ScalarEvolution::getBECount(const SCEV *Start, + const SCEV *End, + const SCEV *Step) { const Type *Ty = Start->getType(); - const SCEV* NegOne = getIntegerSCEV(-1, Ty); - const SCEV* Diff = getMinusSCEV(End, Start); - const SCEV* RoundUp = getAddExpr(Step, NegOne); + const SCEV *NegOne = getIntegerSCEV(-1, Ty); + const SCEV *Diff = getMinusSCEV(End, Start); + const SCEV *RoundUp = getAddExpr(Step, NegOne); // Add an adjustment to the difference between End and Start so that // the division will effectively round up. - const SCEV* Add = getAddExpr(Diff, RoundUp); + const SCEV *Add = getAddExpr(Diff, RoundUp); // Check Add for unsigned overflow. // TODO: More sophisticated things could be done here. const Type *WideTy = Context->getIntegerType(getTypeSizeInBits(Ty) + 1); - const SCEV* OperandExtendedAdd = + const SCEV *OperandExtendedAdd = getAddExpr(getZeroExtendExpr(Diff, WideTy), getZeroExtendExpr(RoundUp, WideTy)); if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd) @@ -4188,7 +4188,7 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS, if (AddRec->isAffine()) { // FORNOW: We only support unit strides. unsigned BitWidth = getTypeSizeInBits(AddRec->getType()); - const SCEV* Step = AddRec->getStepRecurrence(*this); + const SCEV *Step = AddRec->getStepRecurrence(*this); // TODO: handle non-constant strides. const SCEVConstant *CStep = dyn_cast(Step); @@ -4224,7 +4224,7 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS, // treat m-n as signed nor unsigned due to overflow possibility. // First, we get the value of the LHS in the first iteration: n - const SCEV* Start = AddRec->getOperand(0); + const SCEV *Start = AddRec->getOperand(0); // Determine the minimum constant start value. const SCEV *MinStart = isa(Start) ? 
Start : @@ -4235,7 +4235,7 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS, // then we know that it will run exactly (m-n)/s times. Otherwise, we // only know that it will execute (max(m,n)-n)/s times. In both cases, // the division must round up. - const SCEV* End = RHS; + const SCEV *End = RHS; if (!isLoopGuardedByCond(L, isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, getMinusSCEV(Start, Step), RHS)) @@ -4243,7 +4243,7 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS, : getUMaxExpr(RHS, Start); // Determine the maximum constant end value. - const SCEV* MaxEnd = + const SCEV *MaxEnd = isa(End) ? End : getConstant(isSigned ? APInt::getSignedMaxValue(BitWidth) .ashr(GetMinSignBits(End) - 1) : @@ -4252,11 +4252,11 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS, // Finally, we subtract these two values and divide, rounding up, to get // the number of times the backedge is executed. - const SCEV* BECount = getBECount(Start, End, Step); + const SCEV *BECount = getBECount(Start, End, Step); // The maximum backedge count is similar, except using the minimum start // value and the maximum end value. - const SCEV* MaxBECount = getBECount(MinStart, MaxEnd, Step); + const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step); return BackedgeTakenInfo(BECount, MaxBECount); } @@ -4269,7 +4269,7 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS, /// this is that it returns the first iteration number where the value is not in /// the condition, thus computing the exit count. If the iteration count can't /// be computed, an instance of SCEVCouldNotCompute is returned. -const SCEV* SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range, +const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range, ScalarEvolution &SE) const { if (Range.isFullSet()) // Infinite loop. return SE.getCouldNotCompute(); @@ -4277,9 +4277,9 @@ const SCEV* SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range, // If the start is a non-zero constant, shift the range to simplify things. if (const SCEVConstant *SC = dyn_cast(getStart())) if (!SC->getValue()->isZero()) { - SmallVector Operands(op_begin(), op_end()); + SmallVector Operands(op_begin(), op_end()); Operands[0] = SE.getIntegerSCEV(0, SC->getType()); - const SCEV* Shifted = SE.getAddRecExpr(Operands, getLoop()); + const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop()); if (const SCEVAddRecExpr *ShiftedAddRec = dyn_cast(Shifted)) return ShiftedAddRec->getNumIterationsInRange( @@ -4338,12 +4338,12 @@ const SCEV* SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range, // quadratic equation to solve it. To do this, we must frame our problem in // terms of figuring out when zero is crossed, instead of when // Range.getUpper() is crossed. 
- SmallVector NewOps(op_begin(), op_end()); + SmallVector NewOps(op_begin(), op_end()); NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper())); - const SCEV* NewAddRec = SE.getAddRecExpr(NewOps, getLoop()); + const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop()); // Next, solve the constructed addrec - std::pair Roots = + std::pair Roots = SolveQuadraticEquation(cast(NewAddRec), SE); const SCEVConstant *R1 = dyn_cast(Roots.first); const SCEVConstant *R2 = dyn_cast(Roots.second); @@ -4525,12 +4525,12 @@ void ScalarEvolution::print(raw_ostream &OS, const Module* ) const { if (isSCEVable(I->getType())) { OS << *I; OS << " --> "; - const SCEV* SV = SE.getSCEV(&*I); + const SCEV *SV = SE.getSCEV(&*I); SV->print(OS); const Loop *L = LI->getLoopFor((*I).getParent()); - const SCEV* AtUse = SE.getSCEVAtScope(SV, L); + const SCEV *AtUse = SE.getSCEVAtScope(SV, L); if (AtUse != SV) { OS << " --> "; AtUse->print(OS); @@ -4538,7 +4538,7 @@ void ScalarEvolution::print(raw_ostream &OS, const Module* ) const { if (L) { OS << "\t\t" "Exits: "; - const SCEV* ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); + const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); if (!ExitValue->isLoopInvariant(L)) { OS << "<>"; } else { diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp index fbb53269502..ecfbc8ec79f 100644 --- a/lib/Analysis/ScalarEvolutionExpander.cpp +++ b/lib/Analysis/ScalarEvolutionExpander.cpp @@ -156,8 +156,8 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode, /// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made /// unnecessary; in its place, just signed-divide Ops[i] by the scale and /// check to see if the divide was folded. -static bool FactorOutConstant(const SCEV* &S, - const SCEV* &Remainder, +static bool FactorOutConstant(const SCEV *&S, + const SCEV *&Remainder, const APInt &Factor, ScalarEvolution &SE) { // Everything is divisible by one. @@ -172,7 +172,7 @@ static bool FactorOutConstant(const SCEV* &S, // the value at this scale. It will be considered for subsequent // smaller scales. if (C->isZero() || !CI->isZero()) { - const SCEV* Div = SE.getConstant(CI); + const SCEV *Div = SE.getConstant(CI); S = Div; Remainder = SE.getAddExpr(Remainder, @@ -197,13 +197,13 @@ static bool FactorOutConstant(const SCEV* &S, // In an AddRec, check if both start and step are divisible. if (const SCEVAddRecExpr *A = dyn_cast(S)) { - const SCEV* Step = A->getStepRecurrence(SE); - const SCEV* StepRem = SE.getIntegerSCEV(0, Step->getType()); + const SCEV *Step = A->getStepRecurrence(SE); + const SCEV *StepRem = SE.getIntegerSCEV(0, Step->getType()); if (!FactorOutConstant(Step, StepRem, Factor, SE)) return false; if (!StepRem->isZero()) return false; - const SCEV* Start = A->getStart(); + const SCEV *Start = A->getStart(); if (!FactorOutConstant(Start, Remainder, Factor, SE)) return false; S = SE.getAddRecExpr(Start, Step, A->getLoop()); @@ -238,14 +238,14 @@ static bool FactorOutConstant(const SCEV* &S, /// loop-invariant portions of expressions, after considering what /// can be folded using target addressing modes. 
///
-Value *SCEVExpander::expandAddToGEP(const SCEV* const *op_begin,
- const SCEV* const *op_end,
+Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
+ const SCEV *const *op_end,
 const PointerType *PTy,
 const Type *Ty,
 Value *V) {
 const Type *ElTy = PTy->getElementType();
 SmallVector GepIndices;
- SmallVector Ops(op_begin, op_end);
+ SmallVector Ops(op_begin, op_end);
 bool AnyNonZeroIndices = false;
 // Descend down the pointer's type and attempt to convert the other
@@ -256,14 +256,14 @@ Value *SCEVExpander::expandAddToGEP(const SCEV* const *op_begin,
 for (;;) {
 APInt ElSize = APInt(SE.getTypeSizeInBits(Ty),
 ElTy->isSized() ? SE.TD->getTypeAllocSize(ElTy) : 0);
- SmallVector NewOps;
- SmallVector ScaledOps;
+ SmallVector NewOps;
+ SmallVector ScaledOps;
 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
 // Split AddRecs up into parts as either of the parts may be usable
 // without the other.
 if (const SCEVAddRecExpr *A = dyn_cast(Ops[i]))
 if (!A->getStart()->isZero()) {
- const SCEV* Start = A->getStart();
+ const SCEV *Start = A->getStart();
 Ops.push_back(SE.getAddRecExpr(SE.getIntegerSCEV(0, A->getType()),
 A->getStepRecurrence(SE),
 A->getLoop()));
@@ -272,8 +272,8 @@ Value *SCEVExpander::expandAddToGEP(const SCEV* const *op_begin,
 }
 // If the scale size is not 0, attempt to factor out a scale.
 if (ElSize != 0) {
- const SCEV* Op = Ops[i];
- const SCEV* Remainder = SE.getIntegerSCEV(0, Op->getType());
+ const SCEV *Op = Ops[i];
+ const SCEV *Remainder = SE.getIntegerSCEV(0, Op->getType());
 if (FactorOutConstant(Op, Remainder, ElSize, SE)) {
 ScaledOps.push_back(Op); // Op now has ElSize factored out.
 NewOps.push_back(Remainder);
@@ -370,7 +370,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
 // comments on expandAddToGEP for details.
 if (SE.TD)
 if (const PointerType *PTy = dyn_cast(V->getType())) {
- const SmallVectorImpl &Ops = S->getOperands();
+ const SmallVectorImpl &Ops = S->getOperands();
 return expandAddToGEP(&Ops[0], &Ops[Ops.size() - 1],
 PTy, Ty, V);
 }
@@ -424,7 +424,7 @@ Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
 /// Move parts of Base into Rest to leave Base with the minimal
 /// expression that provides a pointer operand suitable for a
 /// GEP expansion.
-static void ExposePointerBase(const SCEV* &Base, const SCEV* &Rest,
+static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
 ScalarEvolution &SE) {
 while (const SCEVAddRecExpr *A = dyn_cast(Base)) {
 Base = A->getStart();
@@ -435,7 +435,7 @@ static void ExposePointerBase(const SCEV* &Base, const SCEV* &Rest,
 }
 if (const SCEVAddExpr *A = dyn_cast(Base)) {
 Base = A->getOperand(A->getNumOperands()-1);
- SmallVector NewAddOps(A->op_begin(), A->op_end());
+ SmallVector NewAddOps(A->op_begin(), A->op_end());
 NewAddOps.back() = Rest;
 Rest = SE.getAddExpr(NewAddOps);
 ExposePointerBase(Base, Rest, SE);
@@ -477,16 +477,16 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
 // {X,+,F} --> X + {0,+,F}
 if (!S->getStart()->isZero()) {
- const SmallVectorImpl &SOperands = S->getOperands();
- SmallVector NewOps(SOperands.begin(), SOperands.end());
+ const SmallVectorImpl &SOperands = S->getOperands();
+ SmallVector NewOps(SOperands.begin(), SOperands.end());
 NewOps[0] = SE.getIntegerSCEV(0, Ty);
- const SCEV* Rest = SE.getAddRecExpr(NewOps, L);
+ const SCEV *Rest = SE.getAddRecExpr(NewOps, L);
 // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
 // comments on expandAddToGEP for details.
if (SE.TD) { - const SCEV* Base = S->getStart(); - const SCEV* RestArray[1] = { Rest }; + const SCEV *Base = S->getStart(); + const SCEV *RestArray[1] = { Rest }; // Dig into the expression to find the pointer base for a GEP. ExposePointerBase(Base, RestArray[0], SE); // If we found a pointer, expand the AddRec with a GEP. @@ -565,19 +565,19 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) { // folders, then expandCodeFor the closed form. This allows the folders to // simplify the expression without having to build a bunch of special code // into this folder. - const SCEV* IH = SE.getUnknown(I); // Get I as a "symbolic" SCEV. + const SCEV *IH = SE.getUnknown(I); // Get I as a "symbolic" SCEV. // Promote S up to the canonical IV type, if the cast is foldable. - const SCEV* NewS = S; - const SCEV* Ext = SE.getNoopOrAnyExtend(S, I->getType()); + const SCEV *NewS = S; + const SCEV *Ext = SE.getNoopOrAnyExtend(S, I->getType()); if (isa(Ext)) NewS = Ext; - const SCEV* V = cast(NewS)->evaluateAtIteration(IH, SE); + const SCEV *V = cast(NewS)->evaluateAtIteration(IH, SE); //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n"; // Truncate the result down to the original type, if needed. - const SCEV* T = SE.getTruncateOrNoop(V, Ty); + const SCEV *T = SE.getTruncateOrNoop(V, Ty); return expand(T); } @@ -636,7 +636,7 @@ Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) { return LHS; } -Value *SCEVExpander::expandCodeFor(const SCEV* SH, const Type *Ty) { +Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty) { // Expand the code for this SCEV. Value *V = expand(SH); if (Ty) { @@ -697,7 +697,7 @@ Value * SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L, const Type *Ty) { assert(Ty->isInteger() && "Can only insert integer induction variables!"); - const SCEV* H = SE.getAddRecExpr(SE.getIntegerSCEV(0, Ty), + const SCEV *H = SE.getAddRecExpr(SE.getIntegerSCEV(0, Ty), SE.getIntegerSCEV(1, Ty), L); BasicBlock *SaveInsertBB = Builder.GetInsertBlock(); BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint(); diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp index 88cf60ecbaa..03a17d673dc 100644 --- a/lib/Transforms/Scalar/IndVarSimplify.cpp +++ b/lib/Transforms/Scalar/IndVarSimplify.cpp @@ -98,7 +98,7 @@ namespace { void RewriteNonIntegerIVs(Loop *L); - ICmpInst *LinearFunctionTestReplace(Loop *L, const SCEV* BackedgeTakenCount, + ICmpInst *LinearFunctionTestReplace(Loop *L, const SCEV *BackedgeTakenCount, Value *IndVar, BasicBlock *ExitingBlock, BranchInst *BI, @@ -129,7 +129,7 @@ Pass *llvm::createIndVarSimplifyPass() { /// SCEV analysis can determine a loop-invariant trip count of the loop, which /// is actually a much broader range than just linear tests. ICmpInst *IndVarSimplify::LinearFunctionTestReplace(Loop *L, - const SCEV* BackedgeTakenCount, + const SCEV *BackedgeTakenCount, Value *IndVar, BasicBlock *ExitingBlock, BranchInst *BI, @@ -138,13 +138,13 @@ ICmpInst *IndVarSimplify::LinearFunctionTestReplace(Loop *L, // against the preincremented value, otherwise we prefer to compare against // the post-incremented value. Value *CmpIndVar; - const SCEV* RHS = BackedgeTakenCount; + const SCEV *RHS = BackedgeTakenCount; if (ExitingBlock == L->getLoopLatch()) { // Add one to the "backedge-taken" count to get the trip count. // If this addition may overflow, we have to be more pessimistic and // cast the induction variable before doing the add. 
- const SCEV* Zero = SE->getIntegerSCEV(0, BackedgeTakenCount->getType()); - const SCEV* N = + const SCEV *Zero = SE->getIntegerSCEV(0, BackedgeTakenCount->getType()); + const SCEV *N = SE->getAddExpr(BackedgeTakenCount, SE->getIntegerSCEV(1, BackedgeTakenCount->getType())); if ((isa(N) && !N->isZero()) || @@ -264,7 +264,7 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, // Okay, this instruction has a user outside of the current loop // and varies predictably *inside* the loop. Evaluate the value it // contains when the loop exits, if possible. - const SCEV* ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop()); + const SCEV *ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop()); if (!ExitValue->isLoopInvariant(L)) continue; @@ -339,7 +339,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { RewriteNonIntegerIVs(L); BasicBlock *ExitingBlock = L->getExitingBlock(); // may be null - const SCEV* BackedgeTakenCount = SE->getBackedgeTakenCount(L); + const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L); // Create a rewriter object which we'll use to transform the code with. SCEVExpander Rewriter(*SE); @@ -367,14 +367,14 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { NeedCannIV = true; } for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) { - const SCEV* Stride = IU->StrideOrder[i]; + const SCEV *Stride = IU->StrideOrder[i]; const Type *Ty = SE->getEffectiveSCEVType(Stride->getType()); if (!LargestType || SE->getTypeSizeInBits(Ty) > SE->getTypeSizeInBits(LargestType)) LargestType = Ty; - std::map::iterator SI = + std::map::iterator SI = IU->IVUsesByStride.find(IU->StrideOrder[i]); assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!"); @@ -458,9 +458,9 @@ void IndVarSimplify::RewriteIVExpressions(Loop *L, const Type *LargestType, // the need for the code evaluation methods to insert induction variables // of different sizes. for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) { - const SCEV* Stride = IU->StrideOrder[i]; + const SCEV *Stride = IU->StrideOrder[i]; - std::map::iterator SI = + std::map::iterator SI = IU->IVUsesByStride.find(IU->StrideOrder[i]); assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!"); ilist &List = SI->second->Users; @@ -471,7 +471,7 @@ void IndVarSimplify::RewriteIVExpressions(Loop *L, const Type *LargestType, Instruction *User = UI->getUser(); // Compute the final addrec to expand into code. - const SCEV* AR = IU->getReplacementExpr(*UI); + const SCEV *AR = IU->getReplacementExpr(*UI); // FIXME: It is an extremely bad idea to indvar substitute anything more // complex than affine induction variables. Doing so will put expensive diff --git a/lib/Transforms/Scalar/LoopDeletion.cpp b/lib/Transforms/Scalar/LoopDeletion.cpp index 302cdec2ba4..76a773a8081 100644 --- a/lib/Transforms/Scalar/LoopDeletion.cpp +++ b/lib/Transforms/Scalar/LoopDeletion.cpp @@ -187,7 +187,7 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) { // Don't remove loops for which we can't solve the trip count. // They could be infinite, in which case we'd be changing program behavior. 
ScalarEvolution& SE = getAnalysis(); - const SCEV* S = SE.getBackedgeTakenCount(L); + const SCEV *S = SE.getBackedgeTakenCount(L); if (isa(S)) return false; diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp index 046fed3d715..76985da0673 100644 --- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -65,11 +65,11 @@ namespace { /// StrengthReduceStridedIVUsers. It contains the stride, the common base, as /// well as the PHI node and increment value created for rewrite. struct VISIBILITY_HIDDEN IVExpr { - const SCEV* Stride; - const SCEV* Base; + const SCEV *Stride; + const SCEV *Base; PHINode *PHI; - IVExpr(const SCEV* const stride, const SCEV* const base, PHINode *phi) + IVExpr(const SCEV *const stride, const SCEV *const base, PHINode *phi) : Stride(stride), Base(base), PHI(phi) {} }; @@ -78,7 +78,7 @@ namespace { struct VISIBILITY_HIDDEN IVsOfOneStride { std::vector IVs; - void addIV(const SCEV* const Stride, const SCEV* const Base, PHINode *PHI) { + void addIV(const SCEV *const Stride, const SCEV *const Base, PHINode *PHI) { IVs.push_back(IVExpr(Stride, Base, PHI)); } }; @@ -92,11 +92,11 @@ namespace { /// IVsByStride - Keep track of all IVs that have been inserted for a /// particular stride. - std::map IVsByStride; + std::map IVsByStride; /// StrideNoReuse - Keep track of all the strides whose ivs cannot be /// reused (nor should they be rewritten to reuse other strides). - SmallSet StrideNoReuse; + SmallSet StrideNoReuse; /// DeadInsts - Keep track of instructions we may have made dead, so that /// we can remove them after we are done working. @@ -134,7 +134,7 @@ namespace { private: ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond, IVStrideUse* &CondUse, - const SCEV* const * &CondStride); + const SCEV *const * &CondStride); void OptimizeIndvars(Loop *L); void OptimizeLoopCountIV(Loop *L); @@ -150,16 +150,16 @@ namespace { IVStrideUse* &CondUse); bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse, - const SCEV* const * &CondStride); + const SCEV *const * &CondStride); bool RequiresTypeConversion(const Type *Ty, const Type *NewTy); - const SCEV* CheckForIVReuse(bool, bool, bool, const SCEV* const&, + const SCEV *CheckForIVReuse(bool, bool, bool, const SCEV *const&, IVExpr&, const Type*, const std::vector& UsersToProcess); bool ValidScale(bool, int64_t, const std::vector& UsersToProcess); bool ValidOffset(bool, int64_t, int64_t, const std::vector& UsersToProcess); - const SCEV* CollectIVUsers(const SCEV* const &Stride, + const SCEV *CollectIVUsers(const SCEV *const &Stride, IVUsersOfOneStride &Uses, Loop *L, bool &AllUsesAreAddresses, @@ -169,11 +169,11 @@ namespace { const std::vector &UsersToProcess, const Loop *L, bool AllUsesAreAddresses, - const SCEV* Stride); + const SCEV *Stride); void PrepareToStrengthReduceFully( std::vector &UsersToProcess, - const SCEV* Stride, - const SCEV* CommonExprs, + const SCEV *Stride, + const SCEV *CommonExprs, const Loop *L, SCEVExpander &PreheaderRewriter); void PrepareToStrengthReduceFromSmallerStride( @@ -183,13 +183,13 @@ namespace { Instruction *PreInsertPt); void PrepareToStrengthReduceWithNewPhi( std::vector &UsersToProcess, - const SCEV* Stride, - const SCEV* CommonExprs, + const SCEV *Stride, + const SCEV *CommonExprs, Value *CommonBaseV, Instruction *IVIncInsertPt, const Loop *L, SCEVExpander &PreheaderRewriter); - void StrengthReduceStridedIVUsers(const SCEV* const &Stride, + void StrengthReduceStridedIVUsers(const SCEV 
*const &Stride,
 IVUsersOfOneStride &Uses,
 Loop *L);
 void DeleteTriviallyDeadInstructions();
@@ -233,7 +233,7 @@ void LoopStrengthReduce::DeleteTriviallyDeadInstructions() {
 /// containsAddRecFromDifferentLoop - Determine whether expression S involves a
 /// subexpression that is an AddRec from a loop other than L. An outer loop
 /// of L is OK, but not an inner loop nor a disjoint loop.
-static bool containsAddRecFromDifferentLoop(const SCEV* S, Loop *L) {
+static bool containsAddRecFromDifferentLoop(const SCEV *S, Loop *L) {
 // This is very common, put it first.
 if (isa(S))
 return false;
@@ -328,7 +328,7 @@ namespace {
 /// this use. As the use is processed, information gets moved from this
 /// field to the Imm field (below). BasedUser values are sorted by this
 /// field.
- const SCEV* Base;
+ const SCEV *Base;
 /// Inst - The instruction using the induction variable.
 Instruction *Inst;
@@ -341,7 +341,7 @@ namespace {
 /// before Inst, because it will be folded into the imm field of the
 /// instruction. This is also sometimes used for loop-variant values that
 /// must be added inside the loop.
- const SCEV* Imm;
+ const SCEV *Imm;
 /// Phi - The induction variable that performs the striding that
 /// should be used for this user.
@@ -363,13 +363,13 @@ namespace {
 // Once we rewrite the code to insert the new IVs we want, update the
 // operands of Inst to use the new expression 'NewBase', with 'Imm' added
 // to it.
- void RewriteInstructionToUseNewBase(const SCEV* const &NewBase,
+ void RewriteInstructionToUseNewBase(const SCEV *const &NewBase,
 Instruction *InsertPt,
 SCEVExpander &Rewriter, Loop *L, Pass *P,
 LoopInfo &LI,
 SmallVectorImpl &DeadInsts);
- Value *InsertCodeForBaseAtPosition(const SCEV* const &NewBase,
+ Value *InsertCodeForBaseAtPosition(const SCEV *const &NewBase,
 const Type *Ty,
 SCEVExpander &Rewriter,
 Instruction *IP, Loop *L,
@@ -384,7 +384,7 @@ void BasedUser::dump() const {
 cerr << " Inst: " << *Inst;
 }
-Value *BasedUser::InsertCodeForBaseAtPosition(const SCEV* const &NewBase,
+Value *BasedUser::InsertCodeForBaseAtPosition(const SCEV *const &NewBase,
 const Type *Ty,
 SCEVExpander &Rewriter,
 Instruction *IP, Loop *L,
@@ -408,7 +408,7 @@ Value *BasedUser::InsertCodeForBaseAtPosition(const SCEV* const &NewBase,
 Value *Base = Rewriter.expandCodeFor(NewBase, 0, BaseInsertPt);
- const SCEV* NewValSCEV = SE->getUnknown(Base);
+ const SCEV *NewValSCEV = SE->getUnknown(Base);
 // Always emit the immediate into the same block as the user.
 NewValSCEV = SE->getAddExpr(NewValSCEV, Imm);
@@ -423,7 +423,7 @@ Value *BasedUser::InsertCodeForBaseAtPosition(const SCEV* const &NewBase,
 // value of NewBase in the case that it's a different instruction from
 // the PHI that NewBase is computed from, or null otherwise.
 //
-void BasedUser::RewriteInstructionToUseNewBase(const SCEV* const &NewBase,
+void BasedUser::RewriteInstructionToUseNewBase(const SCEV *const &NewBase,
 Instruction *NewBasePt,
 SCEVExpander &Rewriter, Loop *L, Pass *P,
 LoopInfo &LI,
@@ -535,7 +535,7 @@ void BasedUser::RewriteInstructionToUseNewBase(const SCEV* const &NewBase,
 /// fitsInAddressMode - Return true if V can be subsumed within an addressing
 /// mode, and does not need to be put in a register first.
-static bool fitsInAddressMode(const SCEV* const &V, const Type *AccessTy, +static bool fitsInAddressMode(const SCEV *const &V, const Type *AccessTy, const TargetLowering *TLI, bool HasBaseReg) { if (const SCEVConstant *SC = dyn_cast(V)) { int64_t VC = SC->getValue()->getSExtValue(); @@ -567,12 +567,12 @@ static bool fitsInAddressMode(const SCEV* const &V, const Type *AccessTy, /// MoveLoopVariantsToImmediateField - Move any subexpressions from Val that are /// loop varying to the Imm operand. -static void MoveLoopVariantsToImmediateField(const SCEV* &Val, const SCEV* &Imm, +static void MoveLoopVariantsToImmediateField(const SCEV *&Val, const SCEV *&Imm, Loop *L, ScalarEvolution *SE) { if (Val->isLoopInvariant(L)) return; // Nothing to do. if (const SCEVAddExpr *SAE = dyn_cast(Val)) { - SmallVector NewOps; + SmallVector NewOps; NewOps.reserve(SAE->getNumOperands()); for (unsigned i = 0; i != SAE->getNumOperands(); ++i) @@ -590,10 +590,10 @@ static void MoveLoopVariantsToImmediateField(const SCEV* &Val, const SCEV* &Imm, Val = SE->getAddExpr(NewOps); } else if (const SCEVAddRecExpr *SARE = dyn_cast(Val)) { // Try to pull immediates out of the start value of nested addrec's. - const SCEV* Start = SARE->getStart(); + const SCEV *Start = SARE->getStart(); MoveLoopVariantsToImmediateField(Start, Imm, L, SE); - SmallVector Ops(SARE->op_begin(), SARE->op_end()); + SmallVector Ops(SARE->op_begin(), SARE->op_end()); Ops[0] = Start; Val = SE->getAddRecExpr(Ops, SARE->getLoop()); } else { @@ -609,15 +609,15 @@ static void MoveLoopVariantsToImmediateField(const SCEV* &Val, const SCEV* &Imm, /// Accumulate these immediate values into the Imm value. static void MoveImmediateValues(const TargetLowering *TLI, const Type *AccessTy, - const SCEV* &Val, const SCEV* &Imm, + const SCEV *&Val, const SCEV *&Imm, bool isAddress, Loop *L, ScalarEvolution *SE) { if (const SCEVAddExpr *SAE = dyn_cast(Val)) { - SmallVector NewOps; + SmallVector NewOps; NewOps.reserve(SAE->getNumOperands()); for (unsigned i = 0; i != SAE->getNumOperands(); ++i) { - const SCEV* NewOp = SAE->getOperand(i); + const SCEV *NewOp = SAE->getOperand(i); MoveImmediateValues(TLI, AccessTy, NewOp, Imm, isAddress, L, SE); if (!NewOp->isLoopInvariant(L)) { @@ -636,11 +636,11 @@ static void MoveImmediateValues(const TargetLowering *TLI, return; } else if (const SCEVAddRecExpr *SARE = dyn_cast(Val)) { // Try to pull immediates out of the start value of nested addrec's. 
@@ -636,11 +636,11 @@ static void MoveImmediateValues(const TargetLowering *TLI,
     return;
   } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
     // Try to pull immediates out of the start value of nested addrec's.
-    const SCEV* Start = SARE->getStart();
+    const SCEV *Start = SARE->getStart();
     MoveImmediateValues(TLI, AccessTy, Start, Imm, isAddress, L, SE);

     if (Start != SARE->getStart()) {
-      SmallVector<const SCEV*, 4> Ops(SARE->op_begin(), SARE->op_end());
+      SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
       Ops[0] = Start;
       Val = SE->getAddRecExpr(Ops, SARE->getLoop());
     }
@@ -651,8 +651,8 @@ static void MoveImmediateValues(const TargetLowering *TLI,
         fitsInAddressMode(SME->getOperand(0), AccessTy, TLI, false) &&
         SME->getNumOperands() == 2 && SME->isLoopInvariant(L)) {
-      const SCEV* SubImm = SE->getIntegerSCEV(0, Val->getType());
-      const SCEV* NewOp = SME->getOperand(1);
+      const SCEV *SubImm = SE->getIntegerSCEV(0, Val->getType());
+      const SCEV *NewOp = SME->getOperand(1);
       MoveImmediateValues(TLI, AccessTy, NewOp, SubImm, isAddress, L, SE);

       // If we extracted something out of the subexpressions, see if we can
@@ -687,7 +687,7 @@ static void MoveImmediateValues(const TargetLowering *TLI,

 static void MoveImmediateValues(const TargetLowering *TLI,
                                 Instruction *User,
-                                const SCEV* &Val, const SCEV* &Imm,
+                                const SCEV *&Val, const SCEV *&Imm,
                                 bool isAddress, Loop *L,
                                 ScalarEvolution *SE) {
   const Type *AccessTy = getAccessType(User);
@@ -697,19 +697,19 @@ static void MoveImmediateValues(const TargetLowering *TLI,
 /// SeparateSubExprs - Decompose Expr into all of the subexpressions that are
 /// added together.  This is used to reassociate common addition subexprs
 /// together for maximal sharing when rewriting bases.
-static void SeparateSubExprs(SmallVector<const SCEV*, 16> &SubExprs,
-                             const SCEV* Expr,
+static void SeparateSubExprs(SmallVector<const SCEV *, 16> &SubExprs,
+                             const SCEV *Expr,
                              ScalarEvolution *SE) {
   if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
     for (unsigned j = 0, e = AE->getNumOperands(); j != e; ++j)
       SeparateSubExprs(SubExprs, AE->getOperand(j), SE);
   } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Expr)) {
-    const SCEV* Zero = SE->getIntegerSCEV(0, Expr->getType());
+    const SCEV *Zero = SE->getIntegerSCEV(0, Expr->getType());
     if (SARE->getOperand(0) == Zero) {
       SubExprs.push_back(Expr);
     } else {
       // Compute the addrec with zero as its base.
-      SmallVector<const SCEV*, 4> Ops(SARE->op_begin(), SARE->op_end());
+      SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
       Ops[0] = Zero;   // Start with zero base.
       SubExprs.push_back(SE->getAddRecExpr(Ops, SARE->getLoop()));
@@ -733,7 +733,7 @@ struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
 /// not remove anything.  This looks for things like (a+b+c) and
 /// (a+c+d) and computes the common (a+c) subexpression.  The common expression
 /// is *removed* from the Bases and returned.
-static const SCEV*
+static const SCEV *
 RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
                                     ScalarEvolution *SE, Loop *L,
                                     const TargetLowering *TLI) {
@@ -741,9 +741,9 @@ RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,

   // Only one use?  This is a very common case, so we handle it specially and
   // cheaply.
-  const SCEV* Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType());
-  const SCEV* Result = Zero;
-  const SCEV* FreeResult = Zero;
+  const SCEV *Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType());
+  const SCEV *Result = Zero;
+  const SCEV *FreeResult = Zero;
   if (NumUses == 1) {
     // If the use is inside the loop, use its base, regardless of what it is:
     // it is clearly shared across all the IV's.  If the use is outside the loop
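The (a+b+c) / (a+c+d) comment above describes a counting scan: decompose each use's base into subexpressions, count occurrences, and hoist whatever appears in every use. A toy model of that scan, with strings standing in for SCEV subexpressions:

    #include <iostream>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    // Count how many uses contain each subexpression; anything present in
    // every use belongs to the common base (cf. SubExpressionUseData below).
    int main() {
      std::vector<std::set<std::string> > Uses;
      std::set<std::string> U1, U2;
      U1.insert("a"); U1.insert("b"); U1.insert("c");   // (a+b+c)
      U2.insert("a"); U2.insert("c"); U2.insert("d");   // (a+c+d)
      Uses.push_back(U1); Uses.push_back(U2);

      std::map<std::string, unsigned> UseCount;
      for (unsigned i = 0, e = Uses.size(); i != e; ++i)
        for (std::set<std::string>::iterator I = Uses[i].begin(),
             E = Uses[i].end(); I != E; ++I)
          ++UseCount[*I];

      for (std::map<std::string, unsigned>::iterator I = UseCount.begin(),
           E = UseCount.end(); I != E; ++I)
        if (I->second == Uses.size())    // common to all uses
          std::cout << I->first << " ";  // prints: a c
      std::cout << "\n";
      return 0;
    }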
@@ -759,13 +759,13 @@ RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
   // Also track whether all uses of each expression can be moved into
   // an addressing mode "for free"; such expressions are left within the loop.
   // struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
-  std::map<const SCEV*, SubExprUseData> SubExpressionUseData;
+  std::map<const SCEV *, SubExprUseData> SubExpressionUseData;

   // UniqueSubExprs - Keep track of all of the subexpressions we see in the
   // order we see them.
-  SmallVector<const SCEV*, 16> UniqueSubExprs;
+  SmallVector<const SCEV *, 16> UniqueSubExprs;

-  SmallVector<const SCEV*, 16> SubExprs;
+  SmallVector<const SCEV *, 16> SubExprs;
   unsigned NumUsesInsideLoop = 0;
   for (unsigned i = 0; i != NumUses; ++i) {
     // If the user is outside the loop, just ignore it for base computation.
@@ -809,7 +809,7 @@ RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
   // Now that we know how many times each is used, build Result.  Iterate over
   // UniqueSubexprs so that we have a stable ordering.
   for (unsigned i = 0, e = UniqueSubExprs.size(); i != e; ++i) {
-    std::map<const SCEV*, SubExprUseData>::iterator I =
+    std::map<const SCEV *, SubExprUseData>::iterator I =
       SubExpressionUseData.find(UniqueSubExprs[i]);
     assert(I != SubExpressionUseData.end() && "Entry not found?");
     if (I->second.Count == NumUsesInsideLoop) { // Found CSE!
@@ -853,7 +853,7 @@ RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
   if (FreeResult != Zero) {
     SeparateSubExprs(SubExprs, FreeResult, SE);
     for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
-      std::map<const SCEV*, SubExprUseData>::iterator I =
+      std::map<const SCEV *, SubExprUseData>::iterator I =
         SubExpressionUseData.find(SubExprs[j]);
       SubExpressionUseData.erase(I);
     }
@@ -982,10 +982,10 @@ bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1,
 /// be folded into the addressing mode, nor even that the factor be constant;
 /// a multiply (executed once) outside the loop is better than another IV
 /// within.  Well, usually.
-const SCEV* LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
+const SCEV *LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
                                 bool AllUsesAreAddresses,
                                 bool AllUsesAreOutsideLoop,
-                                const SCEV* const &Stride,
+                                const SCEV *const &Stride,
                                 IVExpr &IV, const Type *Ty,
                                 const std::vector<BasedUser>& UsersToProcess) {
   if (StrideNoReuse.count(Stride))
@@ -995,7 +995,7 @@ const SCEV* LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
     int64_t SInt = SC->getValue()->getSExtValue();
     for (unsigned NewStride = 0, e = IU->StrideOrder.size(); NewStride != e;
          ++NewStride) {
-      std::map<const SCEV*, IVsOfOneStride>::iterator SI =
+      std::map<const SCEV *, IVsOfOneStride>::iterator SI =
         IVsByStride.find(IU->StrideOrder[NewStride]);
       if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first) ||
           StrideNoReuse.count(SI->first))
        continue;
@@ -1048,7 +1048,7 @@ const SCEV* LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
   // an existing IV if we can.
   for (unsigned NewStride = 0, e = IU->StrideOrder.size(); NewStride != e;
        ++NewStride) {
-    std::map<const SCEV*, IVsOfOneStride>::iterator SI =
+    std::map<const SCEV *, IVsOfOneStride>::iterator SI =
       IVsByStride.find(IU->StrideOrder[NewStride]);
     if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
       continue;
@@ -1068,7 +1068,7 @@ const SCEV* LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
   // -1*old.
   for (unsigned NewStride = 0, e = IU->StrideOrder.size(); NewStride != e;
        ++NewStride) {
-    std::map<const SCEV*, IVsOfOneStride>::iterator SI =
+    std::map<const SCEV *, IVsOfOneStride>::iterator SI =
       IVsByStride.find(IU->StrideOrder[NewStride]);
     if (SI == IVsByStride.end())
       continue;
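A recurring pattern in these hunks is a std::map keyed by SCEV pointers paired with a separate ordered list of keys: pointer values vary run to run, so iterating the map directly would make the pass's decisions nondeterministic. A minimal sketch of that pattern, with stand-in types (not the LLVM ones):

    #include <cassert>
    #include <map>
    #include <vector>

    struct Stride {};
    struct UsesOfOneStride { int Count; UsesOfOneStride() : Count(0) {} };

    int main() {
      Stride A, B;
      std::map<const Stride *, UsesOfOneStride> ByStride; // cf. IVsByStride
      std::vector<const Stride *> Order;                  // cf. StrideOrder

      const Stride *Keys[] = { &A, &B };
      for (unsigned i = 0; i != 2; ++i) {
        ByStride[Keys[i]];           // default-construct the mapped entry
        Order.push_back(Keys[i]);    // remember a stable insertion order
      }

      // Visit entries in the recorded order, never in raw map order.
      for (unsigned i = 0, e = Order.size(); i != e; ++i) {
        std::map<const Stride *, UsesOfOneStride>::iterator SI =
          ByStride.find(Order[i]);
        assert(SI != ByStride.end() && "Stride doesn't exist!");
        ++SI->second.Count;
      }
      return 0;
    }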
@@ -1097,7 +1097,7 @@ static bool PartitionByIsUseOfPostIncrementedValue(const BasedUser &Val) {

 /// isNonConstantNegative - Return true if the specified scev is negated, but
 /// not a constant.
-static bool isNonConstantNegative(const SCEV* const &Expr) {
+static bool isNonConstantNegative(const SCEV *const &Expr) {
   const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr);
   if (!Mul) return false;
@@ -1114,7 +1114,7 @@ static bool isNonConstantNegative(const SCEV* const &Expr) {
 /// of the strided accesses, as well as the old information from Uses. We
 /// progressively move information from the Base field to the Imm field, until
 /// we eventually have the full access expression to rewrite the use.
-const SCEV* LoopStrengthReduce::CollectIVUsers(const SCEV* const &Stride,
+const SCEV *LoopStrengthReduce::CollectIVUsers(const SCEV *const &Stride,
                                               IVUsersOfOneStride &Uses,
                                               Loop *L,
                                               bool &AllUsesAreAddresses,
@@ -1145,7 +1145,7 @@ const SCEV* LoopStrengthReduce::CollectIVUsers(const SCEV* const &Stride,
   // for the strides (e.g. if we have "A+C+B" and "A+B+D" as our bases, find
   // "A+B"), emit it to the preheader, then remove the expression from the
   // UsersToProcess base values.
-  const SCEV* CommonExprs =
+  const SCEV *CommonExprs =
     RemoveCommonExpressionsFromUseBases(UsersToProcess, SE, L, TLI);

   // Next, figure out what we can represent in the immediate fields of
@@ -1211,7 +1211,7 @@ bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode(
                                 const std::vector<BasedUser> &UsersToProcess,
                                 const Loop *L,
                                 bool AllUsesAreAddresses,
-                                const SCEV* Stride) {
+                                const SCEV *Stride) {
   if (!EnableFullLSRMode)
     return false;
@@ -1248,7 +1248,7 @@ bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode(
       if (!Imm) Imm = SE->getIntegerSCEV(0, Stride->getType());
       const Instruction *Inst = UsersToProcess[i].Inst;
       const Type *AccessTy = getAccessType(Inst);
-      const SCEV* Diff = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
+      const SCEV *Diff = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
       if (!Diff->isZero() &&
           (!AllUsesAreAddresses ||
            !fitsInAddressMode(Diff, AccessTy, TLI, /*HasBaseReg=*/true)))
@@ -1282,7 +1282,7 @@ bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode(
 ///
 /// Return the created phi node.
 ///
-static PHINode *InsertAffinePhi(const SCEV* Start, const SCEV* Step,
+static PHINode *InsertAffinePhi(const SCEV *Start, const SCEV *Step,
                                 Instruction *IVIncInsertPt,
                                 const Loop *L,
                                 SCEVExpander &Rewriter) {
@@ -1302,7 +1302,7 @@ static PHINode *InsertAffinePhi(const SCEV* Start, const SCEV* Step,
   // If the stride is negative, insert a sub instead of an add for the
   // increment.
   bool isNegative = isNonConstantNegative(Step);
-  const SCEV* IncAmount = Step;
+  const SCEV *IncAmount = Step;
   if (isNegative)
     IncAmount = Rewriter.SE.getNegativeSCEV(Step);
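The negative-stride handling just above flips the increment from an add to a sub of the negated step. The same arithmetic in miniature, with plain ints standing in for the SCEV expressions:

    #include <cassert>

    int main() {
      const int Start = 100, Step = -3;
      bool isNegative = Step < 0;                // cf. isNonConstantNegative(Step)
      int IncAmount = isNegative ? -Step : Step; // cf. SE.getNegativeSCEV(Step)

      int IV = Start;
      for (int i = 0; i < 4; ++i)
        IV = isNegative ? IV - IncAmount : IV + IncAmount;  // sub, not add

      assert(IV == Start + 4 * Step);  // 100 + 4*(-3) == 88
      return 0;
    }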
@@ -1341,13 +1341,13 @@ static void SortUsersToProcess(std::vector<BasedUser> &UsersToProcess) {
   // loop before users outside of the loop with a particular base.
   //
   // We would like to use stable_sort here, but we can't.  The problem is that
-  // const SCEV*'s don't have a deterministic ordering w.r.t. each other, so
+  // const SCEV *'s don't have a deterministic ordering w.r.t. each other, so
   // we don't have anything to do a '<' comparison on.  Because we think the
   // number of uses is small, do a horrible bubble sort which just relies on
   // ==.
   for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
     // Get a base value.
-    const SCEV* Base = UsersToProcess[i].Base;
+    const SCEV *Base = UsersToProcess[i].Base;

     // Compact everything with this base to be consecutive with this one.
     for (unsigned j = i+1; j != e; ++j) {
@@ -1366,8 +1366,8 @@ static void SortUsersToProcess(std::vector<BasedUser> &UsersToProcess) {
 void
 LoopStrengthReduce::PrepareToStrengthReduceFully(
                                         std::vector<BasedUser> &UsersToProcess,
-                                        const SCEV* Stride,
-                                        const SCEV* CommonExprs,
+                                        const SCEV *Stride,
+                                        const SCEV *CommonExprs,
                                         const Loop *L,
                                         SCEVExpander &PreheaderRewriter) {
   DOUT << "  Fully reducing all users\n";
@@ -1379,9 +1379,9 @@ LoopStrengthReduce::PrepareToStrengthReduceFully(
     // TODO: The uses are grouped by base, but not sorted. We arbitrarily
     // pick the first Imm value here to start with, and adjust it for the
     // other uses.
-    const SCEV* Imm = UsersToProcess[i].Imm;
-    const SCEV* Base = UsersToProcess[i].Base;
-    const SCEV* Start = SE->getAddExpr(CommonExprs, Base, Imm);
+    const SCEV *Imm = UsersToProcess[i].Imm;
+    const SCEV *Base = UsersToProcess[i].Base;
+    const SCEV *Start = SE->getAddExpr(CommonExprs, Base, Imm);
     PHINode *Phi = InsertAffinePhi(Start, Stride, IVIncInsertPt, L,
                                    PreheaderRewriter);
     // Loop over all the users with the same base.
@@ -1413,8 +1413,8 @@ static Instruction *FindIVIncInsertPt(std::vector<BasedUser> &UsersToProcess,
 void
 LoopStrengthReduce::PrepareToStrengthReduceWithNewPhi(
                                         std::vector<BasedUser> &UsersToProcess,
-                                        const SCEV* Stride,
-                                        const SCEV* CommonExprs,
+                                        const SCEV *Stride,
+                                        const SCEV *CommonExprs,
                                         Value *CommonBaseV,
                                         Instruction *IVIncInsertPt,
                                         const Loop *L,
@@ -1490,7 +1490,7 @@ static bool IsImmFoldedIntoAddrMode(GlobalValue *GV, int64_t Offset,
 /// StrengthReduceStridedIVUsers - Strength reduce all of the users of a single
 /// stride of IV.  All of the users may have different starting values, and this
 /// may not be the only stride.
-void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV* const &Stride,
+void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV *const &Stride,
                                                       IVUsersOfOneStride &Uses,
                                                       Loop *L) {
   // If all the users are moved to another stride, then there is nothing to do.
@@ -1513,7 +1513,7 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV* const &Stride,
   // move information from the Base field to the Imm field, until we eventually
   // have the full access expression to rewrite the use.
   std::vector<BasedUser> UsersToProcess;
-  const SCEV* CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses,
+  const SCEV *CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses,
                                            AllUsesAreOutsideLoop,
                                            UsersToProcess);
@@ -1531,8 +1531,8 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV* const &Stride,
   // If all uses are addresses, consider sinking the immediate part of the
   // common expression back into uses if they can fit in the immediate fields.
   if (TLI && HaveCommonExprs && AllUsesAreAddresses) {
-    const SCEV* NewCommon = CommonExprs;
-    const SCEV* Imm = SE->getIntegerSCEV(0, ReplacedTy);
+    const SCEV *NewCommon = CommonExprs;
+    const SCEV *Imm = SE->getIntegerSCEV(0, ReplacedTy);
     MoveImmediateValues(TLI, Type::VoidTy, NewCommon, Imm, true, L, SE);
     if (!Imm->isZero()) {
       bool DoSink = true;
@@ -1578,7 +1578,7 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV* const &Stride,

   Value *CommonBaseV = Context->getNullValue(ReplacedTy);

-  const SCEV* RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
+  const SCEV *RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
   IVExpr   ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty),
                    SE->getIntegerSCEV(0, Type::Int32Ty),
                    0);
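What "fully reducing" a user means in the hunks above, in miniature: a use whose value is Start + i * Stride gets its own variable initialized to Start and bumped by Stride each iteration, so the per-iteration multiply disappears. Plain ints stand in for the SCEV expressions:

    #include <cassert>

    int main() {
      const int Start = 7, Stride = 5, Trips = 10;
      int IV = Start;                // cf. InsertAffinePhi(Start, Stride, ...)
      for (int i = 0; i < Trips; ++i) {
        assert(IV == Start + i * Stride);  // the expression the IV replaces
        IV += Stride;                      // one add instead of a multiply
      }
      return 0;
    }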
@@ -1618,7 +1618,7 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV* const &Stride,
   // strength-reduced forms.  This outer loop handles all bases, the inner
   // loop handles all users of a particular base.
   while (!UsersToProcess.empty()) {
-    const SCEV* Base = UsersToProcess.back().Base;
+    const SCEV *Base = UsersToProcess.back().Base;
     Instruction *Inst = UsersToProcess.back().Inst;

     // Emit the code for Base into the preheader.
@@ -1673,7 +1673,7 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV* const &Stride,
         User.Inst->moveBefore(IVIncInsertPt);
       }

-      const SCEV* RewriteExpr = SE->getUnknown(RewriteOp);
+      const SCEV *RewriteExpr = SE->getUnknown(RewriteOp);

       if (SE->getEffectiveSCEVType(RewriteOp->getType()) !=
           SE->getEffectiveSCEVType(ReplacedTy)) {
@@ -1705,7 +1705,7 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV* const &Stride,
       // The base has been used to initialize the PHI node but we don't want
       // it here.
       if (!ReuseIV.Base->isZero()) {
-        const SCEV* typedBase = ReuseIV.Base;
+        const SCEV *typedBase = ReuseIV.Base;
         if (SE->getEffectiveSCEVType(RewriteExpr->getType()) !=
             SE->getEffectiveSCEVType(ReuseIV.Base->getType())) {
           // It's possible the original IV is a larger type than the new IV,
@@ -1770,10 +1770,10 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV* const &Stride,
 /// set the IV user and stride information and return true, otherwise return
 /// false.
 bool LoopStrengthReduce::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
-                                    const SCEV* const * &CondStride) {
+                                    const SCEV *const * &CondStride) {
   for (unsigned Stride = 0, e = IU->StrideOrder.size();
        Stride != e && !CondUse; ++Stride) {
-    std::map<const SCEV*, IVUsersOfOneStride*>::iterator SI =
+    std::map<const SCEV *, IVUsersOfOneStride*>::iterator SI =
       IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
     assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
@@ -1800,7 +1800,7 @@ namespace {
     const ScalarEvolution *SE;
     explicit StrideCompare(const ScalarEvolution *se) : SE(se) {}

-    bool operator()(const SCEV* const &LHS, const SCEV* const &RHS) {
+    bool operator()(const SCEV *const &LHS, const SCEV *const &RHS) {
       const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS);
       const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
       if (LHSC && RHSC) {
@@ -1843,14 +1843,14 @@ namespace {
 /// if (v1 < 30) goto loop
 ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
                                                   IVStrideUse* &CondUse,
-                                                  const SCEV* const* &CondStride) {
+                                                  const SCEV *const* &CondStride) {
   // If there's only one stride in the loop, there's nothing to do here.
   if (IU->StrideOrder.size() < 2)
     return Cond;
   // If there are other users of the condition's stride, don't bother
   // trying to change the condition because the stride will still
   // remain.
-  std::map<const SCEV*, IVUsersOfOneStride*>::iterator I =
+  std::map<const SCEV *, IVUsersOfOneStride*>::iterator I =
     IU->IVUsesByStride.find(*CondStride);
   if (I == IU->IVUsesByStride.end() || I->second->Users.size() != 1)
@@ -1867,11 +1867,11 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
   const Type *NewCmpTy = NULL;
   unsigned TyBits = SE->getTypeSizeInBits(CmpTy);
   unsigned NewTyBits = 0;
-  const SCEV* *NewStride = NULL;
+  const SCEV **NewStride = NULL;
   Value *NewCmpLHS = NULL;
   Value *NewCmpRHS = NULL;
   int64_t Scale = 1;
-  const SCEV* NewOffset = SE->getIntegerSCEV(0, CmpTy);
+  const SCEV *NewOffset = SE->getIntegerSCEV(0, CmpTy);

   if (ConstantInt *C = dyn_cast<ConstantInt>(Cond->getOperand(1))) {
     int64_t CmpVal = C->getValue().getSExtValue();
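The "(v1 < 30)" example in the comment above is the key invariant ChangeCompareStride exploits: if a second IV v2 always equals Scale * v1, the exit test on v1 can be rewritten against v2 with a scaled bound, letting v1 die. The same idea in miniature:

    #include <cassert>

    int main() {
      const int Scale = 3, Bound = 30;
      int v1 = 0, v2 = 0;              // v2 strides by Scale, v1 by 1
      for (int i = 0; i < 40; ++i) {
        assert((v1 < Bound) == (v2 < Bound * Scale));  // same exit decision
        ++v1;
        v2 += Scale;
      }
      return 0;
    }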
@@ -1883,7 +1883,7 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
     // Look for a suitable stride / iv as replacement.
     for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
-      std::map<const SCEV*, IVUsersOfOneStride*>::iterator SI =
+      std::map<const SCEV *, IVUsersOfOneStride*>::iterator SI =
         IU->IVUsesByStride.find(IU->StrideOrder[i]);
       if (!isa<SCEVConstant>(SI->first))
         continue;
@@ -1963,7 +1963,7 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
       bool AllUsesAreAddresses = true;
       bool AllUsesAreOutsideLoop = true;
       std::vector<BasedUser> UsersToProcess;
-      const SCEV* CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
+      const SCEV *CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
                                                AllUsesAreAddresses,
                                                AllUsesAreOutsideLoop,
                                                UsersToProcess);
@@ -2098,13 +2098,13 @@ ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond,
   SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
   if (!Sel || !Sel->hasOneUse()) return Cond;

-  const SCEV* BackedgeTakenCount = SE->getBackedgeTakenCount(L);
+  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
     return Cond;
-  const SCEV* One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());
+  const SCEV *One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());

   // Add one to the backedge-taken count to get the trip count.
-  const SCEV* IterationCount = SE->getAddExpr(BackedgeTakenCount, One);
+  const SCEV *IterationCount = SE->getAddExpr(BackedgeTakenCount, One);

   // Check for a max calculation that matches the pattern.
   if (!isa<SCEVSMaxExpr>(IterationCount) && !isa<SCEVUMaxExpr>(IterationCount))
@@ -2117,13 +2117,13 @@ ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond,
   if (Max->getNumOperands() != 2)
     return Cond;

-  const SCEV* MaxLHS = Max->getOperand(0);
-  const SCEV* MaxRHS = Max->getOperand(1);
+  const SCEV *MaxLHS = Max->getOperand(0);
+  const SCEV *MaxRHS = Max->getOperand(1);
   if (!MaxLHS || MaxLHS != One) return Cond;

   // Check the relevant induction variable for conformance to
   // the pattern.
-  const SCEV* IV = SE->getSCEV(Cond->getOperand(0));
+  const SCEV *IV = SE->getSCEV(Cond->getOperand(0));
   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
   if (!AR || !AR->isAffine() ||
       AR->getStart() != One ||
@@ -2169,13 +2169,13 @@ ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond,
 /// inside the loop then try to eliminate the cast operation.
 void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {

-  const SCEV* BackedgeTakenCount = SE->getBackedgeTakenCount(L);
+  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
     return;

   for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e;
        ++Stride) {
-    std::map<const SCEV*, IVUsersOfOneStride*>::iterator SI =
+    std::map<const SCEV *, IVUsersOfOneStride*>::iterator SI =
       IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
     assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
     if (!isa<SCEVConstant>(SI->first))
@@ -2305,7 +2305,7 @@ void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {

     // Search IVUsesByStride to find Cond's IVUse if there is one.
     IVStrideUse *CondUse = 0;
-    const SCEV* const *CondStride = 0;
+    const SCEV *const *CondStride = 0;
     ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
     if (!FindIVUserForCond(Cond, CondUse, CondStride))
       return; // setcc doesn't use the IV.
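The trip-count arithmetic that OptimizeMax relies on above, in miniature: the backedge-taken count is how many times the latch branches back to the header, so the trip count is that value plus one (cf. IterationCount = BackedgeTakenCount + One in the hunk):

    #include <cassert>

    int main() {
      const int N = 5;
      int BackedgeTaken = 0, Trips = 0;
      for (int i = 0; i < N; ++i) {
        ++Trips;                        // one trip per header execution
        if (i + 1 < N) ++BackedgeTaken; // latch taken back to the header
      }
      assert(Trips == BackedgeTaken + 1);
      return 0;
    }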
@@ -2335,7 +2335,7 @@ void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
       int64_t SInt = SC->getValue()->getSExtValue();
       for (unsigned NewStride = 0, ee = IU->StrideOrder.size(); NewStride != ee;
            ++NewStride) {
-        std::map<const SCEV*, IVUsersOfOneStride*>::iterator SI =
+        std::map<const SCEV *, IVUsersOfOneStride*>::iterator SI =
           IU->IVUsesByStride.find(IU->StrideOrder[NewStride]);
         if (!isa<SCEVConstant>(SI->first) || SI->first == *CondStride)
           continue;
@@ -2349,7 +2349,7 @@ void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
         bool AllUsesAreAddresses = true;
         bool AllUsesAreOutsideLoop = true;
         std::vector<BasedUser> UsersToProcess;
-        const SCEV* CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
+        const SCEV *CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
                                                  AllUsesAreAddresses,
                                                  AllUsesAreOutsideLoop,
                                                  UsersToProcess);
@@ -2410,7 +2410,7 @@ void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
 void LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) {

   // If the number of times the loop is executed isn't computable, give up.
-  const SCEV* BackedgeTakenCount = SE->getBackedgeTakenCount(L);
+  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
     return;
@@ -2439,9 +2439,9 @@ void LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) {
   // Handle only tests for equality for the moment, and only stride 1.
   if (Cond->getPredicate() != CmpInst::ICMP_EQ)
     return;
-  const SCEV* IV = SE->getSCEV(Cond->getOperand(0));
+  const SCEV *IV = SE->getSCEV(Cond->getOperand(0));
   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
-  const SCEV* One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());
+  const SCEV *One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());
   if (!AR || !AR->isAffine() || AR->getStepRecurrence(*SE) != One)
     return;

   // If the RHS of the comparison is defined inside the loop, the rewrite
@@ -2557,7 +2557,7 @@ bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) {
   // strides deterministic - not dependent on map order.
   for (unsigned Stride = 0, e = IU->StrideOrder.size();
        Stride != e; ++Stride) {
-    std::map<const SCEV*, IVUsersOfOneStride*>::iterator SI =
+    std::map<const SCEV *, IVUsersOfOneStride*>::iterator SI =
       IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
     assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
     // FIXME: Generalize to non-affine IV's.