Spelling fixes.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@97453 91177308-0d34-0410-b5e6-96231b3b80d8
Dan Gohman
2010-03-01 17:49:51 +00:00
parent 67d9bf9dc4
commit 3f46a3abee
7 changed files with 45 additions and 45 deletions


@@ -61,7 +61,7 @@ public:
 Stride = Val;
 }
-/// getOffset - Return the offset to add to a theoeretical induction
+/// getOffset - Return the offset to add to a theoretical induction
 /// variable that starts at zero and counts up by the stride to compute
 /// the value for the use. This always has the same type as the stride.
 const SCEV *getOffset() const { return Offset; }
@@ -116,7 +116,7 @@ private:
 bool IsUseOfPostIncrementedValue;
 /// Deleted - Implementation of CallbackVH virtual function to
-/// recieve notification when the User is deleted.
+/// receive notification when the User is deleted.
 virtual void deleted();
 };


@@ -8,7 +8,7 @@
 //===----------------------------------------------------------------------===//
 //
 // The ScalarEvolution class is an LLVM pass which can be used to analyze and
-// catagorize scalar expressions in loops. It specializes in recognizing
+// categorize scalar expressions in loops. It specializes in recognizing
 // general induction variables, representing them with the abstract and opaque
 // SCEV class. Given this analysis, trip counts of loops and other important
 // properties can be obtained.
@@ -55,7 +55,7 @@ namespace llvm {
 protected:
 /// SubclassData - This field is initialized to zero and may be used in
-/// subclasses to store miscelaneous information.
+/// subclasses to store miscellaneous information.
 unsigned short SubclassData;
 private:
@@ -177,7 +177,7 @@ namespace llvm {
 ///
 LoopInfo *LI;
-/// TD - The target data information for the target we are targetting.
+/// TD - The target data information for the target we are targeting.
 ///
 TargetData *TD;
@@ -194,7 +194,7 @@ namespace llvm {
 std::map<SCEVCallbackVH, const SCEV *> Scalars;
 /// BackedgeTakenInfo - Information about the backedge-taken count
-/// of a loop. This currently inclues an exact count and a maximum count.
+/// of a loop. This currently includes an exact count and a maximum count.
 ///
 struct BackedgeTakenInfo {
 /// Exact - An expression indicating the exact backedge-taken count of
@@ -353,14 +353,14 @@ namespace llvm {
 bool Inverse);
 /// isImpliedCondOperands - Test whether the condition described by Pred,
-/// LHS, and RHS is true whenever the condition desribed by Pred, FoundLHS,
+/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
 /// and FoundRHS is true.
 bool isImpliedCondOperands(ICmpInst::Predicate Pred,
 const SCEV *LHS, const SCEV *RHS,
 const SCEV *FoundLHS, const SCEV *FoundRHS);
 /// isImpliedCondOperandsHelper - Test whether the condition described by
-/// Pred, LHS, and RHS is true whenever the condition desribed by Pred,
+/// Pred, LHS, and RHS is true whenever the condition described by Pred,
 /// FoundLHS, and FoundRHS is true.
 bool isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
 const SCEV *LHS, const SCEV *RHS,


@@ -222,7 +222,7 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
 // Descend recursively, but not into PHI nodes outside the current loop.
 // It's important to see the entire expression outside the loop to get
 // choices that depend on addressing mode use right, although we won't
-// consider references ouside the loop in all cases.
+// consider references outside the loop in all cases.
 // If User is already in Processed, we don't want to recurse into it again,
 // but do want to record a second reference in the same instruction.
 bool AddUserToIVUsers = false;
@@ -330,7 +330,7 @@ void IVUsers::print(raw_ostream &OS, const Module *M) const {
 }
 OS << ":\n";
-// Use a defualt AssemblyAnnotationWriter to suppress the default info
+// Use a default AssemblyAnnotationWriter to suppress the default info
 // comments, which aren't relevant here.
 AssemblyAnnotationWriter Annotator;
 for (ilist<IVStrideUse>::const_iterator UI = IVUses.begin(),


@@ -616,7 +616,7 @@ namespace {
 /// When this routine is finished, we know that any duplicates in the vector are
 /// consecutive and that complexity is monotonically increasing.
 ///
-/// Note that we go take special precautions to ensure that we get determinstic
+/// Note that we go take special precautions to ensure that we get deterministic
 /// results from this routine. In other words, we don't want the results of
 /// this to depend on where the addresses of various SCEV objects happened to
 /// land in memory.
@@ -744,7 +744,7 @@ static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
 // We need at least W + T bits for the multiplication step
 unsigned CalculationBits = W + T;
-// Calcuate 2^T, at width T+W.
+// Calculate 2^T, at width T+W.
 APInt DivFactor = APInt(CalculationBits, 1).shl(T);
 // Calculate the multiplicative inverse of K! / 2^T;
@@ -1410,7 +1410,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
 // If we deleted at least one add, we added operands to the end of the list,
 // and they are not necessarily sorted. Recurse to resort and resimplify
-// any operands we just aquired.
+// any operands we just acquired.
 if (DeletedAdd)
 return getAddExpr(Ops);
 }
@@ -1717,7 +1717,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
 // If we deleted at least one mul, we added operands to the end of the list,
 // and they are not necessarily sorted. Recurse to resort and resimplify
-// any operands we just aquired.
+// any operands we just acquired.
 if (DeletedMul)
 return getMulExpr(Ops);
 }
@@ -2746,7 +2746,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
 } else {
 // For an array, add the element offset, explicitly scaled.
 const SCEV *LocalOffset = getSCEV(Index);
-// Getelementptr indicies are signed.
+// Getelementptr indices are signed.
 LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
 // Lower "inbounds" GEPs to NSW arithmetic.
 LocalOffset = getMulExpr(LocalOffset, getSizeOfExpr(*GTI),
@@ -3220,7 +3220,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
 const Type *Z0Ty = Z0->getType();
 unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
-// If C is a low-bits mask, the zero extend is zerving to
+// If C is a low-bits mask, the zero extend is serving to
 // mask off the high bits. Complement the operand and
 // re-apply the zext.
 if (APIntOps::isMask(Z0TySize, CI->getValue()))
@@ -3405,7 +3405,7 @@ PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
 const ScalarEvolution::BackedgeTakenInfo &
 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
 // Initially insert a CouldNotCompute for this loop. If the insertion
-// succeeds, procede to actually compute a backedge-taken count and
+// succeeds, proceed to actually compute a backedge-taken count and
 // update the value. The temporary CouldNotCompute value tells SCEV
 // code elsewhere that it shouldn't attempt to request a new
 // backedge-taken count, which could result in infinite recursion.
@@ -3622,7 +3622,7 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
 return getCouldNotCompute();
 }
-// Procede to the next level to examine the exit condition expression.
+// Proceed to the next level to examine the exit condition expression.
 return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
 ExitBr->getSuccessor(0),
 ExitBr->getSuccessor(1));
@@ -3711,7 +3711,7 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
 }
 // With an icmp, it may be feasible to compute an exact backedge-taken count.
-// Procede to the next level to examine the icmp.
+// Proceed to the next level to examine the icmp.
 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
 return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
@@ -4780,7 +4780,7 @@ bool ScalarEvolution::isImpliedCond(Value *CondValue,
 ICmpInst::Predicate Pred,
 const SCEV *LHS, const SCEV *RHS,
 bool Inverse) {
-// Recursivly handle And and Or conditions.
+// Recursively handle And and Or conditions.
 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
 if (BO->getOpcode() == Instruction::And) {
 if (!Inverse)
@@ -4983,7 +4983,7 @@ bool ScalarEvolution::isImpliedCond(Value *CondValue,
 }
 /// isImpliedCondOperands - Test whether the condition described by Pred,
-/// LHS, and RHS is true whenever the condition desribed by Pred, FoundLHS,
+/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
 /// and FoundRHS is true.
 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
 const SCEV *LHS, const SCEV *RHS,
@@ -4998,7 +4998,7 @@ bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
 }
 /// isImpliedCondOperandsHelper - Test whether the condition described by
-/// Pred, LHS, and RHS is true whenever the condition desribed by Pred,
+/// Pred, LHS, and RHS is true whenever the condition described by Pred,
 /// FoundLHS, and FoundRHS is true.
 bool
 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
@@ -5156,7 +5156,7 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
 // If MaxEnd is within a step of the maximum integer value in its type,
 // adjust it down to the minimum value which would produce the same effect.
-// This allows the subsequent ceiling divison of (N+(step-1))/step to
+// This allows the subsequent ceiling division of (N+(step-1))/step to
 // compute the correct value.
 const SCEV *StepMinusOne = getMinusSCEV(Step,
 getIntegerSCEV(1, Step->getType()));
@@ -5433,7 +5433,7 @@ static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
 }
 void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
-// ScalarEvolution's implementaiton of the print method is to print
+// ScalarEvolution's implementation of the print method is to print
 // out SCEV values of all instructions that are interesting. Doing
 // this potentially causes it to create new SCEV objects though,
 // which technically conflicts with the const qualifier. This isn't


@@ -152,7 +152,7 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
 /// FactorOutConstant - Test if S is divisible by Factor, using signed
 /// division. If so, update S with Factor divided out and return true.
-/// S need not be evenly divisble if a reasonable remainder can be
+/// S need not be evenly divisible if a reasonable remainder can be
 /// computed.
 /// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
 /// unnecessary; in its place, just signed-divide Ops[i] by the scale and
@@ -462,7 +462,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
 break;
 }
-// If none of the operands were convertable to proper GEP indices, cast
+// If none of the operands were convertible to proper GEP indices, cast
 // the base to i8* and do an ugly getelementptr with that. It's still
 // better than ptrtoint+arithmetic+inttoptr at least.
 if (!AnyNonZeroIndices) {
@@ -820,7 +820,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
 const Type *ExpandTy = PostLoopScale ? IntTy : STy;
 PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);
-// Accomodate post-inc mode, if necessary.
+// Accommodate post-inc mode, if necessary.
 Value *Result;
 if (L != PostIncLoop)
 Result = PN;
@@ -1131,7 +1131,7 @@ void SCEVExpander::rememberInstruction(Value *I) {
 }
 void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
-// If we aquired more instructions since the old insert point was saved,
+// If we acquired more instructions since the old insert point was saved,
 // advance past them.
 while (isInsertedInstruction(I)) ++I;


@@ -594,8 +594,8 @@ void IndVarSimplify::SinkUnusedInvariants(Loop *L) {
 }
 }
-/// Return true if it is OK to use SIToFPInst for an inducation variable
-/// with given inital and exit values.
+/// Return true if it is OK to use SIToFPInst for an induction variable
+/// with given initial and exit values.
 static bool useSIToFPInst(ConstantFP &InitV, ConstantFP &ExitV,
 uint64_t intIV, uint64_t intEV) {
@@ -648,7 +648,7 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PH) {
 if (!convertToInt(InitValue->getValueAPF(), &newInitValue))
 return;
-// Check IV increment. Reject this PH if increement operation is not
+// Check IV increment. Reject this PH if increment operation is not
 // an add or increment value can not be represented by an integer.
 BinaryOperator *Incr =
 dyn_cast<BinaryOperator>(PH->getIncomingValue(BackEdge));
@@ -684,7 +684,7 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PH) {
 if (BI->getCondition() != EC) return;
 }
-// Find exit value. If exit value can not be represented as an interger then
+// Find exit value. If exit value can not be represented as an integer then
 // do not handle this floating point PH.
 ConstantFP *EV = NULL;
 unsigned EVIndex = 1;
@@ -746,11 +746,11 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PH) {
 ICmpInst *NewEC = new ICmpInst(EC->getParent()->getTerminator(),
 NewPred, LHS, RHS, EC->getName());
-// In the following deltions, PH may become dead and may be deleted.
+// In the following deletions, PH may become dead and may be deleted.
 // Use a WeakVH to observe whether this happens.
 WeakVH WeakPH = PH;
-// Delete old, floating point, exit comparision instruction.
+// Delete old, floating point, exit comparison instruction.
 NewEC->takeName(EC);
 EC->replaceAllUsesWith(NewEC);
 RecursivelyDeleteTriviallyDeadInstructions(EC);


@@ -198,7 +198,7 @@ struct Formula {
 }
-/// DoInitialMatch - Recurrsion helper for InitialMatch.
+/// DoInitialMatch - Recursion helper for InitialMatch.
 static void DoInitialMatch(const SCEV *S, Loop *L,
 SmallVectorImpl<const SCEV *> &Good,
 SmallVectorImpl<const SCEV *> &Bad,
@@ -1246,7 +1246,7 @@ public:
 }
 /// OptimizeShadowIV - If IV is used in a int-to-float cast
-/// inside the loop then try to eliminate the cast opeation.
+/// inside the loop then try to eliminate the cast operation.
 void LSRInstance::OptimizeShadowIV() {
 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
@@ -1673,7 +1673,7 @@ LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
 /// getUse - Return an LSRUse index and an offset value for a fixup which
 /// needs the given expression, with the given kind and optional access type.
-/// Either reuse an exisitng use or create a new one, as needed.
+/// Either reuse an existing use or create a new one, as needed.
 std::pair<size_t, int64_t>
 LSRInstance::getUse(const SCEV *&Expr,
 LSRUse::KindType Kind, const Type *AccessTy) {
@@ -2035,7 +2035,7 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
 /// loop-dominating registers added into a single register.
 void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
 Formula Base) {
-// This method is only intersting on a plurality of registers.
+// This method is only interesting on a plurality of registers.
 if (Base.BaseRegs.size() <= 1) return;
 Formula F = Base;
@@ -2054,7 +2054,7 @@ void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
 const SCEV *Sum = SE.getAddExpr(Ops);
 // TODO: If Sum is zero, it probably means ScalarEvolution missed an
 // opportunity to fold something. For now, just ignore such cases
-// rather than procede with zero in a register.
+// rather than proceed with zero in a register.
 if (!Sum->isZero()) {
 F.BaseRegs.push_back(Sum);
 (void)InsertFormula(LU, LUIdx, F);
@@ -2401,7 +2401,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
 const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
 unsigned BitWidth = SE.getTypeSizeInBits(IntTy);
-// TODO: Use a more targetted data structure.
+// TODO: Use a more targeted data structure.
 for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
 Formula F = LU.Formulae[L];
 // Use the immediate in the scaled register.
@@ -2569,9 +2569,9 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
 });
 }
-/// NarrowSearchSpaceUsingHeuristics - If there are an extrordinary number of
+/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of
 /// formulae to choose from, use some rough heuristics to prune down the number
-/// of formulae. This keeps the main solver from taking an extrordinary amount
+/// of formulae. This keeps the main solver from taking an extraordinary amount
 /// of time in some worst-case scenarios.
 void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
 // This is a rough guess that seems to work fairly well.
@@ -2621,7 +2621,7 @@ void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
 }
 DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
-<< " will yeild profitable reuse.\n");
+<< " will yield profitable reuse.\n");
 Taken.insert(Best);
 // In any use with formulae which references this register, delete formulae
@@ -2668,7 +2668,7 @@ void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
 // - sort the formula so that the most profitable solutions are found first
 // - sort the uses too
 // - search faster:
-// - dont compute a cost, and then compare. compare while computing a cost
+// - don't compute a cost, and then compare. compare while computing a cost
 // and bail early.
 // - track register sets with SmallBitVector
@@ -3104,7 +3104,7 @@ LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
 dbgs() << ":\n");
 /// OptimizeShadowIV - If IV is used in a int-to-float cast
-/// inside the loop then try to eliminate the cast opeation.
+/// inside the loop then try to eliminate the cast operation.
 OptimizeShadowIV();
 // Change loop terminating condition to use the postinc iv when possible.