revert r166264 because the LTO build is still failing

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@166340 91177308-0d34-0410-b5e6-96231b3b80d8
author Nadav Rotem
date   2012-10-19 21:28:43 +00:00
parent e06ce4c2c4
commit a04a4a79ea
6 changed files with 92 additions and 101 deletions
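
The interface this revert restores is the TargetLowering hook family that LSR and LowerInvoke query directly: isLegalAddressingMode, isLegalICmpImmediate, isLegalAddImmediate, isTruncateFree, getJumpBufSize, and getJumpBufAlignment. Because these passes can run without a target (plain opt, no -mtriple), every call site guards against a null pointer. A minimal sketch of that pattern, assuming the TargetLowering API of this era; the helper names are illustrative and not part of the patch:

#include "llvm/Type.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

// Hypothetical helpers showing the null-tolerant query pattern used below.
// A null TLI means "no target info": be conservative rather than assume.

static bool icmpImmIsFoldable(const TargetLowering *TLI, int64_t Imm) {
  if (!TLI)
    return false;                        // No target: assume the immediate won't fold.
  return TLI->isLegalICmpImmediate(Imm); // Otherwise let the target decide.
}

static bool truncIsFree(const TargetLowering *TLI, Type *SrcTy, Type *DstTy) {
  // GenerateTruncates bails out entirely without a target; same idea here.
  return TLI && TLI->isTruncateFree(SrcTy, DstTy);
}

This is why nearly every hunk below is a mechanical rename from STTI to TLI: the null checks and fallbacks keep the same shape, and only the provider of the answers changes back from the ScalarTargetTransformInfo analysis to the target's TargetLowering object.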

include/llvm/Analysis/ScalarEvolutionExpander.h

@@ -22,7 +22,7 @@
 #include <set>

 namespace llvm {
-  class ScalarTargetTransformInfo;
+  class TargetLowering;

   /// Return true if the given expression is safe to expand in the sense that
   /// all materialized values are safe to speculate.
@@ -129,7 +129,7 @@ namespace llvm {
     /// representative. Return the number of phis eliminated.
     unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                  SmallVectorImpl<WeakVH> &DeadInsts,
-                                 const ScalarTargetTransformInfo *STTI = NULL);
+                                 const TargetLowering *TLI = NULL);

     /// expandCodeFor - Insert code to directly compute the specified SCEV
     /// expression into the program. The inserted code is inserted into the

include/llvm/Transforms/Scalar.h

@@ -119,7 +119,7 @@ Pass *createLICMPass();
 // optional parameter used to consult the target machine whether certain
 // transformations are profitable.
 //
-Pass *createLoopStrengthReducePass();
+Pass *createLoopStrengthReducePass(const TargetLowering *TLI = 0);

 Pass *createGlobalMergePass(const TargetLowering *TLI = 0);
@@ -249,8 +249,9 @@ extern char &LowerSwitchID;
 // purpose "my LLVM-to-LLVM pass doesn't support the invoke instruction yet"
 // lowering pass.
 //
-FunctionPass *createLowerInvokePass();
-FunctionPass *createLowerInvokePass(bool useExpensiveEHSupport);
+FunctionPass *createLowerInvokePass(const TargetLowering *TLI = 0);
+FunctionPass *createLowerInvokePass(const TargetLowering *TLI,
+                                    bool useExpensiveEHSupport);
 extern char &LowerInvokePassID;

 //===----------------------------------------------------------------------===//

lib/Analysis/ScalarEvolutionExpander.cpp

@@ -19,8 +19,8 @@
 #include "llvm/LLVMContext.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/DataLayout.h"
+#include "llvm/Target/TargetLowering.h"
 #include "llvm/ADT/STLExtras.h"
-#include "llvm/TargetTransformInfo.h"

 using namespace llvm;
@@ -1599,15 +1599,15 @@ static bool width_descending(Value *lhs, Value *rhs) {
 /// This does not depend on any SCEVExpander state but should be used in
 /// the same context that SCEVExpander is used.
 unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                            SmallVectorImpl<WeakVH> &DeadInsts,
-                                           const ScalarTargetTransformInfo *STTI) {
+                                           const TargetLowering *TLI) {
   // Find integer phis in order of increasing width.
   SmallVector<PHINode*, 8> Phis;
   for (BasicBlock::iterator I = L->getHeader()->begin();
        PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
     Phis.push_back(Phi);
   }
-  if (STTI)
+  if (TLI)
     std::sort(Phis.begin(), Phis.end(), width_descending);

   unsigned NumElim = 0;
@@ -1635,8 +1635,8 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
     PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
     if (!OrigPhiRef) {
       OrigPhiRef = Phi;
-      if (Phi->getType()->isIntegerTy() && STTI &&
-          STTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
+      if (Phi->getType()->isIntegerTy() && TLI
+          && TLI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
         // This phi can be freely truncated to the narrowest phi type. Map the
         // truncated expression to it so it will be reused for narrow types.
         const SCEV *TruncExpr =

lib/CodeGen/Passes.cpp

@@ -359,7 +359,7 @@ void TargetPassConfig::addIRPasses() {
   // Run loop strength reduction before anything else.
   if (getOptLevel() != CodeGenOpt::None && !DisableLSR) {
-    addPass(createLoopStrengthReducePass());
+    addPass(createLoopStrengthReducePass(getTargetLowering()));
     if (PrintLSR)
       addPass(createPrintFunctionPass("\n\n*** Code after LSR ***\n", &dbgs()));
   }
@@ -389,7 +389,7 @@ void TargetPassConfig::addPassesToHandleExceptions() {
     addPass(createDwarfEHPass(TM));
     break;
   case ExceptionHandling::None:
-    addPass(createLowerInvokePass());
+    addPass(createLowerInvokePass(TM->getTargetLowering()));

     // The lower invoke pass may create unreachable code. Remove it.
     addPass(createUnreachableBlockEliminationPass());

lib/Transforms/Scalar/LoopStrengthReduce.cpp

@@ -37,7 +37,7 @@
 //
 // TODO: Handle multiple loops at a time.
 //
-// TODO: Should AddrMode::BaseGV be changed to a ConstantExpr
+// TODO: Should TargetLowering::AddrMode::BaseGV be changed to a ConstantExpr
 // instead of a GlobalValue?
 //
 // TODO: When truncation is free, truncate ICmp users' operands to make it a
@@ -67,7 +67,6 @@
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
-#include "llvm/TargetTransformInfo.h"
 #include "llvm/ADT/SmallBitVector.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/DenseSet.h"
@@ -75,6 +74,7 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ValueHandle.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetLowering.h"
 #include <algorithm>
 using namespace llvm;
@@ -1118,7 +1118,7 @@ public:
   enum KindType {
     Basic,   ///< A normal use, with no folding.
     Special, ///< A special case of basic, allowing -1 scales.
-    Address, ///< An address use; folding according to ScalarTargetTransformInfo.
+    Address, ///< An address use; folding according to TargetLowering
     ICmpZero ///< An equality icmp with both operands folded into one.
     // TODO: Add a generic icmp too?
   };
@@ -1272,12 +1272,12 @@ void LSRUse::dump() const {
 /// address-mode folding and special icmp tricks.
 static bool isLegalUse(const AddrMode &AM,
                        LSRUse::KindType Kind, Type *AccessTy,
-                       const ScalarTargetTransformInfo *STTI) {
+                       const TargetLowering *TLI) {
   switch (Kind) {
   case LSRUse::Address:
     // If we have low-level target information, ask the target if it can
     // completely fold this address.
-    if (STTI) return STTI->isLegalAddressingMode(AM, AccessTy);
+    if (TLI) return TLI->isLegalAddressingMode(AM, AccessTy);

     // Otherwise, just guess that reg+reg addressing is legal.
     return !AM.BaseGV && AM.BaseOffs == 0 && AM.Scale <= 1;
@@ -1300,7 +1300,7 @@ static bool isLegalUse(const AddrMode &AM,
     // If we have low-level target information, ask the target if it can fold an
     // integer immediate on an icmp.
     if (AM.BaseOffs != 0) {
-      if (!STTI)
+      if (!TLI)
        return false;
       // We have one of:
       // ICmpZero     BaseReg + Offset => ICmp BaseReg, -Offset
@@ -1309,7 +1309,7 @@ static bool isLegalUse(const AddrMode &AM,
       int64_t Offs = AM.BaseOffs;
       if (AM.Scale == 0)
         Offs = -(uint64_t)Offs; // The cast does the right thing with INT64_MIN.
-      return STTI->isLegalICmpImmediate(Offs);
+      return TLI->isLegalICmpImmediate(Offs);
     }

     // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg
@@ -1330,20 +1330,20 @@ static bool isLegalUse(const AddrMode &AM,
 static bool isLegalUse(AddrMode AM,
                        int64_t MinOffset, int64_t MaxOffset,
                        LSRUse::KindType Kind, Type *AccessTy,
-                       const ScalarTargetTransformInfo *LTTI) {
+                       const TargetLowering *TLI) {
   // Check for overflow.
   if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
       (MinOffset > 0))
     return false;
   AM.BaseOffs = (uint64_t)AM.BaseOffs + MinOffset;
-  if (isLegalUse(AM, Kind, AccessTy, LTTI)) {
+  if (isLegalUse(AM, Kind, AccessTy, TLI)) {
     AM.BaseOffs = (uint64_t)AM.BaseOffs - MinOffset;
     // Check for overflow.
     if (((int64_t)((uint64_t)AM.BaseOffs + MaxOffset) > AM.BaseOffs) !=
         (MaxOffset > 0))
       return false;
     AM.BaseOffs = (uint64_t)AM.BaseOffs + MaxOffset;
-    return isLegalUse(AM, Kind, AccessTy, LTTI);
+    return isLegalUse(AM, Kind, AccessTy, TLI);
   }
   return false;
 }
@@ -1352,7 +1352,7 @@ static bool isAlwaysFoldable(int64_t BaseOffs,
                              GlobalValue *BaseGV,
                              bool HasBaseReg,
                              LSRUse::KindType Kind, Type *AccessTy,
-                             const ScalarTargetTransformInfo *LTTI) {
+                             const TargetLowering *TLI) {
   // Fast-path: zero is always foldable.
   if (BaseOffs == 0 && !BaseGV) return true;
@@ -1371,14 +1371,14 @@ static bool isAlwaysFoldable(int64_t BaseOffs,
     AM.HasBaseReg = true;
   }

-  return isLegalUse(AM, Kind, AccessTy, LTTI);
+  return isLegalUse(AM, Kind, AccessTy, TLI);
 }

 static bool isAlwaysFoldable(const SCEV *S,
                              int64_t MinOffset, int64_t MaxOffset,
                              bool HasBaseReg,
                              LSRUse::KindType Kind, Type *AccessTy,
-                             const ScalarTargetTransformInfo *LTTI,
+                             const TargetLowering *TLI,
                              ScalarEvolution &SE) {
   // Fast-path: zero is always foldable.
   if (S->isZero()) return true;
@@ -1402,7 +1402,7 @@ static bool isAlwaysFoldable(const SCEV *S,
   AM.HasBaseReg = HasBaseReg;
   AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

-  return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, LTTI);
+  return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
 }

 namespace {
@@ -1502,7 +1502,7 @@ class LSRInstance {
   ScalarEvolution &SE;
   DominatorTree &DT;
   LoopInfo &LI;
-  const ScalarTargetTransformInfo *const STTI;
+  const TargetLowering *const TLI;
   Loop *const L;
   bool Changed;
@@ -1638,7 +1638,7 @@ class LSRInstance {
                          Pass *P);

 public:
-  LSRInstance(const ScalarTargetTransformInfo *ltti, Loop *l, Pass *P);
+  LSRInstance(const TargetLowering *tli, Loop *l, Pass *P);

   bool getChanged() const { return Changed; }
@@ -1688,10 +1688,11 @@ void LSRInstance::OptimizeShadowIV() {
     }
     if (!DestTy) continue;

-    if (STTI) {
+    if (TLI) {
       // If target does not support DestTy natively then do not apply
       // this transformation.
-      if (!STTI->isTypeLegal(DestTy)) continue;
+      EVT DVT = TLI->getValueType(DestTy);
+      if (!TLI->isTypeLegal(DVT)) continue;
     }

     PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
@@ -2014,18 +2015,18 @@ LSRInstance::OptimizeLoopTermCond() {
         if (C->getValue().getMinSignedBits() >= 64 ||
             C->getValue().isMinSignedValue())
           goto decline_post_inc;
-        // Without STTI, assume that any stride might be valid, and so any
+        // Without TLI, assume that any stride might be valid, and so any
         // use might be shared.
-        if (!STTI)
+        if (!TLI)
           goto decline_post_inc;
         // Check for possible scaled-address reuse.
         Type *AccessTy = getAccessType(UI->getUser());
         AddrMode AM;
         AM.Scale = C->getSExtValue();
-        if (STTI->isLegalAddressingMode(AM, AccessTy))
+        if (TLI->isLegalAddressingMode(AM, AccessTy))
           goto decline_post_inc;
         AM.Scale = -AM.Scale;
-        if (STTI->isLegalAddressingMode(AM, AccessTy))
+        if (TLI->isLegalAddressingMode(AM, AccessTy))
           goto decline_post_inc;
       }
     }
@@ -2096,12 +2097,12 @@ LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
   // Conservatively assume HasBaseReg is true for now.
   if (NewOffset < LU.MinOffset) {
     if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, HasBaseReg,
-                          Kind, AccessTy, STTI))
+                          Kind, AccessTy, TLI))
       return false;
     NewMinOffset = NewOffset;
   } else if (NewOffset > LU.MaxOffset) {
     if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, HasBaseReg,
-                          Kind, AccessTy, STTI))
+                          Kind, AccessTy, TLI))
       return false;
     NewMaxOffset = NewOffset;
   }
@@ -2130,7 +2131,7 @@ LSRInstance::getUse(const SCEV *&Expr,
   int64_t Offset = ExtractImmediate(Expr, SE);

   // Basic uses can't accept any offset, for example.
-  if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, Kind, AccessTy, STTI)) {
+  if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, Kind, AccessTy, TLI)) {
     Expr = Copy;
     Offset = 0;
   }
@@ -2395,7 +2396,7 @@ bool IVChain::isProfitableIncrement(const SCEV *OperExpr,
 /// TODO: Consider IVInc free if it's already used in another chains.
 static bool
 isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
-                  ScalarEvolution &SE, const ScalarTargetTransformInfo *STTI) {
+                  ScalarEvolution &SE, const TargetLowering *TLI) {
   if (StressIVChain)
     return true;
@@ -2653,7 +2654,7 @@ void LSRInstance::CollectChains() {
   for (unsigned UsersIdx = 0, NChains = IVChainVec.size();
        UsersIdx < NChains; ++UsersIdx) {
     if (!isProfitableChain(IVChainVec[UsersIdx],
-                           ChainUsersVec[UsersIdx].FarUsers, SE, STTI))
+                           ChainUsersVec[UsersIdx].FarUsers, SE, TLI))
       continue;
     // Preserve the chain at UsesIdx.
     if (ChainIdx != UsersIdx)
@@ -2680,8 +2681,7 @@ void LSRInstance::FinalizeChain(IVChain &Chain) {

 /// Return true if the IVInc can be folded into an addressing mode.
 static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
-                             Value *Operand,
-                             const ScalarTargetTransformInfo *STTI) {
+                             Value *Operand, const TargetLowering *TLI) {
   const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr);
   if (!IncConst || !isAddressUse(UserInst, Operand))
     return false;
@@ -2691,7 +2691,7 @@ static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
   int64_t IncOffset = IncConst->getValue()->getSExtValue();
   if (!isAlwaysFoldable(IncOffset, /*BaseGV=*/0, /*HaseBaseReg=*/false,
-                        LSRUse::Address, getAccessType(UserInst), STTI))
+                        LSRUse::Address, getAccessType(UserInst), TLI))
     return false;

   return true;
@@ -2762,7 +2762,7 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
       // If an IV increment can't be folded, use it as the next IV value.
       if (!canFoldIVIncExpr(LeftOverExpr, IncI->UserInst, IncI->IVOperand,
-                            STTI)) {
+                            TLI)) {
         assert(IVTy == IVOper->getType() && "inconsistent IV increment type");
         IVSrc = IVOper;
         LeftOverExpr = 0;
@@ -3108,7 +3108,7 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
     // into an immediate field.
     if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset,
                          Base.getNumRegs() > 1,
-                         LU.Kind, LU.AccessTy, STTI, SE))
+                         LU.Kind, LU.AccessTy, TLI, SE))
       continue;

     // Collect all operands except *J.
@@ -3122,7 +3122,7 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
     if (InnerAddOps.size() == 1 &&
         isAlwaysFoldable(InnerAddOps[0], LU.MinOffset, LU.MaxOffset,
                          Base.getNumRegs() > 1,
-                         LU.Kind, LU.AccessTy, STTI, SE))
+                         LU.Kind, LU.AccessTy, TLI, SE))
       continue;

     const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
@@ -3132,9 +3132,9 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,

       // Add the remaining pieces of the add back into the new formula.
       const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum);
-      if (STTI && InnerSumSC &&
+      if (TLI && InnerSumSC &&
           SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 &&
-          STTI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
+          TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
                                     InnerSumSC->getValue()->getZExtValue())) {
         F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset +
                            InnerSumSC->getValue()->getZExtValue();
@@ -3144,8 +3144,8 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,

       // Add J as its own register, or an unfolded immediate.
       const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J);
-      if (STTI && SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
-          STTI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
+      if (TLI && SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
+          TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
                                    SC->getValue()->getZExtValue()))
         F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset +
                            SC->getValue()->getZExtValue();
@@ -3205,7 +3205,7 @@ void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
     Formula F = Base;
     F.AM.BaseGV = GV;
     if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
-                    LU.Kind, LU.AccessTy, STTI))
+                    LU.Kind, LU.AccessTy, TLI))
       continue;
     F.BaseRegs[i] = G;
     (void)InsertFormula(LU, LUIdx, F);
@@ -3230,7 +3230,7 @@ void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
     Formula F = Base;
     F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I;
     if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I,
-                   LU.Kind, LU.AccessTy, STTI)) {
+                   LU.Kind, LU.AccessTy, TLI)) {
       // Add the offset to the base register.
       const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G);
       // If it cancelled out, drop the base register, otherwise update it.
@@ -3250,7 +3250,7 @@ void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
     Formula F = Base;
     F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Imm;
     if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
-                    LU.Kind, LU.AccessTy, STTI))
+                    LU.Kind, LU.AccessTy, TLI))
       continue;
     F.BaseRegs[i] = G;
     (void)InsertFormula(LU, LUIdx, F);
@@ -3297,7 +3297,7 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
     F.AM.BaseOffs = NewBaseOffs;

     // Check that this scale is legal.
-    if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, STTI))
+    if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI))
       continue;

     // Compensate for the use having MinOffset built into it.
@@ -3353,12 +3353,12 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
   Base.AM.HasBaseReg = Base.BaseRegs.size() > 1;
   // Check whether this scale is going to be legal.
   if (!isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
-                  LU.Kind, LU.AccessTy, STTI)) {
+                  LU.Kind, LU.AccessTy, TLI)) {
     // As a special-case, handle special out-of-loop Basic users specially.
     // TODO: Reconsider this special case.
     if (LU.Kind == LSRUse::Basic &&
         isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
-                   LSRUse::Special, LU.AccessTy, STTI) &&
+                   LSRUse::Special, LU.AccessTy, TLI) &&
         LU.AllFixupsOutsideLoop)
       LU.Kind = LSRUse::Special;
     else
@@ -3391,8 +3391,8 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {

 /// GenerateTruncates - Generate reuse formulae from different IV types.
 void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
-  // This requires ScalarTargetTransformInfo to tell us which truncates are free.
-  if (!STTI) return;
+  // This requires TargetLowering to tell us which truncates are free.
+  if (!TLI) return;

   // Don't bother truncating symbolic values.
   if (Base.AM.BaseGV) return;
@@ -3405,7 +3405,7 @@ void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
   for (SmallSetVector<Type *, 4>::const_iterator
        I = Types.begin(), E = Types.end(); I != E; ++I) {
     Type *SrcTy = *I;
-    if (SrcTy != DstTy && STTI->isTruncateFree(SrcTy, DstTy)) {
+    if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) {
       Formula F = Base;

       if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I);
@@ -3561,7 +3561,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
         Formula NewF = F;
         NewF.AM.BaseOffs = Offs;
         if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
-                        LU.Kind, LU.AccessTy, STTI))
+                        LU.Kind, LU.AccessTy, TLI))
           continue;
         NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);
@@ -3586,9 +3586,9 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
         Formula NewF = F;
         NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm;
         if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
-                        LU.Kind, LU.AccessTy, STTI)) {
-          if (!STTI ||
-              !STTI->isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
+                        LU.Kind, LU.AccessTy, TLI)) {
+          if (!TLI ||
+              !TLI->isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
             continue;
           NewF = F;
           NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm;
@@ -3900,7 +3900,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
       Formula &F = LUThatHas->Formulae[i];
       if (!isLegalUse(F.AM,
                       LUThatHas->MinOffset, LUThatHas->MaxOffset,
-                      LUThatHas->Kind, LUThatHas->AccessTy, STTI)) {
+                      LUThatHas->Kind, LUThatHas->AccessTy, TLI)) {
         DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
               dbgs() << '\n');
         LUThatHas->DeleteFormula(F);
@@ -4589,12 +4589,12 @@ LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
   Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
 }

-LSRInstance::LSRInstance(const ScalarTargetTransformInfo *stti, Loop *l, Pass *P)
+LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
   : IU(P->getAnalysis<IVUsers>()),
     SE(P->getAnalysis<ScalarEvolution>()),
     DT(P->getAnalysis<DominatorTree>()),
     LI(P->getAnalysis<LoopInfo>()),
-    STTI(stti), L(l), Changed(false), IVIncInsertPos(0) {
+    TLI(tli), L(l), Changed(false), IVIncInsertPos(0) {

   // If LoopSimplify form is not available, stay out of trouble.
   if (!L->isLoopSimplifyForm())
@@ -4684,7 +4684,7 @@ LSRInstance::LSRInstance(const ScalarTargetTransformInfo *stti, Loop *l, Pass *P
     for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
          JE = LU.Formulae.end(); J != JE; ++J)
       assert(isLegalUse(J->AM, LU.MinOffset, LU.MaxOffset,
-                        LU.Kind, LU.AccessTy, STTI) &&
+                        LU.Kind, LU.AccessTy, TLI) &&
              "Illegal formula generated!");
   };
 #endif
@@ -4757,13 +4757,13 @@ void LSRInstance::dump() const {
 namespace {

 class LoopStrengthReduce : public LoopPass {
-  /// ScalarTargetTransformInfo provides target information that is needed
-  /// for strength reducing loops.
-  const ScalarTargetTransformInfo *STTI;
+  /// TLI - Keep a pointer of a TargetLowering to consult for determining
+  /// transformation profitability.
+  const TargetLowering *const TLI;

 public:
   static char ID; // Pass ID, replacement for typeid
-  LoopStrengthReduce();
+  explicit LoopStrengthReduce(const TargetLowering *tli = 0);

 private:
   bool runOnLoop(Loop *L, LPPassManager &LPM);
@@ -4783,12 +4783,13 @@ INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
 INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
                     "Loop Strength Reduction", false, false)

-Pass *llvm::createLoopStrengthReducePass() {
-  return new LoopStrengthReduce();
+
+Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
+  return new LoopStrengthReduce(TLI);
 }

-LoopStrengthReduce::LoopStrengthReduce()
-  : LoopPass(ID), STTI(0) {
+LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli)
+  : LoopPass(ID), TLI(tli) {
   initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
 }
@@ -4814,13 +4815,8 @@ void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
 bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
   bool Changed = false;

-  TargetTransformInfo *TTI = getAnalysisIfAvailable<TargetTransformInfo>();
-  if (TTI)
-    STTI = TTI->getScalarTargetTransformInfo();
-
   // Run the main LSR transformation.
-  Changed |= LSRInstance(STTI, L, this).getChanged();
+  Changed |= LSRInstance(TLI, L, this).getChanged();

   // Remove any extra phis created by processing inner loops.
   Changed |= DeleteDeadPHIs(L->getHeader());
@@ -4831,7 +4827,7 @@ bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
   Rewriter.setDebugType(DEBUG_TYPE);
 #endif
   unsigned numFolded = Rewriter.
-    replaceCongruentIVs(L, &getAnalysis<DominatorTree>(), DeadInsts, STTI);
+    replaceCongruentIVs(L, &getAnalysis<DominatorTree>(), DeadInsts, TLI);
   if (numFolded) {
     Changed = true;
     DeleteTriviallyDeadInstructions(DeadInsts);

lib/Transforms/Utils/LowerInvoke.cpp

@@ -45,10 +45,10 @@
 #include "llvm/Pass.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
-#include "llvm/TargetTransformInfo.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetLowering.h"
 #include <csetjmp>
 #include <set>
 using namespace llvm;
@@ -70,14 +70,15 @@ namespace {
     Constant *SetJmpFn, *LongJmpFn, *StackSaveFn, *StackRestoreFn;
     bool useExpensiveEHSupport;

-    // We peek in STTI to grab the target's jmp_buf size and alignment
-    const ScalarTargetTransformInfo *STTI;
+    // We peek in TLI to grab the target's jmp_buf size and alignment
+    const TargetLowering *TLI;

   public:
     static char ID; // Pass identification, replacement for typeid
-    explicit LowerInvoke(bool useExpensiveEHSupport = ExpensiveEHSupport)
+    explicit LowerInvoke(const TargetLowering *tli = NULL,
+                         bool useExpensiveEHSupport = ExpensiveEHSupport)
       : FunctionPass(ID), useExpensiveEHSupport(useExpensiveEHSupport),
-        STTI(0) {
+        TLI(tli) {
       initializeLowerInvokePass(*PassRegistry::getPassRegistry());
     }
     bool doInitialization(Module &M);
@@ -107,24 +108,21 @@ INITIALIZE_PASS(LowerInvoke, "lowerinvoke",
 char &llvm::LowerInvokePassID = LowerInvoke::ID;

 // Public Interface To the LowerInvoke pass.
-FunctionPass *llvm::createLowerInvokePass() {
-  return new LowerInvoke(ExpensiveEHSupport);
+FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI) {
+  return new LowerInvoke(TLI, ExpensiveEHSupport);
 }
-FunctionPass *llvm::createLowerInvokePass(bool useExpensiveEHSupport) {
-  return new LowerInvoke(useExpensiveEHSupport);
+FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI,
+                                          bool useExpensiveEHSupport) {
+  return new LowerInvoke(TLI, useExpensiveEHSupport);
 }

 // doInitialization - Make sure that there is a prototype for abort in the
 // current module.
 bool LowerInvoke::doInitialization(Module &M) {
-  TargetTransformInfo *TTI = getAnalysisIfAvailable<TargetTransformInfo>();
-  if (TTI)
-    STTI = TTI->getScalarTargetTransformInfo();
-
   Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext());
   if (useExpensiveEHSupport) {
     // Insert a type for the linked list of jump buffers.
-    unsigned JBSize = STTI ? STTI->getJumpBufSize() : 0;
+    unsigned JBSize = TLI ? TLI->getJumpBufSize() : 0;
     JBSize = JBSize ? JBSize : 200;
     Type *JmpBufTy = ArrayType::get(VoidPtrTy, JBSize);
@@ -432,7 +430,7 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
   // Create an alloca for the incoming jump buffer ptr and the new jump buffer
   // that needs to be restored on all exits from the function. This is an
   // alloca because the value needs to be live across invokes.
-  unsigned Align = STTI ? STTI->getJumpBufAlignment() : 0;
+  unsigned Align = TLI ? TLI->getJumpBufAlignment() : 0;
   AllocaInst *JmpBuf =
     new AllocaInst(JBLinkTy, 0, Align,
                    "jblink", F.begin()->begin());
@@ -577,10 +575,6 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
 }

 bool LowerInvoke::runOnFunction(Function &F) {
-  TargetTransformInfo *TTI = getAnalysisIfAvailable<TargetTransformInfo>();
-  if (TTI)
-    STTI = TTI->getScalarTargetTransformInfo();
-
   if (useExpensiveEHSupport)
     return insertExpensiveEHSupport(F);
   else