Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2025-02-28 09:31:03 +00:00)
Introduce encapsulation for ScalarEvolution's TargetData object, and refactor
the code to minimize dependencies on TargetData.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@69644 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
  parent fb17fd2cdf
  commit af79fb5f47
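For orientation before the diff: a minimal sketch (not part of the commit) of how a client is expected to query type information through the new encapsulated interface. The `analyzeValue` helper is hypothetical; the three `ScalarEvolution` member functions are the ones this commit introduces.

```cpp
#include "llvm/Analysis/ScalarEvolution.h"
using namespace llvm;

// Hypothetical helper: clients now ask ScalarEvolution about types instead
// of reaching into its TargetData directly.
static void analyzeValue(ScalarEvolution &SE, Value *V) {
  // Integers are always analyzable; pointers only when TargetData is
  // available to ScalarEvolution.
  if (!SE.isSCEVable(V->getType()))
    return;

  // Size queries now go through ScalarEvolution, not TargetData.
  uint64_t Bits = SE.getTypeSizeInBits(V->getType());

  // For pointer types this yields the pointer-sized integer type.
  const Type *EffTy = SE.getEffectiveSCEVType(V->getType());
  (void)Bits; (void)EffTy;
}
```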
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -32,7 +32,6 @@ namespace llvm {
   class Type;
   class SCEVHandle;
   class ScalarEvolution;
-  class TargetData;

   /// SCEV - This class represent an analyzed expression in the program.  These
   /// are reference counted opaque objects that the client is not allowed to
@@ -201,9 +200,21 @@ namespace llvm {
     static char ID; // Pass identification, replacement for typeid
     ScalarEvolution() : FunctionPass(&ID), Impl(0) {}

-    // getTargetData - Return the TargetData object contained in this
-    // ScalarEvolution.
-    const TargetData &getTargetData() const;
+    /// isSCEVable - Test if values of the given type are analyzable within
+    /// the SCEV framework. This primarily includes integer types, and it
+    /// can optionally include pointer types if the ScalarEvolution class
+    /// has access to target-specific information.
+    bool isSCEVable(const Type *Ty) const;
+
+    /// getTypeSizeInBits - Return the size in bits of the specified type,
+    /// for which isSCEVable must return true.
+    uint64_t getTypeSizeInBits(const Type *Ty) const;
+
+    /// getEffectiveSCEVType - Return a type with the same bitwidth as
+    /// the given type and which represents how SCEV will treat the given
+    /// type, for which isSCEVable must return true. For pointer types,
+    /// this is the pointer-sized integer type.
+    const Type *getEffectiveSCEVType(const Type *Ty) const;

     /// getSCEV - Return a SCEV expression handle for the full generality of the
     /// specified expression.
--- a/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -20,8 +20,6 @@
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"

 namespace llvm {
-  class TargetData;
-
   /// SCEVExpander - This class uses information about analyze scalars to
   /// rewrite expressions in canonical form.
   ///
@@ -31,7 +29,6 @@ namespace llvm {
   struct SCEVExpander : public SCEVVisitor<SCEVExpander, Value*> {
     ScalarEvolution &SE;
     LoopInfo &LI;
-    const TargetData &TD;
     std::map<SCEVHandle, Value*> InsertedExpressions;
     std::set<Instruction*> InsertedInstructions;

@@ -39,8 +36,8 @@ namespace llvm {

     friend struct SCEVVisitor<SCEVExpander, Value*>;
   public:
-    SCEVExpander(ScalarEvolution &se, LoopInfo &li, const TargetData &td)
-      : SE(se), LI(li), TD(td) {}
+    SCEVExpander(ScalarEvolution &se, LoopInfo &li)
+      : SE(se), LI(li) {}

     LoopInfo &getLoopInfo() const { return LI; }

@@ -85,6 +82,11 @@ namespace llvm {
     /// we can to share the casts.
     Value *InsertCastOfTo(Instruction::CastOps opcode, Value *V,
                           const Type *Ty);
+
+    /// InsertNoopCastOfTo - Insert a cast of V to the specified type,
+    /// which must be possible with a noop cast.
+    Value *InsertNoopCastOfTo(Value *V, const Type *Ty);
+
     /// InsertBinop - Insert the specified binary operator, doing a small amount
     /// of work to avoid inserting an obviously redundant operation.
     static Value *InsertBinop(Instruction::BinaryOps Opcode, Value *LHS,
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -570,7 +570,7 @@ static SCEVHandle BinomialCoefficient(SCEVHandle It, unsigned K,
   if (K > 1000)
     return SE.getCouldNotCompute();

-  unsigned W = SE.getTargetData().getTypeSizeInBits(ResultTy);
+  unsigned W = SE.getTypeSizeInBits(ResultTy);

   // Calculate K! / 2^T and T; we divide out the factors of two before
   // multiplying for calculating K! / 2^T to avoid overflow.
@@ -648,8 +648,7 @@ SCEVHandle SCEVAddRecExpr::evaluateAtIteration(SCEVHandle It,
 //===----------------------------------------------------------------------===//

 SCEVHandle ScalarEvolution::getTruncateExpr(const SCEVHandle &Op, const Type *Ty) {
-  assert(getTargetData().getTypeSizeInBits(Op->getType()) >
-         getTargetData().getTypeSizeInBits(Ty) &&
+  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
          "This is not a truncating conversion!");

   if (SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
@@ -677,13 +676,11 @@ SCEVHandle ScalarEvolution::getTruncateExpr(const SCEVHandle &Op, const Type *Ty

 SCEVHandle ScalarEvolution::getZeroExtendExpr(const SCEVHandle &Op,
                                               const Type *Ty) {
-  assert(getTargetData().getTypeSizeInBits(Op->getType()) <
-         getTargetData().getTypeSizeInBits(Ty) &&
+  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
          "This is not an extending conversion!");

   if (SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
-    const Type *IntTy = Ty;
-    if (isa<PointerType>(IntTy)) IntTy = getTargetData().getIntPtrType();
+    const Type *IntTy = getEffectiveSCEVType(Ty);
     Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
     if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
     return getUnknown(C);
@@ -700,13 +697,11 @@ SCEVHandle ScalarEvolution::getZeroExtendExpr(const SCEVHandle &Op,
 }

 SCEVHandle ScalarEvolution::getSignExtendExpr(const SCEVHandle &Op, const Type *Ty) {
-  assert(getTargetData().getTypeSizeInBits(Op->getType()) <
-         getTargetData().getTypeSizeInBits(Ty) &&
+  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
          "This is not an extending conversion!");

   if (SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
-    const Type *IntTy = Ty;
-    if (isa<PointerType>(IntTy)) IntTy = getTargetData().getIntPtrType();
+    const Type *IntTy = getEffectiveSCEVType(Ty);
     Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
     if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
     return getUnknown(C);
@@ -1366,7 +1361,7 @@ namespace {

     /// TD - The target data information for the target we are targetting.
     ///
-    TargetData &TD;
+    TargetData *TD;

     /// UnknownValue - This SCEV is used to represent unknown trip counts and
     /// things.
@@ -1389,9 +1384,25 @@ namespace {

   public:
     ScalarEvolutionsImpl(ScalarEvolution &se, Function &f, LoopInfo &li,
-                         TargetData &td)
+                         TargetData *td)
       : SE(se), F(f), LI(li), TD(td), UnknownValue(new SCEVCouldNotCompute()) {}

+    /// isSCEVable - Test if values of the given type are analyzable within
+    /// the SCEV framework. This primarily includes integer types, and it
+    /// can optionally include pointer types if the ScalarEvolution class
+    /// has access to target-specific information.
+    bool isSCEVable(const Type *Ty) const;
+
+    /// getTypeSizeInBits - Return the size in bits of the specified type,
+    /// for which isSCEVable must return true.
+    uint64_t getTypeSizeInBits(const Type *Ty) const;
+
+    /// getEffectiveSCEVType - Return a type with the same bitwidth as
+    /// the given type and which represents how SCEV will treat the given
+    /// type, for which isSCEVable must return true. For pointer types,
+    /// this is the pointer-sized integer type.
+    const Type *getEffectiveSCEVType(const Type *Ty) const;
+
     SCEVHandle getCouldNotCompute();

     /// getIntegerSCEV - Given an integer or FP type, create a constant for the
@@ -1478,9 +1489,6 @@ namespace {
     /// that no dangling references are left around.
     void deleteValueFromRecords(Value *V);

-    /// getTargetData - Return the TargetData.
-    const TargetData &getTargetData() const;
-
   private:
     /// createSCEV - We know that there is no SCEV for the specified value.
     /// Analyze the expression.
@@ -1581,8 +1589,50 @@ void ScalarEvolutionsImpl::deleteValueFromRecords(Value *V) {
   }
 }

-const TargetData &ScalarEvolutionsImpl::getTargetData() const {
-  return TD;
+/// isSCEVable - Test if values of the given type are analyzable within
+/// the SCEV framework. This primarily includes integer types, and it
+/// can optionally include pointer types if the ScalarEvolution class
+/// has access to target-specific information.
+bool ScalarEvolutionsImpl::isSCEVable(const Type *Ty) const {
+  // Integers are always SCEVable.
+  if (Ty->isInteger())
+    return true;
+
+  // Pointers are SCEVable if TargetData information is available
+  // to provide pointer size information.
+  if (isa<PointerType>(Ty))
+    return TD != NULL;
+
+  // Otherwise it's not SCEVable.
+  return false;
+}
+
+/// getTypeSizeInBits - Return the size in bits of the specified type,
+/// for which isSCEVable must return true.
+uint64_t ScalarEvolutionsImpl::getTypeSizeInBits(const Type *Ty) const {
+  assert(isSCEVable(Ty) && "Type is not SCEVable!");
+
+  // If we have a TargetData, use it!
+  if (TD)
+    return TD->getTypeSizeInBits(Ty);
+
+  // Otherwise, we support only integer types.
+  assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!");
+  return Ty->getPrimitiveSizeInBits();
+}
+
+/// getEffectiveSCEVType - Return a type with the same bitwidth as
+/// the given type and which represents how SCEV will treat the given
+/// type, for which isSCEVable must return true. For pointer types,
+/// this is the pointer-sized integer type.
+const Type *ScalarEvolutionsImpl::getEffectiveSCEVType(const Type *Ty) const {
+  assert(isSCEVable(Ty) && "Type is not SCEVable!");
+
+  if (Ty->isInteger())
+    return Ty;
+
+  assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
+  return TD->getIntPtrType();
 }

 SCEVHandle ScalarEvolutionsImpl::getCouldNotCompute() {
@@ -1592,7 +1642,7 @@ SCEVHandle ScalarEvolutionsImpl::getCouldNotCompute() {
 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
 /// expression and create a new one.
 SCEVHandle ScalarEvolutionsImpl::getSCEV(Value *V) {
-  assert(V->getType() != Type::VoidTy && "Can't analyze void expressions!");
+  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

   std::map<Value*, SCEVHandle>::iterator I = Scalars.find(V);
   if (I != Scalars.end()) return I->second;
@@ -1604,8 +1654,7 @@ SCEVHandle ScalarEvolutionsImpl::getSCEV(Value *V) {
 /// getIntegerSCEV - Given an integer or FP type, create a constant for the
 /// specified signed integer value and return a SCEV for the constant.
 SCEVHandle ScalarEvolutionsImpl::getIntegerSCEV(int Val, const Type *Ty) {
-  if (isa<PointerType>(Ty))
-    Ty = TD.getIntPtrType();
+  Ty = SE.getEffectiveSCEVType(Ty);
   Constant *C;
   if (Val == 0)
     C = Constant::getNullValue(Ty);
@@ -1624,8 +1673,7 @@ SCEVHandle ScalarEvolutionsImpl::getNegativeSCEV(const SCEVHandle &V) {
     return SE.getUnknown(ConstantExpr::getNeg(VC->getValue()));

   const Type *Ty = V->getType();
-  if (isa<PointerType>(Ty))
-    Ty = TD.getIntPtrType();
+  Ty = SE.getEffectiveSCEVType(Ty);
   return SE.getMulExpr(V, SE.getConstant(ConstantInt::getAllOnesValue(Ty)));
 }

@@ -1635,8 +1683,7 @@ SCEVHandle ScalarEvolutionsImpl::getNotSCEV(const SCEVHandle &V) {
     return SE.getUnknown(ConstantExpr::getNot(VC->getValue()));

   const Type *Ty = V->getType();
-  if (isa<PointerType>(Ty))
-    Ty = TD.getIntPtrType();
+  Ty = SE.getEffectiveSCEVType(Ty);
   SCEVHandle AllOnes = SE.getConstant(ConstantInt::getAllOnesValue(Ty));
   return getMinusSCEV(AllOnes, V);
 }
@@ -1656,12 +1703,12 @@ SCEVHandle
 ScalarEvolutionsImpl::getTruncateOrZeroExtend(const SCEVHandle &V,
                                               const Type *Ty) {
   const Type *SrcTy = V->getType();
-  assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
-         (Ty->isInteger() || isa<PointerType>(Ty)) &&
+  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
+         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
          "Cannot truncate or zero extend with non-integer arguments!");
-  if (TD.getTypeSizeInBits(SrcTy) == TD.getTypeSizeInBits(Ty))
+  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
     return V;  // No conversion
-  if (TD.getTypeSizeInBits(SrcTy) > TD.getTypeSizeInBits(Ty))
+  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
     return SE.getTruncateExpr(V, Ty);
   return SE.getZeroExtendExpr(V, Ty);
 }
@@ -1673,12 +1720,12 @@ SCEVHandle
 ScalarEvolutionsImpl::getTruncateOrSignExtend(const SCEVHandle &V,
                                               const Type *Ty) {
   const Type *SrcTy = V->getType();
-  assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
-         (Ty->isInteger() || isa<PointerType>(Ty)) &&
+  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
+         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
          "Cannot truncate or zero extend with non-integer arguments!");
-  if (TD.getTypeSizeInBits(SrcTy) == TD.getTypeSizeInBits(Ty))
+  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
     return V;  // No conversion
-  if (TD.getTypeSizeInBits(SrcTy) > TD.getTypeSizeInBits(Ty))
+  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
     return SE.getTruncateExpr(V, Ty);
   return SE.getSignExtendExpr(V, Ty);
 }
@@ -1806,66 +1853,66 @@ SCEVHandle ScalarEvolutionsImpl::createNodeForPHI(PHINode *PN) {
 /// guaranteed to end in (at every loop iteration).  It is, at the same time,
 /// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
 /// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
-static uint32_t GetMinTrailingZeros(SCEVHandle S, const TargetData &TD) {
+static uint32_t GetMinTrailingZeros(SCEVHandle S, const ScalarEvolution &SE) {
   if (SCEVConstant *C = dyn_cast<SCEVConstant>(S))
     return C->getValue()->getValue().countTrailingZeros();

   if (SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
-    return std::min(GetMinTrailingZeros(T->getOperand(), TD),
-                    (uint32_t)TD.getTypeSizeInBits(T->getType()));
+    return std::min(GetMinTrailingZeros(T->getOperand(), SE),
+                    (uint32_t)SE.getTypeSizeInBits(T->getType()));

   if (SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
-    uint32_t OpRes = GetMinTrailingZeros(E->getOperand(), TD);
-    return OpRes == TD.getTypeSizeInBits(E->getOperand()->getType()) ?
-           TD.getTypeSizeInBits(E->getOperand()->getType()) : OpRes;
+    uint32_t OpRes = GetMinTrailingZeros(E->getOperand(), SE);
+    return OpRes == SE.getTypeSizeInBits(E->getOperand()->getType()) ?
+           SE.getTypeSizeInBits(E->getOperand()->getType()) : OpRes;
   }

   if (SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
-    uint32_t OpRes = GetMinTrailingZeros(E->getOperand(), TD);
-    return OpRes == TD.getTypeSizeInBits(E->getOperand()->getType()) ?
-           TD.getTypeSizeInBits(E->getOperand()->getType()) : OpRes;
+    uint32_t OpRes = GetMinTrailingZeros(E->getOperand(), SE);
+    return OpRes == SE.getTypeSizeInBits(E->getOperand()->getType()) ?
+           SE.getTypeSizeInBits(E->getOperand()->getType()) : OpRes;
   }

   if (SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
     // The result is the min of all operands results.
-    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0), TD);
+    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0), SE);
     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
-      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i), TD));
+      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i), SE));
     return MinOpRes;
   }

   if (SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
     // The result is the sum of all operands results.
-    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0), TD);
-    uint32_t BitWidth = TD.getTypeSizeInBits(M->getType());
+    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0), SE);
+    uint32_t BitWidth = SE.getTypeSizeInBits(M->getType());
     for (unsigned i = 1, e = M->getNumOperands();
          SumOpRes != BitWidth && i != e; ++i)
-      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i), TD),
+      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i), SE),
                           BitWidth);
     return SumOpRes;
   }

   if (SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
     // The result is the min of all operands results.
-    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0), TD);
+    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0), SE);
     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
-      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i), TD));
+      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i), SE));
     return MinOpRes;
   }

   if (SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
     // The result is the min of all operands results.
-    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0), TD);
+    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0), SE);
     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
-      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i), TD));
+      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i), SE));
     return MinOpRes;
   }

   if (SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
     // The result is the min of all operands results.
-    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0), TD);
+    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0), SE);
     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
-      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i), TD));
+      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i), SE));
     return MinOpRes;
   }

@@ -1877,8 +1924,7 @@ static uint32_t GetMinTrailingZeros(SCEVHandle S, const TargetData &TD) {
 /// Analyze the expression.
 ///
 SCEVHandle ScalarEvolutionsImpl::createSCEV(Value *V) {
-  if (!isa<IntegerType>(V->getType()) &&
-      !isa<PointerType>(V->getType()))
+  if (!isSCEVable(V->getType()))
     return SE.getUnknown(V);

   unsigned Opcode = Instruction::UserOp1;
@@ -1913,7 +1959,7 @@ SCEVHandle ScalarEvolutionsImpl::createSCEV(Value *V) {
     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
       SCEVHandle LHS = getSCEV(U->getOperand(0));
       const APInt &CIVal = CI->getValue();
-      if (GetMinTrailingZeros(LHS, TD) >=
+      if (GetMinTrailingZeros(LHS, SE) >=
           (CIVal.getBitWidth() - CIVal.countLeadingZeros()))
         return SE.getAddExpr(LHS, getSCEV(U->getOperand(1)));
     }
@@ -1963,23 +2009,23 @@ SCEVHandle ScalarEvolutionsImpl::createSCEV(Value *V) {

   case Instruction::BitCast:
     // BitCasts are no-op casts so we just eliminate the cast.
-    if ((U->getType()->isInteger() ||
-         isa<PointerType>(U->getType())) &&
-        (U->getOperand(0)->getType()->isInteger() ||
-         isa<PointerType>(U->getOperand(0)->getType())))
+    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
       return getSCEV(U->getOperand(0));
     break;

   case Instruction::IntToPtr:
+    if (!TD) break; // Without TD we can't analyze pointers.
     return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
-                                   TD.getIntPtrType());
+                                   TD->getIntPtrType());

   case Instruction::PtrToInt:
+    if (!TD) break; // Without TD we can't analyze pointers.
     return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
                                    U->getType());

   case Instruction::GetElementPtr: {
-    const Type *IntPtrTy = TD.getIntPtrType();
+    if (!TD) break; // Without TD we can't analyze pointers.
+    const Type *IntPtrTy = TD->getIntPtrType();
     Value *Base = U->getOperand(0);
     SCEVHandle TotalOffset = SE.getIntegerSCEV(0, IntPtrTy);
     gep_type_iterator GTI = gep_type_begin(U);
@@ -1990,7 +2036,7 @@ SCEVHandle ScalarEvolutionsImpl::createSCEV(Value *V) {
       // Compute the (potentially symbolic) offset in bytes for this index.
       if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
         // For a struct, add the member offset.
-        const StructLayout &SL = *TD.getStructLayout(STy);
+        const StructLayout &SL = *TD->getStructLayout(STy);
         unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
         uint64_t Offset = SL.getElementOffset(FieldNo);
         TotalOffset = SE.getAddExpr(TotalOffset,
@@ -2004,7 +2050,7 @@ SCEVHandle ScalarEvolutionsImpl::createSCEV(Value *V) {
                                               IntPtrTy);
         LocalOffset =
           SE.getMulExpr(LocalOffset,
-                        SE.getIntegerSCEV(TD.getTypePaddedSize(*GTI),
+                        SE.getIntegerSCEV(TD->getTypePaddedSize(*GTI),
                                           IntPtrTy));
         TotalOffset = SE.getAddExpr(TotalOffset, LocalOffset);
       }
@@ -3132,7 +3178,7 @@ SCEVHandle SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,

   // First check to see if the range contains zero.  If not, the first
   // iteration exits.
-  unsigned BitWidth = SE.getTargetData().getTypeSizeInBits(getType());
+  unsigned BitWidth = SE.getTypeSizeInBits(getType());
   if (!Range.contains(APInt(BitWidth, 0)))
     return SE.getConstant(ConstantInt::get(getType(),0));

@@ -3226,7 +3272,7 @@ SCEVHandle SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
 bool ScalarEvolution::runOnFunction(Function &F) {
   Impl = new ScalarEvolutionsImpl(*this, F,
                                   getAnalysis<LoopInfo>(),
-                                  getAnalysis<TargetData>());
+                                  &getAnalysis<TargetData>());
   return false;
 }

@@ -3241,8 +3287,16 @@ void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.addRequiredTransitive<TargetData>();
 }

-const TargetData &ScalarEvolution::getTargetData() const {
-  return ((ScalarEvolutionsImpl*)Impl)->getTargetData();
+bool ScalarEvolution::isSCEVable(const Type *Ty) const {
+  return ((ScalarEvolutionsImpl*)Impl)->isSCEVable(Ty);
+}
+
+uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
+  return ((ScalarEvolutionsImpl*)Impl)->getTypeSizeInBits(Ty);
+}
+
+const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
+  return ((ScalarEvolutionsImpl*)Impl)->getEffectiveSCEVType(Ty);
 }

 SCEVHandle ScalarEvolution::getCouldNotCompute() {
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -15,7 +15,6 @@

 #include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Target/TargetData.h"
 using namespace llvm;

 /// InsertCastOfTo - Insert a cast of V to the specified type, doing what
@@ -27,12 +26,14 @@ Value *SCEVExpander::InsertCastOfTo(Instruction::CastOps opcode, Value *V,
     return V;

   // Short-circuit unnecessary inttoptr<->ptrtoint casts.
-  if (opcode == Instruction::PtrToInt && Ty == TD.getIntPtrType())
-    if (IntToPtrInst *ITP = dyn_cast<IntToPtrInst>(V))
-      return ITP->getOperand(0);
-  if (opcode == Instruction::IntToPtr && V->getType() == TD.getIntPtrType())
-    if (PtrToIntInst *PTI = dyn_cast<PtrToIntInst>(V))
-      return PTI->getOperand(0);
+  if ((opcode == Instruction::PtrToInt || opcode == Instruction::IntToPtr) &&
+      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType()))
+    if (CastInst *CI = dyn_cast<CastInst>(V))
+      if ((CI->getOpcode() == Instruction::PtrToInt ||
+           CI->getOpcode() == Instruction::IntToPtr) &&
+          SE.getTypeSizeInBits(CI->getType()) ==
+          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
+        return CI->getOperand(0);

   // FIXME: keep track of the cast instruction.
   if (Constant *C = dyn_cast<Constant>(V))
@@ -83,6 +84,19 @@ Value *SCEVExpander::InsertCastOfTo(Instruction::CastOps opcode, Value *V,
   return CastInst::Create(opcode, V, Ty, V->getName(), IP);
 }

+/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
+/// which must be possible with a noop cast.
+Value *SCEVExpander::InsertNoopCastOfTo(Value *V, const Type *Ty) {
+  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
+  assert((Op == Instruction::BitCast ||
+          Op == Instruction::Instruction::PtrToInt ||
+          Op == Instruction::Instruction::IntToPtr) &&
+         "InsertNoopCastOfTo cannot perform non-noop casts!");
+  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
+         "InsertNoopCastOfTo cannot change sizes!");
+  return InsertCastOfTo(Op, V, Ty);
+}
+
 /// InsertBinop - Insert the specified binary operator, doing a small amount
 /// of work to avoid inserting an obviously redundant operation.
 Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode, Value *LHS,
@@ -113,23 +127,21 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode, Value *LHS,
 }

 Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
-  const Type *Ty = S->getType();
-  if (isa<PointerType>(Ty)) Ty = TD.getIntPtrType();
+  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
   Value *V = expand(S->getOperand(S->getNumOperands()-1));
-  V = InsertCastOfTo(CastInst::getCastOpcode(V, false, Ty, false), V, Ty);
+  V = InsertNoopCastOfTo(V, Ty);

   // Emit a bunch of add instructions
   for (int i = S->getNumOperands()-2; i >= 0; --i) {
     Value *W = expand(S->getOperand(i));
-    W = InsertCastOfTo(CastInst::getCastOpcode(W, false, Ty, false), W, Ty);
+    W = InsertNoopCastOfTo(W, Ty);
     V = InsertBinop(Instruction::Add, V, W, InsertPt);
   }
   return V;
 }

 Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
-  const Type *Ty = S->getType();
-  if (isa<PointerType>(Ty)) Ty = TD.getIntPtrType();
+  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
   int FirstOp = 0;  // Set if we should emit a subtract.
   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getOperand(0)))
     if (SC->getValue()->isAllOnesValue())
@@ -137,12 +149,12 @@ Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {

   int i = S->getNumOperands()-2;
   Value *V = expand(S->getOperand(i+1));
-  V = InsertCastOfTo(CastInst::getCastOpcode(V, false, Ty, false), V, Ty);
+  V = InsertNoopCastOfTo(V, Ty);

   // Emit a bunch of multiply instructions
   for (; i >= FirstOp; --i) {
     Value *W = expand(S->getOperand(i));
-    W = InsertCastOfTo(CastInst::getCastOpcode(W, false, Ty, false), W, Ty);
+    W = InsertNoopCastOfTo(W, Ty);
     V = InsertBinop(Instruction::Mul, V, W, InsertPt);
   }

@@ -153,11 +165,10 @@ Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
 }

 Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
-  const Type *Ty = S->getType();
-  if (isa<PointerType>(Ty)) Ty = TD.getIntPtrType();
+  const Type *Ty = SE.getEffectiveSCEVType(S->getType());

   Value *LHS = expand(S->getLHS());
-  LHS = InsertCastOfTo(CastInst::getCastOpcode(LHS, false, Ty, false), LHS, Ty);
+  LHS = InsertNoopCastOfTo(LHS, Ty);
   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
     const APInt &RHS = SC->getValue()->getValue();
     if (RHS.isPowerOf2())
@@ -167,27 +178,22 @@ Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
   }

   Value *RHS = expand(S->getRHS());
-  RHS = InsertCastOfTo(CastInst::getCastOpcode(RHS, false, Ty, false), RHS, Ty);
+  RHS = InsertNoopCastOfTo(RHS, Ty);
   return InsertBinop(Instruction::UDiv, LHS, RHS, InsertPt);
 }

 Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
-  const Type *Ty = S->getType();
+  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
   const Loop *L = S->getLoop();
-  // We cannot yet do fp recurrences, e.g. the xform of {X,+,F} --> X+{0,+,F}
-  assert((Ty->isInteger() || isa<PointerType>(Ty)) &&
-         "Cannot expand fp recurrences yet!");

   // {X,+,F} --> X + {0,+,F}
   if (!S->getStart()->isZero()) {
     Value *Start = expand(S->getStart());
-    if (isa<PointerType>(Start->getType()))
-      Start = InsertCastOfTo(Instruction::PtrToInt, Start, TD.getIntPtrType());
+    Start = InsertNoopCastOfTo(Start, Ty);
     std::vector<SCEVHandle> NewOps(S->op_begin(), S->op_end());
     NewOps[0] = SE.getIntegerSCEV(0, Ty);
     Value *Rest = expand(SE.getAddRecExpr(NewOps, L));
-    if (isa<PointerType>(Rest->getType()))
-      Rest = InsertCastOfTo(Instruction::PtrToInt, Rest, TD.getIntPtrType());
+    Rest = InsertNoopCastOfTo(Rest, Ty);

     // FIXME: look for an existing add to use.
     return InsertBinop(Instruction::Add, Rest, Start, InsertPt);
@@ -227,8 +233,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
   // If this is a simple linear addrec, emit it now as a special case.
   if (S->isAffine()) {   // {0,+,F} --> i*F
     Value *F = expand(S->getOperand(1));
-    if (isa<PointerType>(F->getType()))
-      F = InsertCastOfTo(Instruction::PtrToInt, F, TD.getIntPtrType());
+    F = InsertNoopCastOfTo(F, Ty);

     // IF the step is by one, just return the inserted IV.
     if (ConstantInt *CI = dyn_cast<ConstantInt>(F))
@@ -276,38 +281,33 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
 }

 Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
+  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
   Value *V = expand(S->getOperand());
-  if (isa<PointerType>(V->getType()))
-    V = InsertCastOfTo(Instruction::PtrToInt, V, TD.getIntPtrType());
-  return CastInst::CreateTruncOrBitCast(V, S->getType(), "tmp.", InsertPt);
+  V = InsertNoopCastOfTo(V, SE.getEffectiveSCEVType(V->getType()));
+  return CastInst::CreateTruncOrBitCast(V, Ty, "tmp.", InsertPt);
 }

 Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
-  const Type *Ty = S->getType();
-  if (isa<PointerType>(Ty)) Ty = TD.getIntPtrType();
+  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
   Value *V = expand(S->getOperand());
-  if (isa<PointerType>(V->getType()))
-    V = InsertCastOfTo(Instruction::PtrToInt, V, TD.getIntPtrType());
+  V = InsertNoopCastOfTo(V, SE.getEffectiveSCEVType(V->getType()));
   return CastInst::CreateZExtOrBitCast(V, Ty, "tmp.", InsertPt);
 }

 Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
-  const Type *Ty = S->getType();
-  if (isa<PointerType>(Ty)) Ty = TD.getIntPtrType();
+  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
   Value *V = expand(S->getOperand());
-  if (isa<PointerType>(V->getType()))
-    V = InsertCastOfTo(Instruction::PtrToInt, V, TD.getIntPtrType());
+  V = InsertNoopCastOfTo(V, SE.getEffectiveSCEVType(V->getType()));
   return CastInst::CreateSExtOrBitCast(V, Ty, "tmp.", InsertPt);
 }

 Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
-  const Type *Ty = S->getType();
+  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
   Value *LHS = expand(S->getOperand(0));
-  LHS = InsertCastOfTo(CastInst::getCastOpcode(LHS, false, Ty, false), LHS, Ty);
+  LHS = InsertNoopCastOfTo(LHS, Ty);
   for (unsigned i = 1; i < S->getNumOperands(); ++i) {
     Value *RHS = expand(S->getOperand(i));
-    RHS = InsertCastOfTo(CastInst::getCastOpcode(RHS, false, Ty, false),
-                         RHS, Ty);
+    RHS = InsertNoopCastOfTo(RHS, Ty);
     Value *ICmp = new ICmpInst(ICmpInst::ICMP_SGT, LHS, RHS, "tmp", InsertPt);
     LHS = SelectInst::Create(ICmp, LHS, RHS, "smax", InsertPt);
   }
@@ -315,13 +315,12 @@ Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
 }

 Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
-  const Type *Ty = S->getType();
+  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
   Value *LHS = expand(S->getOperand(0));
-  LHS = InsertCastOfTo(CastInst::getCastOpcode(LHS, false, Ty, false), LHS, Ty);
+  LHS = InsertNoopCastOfTo(LHS, Ty);
   for (unsigned i = 1; i < S->getNumOperands(); ++i) {
     Value *RHS = expand(S->getOperand(i));
-    RHS = InsertCastOfTo(CastInst::getCastOpcode(RHS, false, Ty, false),
-                         RHS, Ty);
+    RHS = InsertNoopCastOfTo(RHS, Ty);
     Value *ICmp = new ICmpInst(ICmpInst::ICMP_UGT, LHS, RHS, "tmp", InsertPt);
     LHS = SelectInst::Create(ICmp, LHS, RHS, "umax", InsertPt);
   }
@@ -331,11 +330,11 @@ Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
 Value *SCEVExpander::expandCodeFor(SCEVHandle SH, const Type *Ty,
                                    Instruction *IP) {
   // Expand the code for this SCEV.
-  assert(TD.getTypeSizeInBits(Ty) == TD.getTypeSizeInBits(SH->getType()) &&
+  assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
          "non-trivial casts should be done with the SCEVs directly!");
   this->InsertPt = IP;
   Value *V = expand(SH);
-  return InsertCastOfTo(CastInst::getCastOpcode(V, false, Ty, false), V, Ty);
+  return InsertNoopCastOfTo(V, Ty);
 }

 Value *SCEVExpander::expand(const SCEV *S) {
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -50,7 +50,6 @@
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/ADT/SmallVector.h"
@@ -67,7 +66,6 @@ STATISTIC(NumLFTR , "Number of loop exit tests replaced");
 namespace {
   class VISIBILITY_HIDDEN IndVarSimplify : public LoopPass {
     LoopInfo *LI;
-    TargetData *TD;
     ScalarEvolution *SE;
     bool Changed;
   public:
@@ -82,7 +80,6 @@ namespace {
       AU.addRequiredID(LCSSAID);
       AU.addRequiredID(LoopSimplifyID);
      AU.addRequired<LoopInfo>();
-      AU.addRequired<TargetData>();
       AU.addPreserved<ScalarEvolution>();
       AU.addPreservedID(LoopSimplifyID);
       AU.addPreservedID(LCSSAID);
@@ -217,7 +214,7 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L,

   // Scan all of the instructions in the loop, looking at those that have
   // extra-loop users and which are recurrences.
-  SCEVExpander Rewriter(*SE, *LI, *TD);
+  SCEVExpander Rewriter(*SE, *LI);

   // We insert the code into the preheader of the loop if the loop contains
   // multiple exit blocks, or in the exit block if there is exactly one.
@@ -350,7 +347,7 @@ void IndVarSimplify::RewriteNonIntegerIVs(Loop *L) {
 /// induction-variable PHINode Phi is cast to.
 ///
 static const Type *getEffectiveIndvarType(const PHINode *Phi,
-                                          const TargetData *TD) {
+                                          const ScalarEvolution *SE) {
   const Type *Ty = Phi->getType();

   for (Value::use_const_iterator UI = Phi->use_begin(), UE = Phi->use_end();
@@ -360,8 +357,13 @@ static const Type *getEffectiveIndvarType(const PHINode *Phi,
       CandidateType = ZI->getDestTy();
     else if (const SExtInst *SI = dyn_cast<SExtInst>(UI))
       CandidateType = SI->getDestTy();
+    else if (const IntToPtrInst *IP = dyn_cast<IntToPtrInst>(UI))
+      CandidateType = IP->getDestTy();
+    else if (const PtrToIntInst *PI = dyn_cast<PtrToIntInst>(UI))
+      CandidateType = PI->getDestTy();
     if (CandidateType &&
-        TD->getTypeSizeInBits(CandidateType) > TD->getTypeSizeInBits(Ty))
+        SE->isSCEVable(CandidateType) &&
+        SE->getTypeSizeInBits(CandidateType) > SE->getTypeSizeInBits(Ty))
       Ty = CandidateType;
   }

@@ -389,7 +391,7 @@ static const Type *getEffectiveIndvarType(const PHINode *Phi,
 static const PHINode *TestOrigIVForWrap(const Loop *L,
                                         const BranchInst *BI,
                                         const Instruction *OrigCond,
-                                        const TargetData *TD,
+                                        const ScalarEvolution &SE,
                                         bool &NoSignedWrap,
                                         bool &NoUnsignedWrap,
                                         const ConstantInt* &InitialVal,
@@ -462,7 +464,7 @@ static const PHINode *TestOrigIVForWrap(const Loop *L,
   if (const SExtInst *SI = dyn_cast<SExtInst>(CmpLHS)) {
     if (!isa<ConstantInt>(CmpRHS) ||
         !cast<ConstantInt>(CmpRHS)->getValue()
-          .isSignedIntN(TD->getTypeSizeInBits(IncrInst->getType())))
+          .isSignedIntN(SE.getTypeSizeInBits(IncrInst->getType())))
       return 0;
     IncrInst = SI->getOperand(0);
   }
@@ -470,7 +472,7 @@ static const PHINode *TestOrigIVForWrap(const Loop *L,
   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(CmpLHS)) {
     if (!isa<ConstantInt>(CmpRHS) ||
         !cast<ConstantInt>(CmpRHS)->getValue()
-          .isIntN(TD->getTypeSizeInBits(IncrInst->getType())))
+          .isIntN(SE.getTypeSizeInBits(IncrInst->getType())))
       return 0;
     IncrInst = ZI->getOperand(0);
   }
@@ -590,7 +592,6 @@ static bool allUsesAreSameTyped(unsigned int Opcode, Instruction *I) {

 bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
   LI = &getAnalysis<LoopInfo>();
-  TD = &getAnalysis<TargetData>();
   SE = &getAnalysis<ScalarEvolution>();
   Changed = false;

@@ -621,7 +622,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {

   for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
     PHINode *PN = cast<PHINode>(I);
-    if (PN->getType()->isInteger() || isa<PointerType>(PN->getType())) {
+    if (SE->isSCEVable(PN->getType())) {
       SCEVHandle SCEV = SE->getSCEV(PN);
       // FIXME: It is an extremely bad idea to indvar substitute anything more
       // complex than affine induction variables.  Doing so will put expensive
@@ -640,26 +641,25 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
   SmallSetVector<const Type *, 4> SizesToInsert;
   if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
     LargestType = BackedgeTakenCount->getType();
-    if (isa<PointerType>(LargestType))
-      LargestType = TD->getIntPtrType();
+    LargestType = SE->getEffectiveSCEVType(LargestType);
     SizesToInsert.insert(LargestType);
   }
   for (unsigned i = 0, e = IndVars.size(); i != e; ++i) {
     const PHINode *PN = IndVars[i].first;
     const Type *PNTy = PN->getType();
-    if (isa<PointerType>(PNTy)) PNTy = TD->getIntPtrType();
+    PNTy = SE->getEffectiveSCEVType(PNTy);
     SizesToInsert.insert(PNTy);
-    const Type *EffTy = getEffectiveIndvarType(PN, TD);
-    if (isa<PointerType>(EffTy)) EffTy = TD->getIntPtrType();
+    const Type *EffTy = getEffectiveIndvarType(PN, SE);
+    EffTy = SE->getEffectiveSCEVType(EffTy);
     SizesToInsert.insert(EffTy);
     if (!LargestType ||
-        TD->getTypeSizeInBits(EffTy) >
-        TD->getTypeSizeInBits(LargestType))
+        SE->getTypeSizeInBits(EffTy) >
+        SE->getTypeSizeInBits(LargestType))
       LargestType = EffTy;
   }

   // Create a rewriter object which we'll use to transform the code with.
-  SCEVExpander Rewriter(*SE, *LI, *TD);
+  SCEVExpander Rewriter(*SE, *LI);

   // Now that we know the largest of of the induction variables in this loop,
   // insert a canonical induction variable of the largest size.
@@ -683,7 +683,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
   if (Instruction *OrigCond = dyn_cast<Instruction>(BI->getCondition())) {
     // Determine if the OrigIV will ever undergo overflow.
     OrigControllingPHI =
-      TestOrigIVForWrap(L, BI, OrigCond, TD,
+      TestOrigIVForWrap(L, BI, OrigCond, *SE,
                         NoSignedWrap, NoUnsignedWrap,
                         InitialVal, IncrVal, LimitVal);

--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -26,7 +26,6 @@
 #include "llvm/Transforms/Utils/AddrModeMatcher.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Support/CFG.h"
@@ -112,8 +111,6 @@ namespace {
     LoopInfo *LI;
     DominatorTree *DT;
     ScalarEvolution *SE;
-    const TargetData *TD;
-    const Type *UIntPtrTy;
     bool Changed;

     /// IVUsesByStride - Keep track of all uses of induction variables that we
@@ -156,7 +153,6 @@ namespace {
       AU.addRequiredID(LoopSimplifyID);
       AU.addRequired<LoopInfo>();
       AU.addRequired<DominatorTree>();
-      AU.addRequired<TargetData>();
       AU.addRequired<ScalarEvolution>();
       AU.addPreserved<ScalarEvolution>();
     }
@@ -485,11 +481,11 @@ static const Type *getAccessType(const Instruction *Inst) {
 /// return true.  Otherwise, return false.
 bool LoopStrengthReduce::AddUsersIfInteresting(Instruction *I, Loop *L,
                                       SmallPtrSet<Instruction*,16> &Processed) {
-  if (!I->getType()->isInteger() && !isa<PointerType>(I->getType()))
+  if (!SE->isSCEVable(I->getType()))
     return false;   // Void and FP expressions cannot be reduced.

   // LSR is not APInt clean, do not touch integers bigger than 64-bits.
-  if (TD->getTypeSizeInBits(I->getType()) > 64)
+  if (SE->getTypeSizeInBits(I->getType()) > 64)
     return false;

   if (!Processed.insert(I))
@@ -1174,14 +1170,12 @@ bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1,
                                                 const Type *Ty2) {
   if (Ty1 == Ty2)
     return false;
+  if (SE->getEffectiveSCEVType(Ty1) == SE->getEffectiveSCEVType(Ty2))
+    return false;
   if (Ty1->canLosslesslyBitCastTo(Ty2))
     return false;
   if (TLI && TLI->isTruncateFree(Ty1, Ty2))
     return false;
-  if (isa<PointerType>(Ty2) && Ty1->canLosslesslyBitCastTo(UIntPtrTy))
-    return false;
-  if (isa<PointerType>(Ty1) && Ty2->canLosslesslyBitCastTo(UIntPtrTy))
-    return false;
   return true;
 }

@@ -1468,7 +1462,6 @@ bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode(
 ///
 static PHINode *InsertAffinePhi(SCEVHandle Start, SCEVHandle Step,
                                 const Loop *L,
-                                const TargetData *TD,
                                 SCEVExpander &Rewriter) {
   assert(Start->isLoopInvariant(L) && "New PHI start is not loop invariant!");
   assert(Step->isLoopInvariant(L) && "New PHI stride is not loop invariant!");
@@ -1477,7 +1470,7 @@ static PHINode *InsertAffinePhi(SCEVHandle Start, SCEVHandle Step,
   BasicBlock *Preheader = L->getLoopPreheader();
   BasicBlock *LatchBlock = L->getLoopLatch();
   const Type *Ty = Start->getType();
-  if (isa<PointerType>(Ty)) Ty = TD->getIntPtrType();
+  Ty = Rewriter.SE.getEffectiveSCEVType(Ty);

   PHINode *PN = PHINode::Create(Ty, "lsr.iv", Header->begin());
   PN->addIncoming(Rewriter.expandCodeFor(Start, Ty, Preheader->getTerminator()),
@@ -1564,7 +1557,7 @@ LoopStrengthReduce::PrepareToStrengthReduceFully(
     SCEVHandle Imm = UsersToProcess[i].Imm;
     SCEVHandle Base = UsersToProcess[i].Base;
     SCEVHandle Start = SE->getAddExpr(CommonExprs, Base, Imm);
-    PHINode *Phi = InsertAffinePhi(Start, Stride, L, TD,
+    PHINode *Phi = InsertAffinePhi(Start, Stride, L,
                                    PreheaderRewriter);
     // Loop over all the users with the same base.
     do {
@@ -1591,7 +1584,7 @@ LoopStrengthReduce::PrepareToStrengthReduceWithNewPhi(
   DOUT << "  Inserting new PHI:\n";

   PHINode *Phi = InsertAffinePhi(SE->getUnknown(CommonBaseV),
-                                 Stride, L, TD,
+                                 Stride, L,
                                  PreheaderRewriter);

   // Remember this in case a later stride is multiple of this.
@@ -1695,9 +1688,7 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
   // a register operand, which potentially restricts what stride values are
   // valid.
   bool HaveCommonExprs = !CommonExprs->isZero();

   const Type *ReplacedTy = CommonExprs->getType();
-  if (isa<PointerType>(ReplacedTy)) ReplacedTy = TD->getIntPtrType();
-
   // If all uses are addresses, consider sinking the immediate part of the
   // common expression back into uses if they can fit in the immediate fields.
@@ -1739,14 +1730,14 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
        << *Stride << ":\n"
        << "  Common base: " << *CommonExprs << "\n";

-  SCEVExpander Rewriter(*SE, *LI, *TD);
-  SCEVExpander PreheaderRewriter(*SE, *LI, *TD);
+  SCEVExpander Rewriter(*SE, *LI);
+  SCEVExpander PreheaderRewriter(*SE, *LI);

   BasicBlock  *Preheader = L->getLoopPreheader();
   Instruction *PreInsertPt = Preheader->getTerminator();
   BasicBlock *LatchBlock = L->getLoopLatch();

-  Value *CommonBaseV = ConstantInt::get(ReplacedTy, 0);
+  Value *CommonBaseV = Constant::getNullValue(ReplacedTy);

   SCEVHandle RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
   IVExpr   ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty),
@@ -1837,10 +1828,10 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride,

     SCEVHandle RewriteExpr = SE->getUnknown(RewriteOp);

-    if (TD->getTypeSizeInBits(RewriteOp->getType()) !=
-        TD->getTypeSizeInBits(ReplacedTy)) {
-      assert(TD->getTypeSizeInBits(RewriteOp->getType()) >
-             TD->getTypeSizeInBits(ReplacedTy) &&
+    if (SE->getTypeSizeInBits(RewriteOp->getType()) !=
+        SE->getTypeSizeInBits(ReplacedTy)) {
+      assert(SE->getTypeSizeInBits(RewriteOp->getType()) >
+             SE->getTypeSizeInBits(ReplacedTy) &&
             "Unexpected widening cast!");
      RewriteExpr = SE->getTruncateExpr(RewriteExpr, ReplacedTy);
    }
@@ -1868,13 +1859,13 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
     // it here.
     if (!ReuseIV.Base->isZero()) {
       SCEVHandle typedBase = ReuseIV.Base;
-      if (TD->getTypeSizeInBits(RewriteExpr->getType()) !=
-          TD->getTypeSizeInBits(ReuseIV.Base->getType())) {
+      if (SE->getTypeSizeInBits(RewriteExpr->getType()) !=
+          SE->getTypeSizeInBits(ReuseIV.Base->getType())) {
         // It's possible the original IV is a larger type than the new IV,
         // in which case we have to truncate the Base.  We checked in
         // RequiresTypeConversion that this is valid.
-        assert(TD->getTypeSizeInBits(RewriteExpr->getType()) <
-               TD->getTypeSizeInBits(ReuseIV.Base->getType()) &&
+        assert(SE->getTypeSizeInBits(RewriteExpr->getType()) <
+               SE->getTypeSizeInBits(ReuseIV.Base->getType()) &&
                "Unexpected lengthening conversion!");
         typedBase = SE->getTruncateExpr(ReuseIV.Base,
                                         RewriteExpr->getType());
@@ -1959,8 +1950,8 @@ namespace {
   // e.g.
   // 4, -1, X, 1, 2 ==> 1, -1, 2, 4, X
   struct StrideCompare {
-    const TargetData *TD;
-    explicit StrideCompare(const TargetData *td) : TD(td) {}
+    const ScalarEvolution *SE;
+    explicit StrideCompare(const ScalarEvolution *se) : SE(se) {}

     bool operator()(const SCEVHandle &LHS, const SCEVHandle &RHS) {
       const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS);
@@ -1980,8 +1971,8 @@ namespace {
         // If it's the same value but different type, sort by bit width so
         // that we emit larger induction variables before smaller
         // ones, letting the smaller be re-written in terms of larger ones.
-        return TD->getTypeSizeInBits(RHS->getType()) <
-               TD->getTypeSizeInBits(LHS->getType());
+        return SE->getTypeSizeInBits(RHS->getType()) <
+               SE->getTypeSizeInBits(LHS->getType());
       }
       return LHSC && !RHSC;
     }
@@ -2014,17 +2005,17 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,

   ICmpInst::Predicate Predicate = Cond->getPredicate();
   int64_t CmpSSInt = SC->getValue()->getSExtValue();
-  unsigned BitWidth = TD->getTypeSizeInBits((*CondStride)->getType());
+  unsigned BitWidth = SE->getTypeSizeInBits((*CondStride)->getType());
   uint64_t SignBit = 1ULL << (BitWidth-1);
   const Type *CmpTy = Cond->getOperand(0)->getType();
   const Type *NewCmpTy = NULL;
-  unsigned TyBits = TD->getTypeSizeInBits(CmpTy);
+  unsigned TyBits = SE->getTypeSizeInBits(CmpTy);
   unsigned NewTyBits = 0;
   SCEVHandle *NewStride = NULL;
   Value *NewCmpLHS = NULL;
   Value *NewCmpRHS = NULL;
   int64_t Scale = 1;
-  SCEVHandle NewOffset = SE->getIntegerSCEV(0, UIntPtrTy);
+  SCEVHandle NewOffset = SE->getIntegerSCEV(0, CmpTy);

   if (ConstantInt *C = dyn_cast<ConstantInt>(Cond->getOperand(1))) {
     int64_t CmpVal = C->getValue().getSExtValue();
@@ -2070,7 +2061,8 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
         continue;

       NewCmpTy = NewCmpLHS->getType();
-      NewTyBits = TD->getTypeSizeInBits(NewCmpTy);
+      NewTyBits = SE->getTypeSizeInBits(NewCmpTy);
+      const Type *NewCmpIntTy = IntegerType::get(NewTyBits);
       if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
         // Check if it is possible to rewrite it using
         // an iv / stride of a smaller integer type.
@@ -2111,13 +2103,13 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
       if (!isa<PointerType>(NewCmpTy))
         NewCmpRHS = ConstantInt::get(NewCmpTy, NewCmpVal);
       else {
-        ConstantInt *CI = ConstantInt::get(UIntPtrTy, NewCmpVal);
+        ConstantInt *CI = ConstantInt::get(NewCmpIntTy, NewCmpVal);
         NewCmpRHS = ConstantExpr::getIntToPtr(CI, NewCmpTy);
       }
       NewOffset = TyBits == NewTyBits
         ? SE->getMulExpr(CondUse->Offset,
                          SE->getConstant(ConstantInt::get(CmpTy, Scale)))
-        : SE->getConstant(ConstantInt::get(IntegerType::get(NewTyBits),
+        : SE->getConstant(ConstantInt::get(NewCmpIntTy,
             cast<SCEVConstant>(CondUse->Offset)->getValue()->getSExtValue()*Scale));
       break;
     }
@@ -2335,7 +2327,7 @@ void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {
     const Type *SrcTy = PH->getType();
     int Mantissa = DestTy->getFPMantissaWidth();
     if (Mantissa == -1) continue;
-    if ((int)TD->getTypeSizeInBits(SrcTy) > Mantissa)
+    if ((int)SE->getTypeSizeInBits(SrcTy) > Mantissa)
       continue;

     unsigned Entry, Latch;
@@ -2462,8 +2454,6 @@ bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) {
   LI = &getAnalysis<LoopInfo>();
   DT = &getAnalysis<DominatorTree>();
   SE = &getAnalysis<ScalarEvolution>();
-  TD = &getAnalysis<TargetData>();
-  UIntPtrTy = TD->getIntPtrType();
   Changed = false;

   // Find all uses of induction variables in this loop, and categorize
@@ -2481,7 +2471,7 @@ bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) {
 #endif

   // Sort the StrideOrder so we process larger strides first.
-  std::stable_sort(StrideOrder.begin(), StrideOrder.end(), StrideCompare(TD));
+  std::stable_sort(StrideOrder.begin(), StrideOrder.end(), StrideCompare(SE));

   // Optimize induction variables.  Some indvar uses can be transformed to use
   // strides that will be needed for other purposes.  A common example of this
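Client-side summary, as a hedged sketch (the `expandAt` helper is hypothetical; the constructor and `expandCodeFor` signatures are taken from the diff above): passes that previously threaded TargetData into SCEVExpander now construct it from just ScalarEvolution and LoopInfo.

```cpp
#include "llvm/Analysis/ScalarEvolutionExpander.h"
using namespace llvm;

// Hypothetical helper showing the updated construction pattern: the
// TargetData argument is gone, since SCEVExpander now sizes and converts
// types through ScalarEvolution (getTypeSizeInBits/getEffectiveSCEVType).
static Value *expandAt(ScalarEvolution &SE, LoopInfo &LI,
                       SCEVHandle H, const Type *Ty, Instruction *IP) {
  SCEVExpander Rewriter(SE, LI);   // was: SCEVExpander(SE, LI, TD)
  return Rewriter.expandCodeFor(H, Ty, IP);
}
```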