land David Blaikie's patch to de-constify Type, with a few tweaks.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@135375 91177308-0d34-0410-b5e6-96231b3b80d8
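Background, for readers of the diff below: llvm::Type instances are immutable and uniqued by their LLVMContext, so the const qualifier on Type* carried no information and mostly forced const_casts at API boundaries; this patch drops it throughout. A minimal sketch of the pattern, assuming 2011-era header paths (the getWiderIntTy helper is hypothetical, not part of the patch):

    #include "llvm/DerivedTypes.h"  // IntegerType; 2011-era path (later llvm/IR/DerivedTypes.h)
    #include "llvm/LLVMContext.h"

    using namespace llvm;

    // Hypothetical helper in the post-patch style: it returns plain Type*,
    // where pre-patch code would have returned const Type* and forced
    // const_casts in callers handing the type to builder APIs.
    static Type *getWiderIntTy(LLVMContext &Ctx, unsigned Bits) {
      // IntegerType::get returns a uniqued, immutable IntegerType*.
      return IntegerType::get(Ctx, Bits + 1);
    }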
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -219,7 +219,7 @@ struct Formula {
   void InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);
 
   unsigned getNumRegs() const;
-  const Type *getType() const;
+  Type *getType() const;
 
   void DeleteBaseReg(const SCEV *&S);
 
@@ -319,7 +319,7 @@ unsigned Formula::getNumRegs() const {
 
 /// getType - Return the type of this formula, if it has one, or null
 /// otherwise. This type is meaningless except for the bit size.
-const Type *Formula::getType() const {
+Type *Formula::getType() const {
   return !BaseRegs.empty() ? BaseRegs.front()->getType() :
          ScaledReg ? ScaledReg->getType() :
          AM.BaseGV ? AM.BaseGV->getType() :
@@ -397,7 +397,7 @@ void Formula::dump() const {
 /// isAddRecSExtable - Return true if the given addrec can be sign-extended
 /// without changing its value.
 static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
-  const Type *WideTy =
+  Type *WideTy =
     IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1);
   return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
 }
@@ -405,7 +405,7 @@ static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
 /// isAddSExtable - Return true if the given add can be sign-extended
 /// without changing its value.
 static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
-  const Type *WideTy =
+  Type *WideTy =
     IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
   return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
 }
@@ -413,7 +413,7 @@ static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
 /// isMulSExtable - Return true if the given mul can be sign-extended
 /// without changing its value.
 static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
-  const Type *WideTy =
+  Type *WideTy =
     IntegerType::get(SE.getContext(),
                      SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
   return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
@@ -594,8 +594,8 @@ static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
 }
 
 /// getAccessType - Return the type of the memory being accessed.
-static const Type *getAccessType(const Instruction *Inst) {
-  const Type *AccessTy = Inst->getType();
+static Type *getAccessType(const Instruction *Inst) {
+  Type *AccessTy = Inst->getType();
   if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
     AccessTy = SI->getOperand(0)->getType();
   else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
@@ -614,7 +614,7 @@ static const Type *getAccessType(const Instruction *Inst) {
 
   // All pointers have the same requirements, so canonicalize them to an
   // arbitrary pointer type to minimize variation.
-  if (const PointerType *PTy = dyn_cast<PointerType>(AccessTy))
+  if (PointerType *PTy = dyn_cast<PointerType>(AccessTy))
     AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                 PTy->getAddressSpace());
 
@@ -980,7 +980,7 @@ public:
   };
 
   KindType Kind;
-  const Type *AccessTy;
+  Type *AccessTy;
 
   SmallVector<int64_t, 8> Offsets;
   int64_t MinOffset;
@@ -995,7 +995,7 @@ public:
   /// this LSRUse. FindUseWithSimilarFormula can't consider uses with different
   /// max fixup widths to be equivalent, because the narrower one may be relying
   /// on the implicit truncation to truncate away bogus bits.
-  const Type *WidestFixupType;
+  Type *WidestFixupType;
 
   /// Formulae - A list of ways to build a value that can satisfy this user.
   /// After the list is populated, one of these is selected heuristically and
@@ -1005,7 +1005,7 @@ public:
   /// Regs - The set of register candidates used by all formulae in this LSRUse.
   SmallPtrSet<const SCEV *, 4> Regs;
 
-  LSRUse(KindType K, const Type *T) : Kind(K), AccessTy(T),
+  LSRUse(KindType K, Type *T) : Kind(K), AccessTy(T),
                                       MinOffset(INT64_MAX),
                                       MaxOffset(INT64_MIN),
                                       AllFixupsOutsideLoop(true),
@@ -1127,7 +1127,7 @@ void LSRUse::dump() const {
 /// be completely folded into the user instruction at isel time. This includes
 /// address-mode folding and special icmp tricks.
 static bool isLegalUse(const TargetLowering::AddrMode &AM,
-                       LSRUse::KindType Kind, const Type *AccessTy,
+                       LSRUse::KindType Kind, Type *AccessTy,
                        const TargetLowering *TLI) {
   switch (Kind) {
   case LSRUse::Address:
@@ -1176,7 +1176,7 @@ static bool isLegalUse(const TargetLowering::AddrMode &AM,
 
 static bool isLegalUse(TargetLowering::AddrMode AM,
                        int64_t MinOffset, int64_t MaxOffset,
-                       LSRUse::KindType Kind, const Type *AccessTy,
+                       LSRUse::KindType Kind, Type *AccessTy,
                        const TargetLowering *TLI) {
   // Check for overflow.
   if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
@@ -1198,7 +1198,7 @@ static bool isLegalUse(TargetLowering::AddrMode AM,
 static bool isAlwaysFoldable(int64_t BaseOffs,
                              GlobalValue *BaseGV,
                              bool HasBaseReg,
-                             LSRUse::KindType Kind, const Type *AccessTy,
+                             LSRUse::KindType Kind, Type *AccessTy,
                              const TargetLowering *TLI) {
   // Fast-path: zero is always foldable.
   if (BaseOffs == 0 && !BaseGV) return true;
@@ -1224,7 +1224,7 @@ static bool isAlwaysFoldable(int64_t BaseOffs,
 static bool isAlwaysFoldable(const SCEV *S,
                              int64_t MinOffset, int64_t MaxOffset,
                              bool HasBaseReg,
-                             LSRUse::KindType Kind, const Type *AccessTy,
+                             LSRUse::KindType Kind, Type *AccessTy,
                              const TargetLowering *TLI,
                              ScalarEvolution &SE) {
   // Fast-path: zero is always foldable.
@@ -1299,7 +1299,7 @@ class LSRInstance {
   SmallSetVector<int64_t, 8> Factors;
 
   /// Types - Interesting use types, to facilitate truncation reuse.
-  SmallSetVector<const Type *, 4> Types;
+  SmallSetVector<Type *, 4> Types;
 
   /// Fixups - The list of operands which are to be replaced.
   SmallVector<LSRFixup, 16> Fixups;
@@ -1330,11 +1330,11 @@ class LSRInstance {
   UseMapTy UseMap;
 
   bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
-                          LSRUse::KindType Kind, const Type *AccessTy);
+                          LSRUse::KindType Kind, Type *AccessTy);
 
   std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
                                     LSRUse::KindType Kind,
-                                    const Type *AccessTy);
+                                    Type *AccessTy);
 
   void DeleteUse(LSRUse &LU, size_t LUIdx);
 
@@ -1426,7 +1426,7 @@ void LSRInstance::OptimizeShadowIV() {
     IVUsers::const_iterator CandidateUI = UI;
     ++UI;
     Instruction *ShadowUse = CandidateUI->getUser();
-    const Type *DestTy = NULL;
+    Type *DestTy = NULL;
 
     /* If shadow use is a int->float cast then insert a second IV
        to eliminate this cast.
@@ -1457,7 +1457,7 @@ void LSRInstance::OptimizeShadowIV() {
     if (!PH) continue;
     if (PH->getNumIncomingValues() != 2) continue;
 
-    const Type *SrcTy = PH->getType();
+    Type *SrcTy = PH->getType();
     int Mantissa = DestTy->getFPMantissaWidth();
     if (Mantissa == -1) continue;
     if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
@@ -1776,7 +1776,7 @@ LSRInstance::OptimizeLoopTermCond() {
           if (!TLI)
             goto decline_post_inc;
           // Check for possible scaled-address reuse.
-          const Type *AccessTy = getAccessType(UI->getUser());
+          Type *AccessTy = getAccessType(UI->getUser());
           TargetLowering::AddrMode AM;
           AM.Scale = C->getSExtValue();
           if (TLI->isLegalAddressingMode(AM, AccessTy))
@@ -1840,10 +1840,10 @@ LSRInstance::OptimizeLoopTermCond() {
 /// return true.
 bool
 LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
-                                LSRUse::KindType Kind, const Type *AccessTy) {
+                                LSRUse::KindType Kind, Type *AccessTy) {
   int64_t NewMinOffset = LU.MinOffset;
   int64_t NewMaxOffset = LU.MaxOffset;
-  const Type *NewAccessTy = AccessTy;
+  Type *NewAccessTy = AccessTy;
 
   // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
   // something conservative, however this can pessimize in the case that one of
@@ -1882,7 +1882,7 @@ LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
 /// Either reuse an existing use or create a new one, as needed.
 std::pair<size_t, int64_t>
 LSRInstance::getUse(const SCEV *&Expr,
-                    LSRUse::KindType Kind, const Type *AccessTy) {
+                    LSRUse::KindType Kind, Type *AccessTy) {
   const SCEV *Copy = Expr;
   int64_t Offset = ExtractImmediate(Expr, SE);
 
@@ -2044,7 +2044,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
     LF.PostIncLoops = UI->getPostIncLoops();
 
     LSRUse::KindType Kind = LSRUse::Basic;
-    const Type *AccessTy = 0;
+    Type *AccessTy = 0;
     if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) {
       Kind = LSRUse::Address;
       AccessTy = getAccessType(LF.UserInst);
@@ -2464,7 +2464,7 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
   if (LU.Kind != LSRUse::ICmpZero) return;
 
   // Determine the integer type for the base formula.
-  const Type *IntTy = Base.getType();
+  Type *IntTy = Base.getType();
   if (!IntTy) return;
   if (SE.getTypeSizeInBits(IntTy) > 64) return;
 
@@ -2538,7 +2538,7 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
 /// scaled-offset address modes, for example.
 void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
   // Determine the integer type for the base formula.
-  const Type *IntTy = Base.getType();
+  Type *IntTy = Base.getType();
   if (!IntTy) return;
 
   // If this Formula already has a scaled register, we can't add another one.
@@ -2598,13 +2598,13 @@ void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
   if (Base.AM.BaseGV) return;
 
   // Determine the integer type for the base formula.
-  const Type *DstTy = Base.getType();
+  Type *DstTy = Base.getType();
   if (!DstTy) return;
   DstTy = SE.getEffectiveSCEVType(DstTy);
 
-  for (SmallSetVector<const Type *, 4>::const_iterator
+  for (SmallSetVector<Type *, 4>::const_iterator
        I = Types.begin(), E = Types.end(); I != E; ++I) {
-    const Type *SrcTy = *I;
+    Type *SrcTy = *I;
     if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) {
       Formula F = Base;
 
@@ -2741,7 +2741,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
     int64_t Imm = WI.Imm;
     const SCEV *OrigReg = WI.OrigReg;
 
-    const Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
+    Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
     const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
     unsigned BitWidth = SE.getTypeSizeInBits(IntTy);
 
@@ -3440,9 +3440,9 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
   Rewriter.setPostInc(LF.PostIncLoops);
 
   // This is the type that the user actually needs.
-  const Type *OpTy = LF.OperandValToReplace->getType();
+  Type *OpTy = LF.OperandValToReplace->getType();
   // This will be the type that we'll initially expand to.
-  const Type *Ty = F.getType();
+  Type *Ty = F.getType();
   if (!Ty)
     // No type known; just expand directly to the ultimate type.
     Ty = OpTy;
@@ -3450,7 +3450,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
     // Expand directly to the ultimate type if it's the right size.
     Ty = OpTy;
   // This is the type to do integer arithmetic in.
-  const Type *IntTy = SE.getEffectiveSCEVType(Ty);
+  Type *IntTy = SE.getEffectiveSCEVType(Ty);
 
   // Build up a list of operands to add together to form the full base.
   SmallVector<const SCEV *, 8> Ops;
@@ -3637,7 +3637,7 @@ void LSRInstance::RewriteForPHI(PHINode *PN,
         Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts);
 
         // If this is reuse-by-noop-cast, insert the noop cast.
-        const Type *OpTy = LF.OperandValToReplace->getType();
+        Type *OpTy = LF.OperandValToReplace->getType();
         if (FullV->getType() != OpTy)
           FullV =
             CastInst::Create(CastInst::getCastOpcode(FullV, false,
@@ -3667,7 +3667,7 @@ void LSRInstance::Rewrite(const LSRFixup &LF,
     Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts);
 
     // If this is reuse-by-noop-cast, insert the noop cast.
-    const Type *OpTy = LF.OperandValToReplace->getType();
+    Type *OpTy = LF.OperandValToReplace->getType();
     if (FullV->getType() != OpTy) {
       Instruction *Cast =
         CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
@@ -3793,7 +3793,7 @@ void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
     OS << '*' << *I;
   }
 
-  for (SmallSetVector<const Type *, 4>::const_iterator
+  for (SmallSetVector<Type *, 4>::const_iterator
        I = Types.begin(), E = Types.end(); I != E; ++I) {
     if (!First) OS << ", ";
     First = false;
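A note on the container hunks above (SmallSetVector<const Type *, 4> becoming SmallSetVector<Type *, 4>): changing a container's element type also changes what its iterators dereference to, so loop variables drop const in the same sweep. A small sketch under the same 2011-era header assumption, modeled loosely on print_factors_and_types above (printTypes itself is hypothetical, not part of the patch):

    #include "llvm/ADT/SetVector.h"        // defines SmallSetVector
    #include "llvm/DerivedTypes.h"         // Type
    #include "llvm/Support/raw_ostream.h"  // outs()

    using namespace llvm;

    // Hypothetical printer: iterates a de-constified type set. The iterator
    // now yields Type*, so the loop variable is Type* rather than const Type*.
    static void printTypes(const SmallSetVector<Type *, 4> &Types) {
      bool First = true;
      for (SmallSetVector<Type *, 4>::const_iterator
           I = Types.begin(), E = Types.end(); I != E; ++I) {
        Type *Ty = *I;  // pre-patch: const Type *Ty = *I;
        if (!First) outs() << ", ";
        First = false;
        outs() << *Ty;  // Type provides a raw_ostream operator<< in this era
      }
      outs() << '\n';
    }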