mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-06-28 06:24:57 +00:00
Rename getSDiv to getExactSDiv to reflect its behavior in cases where
the division would have a remainder.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@96693 91177308-0d34-0410-b5e6-96231b3b80d8
lib/Transforms/Scalar/LoopStrengthReduce.cpp

@@ -364,12 +364,13 @@ static bool isMulSExtable(const SCEVMulExpr *A, ScalarEvolution &SE) {
   return isa<SCEVMulExpr>(SE.getSignExtendExpr(A, WideTy));
 }
 
-/// getSDiv - Return an expression for LHS /s RHS, if it can be determined,
-/// or null otherwise. If IgnoreSignificantBits is true, expressions like
-/// (X * Y) /s Y are simplified to Y, ignoring that the multiplication may
-/// overflow, which is useful when the result will be used in a context where
-/// the most significant bits are ignored.
-static const SCEV *getSDiv(const SCEV *LHS, const SCEV *RHS,
-                           ScalarEvolution &SE,
-                           bool IgnoreSignificantBits = false) {
+/// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined
+/// and if the remainder is known to be zero, or null otherwise. If
+/// IgnoreSignificantBits is true, expressions like (X * Y) /s Y are simplified
+/// to Y, ignoring that the multiplication may overflow, which is useful when
+/// the result will be used in a context where the most significant bits are
+/// ignored.
+static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
+                                ScalarEvolution &SE,
+                                bool IgnoreSignificantBits = false) {
   // Handle the trivial case, which works for any SCEV type.
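In plain terms, the new name documents the contract the function already had: a quotient is produced only when the remainder is known to be zero. A minimal sketch of that contract over plain integers (illustrative only; the real function operates on symbolic SCEV expressions, and exactSDiv here is a hypothetical stand-in):

#include <cassert>
#include <cstdint>
#include <optional>

// Sketch of the "exact" contract: return LHS / RHS only when the division
// leaves no remainder; an empty result mirrors getExactSDiv returning null.
std::optional<int64_t> exactSDiv(int64_t LHS, int64_t RHS) {
  if (RHS == 0 || LHS % RHS != 0)
    return std::nullopt;
  return LHS / RHS;
}

int main() {
  assert(exactSDiv(12, 4).value() == 3); // exact: quotient is returned
  assert(!exactSDiv(13, 4).has_value()); // remainder 1: "null", not 3
}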
@@ -395,10 +396,10 @@ static const SCEV *getSDiv(const SCEV *LHS, const SCEV *RHS,
   // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
     if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) {
-      const SCEV *Start = getSDiv(AR->getStart(), RHS, SE,
-                                  IgnoreSignificantBits);
+      const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
+                                       IgnoreSignificantBits);
       if (!Start) return 0;
-      const SCEV *Step = getSDiv(AR->getStepRecurrence(SE), RHS, SE,
-                                 IgnoreSignificantBits);
+      const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
+                                      IgnoreSignificantBits);
       if (!Step) return 0;
       return SE.getAddRecExpr(Start, Step, AR->getLoop());
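Why distributing over an add recurrence is sound: the recurrence's value at iteration i is Start + i*Step, so a factor that divides both Start and Step exactly divides every value exactly. A toy model under the same assumptions as the sketch above (constants standing in for SCEV operands):

#include <cstdint>
#include <optional>

// Toy affine recurrence {Start,+,Step}: its value at iteration i is
// Start + i * Step.
struct AddRec { int64_t Start, Step; };

// F divides the recurrence exactly iff it divides Start and Step exactly,
// since (Start + i*Step) / F == Start/F + i * (Step/F) in that case.
std::optional<AddRec> exactSDivAddRec(const AddRec &AR, int64_t F) {
  if (F == 0 || AR.Start % F != 0 || AR.Step % F != 0)
    return std::nullopt; // either operand inexact: the whole recurrence fails
  return AddRec{AR.Start / F, AR.Step / F};
}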
@@ -411,7 +412,7 @@ static const SCEV *getSDiv(const SCEV *LHS, const SCEV *RHS,
     SmallVector<const SCEV *, 8> Ops;
     for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
          I != E; ++I) {
-      const SCEV *Op = getSDiv(*I, RHS, SE,
-                               IgnoreSignificantBits);
+      const SCEV *Op = getExactSDiv(*I, RHS, SE,
+                                    IgnoreSignificantBits);
       if (!Op) return 0;
       Ops.push_back(Op);
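The add-expression case applies the same rule operand by operand, and it is conservative: one inexact addend rejects the whole sum, even though a sum such as (3 + 1) /s 4 happens to be exact as a whole. A toy version with constants in place of SCEV operands:

#include <cstdint>
#include <optional>
#include <vector>

// (Op0 + Op1 + ...) / F is rebuilt as Op0/F + Op1/F + ...; every operand
// must divide exactly, mirroring "if (!Op) return 0;" in the source.
std::optional<int64_t> exactSDivSum(const std::vector<int64_t> &Ops,
                                    int64_t F) {
  int64_t Sum = 0;
  for (int64_t Op : Ops) {
    if (F == 0 || Op % F != 0)
      return std::nullopt; // one inexact operand poisons the whole sum
    Sum += Op / F;
  }
  return Sum;
}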
@@ -428,7 +429,8 @@ static const SCEV *getSDiv(const SCEV *LHS, const SCEV *RHS,
     for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
          I != E; ++I) {
       if (!Found)
-        if (const SCEV *Q = getSDiv(*I, RHS, SE, IgnoreSignificantBits)) {
+        if (const SCEV *Q = getExactSDiv(*I, RHS, SE,
+                                         IgnoreSignificantBits)) {
           Ops.push_back(Q);
           Found = true;
           continue;
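For a multiply expression only one factor has to divide exactly: (X * Y * Z) /s F equals (X/F) * Y * Z whenever F divides X with no remainder. A toy rendering of the Found logic above, with constant factors standing in for SCEV operands:

#include <cstdint>
#include <optional>
#include <vector>

// Divide the first exactly divisible factor by F and keep the rest
// unchanged; fail only if no factor divides exactly.
std::optional<int64_t> exactSDivProduct(std::vector<int64_t> Ops, int64_t F) {
  bool Found = false;
  int64_t Product = 1;
  for (int64_t &Op : Ops) {
    if (!Found && F != 0 && Op % F == 0) {
      Op /= F;       // replace one factor with its exact quotient
      Found = true;
    }
    Product *= Op;
  }
  if (!Found) return std::nullopt;
  return Product;
}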
@@ -1560,7 +1562,7 @@ LSRInstance::OptimizeLoopTermCond() {
           A = SE.getSignExtendExpr(A, B->getType());
         }
         if (const SCEVConstant *D =
-              dyn_cast_or_null<SCEVConstant>(getSDiv(B, A, SE))) {
+              dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) {
           // Stride of one or negative one can have reuse with non-addresses.
           if (D->getValue()->isOne() ||
               D->getValue()->isAllOnesValue())
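Here the stride ratio D = B /s A matters only when it is exactly +1 or -1, meaning the two strides are equal up to sign. Because the division is exact, a pair like B = 6, A = 4 yields no ratio at all rather than a misleading truncated quotient of 1. A small sketch of that test, under the same toy-integer assumptions as earlier (helper names hypothetical):

#include <cstdint>
#include <optional>

std::optional<int64_t> exactSDiv(int64_t LHS, int64_t RHS) {
  if (RHS == 0 || LHS % RHS != 0)
    return std::nullopt;
  return LHS / RHS;
}

// Strides are interchangeable for reuse when their exact ratio is +/-1.
bool stridesInterchangeable(int64_t B, int64_t A) {
  auto D = exactSDiv(B, A);
  return D && (*D == 1 || *D == -1);
}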
@@ -1754,12 +1756,12 @@ void LSRInstance::CollectInterestingTypesAndFactors() {
         OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
       }
       if (const SCEVConstant *Factor =
-            dyn_cast_or_null<SCEVConstant>(getSDiv(NewStride, OldStride,
-                                                   SE, true))) {
+            dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride,
+                                                        SE, true))) {
         if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
           Factors.insert(Factor->getValue()->getValue().getSExtValue());
       } else if (const SCEVConstant *Factor =
-                   dyn_cast_or_null<SCEVConstant>(getSDiv(OldStride, NewStride,
-                                                          SE, true))) {
+                   dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride, NewStride,
+                                                               SE, true))) {
         if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
           Factors.insert(Factor->getValue()->getValue().getSExtValue());
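CollectInterestingTypesAndFactors probes each pair of strides in both directions for an exact ratio and records it as a candidate scale factor; the getMinSignedBits() <= 64 guard ensures the constant fits in an int64_t before insertion. A toy version with constant strides (hypothetical names; the real strides are SCEVs):

#include <cstdint>
#include <optional>
#include <set>

std::optional<int64_t> exactSDiv(int64_t LHS, int64_t RHS) {
  if (RHS == 0 || LHS % RHS != 0)
    return std::nullopt;
  return LHS / RHS;
}

// Try the ratio in both directions; record it only when it is exact.
// (The source additionally checks that the constant fits in 64 bits.)
void collectFactor(int64_t NewStride, int64_t OldStride,
                   std::set<int64_t> &Factors) {
  if (auto Factor = exactSDiv(NewStride, OldStride))
    Factors.insert(*Factor);
  else if (auto Factor = exactSDiv(OldStride, NewStride))
    Factors.insert(*Factor);
}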
@@ -2175,14 +2177,14 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
       // Check that multiplying with each base register doesn't overflow.
       for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
         F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
-        if (getSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
+        if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
           goto next;
       }
 
       // Check that multiplying with the scaled register doesn't overflow.
       if (F.ScaledReg) {
         F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
-        if (getSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
+        if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
           continue;
       }
 
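This hunk uses a multiply-then-divide round trip as an overflow probe: scale a register by the factor, divide back exactly, and reject the candidate formula if the original value does not reappear. A sketch of the idea with int8_t standing in for a narrow SCEV type (helper name hypothetical; conversions rely on two's-complement wraparound, well defined since C++20):

#include <cassert>
#include <cstdint>
#include <optional>

std::optional<int8_t> exactSDiv8(int8_t LHS, int8_t RHS) {
  if (RHS == 0 || LHS % RHS != 0)
    return std::nullopt;
  return static_cast<int8_t>(LHS / RHS);
}

int main() {
  int8_t Reg = 5, Factor = 4;
  int8_t Scaled = static_cast<int8_t>(Reg * Factor); // 20, no overflow
  assert(exactSDiv8(Scaled, Factor).value() == Reg); // round trip succeeds

  Reg = 100;
  Scaled = static_cast<int8_t>(Reg * Factor);         // 400 wraps to -112
  assert(exactSDiv8(Scaled, Factor).value() != Reg);  // -28 != 100: reject
}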
@@ -2237,7 +2239,7 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx,
       continue;
     // Divide out the factor, ignoring high bits, since we'll be
     // scaling the value back up in the end.
-    if (const SCEV *Quotient = getSDiv(AR, FactorS, SE, true)) {
+    if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) {
       // TODO: This could be optimized to avoid all the copying.
       Formula F = Base;
       F.ScaledReg = Quotient;
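The trailing true here enables the IgnoreSignificantBits mode described in the comment at the top of the function: simplifications like (X * Y) /s X to Y are allowed even when the product may overflow, because the quotient is scaled back up afterwards and wrapping multiplication reproduces the product bit for bit. A small demonstration with 8-bit wrapping arithmetic (uint8_t as a stand-in for a narrow type):

#include <cassert>
#include <cstdint>

int main() {
  // Simplify (X * Y) /s X to Y even though X * Y wraps: re-multiplying
  // the symbolic quotient by X reproduces the wrapped product exactly,
  // so the discarded high bits never matter.
  uint8_t X = 100, Y = 4;
  uint8_t Prod = static_cast<uint8_t>(X * Y); // 400 wraps to 144
  uint8_t Q = Y;                              // symbolic quotient, overflow ignored
  assert(static_cast<uint8_t>(Q * X) == Prod); // low bits round-trip
}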