[SROA] Refactor the integer and vector promotion testing logic to operate in terms of the new Partition class, and generally have a clearer set of arguments.

No functionality changed.

The most notable improvements here are consistently using the terminology of 'partition' for a collection of slices that will be rewritten together and 'slice' for a region of an alloca that is used by a particular instruction. This also makes it clearer that the split things are actually slices as well, just ones that will be split by the proposed partition.

This doesn't yet address the confusing aspects of the partition's interface, where slices that will be split by the partition and start prior to the partition are accessed via Partition::splitSlices(), while the core range of slices exposed by a Partition includes both unsplit slices and slices that will be split by the partition's end but start within the partition's offset range. This is particularly hard to address because the algorithm which computes partitions quite literally doesn't know which slices these will end up being until too late. I'm looking at whether I can fix that or not, but I'm not optimistic. I'll update the comments and/or names to further explain this either way. I've also added one FIXME in this patch relating to this confusion so that I don't forget about it.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@224798 91177308-0d34-0410-b5e6-96231b3b80d8
parent 3545994ffe
commit c807870534
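The commit message above leans on two pieces of vocabulary: a 'partition' is a byte range of the alloca together with the slices that will be rewritten for it as a unit, and a 'slice' is the region used by a single instruction. The partition's core range covers slices that start within its byte range, while Partition::splitSlices() exposes slices that start before the partition but get split by it. The standalone C++ sketch below is only an illustration of that shape, not LLVM's real AllocaSlices::Partition API; the mock types and the helper sliceFitsElementGrid are invented here, but the offset clamping mirrors what the first hunk of the diff does.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Mock of a slice: a byte range of the alloca used by one instruction.
struct Slice {
  uint64_t Begin, End;
  uint64_t beginOffset() const { return Begin; }
  uint64_t endOffset() const { return End; }
};

// Mock of a partition: a byte range plus the slices rewritten together.
class Partition {
  uint64_t Begin, End;
  std::vector<Slice> CoreSlices;    // slices starting inside the partition
  std::vector<const Slice *> Split; // slices split by this partition that
                                    // started before its begin offset
public:
  Partition(uint64_t B, uint64_t E) : Begin(B), End(E) {}
  uint64_t beginOffset() const { return Begin; }
  uint64_t endOffset() const { return End; }
  void add(Slice S) { CoreSlices.push_back(S); }
  void addSplit(const Slice *S) { Split.push_back(S); }
  std::vector<Slice>::const_iterator begin() const { return CoreSlices.begin(); }
  std::vector<Slice>::const_iterator end() const { return CoreSlices.end(); }
  const std::vector<const Slice *> &splitSlices() const { return Split; }
};

// Clamp a slice to the partition and check that it lands on element
// boundaries, mirroring the offset arithmetic in the first hunk below.
static bool sliceFitsElementGrid(const Partition &P, const Slice &S,
                                 uint64_t ElementSize, uint64_t NumElements) {
  uint64_t BeginOffset =
      std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset();
  uint64_t EndOffset =
      std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
  uint64_t BeginIndex = BeginOffset / ElementSize;
  uint64_t EndIndex = EndOffset / ElementSize;
  return BeginIndex * ElementSize == BeginOffset && BeginIndex < NumElements &&
         EndIndex * ElementSize == EndOffset && EndIndex <= NumElements;
}

int main() {
  Partition P(/*Begin=*/16, /*End=*/32); // 16 bytes, think <4 x i32>
  P.add({16, 32});                       // covers the whole partition
  P.add({20, 24});                       // exactly one 4-byte element
  Slice Straddler{8, 24};                // starts before the partition,
  P.addSplit(&Straddler);                // so it is only seen via splitSlices()

  // Both collections have to pass, exactly as the refactored checks walk
  // the core range and then P.splitSlices().
  for (const Slice &S : P)
    std::cout << "core slice viable: " << sliceFitsElementGrid(P, S, 4, 4) << '\n';
  for (const Slice *S : P.splitSlices())
    std::cout << "split slice viable: " << sliceFitsElementGrid(P, *S, 4, 4) << '\n';
}

The real checks of course also inspect the loads, stores, and memory intrinsics behind each slice; only the clamping arithmetic carries over more or less verbatim.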
@@ -1878,19 +1878,19 @@ static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
 ///
 /// This function is called to test each entry in a partioning which is slated
 /// for a single slice.
-static bool
-isVectorPromotionViableForSlice(const DataLayout &DL, uint64_t SliceBeginOffset,
-                                uint64_t SliceEndOffset, VectorType *Ty,
-                                uint64_t ElementSize, const Slice &S) {
+static bool isVectorPromotionViableForSlice(AllocaSlices::Partition &P,
+                                            const Slice &S, VectorType *Ty,
+                                            uint64_t ElementSize,
+                                            const DataLayout &DL) {
   // First validate the slice offsets.
   uint64_t BeginOffset =
-      std::max(S.beginOffset(), SliceBeginOffset) - SliceBeginOffset;
+      std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset();
   uint64_t BeginIndex = BeginOffset / ElementSize;
   if (BeginIndex * ElementSize != BeginOffset ||
       BeginIndex >= Ty->getNumElements())
     return false;
   uint64_t EndOffset =
-      std::min(S.endOffset(), SliceEndOffset) - SliceBeginOffset;
+      std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
   uint64_t EndIndex = EndOffset / ElementSize;
   if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements())
     return false;
@@ -1922,7 +1922,7 @@ isVectorPromotionViableForSlice(const DataLayout &DL, uint64_t SliceBeginOffset,
     if (LI->isVolatile())
       return false;
     Type *LTy = LI->getType();
-    if (SliceBeginOffset > S.beginOffset() || SliceEndOffset < S.endOffset()) {
+    if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
       assert(LTy->isIntegerTy());
       LTy = SplitIntTy;
     }
@@ -1932,7 +1932,7 @@ isVectorPromotionViableForSlice(const DataLayout &DL, uint64_t SliceBeginOffset,
     if (SI->isVolatile())
       return false;
     Type *STy = SI->getValueOperand()->getType();
-    if (SliceBeginOffset > S.beginOffset() || SliceEndOffset < S.endOffset()) {
+    if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
      assert(STy->isIntegerTy());
       STy = SplitIntTy;
     }
@@ -1954,11 +1954,8 @@ isVectorPromotionViableForSlice(const DataLayout &DL, uint64_t SliceBeginOffset,
 /// SSA value. We only can ensure this for a limited set of operations, and we
 /// don't want to do the rewrites unless we are confident that the result will
 /// be promotable, so we have an early test here.
-static VectorType *
-isVectorPromotionViable(const DataLayout &DL, uint64_t SliceBeginOffset,
-                        uint64_t SliceEndOffset,
-                        AllocaSlices::const_range Slices,
-                        ArrayRef<AllocaSlices::iterator> SplitUses) {
+static VectorType *isVectorPromotionViable(AllocaSlices::Partition &P,
+                                           const DataLayout &DL) {
   // Collect the candidate types for vector-based promotion. Also track whether
   // we have different element types.
   SmallVector<VectorType *, 4> CandidateTys;
@@ -1974,9 +1971,9 @@ isVectorPromotionViable(const DataLayout &DL, uint64_t SliceBeginOffset,
     }
   };
   // Consider any loads or stores that are the exact size of the slice.
-  for (const auto &S : Slices)
-    if (S.beginOffset() == SliceBeginOffset &&
-        S.endOffset() == SliceEndOffset) {
+  for (const Slice &S : P)
+    if (S.beginOffset() == P.beginOffset() &&
+        S.endOffset() == P.endOffset()) {
       if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser()))
         CheckCandidateType(LI->getType());
       else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser()))
@@ -2043,14 +2040,12 @@ isVectorPromotionViable(const DataLayout &DL, uint64_t SliceBeginOffset,
            "vector size not a multiple of element size?");
     ElementSize /= 8;
 
-    for (const auto &S : Slices)
-      if (!isVectorPromotionViableForSlice(DL, SliceBeginOffset, SliceEndOffset,
-                                           VTy, ElementSize, S))
+    for (const Slice &S : P)
+      if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL))
        return false;
 
-    for (const auto &SI : SplitUses)
-      if (!isVectorPromotionViableForSlice(DL, SliceBeginOffset, SliceEndOffset,
-                                           VTy, ElementSize, *SI))
+    for (const Slice *S : P.splitSlices())
+      if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL))
         return false;
 
     return true;
@@ -2066,11 +2061,13 @@ isVectorPromotionViable(const DataLayout &DL, uint64_t SliceBeginOffset,
 ///
 /// This implements the necessary checking for the \c isIntegerWideningViable
 /// test below on a single slice of the alloca.
-static bool isIntegerWideningViableForSlice(const DataLayout &DL,
-                                            Type *AllocaTy,
+static bool isIntegerWideningViableForSlice(const Slice &S,
                                             uint64_t AllocBeginOffset,
-                                            uint64_t Size, const Slice &S,
+                                            Type *AllocaTy,
+                                            const DataLayout &DL,
                                             bool &WholeAllocaOp) {
+  uint64_t Size = DL.getTypeStoreSize(AllocaTy);
+
   uint64_t RelBegin = S.beginOffset() - AllocBeginOffset;
   uint64_t RelEnd = S.endOffset() - AllocBeginOffset;
 
@@ -2138,11 +2135,8 @@ static bool isIntegerWideningViableForSlice(const DataLayout &DL,
 /// This is a quick test to check whether we can rewrite the integer loads and
 /// stores to a particular alloca into wider loads and stores and be able to
 /// promote the resulting alloca.
-static bool
-isIntegerWideningViable(const DataLayout &DL, Type *AllocaTy,
-                        uint64_t AllocBeginOffset,
-                        AllocaSlices::const_range Slices,
-                        ArrayRef<AllocaSlices::iterator> SplitUses) {
+static bool isIntegerWideningViable(AllocaSlices::Partition &P, Type *AllocaTy,
+                                    const DataLayout &DL) {
   uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy);
   // Don't create integer types larger than the maximum bitwidth.
   if (SizeInBits > IntegerType::MAX_INT_BITS)
@@ -2160,24 +2154,24 @@ isIntegerWideningViable(const DataLayout &DL, Type *AllocaTy,
       !canConvertValue(DL, IntTy, AllocaTy))
     return false;
 
-  uint64_t Size = DL.getTypeStoreSize(AllocaTy);
-
   // While examining uses, we ensure that the alloca has a covering load or
   // store. We don't want to widen the integer operations only to fail to
   // promote due to some other unsplittable entry (which we may make splittable
   // later). However, if there are only splittable uses, go ahead and assume
   // that we cover the alloca.
+  // FIXME: We shouldn't consider split slices that happen to start in the
+  // partition here...
   bool WholeAllocaOp =
-      Slices.begin() != Slices.end() ? false : DL.isLegalInteger(SizeInBits);
+      P.begin() != P.end() ? false : DL.isLegalInteger(SizeInBits);
 
-  for (const auto &S : Slices)
-    if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size,
-                                         S, WholeAllocaOp))
+  for (const Slice &S : P)
+    if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL,
+                                         WholeAllocaOp))
      return false;
 
-  for (const auto &SI : SplitUses)
-    if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size,
-                                         *SI, WholeAllocaOp))
+  for (const Slice *S : P.splitSlices())
+    if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL,
+                                         WholeAllocaOp))
       return false;
 
   return WholeAllocaOp;
@@ -3452,16 +3446,10 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
     SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size());
   assert(DL->getTypeAllocSize(SliceTy) >= P.size());
 
-  bool IsIntegerPromotable = isIntegerWideningViable(
-      *DL, SliceTy, P.beginOffset(),
-      AllocaSlices::const_range(P.begin(), P.end()), P.splitSlices());
+  bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, *DL);
 
   VectorType *VecTy =
-      IsIntegerPromotable
-          ? nullptr
-          : isVectorPromotionViable(
-                *DL, P.beginOffset(), P.endOffset(),
-                AllocaSlices::const_range(P.begin(), P.end()), P.splitSlices());
+      IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, *DL);
   if (VecTy)
     SliceTy = VecTy;
 