Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2025-01-24 08:33:39 +00:00)
refactor the SROA code out into its own method, no functionality change.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@36426 91177308-0d34-0410-b5e6-96231b3b80d8
Parent: ea84c5ee95
Commit: a10b29b84b
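For context: performScalarRepl drives scalar replacement of aggregates (SROA), splitting an alloca of a struct or array into one alloca per element so that mem2reg can later promote the pieces; this commit only moves the splitting code into a new DoScalarReplacement method (presumably in lib/Transforms/Scalar/ScalarReplAggregates.cpp). A minimal sketch of the transformation itself, in the typed-pointer LLVM IR syntax of that era and with invented value names, not taken from the commit:

  define void @example() {
  entry:
    ; before SROA: one aggregate alloca, addressed through GEPs of the
    ; canonical form  GEP <ptr>, 0, <cst>
    %X = alloca { i32, double }
    %a = getelementptr { i32, double }* %X, i32 0, i32 0
    store i32 1, i32* %a
    %b = getelementptr { i32, double }* %X, i32 0, i32 1
    store double 2.0, double* %b
    ret void
  }

  ; after scalar replacement: one alloca per element, named <orig>.<index>,
  ; and the three-operand GEPs are replaced by the element allocas directly:
  ;   %X.0 = alloca i32
  ;   store i32 1, i32* %X.0
  ;   %X.1 = alloca double
  ;   store double 2.0, double* %X.1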
@@ -65,6 +65,8 @@ namespace {
     bool isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI);
     bool isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI);
     int isSafeAllocaToScalarRepl(AllocationInst *AI);
+    void DoScalarReplacement(AllocationInst *AI,
+                             std::vector<AllocationInst*> &WorkList);
     void CanonicalizeAllocaUsers(AllocationInst *AI);
     AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);
 
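The header hunk above adds only the new private helper. It takes the same worklist that performScalarRepl drives, and (as the second hunk shows via WorkList.push_back(NA)) the element allocas it creates are pushed back onto that worklist, so nested aggregates are decomposed one level per visit. A rough illustration with invented names, not taken from the commit:

  ; visit 1: the outer aggregate is split
  ;   %X = alloca { i32, { i32, i32 } }
  ; becomes
  ;   %X.0 = alloca i32
  ;   %X.1 = alloca { i32, i32 }     ; %X.1 goes back on the worklist
  ; visit 2: %X.1 is split in turn
  ;   %X.1.0 = alloca i32
  ;   %X.1.1 = alloca i32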
@@ -166,118 +168,125 @@ bool SROA::performScalarRepl(Function &F) {
     // We cannot transform the allocation instruction if it is an array
     // allocation (allocations OF arrays are ok though), and an allocation of a
     // scalar value cannot be decomposed at all.
-    //
-    if (!AI->isArrayAllocation() &&
-        (isa<StructType>(AI->getAllocatedType()) ||
-         isa<ArrayType>(AI->getAllocatedType()))) {
-      // Check that all of the users of the allocation are capable of being
-      // transformed.
-      switch (isSafeAllocaToScalarRepl(AI)) {
-      default: assert(0 && "Unexpected value!");
-      case 0:  // Not safe to scalar replace.
-        break;
-      case 1:  // Safe, but requires cleanup/canonicalizations first
-        CanonicalizeAllocaUsers(AI);
-        // FALL THROUGH.
-      case 3:  // Safe to scalar replace.
-        DOUT << "Found inst to xform: " << *AI;
-        Changed = true;
-
-        SmallVector<AllocaInst*, 32> ElementAllocas;
-        if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
-          ElementAllocas.reserve(ST->getNumContainedTypes());
-          for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
-            AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
-                                            AI->getAlignment(),
-                                            AI->getName() + "." + utostr(i), AI);
-            ElementAllocas.push_back(NA);
-            WorkList.push_back(NA);  // Add to worklist for recursive processing
-          }
-        } else {
-          const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
-          ElementAllocas.reserve(AT->getNumElements());
-          const Type *ElTy = AT->getElementType();
-          for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
-            AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
-                                            AI->getName() + "." + utostr(i), AI);
-            ElementAllocas.push_back(NA);
-            WorkList.push_back(NA);  // Add to worklist for recursive processing
-          }
-        }
-
-        // Now that we have created the alloca instructions that we want to use,
-        // expand the getelementptr instructions to use them.
-        //
-        while (!AI->use_empty()) {
-          Instruction *User = cast<Instruction>(AI->use_back());
-          if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
-            RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
-            BCInst->eraseFromParent();
-            continue;
-          }
-
-          GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
-          // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
-          unsigned Idx =
-             (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
-
-          assert(Idx < ElementAllocas.size() && "Index out of range?");
-          AllocaInst *AllocaToUse = ElementAllocas[Idx];
-
-          Value *RepValue;
-          if (GEPI->getNumOperands() == 3) {
-            // Do not insert a new getelementptr instruction with zero indices, only
-            // to have it optimized out later.
-            RepValue = AllocaToUse;
-          } else {
-            // We are indexing deeply into the structure, so we still need a
-            // getelement ptr instruction to finish the indexing.  This may be
-            // expanded itself once the worklist is rerun.
-            //
-            SmallVector<Value*, 8> NewArgs;
-            NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
-            NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
-            RepValue = new GetElementPtrInst(AllocaToUse, &NewArgs[0],
-                                             NewArgs.size(), "", GEPI);
-            RepValue->takeName(GEPI);
-          }
-
-          // If this GEP is to the start of the aggregate, check for memcpys.
-          if (Idx == 0) {
-            bool IsStartOfAggregateGEP = true;
-            for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) {
-              if (!isa<ConstantInt>(GEPI->getOperand(i))) {
-                IsStartOfAggregateGEP = false;
-                break;
-              }
-              if (!cast<ConstantInt>(GEPI->getOperand(i))->isZero()) {
-                IsStartOfAggregateGEP = false;
-                break;
-              }
-            }
-
-            if (IsStartOfAggregateGEP)
-              RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);
-          }
-
-          // Move all of the users over to the new GEP.
-          GEPI->replaceAllUsesWith(RepValue);
-          // Delete the old GEP
-          GEPI->eraseFromParent();
-        }
-
-        // Finally, delete the Alloca instruction
-        AI->eraseFromParent();
-        NumReplaced++;
-      }
-    }
-
-    // Otherwise, couldn't process this.
+    if (AI->isArrayAllocation() ||
+        (!isa<StructType>(AI->getAllocatedType()) &&
+         !isa<ArrayType>(AI->getAllocatedType()))) continue;
+
+    // Check that all of the users of the allocation are capable of being
+    // transformed.
+    switch (isSafeAllocaToScalarRepl(AI)) {
+    default: assert(0 && "Unexpected value!");
+    case 0:  // Not safe to scalar replace.
+      continue;
+    case 1:  // Safe, but requires cleanup/canonicalizations first
+      CanonicalizeAllocaUsers(AI);
+      // FALL THROUGH.
+    case 3:  // Safe to scalar replace.
+      DoScalarReplacement(AI, WorkList);
+      break;
+    }
+    Changed = true;
   }
 
   return Changed;
 }
 
+/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
+/// predicate, do SROA now.
+void SROA::DoScalarReplacement(AllocationInst *AI,
+                               std::vector<AllocationInst*> &WorkList) {
+  DOUT << "Found inst to xform: " << *AI;
+  SmallVector<AllocaInst*, 32> ElementAllocas;
+  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
+    ElementAllocas.reserve(ST->getNumContainedTypes());
+    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
+      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
+                                      AI->getAlignment(),
+                                      AI->getName() + "." + utostr(i), AI);
+      ElementAllocas.push_back(NA);
+      WorkList.push_back(NA);  // Add to worklist for recursive processing
+    }
+  } else {
+    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
+    ElementAllocas.reserve(AT->getNumElements());
+    const Type *ElTy = AT->getElementType();
+    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
+      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
+                                      AI->getName() + "." + utostr(i), AI);
+      ElementAllocas.push_back(NA);
+      WorkList.push_back(NA);  // Add to worklist for recursive processing
+    }
+  }
+
+  // Now that we have created the alloca instructions that we want to use,
+  // expand the getelementptr instructions to use them.
+  //
+  while (!AI->use_empty()) {
+    Instruction *User = cast<Instruction>(AI->use_back());
+    if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
+      RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
+      BCInst->eraseFromParent();
+      continue;
+    }
+
+    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
+    // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
+    unsigned Idx =
+       (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
+
+    assert(Idx < ElementAllocas.size() && "Index out of range?");
+    AllocaInst *AllocaToUse = ElementAllocas[Idx];
+
+    Value *RepValue;
+    if (GEPI->getNumOperands() == 3) {
+      // Do not insert a new getelementptr instruction with zero indices, only
+      // to have it optimized out later.
+      RepValue = AllocaToUse;
+    } else {
+      // We are indexing deeply into the structure, so we still need a
+      // getelement ptr instruction to finish the indexing.  This may be
+      // expanded itself once the worklist is rerun.
+      //
+      SmallVector<Value*, 8> NewArgs;
+      NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
+      NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
+      RepValue = new GetElementPtrInst(AllocaToUse, &NewArgs[0],
+                                       NewArgs.size(), "", GEPI);
+      RepValue->takeName(GEPI);
+    }
+
+    // If this GEP is to the start of the aggregate, check for memcpys.
+    if (Idx == 0) {
+      bool IsStartOfAggregateGEP = true;
+      for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) {
+        if (!isa<ConstantInt>(GEPI->getOperand(i))) {
+          IsStartOfAggregateGEP = false;
+          break;
+        }
+        if (!cast<ConstantInt>(GEPI->getOperand(i))->isZero()) {
+          IsStartOfAggregateGEP = false;
+          break;
+        }
+      }
+
+      if (IsStartOfAggregateGEP)
+        RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);
+    }
+
+    // Move all of the users over to the new GEP.
+    GEPI->replaceAllUsesWith(RepValue);
+    // Delete the old GEP
+    GEPI->eraseFromParent();
+  }
+
+  // Finally, delete the Alloca instruction
+  AI->eraseFromParent();
+  NumReplaced++;
+}
+
 /// isSafeElementUse - Check to see if this use is an allowed use for a
 /// getelementptr instruction of an array aggregate allocation.  isFirstElt