Refactor some code out of ConvertUsesToScalar into their own methods, no
functionality change.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@47751 91177308-0d34-0410-b5e6-96231b3b80d8
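For orientation, the shape of the refactor is roughly the following. This is a simplified sketch distilled from the diff below, not the verbatim new code: the BitCast and GEP cases and the error handling are elided. The use-walking loop stays in ConvertUsesToScalar, while the per-load and per-store rewriting moves into the two new helpers, which return the replacement (or to-be-stored) value instead of rewriting in place.

// Simplified sketch of the post-refactor control flow (not verbatim).
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());
    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // Helper computes the scalar value that replaces the load.
      Value *NV = ConvertUsesOfLoadToScalar(LI, NewAI, Offset);
      LI->replaceAllUsesWith(NV);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Helper computes the value to store into the new alloca.
      Value *SV = ConvertUsesOfStoreToScalar(SI, NewAI, Offset);
      new StoreInst(SV, NewAI, SI);
      SI->eraseFromParent();
    }
    // BitCast and GEP users are handled as before; see the full diff below.
  }
}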
@@ -118,6 +118,10 @@ namespace {
    const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial);
    void ConvertToScalar(AllocationInst *AI, const Type *Ty);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset);
    Value *ConvertUsesOfLoadToScalar(LoadInst *LI, AllocaInst *NewAI,
                                     unsigned Offset);
    Value *ConvertUsesOfStoreToScalar(StoreInst *SI, AllocaInst *NewAI,
                                      unsigned Offset);
    static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
  };

@@ -1071,25 +1075,97 @@ void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) {
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
  const TargetData &TD = getAnalysis<TargetData>();
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      Value *NV = ConvertUsesOfLoadToScalar(LI, NewAI, Offset);
      LI->replaceAllUsesWith(NV);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");

      Value *SV = ConvertUsesOfStoreToScalar(SI, NewAI, Offset);
      new StoreInst(SV, NewAI, SI);
      SI->eraseFromParent();

    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      const PointerType *AggPtrTy =
        cast<PointerType>(GEP->getOperand(0)->getType());
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned AggSizeInBits =
        TD.getABITypeSizeInBits(AggPtrTy->getElementType());

      // Check to see if this is stepping over an element: GEP Ptr, int C
      unsigned NewOffset = Offset;
      if (GEP->getNumOperands() == 2) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned BitOffset = Idx*AggSizeInBits;

        NewOffset += BitOffset;
      } else if (GEP->getNumOperands() == 3) {
        // We know that operand #2 is zero.
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
        const Type *AggTy = AggPtrTy->getElementType();
        if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
          unsigned ElSizeBits =
            TD.getABITypeSizeInBits(SeqTy->getElementType());

          NewOffset += ElSizeBits*Idx;
        } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) {
          unsigned EltBitOffset =
            TD.getStructLayout(STy)->getElementOffsetInBits(Idx);

          NewOffset += EltBitOffset;
        } else {
          assert(0 && "Unsupported operation!");
          abort();
        }
      } else {
        assert(0 && "Unsupported operation!");
        abort();
      }
      ConvertUsesToScalar(GEP, NewAI, NewOffset);
      GEP->eraseFromParent();
    } else {
      assert(0 && "Unsupported operation!");
      abort();
    }
  }
}

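To make the bit-offset bookkeeping above concrete, here is a small arithmetic sketch with hypothetical values (not LLVM code): suppose the original alloca has type [4 x i32] and a use reaches it through "GEP Ptr, 0, 2".

// Hypothetical worked example of the NewOffset computation above.
// Alloca type: [4 x i32]; GEP indices: operand #1 == 0, operand #2 == 2.
unsigned Offset     = 0;                 // bit offset carried in from the caller
unsigned ElSizeBits = 32;                // ABI size of the i32 element, in bits
unsigned Idx        = 2;                 // constant element index from the GEP
unsigned NewOffset  = Offset + ElSizeBits * Idx;   // == 64
// Loads and stores reached through this GEP therefore touch bits [64, 96) of
// the single scalar (or element 2 of the vector) that replaces the alloca.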
/// ConvertUsesOfLoadToScalar - Convert all of the users of the specified load
/// to use the new alloca directly, returning the value that should replace the
/// load.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
Value *SROA::ConvertUsesOfLoadToScalar(LoadInst *LI, AllocaInst *NewAI,
                                       unsigned Offset) {
  // The load is a bit extract from NewAI shifted right by Offset bits.
  Value *NV = new LoadInst(NewAI, LI->getName(), LI);

  if (NV->getType() == LI->getType() && Offset == 0) {
    // We win, no conversion needed.
  } else if (const VectorType *PTy = dyn_cast<VectorType>(NV->getType())) {
    return NV;
  }

  if (const VectorType *PTy = dyn_cast<VectorType>(NV->getType())) {
    // If the result alloca is a vector type, this is either an element
    // access or a bitcast to another vector type.
    if (isa<VectorType>(LI->getType())) {
      NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
    } else {
      // Must be an element access.
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned Elt = Offset/TD.getABITypeSizeInBits(PTy->getElementType());
      NV = new ExtractElementInst(NV, ConstantInt::get(Type::Int32Ty, Elt),
                                  "tmp", LI);
    }
  } else if (isa<PointerType>(NV->getType())) {
    assert(isa<PointerType>(LI->getType()));
@@ -1102,6 +1178,7 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
    // If this is a big-endian system and the load is narrower than the
    // full alloca type, we need to do a shift to get the right bits.
    int ShAmt = 0;
    const TargetData &TD = getAnalysis<TargetData>();
    if (TD.isBigEndian()) {
      // On big-endian machines, the lowest bit is stored at the bit offset
      // from the pointer given by getTypeStoreSizeInBits.  This matters for
@@ -1141,10 +1218,20 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
      NV = new IntToPtrInst(NV, LI->getType(), LI->getName(), LI);
    }
  }
      LI->replaceAllUsesWith(NV);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");
  return NV;
}

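As a conceptual model of the "bit extract" described above, the following is ordinary C++, not the IR the pass emits; the widths, the helper name extractField, and the simplification that store size equals type size are all illustrative assumptions.

// Illustrative model only: extract a 16-bit field from a 64-bit scalarized
// alloca value at a given bit offset, with the endian-dependent shift.
#include <cstdint>

uint16_t extractField(uint64_t AllocaVal, unsigned Offset, bool BigEndian) {
  const unsigned DestWidth = 64, FieldWidth = 16;
  // Little-endian: the field sits Offset bits above the low end.
  unsigned ShAmt = Offset;
  // Big-endian: the lowest-addressed bytes hold the most significant bits,
  // so the shift amount is measured from the high end instead.
  if (BigEndian)
    ShAmt = DestWidth - FieldWidth - Offset;
  return static_cast<uint16_t>(AllocaVal >> ShAmt);  // shift right, then truncate
}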
/// ConvertUsesOfStoreToScalar - Convert the specified store to a load+store
/// pair of the new alloca directly, returning the value that should be stored
/// to the alloca.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
Value *SROA::ConvertUsesOfStoreToScalar(StoreInst *SI, AllocaInst *NewAI,
                                        unsigned Offset) {

  // Convert the stored type to the actual type, shift it left to insert
  // then 'or' into place.
@@ -1161,6 +1248,7 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
      SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
    } else {
      // Must be an element insertion.
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned Elt = Offset/TD.getABITypeSizeInBits(PTy->getElementType());
      SV = new InsertElementInst(Old, SV,
                                 ConstantInt::get(Type::Int32Ty, Elt),
@@ -1177,6 +1265,7 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
      // If SV is a float, convert it to the appropriate integer type.
      // If it is a pointer, do the same, and also handle ptr->ptr casts
      // here.
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType());
      unsigned DestWidth = TD.getTypeSizeInBits(AllocaType);
      unsigned SrcStoreWidth = TD.getTypeStoreSizeInBits(SV->getType());
@@ -1228,58 +1317,11 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
      SV = BinaryOperator::createOr(Old, SV, SV->getName()+".ins", SI);
    }
  }
      new StoreInst(SV, NewAI, SI);
      SI->eraseFromParent();

    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      const PointerType *AggPtrTy =
        cast<PointerType>(GEP->getOperand(0)->getType());
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned AggSizeInBits =
        TD.getABITypeSizeInBits(AggPtrTy->getElementType());

      // Check to see if this is stepping over an element: GEP Ptr, int C
      unsigned NewOffset = Offset;
      if (GEP->getNumOperands() == 2) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned BitOffset = Idx*AggSizeInBits;

        NewOffset += BitOffset;
      } else if (GEP->getNumOperands() == 3) {
        // We know that operand #2 is zero.
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
        const Type *AggTy = AggPtrTy->getElementType();
        if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
          unsigned ElSizeBits =
            TD.getABITypeSizeInBits(SeqTy->getElementType());

          NewOffset += ElSizeBits*Idx;
        } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) {
          unsigned EltBitOffset =
            TD.getStructLayout(STy)->getElementOffsetInBits(Idx);

          NewOffset += EltBitOffset;
        } else {
          assert(0 && "Unsupported operation!");
          abort();
        }
      } else {
        assert(0 && "Unsupported operation!");
        abort();
      }
      ConvertUsesToScalar(GEP, NewAI, NewOffset);
      GEP->eraseFromParent();
    } else {
      assert(0 && "Unsupported operation!");
      abort();
    }
  }
  return SV;
}

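The store path is the mirror image of the load path. Conceptually (again plain C++ with illustrative names and widths, not the emitted IR), the narrow value is zero-extended, shifted into position, and OR'd into the previous contents after the destination bits are cleared.

// Illustrative counterpart for ConvertUsesOfStoreToScalar's shift-and-or.
#include <cstdint>

uint64_t insertField(uint64_t Old, uint16_t SV, unsigned ShAmt) {
  uint64_t Wide = static_cast<uint64_t>(SV) << ShAmt;   // zero-extend, shift left
  uint64_t Mask = ~(uint64_t{0xFFFF} << ShAmt);         // clear the target bits
  return (Old & Mask) | Wide;                           // 'or' the new bits in
}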
/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.