simplify code, no functionality change.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@123525 91177308-0d34-0410-b5e6-96231b3b80d8
commit 1a8943a1f8
parent 94e8e0cfbe
@@ -108,8 +108,7 @@ namespace {
     void EliminateMostlyEmptyBlock(BasicBlock *BB);
     bool OptimizeBlock(BasicBlock &BB);
     bool OptimizeInst(Instruction *I);
-    bool OptimizeMemoryInst(Instruction *I, Value *Addr, const Type *AccessTy,
-                            DenseMap<Value*,Value*> &SunkAddrs);
+    bool OptimizeMemoryInst(Instruction *I, Value *Addr, const Type *AccessTy);
     bool OptimizeInlineAsmInst(CallInst *CS);
     bool OptimizeCallInst(CallInst *CI);
     bool MoveExtToFormExtLoad(Instruction *I);
@@ -687,8 +686,7 @@ static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
 /// This method is used to optimize both load/store and inline asms with memory
 /// operands.
 bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
-                                        const Type *AccessTy,
-                                        DenseMap<Value*,Value*> &SunkAddrs) {
+                                        const Type *AccessTy) {
   Value *Repl = Addr;
 
   // Try to collapse single-value PHI nodes.  This is necessary to undo
@@ -883,7 +881,7 @@ bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
         OpInfo.isIndirect) {
       Value *OpVal = CS->getArgOperand(ArgNo++);
-      MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType(), SunkAddrs);
+      MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType());
     } else if (OpInfo.Type == InlineAsm::isInput)
       ArgNo++;
   }
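
(Illustration, not part of the commit: the three hunks above drop the SunkAddrs parameter that OptimizeMemoryInst no longer uses, so the declaration, the definition, and the inline-asm call site change together. A minimal standalone C++ sketch of the same kind of edit, with hypothetical names:)

// sketch_drop_param.cpp -- hypothetical names; only illustrates removing a dead parameter.
#include <cstddef>

// Before: the helper still advertised a cache map it never read, e.g.
//   bool optimizeAccess(void *Addr, std::size_t AccessSize,
//                       std::map<void*, void*> &SunkAddrs);

// After: the unused parameter is gone from the declaration and definition...
bool optimizeAccess(void *Addr, std::size_t AccessSize) {
  return Addr != nullptr && AccessSize != 0;  // placeholder body
}

// ...and each call site drops the extra argument to match.
bool optimizeCallOperand(void *OpVal) {
  return optimizeAccess(OpVal, sizeof(void *));
}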
@@ -1007,8 +1005,6 @@ bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
 }
 
 bool CodeGenPrepare::OptimizeInst(Instruction *I) {
-  bool MadeChange = false;
-
   if (PHINode *P = dyn_cast<PHINode>(I)) {
     // It is possible for very late stage optimizations (such as SimplifyCFG)
     // to introduce PHI nodes too late to be cleaned up.  If we detect such a
@@ -1017,8 +1013,12 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I) {
       P->replaceAllUsesWith(V);
       P->eraseFromParent();
       ++NumPHIsElim;
+      return true;
     }
-  } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
+    return false;
+  }
+
+  if (CastInst *CI = dyn_cast<CastInst>(I)) {
     // If the source of the cast is a constant, then this should have
     // already been constant folded.  The only reason NOT to constant fold
     // it is if something (e.g. LSR) was careful to place the constant
@@ -1028,28 +1028,33 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I) {
     if (isa<Constant>(CI->getOperand(0)))
       return false;
 
-    bool Change = false;
-    if (TLI) {
-      Change = OptimizeNoopCopyExpression(CI, *TLI);
-      MadeChange |= Change;
-    }
+    if (TLI && OptimizeNoopCopyExpression(CI, *TLI))
+      return true;
 
-    if (!Change && (isa<ZExtInst>(I) || isa<SExtInst>(I))) {
-      MadeChange |= MoveExtToFormExtLoad(I);
-      MadeChange |= OptimizeExtUses(I);
+    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
+      bool MadeChange = MoveExtToFormExtLoad(I);
+      return MadeChange | OptimizeExtUses(I);
     }
-  } else if (CmpInst *CI = dyn_cast<CmpInst>(I)) {
-    MadeChange |= OptimizeCmpExpression(CI);
-  } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+    return false;
+  }
+
+  if (CmpInst *CI = dyn_cast<CmpInst>(I))
+    return OptimizeCmpExpression(CI);
+
+  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
     if (TLI)
-      MadeChange |= OptimizeMemoryInst(I, I->getOperand(0), LI->getType(),
-                                       SunkAddrs);
-  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+      return OptimizeMemoryInst(I, I->getOperand(0), LI->getType());
+    return false;
+  }
+
+  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
     if (TLI)
-      MadeChange |= OptimizeMemoryInst(I, SI->getOperand(1),
-                                       SI->getOperand(0)->getType(),
-                                       SunkAddrs);
-  } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
+      return OptimizeMemoryInst(I, SI->getOperand(1),
+                                SI->getOperand(0)->getType());
+    return false;
+  }
+
+  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
     if (GEPI->hasAllZeroIndices()) {
       /// The GEP operand must be a pointer, so must its result -> BitCast
       Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
@@ -1057,14 +1062,16 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I) {
       GEPI->replaceAllUsesWith(NC);
       GEPI->eraseFromParent();
       ++NumGEPsElim;
-      MadeChange = true;
       OptimizeInst(NC);
+      return true;
     }
-  } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
-    MadeChange |= OptimizeCallInst(CI);
+    return false;
   }
 
-  return MadeChange;
+  if (CallInst *CI = dyn_cast<CallInst>(I))
+    return OptimizeCallInst(CI);
+
+  return false;
 }
 
 // In this pass we look for GEP and cast instructions that are used
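
(Illustration, not part of the commit: the OptimizeInst hunks replace the accumulated MadeChange flag and the if/else-if chain with an early return per instruction kind. A minimal standalone C++ sketch of that pattern, with hypothetical names:)

// sketch_early_return.cpp -- hypothetical names; shows the control-flow change only.
#include <iostream>

struct Node { int Kind; };  // stand-in for an IR instruction

// Before: one mutable flag threaded through every branch.
bool processAccumulating(const Node &N) {
  bool MadeChange = false;
  if (N.Kind == 0) {
    MadeChange = true;              // handled case A
  } else if (N.Kind == 1) {
    MadeChange |= (N.Kind > 0);     // handled case B
  }
  return MadeChange;
}

// After: each case returns as soon as it is decided; no shared state,
// and falling through to the end plainly means "nothing changed".
bool processEarlyReturn(const Node &N) {
  if (N.Kind == 0)
    return true;                    // handled case A
  if (N.Kind == 1)
    return N.Kind > 0;              // handled case B
  return false;
}

int main() {
  const Node N{1};
  std::cout << processAccumulating(N) << ' ' << processEarlyReturn(N) << '\n';
}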