Make the current instruction iterator an ivar, allowing xforms that potentially
invalidate it (like inline asm lowering) to be sunk into their proper place,
cleaning up a ton of code.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@123523 91177308-0d34-0410-b5e6-96231b3b80d8
commit 7579609bfe
parent 62fe406dc2
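In short: instead of threading OptimizeBlock's loop iterator through every helper that might invalidate it, the iterator becomes a member of the pass (CurInstIterator), and a transformation such as inline-asm expansion simply resets that member in place. The following is a minimal standalone sketch of that pattern, not the actual CodeGenPrepare code: Block, Inst, Prepare, runOnBlock, and lowerInlineAsm are hypothetical stand-ins for LLVM's BasicBlock, Instruction, the pass, and TLI->ExpandInlineAsm().

#include <iostream>
#include <list>
#include <string>

struct Inst { std::string Text; };
using Block = std::list<Inst>;

class Prepare {
  // The "ivar": the next position to scan. Transforms that may invalidate
  // the scan position update this member instead of a local iterator that
  // would have to be passed through (and returned from) every helper.
  Block::iterator CurInstIterator;

  // Hypothetical stand-in for inline asm lowering. In the real pass the
  // expansion erases the call and inserts new instructions, invalidating
  // iterators into the block; here we just rewrite the text and restart
  // the scan to mirror the same control flow.
  bool lowerInlineAsm(Block &BB, Block::iterator I) {
    if (I->Text != "asm")
      return false;
    I->Text = "lowered";          // pretend the target expanded the asm
    CurInstIterator = BB.begin(); // restart the scan from the top
    return true;
  }

public:
  bool runOnBlock(Block &BB) {
    bool MadeChange = false;
    CurInstIterator = BB.begin();
    for (Block::iterator E = BB.end(); CurInstIterator != E;) {
      Block::iterator I = CurInstIterator++; // advance before transforming
      MadeChange |= lowerInlineAsm(BB, I);   // may reset CurInstIterator
    }
    return MadeChange;
  }
};

int main() {
  Block BB{{"add"}, {"asm"}, {"ret"}};
  Prepare P;
  P.runOnBlock(BB);
  for (const Inst &I : BB)
    std::cout << I.Text << '\n'; // prints: add, lowered, ret
}

The diff below applies the same idea: OptimizeCallInst can now expand inline asm itself and recover by resetting CurInstIterator, so OptimizeBlock's loop and OptimizeInst no longer need special-case plumbing for inline asm calls.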
@@ -70,6 +70,11 @@ namespace {
     DominatorTree *DT;
     ProfileInfo *PFI;
 
+    /// CurInstIterator - As we scan instructions optimizing them, this is the
+    /// next instruction to optimize. Xforms that can invalidate this should
+    /// update it.
+    BasicBlock::iterator CurInstIterator;
+
     /// BackEdges - Keep a set of all the loop back edges.
     ///
     SmallSet<std::pair<const BasicBlock*, const BasicBlock*>, 8> BackEdges;
@@ -104,8 +109,7 @@ namespace {
     bool OptimizeInst(Instruction *I);
     bool OptimizeMemoryInst(Instruction *I, Value *Addr, const Type *AccessTy,
                             DenseMap<Value*,Value*> &SunkAddrs);
-    bool OptimizeInlineAsmInst(Instruction *I, CallSite CS,
-                               DenseMap<Value*,Value*> &SunkAddrs);
+    bool OptimizeInlineAsmInst(CallInst *CS);
     bool OptimizeCallInst(CallInst *CI);
     bool MoveExtToFormExtLoad(Instruction *I);
     bool OptimizeExtUses(Instruction *I);
@@ -605,6 +609,25 @@ protected:
 } // end anonymous namespace
 
 bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
+  BasicBlock *BB = CI->getParent();
+
+  // Lower inline assembly if we can.
+  // If we found an inline asm expession, and if the target knows how to
+  // lower it to normal LLVM code, do so now.
+  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
+    if (TLI->ExpandInlineAsm(CI)) {
+      // Avoid invalidating the iterator.
+      CurInstIterator = BB->begin();
+      // Avoid processing instructions out of order, which could cause
+      // reuse before a value is defined.
+      SunkAddrs.clear();
+      return true;
+    }
+    // Sink address computing for memory operands into the block.
+    if (OptimizeInlineAsmInst(CI))
+      return true;
+  }
+
   // Lower all uses of llvm.objectsize.*
   IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
   if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
@@ -833,11 +856,11 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
 /// OptimizeInlineAsmInst - If there are any memory operands, use
 /// OptimizeMemoryInst to sink their address computing into the block when
 /// possible / profitable.
-bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
-                                           DenseMap<Value*,Value*> &SunkAddrs) {
+bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
   bool MadeChange = false;
 
-  TargetLowering::AsmOperandInfoVector TargetConstraints = TLI->ParseConstraints(CS);
+  TargetLowering::AsmOperandInfoVector
+    TargetConstraints = TLI->ParseConstraints(CS);
   unsigned ArgNo = 0;
   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
     TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
@@ -847,8 +870,8 @@ bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
 
     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
         OpInfo.isIndirect) {
-      Value *OpVal = const_cast<Value *>(CS.getArgument(ArgNo++));
-      MadeChange |= OptimizeMemoryInst(I, OpVal, OpVal->getType(), SunkAddrs);
+      Value *OpVal = CS->getArgOperand(ArgNo++);
+      MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType(), SunkAddrs);
     } else if (OpInfo.Type == InlineAsm::isInput)
       ArgNo++;
   }
@@ -1026,13 +1049,8 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I) {
       OptimizeInst(NC);
     }
   } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
-    if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
-      // Sink address computing for memory operands into the block.
-      MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
-    } else {
-      MadeChange |= OptimizeCallInst(CI);
-    }
+    MadeChange |= OptimizeCallInst(CI);
   }
 
   return MadeChange;
 }
@@ -1057,30 +1075,15 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
 
   SunkAddrs.clear();
 
-  for (BasicBlock::iterator BBI = BB.begin(), E = BB.end(); BBI != E; ) {
-    Instruction *I = BBI++;
+  CurInstIterator = BB.begin();
+  for (BasicBlock::iterator E = BB.end(); CurInstIterator != E; ) {
+    Instruction *I = CurInstIterator++;
 
-    if (CallInst *CI = dyn_cast<CallInst>(I)) {
-      // If we found an inline asm expession, and if the target knows how to
-      // lower it to normal LLVM code, do so now.
-      if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
-        if (TLI->ExpandInlineAsm(CI)) {
-          BBI = BB.begin();
-          // Avoid processing instructions out of order, which could cause
-          // reuse before a value is defined.
-          SunkAddrs.clear();
-        } else
-          // Sink address computing for memory operands into the block.
-          MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
-      } else {
-        // Other CallInst optimizations that don't need to muck with the
-        // enclosing iterator here.
-        MadeChange |= OptimizeCallInst(CI);
-      }
-    } else {
-      MadeChange |= OptimizeInst(I);
-    }
+    if (CallInst *CI = dyn_cast<CallInst>(I))
+      MadeChange |= OptimizeCallInst(CI);
+    else
+      MadeChange |= OptimizeInst(I);
   }
 
   return MadeChange;
 }