Revert the optimization in r122596. It is correct for all current targets, but
it relies on assumptions that may not be true in the future.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@122608 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Cameron Zwarich
Date:   2010-12-28 23:02:56 +00:00
parent f661277a9b
commit 438e25ccdc


@@ -472,9 +472,16 @@ StrongPHIElimination::SplitInterferencesForBasicBlock(
   for (MachineBasicBlock::iterator BBI = MBB.begin(), BBE = MBB.end();
        BBI != BBE; ++BBI) {
     for (MachineInstr::const_mop_iterator I = BBI->operands_begin(),
-         E = BBI->operands_end(); I != E && I->isReg() && I->isDef(); ++I) {
+         E = BBI->operands_end(); I != E; ++I) {
       const MachineOperand& MO = *I;
+      // FIXME: This would be faster if it were possible to bail out of checking
+      // an instruction's operands after the explicit defs, but this is incorrect
+      // for variadic instructions, which may appear before register allocation
+      // in the future.
+      if (!MO.isReg() || !MO.isDef())
+        continue;
       unsigned DestReg = MO.getReg();
       if (!DestReg || !TargetRegisterInfo::isVirtualRegister(DestReg))
         continue;
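
For illustration only, here is a minimal standalone C++ sketch (using a toy Operand struct, not LLVM's MachineOperand) of why the reverted bail-out is fragile: the early-exit loop condition assumes every explicit register def precedes all other operands, so a hypothetical variadic instruction that interleaves uses and defs would have its later defs silently skipped, whereas the full scan with an explicit continue still visits them.

// Toy illustration (not LLVM code): contrasting the two operand-iteration patterns.
#include <cstdio>
#include <vector>

struct Operand {
  bool IsReg;
  bool IsDef;
  unsigned Reg;
};

// Reverted pattern: stop at the first operand that is not a register def.
// Only correct if all defs are grouped at the front of the operand list.
static unsigned countDefsWithBailOut(const std::vector<Operand> &Ops) {
  unsigned Count = 0;
  for (auto I = Ops.begin(), E = Ops.end();
       I != E && I->IsReg && I->IsDef; ++I)
    ++Count;
  return Count;
}

// Restored pattern: scan every operand and skip non-defs explicitly.
static unsigned countDefsWithFullScan(const std::vector<Operand> &Ops) {
  unsigned Count = 0;
  for (const Operand &MO : Ops) {
    if (!MO.IsReg || !MO.IsDef)
      continue;
    ++Count;
  }
  return Count;
}

int main() {
  // Hypothetical operand list where a def appears after a use.
  std::vector<Operand> Ops = {
    {true, true, 1},   // def of vreg 1
    {true, false, 2},  // use of vreg 2
    {true, true, 3},   // a later def the bail-out loop never reaches
  };
  std::printf("bail-out sees %u defs, full scan sees %u defs\n",
              countDefsWithBailOut(Ops), countDefsWithFullScan(Ops));
  return 0;
}

The restored loop trades a few extra operand checks per instruction for correctness under any operand ordering, which is the trade-off the FIXME in the diff records.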