Eliminate another common source of moves that the register allocator
shouldn't be forced to coalesce for us: folded GEP operations.  This too
fires thousands of times across the testsuite.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@17947 91177308-0d34-0410-b5e6-96231b3b80d8
Nate Begeman 2004-11-18 07:22:46 +00:00
parent 1f5308e5b5
commit db869aad8c


@@ -3834,8 +3834,6 @@ void PPC32ISel::emitGEPOperation(MachineBasicBlock *MBB,
   // We now have a base register, an index register, and possibly a constant
   // remainder. If the GEP is going to be folded, we try to generate the
   // optimal addressing mode.
-  unsigned TargetReg = getReg(GEPI, MBB, IP);
-  unsigned basePtrReg = getReg(Src, MBB, IP);
   ConstantSInt *remainder = ConstantSInt::get(Type::IntTy, constValue);
 
   // If we are emitting this during a fold, copy the current base register to
@@ -3853,14 +3851,15 @@ void PPC32ISel::emitGEPOperation(MachineBasicBlock *MBB,
       indexReg = TmpReg;
       remainder = 0;
     }
-    BuildMI (*MBB, IP, PPC::OR, 2, TargetReg).addReg(basePtrReg)
-      .addReg(basePtrReg);
-    GEPMap[GEPI] = FoldedGEP(TargetReg, indexReg, remainder);
+    unsigned basePtrReg = getReg(Src, MBB, IP);
+    GEPMap[GEPI] = FoldedGEP(basePtrReg, indexReg, remainder);
     return;
   }
 
   // We're not folding, so collapse the base, index, and any remainder into the
   // destination register.
+  unsigned TargetReg = getReg(GEPI, MBB, IP);
+  unsigned basePtrReg = getReg(Src, MBB, IP);
   if (indexReg != 0) {
     unsigned TmpReg = makeAnotherReg(Type::IntTy);
     BuildMI(*MBB, IP, PPC::ADD, 2, TmpReg).addReg(indexReg).addReg(basePtrReg);
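
For illustration only (not part of the commit, and the register numbers below are
made up): on PowerPC, "or rD, rS, rS" is the canonical register-to-register copy,
so the old fold path cost every folded GEP an extra move that the allocator then
had to coalesce away. A sketch of what a folded GEP feeding a load used to emit:

    or   %v11, %v10, %v10    ; copy basePtrReg into the GEP's TargetReg
    lwz  %v12, 8(%v11)       ; folded load: base register + constant remainder

With the base register recorded directly in the FoldedGEP entry, only the load
remains (now addressed off %v10), and there is no copy left to coalesce.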