X86 Peephole: fold loads to the source register operand if possible.

Add more comments and use early returns to reduce nesting in isLoadFoldable.
Also disable folding for V_SET0 to avoid introducing a const pool entry and
a const pool load.

rdar://10554090 and rdar://11873276
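
(Illustrative sketch, not part of the original commit message: the register
names and instructions below are invented for the example.) The fold this
enables replaces a single-use load feeding another instruction with a memory
operand on the user, e.g.:

    movss  (%rdi), %xmm0    # load whose defined vreg has exactly one use
    addss  %xmm0, %xmm1     # the single user

becomes

    addss  (%rdi), %xmm1    # load folded into the source operand

V_SET0, by contrast, expands to a register-zeroing idiom (e.g. xorps/pxor)
rather than a real load, so "folding" it would have to materialize the zero
vector as a constant pool entry and load it back, which is the regression
this change avoids; the new mayLoad() check filters such pseudos out.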


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@161207 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Manman Ren
Date:   2012-08-02 19:37:32 +00:00
parent 1de266be13
commit 127eea87d6
5 changed files with 67 additions and 38 deletions

@@ -391,20 +391,21 @@ bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr *MI,
 /// register defined has a single use.
 bool PeepholeOptimizer::isLoadFoldable(MachineInstr *MI,
                                        unsigned &FoldAsLoadDefReg) {
-  if (MI->canFoldAsLoad()) {
-    const MCInstrDesc &MCID = MI->getDesc();
-    if (MCID.getNumDefs() == 1) {
-      unsigned Reg = MI->getOperand(0).getReg();
-      // To reduce compilation time, we check MRI->hasOneUse when inserting
-      // loads. It should be checked when processing uses of the load, since
-      // uses can be removed during peephole.
-      if (!MI->getOperand(0).getSubReg() &&
-          TargetRegisterInfo::isVirtualRegister(Reg) &&
-          MRI->hasOneUse(Reg)) {
-        FoldAsLoadDefReg = Reg;
-        return true;
-      }
-    }
+  if (!MI->canFoldAsLoad() || !MI->mayLoad())
+    return false;
+  const MCInstrDesc &MCID = MI->getDesc();
+  if (MCID.getNumDefs() != 1)
+    return false;
+
+  unsigned Reg = MI->getOperand(0).getReg();
+  // To reduce compilation time, we check MRI->hasOneUse when inserting
+  // loads. It should be checked when processing uses of the load, since
+  // uses can be removed during peephole.
+  if (!MI->getOperand(0).getSubReg() &&
+      TargetRegisterInfo::isVirtualRegister(Reg) &&
+      MRI->hasOneUse(Reg)) {
+    FoldAsLoadDefReg = Reg;
+    return true;
   }
   return false;
 }
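
For context, a condensed sketch of how FoldAsLoadDefReg is consumed by the
per-instruction loop in runOnMachineFunction (paraphrased from this commit;
surrounding declarations are omitted and the optimizeLoadInstr signature is
reproduced from memory, so treat the details as approximate):

  // If MI is itself a foldable load, isLoadFoldable records its def in
  // FoldAsLoadDefReg and the loop moves on. Otherwise, if a previously
  // recorded load is still pending, ask the target to fold it into MI.
  if (!isLoadFoldable(MI, FoldAsLoadDefReg) && FoldAsLoadDefReg != 0) {
    // Folding is tried after optimizeCmpInstr, since converting a SUB
    // into a CMP can itself enable the fold.
    MachineInstr *DefMI = 0;
    MachineInstr *FoldMI =
        TII->optimizeLoadInstr(MI, MRI, FoldAsLoadDefReg, DefMI);
    if (FoldMI) {
      // Both the load (DefMI) and its single user (MI) are replaced by
      // the folded instruction. Per the comment in isLoadFoldable, the
      // one-use property is re-checked at this point, since uses may
      // have been deleted in the meantime.
      LocalMIs.erase(MI);
      LocalMIs.erase(DefMI);
      LocalMIs.insert(FoldMI);
      MI->eraseFromParent();
      DefMI->eraseFromParent();
      Changed = true;
    }
  }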