Register scavenger should process early clobber defs first. A dead early clobber def should not interfere with a normal def which happens one slot later.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@59559 91177308-0d34-0410-b5e6-96231b3b80d8
Evan Cheng 2008-11-18 22:28:38 +00:00
parent 01f63cdb04
commit 9c64bf3905
2 changed files with 53 additions and 23 deletions
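
To make the ordering problem concrete, here is a small standalone C++ sketch (a toy model, not the RegScavenger code; the register number and the operand struct are made up for illustration). When a dead early-clobber def and an ordinary def of the same physical register sit on one instruction, visiting the early clobber first lets it free the register before the ordinary def marks it live; walking the operands in plain operand order can instead leave the register wrongly marked free afterwards.

// Minimal sketch (not the LLVM code) of why forward() should visit
// early-clobber defs before ordinary defs. Register r1 and the operand
// model below are hypothetical.
#include <bitset>
#include <iostream>
#include <vector>

struct DefOperand {
  unsigned Reg;
  bool IsEarlyClobber; // written at the "use" slot, before ordinary defs
  bool IsDead;         // the result is never read after this instruction
};

// Apply one def to a toy model of the scavenger's used-register state.
static void processDef(std::bitset<16> &Used, const DefOperand &MO) {
  if (MO.IsDead)
    Used.reset(MO.Reg); // dead upon def: the register is immediately free
  else
    Used.set(MO.Reg);   // a live def makes the register used
}

int main() {
  // One instruction: a live ordinary def of r1 listed *before* a dead
  // early-clobber def of r1 in its operand list.
  std::vector<DefOperand> Ops = {
      {1, /*IsEarlyClobber=*/false, /*IsDead=*/false},
      {1, /*IsEarlyClobber=*/true,  /*IsDead=*/true},
  };

  // Plain operand order: the dead early clobber runs last and wrongly
  // frees r1 even though the ordinary def just made it live.
  std::bitset<16> InOperandOrder;
  for (const DefOperand &MO : Ops)
    processDef(InOperandOrder, MO);

  // Early clobbers first, then ordinary defs: r1 stays live.
  std::bitset<16> EarlyClobbersFirst;
  for (const DefOperand &MO : Ops)
    if (MO.IsEarlyClobber)
      processDef(EarlyClobbersFirst, MO);
  for (const DefOperand &MO : Ops)
    if (!MO.IsEarlyClobber)
      processDef(EarlyClobbersFirst, MO);

  std::cout << "operand order leaves r1 live:       " << InOperandOrder.test(1) << '\n';
  std::cout << "early clobbers first leave r1 live: " << EarlyClobbersFirst.test(1) << '\n';
  return 0;
}

Built with any C++11 compiler, the first line prints 0 and the second prints 1, which is the difference the reordered def processing in the diff below is meant to guarantee.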


@@ -190,47 +190,61 @@ void RegScavenger::forward() {
   if (TID.isTerminator())
     restoreScavengedReg();
 
-  // Process uses first.
-  BitVector ChangedRegs(NumPhysRegs);
+  bool IsImpDef = MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF;
+
+  // Separate register operands into 3 classes: uses, defs, earlyclobbers.
+  SmallVector<const MachineOperand*, 4> UseMOs;
+  SmallVector<const MachineOperand*, 4> DefMOs;
+  SmallVector<const MachineOperand*, 4> EarlyClobberMOs;
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = MI->getOperand(i);
-    if (!MO.isReg() || !MO.isUse())
+    if (!MO.isReg() || MO.getReg() == 0)
       continue;
+    if (MO.isUse())
+      UseMOs.push_back(&MO);
+    else if (MO.isEarlyClobber())
+      EarlyClobberMOs.push_back(&MO);
+    else
+      DefMOs.push_back(&MO);
+  }
+
+  // Process uses first.
+  BitVector UseRegs(NumPhysRegs);
+  for (unsigned i = 0, e = UseMOs.size(); i != e; ++i) {
+    const MachineOperand &MO = *UseMOs[i];
     unsigned Reg = MO.getReg();
-    if (Reg == 0) continue;
 
     if (!isUsed(Reg)) {
       // Register has been scavenged. Restore it!
-      if (Reg != ScavengedReg)
-        assert(false && "Using an undefined register!");
-      else
+      if (Reg == ScavengedReg)
         restoreScavengedReg();
+      else
+        assert(false && "Using an undefined register!");
     }
 
     if (MO.isKill() && !isReserved(Reg)) {
-      ChangedRegs.set(Reg);
+      UseRegs.set(Reg);
 
-      // Mark sub-registers as changed if they aren't defined in the same
-      // instruction.
+      // Mark sub-registers as used.
      for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
           unsigned SubReg = *SubRegs; ++SubRegs)
-        ChangedRegs.set(SubReg);
+        UseRegs.set(SubReg);
     }
   }
 
   // Change states of all registers after all the uses are processed to guard
   // against multiple uses.
-  setUnused(ChangedRegs);
+  setUnused(UseRegs);
 
-  // Process defs.
-  bool IsImpDef = MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF;
-  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
-    const MachineOperand &MO = MI->getOperand(i);
-    if (!MO.isReg() || !MO.isDef())
-      continue;
+  // Process early clobber defs then process defs. We can have a early clobber
+  // that is dead, it should not conflict with a def that happens one "slot"
+  // (see InstrSlots in LiveIntervalAnalysis.h) later.
+  unsigned NumECs = EarlyClobberMOs.size();
+  unsigned NumDefs = DefMOs.size();
+  for (unsigned i = 0, e = NumECs + NumDefs; i != e; ++i) {
+    const MachineOperand &MO = (i < NumECs)
+      ? *EarlyClobberMOs[i] : *DefMOs[i-NumECs];
     unsigned Reg = MO.getReg();
 
     // If it's dead upon def, then it is now free.
@@ -282,7 +296,7 @@ void RegScavenger::backward() {
   }
 
   // Process uses.
-  BitVector ChangedRegs(NumPhysRegs);
+  BitVector UseRegs(NumPhysRegs);
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = MI->getOperand(i);
     if (!MO.isReg() || !MO.isUse())
@@ -291,14 +305,14 @@ void RegScavenger::backward() {
     if (Reg == 0)
       continue;
     assert(isUnused(Reg) || isReserved(Reg));
-    ChangedRegs.set(Reg);
+    UseRegs.set(Reg);
 
     // Set the sub-registers as "used".
     for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
          unsigned SubReg = *SubRegs; ++SubRegs)
-      ChangedRegs.set(SubReg);
+      UseRegs.set(SubReg);
   }
-  setUsed(ChangedRegs);
+  setUsed(UseRegs);
 }
 
 void RegScavenger::getRegsUsed(BitVector &used, bool includeReserved) {
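
The new comment in forward() refers to the instruction "slots" used by LiveIntervalAnalysis.h: each instruction owns a small run of indices, an early-clobber def is placed at the use slot, and an ordinary def lands one slot later at the def slot. The sketch below is a simplified, hypothetical model of that numbering (the enum values and helper are illustrative, not the LLVM definitions); it shows why a dead early-clobber value's interval ends before an ordinary def of the same register begins, so the allocator may give both the same physical register and the scavenger has to cope with that.

// Hypothetical slot numbering, loosely modeled on InstrSlots in
// LiveIntervalAnalysis.h: every instruction owns a few consecutive indices.
#include <cassert>

enum Slot { Load = 0, Use = 1, Def = 2, Store = 3, SlotsPerInstr = 4 };

// Index of a given slot within instruction number InstrNo.
static unsigned slotIndex(unsigned InstrNo, Slot S) {
  return InstrNo * SlotsPerInstr + S;
}

int main() {
  const unsigned InstrNo = 7; // arbitrary instruction number for illustration

  // A dead early-clobber def lives only at the use slot: its interval
  // starts and ends there because nothing reads the value afterwards.
  unsigned ECStart = slotIndex(InstrNo, Use);
  unsigned ECEnd   = ECStart;

  // An ordinary def on the same instruction starts one slot later.
  unsigned DefStart = slotIndex(InstrNo, Def);

  // The two intervals do not overlap, so sharing a physical register is legal.
  assert(ECEnd <= DefStart && "dead early clobber must not reach the def slot");
  return 0;
}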


@@ -0,0 +1,16 @@
+; RUN: llvm-as < %s | llc -march=arm -mattr=+v6,+vfp2
+
+define hidden i64 @__muldi3(i64 %u, i64 %v) nounwind {
+entry:
+  %0 = trunc i64 %u to i32 ; <i32> [#uses=1]
+  %asmtmp = tail call { i32, i32, i32, i32, i32 } asm "@ Inlined umul_ppmm\0A\09mov\09$2, $5, lsr #16\0A\09mov\09$0, $6, lsr #16\0A\09bic\09$3, $5, $2, lsl #16\0A\09bic\09$4, $6, $0, lsl #16\0A\09mul\09$1, $3, $4\0A\09mul\09$4, $2, $4\0A\09mul\09$3, $0, $3\0A\09mul\09$0, $2, $0\0A\09adds\09$3, $4, $3\0A\09addcs\09$0, $0, #65536\0A\09adds\09$1, $1, $3, lsl #16\0A\09adc\09$0, $0, $3, lsr #16", "=&r,=r,=&r,=&r,=r,r,r,~{cc}"(i32 %0, i32 0) nounwind ; <{ i32, i32, i32, i32, i32 }> [#uses=1]
+  %asmresult1 = extractvalue { i32, i32, i32, i32, i32 } %asmtmp, 1 ; <i32> [#uses=1]
+  %asmresult116 = zext i32 %asmresult1 to i64 ; <i64> [#uses=1]
+  %asmresult116.ins = or i64 0, %asmresult116 ; <i64> [#uses=1]
+  %1 = lshr i64 %v, 32 ; <i64> [#uses=1]
+  %2 = mul i64 %1, %u ; <i64> [#uses=1]
+  %3 = add i64 %2, 0 ; <i64> [#uses=1]
+  %4 = shl i64 %3, 32 ; <i64> [#uses=1]
+  %5 = add i64 %asmresult116.ins, %4 ; <i64> [#uses=1]
+  ret i64 %5
+}
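
The "=&r" entries in the test's constraint string are what make several of the inline-asm outputs early-clobber defs in the first place. As a hedged aside (this snippet is not part of the commit and uses GCC/Clang C++ inline asm rather than LLVM IR), the same "&" modifier in source-level inline asm tells the compiler that an output is written before the inputs are consumed, so it must not share a register with them:

// Illustration of the early-clobber ('&') constraint modifier; guarded so the
// sketch still builds and runs on non-x86 or non-GNU compilers.
#include <cstdio>

int main() {
  unsigned in = 41, out = 0;
#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
  // "=&r": 'out' may be clobbered before 'in' is read, so the compiler must
  // keep the two operands in different registers.
  asm("movl %1, %0\n\t"
      "addl $1, %0"
      : "=&r"(out)
      : "r"(in));
#else
  out = in + 1; // fallback path with the same observable result
#endif
  std::printf("%u\n", out); // prints 42
  return 0;
}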