From 9c64bf3905ea338719800008c03d95a17cb26689 Mon Sep 17 00:00:00 2001
From: Evan Cheng
Date: Tue, 18 Nov 2008 22:28:38 +0000
Subject: [PATCH] Register scavenger should process early clobber defs first.
 A dead early clobber def should not interfere with a normal def which
 happens one slot later.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@59559 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/CodeGen/RegisterScavenging.cpp            | 60 ++++++++++++-------
 .../CodeGen/ARM/2008-11-18-ScavengerAssert.ll | 16 +++++
 2 files changed, 53 insertions(+), 23 deletions(-)
 create mode 100644 test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll

diff --git a/lib/CodeGen/RegisterScavenging.cpp b/lib/CodeGen/RegisterScavenging.cpp
index c1d7ff97e44..a7bb17dc84e 100644
--- a/lib/CodeGen/RegisterScavenging.cpp
+++ b/lib/CodeGen/RegisterScavenging.cpp
@@ -190,47 +190,61 @@ void RegScavenger::forward() {
   if (TID.isTerminator())
     restoreScavengedReg();
 
-  // Process uses first.
-  BitVector ChangedRegs(NumPhysRegs);
+  bool IsImpDef = MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF;
+
+  // Separate register operands into 3 classes: uses, defs, earlyclobbers.
+  SmallVector<const MachineOperand*, 4> UseMOs;
+  SmallVector<const MachineOperand*, 4> DefMOs;
+  SmallVector<const MachineOperand*, 4> EarlyClobberMOs;
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = MI->getOperand(i);
-    if (!MO.isReg() || !MO.isUse())
+    if (!MO.isReg() || MO.getReg() == 0)
       continue;
+    if (MO.isUse())
+      UseMOs.push_back(&MO);
+    else if (MO.isEarlyClobber())
+      EarlyClobberMOs.push_back(&MO);
+    else
+      DefMOs.push_back(&MO);
+  }
 
+  // Process uses first.
+  BitVector UseRegs(NumPhysRegs);
+  for (unsigned i = 0, e = UseMOs.size(); i != e; ++i) {
+    const MachineOperand &MO = *UseMOs[i];
     unsigned Reg = MO.getReg();
-    if (Reg == 0) continue;
 
     if (!isUsed(Reg)) {
       // Register has been scavenged. Restore it!
-      if (Reg != ScavengedReg)
-        assert(false && "Using an undefined register!");
-      else
+      if (Reg == ScavengedReg)
         restoreScavengedReg();
+      else
+        assert(false && "Using an undefined register!");
     }
 
     if (MO.isKill() && !isReserved(Reg)) {
-      ChangedRegs.set(Reg);
+      UseRegs.set(Reg);
 
-      // Mark sub-registers as changed if they aren't defined in the same
-      // instruction.
+      // Mark sub-registers as used.
       for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
            unsigned SubReg = *SubRegs; ++SubRegs)
-        ChangedRegs.set(SubReg);
+        UseRegs.set(SubReg);
     }
   }
 
   // Change states of all registers after all the uses are processed to guard
   // against multiple uses.
-  setUnused(ChangedRegs);
+  setUnused(UseRegs);
 
-  // Process defs.
-  bool IsImpDef = MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF;
-  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
-    const MachineOperand &MO = MI->getOperand(i);
-
-    if (!MO.isReg() || !MO.isDef())
-      continue;
+  // Process early clobber defs then process defs. We can have a early clobber
+  // that is dead, it should not conflict with a def that happens one "slot"
+  // (see InstrSlots in LiveIntervalAnalysis.h) later.
+  unsigned NumECs = EarlyClobberMOs.size();
+  unsigned NumDefs = DefMOs.size();
 
+  for (unsigned i = 0, e = NumECs + NumDefs; i != e; ++i) {
+    const MachineOperand &MO = (i < NumECs)
+      ? *EarlyClobberMOs[i] : *DefMOs[i-NumECs];
     unsigned Reg = MO.getReg();
 
     // If it's dead upon def, then it is now free.
@@ -282,7 +296,7 @@ void RegScavenger::backward() {
   }
 
   // Process uses.
-  BitVector ChangedRegs(NumPhysRegs);
+  BitVector UseRegs(NumPhysRegs);
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = MI->getOperand(i);
     if (!MO.isReg() || !MO.isUse())
@@ -291,14 +305,14 @@ void RegScavenger::backward() {
     unsigned Reg = MO.getReg();
     if (Reg == 0) continue;
     assert(isUnused(Reg) || isReserved(Reg));
-    ChangedRegs.set(Reg);
+    UseRegs.set(Reg);
 
     // Set the sub-registers as "used".
     for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
          unsigned SubReg = *SubRegs; ++SubRegs)
-      ChangedRegs.set(SubReg);
+      UseRegs.set(SubReg);
   }
-  setUsed(ChangedRegs);
+  setUsed(UseRegs);
 }
 
 void RegScavenger::getRegsUsed(BitVector &used, bool includeReserved) {
diff --git a/test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll b/test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll
new file mode 100644
index 00000000000..164e9643f17
--- /dev/null
+++ b/test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll
@@ -0,0 +1,16 @@
+; RUN: llvm-as < %s | llc -march=arm -mattr=+v6,+vfp2
+
+define hidden i64 @__muldi3(i64 %u, i64 %v) nounwind {
+entry:
+  %0 = trunc i64 %u to i32              ; <i32> [#uses=1]
+  %asmtmp = tail call { i32, i32, i32, i32, i32 } asm "@ Inlined umul_ppmm\0A\09mov\09$2, $5, lsr #16\0A\09mov\09$0, $6, lsr #16\0A\09bic\09$3, $5, $2, lsl #16\0A\09bic\09$4, $6, $0, lsl #16\0A\09mul\09$1, $3, $4\0A\09mul\09$4, $2, $4\0A\09mul\09$3, $0, $3\0A\09mul\09$0, $2, $0\0A\09adds\09$3, $4, $3\0A\09addcs\09$0, $0, #65536\0A\09adds\09$1, $1, $3, lsl #16\0A\09adc\09$0, $0, $3, lsr #16", "=&r,=r,=&r,=&r,=r,r,r,~{cc}"(i32 %0, i32 0) nounwind              ; <{ i32, i32, i32, i32, i32 }> [#uses=1]
+  %asmresult1 = extractvalue { i32, i32, i32, i32, i32 } %asmtmp, 1             ; <i32> [#uses=1]
+  %asmresult116 = zext i32 %asmresult1 to i64           ; <i64> [#uses=1]
+  %asmresult116.ins = or i64 0, %asmresult116           ; <i64> [#uses=1]
+  %1 = lshr i64 %v, 32          ; <i64> [#uses=1]
+  %2 = mul i64 %1, %u           ; <i64> [#uses=1]
+  %3 = add i64 %2, 0            ; <i64> [#uses=1]
+  %4 = shl i64 %3, 32           ; <i64> [#uses=1]
+  %5 = add i64 %asmresult116.ins, %4            ; <i64> [#uses=1]
+  ret i64 %5
+}
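
The change, in short: forward() now buckets register operands into uses, early-clobber defs, and ordinary defs, releases the killed uses first, and then applies the early-clobber defs before the ordinary defs, so a dead early clobber frees its register rather than appearing to clash with a def that takes effect one slot later. Below is a minimal standalone sketch of that ordering; Operand, ScavengerSketch, and their fields (Reg, IsUse, IsEarlyClobber, IsKill, IsDead, Used) are simplified stand-ins made up for illustration, not the real MachineOperand/RegScavenger interfaces.

#include <cassert>
#include <cstddef>
#include <vector>

// Stand-in for a register operand of one machine instruction (assumption,
// not LLVM's MachineOperand).
struct Operand {
  unsigned Reg = 0;            // physical register number, 0 = no register
  bool IsUse = false;          // operand is read by the instruction
  bool IsEarlyClobber = false; // def written before the uses are read
  bool IsKill = false;         // last use of the register
  bool IsDead = false;         // def whose value is never read
};

// Stand-in for the scavenger's per-register liveness tracking (assumption,
// not LLVM's RegScavenger).
struct ScavengerSketch {
  std::vector<bool> Used; // one liveness bit per physical register

  explicit ScavengerSketch(std::size_t NumPhysRegs) : Used(NumPhysRegs, false) {}

  // Mirrors the ordering introduced by the patch: uses, then early-clobber
  // defs, then ordinary defs.
  void forward(const std::vector<Operand> &MOs) {
    // Bucket the operands, as the patch does with UseMOs/DefMOs/EarlyClobberMOs.
    std::vector<const Operand *> Uses, ECs, Defs;
    for (const Operand &MO : MOs) {
      if (MO.Reg == 0)
        continue;
      if (MO.IsUse)
        Uses.push_back(&MO);
      else if (MO.IsEarlyClobber)
        ECs.push_back(&MO);
      else
        Defs.push_back(&MO);
    }

    // Uses first: a killed use frees its register.
    for (const Operand *MO : Uses)
      if (MO->IsKill)
        Used[MO->Reg] = false;

    // Early-clobber defs before ordinary defs: a dead early clobber frees
    // its register here, so an ordinary def of the same register one slot
    // later no longer looks like a re-definition of a live register.
    for (const Operand *MO : ECs)
      Used[MO->Reg] = !MO->IsDead;
    for (const Operand *MO : Defs) {
      assert(!Used[MO->Reg] && "re-defining a live register");
      Used[MO->Reg] = !MO->IsDead;
    }
  }
};

With the old single pass over all defs, a dead early clobber and an ordinary def of the same register could be processed in an order that tripped a scavenger assertion; the new ARM inline-asm test case covers that situation.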