From aee4af68ae2016afc5b4ec0c430e539c5810a766 Mon Sep 17 00:00:00 2001
From: Evan Cheng
Date: Sun, 2 Dec 2007 08:30:39 +0000
Subject: [PATCH] Remove redundant foldMemoryOperand variants and other code
 clean up.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@44517 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/CodeGen/LiveIntervalAnalysis.h |   3 +-
 include/llvm/Target/MRegisterInfo.h         |  29 +---
 lib/CodeGen/LiveIntervalAnalysis.cpp        | 142 ++++++++++----------
 lib/CodeGen/RegAllocBigBlock.cpp            |   4 +-
 lib/CodeGen/RegAllocLocal.cpp               |   4 +-
 lib/CodeGen/VirtRegMap.cpp                  |  18 +--
 lib/CodeGen/VirtRegMap.h                    |   7 +-
 lib/Target/ARM/ARMRegisterInfo.cpp          |   6 +-
 lib/Target/ARM/ARMRegisterInfo.h            |  16 +--
 lib/Target/Alpha/AlphaRegisterInfo.cpp      |   6 +-
 lib/Target/Alpha/AlphaRegisterInfo.h        |  16 +--
 lib/Target/Mips/MipsRegisterInfo.cpp        |   9 +-
 lib/Target/Mips/MipsRegisterInfo.h          |  16 +--
 lib/Target/PowerPC/PPCRegisterInfo.cpp      |   7 +-
 lib/Target/PowerPC/PPCRegisterInfo.h        |  16 +--
 lib/Target/Sparc/SparcRegisterInfo.cpp      |   7 +-
 lib/Target/Sparc/SparcRegisterInfo.h        |  16 +--
 lib/Target/X86/X86RegisterInfo.cpp          |  87 +++++-------
 lib/Target/X86/X86RegisterInfo.h            |  19 +--
 19 files changed, 171 insertions(+), 257 deletions(-)

diff --git a/include/llvm/CodeGen/LiveIntervalAnalysis.h b/include/llvm/CodeGen/LiveIntervalAnalysis.h
index 5ada1ad1496..1be8ea5ea04 100644
--- a/include/llvm/CodeGen/LiveIntervalAnalysis.h
+++ b/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -275,8 +275,7 @@ namespace llvm {
     /// returns true.
     bool tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm,
                               MachineInstr *DefMI, unsigned InstrIdx,
-                              unsigned OpIdx,
-                              SmallVector<unsigned, 2> &UseOps,
+                              SmallVector<unsigned, 2> &Ops,
                               bool isSS, int Slot, unsigned Reg);

     /// anyKillInMBBAfterIdx - Returns true if there is a kill of the specified
diff --git a/include/llvm/Target/MRegisterInfo.h b/include/llvm/Target/MRegisterInfo.h
index ecbee642f42..c4d16227574 100644
--- a/include/llvm/Target/MRegisterInfo.h
+++ b/include/llvm/Target/MRegisterInfo.h
@@ -533,20 +533,13 @@ public:
                              const MachineInstr *Orig) const = 0;

   /// foldMemoryOperand - Attempt to fold a load or store of the specified stack
-  /// slot into the specified machine instruction for the specified operand. If
-  /// this is possible, a new instruction is returned with the specified operand
-  /// folded, otherwise NULL is returned. The client is responsible for removing
-  /// the old instruction and adding the new one in the instruction stream
+  /// slot into the specified machine instruction for the specified operand(s).
+  /// If this is possible, a new instruction is returned with the specified
+  /// operand folded, otherwise NULL is returned. The client is responsible for
+  /// removing the old instruction and adding the new one in the instruction
+  /// stream.
   virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          unsigned OpNum,
-                                          int FrameIndex) const {
-    return 0;
-  }
-
-  /// foldMemoryOperand - Same as previous except it tries to fold instruction
-  /// with multiple uses of the same register.
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           int FrameIndex) const {
     return 0;
   }
@@ -555,15 +548,7 @@ public:
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
   virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          unsigned OpNum,
-                                          MachineInstr* LoadMI) const {
-    return 0;
-  }
-
-  /// foldMemoryOperand - Same as previous except it tries to fold instruction
-  /// with multiple uses of the same register.
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           MachineInstr* LoadMI) const {
     return 0;
   }
diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp
index 77a5505ca83..eef117af300 100644
--- a/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -643,28 +643,32 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
 /// returns true.
 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                          VirtRegMap &vrm, MachineInstr *DefMI,
-                                         unsigned InstrIdx, unsigned OpIdx,
-                                         SmallVector<unsigned, 2> &UseOps,
+                                         unsigned InstrIdx,
+                                         SmallVector<unsigned, 2> &Ops,
                                          bool isSS, int Slot, unsigned Reg) {
-  // FIXME: fold subreg use
-  if (MI->getOperand(OpIdx).getSubReg())
-    return false;
-
-  MachineInstr *fmi = NULL;
-
-  if (UseOps.size() < 2)
-    fmi = isSS ? mri_->foldMemoryOperand(MI, OpIdx, Slot)
-               : mri_->foldMemoryOperand(MI, OpIdx, DefMI);
-  else {
-    if (OpIdx != UseOps[0])
-      // Must be two-address instruction + one more use. Not going to fold.
+  unsigned MRInfo = 0;
+  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
+  SmallVector<unsigned, 2> FoldOps;
+  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+    unsigned OpIdx = Ops[i];
+    // FIXME: fold subreg use.
+    if (MI->getOperand(OpIdx).getSubReg())
       return false;
-    // It may be possible to fold load when there are multiple uses.
-    // e.g. On x86, TEST32rr r, r -> CMP32rm [mem], 0
-    fmi = isSS ? mri_->foldMemoryOperand(MI, UseOps, Slot)
-               : mri_->foldMemoryOperand(MI, UseOps, DefMI);
+    if (MI->getOperand(OpIdx).isDef())
+      MRInfo |= (unsigned)VirtRegMap::isMod;
+    else {
+      // Filter out two-address use operand(s).
+      if (TID->getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
+        MRInfo = VirtRegMap::isModRef;
+        continue;
+      }
+      MRInfo |= (unsigned)VirtRegMap::isRef;
+    }
+    FoldOps.push_back(OpIdx);
   }

+  MachineInstr *fmi = isSS ? mri_->foldMemoryOperand(MI, FoldOps, Slot)
+                           : mri_->foldMemoryOperand(MI, FoldOps, DefMI);
   if (fmi) {
     // Attempt to fold the memory reference into the instruction. If
     // we can do this, we don't need to insert spill code.
@@ -674,7 +678,7 @@ bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
     LiveVariables::transferKillDeadInfo(MI, fmi, mri_);
     MachineBasicBlock &MBB = *MI->getParent();
     if (isSS && !mf_->getFrameInfo()->isFixedObjectIndex(Slot))
-      vrm.virtFolded(Reg, MI, OpIdx, fmi);
+      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
     vrm.transferSpillPts(MI, fmi);
     vrm.transferRestorePts(MI, fmi);
     mi2iMap_.erase(MI);
@@ -775,28 +779,25 @@ rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
       HasUse = mop.isUse();
       HasDef = mop.isDef();

-      SmallVector<unsigned, 2> UseOps;
-      if (HasUse)
-        UseOps.push_back(i);
-      std::vector<unsigned> UpdateOps;
+      SmallVector<unsigned, 2> Ops;
+      Ops.push_back(i);
       for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
-        if (!MI->getOperand(j).isRegister())
+        const MachineOperand &MOj = MI->getOperand(j);
+        if (!MOj.isRegister())
           continue;
-        unsigned RegJ = MI->getOperand(j).getReg();
+        unsigned RegJ = MOj.getReg();
         if (RegJ == 0 || MRegisterInfo::isPhysicalRegister(RegJ))
           continue;
         if (RegJ == RegI) {
-          UpdateOps.push_back(j);
-          if (MI->getOperand(j).isUse())
-            UseOps.push_back(j);
-          HasUse |= MI->getOperand(j).isUse();
-          HasDef |= MI->getOperand(j).isDef();
+          Ops.push_back(j);
+          HasUse |= MOj.isUse();
+          HasDef |= MOj.isDef();
         }
       }

       if (TryFold &&
-          tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, i,
-                               UseOps, FoldSS, FoldSlot, Reg)) {
+          tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
+                               Ops, FoldSS, FoldSlot, Reg)) {
         // Folding the load/store can completely change the instruction in
         // unpredictable ways, rescan it from the beginning.
         HasUse = false;
@@ -814,8 +815,8 @@ rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
         mop.setReg(NewVReg);

         // Reuse NewVReg for other reads.
-        for (unsigned j = 0, e = UpdateOps.size(); j != e; ++j)
-          MI->getOperand(UpdateOps[j]).setReg(NewVReg);
+        for (unsigned j = 0, e = Ops.size(); j != e; ++j)
+          MI->getOperand(Ops[j]).setReg(NewVReg);

         if (CreatedNewVReg) {
           if (DefIsReMat) {
@@ -1226,7 +1227,7 @@ addIntervalsForSpills(const LiveInterval &li,
   if (!TrySplit)
     return NewLIs;

-  SmallVector<unsigned, 2> UseOps;
+  SmallVector<unsigned, 2> Ops;
   if (NeedStackSlot) {
     int Id = SpillMBBs.find_first();
     while (Id != -1) {
@@ -1236,41 +1237,43 @@ addIntervalsForSpills(const LiveInterval &li,
         unsigned VReg = spills[i].vreg;
         bool isReMat = vrm.isReMaterialized(VReg);
         MachineInstr *MI = getInstructionFromIndex(index);
-        int OpIdx = -1;
-        UseOps.clear();
+        bool CanFold = false;
+        bool FoundUse = false;
+        Ops.clear();
         if (spills[i].canFold) {
+          CanFold = true;
           for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
             MachineOperand &MO = MI->getOperand(j);
             if (!MO.isRegister() || MO.getReg() != VReg)
               continue;
-            if (MO.isDef()) {
-              OpIdx = (int)j;
+
+            Ops.push_back(j);
+            if (MO.isDef())
               continue;
-            }
-            // Can't fold if it's two-address code and the use isn't the
-            // first and only use.
-            if (isReMat ||
-                (UseOps.empty() && !alsoFoldARestore(Id, index, VReg,
-                                                     RestoreMBBs, RestoreIdxes))) {
-              OpIdx = -1;
+            if (isReMat ||
+                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
+                                                RestoreMBBs, RestoreIdxes))) {
+              // MI has two-address uses of the same register. If the use
+              // isn't the first and only use in the BB, then we can't fold
+              // it. FIXME: Move this to rewriteInstructionsForSpills.
+              CanFold = false;
               break;
             }
-            UseOps.push_back(j);
+            FoundUse = true;
           }
         }
         // Fold the store into the def if possible.
        bool Folded = false;
-        if (OpIdx != -1) {
-          if (tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, UseOps,
-                                   true, Slot, VReg)) {
-            if (!UseOps.empty())
-              // Folded a two-address instruction, do not issue a load.
-              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
+        if (CanFold && !Ops.empty()) {
+          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
             Folded = true;
+            if (FoundUse > 0)
+              // Also folded uses, do not issue a load.
+              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
           }
         }
-        // Else tell the spiller to issue a store for us.
+        // Else tell the spiller to issue a spill.
         if (!Folded)
           vrm.addSpillPoint(VReg, MI);
       }
@@ -1287,41 +1290,40 @@ addIntervalsForSpills(const LiveInterval &li,
         continue;
       unsigned VReg = restores[i].vreg;
       MachineInstr *MI = getInstructionFromIndex(index);
-      int OpIdx = -1;
-      UseOps.clear();
+      bool CanFold = false;
+      Ops.clear();
       if (restores[i].canFold) {
+        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
           MachineOperand &MO = MI->getOperand(j);
           if (!MO.isRegister() || MO.getReg() != VReg)
             continue;
+
           if (MO.isDef()) {
-            // Can't fold if it's two-address code and it hasn't already
-            // been folded.
-            OpIdx = -1;
+            // If this restore were to be folded, it would have been folded
+            // already.
+            CanFold = false;
             break;
           }
-          if (UseOps.empty())
-            // Use the first use index.
-            OpIdx = (int)j;
-          UseOps.push_back(j);
+          Ops.push_back(j);
         }
       }

       // Fold the load into the use if possible.
       bool Folded = false;
-      if (OpIdx != -1) {
-        if (vrm.isReMaterialized(VReg)) {
+      if (CanFold && !Ops.empty()) {
+        if (!vrm.isReMaterialized(VReg))
+          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
+        else {
           MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
           int LdSlot = 0;
           bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
           // If the rematerializable def is a load, also try to fold it.
           if (isLoadSS ||
               (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG))
-            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, OpIdx,
-                                          UseOps, isLoadSS, LdSlot, VReg);
-        } else
-          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, UseOps,
-                                        true, Slot, VReg);
+            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
+                                          Ops, isLoadSS, LdSlot, VReg);
+        }
       }
       // If folding is not possible / failed, then tell the spiller to issue a
       // load / rematerialization for us.
diff --git a/lib/CodeGen/RegAllocBigBlock.cpp b/lib/CodeGen/RegAllocBigBlock.cpp
index 7f402a62b81..227a238a86d 100644
--- a/lib/CodeGen/RegAllocBigBlock.cpp
+++ b/lib/CodeGen/RegAllocBigBlock.cpp
@@ -520,7 +520,9 @@ MachineInstr *RABigBlock::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI
     assignVirtToPhysReg(VirtReg, PhysReg);
   } else {  // no free registers available.
     // try to fold the spill into the instruction
-    if(MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, OpNum, FrameIndex)) {
+    SmallVector<unsigned, 1> Ops;
+    Ops.push_back(OpNum);
+    if(MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, Ops, FrameIndex)) {
      ++NumFolded;
       // Since we changed the address of MI, make sure to update live variables
       // to know that the new instruction has the properties of the old one.
diff --git a/lib/CodeGen/RegAllocLocal.cpp b/lib/CodeGen/RegAllocLocal.cpp
index 456c457a316..a666184b96e 100644
--- a/lib/CodeGen/RegAllocLocal.cpp
+++ b/lib/CodeGen/RegAllocLocal.cpp
@@ -473,7 +473,9 @@ MachineInstr *RALocal::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI,
     assignVirtToPhysReg(VirtReg, PhysReg);
   } else {  // No registers available.
    // If we can fold this spill into this instruction, do so now.
-    if (MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, OpNum, FrameIndex)){
+    SmallVector<unsigned, 1> Ops;
+    Ops.push_back(OpNum);
+    if (MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, Ops, FrameIndex)) {
       ++NumFolded;
       // Since we changed the address of MI, make sure to update live variables
       // to know that the new instruction has the properties of the old one.
diff --git a/lib/CodeGen/VirtRegMap.cpp b/lib/CodeGen/VirtRegMap.cpp
index 86db2bc50bf..a7154b2f063 100644
--- a/lib/CodeGen/VirtRegMap.cpp
+++ b/lib/CodeGen/VirtRegMap.cpp
@@ -115,7 +115,7 @@ void VirtRegMap::assignVirtReMatId(unsigned virtReg, int id) {
 }

 void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
-                            unsigned OpNo, MachineInstr *NewMI) {
+                            MachineInstr *NewMI, ModRef MRInfo) {
   // Move previous memory references folded to new instruction.
   MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
   for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
@@ -124,18 +124,6 @@ void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
     MI2VirtMap.erase(I++);
   }

-  ModRef MRInfo;
-  const TargetInstrDescriptor *TID = OldMI->getInstrDescriptor();
-  if (TID->getOperandConstraint(OpNo, TOI::TIED_TO) != -1 ||
-      TID->findTiedToSrcOperand(OpNo) != -1) {
-    // Folded a two-address operand.
-    MRInfo = isModRef;
-  } else if (OldMI->getOperand(OpNo).isDef()) {
-    MRInfo = isMod;
-  } else {
-    MRInfo = isRef;
-  }
-
   // add new memory reference
   MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo)));
 }
@@ -830,7 +818,9 @@ bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
       NewMIs.clear();
       int Idx = NewMI->findRegisterUseOperandIdx(VirtReg);
       assert(Idx != -1);
-      MachineInstr *FoldedMI = MRI->foldMemoryOperand(NewMI, Idx, SS);
+      SmallVector<unsigned, 1> Ops;
+      Ops.push_back(Idx);
+      MachineInstr *FoldedMI = MRI->foldMemoryOperand(NewMI, Ops, SS);
       if (FoldedMI) {
         if (!VRM.hasPhys(UnfoldVR))
           VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
diff --git a/lib/CodeGen/VirtRegMap.h b/lib/CodeGen/VirtRegMap.h
index 7ca06bbd1a3..61a9738469c 100644
--- a/lib/CodeGen/VirtRegMap.h
+++ b/lib/CodeGen/VirtRegMap.h
@@ -280,10 +280,9 @@ namespace llvm {
     }

     /// @brief Updates information about the specified virtual register's value
-    /// folded into newMI machine instruction. The OpNum argument indicates the
-    /// operand number of OldMI that is folded.
-    void virtFolded(unsigned VirtReg, MachineInstr *OldMI, unsigned OpNum,
-                    MachineInstr *NewMI);
+    /// folded into newMI machine instruction.
+    void virtFolded(unsigned VirtReg, MachineInstr *OldMI, MachineInstr *NewMI,
+                    ModRef MRInfo);

     /// @brief Updates information about the specified virtual register's value
     /// folded into the specified machine instruction.
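Every client updated above now follows the same calling convention: collect the operand indices of the spilled register into a small vector, then call the single foldMemoryOperand entry point. The stand-alone sketch below mirrors the mod/ref classification that tryFoldMemoryOperand performs in the LiveIntervalAnalysis.cpp hunk before handing the filtered index list to the target. It is a minimal illustration only: the Operand struct and the ModRef values are simplified stand-ins for MachineOperand and VirtRegMap::ModRef, not the real LLVM types, and the sub-register check is omitted.

#include <cstdio>
#include <vector>

enum ModRef { isRef = 1, isMod = 2, isModRef = 3 };

struct Operand {
  bool IsDef;   // true if the operand writes the register
  int TiedTo;   // index of the operand it is tied to, or -1 (TOI::TIED_TO)
};

// Classify the candidate operands as mod and/or ref, and filter two-address
// (tied) uses out of the list that is actually passed on to the target's
// foldMemoryOperand -- the same loop structure as in the patch above.
unsigned classifyAndFilter(const std::vector<Operand> &MI,
                           const std::vector<unsigned> &Ops,
                           std::vector<unsigned> &FoldOps) {
  unsigned MRInfo = 0;
  for (unsigned OpIdx : Ops) {
    if (MI[OpIdx].IsDef)
      MRInfo |= (unsigned)isMod;
    else if (MI[OpIdx].TiedTo != -1) {
      MRInfo = (unsigned)isModRef;  // two-address use: both mod and ref
      continue;                     // do not ask the target to fold it
    } else
      MRInfo |= (unsigned)isRef;
    FoldOps.push_back(OpIdx);
  }
  return MRInfo;
}

int main() {
  // Two-address style instruction: operand 0 is a def, operand 1 is a use
  // tied to it, operand 2 is an independent use.
  std::vector<Operand> MI = {{true, -1}, {false, 0}, {false, -1}};
  std::vector<unsigned> Ops = {0, 1, 2}, FoldOps;
  unsigned MRInfo = classifyAndFilter(MI, Ops, FoldOps);
  std::printf("MRInfo = %u, %zu operand(s) handed to the target\n",
              MRInfo, FoldOps.size());
  return 0;
}

Moving this classification into the caller is what lets virtFolded above accept a precomputed ModRef value instead of re-deriving it from a single operand index, which is exactly the block deleted from VirtRegMap.cpp.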
diff --git a/lib/Target/ARM/ARMRegisterInfo.cpp b/lib/Target/ARM/ARMRegisterInfo.cpp
index b5c04bab0f3..f1665dc0b49 100644
--- a/lib/Target/ARM/ARMRegisterInfo.cpp
+++ b/lib/Target/ARM/ARMRegisterInfo.cpp
@@ -347,7 +347,11 @@ static bool isLowRegister(unsigned Reg) {
 }

 MachineInstr *ARMRegisterInfo::foldMemoryOperand(MachineInstr *MI,
-                                                 unsigned OpNum, int FI) const {
+                                                 SmallVectorImpl<unsigned> &Ops,
+                                                 int FI) const {
+  if (Ops.size() != 1) return NULL;
+
+  unsigned OpNum = Ops[0];
   unsigned Opc = MI->getOpcode();
   MachineInstr *NewMI = NULL;
   switch (Opc) {
diff --git a/lib/Target/ARM/ARMRegisterInfo.h b/lib/Target/ARM/ARMRegisterInfo.h
index 97be04f421a..ed53e4e2da5 100644
--- a/lib/Target/ARM/ARMRegisterInfo.h
+++ b/lib/Target/ARM/ARMRegisterInfo.h
@@ -74,22 +74,12 @@ public:
   void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                      unsigned DestReg, const MachineInstr *Orig) const;

-  MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
+  MachineInstr* foldMemoryOperand(MachineInstr* MI,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   int FrameIndex) const;

   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
-                                  int FrameIndex) const {
-    return 0;
-  }
-
-  MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
-                                  MachineInstr* LoadMI) const {
-    return 0;
-  }
-
-  MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
     return 0;
   }
diff --git a/lib/Target/Alpha/AlphaRegisterInfo.cpp b/lib/Target/Alpha/AlphaRegisterInfo.cpp
index 3d1747e8cfb..b8e2c268271 100644
--- a/lib/Target/Alpha/AlphaRegisterInfo.cpp
+++ b/lib/Target/Alpha/AlphaRegisterInfo.cpp
@@ -153,8 +153,10 @@ void AlphaRegisterInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
 }

 MachineInstr *AlphaRegisterInfo::foldMemoryOperand(MachineInstr *MI,
-                                                   unsigned OpNum,
+                                                   SmallVectorImpl<unsigned> &Ops,
                                                    int FrameIndex) const {
+  if (Ops.size() != 1) return NULL;
+
   // Make sure this is a reg-reg copy.
   unsigned Opc = MI->getOpcode();

@@ -166,7 +168,7 @@ MachineInstr *AlphaRegisterInfo::foldMemoryOperand(MachineInstr *MI,
   case Alpha::CPYSS:
   case Alpha::CPYST:
     if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
-      if (OpNum == 0) {  // move -> store
+      if (Ops[0] == 0) {  // move -> store
         unsigned InReg = MI->getOperand(1).getReg();
         Opc = (Opc == Alpha::BISr) ? Alpha::STQ : ((Opc == Alpha::CPYSS) ?
                                                    Alpha::STS : Alpha::STT);
diff --git a/lib/Target/Alpha/AlphaRegisterInfo.h b/lib/Target/Alpha/AlphaRegisterInfo.h
index 97d3280a090..04565f7bb22 100644
--- a/lib/Target/Alpha/AlphaRegisterInfo.h
+++ b/lib/Target/Alpha/AlphaRegisterInfo.h
@@ -48,22 +48,12 @@ struct AlphaRegisterInfo : public AlphaGenRegisterInfo {
                            const TargetRegisterClass *RC,
                            SmallVectorImpl<MachineInstr*> &NewMIs) const;

-  MachineInstr* foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
+  MachineInstr* foldMemoryOperand(MachineInstr* MI,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   int FrameIndex) const;

   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
-                                  int FrameIndex) const {
-    return 0;
-  }
-
-  MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
-                                  MachineInstr* LoadMI) const {
-    return 0;
-  }
-
-  MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
     return 0;
   }
diff --git a/lib/Target/Mips/MipsRegisterInfo.cpp b/lib/Target/Mips/MipsRegisterInfo.cpp
index 94cf59bb8bc..37c09331d5d 100644
--- a/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -176,8 +176,11 @@ void MipsRegisterInfo::reMaterialize(MachineBasicBlock &MBB,
 }

 MachineInstr *MipsRegisterInfo::
-foldMemoryOperand(MachineInstr* MI, unsigned OpNum, int FI) const
+foldMemoryOperand(MachineInstr* MI,
+                  SmallVectorImpl<unsigned> &Ops, int FI) const
 {
+  if (Ops.size() != 1) return NULL;
+
   MachineInstr *NewMI = NULL;

   switch (MI->getOpcode())
@@ -188,10 +191,10 @@ foldMemoryOperand(MachineInstr* MI, unsigned OpNum, int FI) const
         (MI->getOperand(1).getReg() == Mips::ZERO) &&
         (MI->getOperand(2).isRegister()))
     {
-      if (OpNum == 0)    // COPY -> STORE
+      if (Ops[0] == 0)    // COPY -> STORE
         NewMI = BuildMI(TII.get(Mips::SW)).addFrameIndex(FI)
                   .addImm(0).addReg(MI->getOperand(2).getReg());
-      else               // COPY -> LOAD
+      else                // COPY -> LOAD
         NewMI = BuildMI(TII.get(Mips::LW), MI->getOperand(0)
                   .getReg()).addImm(0).addFrameIndex(FI);
     }
diff --git a/lib/Target/Mips/MipsRegisterInfo.h b/lib/Target/Mips/MipsRegisterInfo.h
index 123f6e87dc8..4ebb7369756 100644
--- a/lib/Target/Mips/MipsRegisterInfo.h
+++ b/lib/Target/Mips/MipsRegisterInfo.h
@@ -55,22 +55,12 @@ struct MipsRegisterInfo : public MipsGenRegisterInfo {
   void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                      unsigned DestReg, const MachineInstr *Orig) const;

-  MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
+  MachineInstr* foldMemoryOperand(MachineInstr* MI,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   int FrameIndex) const;

   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
-                                  int FrameIndex) const {
-    return 0;
-  }
-
-  MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
-                                  MachineInstr* LoadMI) const {
-    return 0;
-  }
-
-  MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
     return 0;
   }
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 28c1fcba29b..09b0e515790 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -555,11 +555,14 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
 /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
 /// copy instructions, turning them into load/store instructions.
 MachineInstr *PPCRegisterInfo::foldMemoryOperand(MachineInstr *MI,
-                                                 unsigned OpNum,
-                                                 int FrameIndex) const {
+                                                 SmallVectorImpl<unsigned> &Ops,
+                                                 int FrameIndex) const {
+  if (Ops.size() != 1) return NULL;
+
   // Make sure this is a reg-reg copy.  Note that we can't handle MCRF, because
   // it takes more than one instruction to store it.
   unsigned Opc = MI->getOpcode();
+  unsigned OpNum = Ops[0];

   MachineInstr *NewMI = NULL;
   if ((Opc == PPC::OR &&
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.h b/lib/Target/PowerPC/PPCRegisterInfo.h
index 3fce8924d17..8647a339195 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.h
+++ b/lib/Target/PowerPC/PPCRegisterInfo.h
@@ -65,22 +65,12 @@ public:

   /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
   /// copy instructions, turning them into load/store instructions.
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
+  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           int FrameIndex) const;
-
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
-                                          int FrameIndex) const {
-    return 0;
-  }
-
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
-                                          MachineInstr* LoadMI) const {
-    return 0;
-  }

   virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           MachineInstr* LoadMI) const {
     return 0;
   }
diff --git a/lib/Target/Sparc/SparcRegisterInfo.cpp b/lib/Target/Sparc/SparcRegisterInfo.cpp
index f3e2ff88600..fcd0dfcca25 100644
--- a/lib/Target/Sparc/SparcRegisterInfo.cpp
+++ b/lib/Target/Sparc/SparcRegisterInfo.cpp
@@ -148,8 +148,11 @@ void SparcRegisterInfo::reMaterialize(MachineBasicBlock &MBB,
 }

 MachineInstr *SparcRegisterInfo::foldMemoryOperand(MachineInstr* MI,
-                                                   unsigned OpNum,
-                                                   int FI) const {
+                                                   SmallVectorImpl<unsigned> &Ops,
+                                                   int FI) const {
+  if (Ops.size() != 1) return NULL;
+
+  unsigned OpNum = Ops[0];
   bool isFloat = false;
   MachineInstr *NewMI = NULL;
   switch (MI->getOpcode()) {
diff --git a/lib/Target/Sparc/SparcRegisterInfo.h b/lib/Target/Sparc/SparcRegisterInfo.h
index cecbc8a87d9..347b631ecf4 100644
--- a/lib/Target/Sparc/SparcRegisterInfo.h
+++ b/lib/Target/Sparc/SparcRegisterInfo.h
@@ -59,23 +59,11 @@ struct SparcRegisterInfo : public SparcGenRegisterInfo {
                      unsigned DestReg, const MachineInstr *Orig) const;

   virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          unsigned OpNum,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           int FrameIndex) const;

   virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
-                                          int FrameIndex) const {
-    return 0;
-  }
-
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          unsigned OpNum,
-                                          MachineInstr* LoadMI) const {
-    return 0;
-  }
-
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           MachineInstr* LoadMI) const {
     return 0;
   }
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 29f401ab7bd..122dd9ed758 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -1140,73 +1140,58 @@ X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned i,
 }


-MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
-                                                 int FrameIndex) const {
-  // Check switch flag
-  if (NoFusing) return NULL;
-  SmallVector<MachineOperand,4> MOs;
-  MOs.push_back(MachineOperand::CreateFrameIndex(FrameIndex));
-  return foldMemoryOperand(MI, OpNum, MOs);
-}
-
 MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
-                                                 SmallVectorImpl<unsigned> &UseOps,
+                                                 SmallVectorImpl<unsigned> &Ops,
                                                  int FrameIndex) const {
   // Check switch flag
   if (NoFusing) return NULL;
-  if (UseOps.size() == 1)
-    return foldMemoryOperand(MI, UseOps[0], FrameIndex);
-  else if (UseOps.size() != 2 || UseOps[0] != 0 && UseOps[1] != 1)
+  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
+    unsigned NewOpc = 0;
+    switch (MI->getOpcode()) {
+    default: return NULL;
+    case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
+    case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
+    case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
+    case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
+    }
+    // Change to CMPXXri r, 0 first.
+    MI->setInstrDescriptor(TII.get(NewOpc));
+    MI->getOperand(1).ChangeToImmediate(0);
+  } else if (Ops.size() != 1)
     return NULL;
-  unsigned NewOpc = 0;
-  switch (MI->getOpcode()) {
-  default: return NULL;
-  case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
-  case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
-  case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
-  case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
-  }
-  // Change to CMPXXri r, 0 first.
-  MI->setInstrDescriptor(TII.get(NewOpc));
-  MI->getOperand(1).ChangeToImmediate(0);
-  return foldMemoryOperand(MI, 0, FrameIndex);
+
+  SmallVector<MachineOperand,4> MOs;
+  MOs.push_back(MachineOperand::CreateFrameIndex(FrameIndex));
+  return foldMemoryOperand(MI, Ops[0], MOs);
 }

-MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
+MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
+                                                 SmallVectorImpl<unsigned> &Ops,
                                                  MachineInstr *LoadMI) const {
   // Check switch flag
   if (NoFusing) return NULL;
+
+  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
+    unsigned NewOpc = 0;
+    switch (MI->getOpcode()) {
+    default: return NULL;
+    case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
+    case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
+    case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
+    case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
+    }
+    // Change to CMPXXri r, 0 first.
+    MI->setInstrDescriptor(TII.get(NewOpc));
+    MI->getOperand(1).ChangeToImmediate(0);
+  } else if (Ops.size() != 1)
+    return NULL;
+
   SmallVector<MachineOperand,4> MOs;
   unsigned NumOps = TII.getNumOperands(LoadMI->getOpcode());
   for (unsigned i = NumOps - 4; i != NumOps; ++i)
     MOs.push_back(LoadMI->getOperand(i));
-  return foldMemoryOperand(MI, OpNum, MOs);
-}
-
-MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
-                                                 SmallVectorImpl<unsigned> &UseOps,
-                                                 MachineInstr *LoadMI) const {
-  // Check switch flag
-  if (NoFusing) return NULL;
-
-  if (UseOps.size() == 1)
-    return foldMemoryOperand(MI, UseOps[0], LoadMI);
-  else if (UseOps.size() != 2 || UseOps[0] != 0 && UseOps[1] != 1)
-    return NULL;
-  unsigned NewOpc = 0;
-  switch (MI->getOpcode()) {
-  default: return NULL;
-  case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
-  case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
-  case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
-  case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
-  }
-  // Change to CMPXXri r, 0 first.
-  MI->setInstrDescriptor(TII.get(NewOpc));
-  MI->getOperand(1).ChangeToImmediate(0);
-  return foldMemoryOperand(MI, 0, LoadMI);
+  return foldMemoryOperand(MI, Ops[0], MOs);
 }
diff --git a/lib/Target/X86/X86RegisterInfo.h b/lib/Target/X86/X86RegisterInfo.h
index 53f08440a3f..c74d2e769df 100644
--- a/lib/Target/X86/X86RegisterInfo.h
+++ b/lib/Target/X86/X86RegisterInfo.h
@@ -133,32 +133,19 @@ public:

   /// foldMemoryOperand - If this target supports it, fold a load or store of
   /// the specified stack slot into the specified machine instruction for the
-  /// specified operand. If this is possible, the target should perform the
+  /// specified operand(s). If this is possible, the target should perform the
   /// folding and return true, otherwise it should return false. If it folds
   /// the instruction, it is likely that the MachineInstruction the iterator
   /// references has been changed.
   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  unsigned OpNum,
-                                  int FrameIndex) const;
-
-  /// foldMemoryOperand - Same as previous except it tries to fold instruction
-  /// with multiple uses of the same register.
-  MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   int FrameIndex) const;

   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  unsigned OpNum,
-                                  MachineInstr* LoadMI) const;
-
-  /// foldMemoryOperand - Same as the previous version except it allows folding
-  /// of any load and store from / to any address, not just from a specific
-  /// stack slot.
-  MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const;

   /// getOpcodeAfterMemoryFold - Returns the opcode of the would be new
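The X86 implementation keeps the one genuinely multi-operand fold: when asked to fold both register operands of a TESTrr (a register tested against itself), it first rewrites the instruction to a CMPri against immediate 0, after which only operand 0 remains for the memory reference. The stand-alone sketch below shows just that decision logic; the Opcode enum and Instr struct are hypothetical stand-ins for the real opcode tables and MachineInstr, and the rewriting done via TII.get and ChangeToImmediate is reduced to plain field assignments.

#include <cstdio>
#include <vector>

enum Opcode { TEST8rr, TEST16rr, TEST32rr, TEST64rr,
              CMP8ri, CMP16ri, CMP32ri, CMP64ri32 };

struct Instr {
  Opcode Opc;
  long Imm;  // models operand 1 after ChangeToImmediate(0)
};

// Returns the operand index the memory reference should be folded into,
// or -1 if the requested fold has to be rejected -- the same shape as the
// Ops.size() checks in both X86 foldMemoryOperand bodies above.
int prepareFold(Instr &MI, const std::vector<unsigned> &Ops) {
  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    Opcode NewOpc;
    switch (MI.Opc) {
    default:       return -1;
    case TEST8rr:  NewOpc = CMP8ri;    break;
    case TEST16rr: NewOpc = CMP16ri;   break;
    case TEST32rr: NewOpc = CMP32ri;   break;
    case TEST64rr: NewOpc = CMP64ri32; break;
    }
    MI.Opc = NewOpc;  // change to CMPxxri r, 0 first
    MI.Imm = 0;
    return 0;         // the memory reference replaces operand 0
  }
  if (Ops.size() != 1)
    return -1;        // all other multi-operand folds are rejected
  return (int)Ops[0];
}

int main() {
  Instr MI = {TEST32rr, -1};
  std::vector<unsigned> Ops = {0, 1};
  int FoldIdx = prepareFold(MI, Ops);
  std::printf("fold into operand %d (opcode %d, imm %ld)\n",
              FoldIdx, (int)MI.Opc, MI.Imm);
  return 0;
}

Called with Ops = {0, 1} on a TEST32rr, prepareFold rewrites the opcode to CMP32ri and reports that the memory reference should replace operand 0; any other multi-operand request is rejected, matching the Ops.size() != 1 bail-out every target performs after this patch.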