2010-08-09 23:59:04 +00:00
|
|
|
//===-- PeepholeOptimizer.cpp - Peephole Optimizations --------------------===//
|
2010-01-13 00:30:23 +00:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
2010-01-13 07:59:13 +00:00
|
|
|
//
|
2010-08-09 23:59:04 +00:00
|
|
|
// Perform peephole optimizations on the machine code:
|
2010-01-13 07:59:13 +00:00
|
|
|
//
|
2010-08-09 23:59:04 +00:00
|
|
|
// - Optimize Extensions
|
2010-01-13 07:59:13 +00:00
|
|
|
//
|
2010-08-09 23:59:04 +00:00
|
|
|
// Optimization of sign / zero extension instructions. It may be extended to
|
|
|
|
// handle other instructions with similar properties.
|
|
|
|
//
|
|
|
|
// On some targets, some instructions, e.g. X86 sign / zero extension, may
|
|
|
|
// leave the source value in the lower part of the result. This optimization
|
|
|
|
// will replace some uses of the pre-extension value with uses of the
|
|
|
|
// sub-register of the results.
|
|
|
|
//
|
|
|
|
// - Optimize Comparisons
|
|
|
|
//
|
|
|
|
// Optimization of comparison instructions. For instance, in this code:
|
|
|
|
//
|
|
|
|
// sub r1, 1
|
|
|
|
// cmp r1, 0
|
|
|
|
// bz L1
|
|
|
|
//
|
|
|
|
// If the "sub" instruction already sets (or could be modified to set) the
|
|
|
|
// same flag that the "cmp" instruction sets and that "bz" uses, then we can
|
|
|
|
// eliminate the "cmp" instruction.
|
2012-05-11 01:30:47 +00:00
|
|
|
//
|
|
|
|
// Another instance, in this code:
|
|
|
|
//
|
|
|
|
// sub r1, r3 | sub r1, imm
|
|
|
|
// cmp r3, r1 or cmp r1, r3 | cmp r1, imm
|
|
|
|
// bge L1
|
|
|
|
//
|
|
|
|
// If the branch instruction can use flag from "sub", then we can replace
|
|
|
|
// "sub" with "subs" and eliminate the "cmp" instruction.
|
2011-03-15 05:13:13 +00:00
|
|
|
//
|
|
|
|
// - Optimize Bitcast pairs:
|
|
|
|
//
|
|
|
|
// v1 = bitcast v0
|
|
|
|
// v2 = bitcast v1
|
|
|
|
// = v2
|
|
|
|
// =>
|
|
|
|
// v1 = bitcast v0
|
|
|
|
// = v0
|
2012-02-08 21:22:43 +00:00
|
|
|
//
|
2010-01-13 07:59:13 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
2010-01-13 00:30:23 +00:00
|
|
|
|
2010-08-09 23:59:04 +00:00
|
|
|
#define DEBUG_TYPE "peephole-opt"
|
2010-01-13 00:30:23 +00:00
|
|
|
#include "llvm/CodeGen/Passes.h"
|
|
|
|
#include "llvm/CodeGen/MachineDominators.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
|
|
|
#include "llvm/Target/TargetInstrInfo.h"
|
|
|
|
#include "llvm/Target/TargetRegisterInfo.h"
|
|
|
|
#include "llvm/Support/CommandLine.h"
|
2010-11-17 20:13:28 +00:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
2010-01-13 00:30:23 +00:00
|
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
2010-11-17 20:13:28 +00:00
|
|
|
#include "llvm/ADT/SmallSet.h"
|
2010-01-13 00:30:23 +00:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
|
|
|
using namespace llvm;
|
|
|
|
|
2010-08-09 23:59:04 +00:00
|
|
|
// Optimize Extensions
//
// When set, optimizeExtInstr may extend the live range of an extension
// result across basic blocks (this requires the machine dominator tree,
// see getAnalysisUsage below).
static cl::opt<bool>
Aggressive("aggressive-ext-opt", cl::Hidden,
           cl::desc("Aggressive extension optimization"));
|
2010-01-13 00:30:23 +00:00
|
|
|
|
// Review note (from upstream commit r117950): when we look at instructions to
// convert to setting the 's' flag, we need to look at more than those which
// define CPSR. You can have this situation:
//   (1) subs ...
//   (2) sub r6, r5, r4
//   (3) movge ...
//   (4) cmp r6, 0
//   (5) movge ...
// We cannot convert (2) to "subs" because (3) is using the CPSR set by (1).
// There's an analogous situation here:
//   (1) sub r1, r2, r3
//   (2) sub r4, r5, r6
//   (3) cmp r4, ...
//   (5) movge ...
//   (6) cmp r1, ...
//   (7) movge ...
// We cannot convert (1) to "subs" because of the intervening use of CPSR.
// git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@117950 91177308-0d34-0410-b5e6-96231b3b80d8
2010-11-01 20:41:43 +00:00
|
|
|
// Escape hatch: turn the whole pass off from the command line.
static cl::opt<bool>
DisablePeephole("disable-peephole", cl::Hidden, cl::init(false),
                cl::desc("Disable the peephole optimizer"));

// Pass statistics (reported with -stats).
STATISTIC(NumReuse,    "Number of extension results reused");
STATISTIC(NumBitcasts, "Number of bitcasts eliminated");
STATISTIC(NumCmps,     "Number of compares eliminated");
STATISTIC(NumImmFold,  "Number of move immediate folded");
STATISTIC(NumLoadFold, "Number of loads folded");
|
2010-08-09 23:59:04 +00:00
|
|
|
|
2010-01-13 00:30:23 +00:00
|
|
|
namespace {
  // Machine-code peephole optimizer. Walks every basic block of a function
  // and applies the local transformations described in the file header:
  // extension-result reuse, bitcast-pair elimination, compare elimination,
  // immediate folding, and load folding.
  class PeepholeOptimizer : public MachineFunctionPass {
    const TargetMachine *TM;    // Target machine of the current function.
    const TargetInstrInfo *TII; // Target hooks (isCoalescableExtInstr, etc.).
    MachineRegisterInfo *MRI;   // Virtual-register def/use information.
    MachineDominatorTree *DT;   // Machine dominator tree

  public:
    static char ID; // Pass identification
    PeepholeOptimizer() : MachineFunctionPass(ID) {
      initializePeepholeOptimizerPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
      // The dominator tree is only needed (and preserved) for the aggressive
      // cross-block extension optimization.
      if (Aggressive) {
        AU.addRequired<MachineDominatorTree>();
        AU.addPreserved<MachineDominatorTree>();
      }
    }

  private:
    // Each optimize* helper returns true if it changed the code; MI may have
    // been erased when they return true (see runOnMachineFunction).
    bool optimizeBitcastInstr(MachineInstr *MI, MachineBasicBlock *MBB);
    bool optimizeCmpInstr(MachineInstr *MI, MachineBasicBlock *MBB);
    bool optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                          SmallPtrSet<MachineInstr*, 8> &LocalMIs);
    // Record a move-immediate's def register in ImmDefRegs/ImmDefMIs.
    bool isMoveImmediate(MachineInstr *MI,
                         SmallSet<unsigned, 4> &ImmDefRegs,
                         DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
    // Fold a previously recorded immediate definition into MI's operands.
    bool foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                       SmallSet<unsigned, 4> &ImmDefRegs,
                       DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
    // Check whether MI is a load whose result may be folded into a later
    // instruction; records its def register in FoldAsLoadDefReg.
    bool isLoadFoldable(MachineInstr *MI, unsigned &FoldAsLoadDefReg);
  };
}
|
|
|
|
|
2010-08-09 23:59:04 +00:00
|
|
|
char PeepholeOptimizer::ID = 0;
// Exported handle so other code can refer to this pass by ID.
char &llvm::PeepholeOptimizerID = PeepholeOptimizer::ID;
INITIALIZE_PASS_BEGIN(PeepholeOptimizer, "peephole-opts",
                "Peephole Optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(PeepholeOptimizer, "peephole-opts",
                "Peephole Optimizations", false, false)
|
2010-08-09 23:59:04 +00:00
|
|
|
|
2012-05-01 23:21:41 +00:00
|
|
|
/// optimizeExtInstr - If instruction is a copy-like instruction, i.e. it reads
/// a single register and writes a single register and it does not modify the
/// source, and if the source value is preserved as a sub-register of the
/// result, then replace all reachable uses of the source with the subreg of the
/// result.
///
/// Do not generate an EXTRACT that is used only in a debug use, as this changes
/// the code. Since this code does not currently share EXTRACTs, just ignore all
/// debug uses.
bool PeepholeOptimizer::
optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                 SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
  unsigned SrcReg, DstReg, SubIdx;
  // Ask the target whether MI is an extension whose source survives as
  // DstReg:SubIdx.
  if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
    return false;

  // Only virtual registers can be rewritten here.
  if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg))
    return false;

  if (MRI->hasOneNonDBGUse(SrcReg))
    // No other uses.
    return false;

  // Ensure DstReg can get a register class that actually supports
  // sub-registers. Don't change the class until we commit.
  const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
  DstRC = TM->getRegisterInfo()->getSubClassWithSubReg(DstRC, SubIdx);
  if (!DstRC)
    return false;

  // The ext instr may be operating on a sub-register of SrcReg as well.
  // PPC::EXTSW is a 32 -> 64-bit sign extension, but it reads a 64-bit
  // register.
  // If UseSrcSubIdx is Set, SubIdx also applies to SrcReg, and only uses of
  // SrcReg:SubIdx should be replaced.
  bool UseSrcSubIdx = TM->getRegisterInfo()->
    getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != 0;

  // The source has other uses. See if we can replace the other uses with use of
  // the result of the extension.
  SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI)
    ReachedBBs.insert(UI->getParent());

  // Uses that are in the same BB of uses of the result of the instruction.
  SmallVector<MachineOperand*, 8> Uses;

  // Uses that the result of the instruction can reach.
  SmallVector<MachineOperand*, 8> ExtendedUses;

  bool ExtendLife = true;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(SrcReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI) {
    MachineOperand &UseMO = UI.getOperand();
    MachineInstr *UseMI = &*UI;
    // Skip the extension instruction itself.
    if (UseMI == MI)
      continue;

    // Never rewrite PHI inputs (see PHIBBs handling below); also don't extend
    // DstReg's live range because of them.
    if (UseMI->isPHI()) {
      ExtendLife = false;
      continue;
    }

    // Only accept uses of SrcReg:SubIdx.
    if (UseSrcSubIdx && UseMO.getSubReg() != SubIdx)
      continue;

    // It's an error to translate this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
    //
    // into this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1027 = COPY %reg1025:4
    //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
    //
    // The problem here is that SUBREG_TO_REG is there to assert that an
    // implicit zext occurs. It doesn't insert a zext instruction. If we allow
    // the COPY here, it will give us the value after the <sext>, not the
    // original value of %reg1024 before <sext>.
    if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
      continue;

    MachineBasicBlock *UseMBB = UseMI->getParent();
    if (UseMBB == MBB) {
      // Local uses that come after the extension.
      if (!LocalMIs.count(UseMI))
        Uses.push_back(&UseMO);
    } else if (ReachedBBs.count(UseMBB)) {
      // Non-local uses where the result of the extension is used. Always
      // replace these unless it's a PHI.
      Uses.push_back(&UseMO);
    } else if (Aggressive && DT->dominates(MBB, UseMBB)) {
      // We may want to extend the live range of the extension result in order
      // to replace these uses.
      ExtendedUses.push_back(&UseMO);
    } else {
      // Both will be live out of the def MBB anyway. Don't extend live range of
      // the extension result.
      ExtendLife = false;
      break;
    }
  }

  if (ExtendLife && !ExtendedUses.empty())
    // Extend the liveness of the extension result.
    std::copy(ExtendedUses.begin(), ExtendedUses.end(),
              std::back_inserter(Uses));

  // Now replace all uses.
  bool Changed = false;
  if (!Uses.empty()) {
    SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;

    // Look for PHI uses of the extended result, we don't want to extend the
    // liveness of a PHI input. It breaks all kinds of assumptions down
    // stream. A PHI use is expected to be the kill of its source values.
    for (MachineRegisterInfo::use_nodbg_iterator
         UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
         UI != UE; ++UI)
      if (UI->isPHI())
        PHIBBs.insert(UI->getParent());

    const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
    for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
      MachineOperand *UseMO = Uses[i];
      MachineInstr *UseMI = UseMO->getParent();
      MachineBasicBlock *UseMBB = UseMI->getParent();
      if (PHIBBs.count(UseMBB))
        continue;

      // About to add uses of DstReg, clear DstReg's kill flags.
      if (!Changed) {
        MRI->clearKillFlags(DstReg);
        MRI->constrainRegClass(DstReg, DstRC);
      }

      // Insert a COPY of DstReg:SubIdx right before the use and redirect the
      // use to the copy's result.
      unsigned NewVR = MRI->createVirtualRegister(RC);
      MachineInstr *Copy = BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY), NewVR)
        .addReg(DstReg, 0, SubIdx);
      // SubIdx applies to both SrcReg and DstReg when UseSrcSubIdx is set.
      if (UseSrcSubIdx) {
        Copy->getOperand(0).setSubReg(SubIdx);
        Copy->getOperand(0).setIsUndef();
      }

      UseMO->setReg(NewVR);
      ++NumReuse;
      Changed = true;
    }
  }

  return Changed;
}
|
|
|
|
|
2012-05-01 23:21:41 +00:00
|
|
|
/// optimizeBitcastInstr - If the instruction is a bitcast instruction A that
|
2011-03-15 05:13:13 +00:00
|
|
|
/// cannot be optimized away during isel (e.g. ARM::VMOVSR, which bitcast
|
|
|
|
/// a value cross register classes), and the source is defined by another
|
|
|
|
/// bitcast instruction B. And if the register class of source of B matches
|
|
|
|
/// the register class of instruction A, then it is legal to replace all uses
|
|
|
|
/// of the def of A with source of B. e.g.
|
|
|
|
/// %vreg0<def> = VMOVSR %vreg1
|
|
|
|
/// %vreg3<def> = VMOVRS %vreg0
|
|
|
|
/// Replace all uses of vreg3 with vreg1.
|
|
|
|
|
2012-05-01 23:21:41 +00:00
|
|
|
bool PeepholeOptimizer::optimizeBitcastInstr(MachineInstr *MI,
|
2011-03-15 05:13:13 +00:00
|
|
|
MachineBasicBlock *MBB) {
|
|
|
|
unsigned NumDefs = MI->getDesc().getNumDefs();
|
|
|
|
unsigned NumSrcs = MI->getDesc().getNumOperands() - NumDefs;
|
|
|
|
if (NumDefs != 1)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
unsigned Def = 0;
|
|
|
|
unsigned Src = 0;
|
|
|
|
for (unsigned i = 0, e = NumDefs + NumSrcs; i != e; ++i) {
|
|
|
|
const MachineOperand &MO = MI->getOperand(i);
|
|
|
|
if (!MO.isReg())
|
|
|
|
continue;
|
|
|
|
unsigned Reg = MO.getReg();
|
|
|
|
if (!Reg)
|
|
|
|
continue;
|
|
|
|
if (MO.isDef())
|
|
|
|
Def = Reg;
|
|
|
|
else if (Src)
|
|
|
|
// Multiple sources?
|
|
|
|
return false;
|
|
|
|
else
|
|
|
|
Src = Reg;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(Def && Src && "Malformed bitcast instruction!");
|
|
|
|
|
|
|
|
MachineInstr *DefMI = MRI->getVRegDef(Src);
|
2011-12-07 07:15:52 +00:00
|
|
|
if (!DefMI || !DefMI->isBitcast())
|
2011-03-15 05:13:13 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
unsigned SrcSrc = 0;
|
|
|
|
NumDefs = DefMI->getDesc().getNumDefs();
|
|
|
|
NumSrcs = DefMI->getDesc().getNumOperands() - NumDefs;
|
|
|
|
if (NumDefs != 1)
|
|
|
|
return false;
|
|
|
|
for (unsigned i = 0, e = NumDefs + NumSrcs; i != e; ++i) {
|
|
|
|
const MachineOperand &MO = DefMI->getOperand(i);
|
|
|
|
if (!MO.isReg() || MO.isDef())
|
|
|
|
continue;
|
|
|
|
unsigned Reg = MO.getReg();
|
|
|
|
if (!Reg)
|
|
|
|
continue;
|
2011-07-26 15:05:06 +00:00
|
|
|
if (!MO.isDef()) {
|
|
|
|
if (SrcSrc)
|
|
|
|
// Multiple sources?
|
|
|
|
return false;
|
|
|
|
else
|
|
|
|
SrcSrc = Reg;
|
|
|
|
}
|
2011-03-15 05:13:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (MRI->getRegClass(SrcSrc) != MRI->getRegClass(Def))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
MRI->replaceRegWith(Def, SrcSrc);
|
|
|
|
MRI->clearKillFlags(SrcSrc);
|
|
|
|
MI->eraseFromParent();
|
|
|
|
++NumBitcasts;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2012-05-01 23:21:41 +00:00
|
|
|
/// optimizeCmpInstr - If the instruction is a compare and the previous
|
2010-08-09 23:59:04 +00:00
|
|
|
/// instruction it's comparing against all ready sets (or could be modified to
|
|
|
|
/// set) the same flag as the compare, then we can remove the comparison and use
|
|
|
|
/// the flag from the previous instruction.
|
2012-05-01 23:21:41 +00:00
|
|
|
bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr *MI,
|
2011-03-15 05:13:13 +00:00
|
|
|
MachineBasicBlock *MBB) {
|
2010-08-09 23:59:04 +00:00
|
|
|
// If this instruction is a comparison against zero and isn't comparing a
|
|
|
|
// physical register, we can try to optimize it.
|
2012-06-29 21:33:59 +00:00
|
|
|
unsigned SrcReg, SrcReg2;
|
2010-09-21 12:01:15 +00:00
|
|
|
int CmpMask, CmpValue;
|
2012-06-29 21:33:59 +00:00
|
|
|
if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
|
|
|
|
TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
|
|
|
|
(SrcReg2 != 0 && TargetRegisterInfo::isPhysicalRegister(SrcReg2)))
|
2010-08-09 23:59:04 +00:00
|
|
|
return false;
|
|
|
|
|
2010-09-11 00:13:50 +00:00
|
|
|
// Attempt to optimize the comparison instruction.
|
2012-06-29 21:33:59 +00:00
|
|
|
if (TII->optimizeCompareInstr(MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI)) {
|
2011-03-15 05:13:13 +00:00
|
|
|
++NumCmps;
|
2010-08-09 23:59:04 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2012-08-02 00:56:42 +00:00
|
|
|
/// isLoadFoldable - Check whether MI is a candidate for folding into a later
|
|
|
|
/// instruction. We only fold loads to virtual registers and the virtual
|
|
|
|
/// register defined has a single use.
|
|
|
|
bool PeepholeOptimizer::isLoadFoldable(MachineInstr *MI,
|
|
|
|
unsigned &FoldAsLoadDefReg) {
|
2012-08-02 19:37:32 +00:00
|
|
|
if (!MI->canFoldAsLoad() || !MI->mayLoad())
|
|
|
|
return false;
|
|
|
|
const MCInstrDesc &MCID = MI->getDesc();
|
|
|
|
if (MCID.getNumDefs() != 1)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
unsigned Reg = MI->getOperand(0).getReg();
|
|
|
|
// To reduce compilation time, we check MRI->hasOneUse when inserting
|
|
|
|
// loads. It should be checked when processing uses of the load, since
|
|
|
|
// uses can be removed during peephole.
|
|
|
|
if (!MI->getOperand(0).getSubReg() &&
|
|
|
|
TargetRegisterInfo::isVirtualRegister(Reg) &&
|
|
|
|
MRI->hasOneUse(Reg)) {
|
|
|
|
FoldAsLoadDefReg = Reg;
|
|
|
|
return true;
|
2012-08-02 00:56:42 +00:00
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-11-17 20:13:28 +00:00
|
|
|
/// isMoveImmediate - If MI is a move-immediate defining a single virtual
/// register, record the register in ImmDefRegs/ImmDefMIs for later folding
/// and return true.
bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
                                        SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  // Only track plain move-immediates with exactly one def.
  if (!MI->isMoveImmediate() || MI->getDesc().getNumDefs() != 1)
    return false;

  unsigned DefReg = MI->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DefReg))
    return false;

  ImmDefMIs.insert(std::make_pair(DefReg, MI));
  ImmDefRegs.insert(DefReg);
  return true;
}
|
|
|
|
|
2012-05-01 23:21:41 +00:00
|
|
|
/// foldImmediate - Try folding register operands that are defined by move
|
2010-11-17 20:13:28 +00:00
|
|
|
/// immediate instructions, i.e. a trivial constant folding optimization, if
|
|
|
|
/// and only if the def and use are in the same BB.
|
2012-05-01 23:21:41 +00:00
|
|
|
bool PeepholeOptimizer::foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
|
2010-11-17 20:13:28 +00:00
|
|
|
SmallSet<unsigned, 4> &ImmDefRegs,
|
|
|
|
DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
|
|
|
|
for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
|
|
|
|
MachineOperand &MO = MI->getOperand(i);
|
|
|
|
if (!MO.isReg() || MO.isDef())
|
|
|
|
continue;
|
|
|
|
unsigned Reg = MO.getReg();
|
2011-01-10 02:58:51 +00:00
|
|
|
if (!TargetRegisterInfo::isVirtualRegister(Reg))
|
2010-11-17 20:13:28 +00:00
|
|
|
continue;
|
|
|
|
if (ImmDefRegs.count(Reg) == 0)
|
|
|
|
continue;
|
|
|
|
DenseMap<unsigned, MachineInstr*>::iterator II = ImmDefMIs.find(Reg);
|
|
|
|
assert(II != ImmDefMIs.end());
|
|
|
|
if (TII->FoldImmediate(MI, II->second, Reg, MRI)) {
|
|
|
|
++NumImmFold;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-08-09 23:59:04 +00:00
|
|
|
// Drive all peephole optimizations over every basic block of MF.
// Returns true if any instruction was changed, replaced or erased.
bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (DisablePeephole)
    return false;

  TM  = &MF.getTarget();
  TII = TM->getInstrInfo();
  MRI = &MF.getRegInfo();
  DT  = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;

  bool Changed = false;

  SmallPtrSet<MachineInstr*, 8> LocalMIs;
  SmallSet<unsigned, 4> ImmDefRegs;
  DenseMap<unsigned, MachineInstr*> ImmDefMIs;
  unsigned FoldAsLoadDefReg;
  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    MachineBasicBlock *MBB = &*I;

    // All per-block state is reset at the top of each block.
    bool SeenMoveImm = false;
    LocalMIs.clear();
    ImmDefRegs.clear();
    ImmDefMIs.clear();
    FoldAsLoadDefReg = 0;

    // PMII tracks the previously visited instruction so the iterator can be
    // repaired after the current instruction is erased; First is true until
    // PMII has been set at least once.
    bool First = true;
    MachineBasicBlock::iterator PMII;
    for (MachineBasicBlock::iterator
           MII = I->begin(), MIE = I->end(); MII != MIE; ) {
      MachineInstr *MI = &*MII;
      LocalMIs.insert(MI);

      // If there exists an instruction which belongs to the following
      // categories, we will discard the load candidate.
      if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
          MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
          MI->hasUnmodeledSideEffects()) {
        FoldAsLoadDefReg = 0;
        ++MII;
        continue;
      }
      // Stores and calls may clobber the loaded memory; drop the candidate.
      if (MI->mayStore() || MI->isCall())
        FoldAsLoadDefReg = 0;

      if (MI->isBitcast()) {
        if (optimizeBitcastInstr(MI, MBB)) {
          // MI is deleted.
          LocalMIs.erase(MI);
          Changed = true;
          // Re-start from just after the previous instruction (or from the
          // block head if MI was the first instruction).
          MII = First ? I->begin() : llvm::next(PMII);
          continue;
        }
      } else if (MI->isCompare()) {
        if (optimizeCmpInstr(MI, MBB)) {
          // MI is deleted.
          LocalMIs.erase(MI);
          Changed = true;
          MII = First ? I->begin() : llvm::next(PMII);
          continue;
        }
      }

      if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
        SeenMoveImm = true;
      } else {
        Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
        // Only try immediate folding once an immediate def has been seen in
        // this block.
        if (SeenMoveImm)
          Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
      }

      // Check whether MI is a load candidate for folding into a later
      // instruction. If MI is not a candidate, check whether we can fold an
      // earlier load into MI.
      if (!isLoadFoldable(MI, FoldAsLoadDefReg) && FoldAsLoadDefReg) {
        // We need to fold load after optimizeCmpInstr, since optimizeCmpInstr
        // can enable folding by converting SUB to CMP.
        MachineInstr *DefMI = 0;
        MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
                                                      FoldAsLoadDefReg, DefMI);
        if (FoldMI) {
          // Update LocalMIs since we replaced MI with FoldMI and deleted DefMI.
          LocalMIs.erase(MI);
          LocalMIs.erase(DefMI);
          LocalMIs.insert(FoldMI);
          MI->eraseFromParent();
          DefMI->eraseFromParent();
          ++NumLoadFold;

          // MI is replaced with FoldMI.
          Changed = true;
          PMII = FoldMI;
          MII = llvm::next(PMII);
          continue;
        }
      }
      First = false;
      PMII = MII;
      ++MII;
    }
  }

  return Changed;
}
|