2009-11-07 22:00:39 +00:00
|
|
|
//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -------*- C++ -*-===//
|
2009-07-08 16:09:28 +00:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file contains the Base ARM implementation of the TargetInstrInfo class.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "ARMBaseInstrInfo.h"
|
|
|
|
#include "ARM.h"
|
|
|
|
#include "ARMAddressingModes.h"
|
2009-11-07 04:04:34 +00:00
|
|
|
#include "ARMConstantPoolValue.h"
|
2009-07-08 16:09:28 +00:00
|
|
|
#include "ARMMachineFunctionInfo.h"
|
2009-11-02 00:10:38 +00:00
|
|
|
#include "ARMRegisterInfo.h"
|
2010-07-20 21:17:29 +00:00
|
|
|
#include "ARMGenInstrInfo.inc"
|
2009-11-08 00:15:23 +00:00
|
|
|
#include "llvm/Constants.h"
|
|
|
|
#include "llvm/Function.h"
|
|
|
|
#include "llvm/GlobalValue.h"
|
2009-07-08 16:09:28 +00:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
|
|
|
#include "llvm/CodeGen/LiveVariables.h"
|
2009-11-07 04:04:34 +00:00
|
|
|
#include "llvm/CodeGen/MachineConstantPool.h"
|
2009-07-08 16:09:28 +00:00
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
|
|
|
#include "llvm/CodeGen/MachineJumpTableInfo.h"
|
2009-10-07 00:06:35 +00:00
|
|
|
#include "llvm/CodeGen/MachineMemOperand.h"
|
2010-05-22 01:47:14 +00:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2009-10-07 00:06:35 +00:00
|
|
|
#include "llvm/CodeGen/PseudoSourceValue.h"
|
2009-08-22 20:48:53 +00:00
|
|
|
#include "llvm/MC/MCAsmInfo.h"
|
2009-07-08 16:09:28 +00:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2009-11-02 00:10:38 +00:00
|
|
|
#include "llvm/Support/Debug.h"
|
2009-07-11 20:10:48 +00:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2009-07-08 16:09:28 +00:00
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
// Command-line switch (hidden): allow splitting ARM pre/post-indexed
// load/store instructions into un-indexed load/store + add/sub pairs.
static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));
|
|
|
|
|
2009-11-02 00:10:38 +00:00
|
|
|
// Construct the base ARM instruction info, handing the generated
// instruction table to the TargetInstrInfoImpl base and remembering the
// subtarget for later feature queries.
ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)),
    Subtarget(STI) {
}
|
|
|
|
|
|
|
|
MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  uint64_t TSFlags = MI->getDesc().TSFlags;

  // Only pre- and post-indexed loads/stores are candidates.
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned NumOps = TID.getNumOperands();
  bool isLoad = !TID.mayStore();
  // Writeback register is operand 1 for loads, operand 0 for stores.
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();

  // Build the add/sub that performs the base-register update.
  switch (AddrMode) {
  default:
    assert(false && "Unknown indexed op!");
    return NULL;
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3 : {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  // Build the un-indexed memory access. For pre-indexed forms the access
  // uses the updated base (WBReg) and follows the update; for post-indexed
  // forms it uses the old base and precedes the update.
  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() &&
          TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}
|
|
|
|
|
2010-05-22 01:47:14 +00:00
|
|
|
bool
|
|
|
|
ARMBaseInstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
|
2010-06-02 21:53:11 +00:00
|
|
|
MachineBasicBlock::iterator MI,
|
|
|
|
const std::vector<CalleeSavedInfo> &CSI,
|
|
|
|
const TargetRegisterInfo *TRI) const {
|
2010-05-22 01:47:14 +00:00
|
|
|
if (CSI.empty())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
DebugLoc DL;
|
|
|
|
if (MI != MBB.end()) DL = MI->getDebugLoc();
|
|
|
|
|
|
|
|
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
|
|
|
|
unsigned Reg = CSI[i].getReg();
|
|
|
|
bool isKill = true;
|
|
|
|
|
|
|
|
// Add the callee-saved register as live-in unless it's LR and
|
|
|
|
// @llvm.returnaddress is called. If LR is returned for @llvm.returnaddress
|
|
|
|
// then it's already added to the function and entry block live-in sets.
|
|
|
|
if (Reg == ARM::LR) {
|
|
|
|
MachineFunction &MF = *MBB.getParent();
|
|
|
|
if (MF.getFrameInfo()->isReturnAddressTaken() &&
|
|
|
|
MF.getRegInfo().isLiveIn(Reg))
|
|
|
|
isKill = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (isKill)
|
|
|
|
MBB.addLiveIn(Reg);
|
|
|
|
|
|
|
|
// Insert the spill to the stack frame. The register is killed at the spill
|
|
|
|
//
|
2010-06-02 20:02:30 +00:00
|
|
|
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
|
2010-05-22 01:47:14 +00:00
|
|
|
storeRegToStackSlot(MBB, MI, Reg, isKill,
|
2010-06-02 20:02:30 +00:00
|
|
|
CSI[i].getFrameIdx(), RC, TRI);
|
2010-05-22 01:47:14 +00:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2009-07-08 16:09:28 +00:00
|
|
|
// Branch analysis.
|
|
|
|
bool
|
|
|
|
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
|
|
|
|
MachineBasicBlock *&FBB,
|
|
|
|
SmallVectorImpl<MachineOperand> &Cond,
|
|
|
|
bool AllowModify) const {
|
|
|
|
// If the block has no terminators, it just falls into the block after it.
|
|
|
|
MachineBasicBlock::iterator I = MBB.end();
|
2010-04-02 01:38:09 +00:00
|
|
|
if (I == MBB.begin())
|
|
|
|
return false;
|
|
|
|
--I;
|
|
|
|
while (I->isDebugValue()) {
|
|
|
|
if (I == MBB.begin())
|
|
|
|
return false;
|
|
|
|
--I;
|
|
|
|
}
|
|
|
|
if (!isUnpredicatedTerminator(I))
|
2009-07-08 16:09:28 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// Get the last instruction in the block.
|
|
|
|
MachineInstr *LastInst = I;
|
|
|
|
|
|
|
|
// If there is only one terminator instruction, process it.
|
|
|
|
unsigned LastOpc = LastInst->getOpcode();
|
|
|
|
if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
|
2009-07-27 18:20:05 +00:00
|
|
|
if (isUncondBranchOpcode(LastOpc)) {
|
2009-07-08 16:09:28 +00:00
|
|
|
TBB = LastInst->getOperand(0).getMBB();
|
|
|
|
return false;
|
|
|
|
}
|
2009-07-27 18:20:05 +00:00
|
|
|
if (isCondBranchOpcode(LastOpc)) {
|
2009-07-08 16:09:28 +00:00
|
|
|
// Block ends with fall-through condbranch.
|
|
|
|
TBB = LastInst->getOperand(0).getMBB();
|
|
|
|
Cond.push_back(LastInst->getOperand(1));
|
|
|
|
Cond.push_back(LastInst->getOperand(2));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true; // Can't handle indirect branch.
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get the instruction before it if it is a terminator.
|
|
|
|
MachineInstr *SecondLastInst = I;
|
|
|
|
|
|
|
|
// If there are three terminators, we don't know what sort of block this is.
|
|
|
|
if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
|
|
|
|
return true;
|
|
|
|
|
2009-07-27 18:20:05 +00:00
|
|
|
// If the block ends with a B and a Bcc, handle it.
|
2009-07-08 16:09:28 +00:00
|
|
|
unsigned SecondLastOpc = SecondLastInst->getOpcode();
|
2009-07-27 18:20:05 +00:00
|
|
|
if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
|
2009-07-08 16:09:28 +00:00
|
|
|
TBB = SecondLastInst->getOperand(0).getMBB();
|
|
|
|
Cond.push_back(SecondLastInst->getOperand(1));
|
|
|
|
Cond.push_back(SecondLastInst->getOperand(2));
|
|
|
|
FBB = LastInst->getOperand(0).getMBB();
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the block ends with two unconditional branches, handle it. The second
|
|
|
|
// one is not executed, so remove it.
|
2009-07-27 18:20:05 +00:00
|
|
|
if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
|
2009-07-08 16:09:28 +00:00
|
|
|
TBB = SecondLastInst->getOperand(0).getMBB();
|
|
|
|
I = LastInst;
|
|
|
|
if (AllowModify)
|
|
|
|
I->eraseFromParent();
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// ...likewise if it ends with a branch table followed by an unconditional
|
|
|
|
// branch. The branch folder can create these, and we must get rid of them for
|
|
|
|
// correctness of Thumb constant islands.
|
2009-10-28 18:26:41 +00:00
|
|
|
if ((isJumpTableBranchOpcode(SecondLastOpc) ||
|
|
|
|
isIndirectBranchOpcode(SecondLastOpc)) &&
|
2009-07-27 18:20:05 +00:00
|
|
|
isUncondBranchOpcode(LastOpc)) {
|
2009-07-08 16:09:28 +00:00
|
|
|
I = LastInst;
|
|
|
|
if (AllowModify)
|
|
|
|
I->eraseFromParent();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, can't handle this.
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  // Skip trailing debug values; they are not branches.
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}
|
|
|
|
|
|
|
|
unsigned
|
|
|
|
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
|
2010-06-17 22:43:56 +00:00
|
|
|
MachineBasicBlock *FBB,
|
|
|
|
const SmallVectorImpl<MachineOperand> &Cond,
|
|
|
|
DebugLoc DL) const {
|
2009-07-28 05:48:47 +00:00
|
|
|
ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
|
|
|
|
int BOpc = !AFI->isThumbFunction()
|
|
|
|
? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
|
|
|
|
int BccOpc = !AFI->isThumbFunction()
|
|
|
|
? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
|
2009-07-08 16:09:28 +00:00
|
|
|
|
|
|
|
// Shouldn't be a fall through.
|
|
|
|
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
|
|
|
|
assert((Cond.size() == 2 || Cond.size() == 0) &&
|
|
|
|
"ARM branch conditions have two components!");
|
|
|
|
|
|
|
|
if (FBB == 0) {
|
|
|
|
if (Cond.empty()) // Unconditional branch?
|
2010-06-17 22:43:56 +00:00
|
|
|
BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
|
2009-07-08 16:09:28 +00:00
|
|
|
else
|
2010-06-17 22:43:56 +00:00
|
|
|
BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
|
2009-07-08 16:09:28 +00:00
|
|
|
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Two-way conditional branch.
|
2010-06-17 22:43:56 +00:00
|
|
|
BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
|
2009-07-08 16:09:28 +00:00
|
|
|
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
|
2010-06-17 22:43:56 +00:00
|
|
|
BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
|
2009-07-08 16:09:28 +00:00
|
|
|
return 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool ARMBaseInstrInfo::
|
|
|
|
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
|
|
|
|
ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
|
|
|
|
Cond[0].setImm(ARMCC::getOppositeCondition(CC));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool ARMBaseInstrInfo::
|
|
|
|
PredicateInstruction(MachineInstr *MI,
|
|
|
|
const SmallVectorImpl<MachineOperand> &Pred) const {
|
|
|
|
unsigned Opc = MI->getOpcode();
|
2009-07-27 18:20:05 +00:00
|
|
|
if (isUncondBranchOpcode(Opc)) {
|
|
|
|
MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
|
2009-07-08 16:09:28 +00:00
|
|
|
MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
|
|
|
|
MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
int PIdx = MI->findFirstPredOperandIdx();
|
|
|
|
if (PIdx != -1) {
|
|
|
|
MachineOperand &PMO = MI->getOperand(PIdx);
|
|
|
|
PMO.setImm(Pred[0].getImm());
|
|
|
|
MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool ARMBaseInstrInfo::
|
|
|
|
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
|
|
|
|
const SmallVectorImpl<MachineOperand> &Pred2) const {
|
|
|
|
if (Pred1.size() > 2 || Pred2.size() > 2)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
|
|
|
|
ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
|
|
|
|
if (CC1 == CC2)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
switch (CC1) {
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
case ARMCC::AL:
|
|
|
|
return true;
|
|
|
|
case ARMCC::HS:
|
|
|
|
return CC2 == ARMCC::HI;
|
|
|
|
case ARMCC::LS:
|
|
|
|
return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
|
|
|
|
case ARMCC::GE:
|
|
|
|
return CC2 == ARMCC::GT;
|
|
|
|
case ARMCC::LE:
|
|
|
|
return CC2 == ARMCC::LT;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  // FIXME: This confuses implicit_def with optional CPSR def.
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
    return false;

  // Collect every CPSR operand on the instruction into Pred.
  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == ARM::CPSR) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}
|
|
|
|
|
2009-11-21 06:21:52 +00:00
|
|
|
/// isPredicable - Return true if the specified instruction can be predicated.
|
|
|
|
/// By default, this returns true for every instruction with a
|
|
|
|
/// PredicateOperand.
|
|
|
|
bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
|
|
|
|
const TargetInstrDesc &TID = MI->getDesc();
|
|
|
|
if (!TID.isPredicable())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if ((TID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
|
|
|
|
ARMFunctionInfo *AFI =
|
|
|
|
MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
|
2009-11-24 08:06:15 +00:00
|
|
|
return AFI->isThumb2Function();
|
2009-11-21 06:21:52 +00:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
2009-07-08 16:09:28 +00:00
|
|
|
|
2009-12-03 06:58:32 +00:00
|
|
|
/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
|
|
|
|
DISABLE_INLINE
|
2009-07-08 16:09:28 +00:00
|
|
|
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
|
2009-12-03 06:58:32 +00:00
|
|
|
unsigned JTI);
|
2009-07-08 16:09:28 +00:00
|
|
|
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
|
|
|
|
unsigned JTI) {
|
2009-12-03 06:58:32 +00:00
|
|
|
assert(JTI < JT.size());
|
2009-07-08 16:09:28 +00:00
|
|
|
return JT[JTI].MBBs.size();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// GetInstSize - Return the size of the specified MachineInstr.
|
|
|
|
///
|
|
|
|
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
|
|
|
|
const MachineBasicBlock &MBB = *MI->getParent();
|
|
|
|
const MachineFunction *MF = MBB.getParent();
|
2009-08-22 21:43:10 +00:00
|
|
|
const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
|
2009-07-08 16:09:28 +00:00
|
|
|
|
|
|
|
// Basic size info comes from the TSFlags field.
|
|
|
|
const TargetInstrDesc &TID = MI->getDesc();
|
2010-06-08 22:51:23 +00:00
|
|
|
uint64_t TSFlags = TID.TSFlags;
|
2009-07-08 16:09:28 +00:00
|
|
|
|
2009-07-31 22:22:22 +00:00
|
|
|
unsigned Opc = MI->getOpcode();
|
2009-07-08 16:09:28 +00:00
|
|
|
switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
|
|
|
|
default: {
|
|
|
|
// If this machine instr is an inline asm, measure it.
|
|
|
|
if (MI->getOpcode() == ARM::INLINEASM)
|
2009-08-22 21:43:10 +00:00
|
|
|
return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
|
2009-07-08 16:09:28 +00:00
|
|
|
if (MI->isLabel())
|
|
|
|
return 0;
|
2009-07-31 22:22:22 +00:00
|
|
|
switch (Opc) {
|
2009-07-08 16:09:28 +00:00
|
|
|
default:
|
2009-07-14 16:55:14 +00:00
|
|
|
llvm_unreachable("Unknown or unset size field for instr!");
|
2010-02-09 19:54:29 +00:00
|
|
|
case TargetOpcode::IMPLICIT_DEF:
|
|
|
|
case TargetOpcode::KILL:
|
2010-07-16 22:20:36 +00:00
|
|
|
case TargetOpcode::PROLOG_LABEL:
|
2010-02-09 19:54:29 +00:00
|
|
|
case TargetOpcode::EH_LABEL:
|
2010-04-07 19:51:44 +00:00
|
|
|
case TargetOpcode::DBG_VALUE:
|
2009-07-08 16:09:28 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2009-07-24 18:20:44 +00:00
|
|
|
case ARMII::Size8Bytes: return 8; // ARM instruction x 2.
|
|
|
|
case ARMII::Size4Bytes: return 4; // ARM / Thumb2 instruction.
|
|
|
|
case ARMII::Size2Bytes: return 2; // Thumb1 instruction.
|
2009-07-08 16:09:28 +00:00
|
|
|
case ARMII::SizeSpecial: {
|
2009-07-31 22:22:22 +00:00
|
|
|
switch (Opc) {
|
2009-07-08 16:09:28 +00:00
|
|
|
case ARM::CONSTPOOL_ENTRY:
|
|
|
|
// If this machine instr is a constant pool entry, its size is recorded as
|
|
|
|
// operand #2.
|
|
|
|
return MI->getOperand(2).getImm();
|
2010-05-22 01:06:18 +00:00
|
|
|
case ARM::Int_eh_sjlj_longjmp:
|
|
|
|
return 16;
|
|
|
|
case ARM::tInt_eh_sjlj_longjmp:
|
|
|
|
return 10;
|
2009-07-24 18:20:44 +00:00
|
|
|
case ARM::Int_eh_sjlj_setjmp:
|
2010-04-28 20:33:09 +00:00
|
|
|
case ARM::Int_eh_sjlj_setjmp_nofp:
|
2010-05-27 23:49:24 +00:00
|
|
|
return 20;
|
2009-12-01 18:10:36 +00:00
|
|
|
case ARM::tInt_eh_sjlj_setjmp:
|
2009-08-11 19:42:21 +00:00
|
|
|
case ARM::t2Int_eh_sjlj_setjmp:
|
2010-04-28 20:33:09 +00:00
|
|
|
case ARM::t2Int_eh_sjlj_setjmp_nofp:
|
2010-05-27 23:49:24 +00:00
|
|
|
return 12;
|
2009-07-08 16:09:28 +00:00
|
|
|
case ARM::BR_JTr:
|
|
|
|
case ARM::BR_JTm:
|
|
|
|
case ARM::BR_JTadd:
|
2009-07-31 22:22:22 +00:00
|
|
|
case ARM::tBR_JTr:
|
2009-07-31 18:28:05 +00:00
|
|
|
case ARM::t2BR_JT:
|
|
|
|
case ARM::t2TBB:
|
|
|
|
case ARM::t2TBH: {
|
2009-07-08 16:09:28 +00:00
|
|
|
// These are jumptable branches, i.e. a branch followed by an inlined
|
2009-07-31 18:28:05 +00:00
|
|
|
// jumptable. The size is 4 + 4 * number of entries. For TBB, each
|
|
|
|
// entry is one byte; TBH two byte each.
|
2009-07-31 22:22:22 +00:00
|
|
|
unsigned EntrySize = (Opc == ARM::t2TBB)
|
|
|
|
? 1 : ((Opc == ARM::t2TBH) ? 2 : 4);
|
2009-07-08 16:09:28 +00:00
|
|
|
unsigned NumOps = TID.getNumOperands();
|
|
|
|
MachineOperand JTOP =
|
|
|
|
MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
|
|
|
|
unsigned JTI = JTOP.getIndex();
|
|
|
|
const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
|
2010-01-25 23:22:00 +00:00
|
|
|
assert(MJTI != 0);
|
2009-07-08 16:09:28 +00:00
|
|
|
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
|
|
|
|
assert(JTI < JT.size());
|
|
|
|
// Thumb instructions are 2 byte aligned, but JT entries are 4 byte
|
|
|
|
// 4 aligned. The assembler / linker may add 2 byte padding just before
|
|
|
|
// the JT entries. The size does not include this padding; the
|
|
|
|
// constant islands pass does separate bookkeeping for it.
|
|
|
|
// FIXME: If we know the size of the function is less than (1 << 16) *2
|
|
|
|
// bytes, we can use 16-bit entries instead. Then there won't be an
|
|
|
|
// alignment issue.
|
2009-08-01 06:13:52 +00:00
|
|
|
unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
|
|
|
|
unsigned NumEntries = getNumJTEntries(JT, JTI);
|
|
|
|
if (Opc == ARM::t2TBB && (NumEntries & 1))
|
|
|
|
// Make sure the instruction that follows TBB is 2-byte aligned.
|
|
|
|
// FIXME: Constant island pass should insert an "ALIGN" instruction
|
|
|
|
// instead.
|
|
|
|
++NumEntries;
|
|
|
|
return NumEntries * EntrySize + InstSize;
|
2009-07-08 16:09:28 +00:00
|
|
|
}
|
|
|
|
default:
|
|
|
|
// Otherwise, pseudo-instruction sizes are zero.
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0; // Not reached
|
|
|
|
}
|
|
|
|
|
2009-08-11 15:33:49 +00:00
|
|
|
unsigned
|
2009-07-08 16:09:28 +00:00
|
|
|
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
|
|
|
|
int &FrameIndex) const {
|
2009-07-27 00:24:36 +00:00
|
|
|
switch (MI->getOpcode()) {
|
|
|
|
default: break;
|
|
|
|
case ARM::LDR:
|
|
|
|
case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame.
|
2009-07-08 16:09:28 +00:00
|
|
|
if (MI->getOperand(1).isFI() &&
|
|
|
|
MI->getOperand(2).isReg() &&
|
|
|
|
MI->getOperand(3).isImm() &&
|
|
|
|
MI->getOperand(2).getReg() == 0 &&
|
|
|
|
MI->getOperand(3).getImm() == 0) {
|
|
|
|
FrameIndex = MI->getOperand(1).getIndex();
|
|
|
|
return MI->getOperand(0).getReg();
|
|
|
|
}
|
2009-07-27 00:24:36 +00:00
|
|
|
break;
|
|
|
|
case ARM::t2LDRi12:
|
|
|
|
case ARM::tRestore:
|
2009-07-24 00:16:18 +00:00
|
|
|
if (MI->getOperand(1).isFI() &&
|
|
|
|
MI->getOperand(2).isImm() &&
|
|
|
|
MI->getOperand(2).getImm() == 0) {
|
|
|
|
FrameIndex = MI->getOperand(1).getIndex();
|
|
|
|
return MI->getOperand(0).getReg();
|
|
|
|
}
|
2009-07-27 00:24:36 +00:00
|
|
|
break;
|
2009-11-09 00:11:35 +00:00
|
|
|
case ARM::VLDRD:
|
|
|
|
case ARM::VLDRS:
|
2009-07-08 16:09:28 +00:00
|
|
|
if (MI->getOperand(1).isFI() &&
|
|
|
|
MI->getOperand(2).isImm() &&
|
|
|
|
MI->getOperand(2).getImm() == 0) {
|
|
|
|
FrameIndex = MI->getOperand(1).getIndex();
|
|
|
|
return MI->getOperand(0).getReg();
|
|
|
|
}
|
2009-07-27 00:24:36 +00:00
|
|
|
break;
|
2009-07-08 16:09:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned
|
|
|
|
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
|
|
|
|
int &FrameIndex) const {
|
2009-07-27 00:24:36 +00:00
|
|
|
switch (MI->getOpcode()) {
|
|
|
|
default: break;
|
|
|
|
case ARM::STR:
|
|
|
|
case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
|
2009-07-08 16:09:28 +00:00
|
|
|
if (MI->getOperand(1).isFI() &&
|
|
|
|
MI->getOperand(2).isReg() &&
|
|
|
|
MI->getOperand(3).isImm() &&
|
|
|
|
MI->getOperand(2).getReg() == 0 &&
|
|
|
|
MI->getOperand(3).getImm() == 0) {
|
|
|
|
FrameIndex = MI->getOperand(1).getIndex();
|
|
|
|
return MI->getOperand(0).getReg();
|
|
|
|
}
|
2009-07-27 00:24:36 +00:00
|
|
|
break;
|
|
|
|
case ARM::t2STRi12:
|
|
|
|
case ARM::tSpill:
|
2009-07-24 00:16:18 +00:00
|
|
|
if (MI->getOperand(1).isFI() &&
|
|
|
|
MI->getOperand(2).isImm() &&
|
|
|
|
MI->getOperand(2).getImm() == 0) {
|
|
|
|
FrameIndex = MI->getOperand(1).getIndex();
|
|
|
|
return MI->getOperand(0).getReg();
|
|
|
|
}
|
2009-07-27 00:24:36 +00:00
|
|
|
break;
|
2009-11-09 00:11:35 +00:00
|
|
|
case ARM::VSTRD:
|
|
|
|
case ARM::VSTRS:
|
2009-07-08 16:09:28 +00:00
|
|
|
if (MI->getOperand(1).isFI() &&
|
|
|
|
MI->getOperand(2).isImm() &&
|
|
|
|
MI->getOperand(2).getImm() == 0) {
|
|
|
|
FrameIndex = MI->getOperand(1).getIndex();
|
|
|
|
return MI->getOperand(0).getReg();
|
|
|
|
}
|
2009-07-27 00:24:36 +00:00
|
|
|
break;
|
2009-07-08 16:09:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-07-11 06:33:54 +00:00
|
|
|
void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  bool GPRDest = ARM::GPRRegClass.contains(DestReg);
  bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);

  // GPR-to-GPR moves use a plain MOVr.
  if (GPRDest && GPRSrc) {
    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
                                .addReg(SrcReg, getKillRegState(KillSrc))));
    return;
  }

  bool SPRDest = ARM::SPRRegClass.contains(DestReg);
  bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);

  // Pick the VFP/NEON move opcode matching the register classes involved.
  unsigned Opc;
  if (SPRDest && SPRSrc)
    Opc = ARM::VMOVS;
  else if (GPRDest && SPRSrc)
    Opc = ARM::VMOVRS;
  else if (SPRDest && GPRSrc)
    Opc = ARM::VMOVSR;
  else if (ARM::DPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD;
  else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVQ;
  else if (ARM::QQPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVQQ;
  else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVQQQQ;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
  MIB.addReg(SrcReg, getKillRegState(KillSrc));
  // VMOVQQ / VMOVQQQQ take no predicate operands.
  if (Opc != ARM::VMOVQQ && Opc != ARM::VMOVQQQQ)
    AddDefaultPred(MIB);
}
|
|
|
|
|
2010-05-07 00:24:52 +00:00
|
|
|
static const
|
|
|
|
MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB,
|
|
|
|
unsigned Reg, unsigned SubIdx, unsigned State,
|
|
|
|
const TargetRegisterInfo *TRI) {
|
|
|
|
if (!SubIdx)
|
|
|
|
return MIB.addReg(Reg, State);
|
|
|
|
|
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(Reg))
|
|
|
|
return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
|
|
|
|
return MIB.addReg(Reg, State, SubIdx);
|
|
|
|
}
|
|
|
|
|
2009-07-08 16:09:28 +00:00
|
|
|
void ARMBaseInstrInfo::
|
|
|
|
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
|
|
|
|
unsigned SrcReg, bool isKill, int FI,
|
2010-05-06 19:06:44 +00:00
|
|
|
const TargetRegisterClass *RC,
|
|
|
|
const TargetRegisterInfo *TRI) const {
|
2010-04-02 20:16:16 +00:00
|
|
|
DebugLoc DL;
|
2009-07-08 16:09:28 +00:00
|
|
|
if (I != MBB.end()) DL = I->getDebugLoc();
|
2009-10-07 00:06:35 +00:00
|
|
|
MachineFunction &MF = *MBB.getParent();
|
|
|
|
MachineFrameInfo &MFI = *MF.getFrameInfo();
|
2009-11-08 00:27:19 +00:00
|
|
|
unsigned Align = MFI.getObjectAlignment(FI);
|
2009-10-07 00:06:35 +00:00
|
|
|
|
|
|
|
MachineMemOperand *MMO =
|
2009-10-18 18:16:27 +00:00
|
|
|
MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
|
2009-10-07 00:06:35 +00:00
|
|
|
MachineMemOperand::MOStore, 0,
|
|
|
|
MFI.getObjectSize(FI),
|
2009-11-08 00:27:19 +00:00
|
|
|
Align);
|
2009-07-08 16:09:28 +00:00
|
|
|
|
2010-02-16 22:01:59 +00:00
|
|
|
// tGPR is used sometimes in ARM instructions that need to avoid using
|
Many Thumb2 instructions can reference the full ARM register set (i.e.,
have 4 bits per register in the operand encoding), but have undefined
behavior when the operand value is 13 or 15 (SP and PC, respectively).
The trivial coalescer in linear scan sometimes will merge a copy from
SP into a subsequent instruction which uses the copy, and if that
instruction cannot legally reference SP, we get bad code such as:
mls r0,r9,r0,sp
instead of:
mov r2, sp
mls r0, r9, r0, r2
This patch adds a new register class for use by Thumb2 that excludes
the problematic registers (SP and PC) and is used instead of GPR
for those operands which cannot legally reference PC or SP. The
trivial coalescer explicitly requires that the register class
of the destination for the COPY instruction contain the source
register for the COPY to be considered for coalescing. This prevents
errant instructions like that above.
PR7499
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@109842 91177308-0d34-0410-b5e6-96231b3b80d8
2010-07-30 02:41:01 +00:00
|
|
|
// certain registers. Just treat it as GPR here. Likewise, rGPR.
|
|
|
|
if (RC == ARM::tGPRRegisterClass || RC == ARM::tcGPRRegisterClass
|
|
|
|
|| RC == ARM::rGPRRegisterClass)
|
2010-02-16 22:01:59 +00:00
|
|
|
RC = ARM::GPRRegisterClass;
|
|
|
|
|
2010-06-18 21:32:42 +00:00
|
|
|
switch (RC->getID()) {
|
|
|
|
case ARM::GPRRegClassID:
|
2009-07-27 03:14:20 +00:00
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
|
2009-07-08 16:09:28 +00:00
|
|
|
.addReg(SrcReg, getKillRegState(isKill))
|
2009-10-07 00:06:35 +00:00
|
|
|
.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
|
2010-06-18 21:32:42 +00:00
|
|
|
break;
|
|
|
|
case ARM::SPRRegClassID:
|
2010-05-06 01:34:11 +00:00
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
|
|
|
|
.addReg(SrcReg, getKillRegState(isKill))
|
|
|
|
.addFrameIndex(FI).addImm(0).addMemOperand(MMO));
|
2010-06-18 21:32:42 +00:00
|
|
|
break;
|
|
|
|
case ARM::DPRRegClassID:
|
|
|
|
case ARM::DPR_VFP2RegClassID:
|
|
|
|
case ARM::DPR_8RegClassID:
|
2009-11-09 00:11:35 +00:00
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
|
2009-07-08 16:09:28 +00:00
|
|
|
.addReg(SrcReg, getKillRegState(isKill))
|
2009-10-07 00:06:35 +00:00
|
|
|
.addFrameIndex(FI).addImm(0).addMemOperand(MMO));
|
2010-06-18 21:32:42 +00:00
|
|
|
break;
|
|
|
|
case ARM::QPRRegClassID:
|
|
|
|
case ARM::QPR_VFP2RegClassID:
|
|
|
|
case ARM::QPR_8RegClassID:
|
2010-09-08 00:26:59 +00:00
|
|
|
if (Align >= 16 && getRegisterInfo().needsStackRealignment(MF)) {
|
2010-05-13 01:12:06 +00:00
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q))
|
2010-07-06 21:26:18 +00:00
|
|
|
.addFrameIndex(FI).addImm(16)
|
2010-05-13 01:12:06 +00:00
|
|
|
.addReg(SrcReg, getKillRegState(isKill))
|
|
|
|
.addMemOperand(MMO));
|
2009-11-08 00:27:19 +00:00
|
|
|
} else {
|
2010-05-13 01:12:06 +00:00
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQ))
|
|
|
|
.addReg(SrcReg, getKillRegState(isKill))
|
|
|
|
.addFrameIndex(FI)
|
2010-08-27 23:18:17 +00:00
|
|
|
.addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
|
2010-05-13 01:12:06 +00:00
|
|
|
.addMemOperand(MMO));
|
2009-11-08 00:27:19 +00:00
|
|
|
}
|
2010-06-18 21:32:42 +00:00
|
|
|
break;
|
|
|
|
case ARM::QQPRRegClassID:
|
|
|
|
case ARM::QQPR_VFP2RegClassID:
|
2010-05-07 02:04:02 +00:00
|
|
|
if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
|
2010-05-14 02:13:41 +00:00
|
|
|
// FIXME: It's possible to only store part of the QQ register if the
|
|
|
|
// spilled def has a sub-register index.
|
2010-07-08 17:44:00 +00:00
|
|
|
MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VST1d64Q))
|
2010-07-06 21:26:18 +00:00
|
|
|
.addFrameIndex(FI).addImm(16);
|
2010-05-24 16:54:32 +00:00
|
|
|
MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
|
|
|
|
MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
|
|
|
|
MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
|
|
|
|
MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
|
2010-05-07 02:04:02 +00:00
|
|
|
AddDefaultPred(MIB.addMemOperand(MMO));
|
|
|
|
} else {
|
|
|
|
MachineInstrBuilder MIB =
|
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMD))
|
|
|
|
.addFrameIndex(FI)
|
2010-08-27 23:18:17 +00:00
|
|
|
.addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia)))
|
2010-05-07 02:04:02 +00:00
|
|
|
.addMemOperand(MMO);
|
2010-05-24 16:54:32 +00:00
|
|
|
MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
|
|
|
|
MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
|
|
|
|
MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
|
|
|
|
AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
|
2010-05-07 02:04:02 +00:00
|
|
|
}
|
2010-06-18 21:32:42 +00:00
|
|
|
break;
|
|
|
|
case ARM::QQQQPRRegClassID: {
|
2010-05-14 02:13:41 +00:00
|
|
|
MachineInstrBuilder MIB =
|
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMD))
|
|
|
|
.addFrameIndex(FI)
|
2010-08-27 23:18:17 +00:00
|
|
|
.addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia)))
|
2010-05-14 02:13:41 +00:00
|
|
|
.addMemOperand(MMO);
|
2010-05-24 16:54:32 +00:00
|
|
|
MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
|
|
|
|
MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
|
|
|
|
MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
|
|
|
|
MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
|
|
|
|
MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
|
|
|
|
MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
|
|
|
|
MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
|
|
|
|
AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
|
2010-06-18 21:32:42 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
llvm_unreachable("Unknown regclass!");
|
2009-07-08 16:09:28 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// loadRegFromStackSlot - Emit instruction(s) before iterator I in MBB that
/// reload register DestReg (of class RC) from stack slot FI. A MachineMemOperand
/// describing the fixed-stack load is attached so later passes can reason about
/// the memory access. The exact opcode chosen depends on the register class and,
/// for the NEON Q/QQ classes, on whether the slot is 16-byte aligned.
void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  // Describe the load from the fixed stack slot for the scheduler/AA.
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
                            MachineMemOperand::MOLoad, 0,
                            MFI.getObjectSize(FI),
                            Align);

  // tGPR is used sometimes in ARM instructions that need to avoid using
  // certain registers. Just treat it as GPR here.
  if (RC == ARM::tGPRRegisterClass || RC == ARM::tcGPRRegisterClass
      || RC == ARM::rGPRRegisterClass)
    RC = ARM::GPRRegisterClass;

  switch (RC->getID()) {
  case ARM::GPRRegClassID:
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
                   .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
    break;
  case ARM::SPRRegClassID:
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    break;
  case ARM::DPRRegClassID:
  case ARM::DPR_VFP2RegClassID:
  case ARM::DPR_8RegClassID:
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    break;
  case ARM::QPRRegClassID:
  case ARM::QPR_VFP2RegClassID:
  case ARM::QPR_8RegClassID:
    // With a 16-byte-aligned (realigned) slot we can use the faster
    // 128-bit VLD1; otherwise fall back to VLDM.
    if (Align >= 16 && getRegisterInfo().needsStackRealignment(MF)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q), DestReg)
                     .addFrameIndex(FI).addImm(16)
                     .addMemOperand(MMO));
    } else {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQ), DestReg)
                     .addFrameIndex(FI)
                     .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
                     .addMemOperand(MMO));
    }
    break;
  case ARM::QQPRRegClassID:
  case ARM::QQPR_VFP2RegClassID:
    if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
      // Aligned case: one VLD1 of four D registers; each D sub-register of
      // DestReg is added as a def operand via AddDReg.
      MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLD1d64Q));
      MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
      AddDefaultPred(MIB.addFrameIndex(FI).addImm(16).addMemOperand(MMO));
    } else {
      // Unaligned case: VLDM the four D sub-registers.
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMD))
                       .addFrameIndex(FI)
                       .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia)))
        .addMemOperand(MMO);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
            AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
    }
    break;
  case ARM::QQQQPRRegClassID: {
    // QQQQ (512-bit) registers: VLDM all eight D sub-registers.
    MachineInstrBuilder MIB =
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMD))
                     .addFrameIndex(FI)
                     .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia)))
      .addMemOperand(MMO);
    MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
    MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
    MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
    MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
    MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::Define, TRI);
    MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::Define, TRI);
    MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::Define, TRI);
          AddDReg(MIB, DestReg, ARM::dsub_7, RegState::Define, TRI);
    break;
  }
  default:
    llvm_unreachable("Unknown regclass!");
  }
}
|
|
|
|
|
2010-04-26 07:39:25 +00:00
|
|
|
/// emitFrameIndexDebugValue - Build (but do not insert) a DBG_VALUE machine
/// instruction that describes a variable located at frame index FrameIx plus
/// Offset, with MDPtr identifying the variable's metadata.
MachineInstr*
ARMBaseInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                           int FrameIx, uint64_t Offset,
                                           const MDNode *MDPtr,
                                           DebugLoc DL) const {
  // Assemble the DBG_VALUE one operand at a time:
  // frame index, a zero placeholder immediate, the offset, and the metadata.
  MachineInstrBuilder DbgMI = BuildMI(MF, DL, get(ARM::DBG_VALUE));
  DbgMI.addFrameIndex(FrameIx);
  DbgMI.addImm(0);
  DbgMI.addImm(Offset);
  DbgMI.addMetadata(MDPtr);
  return &*DbgMI;
}
|
|
|
|
|
2010-01-06 23:47:07 +00:00
|
|
|
/// Create a copy of a const pool value. Update CPI to the new index and return
/// the label UID.
///
/// The entry at CPI must be a machine constant pool entry holding an
/// ARMConstantPoolValue; a fresh copy is created with a newly allocated
/// PC-label id and appended to the function's constant pool.
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
  MachineConstantPool *MCP = MF.getConstantPool();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
  assert(MCPE.isMachineConstantPoolEntry() &&
         "Expecting a machine constantpool entry!");
  ARMConstantPoolValue *ACPV =
    static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);

  unsigned PCLabelId = AFI->createConstPoolEntryUId();
  ARMConstantPoolValue *NewCPV = 0;
  // FIXME: The below assumes PIC relocation model and that the function
  // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
  // zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR
  // instructions, so that's probably OK, but is PIC always correct when
  // we get here?
  if (ACPV->isGlobalValue())
    NewCPV = new ARMConstantPoolValue(ACPV->getGV(), PCLabelId,
                                      ARMCP::CPValue, 4);
  else if (ACPV->isExtSymbol())
    NewCPV = new ARMConstantPoolValue(MF.getFunction()->getContext(),
                                      ACPV->getSymbol(), PCLabelId, 4);
  else if (ACPV->isBlockAddress())
    NewCPV = new ARMConstantPoolValue(ACPV->getBlockAddress(), PCLabelId,
                                      ARMCP::CPBlockAddress, 4);
  else if (ACPV->isLSDA())
    NewCPV = new ARMConstantPoolValue(MF.getFunction(), PCLabelId,
                                      ARMCP::CPLSDA, 4);
  else
    llvm_unreachable("Unexpected ARM constantpool value type!!");
  // getConstantPoolIndex takes ownership of NewCPV (or unique-ifies it) and
  // returns the index of the copy; propagate that back to the caller.
  CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment());
  return PCLabelId;
}
|
|
|
|
|
2009-11-08 00:15:23 +00:00
|
|
|
/// reMaterialize - Re-issue the instruction Orig before iterator I in MBB so
/// that it defines DestReg (possibly through sub-register index SubIdx) instead
/// of its original destination. PIC constant-pool loads get a duplicated
/// constant-pool entry with a fresh PC label so each copy has a unique label.
void ARMBaseInstrInfo::
reMaterialize(MachineBasicBlock &MBB,
              MachineBasicBlock::iterator I,
              unsigned DestReg, unsigned SubIdx,
              const MachineInstr *Orig,
              const TargetRegisterInfo &TRI) const {
  unsigned Opcode = Orig->getOpcode();
  switch (Opcode) {
  default: {
    // Generic path: clone the instruction and retarget its destination.
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
    MBB.insert(I, MI);
    break;
  }
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    // PC-relative constant-pool loads need a duplicated CP entry so the new
    // PC label (and thus the computed address) is unique to this copy.
    MachineFunction &MF = *MBB.getParent();
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode),
                                      DestReg)
      .addConstantPoolIndex(CPI).addImm(PCLabelId);
    // Preserve the memory operands of the original load.
    (*MIB).setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end());
    break;
  }
  }
}
|
|
|
|
|
2010-01-06 23:47:07 +00:00
|
|
|
/// duplicate - Clone Orig via the default implementation; for PIC
/// constant-pool loads, additionally duplicate the constant-pool entry and
/// rewrite Orig's CP index and PC-label operands to the fresh copy (the clone
/// keeps the previous entry), matching the original behavior exactly.
MachineInstr *
ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const {
  MachineInstr *Clone = TargetInstrInfoImpl::duplicate(Orig, MF);
  unsigned Opc = Orig->getOpcode();
  if (Opc == ARM::tLDRpci_pic || Opc == ARM::t2LDRpci_pic) {
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    Orig->getOperand(1).setIndex(CPI);
    Orig->getOperand(2).setImm(PCLabelId);
  }
  return Clone;
}
|
|
|
|
|
2010-03-03 01:44:33 +00:00
|
|
|
bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
|
|
|
|
const MachineInstr *MI1) const {
|
2009-11-07 04:04:34 +00:00
|
|
|
int Opcode = MI0->getOpcode();
|
2009-11-20 02:10:27 +00:00
|
|
|
if (Opcode == ARM::t2LDRpci ||
|
|
|
|
Opcode == ARM::t2LDRpci_pic ||
|
|
|
|
Opcode == ARM::tLDRpci ||
|
|
|
|
Opcode == ARM::tLDRpci_pic) {
|
2009-11-07 04:04:34 +00:00
|
|
|
if (MI1->getOpcode() != Opcode)
|
|
|
|
return false;
|
|
|
|
if (MI0->getNumOperands() != MI1->getNumOperands())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
const MachineOperand &MO0 = MI0->getOperand(1);
|
|
|
|
const MachineOperand &MO1 = MI1->getOperand(1);
|
|
|
|
if (MO0.getOffset() != MO1.getOffset())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
const MachineFunction *MF = MI0->getParent()->getParent();
|
|
|
|
const MachineConstantPool *MCP = MF->getConstantPool();
|
|
|
|
int CPI0 = MO0.getIndex();
|
|
|
|
int CPI1 = MO1.getIndex();
|
|
|
|
const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
|
|
|
|
const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
|
|
|
|
ARMConstantPoolValue *ACPV0 =
|
|
|
|
static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
|
|
|
|
ARMConstantPoolValue *ACPV1 =
|
|
|
|
static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
|
|
|
|
return ACPV0->hasSameValue(ACPV1);
|
|
|
|
}
|
|
|
|
|
2010-03-03 01:44:33 +00:00
|
|
|
return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
|
2009-11-07 04:04:34 +00:00
|
|
|
}
|
|
|
|
|
2010-06-23 23:00:16 +00:00
|
|
|
/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
/// determine if two loads are loading from the same base address. It should
/// only return true if the base pointers are the same and the only differences
/// between the two addresses is the offset. It also returns the offsets by
/// reference.
bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                               int64_t &Offset1,
                                               int64_t &Offset2) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;

  // Only a whitelist of simple immediate-offset load opcodes is handled.
  switch (Load1->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDR:
  case ARM::LDRB:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRSHi12:
    break;
  }

  // The second load must be on the same whitelist.
  switch (Load2->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDR:
  case ARM::LDRB:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRSHi12:
    break;
  }

  // Check if base addresses and chain operands match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(4) != Load2->getOperand(4))
    return false;

  // Index should be Reg0.
  if (Load1->getOperand(3) != Load2->getOperand(3))
    return false;

  // Determine the offsets.
  if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
      isa<ConstantSDNode>(Load2->getOperand(1))) {
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
    Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
    return true;
  }

  return false;
}
|
|
|
|
|
|
|
|
/// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to
|
|
|
|
/// determine (in conjuction with areLoadsFromSameBasePtr) if two loads should
|
|
|
|
/// be scheduled togther. On some targets if two loads are loading from
|
|
|
|
/// addresses in the same cache line, it's better if they are scheduled
|
|
|
|
/// together. This function takes two integers that represent the load offsets
|
|
|
|
/// from the common base address. It returns true if it decides it's desirable
|
|
|
|
/// to schedule the two loads together. "NumLoads" is the number of loads that
|
|
|
|
/// have already been scheduled after Load1.
|
|
|
|
bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
|
|
|
|
int64_t Offset1, int64_t Offset2,
|
|
|
|
unsigned NumLoads) const {
|
|
|
|
// Don't worry about Thumb: just ARM and Thumb2.
|
|
|
|
if (Subtarget.isThumb1Only()) return false;
|
|
|
|
|
|
|
|
assert(Offset2 > Offset1);
|
|
|
|
|
|
|
|
if ((Offset2 - Offset1) / 8 > 64)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (Load1->getMachineOpcode() != Load2->getMachineOpcode())
|
|
|
|
return false; // FIXME: overly conservative?
|
|
|
|
|
|
|
|
// Four loads in a row should be sufficient.
|
|
|
|
if (NumLoads >= 3)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2010-06-18 23:09:54 +00:00
|
|
|
/// isSchedulingBoundary - Return true if the scheduler must not move
/// instructions across MI. Terminators, labels, the instruction preceding a
/// t2IT, and anything that writes SP are treated as boundaries.
bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
  // Debug info is never a scheduling boundary. It's necessary to be explicit
  // due to the special treatment of IT instructions below, otherwise a
  // dbg_value followed by an IT will result in the IT instruction being
  // considered a scheduling hazard, which is wrong. It should be the actual
  // instruction preceding the dbg_value instruction(s), just like it is
  // when debug info is not present.
  if (MI->isDebugValue())
    return false;

  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Treat the start of the IT block as a scheduling boundary, but schedule
  // t2IT along with all instructions following it.
  // FIXME: This is a big hammer. But the alternative is to add all potential
  // true and anti dependencies to IT block instructions as implicit operands
  // to the t2IT instruction. The added compile time and complexity does not
  // seem worth it.
  MachineBasicBlock::const_iterator I = MI;
  // Make sure to skip any dbg_value instructions
  while (++I != MBB->end() && I->isDebugValue())
    ;
  if (I != MBB->end() && I->getOpcode() == ARM::t2IT)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  if (MI->definesRegister(ARM::SP))
    return true;

  return false;
}
|
|
|
|
|
2010-06-25 22:42:03 +00:00
|
|
|
bool ARMBaseInstrInfo::
|
|
|
|
isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs) const {
|
|
|
|
if (!NumInstrs)
|
|
|
|
return false;
|
|
|
|
if (Subtarget.getCPUString() == "generic")
|
|
|
|
// Generic (and overly aggressive) if-conversion limits for testing.
|
|
|
|
return NumInstrs <= 10;
|
|
|
|
else if (Subtarget.hasV7Ops())
|
|
|
|
return NumInstrs <= 3;
|
|
|
|
return NumInstrs <= 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool ARMBaseInstrInfo::
|
|
|
|
isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumT,
|
|
|
|
MachineBasicBlock &FMBB, unsigned NumF) const {
|
|
|
|
return NumT && NumF && NumT <= 2 && NumF <= 2;
|
|
|
|
}
|
|
|
|
|
2009-08-08 03:20:32 +00:00
|
|
|
/// getInstrPredicate - If instruction is predicated, returns its predicate
|
|
|
|
/// condition, otherwise returns AL. It also returns the condition code
|
|
|
|
/// register by reference.
|
2009-09-28 09:14:39 +00:00
|
|
|
ARMCC::CondCodes
|
|
|
|
llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
|
2009-08-08 03:20:32 +00:00
|
|
|
int PIdx = MI->findFirstPredOperandIdx();
|
|
|
|
if (PIdx == -1) {
|
|
|
|
PredReg = 0;
|
|
|
|
return ARMCC::AL;
|
|
|
|
}
|
|
|
|
|
|
|
|
PredReg = MI->getOperand(PIdx+1).getReg();
|
|
|
|
return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-07-28 05:48:47 +00:00
|
|
|
int llvm::getMatchingCondBranchOpcode(int Opc) {
|
2009-07-27 18:20:05 +00:00
|
|
|
if (Opc == ARM::B)
|
|
|
|
return ARM::Bcc;
|
|
|
|
else if (Opc == ARM::tB)
|
|
|
|
return ARM::tBcc;
|
|
|
|
else if (Opc == ARM::t2B)
|
|
|
|
return ARM::t2Bcc;
|
|
|
|
|
|
|
|
llvm_unreachable("Unknown unconditional branch opcode!");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-07-28 05:48:47 +00:00
|
|
|
|
|
|
|
/// emitARMRegPlusImmediate - Emit a sequence of predicated ADDri/SUBri
/// instructions before MBBI that materializes DestReg = BaseReg + NumBytes.
/// The immediate is split into chunks that each fit an ARM shifter-operand
/// (8-bit value rotated by an even amount), so multiple instructions may be
/// emitted for large offsets.
void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                                   unsigned DestReg, unsigned BaseReg, int NumBytes,
                                   ARMCC::CondCodes Pred, unsigned PredReg,
                                   const ARMBaseInstrInfo &TII) {
  // Work with a positive magnitude and remember the sign.
  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  while (NumBytes) {
    // Extract the largest chunk of NumBytes that is a valid SOImm:
    // an 8-bit field at the rotation chosen by getSOImmValRotate.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    // We will handle these bits from offset, clear them.
    NumBytes &= ~ThisVal;

    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");

    // Build the new ADD / SUB.
    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
      .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
      .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
    // Subsequent chunks accumulate on top of the partial result.
    BaseReg = DestReg;
  }
}
|
|
|
|
|
2009-08-27 01:23:50 +00:00
|
|
|
/// rewriteARMFrameIndex - Replace the frame-index operand at FrameRegIdx of MI
/// with FrameReg, folding as much of Offset into the instruction's immediate
/// field as the instruction's addressing mode allows. On return, Offset holds
/// the (signed) residual that could not be folded; the function returns true
/// iff the entire offset was absorbed (residual is zero).
bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                unsigned FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const TargetInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      Offset = 0;
      return true;
    } else if (Offset < 0) {
      // Negative offset: flip the opcode to SUBri and work with magnitude.
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      Offset = 0;
      return true;
    }

    // Otherwise, pull as much of the immediate into this ADDri/SUBri
    // as possible.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    // Load/store path: determine where the immediate lives, how many bits
    // it has, and its scale, based on the addressing mode.
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode2: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode3: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    }
    case ARMII::AddrMode4:
    case ARMII::AddrMode6:
      // Can't fold any offset even if it's zero.
      return false;
    case ARMII::AddrMode5: {
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      break;
    }
    default:
      llvm_unreachable("Unsupported addressing mode!");
      break;
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
    }

    // Attempt to fold address comp. if opcode has offset bits
    if (NumBits > 0) {
      // Common case: small offset, fits into instruction.
      MachineOperand &ImmOp = MI.getOperand(ImmIdx);
      int ImmedOffset = Offset / Scale;
      unsigned Mask = (1 << NumBits) - 1;
      if ((unsigned)Offset <= Mask * Scale) {
        // Replace the FrameIndex with sp
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        // Bit NumBits encodes the add/sub direction in the packed immediate.
        if (isSub)
          ImmedOffset |= 1 << NumBits;
        ImmOp.ChangeToImmediate(ImmedOffset);
        Offset = 0;
        return true;
      }

      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      if (isSub)
        ImmedOffset |= 1 << NumBits;
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask*Scale);
    }
  }

  // Report the signed residual; folding succeeded only if nothing is left.
  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0;
}
|
2010-08-06 01:32:48 +00:00
|
|
|
|
|
|
|
bool ARMBaseInstrInfo::
|
2010-08-08 05:04:59 +00:00
|
|
|
AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg, int &CmpValue) const {
|
2010-08-06 01:32:48 +00:00
|
|
|
switch (MI->getOpcode()) {
|
|
|
|
default: break;
|
2010-08-11 00:23:00 +00:00
|
|
|
case ARM::CMPri:
|
|
|
|
case ARM::CMPzri:
|
2010-08-06 01:32:48 +00:00
|
|
|
case ARM::t2CMPri:
|
|
|
|
case ARM::t2CMPzri:
|
|
|
|
SrcReg = MI->getOperand(0).getReg();
|
|
|
|
CmpValue = MI->getOperand(1).getImm();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-09-11 00:13:50 +00:00
|
|
|
/// OptimizeCompareInstr - Convert the instruction supplying the argument to the
/// comparison into one that sets the zero bit in the flags register. Update the
/// iterator *only* if a transformation took place.
bool ARMBaseInstrInfo::
OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpValue,
                     MachineBasicBlock::iterator &MII) const {
  // Only compares against zero can be folded into the defining instruction's
  // flag-setting form.
  if (CmpValue != 0)
    return false;

  MachineRegisterInfo &MRI = CmpInstr->getParent()->getParent()->getRegInfo();
  MachineRegisterInfo::def_iterator DI = MRI.def_begin(SrcReg);
  if (llvm::next(DI) != MRI.def_end())
    // Only support one definition.
    return false;

  MachineInstr *MI = &*DI;

  // Conservatively refuse to convert an instruction which isn't in the same BB
  // as the comparison.
  if (MI->getParent() != CmpInstr->getParent())
    return false;

  // Check that CPSR isn't set between the comparison instruction and the one we
  // want to change.
  MachineBasicBlock::const_iterator I = CmpInstr, E = MI;
  --I;
  for (; I != E; --I) {
    const MachineInstr &Instr = *I;

    for (unsigned IO = 0, EO = Instr.getNumOperands(); IO != EO; ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (!MO.isReg() || !MO.isDef()) continue;

      // This instruction modifies CPSR before the one we want to change. We
      // can't do this transformation.
      if (MO.getReg() == ARM::CPSR)
        return false;
    }
  }

  // Set the "zero" bit in CPSR.
  switch (MI->getOpcode()) {
  default: break;
  case ARM::ADDri:
  case ARM::SUBri:
  case ARM::t2ADDri:
  case ARM::t2SUBri:
    // Replace the dead CC-out operand with an implicit CPSR def, then delete
    // the now-redundant compare and advance the caller's iterator past it.
    MI->RemoveOperand(5);
    MachineInstrBuilder(MI)
      .addReg(ARM::CPSR, RegState::Define | RegState::Implicit);
    MII = llvm::next(MachineBasicBlock::iterator(CmpInstr));
    CmpInstr->eraseFromParent();
    return true;
  }

  return false;
}
|
/// getNumMicroOps - Return the number of micro-ops this instruction issues.
/// Uses the itinerary's NumMicroOps when it is non-zero; otherwise computes a
/// count for the variable-width load/store-multiple instructions from their
/// register-list length (and, on some cores, memory alignment).
unsigned
ARMBaseInstrInfo::getNumMicroOps(const MachineInstr *MI,
                                 const InstrItineraryData *ItinData) const {
  // Without itinerary data, assume every instruction is a single uop.
  if (!ItinData || ItinData->isEmpty())
    return 1;

  const TargetInstrDesc &Desc = MI->getDesc();
  unsigned Class = Desc.getSchedClass();
  unsigned UOps = ItinData->Itineratries[Class].NumMicroOps;
  // A non-zero itinerary entry is authoritative; zero means "variable",
  // handled per-opcode below.
  if (UOps)
    return UOps;

  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected multi-uops instruction!");
    break;
  case ARM::VLDMQ:
  case ARM::VSTMQ:
    return 2;

  // The number of uOps for load / store multiple are determined by the number
  // of registers.
  // On Cortex-A8, each pair of register loads / stores can be scheduled on the
  // same cycle. The scheduling for the first load / store must be done
  // separately by assuming the address is not 64-bit aligned.
  // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address
  // is not 64-bit aligned, then AGU would take an extra cycle.
  // For VFP / NEON load / store multiple, the formula is
  // (#reg / 2) + (#reg % 2) + 1.
  case ARM::VLDMD:
  case ARM::VLDMS:
  case ARM::VLDMD_UPD:
  case ARM::VLDMS_UPD:
  case ARM::VSTMD:
  case ARM::VSTMS:
  case ARM::VSTMD_UPD:
  case ARM::VSTMS_UPD: {
    // Register-list operands are the variadic tail beyond the fixed operands.
    unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands();
    return (NumRegs / 2) + (NumRegs % 2) + 1;
  }
  case ARM::LDM_RET:
  case ARM::LDM:
  case ARM::LDM_UPD:
  case ARM::STM:
  case ARM::STM_UPD:
  case ARM::tLDM:
  case ARM::tLDM_UPD:
  case ARM::tSTM_UPD:
  case ARM::tPOP_RET:
  case ARM::tPOP:
  case ARM::tPUSH:
  case ARM::t2LDM_RET:
  case ARM::t2LDM:
  case ARM::t2LDM_UPD:
  case ARM::t2STM:
  case ARM::t2STM_UPD: {
    // NOTE(review): the +1 here (vs. the VFP/NEON case above) presumably
    // accounts for a fixed operand counted differently in these forms —
    // confirm against the instruction definitions.
    unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands() + 1;
    if (Subtarget.isCortexA8()) {
      // 4 registers would be issued: 1, 2, 1.
      // 5 registers would be issued: 1, 2, 2.
      return 1 + (NumRegs / 2);
    } else if (Subtarget.isCortexA9()) {
      UOps = (NumRegs / 2);
      // If there are an odd number of registers or if it's not 64-bit aligned,
      // then it takes an extra AGU (Address Generation Unit) cycle.
      if ((NumRegs % 2) ||
          !MI->hasOneMemOperand() ||
          (*MI->memoperands_begin())->getAlignment() < 8)
        ++UOps;
      return UOps;
    } else {
      // Assume the worst.
      return NumRegs;
    }
  }
  }
}
|