//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMConstantPoolValue.h"
#include "ARMHazardRecognizer.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMRegisterInfo.h"
#include "ARMGenInstrInfo.inc"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/STLExtras.h"

using namespace llvm;

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

/// ARM_MLxEntry - Record information about MLA / MLS instructions.
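/// For example, ARM::VMLAS (a scalar floating-point multiply-accumulate) can
/// be expanded into an ARM::VMULS followed by an ARM::VADDS when breaking the
/// accumulation chain avoids a pipeline hazard (illustrative; the exact
/// expansion pairs are listed in ARM_MLxTable below).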
struct ARM_MLxEntry {
  unsigned MLxOpc;     // MLA / MLS opcode
  unsigned MulOpc;     // Expanded multiplication opcode
  unsigned AddSubOpc;  // Expanded add / sub opcode
  bool NegAcc;         // True if the acc is negated before the add / sub.
  bool HasLane;        // True if instruction has an extra "lane" operand.
};

static const ARM_MLxEntry ARM_MLxTable[] = {
  // MLxOpc,        MulOpc,        AddSubOpc,   NegAcc, HasLane
  // fp scalar ops
  { ARM::VMLAS,     ARM::VMULS,    ARM::VADDS,  false,  false },
  { ARM::VMLSS,     ARM::VMULS,    ARM::VSUBS,  false,  false },
  { ARM::VMLAD,     ARM::VMULD,    ARM::VADDD,  false,  false },
  { ARM::VMLSD,     ARM::VMULD,    ARM::VSUBD,  false,  false },
  { ARM::VNMLAS,    ARM::VNMULS,   ARM::VSUBS,  true,   false },
  { ARM::VNMLSS,    ARM::VMULS,    ARM::VSUBS,  true,   false },
  { ARM::VNMLAD,    ARM::VNMULD,   ARM::VSUBD,  true,   false },
  { ARM::VNMLSD,    ARM::VMULD,    ARM::VSUBD,  true,   false },

  // fp SIMD ops
  { ARM::VMLAfd,    ARM::VMULfd,   ARM::VADDfd, false,  false },
  { ARM::VMLSfd,    ARM::VMULfd,   ARM::VSUBfd, false,  false },
  { ARM::VMLAfq,    ARM::VMULfq,   ARM::VADDfq, false,  false },
  { ARM::VMLSfq,    ARM::VMULfq,   ARM::VSUBfq, false,  false },
  { ARM::VMLAslfd,  ARM::VMULslfd, ARM::VADDfd, false,  true  },
  { ARM::VMLSslfd,  ARM::VMULslfd, ARM::VSUBfd, false,  true  },
  { ARM::VMLAslfq,  ARM::VMULslfq, ARM::VADDfq, false,  true  },
  { ARM::VMLSslfq,  ARM::VMULslfq, ARM::VSUBfq, false,  true  },
};

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)),
    Subtarget(STI) {
  for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
    if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
      assert(false && "Duplicated entries?");
    MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
    MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
  }
}

// Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrImpl
// currently defaults to no prepass hazard recognizer.
ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetHazardRecognizer(const TargetMachine *TM,
                             const ScheduleDAG *DAG) const {
  if (usePreRAHazardRecognizer()) {
    const InstrItineraryData *II = TM->getInstrItineraryData();
    return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
  }
  return TargetInstrInfoImpl::CreateTargetHazardRecognizer(TM, DAG);
}

ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  if (Subtarget.isThumb2() || Subtarget.hasVFP2())
    return (ScheduleHazardRecognizer *)
      new ARMHazardRecognizer(II, *this, getRegisterInfo(), Subtarget, DAG);
  return TargetInstrInfoImpl::CreateTargetPostRAHazardRecognizer(II, DAG);
}

MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  // FIXME: Thumb2 support.
  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  uint64_t TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
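  // For example, a post-indexed load such as
  //   ldr r0, [r1], #4
  // becomes an un-indexed load plus an explicit base-register update
  // (registers illustrative):
  //   ldr r0, [r1]
  //   add r1, r1, #4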
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned NumOps = TID.getNumOperands();
  bool isLoad = !TID.mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default:
    assert(false && "Unknown indexed op!");
    return NULL;
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3 : {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
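// For example, a block that ends in (labels illustrative)
//   Bcc LBB0_2    ; conditional branch
//   B   LBB0_3    ; unconditional branch
// is reported with TBB = LBB0_2, FBB = LBB0_3, and Cond holding the
// condition-code and CPSR operands of the Bcc.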
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now; the only remaining terminator is an unconditional
        // branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with a branch table followed by an unconditional
  // branch. The branch folder can create these, and we must get rid of them
  // for correctness of Thumb constant islands.
  if ((isJumpTableBranchOpcode(SecondLastOpc) ||
       isIndirectBranchOpcode(SecondLastOpc)) &&
      isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}
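
// InsertBranch mirrors AnalyzeBranch: with only TBB set it emits a single
// unconditional branch; with TBB, FBB, and a two-operand condition it emits a
// conditional branch to TBB followed by an unconditional branch to FBB. It
// returns the number of instructions inserted.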
unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc = !AFI->isThumbFunction()
    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
    else
      BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}
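
// PredicateInstruction rewrites an instruction to execute under the given
// predicate: an unconditional branch is swapped for its conditional form, and
// any other predicable instruction has its predicate operands updated (e.g. a
// MOVr predicated on EQ is printed as "moveq").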
bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}
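
// SubsumesPredicate returns true when the first condition covers the second:
// any machine state that satisfies Pred2 also satisfies Pred1. For example,
// HS (unsigned >=) subsumes HI (unsigned >), and AL subsumes everything.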
bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  // FIXME: This confuses implicit_def with optional CPSR def.
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
    return false;

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == ARM::CPSR) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.isPredicable())
    return false;

  if ((TID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
    ARMFunctionInfo *AFI =
      MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
    return AFI->isThumb2Function();
  }
  return true;
}

/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
LLVM_ATTRIBUTE_NOINLINE
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI);
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  assert(JTI < JT.size());
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
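/// Most instructions report a fixed width from TSFlags (4 bytes for ARM and
/// wide Thumb2 encodings, 2 for narrow Thumb); SizeSpecial pseudos such as
/// jump-table branches compute their size from the number of table entries.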
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  // Basic size info comes from the TSFlags field.
  const TargetInstrDesc &TID = MI->getDesc();
  uint64_t TSFlags = TID.TSFlags;

  unsigned Opc = MI->getOpcode();
  switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
  default: {
    // If this machine instr is an inline asm, measure it.
    if (MI->getOpcode() == ARM::INLINEASM)
      return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
    if (MI->isLabel())
      return 0;
    switch (Opc) {
    default:
      llvm_unreachable("Unknown or unset size field for instr!");
    case TargetOpcode::IMPLICIT_DEF:
    case TargetOpcode::KILL:
    case TargetOpcode::PROLOG_LABEL:
    case TargetOpcode::EH_LABEL:
    case TargetOpcode::DBG_VALUE:
      return 0;
    }
    break;
  }
  case ARMII::Size8Bytes: return 8;          // ARM instruction x 2.
  case ARMII::Size4Bytes: return 4;          // ARM / Thumb2 instruction.
  case ARMII::Size2Bytes: return 2;          // Thumb1 instruction.
  case ARMII::SizeSpecial: {
    switch (Opc) {
    case ARM::MOVi16_ga_pcrel:
    case ARM::MOVTi16_ga_pcrel:
    case ARM::t2MOVi16_ga_pcrel:
    case ARM::t2MOVTi16_ga_pcrel:
      return 4;
    case ARM::MOVi32imm:
    case ARM::t2MOVi32imm:
      return 8;
    case ARM::CONSTPOOL_ENTRY:
      // If this machine instr is a constant pool entry, its size is recorded
      // as operand #2.
      return MI->getOperand(2).getImm();
    case ARM::Int_eh_sjlj_longjmp:
      return 16;
    case ARM::tInt_eh_sjlj_longjmp:
      return 10;
    case ARM::Int_eh_sjlj_setjmp:
    case ARM::Int_eh_sjlj_setjmp_nofp:
      return 20;
    case ARM::tInt_eh_sjlj_setjmp:
    case ARM::t2Int_eh_sjlj_setjmp:
    case ARM::t2Int_eh_sjlj_setjmp_nofp:
      return 12;
    case ARM::BR_JTr:
    case ARM::BR_JTm:
    case ARM::BR_JTadd:
    case ARM::tBR_JTr:
    case ARM::t2BR_JT:
    case ARM::t2TBB_JT:
    case ARM::t2TBH_JT: {
      // These are jumptable branches, i.e. a branch followed by an inlined
      // jumptable. The size is 4 + 4 * number of entries. For TBB, each
      // entry is one byte; for TBH, two bytes each.
      unsigned EntrySize = (Opc == ARM::t2TBB_JT)
        ? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4);
      unsigned NumOps = TID.getNumOperands();
      MachineOperand JTOP =
        MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
      unsigned JTI = JTOP.getIndex();
      const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
      assert(MJTI != 0);
      const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
      assert(JTI < JT.size());
      // Thumb instructions are 2 bytes aligned, but JT entries are 4 bytes
      // aligned. The assembler / linker may add 2 bytes of padding just
      // before the JT entries. The size does not include this padding; the
      // constant islands pass does separate bookkeeping for it.
      // FIXME: If we know the size of the function is less than (1 << 16) * 2
      // bytes, we can use 16-bit entries instead. Then there won't be an
      // alignment issue.
      unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
      unsigned NumEntries = getNumJTEntries(JT, JTI);
      if (Opc == ARM::t2TBB_JT && (NumEntries & 1))
        // Make sure the instruction that follows TBB is 2-byte aligned.
        // FIXME: Constant island pass should insert an "ALIGN" instruction
        // instead.
        ++NumEntries;
      return NumEntries * EntrySize + InstSize;
    }
    default:
      // Otherwise, pseudo-instruction sizes are zero.
      return 0;
    }
  }
  }
  return 0; // Not reached
}
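
// copyPhysReg selects the widest legal move for the register classes
// involved: MOVr for GPR copies and the VMOV family (VMOVS, VMOVRS, VMOVSR,
// VMOVD, VMOVQ, VMOVQQ, VMOVQQQQ) for the VFP / NEON classes.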
void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  bool GPRDest = ARM::GPRRegClass.contains(DestReg);
  bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);

  if (GPRDest && GPRSrc) {
    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
                                  .addReg(SrcReg, getKillRegState(KillSrc))));
    return;
  }

  bool SPRDest = ARM::SPRRegClass.contains(DestReg);
  bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);

  unsigned Opc;
  if (SPRDest && SPRSrc)
    Opc = ARM::VMOVS;
  else if (GPRDest && SPRSrc)
    Opc = ARM::VMOVRS;
  else if (SPRDest && GPRSrc)
    Opc = ARM::VMOVSR;
  else if (ARM::DPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD;
  else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVQ;
  else if (ARM::QQPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVQQ;
  else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVQQQQ;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
  MIB.addReg(SrcReg, getKillRegState(KillSrc));
  if (Opc != ARM::VMOVQQ && Opc != ARM::VMOVQQQQ)
    AddDefaultPred(MIB);
}

static const
MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB,
                             unsigned Reg, unsigned SubIdx, unsigned State,
                             const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}
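
// Spills prefer the widest properly-aligned store available: NEON Q / QQ
// registers use the 128-bit-aligned VST1 pseudos when the frame slot allows
// it, and fall back to VSTM register-multiple stores otherwise.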
void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo(
                                      PseudoSourceValue::getFixedStack(FI)),
                            MachineMemOperand::MOStore,
                            MFI.getObjectSize(FI),
                            Align);

  // tGPR is used sometimes in ARM instructions that need to avoid using
  // certain registers. Just treat it as GPR here. Likewise, rGPR.
  if (RC == ARM::tGPRRegisterClass || RC == ARM::tcGPRRegisterClass
      || RC == ARM::rGPRRegisterClass)
    RC = ARM::GPRRegisterClass;

  switch (RC->getID()) {
  case ARM::GPRRegClassID:
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STRi12))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    break;
  case ARM::SPRRegClassID:
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    break;
  case ARM::DPRRegClassID:
  case ARM::DPR_VFP2RegClassID:
  case ARM::DPR_8RegClassID:
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    break;
  case ARM::QPRRegClassID:
  case ARM::QPR_VFP2RegClassID:
  case ARM::QPR_8RegClassID:
    if (Align >= 16 && getRegisterInfo().needsStackRealignment(MF)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64Pseudo))
                     .addFrameIndex(FI).addImm(16)
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addMemOperand(MMO));
    } else {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQIA))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI)
                     .addMemOperand(MMO));
    }
    break;
  case ARM::QQPRRegClassID:
  case ARM::QQPR_VFP2RegClassID:
    if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
      // FIXME: It's possible to only store part of the QQ register if the
      // spilled def has a sub-register index.
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64QPseudo))
                     .addFrameIndex(FI).addImm(16)
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addMemOperand(MMO));
    } else {
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                       .addFrameIndex(FI))
        .addMemOperand(MMO);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
    }
    break;
  case ARM::QQQQPRRegClassID: {
    MachineInstrBuilder MIB =
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                     .addFrameIndex(FI))
      .addMemOperand(MMO);
    MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
    MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
    MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
    MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
    MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
    MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
    MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
    AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
    break;
  }
  default:
    llvm_unreachable("Unknown regclass!");
  }
}

unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STRrs:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::STRi12:
  case ARM::t2STRi12:
  case ARM::tSpill:
  case ARM::VSTRD:
  case ARM::VSTRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VST1q64Pseudo:
    if (MI->getOperand(0).isFI() &&
        MI->getOperand(2).getSubReg() == 0) {
      FrameIndex = MI->getOperand(0).getIndex();
      return MI->getOperand(2).getReg();
    }
    break;
  case ARM::VSTMQIA:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}
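
// Reloads mirror the spill code above: aligned NEON slots are refilled with
// the VLD1 pseudos, everything else with the VLDR / VLDM / LDR forms.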
void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(
                MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
                            MachineMemOperand::MOLoad,
                            MFI.getObjectSize(FI),
                            Align);

  // tGPR is used sometimes in ARM instructions that need to avoid using
  // certain registers. Just treat it as GPR here. Likewise, rGPR.
  if (RC == ARM::tGPRRegisterClass || RC == ARM::tcGPRRegisterClass
      || RC == ARM::rGPRRegisterClass)
    RC = ARM::GPRRegisterClass;

  switch (RC->getID()) {
  case ARM::GPRRegClassID:
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    break;
  case ARM::SPRRegClassID:
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    break;
  case ARM::DPRRegClassID:
  case ARM::DPR_VFP2RegClassID:
  case ARM::DPR_8RegClassID:
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    break;
  case ARM::QPRRegClassID:
  case ARM::QPR_VFP2RegClassID:
  case ARM::QPR_8RegClassID:
    if (Align >= 16 && getRegisterInfo().needsStackRealignment(MF)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64Pseudo), DestReg)
                     .addFrameIndex(FI).addImm(16)
                     .addMemOperand(MMO));
    } else {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
                     .addFrameIndex(FI)
                     .addMemOperand(MMO));
    }
    break;
  case ARM::QQPRRegClassID:
  case ARM::QQPR_VFP2RegClassID:
    if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
                     .addFrameIndex(FI).addImm(16)
                     .addMemOperand(MMO));
    } else {
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                       .addFrameIndex(FI))
        .addMemOperand(MMO);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
      AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
    }
    break;
  case ARM::QQQQPRRegClassID: {
    MachineInstrBuilder MIB =
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                     .addFrameIndex(FI))
      .addMemOperand(MMO);
    MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
    MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
    MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
    MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
    MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::Define, TRI);
    MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::Define, TRI);
    MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::Define, TRI);
    AddDReg(MIB, DestReg, ARM::dsub_7, RegState::Define, TRI);
    break;
  }
  default:
    llvm_unreachable("Unknown regclass!");
  }
}

unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDRrs:
  case ARM::t2LDRs:  // FIXME: don't use t2LDRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::LDRi12:
  case ARM::t2LDRi12:
  case ARM::tRestore:
  case ARM::VLDRD:
  case ARM::VLDRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLD1q64Pseudo:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLDMQIA:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

MachineInstr*
ARMBaseInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                           int FrameIx, uint64_t Offset,
                                           const MDNode *MDPtr,
                                           DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(ARM::DBG_VALUE))
    .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr);
  return &*MIB;
}

/// Create a copy of a const pool value. Update CPI to the new index and
/// return the label UID.
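/// Rematerializing a PIC load needs a fresh PC label, and the constant pool
/// entry refers to that label, so the entry must be cloned rather than
/// shared between the two loads.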
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
  MachineConstantPool *MCP = MF.getConstantPool();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
  assert(MCPE.isMachineConstantPoolEntry() &&
         "Expecting a machine constantpool entry!");
  ARMConstantPoolValue *ACPV =
    static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);

  unsigned PCLabelId = AFI->createPICLabelUId();
  ARMConstantPoolValue *NewCPV = 0;
  // FIXME: The below assumes PIC relocation model and that the function
  // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
  // zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR
  // instructions, so that's probably OK, but is PIC always correct when
  // we get here?
  if (ACPV->isGlobalValue())
    NewCPV = new ARMConstantPoolValue(ACPV->getGV(), PCLabelId,
                                      ARMCP::CPValue, 4);
  else if (ACPV->isExtSymbol())
    NewCPV = new ARMConstantPoolValue(MF.getFunction()->getContext(),
                                      ACPV->getSymbol(), PCLabelId, 4);
  else if (ACPV->isBlockAddress())
    NewCPV = new ARMConstantPoolValue(ACPV->getBlockAddress(), PCLabelId,
                                      ARMCP::CPBlockAddress, 4);
  else if (ACPV->isLSDA())
    NewCPV = new ARMConstantPoolValue(MF.getFunction(), PCLabelId,
                                      ARMCP::CPLSDA, 4);
  else
    llvm_unreachable("Unexpected ARM constantpool value type!!");
  CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment());
  return PCLabelId;
}

void ARMBaseInstrInfo::
reMaterialize(MachineBasicBlock &MBB,
              MachineBasicBlock::iterator I,
              unsigned DestReg, unsigned SubIdx,
              const MachineInstr *Orig,
              const TargetRegisterInfo &TRI) const {
  unsigned Opcode = Orig->getOpcode();
  switch (Opcode) {
  default: {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
    MBB.insert(I, MI);
    break;
  }
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    MachineFunction &MF = *MBB.getParent();
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode),
                                      DestReg)
      .addConstantPoolIndex(CPI).addImm(PCLabelId);
    MIB->setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end());
    break;
  }
  }
}

MachineInstr *
ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const {
  MachineInstr *MI = TargetInstrInfoImpl::duplicate(Orig, MF);
  switch(Orig->getOpcode()) {
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    Orig->getOperand(1).setIndex(CPI);
    Orig->getOperand(2).setImm(PCLabelId);
    break;
  }
  }
  return MI;
}
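
// produceSameValue is used by rematerialization and machine CSE to prove
// that two instructions compute identical values, e.g. two constant-pool
// loads whose entries compare equal via ARMConstantPoolValue::hasSameValue.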
bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
                                        const MachineInstr *MI1,
                                        const MachineRegisterInfo *MRI) const {
  int Opcode = MI0->getOpcode();
  if (Opcode == ARM::t2LDRpci ||
      Opcode == ARM::t2LDRpci_pic ||
      Opcode == ARM::tLDRpci ||
      Opcode == ARM::tLDRpci_pic ||
      Opcode == ARM::MOV_ga_dyn ||
      Opcode == ARM::MOV_ga_pcrel ||
      Opcode == ARM::MOV_ga_pcrel_ldr ||
      Opcode == ARM::t2MOV_ga_dyn ||
      Opcode == ARM::t2MOV_ga_pcrel) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    const MachineOperand &MO0 = MI0->getOperand(1);
    const MachineOperand &MO1 = MI1->getOperand(1);
    if (MO0.getOffset() != MO1.getOffset())
      return false;

    if (Opcode == ARM::MOV_ga_dyn ||
        Opcode == ARM::MOV_ga_pcrel ||
        Opcode == ARM::MOV_ga_pcrel_ldr ||
        Opcode == ARM::t2MOV_ga_dyn ||
        Opcode == ARM::t2MOV_ga_pcrel)
      // Ignore the PC labels.
      return MO0.getGlobal() == MO1.getGlobal();

    const MachineFunction *MF = MI0->getParent()->getParent();
    const MachineConstantPool *MCP = MF->getConstantPool();
    int CPI0 = MO0.getIndex();
    int CPI1 = MO1.getIndex();
    const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
    const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
    bool isARMCP0 = MCPE0.isMachineConstantPoolEntry();
    bool isARMCP1 = MCPE1.isMachineConstantPoolEntry();
    if (isARMCP0 && isARMCP1) {
      ARMConstantPoolValue *ACPV0 =
        static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
      ARMConstantPoolValue *ACPV1 =
        static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
      return ACPV0->hasSameValue(ACPV1);
    } else if (!isARMCP0 && !isARMCP1) {
      return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal;
    }
    return false;
  } else if (Opcode == ARM::PICLDR) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    unsigned Addr0 = MI0->getOperand(1).getReg();
    unsigned Addr1 = MI1->getOperand(1).getReg();
    if (Addr0 != Addr1) {
      if (!MRI ||
          !TargetRegisterInfo::isVirtualRegister(Addr0) ||
          !TargetRegisterInfo::isVirtualRegister(Addr1))
        return false;

      // This assumes SSA form.
      MachineInstr *Def0 = MRI->getVRegDef(Addr0);
      MachineInstr *Def1 = MRI->getVRegDef(Addr1);
// Check if the loaded value, e.g. a constantpool of a global address, are
|
|
|
|
// the same.
|
|
|
|
if (!produceSameValue(Def0, Def1, MRI))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (unsigned i = 3, e = MI0->getNumOperands(); i != e; ++i) {
|
|
|
|
// %vreg12<def> = PICLDR %vreg11, 0, pred:14, pred:%noreg
|
|
|
|
const MachineOperand &MO0 = MI0->getOperand(i);
|
|
|
|
const MachineOperand &MO1 = MI1->getOperand(i);
|
|
|
|
if (!MO0.isIdenticalTo(MO1))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
2009-11-07 04:04:34 +00:00
|
|
|
}
|
|
|
|
|
2010-03-03 01:44:33 +00:00
|
|
|
return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
|
2009-11-07 04:04:34 +00:00
|
|
|
}
|
|
|
|
|
/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
/// determine if two loads are loading from the same base address. It should
/// only return true if the base pointers are the same and the only difference
/// between the two addresses is the offset. It also returns the offsets by
/// reference.
bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                               int64_t &Offset1,
                                               int64_t &Offset2) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;

  switch (Load1->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRSHi12:
    break;
  }

  switch (Load2->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRSHi12:
    break;
  }

  // Check if base addresses and chain operands match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(4) != Load2->getOperand(4))
    return false;

  // Index should be Reg0.
  if (Load1->getOperand(3) != Load2->getOperand(3))
    return false;

  // Determine the offsets.
  if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
      isa<ConstantSDNode>(Load2->getOperand(1))) {
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
    Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
    return true;
  }

  return false;
}

/// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
/// be scheduled together. On some targets if two loads are loading from
/// addresses in the same cache line, it's better if they are scheduled
/// together. This function takes two integers that represent the load offsets
/// from the common base address. It returns true if it decides it's desirable
/// to schedule the two loads together. "NumLoads" is the number of loads that
/// have already been scheduled after Load1.
bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                               int64_t Offset1, int64_t Offset2,
                                               unsigned NumLoads) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  assert(Offset2 > Offset1);

  if ((Offset2 - Offset1) / 8 > 64)
    return false;

  if (Load1->getMachineOpcode() != Load2->getMachineOpcode())
    return false;  // FIXME: overly conservative?

  // Four loads in a row should be sufficient.
  if (NumLoads >= 3)
    return false;

  return true;
}

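/// isSchedulingBoundary - Decide whether instructions may be scheduled
/// across MI. Debug values are never boundaries; terminators, labels,
/// instructions immediately preceding a t2IT, and instructions defining SP
/// all are.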
bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
  // Debug info is never a scheduling boundary. It's necessary to be explicit
  // due to the special treatment of IT instructions below, otherwise a
  // dbg_value followed by an IT will result in the IT instruction being
  // considered a scheduling hazard, which is wrong. It should be the actual
  // instruction preceding the dbg_value instruction(s), just like it is
  // when debug info is not present.
  if (MI->isDebugValue())
    return false;

  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Treat the start of the IT block as a scheduling boundary, but schedule
  // t2IT along with all instructions following it.
  // FIXME: This is a big hammer. But the alternative is to add all potential
  // true and anti dependencies to IT block instructions as implicit operands
  // to the t2IT instruction. The added compile time and complexity does not
  // seem worth it.
  MachineBasicBlock::const_iterator I = MI;
  // Make sure to skip any dbg_value instructions
  while (++I != MBB->end() && I->isDebugValue())
    ;
  if (I != MBB->end() && I->getOpcode() == ARM::t2IT)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  if (MI->definesRegister(ARM::SP))
    return true;

  return false;
}

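/// isProfitableToIfCvt - Estimate whether predicating a block of NumCycles
/// cycles beats branching around it. The unpredicated cost is modeled as
///   Probability * NumCycles + 1 + (1 - Confidence) * MispredictionPenalty
/// (expected execution cost, plus the branch, plus the expected misprediction
/// cost), and predication wins when NumCycles + ExtraPredCycles is below it.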
bool ARMBaseInstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                           unsigned NumCycles,
                                           unsigned ExtraPredCycles,
                                           float Probability,
                                           float Confidence) const {
  if (!NumCycles)
    return false;

  // Attempt to estimate the relative costs of predication versus branching.
  float UnpredCost = Probability * NumCycles;
  UnpredCost += 1.0; // The branch itself
  UnpredCost += (1.0 - Confidence) * Subtarget.getMispredictionPenalty();

  return (float)(NumCycles + ExtraPredCycles) < UnpredCost;
}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned TCycles, unsigned TExtra,
                    MachineBasicBlock &FMBB,
                    unsigned FCycles, unsigned FExtra,
                    float Probability, float Confidence) const {
  if (!TCycles || !FCycles)
    return false;

  // Attempt to estimate the relative costs of predication versus branching.
  float UnpredCost = Probability * TCycles + (1.0 - Probability) * FCycles;
  UnpredCost += 1.0; // The branch itself
  UnpredCost += (1.0 - Confidence) * Subtarget.getMispredictionPenalty();

  return (float)(TCycles + FCycles + TExtra + FExtra) < UnpredCost;
}

/// getInstrPredicate - If the instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
ARMCC::CondCodes
llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx == -1) {
    PredReg = 0;
    return ARMCC::AL;
  }

  PredReg = MI->getOperand(PIdx+1).getReg();
  return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
}

int llvm::getMatchingCondBranchOpcode(int Opc) {
  if (Opc == ARM::B)
    return ARM::Bcc;
  else if (Opc == ARM::tB)
    return ARM::tBcc;
  else if (Opc == ARM::t2B)
    return ARM::t2Bcc;

  llvm_unreachable("Unknown unconditional branch opcode!");
  return 0;
}

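/// emitARMRegPlusImmediate - Emit a sequence of ADDri / SUBri instructions
/// that materialize DestReg = BaseReg +/- |NumBytes|. An ARM so_imm operand
/// can only encode an 8-bit value rotated right by an even amount, so the
/// immediate is peeled off one encodable chunk per instruction. For example,
/// 0x1001 has set bits too far apart for a single so_imm and is emitted as
/// two adds, of 0x1 and of 0x1000.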
void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                                   unsigned DestReg, unsigned BaseReg, int NumBytes,
                                   ARMCC::CondCodes Pred, unsigned PredReg,
                                   const ARMBaseInstrInfo &TII, unsigned MIFlags) {
  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  while (NumBytes) {
    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    // We will handle these bits from offset, clear them.
    NumBytes &= ~ThisVal;

    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");

    // Build the new ADD / SUB.
    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
      .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
      .addImm((unsigned)Pred).addReg(PredReg).addReg(0)
      .setMIFlags(MIFlags);
    BaseReg = DestReg;
  }
}

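/// rewriteARMFrameIndex - Rewrite MI's frame-index operand (at FrameRegIdx)
/// to use FrameReg, folding as much of Offset as the instruction's addressing
/// mode can encode. Returns true if the offset was folded completely;
/// otherwise the unencoded remainder is passed back through Offset.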
bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                unsigned FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const TargetInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      Offset = 0;
      return true;
    } else if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      Offset = 0;
      return true;
    }

    // Otherwise, pull as much of the immediate into this ADDri/SUBri
    // as possible.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode_i12: {
      ImmIdx = FrameRegIdx + 1;
      InstrOffs = MI.getOperand(ImmIdx).getImm();
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode2: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode3: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    }
    case ARMII::AddrMode4:
    case ARMII::AddrMode6:
      // Can't fold any offset even if it's zero.
      return false;
    case ARMII::AddrMode5: {
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      break;
    }
    default:
      llvm_unreachable("Unsupported addressing mode!");
      break;
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
    }

    // Attempt to fold address comp. if opcode has offset bits
    if (NumBits > 0) {
      // Common case: small offset, fits into instruction.
      MachineOperand &ImmOp = MI.getOperand(ImmIdx);
      int ImmedOffset = Offset / Scale;
      unsigned Mask = (1 << NumBits) - 1;
      if ((unsigned)Offset <= Mask * Scale) {
        // Replace the FrameIndex with sp
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        // FIXME: When addrmode2 goes away, this will simplify (like the
        // T2 version), as the LDR.i12 versions don't need the encoding
        // tricks for the offset value.
        if (isSub) {
          if (AddrMode == ARMII::AddrMode_i12)
            ImmedOffset = -ImmedOffset;
          else
            ImmedOffset |= 1 << NumBits;
        }
        ImmOp.ChangeToImmediate(ImmedOffset);
        Offset = 0;
        return true;
      }

      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      if (isSub) {
        if (AddrMode == ARMII::AddrMode_i12)
          ImmedOffset = -ImmedOffset;
        else
          ImmedOffset |= 1 << NumBits;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask*Scale);
    }
  }

  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0;
}

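/// AnalyzeCompare - Decompose a CMP / TST immediate into its source
/// register, mask, and compare value, the inputs OptimizeCompareInstr needs
/// when trying to fold the comparison away.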
bool ARMBaseInstrInfo::
AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg, int &CmpMask,
               int &CmpValue) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::CMPri:
  case ARM::t2CMPri:
    SrcReg = MI->getOperand(0).getReg();
    CmpMask = ~0;
    CmpValue = MI->getOperand(1).getImm();
    return true;
  case ARM::TSTri:
  case ARM::t2TSTri:
    SrcReg = MI->getOperand(0).getReg();
    CmpMask = MI->getOperand(1).getImm();
    CmpValue = 0;
    return true;
  }

  return false;
}

/// isSuitableForMask - Identify a suitable 'and' instruction that
/// operates on the given source register and applies the same mask
/// as a 'tst' instruction. Provide a limited look-through for copies.
/// When successful, MI will hold the found instruction.
static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg,
                              int CmpMask, bool CommonUse) {
  switch (MI->getOpcode()) {
  case ARM::ANDri:
  case ARM::t2ANDri:
    if (CmpMask != MI->getOperand(2).getImm())
      return false;
    if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg())
      return true;
    break;
  case ARM::COPY: {
    // Walk down one instruction which is potentially an 'and'.
    const MachineInstr &Copy = *MI;
    MachineBasicBlock::iterator AND(
      llvm::next(MachineBasicBlock::iterator(MI)));
    if (AND == MI->getParent()->end()) return false;
    MI = AND;
    return isSuitableForMask(MI, Copy.getOperand(0).getReg(),
                             CmpMask, true);
  }
  }

  return false;
}

/// OptimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
bool ARMBaseInstrInfo::
OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask,
                     int CmpValue, const MachineRegisterInfo *MRI) const {
  if (CmpValue != 0)
    return false;

  MachineRegisterInfo::def_iterator DI = MRI->def_begin(SrcReg);
  if (llvm::next(DI) != MRI->def_end())
    // Only support one definition.
    return false;

  MachineInstr *MI = &*DI;

  // Masked compares sometimes use the same register as the corresponding 'and'.
  if (CmpMask != ~0) {
    if (!isSuitableForMask(MI, SrcReg, CmpMask, false)) {
      MI = 0;
      for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(SrcReg),
           UE = MRI->use_end(); UI != UE; ++UI) {
        if (UI->getParent() != CmpInstr->getParent()) continue;
        MachineInstr *PotentialAND = &*UI;
        if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true))
          continue;
        MI = PotentialAND;
        break;
      }
      if (!MI) return false;
    }
  }

  // Conservatively refuse to convert an instruction which isn't in the same BB
  // as the comparison.
  if (MI->getParent() != CmpInstr->getParent())
    return false;

  // Check that CPSR isn't set between the comparison instruction and the one we
  // want to change.
  MachineBasicBlock::const_iterator I = CmpInstr, E = MI,
    B = MI->getParent()->begin();

  // Early exit if CmpInstr is at the beginning of the BB.
  if (I == B) return false;

  --I;
  for (; I != E; --I) {
    const MachineInstr &Instr = *I;

    for (unsigned IO = 0, EO = Instr.getNumOperands(); IO != EO; ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (!MO.isReg()) continue;

      // This instruction modifies or uses CPSR after the one we want to
      // change. We can't do this transformation.
      if (MO.getReg() == ARM::CPSR)
        return false;
    }

    if (I == B)
      // The 'and' is below the comparison instruction.
      return false;
  }

  // Set the "zero" bit in CPSR.
  switch (MI->getOpcode()) {
  default: break;
  case ARM::RSBrr:
  case ARM::RSBri:
  case ARM::RSCrr:
  case ARM::RSCri:
  case ARM::ADDrr:
  case ARM::ADDri:
  case ARM::ADCrr:
  case ARM::ADCri:
  case ARM::SUBrr:
  case ARM::SUBri:
  case ARM::SBCrr:
  case ARM::SBCri:
  case ARM::t2RSBri:
  case ARM::t2ADDrr:
  case ARM::t2ADDri:
  case ARM::t2ADCrr:
  case ARM::t2ADCri:
  case ARM::t2SUBrr:
  case ARM::t2SUBri:
  case ARM::t2SBCrr:
  case ARM::t2SBCri:
  case ARM::ANDrr:
  case ARM::ANDri:
  case ARM::t2ANDrr:
  case ARM::t2ANDri:
  case ARM::ORRrr:
  case ARM::ORRri:
  case ARM::t2ORRrr:
  case ARM::t2ORRri:
  case ARM::EORrr:
  case ARM::EORri:
  case ARM::t2EORrr:
  case ARM::t2EORri: {
    // Scan forward for the use of CPSR. If the condition code requires
    // checking of the V bit, then this is not safe to do. If we can't find
    // the CPSR use (i.e. it is used in another block), then it's not safe
    // to perform the optimization.
    bool isSafe = false;
    I = CmpInstr;
    E = MI->getParent()->end();
    while (!isSafe && ++I != E) {
      const MachineInstr &Instr = *I;
      for (unsigned IO = 0, EO = Instr.getNumOperands();
           !isSafe && IO != EO; ++IO) {
        const MachineOperand &MO = Instr.getOperand(IO);
        if (!MO.isReg() || MO.getReg() != ARM::CPSR)
          continue;
        if (MO.isDef()) {
          isSafe = true;
          break;
        }
        // Condition code is after the operand before CPSR.
        ARMCC::CondCodes CC = (ARMCC::CondCodes)Instr.getOperand(IO-1).getImm();
        switch (CC) {
        default:
          isSafe = true;
          break;
        case ARMCC::VS:
        case ARMCC::VC:
        case ARMCC::GE:
        case ARMCC::LT:
        case ARMCC::GT:
        case ARMCC::LE:
          return false;
        }
      }
    }

    if (!isSafe)
      return false;

    // Toggle the optional operand to CPSR.
    MI->getOperand(5).setReg(ARM::CPSR);
    MI->getOperand(5).setIsDef(true);
    CmpInstr->eraseFromParent();
    return true;
  }
  }

  return false;
}

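/// FoldImmediate - Try to fold a (t2)MOVi32imm whose only use feeds a
/// register-register add / sub / or / xor. The 32-bit immediate is split
/// into two so_imm-encodable halves: a new ri instruction applies the first
/// half into a fresh virtual register, and the use is rewritten into the ri
/// form applying the second half.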
bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
                                     MachineInstr *DefMI, unsigned Reg,
                                     MachineRegisterInfo *MRI) const {
  // Fold large immediates into add, sub, or, xor.
  unsigned DefOpc = DefMI->getOpcode();
  if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm)
    return false;
  if (!DefMI->getOperand(1).isImm())
    // Could be t2MOVi32imm <ga:xx>
    return false;

  if (!MRI->hasOneNonDBGUse(Reg))
    return false;

  unsigned UseOpc = UseMI->getOpcode();
  unsigned NewUseOpc = 0;
  uint32_t ImmVal = (uint32_t)DefMI->getOperand(1).getImm();
  uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
  bool Commute = false;
  switch (UseOpc) {
  default: return false;
  case ARM::SUBrr:
  case ARM::ADDrr:
  case ARM::ORRrr:
  case ARM::EORrr:
  case ARM::t2SUBrr:
  case ARM::t2ADDrr:
  case ARM::t2ORRrr:
  case ARM::t2EORrr: {
    Commute = UseMI->getOperand(2).getReg() != Reg;
    switch (UseOpc) {
    default: break;
    case ARM::SUBrr: {
      if (Commute)
        return false;
      ImmVal = -ImmVal;
      NewUseOpc = ARM::SUBri;
      // Fallthrough
    }
    case ARM::ADDrr:
    case ARM::ORRrr:
    case ARM::EORrr: {
      if (!ARM_AM::isSOImmTwoPartVal(ImmVal))
        return false;
      SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
      SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
      switch (UseOpc) {
      default: break;
      case ARM::ADDrr: NewUseOpc = ARM::ADDri; break;
      case ARM::ORRrr: NewUseOpc = ARM::ORRri; break;
      case ARM::EORrr: NewUseOpc = ARM::EORri; break;
      }
      break;
    }
    case ARM::t2SUBrr: {
      if (Commute)
        return false;
      ImmVal = -ImmVal;
      NewUseOpc = ARM::t2SUBri;
      // Fallthrough
    }
    case ARM::t2ADDrr:
    case ARM::t2ORRrr:
    case ARM::t2EORrr: {
      if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal))
        return false;
      SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
      SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
      switch (UseOpc) {
      default: break;
      case ARM::t2ADDrr: NewUseOpc = ARM::t2ADDri; break;
      case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break;
      case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break;
      }
      break;
    }
    }
  }
  }

  unsigned OpIdx = Commute ? 2 : 1;
  unsigned Reg1 = UseMI->getOperand(OpIdx).getReg();
  bool isKill = UseMI->getOperand(OpIdx).isKill();
  unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg));
  AddDefaultCC(AddDefaultPred(BuildMI(*UseMI->getParent(),
                                      *UseMI, UseMI->getDebugLoc(),
                                      get(NewUseOpc), NewReg)
                              .addReg(Reg1, getKillRegState(isKill))
                              .addImm(SOImmValV1)));
  UseMI->setDesc(get(NewUseOpc));
  UseMI->getOperand(1).setReg(NewReg);
  UseMI->getOperand(1).setIsKill();
  UseMI->getOperand(2).ChangeToImmediate(SOImmValV2);
  DefMI->eraseFromParent();
  return true;
}

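/// getNumMicroOps - Return the number of micro-ops MI issues as. For load /
/// store multiple the count depends on the register count: Cortex-A8 issues
/// register pairs per cycle (with a minimum of two uops), Cortex-A9 uses
/// (#reg / 2) + (#reg % 2) with an extra uop when the count is odd or the
/// address is not 64-bit aligned, and other subtargets pessimistically
/// assume one uop per register.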
unsigned
ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                 const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  const TargetInstrDesc &Desc = MI->getDesc();
  unsigned Class = Desc.getSchedClass();
  unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps)
    return UOps;

  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected multi-uops instruction!");
    break;
  case ARM::VLDMQIA:
  case ARM::VSTMQIA:
    return 2;

  // The number of uOps for load / store multiple is determined by the number
  // of registers.
  //
  // On Cortex-A8, each pair of register loads / stores can be scheduled on the
  // same cycle. The scheduling for the first load / store must be done
  // separately by assuming the address is not 64-bit aligned.
  //
  // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address
  // is not 64-bit aligned, then AGU would take an extra cycle. For VFP / NEON
  // load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1.
  case ARM::VLDMDIA:
  case ARM::VLDMDIA_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VLDMSIA:
  case ARM::VLDMSIA_UPD:
  case ARM::VLDMSDB_UPD:
  case ARM::VSTMDIA:
  case ARM::VSTMDIA_UPD:
  case ARM::VSTMDDB_UPD:
  case ARM::VSTMSIA:
  case ARM::VSTMSIA_UPD:
  case ARM::VSTMSDB_UPD: {
    unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands();
    return (NumRegs / 2) + (NumRegs % 2) + 1;
  }

  case ARM::LDMIA_RET:
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
  case ARM::LDMIA_UPD:
  case ARM::LDMDA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
  case ARM::STMIA_UPD:
  case ARM::STMDA_UPD:
  case ARM::STMDB_UPD:
  case ARM::STMIB_UPD:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tSTMIA:
  case ARM::tSTMIA_UPD:
  case ARM::tPOP_RET:
  case ARM::tPOP:
  case ARM::tPUSH:
  case ARM::t2LDMIA_RET:
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMIA:
  case ARM::t2STMDB:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands() + 1;
    if (Subtarget.isCortexA8()) {
      if (NumRegs < 4)
        return 2;
      // 4 registers would be issued: 2, 2.
      // 5 registers would be issued: 2, 2, 1.
      UOps = (NumRegs / 2);
      if (NumRegs % 2)
        ++UOps;
      return UOps;
    } else if (Subtarget.isCortexA9()) {
      UOps = (NumRegs / 2);
      // If there is an odd number of registers or if the address is not
      // 64-bit aligned, then it takes an extra AGU (Address Generation Unit)
      // cycle.
      if ((NumRegs % 2) ||
          !MI->hasOneMemOperand() ||
          (*MI->memoperands_begin())->getAlignment() < 8)
        ++UOps;
      return UOps;
    } else {
      // Assume the worst.
      return NumRegs;
    }
  }
  }
}

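/// getVLDMDefCycle - Return the cycle in which the RegNo'th register loaded
/// by a VLDM-style instruction becomes available. Writeback defs defer to
/// the itinerary; otherwise the cycle grows with the register's position in
/// the list, with an extra cycle on Cortex-A9 for an odd 'S' register count
/// or an address that is not 64-bit aligned. The LDM / VSTM / STM helpers
/// below follow the same pattern for their respective def and use cycles.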
int
ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
                                  const TargetInstrDesc &DefTID,
                                  unsigned DefClass,
                                  unsigned DefIdx, unsigned DefAlign) const {
  int RegNo = (int)(DefIdx+1) - DefTID.getNumOperands() + 1;
  if (RegNo <= 0)
    // Def is the address writeback.
    return ItinData->getOperandCycle(DefClass, DefIdx);

  int DefCycle;
  if (Subtarget.isCortexA8()) {
    // (regno / 2) + (regno % 2) + 1
    DefCycle = RegNo / 2 + 1;
    if (RegNo % 2)
      ++DefCycle;
  } else if (Subtarget.isCortexA9()) {
    DefCycle = RegNo;
    bool isSLoad = false;

    switch (DefTID.getOpcode()) {
    default: break;
    case ARM::VLDMSIA:
    case ARM::VLDMSIA_UPD:
    case ARM::VLDMSDB_UPD:
      isSLoad = true;
      break;
    }

    // If there is an odd number of 'S' registers or if the address is not
    // 64-bit aligned, then it takes an extra cycle.
    if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
      ++DefCycle;
  } else {
    // Assume the worst.
    DefCycle = RegNo + 2;
  }

  return DefCycle;
}

int
ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
                                 const TargetInstrDesc &DefTID,
                                 unsigned DefClass,
                                 unsigned DefIdx, unsigned DefAlign) const {
  int RegNo = (int)(DefIdx+1) - DefTID.getNumOperands() + 1;
  if (RegNo <= 0)
    // Def is the address writeback.
    return ItinData->getOperandCycle(DefClass, DefIdx);

  int DefCycle;
  if (Subtarget.isCortexA8()) {
    // 4 registers would be issued: 1, 2, 1.
    // 5 registers would be issued: 1, 2, 2.
    DefCycle = RegNo / 2;
    if (DefCycle < 1)
      DefCycle = 1;
    // Result latency is issue cycle + 2: E2.
    DefCycle += 2;
  } else if (Subtarget.isCortexA9()) {
    DefCycle = (RegNo / 2);
    // If there is an odd number of registers or if the address is not 64-bit
    // aligned, then it takes an extra AGU (Address Generation Unit) cycle.
    if ((RegNo % 2) || DefAlign < 8)
      ++DefCycle;
    // Result latency is AGU cycles + 2.
    DefCycle += 2;
  } else {
    // Assume the worst.
    DefCycle = RegNo + 2;
  }

  return DefCycle;
}

int
ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
                                  const TargetInstrDesc &UseTID,
                                  unsigned UseClass,
                                  unsigned UseIdx, unsigned UseAlign) const {
  int RegNo = (int)(UseIdx+1) - UseTID.getNumOperands() + 1;
  if (RegNo <= 0)
    return ItinData->getOperandCycle(UseClass, UseIdx);

  int UseCycle;
  if (Subtarget.isCortexA8()) {
    // (regno / 2) + (regno % 2) + 1
    UseCycle = RegNo / 2 + 1;
    if (RegNo % 2)
      ++UseCycle;
  } else if (Subtarget.isCortexA9()) {
    UseCycle = RegNo;
    bool isSStore = false;

    switch (UseTID.getOpcode()) {
    default: break;
    case ARM::VSTMSIA:
    case ARM::VSTMSIA_UPD:
    case ARM::VSTMSDB_UPD:
      isSStore = true;
      break;
    }

    // If there is an odd number of 'S' registers or if the address is not
    // 64-bit aligned, then it takes an extra cycle.
    if ((isSStore && (RegNo % 2)) || UseAlign < 8)
      ++UseCycle;
  } else {
    // Assume the worst.
    UseCycle = RegNo + 2;
  }

  return UseCycle;
}

int
ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
                                 const TargetInstrDesc &UseTID,
                                 unsigned UseClass,
                                 unsigned UseIdx, unsigned UseAlign) const {
  int RegNo = (int)(UseIdx+1) - UseTID.getNumOperands() + 1;
  if (RegNo <= 0)
    return ItinData->getOperandCycle(UseClass, UseIdx);

  int UseCycle;
  if (Subtarget.isCortexA8()) {
    UseCycle = RegNo / 2;
    if (UseCycle < 2)
      UseCycle = 2;
    // Read in E3.
    UseCycle += 2;
  } else if (Subtarget.isCortexA9()) {
    UseCycle = (RegNo / 2);
    // If there is an odd number of registers or if the address is not 64-bit
    // aligned, then it takes an extra AGU (Address Generation Unit) cycle.
    if ((RegNo % 2) || UseAlign < 8)
      ++UseCycle;
  } else {
    // Assume the worst.
    UseCycle = 1;
  }
  return UseCycle;
}

int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    const TargetInstrDesc &DefTID,
                                    unsigned DefIdx, unsigned DefAlign,
                                    const TargetInstrDesc &UseTID,
                                    unsigned UseIdx, unsigned UseAlign) const {
  unsigned DefClass = DefTID.getSchedClass();
  unsigned UseClass = UseTID.getSchedClass();

  if (DefIdx < DefTID.getNumDefs() && UseIdx < UseTID.getNumOperands())
    return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);

  // This may be a def / use of a variable_ops instruction; the operand
  // latency might be determinable dynamically. Let the target try to
  // figure it out.
  int DefCycle = -1;
  bool LdmBypass = false;
  switch (DefTID.getOpcode()) {
  default:
    DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
    break;

  case ARM::VLDMDIA:
  case ARM::VLDMDIA_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VLDMSIA:
  case ARM::VLDMSIA_UPD:
  case ARM::VLDMSDB_UPD:
    DefCycle = getVLDMDefCycle(ItinData, DefTID, DefClass, DefIdx, DefAlign);
    break;

  case ARM::LDMIA_RET:
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
  case ARM::LDMIA_UPD:
  case ARM::LDMDA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tPUSH:
  case ARM::t2LDMIA_RET:
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
    LdmBypass = 1;
    DefCycle = getLDMDefCycle(ItinData, DefTID, DefClass, DefIdx, DefAlign);
    break;
  }

  if (DefCycle == -1)
    // We can't seem to determine the result latency of the def, assume it's 2.
    DefCycle = 2;

  int UseCycle = -1;
  switch (UseTID.getOpcode()) {
  default:
    UseCycle = ItinData->getOperandCycle(UseClass, UseIdx);
    break;

  case ARM::VSTMDIA:
  case ARM::VSTMDIA_UPD:
  case ARM::VSTMDDB_UPD:
  case ARM::VSTMSIA:
  case ARM::VSTMSIA_UPD:
  case ARM::VSTMSDB_UPD:
    UseCycle = getVSTMUseCycle(ItinData, UseTID, UseClass, UseIdx, UseAlign);
    break;

  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
  case ARM::STMIA_UPD:
  case ARM::STMDA_UPD:
  case ARM::STMDB_UPD:
  case ARM::STMIB_UPD:
  case ARM::tSTMIA:
  case ARM::tSTMIA_UPD:
  case ARM::tPOP_RET:
  case ARM::tPOP:
  case ARM::t2STMIA:
  case ARM::t2STMDB:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD:
    UseCycle = getSTMUseCycle(ItinData, UseTID, UseClass, UseIdx, UseAlign);
    break;
  }

  if (UseCycle == -1)
    // Assume it's read in the first stage.
    UseCycle = 1;

  UseCycle = DefCycle - UseCycle + 1;
  if (UseCycle > 0) {
    if (LdmBypass) {
      // It's a variable_ops instruction so we can't use DefIdx here. Just use
      // first def operand.
      if (ItinData->hasPipelineForwarding(DefClass, DefTID.getNumOperands()-1,
                                          UseClass, UseIdx))
        --UseCycle;
    } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx,
                                               UseClass, UseIdx)) {
      --UseCycle;
    }
  }

  return UseCycle;
}

int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    const MachineInstr *DefMI, unsigned DefIdx,
                                    const MachineInstr *UseMI, unsigned UseIdx) const {
  if (DefMI->isCopyLike() || DefMI->isInsertSubreg() ||
      DefMI->isRegSequence() || DefMI->isImplicitDef())
    return 1;

  const TargetInstrDesc &DefTID = DefMI->getDesc();
  if (!ItinData || ItinData->isEmpty())
    return DefTID.mayLoad() ? 3 : 1;

  const TargetInstrDesc &UseTID = UseMI->getDesc();
  const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
  if (DefMO.getReg() == ARM::CPSR) {
    if (DefMI->getOpcode() == ARM::FMSTAT) {
      // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
      return Subtarget.isCortexA9() ? 1 : 20;
    }

    // CPSR set and branch can be paired in the same cycle.
    if (UseTID.isBranch())
      return 0;
  }

  unsigned DefAlign = DefMI->hasOneMemOperand()
    ? (*DefMI->memoperands_begin())->getAlignment() : 0;
  unsigned UseAlign = UseMI->hasOneMemOperand()
    ? (*UseMI->memoperands_begin())->getAlignment() : 0;
  int Latency = getOperandLatency(ItinData, DefTID, DefIdx, DefAlign,
                                  UseTID, UseIdx, UseAlign);

  if (Latency > 1 &&
      (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
    // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
    // variants are one cycle cheaper.
    switch (DefTID.getOpcode()) {
    default: break;
    case ARM::LDRrs:
    case ARM::LDRBrs: {
      unsigned ShOpVal = DefMI->getOperand(3).getImm();
      unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
      if (ShImm == 0 ||
          (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
        --Latency;
      break;
    }
    case ARM::t2LDRs:
    case ARM::t2LDRBs:
    case ARM::t2LDRHs:
    case ARM::t2LDRSHs: {
      // Thumb2 mode: lsl only.
      unsigned ShAmt = DefMI->getOperand(3).getImm();
      if (ShAmt == 0 || ShAmt == 2)
        --Latency;
      break;
    }
    }
  }

  if (DefAlign < 8 && Subtarget.isCortexA9())
    switch (DefTID.getOpcode()) {
    default: break;
    case ARM::VLD1q8:
    case ARM::VLD1q16:
    case ARM::VLD1q32:
    case ARM::VLD1q64:
    case ARM::VLD1q8_UPD:
    case ARM::VLD1q16_UPD:
    case ARM::VLD1q32_UPD:
    case ARM::VLD1q64_UPD:
    case ARM::VLD2d8:
    case ARM::VLD2d16:
    case ARM::VLD2d32:
    case ARM::VLD2q8:
    case ARM::VLD2q16:
    case ARM::VLD2q32:
    case ARM::VLD2d8_UPD:
    case ARM::VLD2d16_UPD:
    case ARM::VLD2d32_UPD:
    case ARM::VLD2q8_UPD:
    case ARM::VLD2q16_UPD:
    case ARM::VLD2q32_UPD:
    case ARM::VLD3d8:
    case ARM::VLD3d16:
    case ARM::VLD3d32:
    case ARM::VLD1d64T:
    case ARM::VLD3d8_UPD:
    case ARM::VLD3d16_UPD:
    case ARM::VLD3d32_UPD:
    case ARM::VLD1d64T_UPD:
    case ARM::VLD3q8_UPD:
    case ARM::VLD3q16_UPD:
    case ARM::VLD3q32_UPD:
    case ARM::VLD4d8:
    case ARM::VLD4d16:
    case ARM::VLD4d32:
    case ARM::VLD1d64Q:
    case ARM::VLD4d8_UPD:
    case ARM::VLD4d16_UPD:
    case ARM::VLD4d32_UPD:
    case ARM::VLD1d64Q_UPD:
    case ARM::VLD4q8_UPD:
    case ARM::VLD4q16_UPD:
    case ARM::VLD4q32_UPD:
    case ARM::VLD1DUPq8:
    case ARM::VLD1DUPq16:
    case ARM::VLD1DUPq32:
    case ARM::VLD1DUPq8_UPD:
    case ARM::VLD1DUPq16_UPD:
    case ARM::VLD1DUPq32_UPD:
    case ARM::VLD2DUPd8:
    case ARM::VLD2DUPd16:
    case ARM::VLD2DUPd32:
    case ARM::VLD2DUPd8_UPD:
    case ARM::VLD2DUPd16_UPD:
    case ARM::VLD2DUPd32_UPD:
    case ARM::VLD4DUPd8:
    case ARM::VLD4DUPd16:
    case ARM::VLD4DUPd32:
    case ARM::VLD4DUPd8_UPD:
    case ARM::VLD4DUPd16_UPD:
    case ARM::VLD4DUPd32_UPD:
    case ARM::VLD1LNd8:
    case ARM::VLD1LNd16:
    case ARM::VLD1LNd32:
    case ARM::VLD1LNd8_UPD:
    case ARM::VLD1LNd16_UPD:
    case ARM::VLD1LNd32_UPD:
    case ARM::VLD2LNd8:
    case ARM::VLD2LNd16:
    case ARM::VLD2LNd32:
    case ARM::VLD2LNq16:
    case ARM::VLD2LNq32:
    case ARM::VLD2LNd8_UPD:
    case ARM::VLD2LNd16_UPD:
    case ARM::VLD2LNd32_UPD:
    case ARM::VLD2LNq16_UPD:
    case ARM::VLD2LNq32_UPD:
    case ARM::VLD4LNd8:
    case ARM::VLD4LNd16:
    case ARM::VLD4LNd32:
    case ARM::VLD4LNq16:
    case ARM::VLD4LNq32:
    case ARM::VLD4LNd8_UPD:
    case ARM::VLD4LNd16_UPD:
    case ARM::VLD4LNd32_UPD:
    case ARM::VLD4LNq16_UPD:
    case ARM::VLD4LNq32_UPD:
      // If the address is not 64-bit aligned, the latencies of these
      // instructions increase by one.
      ++Latency;
      break;
    }

  return Latency;
}

int
|
|
|
|
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
|
|
|
|
SDNode *DefNode, unsigned DefIdx,
|
|
|
|
SDNode *UseNode, unsigned UseIdx) const {
|
|
|
|
if (!DefNode->isMachineOpcode())
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
const TargetInstrDesc &DefTID = get(DefNode->getMachineOpcode());
|
2011-01-21 05:51:33 +00:00
|
|
|
|
|
|
|
if (isZeroCost(DefTID.Opcode))
|
|
|
|
return 0;
|
|
|
|
|
2010-10-06 06:27:31 +00:00
|
|
|
if (!ItinData || ItinData->isEmpty())
|
|
|
|
return DefTID.mayLoad() ? 3 : 1;
|
|
|
|
|
Avoiding overly aggressive latency scheduling. If the two nodes share an
operand and one of them has a single use that is a live out copy, favor the
one that is live out. Otherwise it will be difficult to eliminate the copy
if the instruction is a loop induction variable update. e.g.
BB:
sub r1, r3, #1
str r0, [r2, r3]
mov r3, r1
cmp
bne BB
=>
BB:
str r0, [r2, r3]
sub r3, r3, #1
cmp
bne BB
This fixed the recent 256.bzip2 regression.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@117675 91177308-0d34-0410-b5e6-96231b3b80d8
2010-10-29 18:09:28 +00:00
|
|
|
if (!UseNode->isMachineOpcode()) {
|
|
|
|
int Latency = ItinData->getOperandCycle(DefTID.getSchedClass(), DefIdx);
|
|
|
|
if (Subtarget.isCortexA9())
|
|
|
|
return Latency <= 2 ? 1 : Latency - 1;
|
|
|
|
else
|
|
|
|
return Latency <= 3 ? 1 : Latency - 2;
|
|
|
|
}
|
2010-10-06 06:27:31 +00:00
|
|
|
|
|
|
|
const TargetInstrDesc &UseTID = get(UseNode->getMachineOpcode());
|
|
|
|
const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode);
|
|
|
|
unsigned DefAlign = !DefMN->memoperands_empty()
|
|
|
|
? (*DefMN->memoperands_begin())->getAlignment() : 0;
|
|
|
|
const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode);
|
|
|
|
unsigned UseAlign = !UseMN->memoperands_empty()
|
|
|
|
? (*UseMN->memoperands_begin())->getAlignment() : 0;
|
2010-10-28 06:47:08 +00:00
|
|
|
int Latency = getOperandLatency(ItinData, DefTID, DefIdx, DefAlign,
|
|
|
|
UseTID, UseIdx, UseAlign);
|
|
|
|
|
|
|
|
if (Latency > 1 &&
|
|
|
|
(Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
|
|
|
|
// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
|
|
|
|
// variants are one cycle cheaper.
|
|
|
|
switch (DefTID.getOpcode()) {
|
|
|
|
default: break;
|
|
|
|
case ARM::LDRrs:
|
|
|
|
case ARM::LDRBrs: {
|
|
|
|
unsigned ShOpVal =
|
|
|
|
cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
|
|
|
|
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
|
|
|
|
if (ShImm == 0 ||
|
|
|
|
(ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
|
|
|
|
--Latency;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case ARM::t2LDRs:
|
|
|
|
case ARM::t2LDRBs:
|
|
|
|
case ARM::t2LDRHs:
|
|
|
|
case ARM::t2LDRSHs: {
|
|
|
|
// Thumb2 mode: lsl only.
|
|
|
|
unsigned ShAmt =
|
|
|
|
cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
|
|
|
|
if (ShAmt == 0 || ShAmt == 2)
|
|
|
|
--Latency;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-04-19 01:21:49 +00:00
|
|
|
  if (DefAlign < 8 && Subtarget.isCortexA9())
    switch (DefTID.getOpcode()) {
    default: break;
    case ARM::VLD1q8Pseudo:
    case ARM::VLD1q16Pseudo:
    case ARM::VLD1q32Pseudo:
    case ARM::VLD1q64Pseudo:
    case ARM::VLD1q8Pseudo_UPD:
    case ARM::VLD1q16Pseudo_UPD:
    case ARM::VLD1q32Pseudo_UPD:
    case ARM::VLD1q64Pseudo_UPD:
    case ARM::VLD2d8Pseudo:
    case ARM::VLD2d16Pseudo:
    case ARM::VLD2d32Pseudo:
    case ARM::VLD2q8Pseudo:
    case ARM::VLD2q16Pseudo:
    case ARM::VLD2q32Pseudo:
    case ARM::VLD2d8Pseudo_UPD:
    case ARM::VLD2d16Pseudo_UPD:
    case ARM::VLD2d32Pseudo_UPD:
    case ARM::VLD2q8Pseudo_UPD:
    case ARM::VLD2q16Pseudo_UPD:
    case ARM::VLD2q32Pseudo_UPD:
    case ARM::VLD3d8Pseudo:
    case ARM::VLD3d16Pseudo:
    case ARM::VLD3d32Pseudo:
    case ARM::VLD1d64TPseudo:
    case ARM::VLD3d8Pseudo_UPD:
    case ARM::VLD3d16Pseudo_UPD:
    case ARM::VLD3d32Pseudo_UPD:
    case ARM::VLD1d64TPseudo_UPD:
    case ARM::VLD3q8Pseudo_UPD:
    case ARM::VLD3q16Pseudo_UPD:
    case ARM::VLD3q32Pseudo_UPD:
    case ARM::VLD3q8oddPseudo:
    case ARM::VLD3q16oddPseudo:
    case ARM::VLD3q32oddPseudo:
    case ARM::VLD3q8oddPseudo_UPD:
    case ARM::VLD3q16oddPseudo_UPD:
    case ARM::VLD3q32oddPseudo_UPD:
    case ARM::VLD4d8Pseudo:
    case ARM::VLD4d16Pseudo:
    case ARM::VLD4d32Pseudo:
    case ARM::VLD1d64QPseudo:
    case ARM::VLD4d8Pseudo_UPD:
    case ARM::VLD4d16Pseudo_UPD:
    case ARM::VLD4d32Pseudo_UPD:
    case ARM::VLD1d64QPseudo_UPD:
    case ARM::VLD4q8Pseudo_UPD:
    case ARM::VLD4q16Pseudo_UPD:
    case ARM::VLD4q32Pseudo_UPD:
    case ARM::VLD4q8oddPseudo:
    case ARM::VLD4q16oddPseudo:
    case ARM::VLD4q32oddPseudo:
    case ARM::VLD4q8oddPseudo_UPD:
    case ARM::VLD4q16oddPseudo_UPD:
    case ARM::VLD4q32oddPseudo_UPD:
    case ARM::VLD1DUPq8Pseudo:
    case ARM::VLD1DUPq16Pseudo:
    case ARM::VLD1DUPq32Pseudo:
    case ARM::VLD1DUPq8Pseudo_UPD:
    case ARM::VLD1DUPq16Pseudo_UPD:
    case ARM::VLD1DUPq32Pseudo_UPD:
    case ARM::VLD2DUPd8Pseudo:
    case ARM::VLD2DUPd16Pseudo:
    case ARM::VLD2DUPd32Pseudo:
    case ARM::VLD2DUPd8Pseudo_UPD:
    case ARM::VLD2DUPd16Pseudo_UPD:
    case ARM::VLD2DUPd32Pseudo_UPD:
    case ARM::VLD4DUPd8Pseudo:
    case ARM::VLD4DUPd16Pseudo:
    case ARM::VLD4DUPd32Pseudo:
    case ARM::VLD4DUPd8Pseudo_UPD:
    case ARM::VLD4DUPd16Pseudo_UPD:
    case ARM::VLD4DUPd32Pseudo_UPD:
    case ARM::VLD1LNq8Pseudo:
    case ARM::VLD1LNq16Pseudo:
    case ARM::VLD1LNq32Pseudo:
    case ARM::VLD1LNq8Pseudo_UPD:
    case ARM::VLD1LNq16Pseudo_UPD:
    case ARM::VLD1LNq32Pseudo_UPD:
    case ARM::VLD2LNd8Pseudo:
    case ARM::VLD2LNd16Pseudo:
    case ARM::VLD2LNd32Pseudo:
    case ARM::VLD2LNq16Pseudo:
    case ARM::VLD2LNq32Pseudo:
    case ARM::VLD2LNd8Pseudo_UPD:
    case ARM::VLD2LNd16Pseudo_UPD:
    case ARM::VLD2LNd32Pseudo_UPD:
    case ARM::VLD2LNq16Pseudo_UPD:
    case ARM::VLD2LNq32Pseudo_UPD:
    case ARM::VLD4LNd8Pseudo:
    case ARM::VLD4LNd16Pseudo:
    case ARM::VLD4LNd32Pseudo:
    case ARM::VLD4LNq16Pseudo:
    case ARM::VLD4LNq32Pseudo:
    case ARM::VLD4LNd8Pseudo_UPD:
    case ARM::VLD4LNd16Pseudo_UPD:
    case ARM::VLD4LNd32Pseudo_UPD:
    case ARM::VLD4LNq16Pseudo_UPD:
    case ARM::VLD4LNq32Pseudo_UPD:
      // If the address is not 64-bit aligned, the latencies of these
      // instructions increase by one.
      ++Latency;
      break;
    }

  return Latency;
}
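/// getInstrLatency - Estimate the latency of MI in cycles. Copy-like,
/// insert_subreg, reg_sequence and implicit_def instructions are given a
/// latency of 1. Otherwise the itinerary's stage latency for MI's scheduling
/// class is used; if the itinerary records no micro-ops for that class, the
/// dynamically computed micro-op count is returned instead. When PredCost is
/// non-null it is set to 1 for CPSR-defining instructions, since predication
/// adds CPSR as an extra source operand.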
int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                      const MachineInstr *MI,
                                      unsigned *PredCost) const {
  if (MI->isCopyLike() || MI->isInsertSubreg() ||
      MI->isRegSequence() || MI->isImplicitDef())
    return 1;

  if (!ItinData || ItinData->isEmpty())
    return 1;

  const TargetInstrDesc &TID = MI->getDesc();
  unsigned Class = TID.getSchedClass();
  unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (PredCost && TID.hasImplicitDefOfPhysReg(ARM::CPSR))
    // When predicated, CPSR is an additional source operand for CPSR-updating
    // instructions; this apparently increases their latencies.
    *PredCost = 1;
  if (UOps)
    return ItinData->getStageLatency(Class);
  return getNumMicroOps(ItinData, MI);
}
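/// getInstrLatency - SelectionDAG variant of the latency query. Non-machine
/// nodes and empty itineraries default to a latency of 1; VLDMQIA / VSTMQIA
/// are special-cased to 2 cycles, and all other opcodes use the itinerary's
/// stage latency for their scheduling class.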
int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                      SDNode *Node) const {
  if (!Node->isMachineOpcode())
    return 1;

  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Opcode = Node->getMachineOpcode();
  switch (Opcode) {
  default:
    return ItinData->getStageLatency(get(Opcode).getSchedClass());
  case ARM::VLDMQIA:
  case ARM::VSTMQIA:
    return 2;
  }
}
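/// hasHighOperandLatency - Return true if the dependence between DefMI's
/// result and UseMI's operand should be treated as high latency, e.g. when
/// deciding whether hoisting the def out of a loop is worthwhile. On
/// Cortex-A8 any VFP def or use qualifies because VFP instructions are not
/// pipelined there; otherwise only VFP / NEON dependences with an operand
/// latency above 3 cycles do.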
bool ARMBaseInstrInfo::
hasHighOperandLatency(const InstrItineraryData *ItinData,
                      const MachineRegisterInfo *MRI,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
  unsigned UDomain = UseMI->getDesc().TSFlags & ARMII::DomainMask;
  if (Subtarget.isCortexA8() &&
      (DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP))
    // Cortex-A8 VFP instructions are not pipelined.
    return true;

  // Hoist VFP / NEON instructions with 4 or higher latency.
  int Latency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  if (Latency <= 3)
    return false;
  return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON ||
         UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON;
}
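/// hasLowDefLatency - Return true if DefMI is an integer-domain instruction
/// whose result at DefIdx becomes available within 2 cycles according to the
/// itinerary. Returns false if no itinerary data is available.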
bool ARMBaseInstrInfo::
hasLowDefLatency(const InstrItineraryData *ItinData,
                 const MachineInstr *DefMI, unsigned DefIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
  if (DDomain == ARMII::DomainGeneral) {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
    return (DefCycle != -1 && DefCycle <= 2);
  }
  return false;
}
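/// isFpMLxInstruction - Return true if Opcode is a floating-point
/// multiply-accumulate (MLA / MLS style) instruction that can be split into
/// a separate multiply and add / sub, and report the expanded opcodes plus
/// the NegAcc / HasLane flags recorded in ARM_MLxTable. A caller (e.g. the
/// MLx expansion pass) would query it roughly like this:
///   unsigned MulOpc, AddSubOpc;
///   bool NegAcc, HasLane;
///   if (TII->isFpMLxInstruction(MI->getOpcode(), MulOpc, AddSubOpc,
///                               NegAcc, HasLane)) {
///     // rewrite MI as MulOpc followed by AddSubOpc
///   }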
bool
ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
                                     unsigned &AddSubOpc,
                                     bool &NegAcc, bool &HasLane) const {
  DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode);
  if (I == MLxEntryMap.end())
    return false;

  const ARM_MLxEntry &Entry = ARM_MLxTable[I->second];
  MulOpc = Entry.MulOpc;
  AddSubOpc = Entry.AddSubOpc;
  NegAcc = Entry.NegAcc;
  HasLane = Entry.HasLane;
  return true;
}