- Rename TargetInstrDesc and TargetOperandInfo to MCInstrDesc and MCOperandInfo, and sink them into the MC layer.
- Add MCInstrInfo, which captures the tablegen-generated static data, and change TargetInstrInfo so that it is based on MCInstrInfo.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@134021 91177308-0d34-0410-b5e6-96231b3b80d8
Evan Cheng 2011-06-28 19:10:37 +00:00
parent 9bbe4d6c00
commit e837dead3c
77 changed files with 903 additions and 875 deletions
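For orientation, a minimal before/after sketch of the client-side rename this patch applies throughout the tree. The helper function and its name are illustrative assumptions, not part of the diff, but the accessors it uses (getDesc, mayLoad, getOperandConstraint, MCOI::TIED_TO) are the ones introduced below.

// Before this commit, clients held a TargetInstrDesc and used the TOI:: enums:
//   const TargetInstrDesc &TID = MI->getDesc();
//   if (TID.mayLoad() || TID.getOperandConstraint(0, TOI::TIED_TO) != -1) ...
//
// After this commit, the descriptor type and its enum namespaces live in MC:
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/MC/MCInstrDesc.h"
using namespace llvm;

// Hypothetical helper: true if MI may read memory or ties operand 0 to a use.
static bool loadsOrTiesFirstOperand(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();  // getDesc() now returns an MCInstrDesc
  return MCID.mayLoad() ||
         MCID.getOperandConstraint(0, MCOI::TIED_TO) != -1;
}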


@ -345,7 +345,7 @@ public:
/// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
/// of `new MachineInstr'.
///
MachineInstr *CreateMachineInstr(const TargetInstrDesc &TID,
MachineInstr *CreateMachineInstr(const MCInstrDesc &MCID,
DebugLoc DL,
bool NoImp = false);


@ -17,7 +17,7 @@
#define LLVM_CODEGEN_MACHINEINSTR_H
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/Target/TargetInstrDesc.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Target/TargetOpcodes.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
@ -30,7 +30,6 @@ namespace llvm {
template <typename T> class SmallVectorImpl;
class AliasAnalysis;
class TargetInstrDesc;
class TargetInstrInfo;
class TargetRegisterInfo;
class MachineFunction;
@ -57,7 +56,7 @@ public:
// function frame setup code.
};
private:
const TargetInstrDesc *TID; // Instruction descriptor.
const MCInstrDesc *MCID; // Instruction descriptor.
uint16_t NumImplicitOps; // Number of implicit operands (which
// are determined at construction time).
@ -94,7 +93,7 @@ private:
MachineInstr(MachineFunction &, const MachineInstr &);
/// MachineInstr ctor - This constructor creates a dummy MachineInstr with
/// TID NULL and no operands.
/// MCID NULL and no operands.
MachineInstr();
// The next two constructors have DebugLoc and non-DebugLoc versions;
@ -103,25 +102,25 @@ private:
/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
/// implicit operands. It reserves space for the number of operands specified
/// by the TargetInstrDesc. The version with a DebugLoc should be preferred.
explicit MachineInstr(const TargetInstrDesc &TID, bool NoImp = false);
/// by the MCInstrDesc. The version with a DebugLoc should be preferred.
explicit MachineInstr(const MCInstrDesc &MCID, bool NoImp = false);
/// MachineInstr ctor - Work exactly the same as the ctor above, except that
/// the MachineInstr is created and added to the end of the specified basic
/// block. The version with a DebugLoc should be preferred.
MachineInstr(MachineBasicBlock *MBB, const TargetInstrDesc &TID);
MachineInstr(MachineBasicBlock *MBB, const MCInstrDesc &MCID);
/// MachineInstr ctor - This constructor create a MachineInstr and add the
/// implicit operands. It reserves space for number of operands specified by
/// TargetInstrDesc. An explicit DebugLoc is supplied.
explicit MachineInstr(const TargetInstrDesc &TID, const DebugLoc dl,
/// MCInstrDesc. An explicit DebugLoc is supplied.
explicit MachineInstr(const MCInstrDesc &MCID, const DebugLoc dl,
bool NoImp = false);
/// MachineInstr ctor - Work exactly the same as the ctor above, except that
/// the MachineInstr is created and added to the end of the specified basic
/// block.
MachineInstr(MachineBasicBlock *MBB, const DebugLoc dl,
const TargetInstrDesc &TID);
const MCInstrDesc &MCID);
~MachineInstr();
@ -183,11 +182,11 @@ public:
/// getDesc - Returns the target instruction descriptor of this
/// MachineInstr.
const TargetInstrDesc &getDesc() const { return *TID; }
const MCInstrDesc &getDesc() const { return *MCID; }
/// getOpcode - Returns the opcode of this MachineInstr.
///
int getOpcode() const { return TID->Opcode; }
int getOpcode() const { return MCID->Opcode; }
/// Access to explicit operands of the instruction.
///
@ -464,8 +463,8 @@ public:
/// hasUnmodeledSideEffects - Return true if this instruction has side
/// effects that are not modeled by mayLoad / mayStore, etc.
/// For all instructions, the property is encoded in TargetInstrDesc::Flags
/// (see TargetInstrDesc::hasUnmodeledSideEffects(). The only exception is
/// For all instructions, the property is encoded in MCInstrDesc::Flags
/// (see MCInstrDesc::hasUnmodeledSideEffects(). The only exception is
/// INLINEASM instruction, in which case the side effect property is encoded
/// in one of its operands (see InlineAsm::Extra_HasSideEffect).
///
@ -497,7 +496,7 @@ public:
/// setDesc - Replace the instruction descriptor (thus opcode) of
/// the current instruction with a new one.
///
void setDesc(const TargetInstrDesc &tid) { TID = &tid; }
void setDesc(const MCInstrDesc &tid) { MCID = &tid; }
/// setDebugLoc - Replace current source information with new such.
/// Avoid using this, the constructor argument is preferable.


@ -22,7 +22,7 @@
namespace llvm {
class TargetInstrDesc;
class MCInstrDesc;
class MDNode;
namespace RegState {
@ -180,8 +180,8 @@ public:
///
inline MachineInstrBuilder BuildMI(MachineFunction &MF,
DebugLoc DL,
const TargetInstrDesc &TID) {
return MachineInstrBuilder(MF.CreateMachineInstr(TID, DL));
const MCInstrDesc &MCID) {
return MachineInstrBuilder(MF.CreateMachineInstr(MCID, DL));
}
/// BuildMI - This version of the builder sets up the first operand as a
@ -189,9 +189,9 @@ inline MachineInstrBuilder BuildMI(MachineFunction &MF,
///
inline MachineInstrBuilder BuildMI(MachineFunction &MF,
DebugLoc DL,
const TargetInstrDesc &TID,
const MCInstrDesc &MCID,
unsigned DestReg) {
return MachineInstrBuilder(MF.CreateMachineInstr(TID, DL))
return MachineInstrBuilder(MF.CreateMachineInstr(MCID, DL))
.addReg(DestReg, RegState::Define);
}
@ -202,9 +202,9 @@ inline MachineInstrBuilder BuildMI(MachineFunction &MF,
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
DebugLoc DL,
const TargetInstrDesc &TID,
const MCInstrDesc &MCID,
unsigned DestReg) {
MachineInstr *MI = BB.getParent()->CreateMachineInstr(TID, DL);
MachineInstr *MI = BB.getParent()->CreateMachineInstr(MCID, DL);
BB.insert(I, MI);
return MachineInstrBuilder(MI).addReg(DestReg, RegState::Define);
}
@ -216,8 +216,8 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
DebugLoc DL,
const TargetInstrDesc &TID) {
MachineInstr *MI = BB.getParent()->CreateMachineInstr(TID, DL);
const MCInstrDesc &MCID) {
MachineInstr *MI = BB.getParent()->CreateMachineInstr(MCID, DL);
BB.insert(I, MI);
return MachineInstrBuilder(MI);
}
@ -228,8 +228,8 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
///
inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
DebugLoc DL,
const TargetInstrDesc &TID) {
return BuildMI(*BB, BB->end(), DL, TID);
const MCInstrDesc &MCID) {
return BuildMI(*BB, BB->end(), DL, MCID);
}
/// BuildMI - This version of the builder inserts the newly-built
@ -238,9 +238,9 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
///
inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
DebugLoc DL,
const TargetInstrDesc &TID,
const MCInstrDesc &MCID,
unsigned DestReg) {
return BuildMI(*BB, BB->end(), DL, TID, DestReg);
return BuildMI(*BB, BB->end(), DL, MCID, DestReg);
}
inline unsigned getDefRegState(bool B) {


@ -34,7 +34,7 @@ namespace llvm {
class ScheduleDAG;
class SDNode;
class TargetInstrInfo;
class TargetInstrDesc;
class MCInstrDesc;
class TargetMachine;
class TargetRegisterClass;
template<class Graph> class GraphWriter;
@ -507,9 +507,9 @@ namespace llvm {
virtual ~ScheduleDAG();
/// getInstrDesc - Return the TargetInstrDesc of this SUnit.
/// getInstrDesc - Return the MCInstrDesc of this SUnit.
/// Return NULL for SDNodes without a machine opcode.
const TargetInstrDesc *getInstrDesc(const SUnit *SU) const {
const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
if (SU->isInstr()) return &SU->getInstr()->getDesc();
return getNodeDesc(SU->getNode());
}
@ -579,8 +579,8 @@ namespace llvm {
void EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap);
private:
// Return the TargetInstrDesc of this SDNode or NULL.
const TargetInstrDesc *getNodeDesc(const SDNode *Node) const;
// Return the MCInstrDesc of this SDNode or NULL.
const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
};
class SUnitIterator : public std::iterator<std::forward_iterator_tag,


@ -25,7 +25,6 @@
namespace llvm {
class InstrItineraryData;
class TargetInstrDesc;
class ScheduleDAG;
class SUnit;


@ -1,4 +1,4 @@
//===-- llvm/Target/TargetInstrDesc.h - Instruction Descriptors -*- C++ -*-===//
//===-- llvm/Mc/McInstrDesc.h - Instruction Descriptors -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -7,26 +7,23 @@
//
//===----------------------------------------------------------------------===//
//
// This file defines the TargetOperandInfo and TargetInstrDesc classes, which
// This file defines the McOperandInfo and McInstrDesc classes, which
// are used to describe target instructions and their operands.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_TARGETINSTRDESC_H
#define LLVM_TARGET_TARGETINSTRDESC_H
#ifndef LLVM_MC_MCINSTRDESC_H
#define LLVM_MC_MCINSTRDESC_H
#include "llvm/Support/DataTypes.h"
namespace llvm {
class TargetRegisterClass;
class TargetRegisterInfo;
//===----------------------------------------------------------------------===//
// Machine Operand Flags and Description
//===----------------------------------------------------------------------===//
namespace TOI {
namespace MCOI {
// Operand constraints
enum OperandConstraint {
TIED_TO = 0, // Must be allocated the same register as.
@ -34,7 +31,7 @@ namespace TOI {
};
/// OperandFlags - These are flags set on operands, but should be considered
/// private, all access should go through the TargetOperandInfo accessors.
/// private, all access should go through the MCOperandInfo accessors.
/// See the accessors for a description of what these are.
enum OperandFlags {
LookupPtrRegClass = 0,
@ -43,10 +40,10 @@ namespace TOI {
};
}
/// TargetOperandInfo - This holds information about one operand of a machine
/// MCOperandInfo - This holds information about one operand of a machine
/// instruction, indicating the register class for register operands, etc.
///
class TargetOperandInfo {
class MCOperandInfo {
public:
/// RegClass - This specifies the register class enumeration of the operand
/// if the operand is a register. If isLookupPtrRegClass is set, then this is
@ -54,7 +51,7 @@ public:
/// get a dynamic register class.
short RegClass;
/// Flags - These are flags from the TOI::OperandFlags enum.
/// Flags - These are flags from the MCOI::OperandFlags enum.
unsigned short Flags;
/// Lower 16 bits are used to specify which constraints are set. The higher 16
@ -64,15 +61,15 @@ public:
/// isLookupPtrRegClass - Set if this operand is a pointer value and it
/// requires a callback to look up its register class.
bool isLookupPtrRegClass() const { return Flags&(1 <<TOI::LookupPtrRegClass);}
bool isLookupPtrRegClass() const { return Flags&(1 <<MCOI::LookupPtrRegClass);}
/// isPredicate - Set if this is one of the operands that made up of
/// the predicate operand that controls an isPredicable() instruction.
bool isPredicate() const { return Flags & (1 << TOI::Predicate); }
bool isPredicate() const { return Flags & (1 << MCOI::Predicate); }
/// isOptionalDef - Set if this operand is a optional def.
///
bool isOptionalDef() const { return Flags & (1 << TOI::OptionalDef); }
bool isOptionalDef() const { return Flags & (1 << MCOI::OptionalDef); }
};
@ -80,11 +77,11 @@ public:
// Machine Instruction Flags and Description
//===----------------------------------------------------------------------===//
/// TargetInstrDesc flags - These should be considered private to the
/// implementation of the TargetInstrDesc class. Clients should use the
/// predicate methods on TargetInstrDesc, not use these directly. These
/// all correspond to bitfields in the TargetInstrDesc::Flags field.
namespace TID {
/// MCInstrDesc flags - These should be considered private to the
/// implementation of the MCInstrDesc class. Clients should use the predicate
/// methods on MCInstrDesc, not use these directly. These all correspond to
/// bitfields in the MCInstrDesc::Flags field.
namespace MCID {
enum {
Variadic = 0,
HasOptionalDef,
@ -114,12 +111,12 @@ namespace TID {
};
}
/// TargetInstrDesc - Describe properties that are true of each
/// instruction in the target description file. This captures information about
/// side effects, register use and many other things. There is one instance of
/// this struct for each target instruction class, and the MachineInstr class
/// points to this struct directly to describe itself.
class TargetInstrDesc {
/// MCInstrDesc - Describe properties that are true of each instruction in the
/// target description file. This captures information about side effects,
/// register use and many other things. There is one instance of this struct
/// for each target instruction class, and the MachineInstr class points to
/// this struct directly to describe itself.
class MCInstrDesc {
public:
unsigned short Opcode; // The opcode number
unsigned short NumOperands; // Num of args (may be more if variable_ops)
@ -130,12 +127,12 @@ public:
uint64_t TSFlags; // Target Specific Flag values
const unsigned *ImplicitUses; // Registers implicitly read by this instr
const unsigned *ImplicitDefs; // Registers implicitly defined by this instr
const TargetOperandInfo *OpInfo; // 'NumOperands' entries about operands
const MCOperandInfo *OpInfo; // 'NumOperands' entries about operands
/// getOperandConstraint - Returns the value of the specific constraint if
/// it is set. Returns -1 if it is not set.
int getOperandConstraint(unsigned OpNum,
TOI::OperandConstraint Constraint) const {
MCOI::OperandConstraint Constraint) const {
if (OpNum < NumOperands &&
(OpInfo[OpNum].Constraints & (1 << Constraint))) {
unsigned Pos = 16 + Constraint * 4;
@ -177,13 +174,13 @@ public:
/// operands but before the implicit definitions and uses (if any are
/// present).
bool isVariadic() const {
return Flags & (1 << TID::Variadic);
return Flags & (1 << MCID::Variadic);
}
/// hasOptionalDef - Set if this instruction has an optional definition, e.g.
/// ARM instructions which can set condition code if 's' bit is set.
bool hasOptionalDef() const {
return Flags & (1 << TID::HasOptionalDef);
return Flags & (1 << MCID::HasOptionalDef);
}
/// getImplicitUses - Return a list of registers that are potentially
@ -208,7 +205,6 @@ public:
return i;
}
/// getImplicitDefs - Return a list of registers that are potentially
/// written by any instance of this machine instruction. For example, on X86,
/// many instructions implicitly set the flags register. In this case, they
@ -260,18 +256,18 @@ public:
}
bool isReturn() const {
return Flags & (1 << TID::Return);
return Flags & (1 << MCID::Return);
}
bool isCall() const {
return Flags & (1 << TID::Call);
return Flags & (1 << MCID::Call);
}
/// isBarrier - Returns true if the specified instruction stops control flow
/// from executing the instruction immediately following it. Examples include
/// unconditional branches and return instructions.
bool isBarrier() const {
return Flags & (1 << TID::Barrier);
return Flags & (1 << MCID::Barrier);
}
/// isTerminator - Returns true if this instruction part of the terminator for
@ -281,7 +277,7 @@ public:
/// Various passes use this to insert code into the bottom of a basic block,
/// but before control flow occurs.
bool isTerminator() const {
return Flags & (1 << TID::Terminator);
return Flags & (1 << MCID::Terminator);
}
/// isBranch - Returns true if this is a conditional, unconditional, or
@ -289,13 +285,13 @@ public:
/// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
/// get more information.
bool isBranch() const {
return Flags & (1 << TID::Branch);
return Flags & (1 << MCID::Branch);
}
/// isIndirectBranch - Return true if this is an indirect branch, such as a
/// branch through a register.
bool isIndirectBranch() const {
return Flags & (1 << TID::IndirectBranch);
return Flags & (1 << MCID::IndirectBranch);
}
/// isConditionalBranch - Return true if this is a branch which may fall
@ -319,37 +315,37 @@ public:
/// values. There are various methods in TargetInstrInfo that can be used to
/// control and modify the predicate in this instruction.
bool isPredicable() const {
return Flags & (1 << TID::Predicable);
return Flags & (1 << MCID::Predicable);
}
/// isCompare - Return true if this instruction is a comparison.
bool isCompare() const {
return Flags & (1 << TID::Compare);
return Flags & (1 << MCID::Compare);
}
/// isMoveImmediate - Return true if this instruction is a move immediate
/// (including conditional moves) instruction.
bool isMoveImmediate() const {
return Flags & (1 << TID::MoveImm);
return Flags & (1 << MCID::MoveImm);
}
/// isBitcast - Return true if this instruction is a bitcast instruction.
///
bool isBitcast() const {
return Flags & (1 << TID::Bitcast);
return Flags & (1 << MCID::Bitcast);
}
/// isNotDuplicable - Return true if this instruction cannot be safely
/// duplicated. For example, if the instruction has a unique labels attached
/// to it, duplicating it would cause multiple definition errors.
bool isNotDuplicable() const {
return Flags & (1 << TID::NotDuplicable);
return Flags & (1 << MCID::NotDuplicable);
}
/// hasDelaySlot - Returns true if the specified instruction has a delay slot
/// which must be filled by the code generator.
bool hasDelaySlot() const {
return Flags & (1 << TID::DelaySlot);
return Flags & (1 << MCID::DelaySlot);
}
/// canFoldAsLoad - Return true for instructions that can be folded as
@ -361,7 +357,7 @@ public:
/// This should only be set on instructions that return a value in their
/// only virtual register definition.
bool canFoldAsLoad() const {
return Flags & (1 << TID::FoldableAsLoad);
return Flags & (1 << MCID::FoldableAsLoad);
}
//===--------------------------------------------------------------------===//
@ -372,7 +368,7 @@ public:
/// Instructions with this flag set are not necessarily simple load
/// instructions, they may load a value and modify it, for example.
bool mayLoad() const {
return Flags & (1 << TID::MayLoad);
return Flags & (1 << MCID::MayLoad);
}
@ -381,7 +377,7 @@ public:
/// instructions, they may store a modified value based on their operands, or
/// may not actually modify anything, for example.
bool mayStore() const {
return Flags & (1 << TID::MayStore);
return Flags & (1 << MCID::MayStore);
}
/// hasUnmodeledSideEffects - Return true if this instruction has side
@ -398,7 +394,7 @@ public:
/// LLVM, etc.
///
bool hasUnmodeledSideEffects() const {
return Flags & (1 << TID::UnmodeledSideEffects);
return Flags & (1 << MCID::UnmodeledSideEffects);
}
//===--------------------------------------------------------------------===//
@ -416,7 +412,7 @@ public:
/// Also note that some instructions require non-trivial modification to
/// commute them.
bool isCommutable() const {
return Flags & (1 << TID::Commutable);
return Flags & (1 << MCID::Commutable);
}
/// isConvertibleTo3Addr - Return true if this is a 2-address instruction
@ -434,7 +430,7 @@ public:
/// instruction (e.g. shl reg, 4 on x86).
///
bool isConvertibleTo3Addr() const {
return Flags & (1 << TID::ConvertibleTo3Addr);
return Flags & (1 << MCID::ConvertibleTo3Addr);
}
/// usesCustomInsertionHook - Return true if this instruction requires
@ -446,7 +442,7 @@ public:
/// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
/// is used to insert this into the MachineBasicBlock.
bool usesCustomInsertionHook() const {
return Flags & (1 << TID::UsesCustomInserter);
return Flags & (1 << MCID::UsesCustomInserter);
}
/// isRematerializable - Returns true if this instruction is a candidate for
@ -454,7 +450,7 @@ public:
/// flag is set, the isReallyTriviallyReMaterializable() method is called to
/// verify the instruction is really rematable.
bool isRematerializable() const {
return Flags & (1 << TID::Rematerializable);
return Flags & (1 << MCID::Rematerializable);
}
/// isAsCheapAsAMove - Returns true if this instruction has the same cost (or
@ -464,7 +460,7 @@ public:
/// more than moving the instruction into the appropriate register. Note, we
/// are not marking copies from and to the same register class with this flag.
bool isAsCheapAsAMove() const {
return Flags & (1 << TID::CheapAsAMove);
return Flags & (1 << MCID::CheapAsAMove);
}
/// hasExtraSrcRegAllocReq - Returns true if this instruction source operands
@ -474,7 +470,7 @@ public:
/// Post-register allocation passes should not attempt to change allocations
/// for sources of instructions with this flag.
bool hasExtraSrcRegAllocReq() const {
return Flags & (1 << TID::ExtraSrcRegAllocReq);
return Flags & (1 << MCID::ExtraSrcRegAllocReq);
}
/// hasExtraDefRegAllocReq - Returns true if this instruction def operands
@ -484,7 +480,7 @@ public:
/// Post-register allocation passes should not attempt to change allocations
/// for definitions of instructions with this flag.
bool hasExtraDefRegAllocReq() const {
return Flags & (1 << TID::ExtraDefRegAllocReq);
return Flags & (1 << MCID::ExtraDefRegAllocReq);
}
};
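As a usage sketch for the descriptor class above, here is roughly what MachineInstr::findFirstPredOperandIdx (updated later in this patch) does with it; the free function below is illustrative only, not code from the commit.

#include "llvm/MC/MCInstrDesc.h"
using namespace llvm;

// Return the index of the first predicate operand described by MCID,
// or -1 if the instruction is not predicable.
static int firstPredicateOperand(const MCInstrDesc &MCID) {
  if (!MCID.isPredicable())
    return -1;
  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return (int)i;
  return -1;
}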


@ -0,0 +1,51 @@
//===-- llvm/MC/MCInstrInfo.h - Target Instruction Info ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the target machine instruction set.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_MC_MCINSTRINFO_H
#define LLVM_MC_MCINSTRINFO_H
#include "llvm/MC/MCInstrDesc.h"
#include <cassert>
namespace llvm {
//---------------------------------------------------------------------------
///
/// MCInstrInfo - Interface to description of machine instruction set
///
class MCInstrInfo {
const MCInstrDesc *Desc; // Raw array to allow static init'n
unsigned NumOpcodes; // Number of entries in the desc array
public:
/// InitMCInstrInfo - Initialize MCInstrInfo, called by TableGen
/// auto-generated routines. *DO NOT USE*.
void InitMCInstrInfo(const MCInstrDesc *D, unsigned NO) {
Desc = D;
NumOpcodes = NO;
}
unsigned getNumOpcodes() const { return NumOpcodes; }
/// get - Return the machine instruction descriptor that corresponds to the
/// specified instruction opcode.
///
const MCInstrDesc &get(unsigned Opcode) const {
assert(Opcode < NumOpcodes && "Invalid opcode!");
return Desc[Opcode];
}
};
} // End llvm namespace
#endif
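A sketch of how a target's tablegen-generated descriptor table plugs into the new MCInstrInfo class; the Foo names and the extern declarations are assumptions for illustration, and in-tree the InitMCInstrInfo call is emitted by TableGen rather than written by hand.

#include "llvm/MC/MCInstrInfo.h"
using namespace llvm;

// Stand-ins for the tablegen-generated static data (assumed names).
extern const MCInstrDesc FooInsts[];
extern const unsigned FooNumOpcodes;

// Hypothetical wrapper that registers the table with the base class.
class FooMCInstrInfo : public MCInstrInfo {
public:
  FooMCInstrInfo() { InitMCInstrInfo(FooInsts, FooNumOpcodes); }
};

// Descriptors are then looked up by opcode through the base class:
//   FooMCInstrInfo MCII;
//   const MCInstrDesc &Desc = MCII.get(Opcode);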


@ -14,7 +14,7 @@
#ifndef LLVM_TARGET_TARGETINSTRINFO_H
#define LLVM_TARGET_TARGETINSTRINFO_H
#include "llvm/Target/TargetInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
namespace llvm {
@ -40,29 +40,16 @@ template<class T> class SmallVectorImpl;
///
/// TargetInstrInfo - Interface to description of machine instruction set
///
class TargetInstrInfo {
const TargetInstrDesc *Descriptors; // Raw array to allow static init'n
unsigned NumOpcodes; // Number of entries in the desc array
class TargetInstrInfo : public MCInstrInfo {
TargetInstrInfo(const TargetInstrInfo &); // DO NOT IMPLEMENT
void operator=(const TargetInstrInfo &); // DO NOT IMPLEMENT
public:
TargetInstrInfo(const TargetInstrDesc *desc, unsigned NumOpcodes);
TargetInstrInfo(const MCInstrDesc *desc, unsigned NumOpcodes);
virtual ~TargetInstrInfo();
unsigned getNumOpcodes() const { return NumOpcodes; }
/// get - Return the machine instruction descriptor that corresponds to the
/// specified instruction opcode.
///
const TargetInstrDesc &get(unsigned Opcode) const {
assert(Opcode < NumOpcodes && "Invalid opcode!");
return Descriptors[Opcode];
}
/// getRegClass - Givem a machine instruction descriptor, returns the register
/// class constraint for OpNum, or NULL.
const TargetRegisterClass *getRegClass(const TargetInstrDesc &TID,
const TargetRegisterClass *getRegClass(const MCInstrDesc &TID,
unsigned OpNum,
const TargetRegisterInfo *TRI) const;
@ -677,7 +664,7 @@ public:
/// libcodegen, not in libtarget.
class TargetInstrInfoImpl : public TargetInstrInfo {
protected:
TargetInstrInfoImpl(const TargetInstrDesc *desc, unsigned NumOpcodes)
TargetInstrInfoImpl(const MCInstrDesc *desc, unsigned NumOpcodes)
: TargetInstrInfo(desc, NumOpcodes) {}
public:
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
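For a concrete flavor of the updated target-layer API, a condensed sketch of the pattern used in the MachineLICM hunk further down (looking up a descriptor and asking for its register-class constraint); the helper and its name are illustrative.

#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;

// Illustrative helper: the register class constraining the single def of
// opcode NewOpc, or 0 if the opcode does not define exactly one register.
static const TargetRegisterClass *
singleDefRegClass(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
                  unsigned NewOpc) {
  const MCInstrDesc &MCID = TII->get(NewOpc);  // get() is now inherited from MCInstrInfo
  if (MCID.getNumDefs() != 1)
    return 0;
  return TII->getRegClass(MCID, 0, TRI);
}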


@ -495,7 +495,7 @@ public:
}
/// getRegClass - Returns the register class associated with the enumeration
/// value. See class TargetOperandInfo.
/// value. See class MCOperandInfo.
const TargetRegisterClass *getRegClass(unsigned i) const {
assert(i < getNumRegClasses() && "Register Class ID out of range");
return RegClassBegin[i];


@ -421,10 +421,10 @@ static unsigned EstimateRuntime(MachineBasicBlock::iterator I,
for (; I != E; ++I) {
if (I->isDebugValue())
continue;
const TargetInstrDesc &TID = I->getDesc();
if (TID.isCall())
const MCInstrDesc &MCID = I->getDesc();
if (MCID.isCall())
Time += 10;
else if (TID.mayLoad() || TID.mayStore())
else if (MCID.mayLoad() || MCID.mayStore())
Time += 2;
else
++Time;


@ -62,8 +62,8 @@ bool ExpandISelPseudos::runOnMachineFunction(MachineFunction &MF) {
MachineInstr *MI = MBBI++;
// If MI is a pseudo, expand it.
const TargetInstrDesc &TID = MI->getDesc();
if (TID.usesCustomInsertionHook()) {
const MCInstrDesc &MCID = MI->getDesc();
if (MCID.usesCustomInsertionHook()) {
Changed = true;
MachineBasicBlock *NewMBB =
TLI->EmitInstrWithCustomInserter(MI, MBB);


@ -651,12 +651,12 @@ void IfConverter::ScanInstructions(BBInfo &BBI) {
if (I->isDebugValue())
continue;
const TargetInstrDesc &TID = I->getDesc();
if (TID.isNotDuplicable())
const MCInstrDesc &MCID = I->getDesc();
if (MCID.isNotDuplicable())
BBI.CannotBeCopied = true;
bool isPredicated = TII->isPredicated(I);
bool isCondBr = BBI.IsBrAnalyzable && TID.isConditionalBranch();
bool isCondBr = BBI.IsBrAnalyzable && MCID.isConditionalBranch();
if (!isCondBr) {
if (!isPredicated) {
@ -1414,9 +1414,9 @@ void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
for (MachineBasicBlock::iterator I = FromBBI.BB->begin(),
E = FromBBI.BB->end(); I != E; ++I) {
const TargetInstrDesc &TID = I->getDesc();
const MCInstrDesc &MCID = I->getDesc();
// Do not copy the end of the block branches.
if (IgnoreBr && TID.isBranch())
if (IgnoreBr && MCID.isBranch())
break;
MachineInstr *MI = MF.CloneMachineInstr(I);


@ -22,7 +22,6 @@
#include "llvm/MC/MCContext.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrDesc.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Assembly/Writer.h"


@ -260,12 +260,12 @@ bool MachineCSE::isCSECandidate(MachineInstr *MI) {
return false;
// Ignore stuff that we obviously can't move.
const TargetInstrDesc &TID = MI->getDesc();
if (TID.mayStore() || TID.isCall() || TID.isTerminator() ||
const MCInstrDesc &MCID = MI->getDesc();
if (MCID.mayStore() || MCID.isCall() || MCID.isTerminator() ||
MI->hasUnmodeledSideEffects())
return false;
if (TID.mayLoad()) {
if (MCID.mayLoad()) {
// Okay, this instruction does a load. As a refinement, we allow the target
// to decide whether the loaded value is actually a constant. If so, we can
// actually use it as a load.


@ -152,10 +152,10 @@ void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
/// of `new MachineInstr'.
///
MachineInstr *
MachineFunction::CreateMachineInstr(const TargetInstrDesc &TID,
MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
DebugLoc DL, bool NoImp) {
return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
MachineInstr(TID, DL, NoImp);
MachineInstr(MCID, DL, NoImp);
}
/// CloneMachineInstr - Create a new MachineInstr which is a copy of the


@ -24,10 +24,10 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetInstrDesc.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/DebugInfo.h"
@ -457,9 +457,9 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const MachineMemOperand &MMO) {
//===----------------------------------------------------------------------===//
/// MachineInstr ctor - This constructor creates a dummy MachineInstr with
/// TID NULL and no operands.
/// MCID NULL and no operands.
MachineInstr::MachineInstr()
: TID(0), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
: MCID(0), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
MemRefs(0), MemRefsEnd(0),
Parent(0) {
// Make sure that we get added to a machine basicblock
@ -467,23 +467,23 @@ MachineInstr::MachineInstr()
}
void MachineInstr::addImplicitDefUseOperands() {
if (TID->ImplicitDefs)
for (const unsigned *ImpDefs = TID->ImplicitDefs; *ImpDefs; ++ImpDefs)
if (MCID->ImplicitDefs)
for (const unsigned *ImpDefs = MCID->ImplicitDefs; *ImpDefs; ++ImpDefs)
addOperand(MachineOperand::CreateReg(*ImpDefs, true, true));
if (TID->ImplicitUses)
for (const unsigned *ImpUses = TID->ImplicitUses; *ImpUses; ++ImpUses)
if (MCID->ImplicitUses)
for (const unsigned *ImpUses = MCID->ImplicitUses; *ImpUses; ++ImpUses)
addOperand(MachineOperand::CreateReg(*ImpUses, false, true));
}
/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
/// implicit operands. It reserves space for the number of operands specified by
/// the TargetInstrDesc.
MachineInstr::MachineInstr(const TargetInstrDesc &tid, bool NoImp)
: TID(&tid), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
/// the MCInstrDesc.
MachineInstr::MachineInstr(const MCInstrDesc &tid, bool NoImp)
: MCID(&tid), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
MemRefs(0), MemRefsEnd(0), Parent(0) {
if (!NoImp)
NumImplicitOps = TID->getNumImplicitDefs() + TID->getNumImplicitUses();
Operands.reserve(NumImplicitOps + TID->getNumOperands());
NumImplicitOps = MCID->getNumImplicitDefs() + MCID->getNumImplicitUses();
Operands.reserve(NumImplicitOps + MCID->getNumOperands());
if (!NoImp)
addImplicitDefUseOperands();
// Make sure that we get added to a machine basicblock
@ -491,13 +491,13 @@ MachineInstr::MachineInstr(const TargetInstrDesc &tid, bool NoImp)
}
/// MachineInstr ctor - As above, but with a DebugLoc.
MachineInstr::MachineInstr(const TargetInstrDesc &tid, const DebugLoc dl,
MachineInstr::MachineInstr(const MCInstrDesc &tid, const DebugLoc dl,
bool NoImp)
: TID(&tid), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
: MCID(&tid), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
MemRefs(0), MemRefsEnd(0), Parent(0), debugLoc(dl) {
if (!NoImp)
NumImplicitOps = TID->getNumImplicitDefs() + TID->getNumImplicitUses();
Operands.reserve(NumImplicitOps + TID->getNumOperands());
NumImplicitOps = MCID->getNumImplicitDefs() + MCID->getNumImplicitUses();
Operands.reserve(NumImplicitOps + MCID->getNumOperands());
if (!NoImp)
addImplicitDefUseOperands();
// Make sure that we get added to a machine basicblock
@ -507,12 +507,12 @@ MachineInstr::MachineInstr(const TargetInstrDesc &tid, const DebugLoc dl,
/// MachineInstr ctor - Work exactly the same as the ctor two above, except
/// that the MachineInstr is created and added to the end of the specified
/// basic block.
MachineInstr::MachineInstr(MachineBasicBlock *MBB, const TargetInstrDesc &tid)
: TID(&tid), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
MachineInstr::MachineInstr(MachineBasicBlock *MBB, const MCInstrDesc &tid)
: MCID(&tid), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
MemRefs(0), MemRefsEnd(0), Parent(0) {
assert(MBB && "Cannot use inserting ctor with null basic block!");
NumImplicitOps = TID->getNumImplicitDefs() + TID->getNumImplicitUses();
Operands.reserve(NumImplicitOps + TID->getNumOperands());
NumImplicitOps = MCID->getNumImplicitDefs() + MCID->getNumImplicitUses();
Operands.reserve(NumImplicitOps + MCID->getNumOperands());
addImplicitDefUseOperands();
// Make sure that we get added to a machine basicblock
LeakDetector::addGarbageObject(this);
@ -522,12 +522,12 @@ MachineInstr::MachineInstr(MachineBasicBlock *MBB, const TargetInstrDesc &tid)
/// MachineInstr ctor - As above, but with a DebugLoc.
///
MachineInstr::MachineInstr(MachineBasicBlock *MBB, const DebugLoc dl,
const TargetInstrDesc &tid)
: TID(&tid), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
const MCInstrDesc &tid)
: MCID(&tid), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
MemRefs(0), MemRefsEnd(0), Parent(0), debugLoc(dl) {
assert(MBB && "Cannot use inserting ctor with null basic block!");
NumImplicitOps = TID->getNumImplicitDefs() + TID->getNumImplicitUses();
Operands.reserve(NumImplicitOps + TID->getNumOperands());
NumImplicitOps = MCID->getNumImplicitDefs() + MCID->getNumImplicitUses();
Operands.reserve(NumImplicitOps + MCID->getNumOperands());
addImplicitDefUseOperands();
// Make sure that we get added to a machine basicblock
LeakDetector::addGarbageObject(this);
@ -537,7 +537,7 @@ MachineInstr::MachineInstr(MachineBasicBlock *MBB, const DebugLoc dl,
/// MachineInstr ctor - Copies MachineInstr arg exactly
///
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
: TID(&MI.getDesc()), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
: MCID(&MI.getDesc()), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
MemRefs(MI.MemRefs), MemRefsEnd(MI.MemRefsEnd),
Parent(0), debugLoc(MI.getDebugLoc()) {
Operands.reserve(MI.getNumOperands());
@ -624,7 +624,7 @@ void MachineInstr::addOperand(const MachineOperand &Op) {
Operands.back().AddRegOperandToRegInfo(RegInfo);
// If the register operand is flagged as early, mark the operand as such
unsigned OpNo = Operands.size() - 1;
if (TID->getOperandConstraint(OpNo, TOI::EARLY_CLOBBER) != -1)
if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
Operands[OpNo].setIsEarlyClobber(true);
}
return;
@ -646,7 +646,7 @@ void MachineInstr::addOperand(const MachineOperand &Op) {
if (Operands[OpNo].isReg()) {
Operands[OpNo].AddRegOperandToRegInfo(0);
// If the register operand is flagged as early, mark the operand as such
if (TID->getOperandConstraint(OpNo, TOI::EARLY_CLOBBER) != -1)
if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
Operands[OpNo].setIsEarlyClobber(true);
}
@ -671,7 +671,7 @@ void MachineInstr::addOperand(const MachineOperand &Op) {
if (Operands[OpNo].isReg()) {
Operands[OpNo].AddRegOperandToRegInfo(RegInfo);
// If the register operand is flagged as early, mark the operand as such
if (TID->getOperandConstraint(OpNo, TOI::EARLY_CLOBBER) != -1)
if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
Operands[OpNo].setIsEarlyClobber(true);
}
@ -694,7 +694,7 @@ void MachineInstr::addOperand(const MachineOperand &Op) {
// If the register operand is flagged as early, mark the operand as such
if (Operands[OpNo].isReg()
&& TID->getOperandConstraint(OpNo, TOI::EARLY_CLOBBER) != -1)
&& MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
Operands[OpNo].setIsEarlyClobber(true);
}
}
@ -820,8 +820,8 @@ void MachineInstr::eraseFromParent() {
/// OperandComplete - Return true if it's illegal to add a new operand
///
bool MachineInstr::OperandsComplete() const {
unsigned short NumOperands = TID->getNumOperands();
if (!TID->isVariadic() && getNumOperands()-NumImplicitOps >= NumOperands)
unsigned short NumOperands = MCID->getNumOperands();
if (!MCID->isVariadic() && getNumOperands()-NumImplicitOps >= NumOperands)
return true; // Broken: we have all the operands of this instruction!
return false;
}
@ -829,8 +829,8 @@ bool MachineInstr::OperandsComplete() const {
/// getNumExplicitOperands - Returns the number of non-implicit operands.
///
unsigned MachineInstr::getNumExplicitOperands() const {
unsigned NumOperands = TID->getNumOperands();
if (!TID->isVariadic())
unsigned NumOperands = MCID->getNumOperands();
if (!MCID->isVariadic())
return NumOperands;
for (unsigned i = NumOperands, e = getNumOperands(); i != e; ++i) {
@ -931,10 +931,10 @@ MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
/// operand list that is used to represent the predicate. It returns -1 if
/// none is found.
int MachineInstr::findFirstPredOperandIdx() const {
const TargetInstrDesc &TID = getDesc();
if (TID.isPredicable()) {
const MCInstrDesc &MCID = getDesc();
if (MCID.isPredicable()) {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
if (TID.OpInfo[i].isPredicate())
if (MCID.OpInfo[i].isPredicate())
return i;
}
@ -990,11 +990,11 @@ isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx) const {
}
assert(getOperand(DefOpIdx).isDef() && "DefOpIdx is not a def!");
const TargetInstrDesc &TID = getDesc();
for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i) {
const MCInstrDesc &MCID = getDesc();
for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
if (MO.isReg() && MO.isUse() &&
TID.getOperandConstraint(i, TOI::TIED_TO) == (int)DefOpIdx) {
MCID.getOperandConstraint(i, MCOI::TIED_TO) == (int)DefOpIdx) {
if (UseOpIdx)
*UseOpIdx = (unsigned)i;
return true;
@ -1050,13 +1050,13 @@ isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx) const {
return false;
}
const TargetInstrDesc &TID = getDesc();
if (UseOpIdx >= TID.getNumOperands())
const MCInstrDesc &MCID = getDesc();
if (UseOpIdx >= MCID.getNumOperands())
return false;
const MachineOperand &MO = getOperand(UseOpIdx);
if (!MO.isReg() || !MO.isUse())
return false;
int DefIdx = TID.getOperandConstraint(UseOpIdx, TOI::TIED_TO);
int DefIdx = MCID.getOperandConstraint(UseOpIdx, MCOI::TIED_TO);
if (DefIdx == -1)
return false;
if (DefOpIdx)
@ -1096,11 +1096,11 @@ void MachineInstr::copyKillDeadInfo(const MachineInstr *MI) {
/// copyPredicates - Copies predicate operand(s) from MI.
void MachineInstr::copyPredicates(const MachineInstr *MI) {
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.isPredicable())
const MCInstrDesc &MCID = MI->getDesc();
if (!MCID.isPredicable())
return;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
if (TID.OpInfo[i].isPredicate()) {
if (MCID.OpInfo[i].isPredicate()) {
// Predicated operands must be last operands.
addOperand(MI->getOperand(i));
}
@ -1137,13 +1137,13 @@ bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII,
AliasAnalysis *AA,
bool &SawStore) const {
// Ignore stuff that we obviously can't move.
if (TID->mayStore() || TID->isCall()) {
if (MCID->mayStore() || MCID->isCall()) {
SawStore = true;
return false;
}
if (isLabel() || isDebugValue() ||
TID->isTerminator() || hasUnmodeledSideEffects())
MCID->isTerminator() || hasUnmodeledSideEffects())
return false;
// See if this instruction does a load. If so, we have to guarantee that the
@ -1151,7 +1151,7 @@ bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII,
// destination. The check for isInvariantLoad gives the targe the chance to
// classify the load as always returning a constant, e.g. a constant pool
// load.
if (TID->mayLoad() && !isInvariantLoad(AA))
if (MCID->mayLoad() && !isInvariantLoad(AA))
// Otherwise, this is a real load. If there is a store between the load and
// end of block, or if the load is volatile, we can't move it.
return !SawStore && !hasVolatileMemoryRef();
@ -1191,9 +1191,9 @@ bool MachineInstr::isSafeToReMat(const TargetInstrInfo *TII,
/// have no volatile memory references.
bool MachineInstr::hasVolatileMemoryRef() const {
// An instruction known never to access memory won't have a volatile access.
if (!TID->mayStore() &&
!TID->mayLoad() &&
!TID->isCall() &&
if (!MCID->mayStore() &&
!MCID->mayLoad() &&
!MCID->isCall() &&
!hasUnmodeledSideEffects())
return false;
@ -1217,7 +1217,7 @@ bool MachineInstr::hasVolatileMemoryRef() const {
/// *all* loads the instruction does are invariant (if it does multiple loads).
bool MachineInstr::isInvariantLoad(AliasAnalysis *AA) const {
// If the instruction doesn't load at all, it isn't an invariant load.
if (!TID->mayLoad())
if (!MCID->mayLoad())
return false;
// If the instruction has lost its memoperands, conservatively assume that
@ -1421,10 +1421,10 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
if (FirstOp) FirstOp = false; else OS << ",";
OS << " ";
if (i < getDesc().NumOperands) {
const TargetOperandInfo &TOI = getDesc().OpInfo[i];
if (TOI.isPredicate())
const MCOperandInfo &MCOI = getDesc().OpInfo[i];
if (MCOI.isPredicate())
OS << "pred:";
if (TOI.isOptionalDef())
if (MCOI.isOptionalDef())
OS << "opt:";
}
if (isDebugValue() && MO.isMetadata()) {


@ -1018,9 +1018,9 @@ MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
/*UnfoldStore=*/false,
&LoadRegIndex);
if (NewOpc == 0) return 0;
const TargetInstrDesc &TID = TII->get(NewOpc);
if (TID.getNumDefs() != 1) return 0;
const TargetRegisterClass *RC = TII->getRegClass(TID, LoadRegIndex, TRI);
const MCInstrDesc &MID = TII->get(NewOpc);
if (MID.getNumDefs() != 1) return 0;
const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI);
// Ok, we're unfolding. Create a temporary register and do the unfold.
unsigned Reg = MRI->createVirtualRegister(RC);


@ -541,19 +541,19 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
}
void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
const TargetInstrDesc &TI = MI->getDesc();
if (MI->getNumOperands() < TI.getNumOperands()) {
const MCInstrDesc &MCID = MI->getDesc();
if (MI->getNumOperands() < MCID.getNumOperands()) {
report("Too few operands", MI);
*OS << TI.getNumOperands() << " operands expected, but "
*OS << MCID.getNumOperands() << " operands expected, but "
<< MI->getNumExplicitOperands() << " given.\n";
}
// Check the MachineMemOperands for basic consistency.
for (MachineInstr::mmo_iterator I = MI->memoperands_begin(),
E = MI->memoperands_end(); I != E; ++I) {
if ((*I)->isLoad() && !TI.mayLoad())
if ((*I)->isLoad() && !MCID.mayLoad())
report("Missing mayLoad flag", MI);
if ((*I)->isStore() && !TI.mayStore())
if ((*I)->isStore() && !MCID.mayStore())
report("Missing mayStore flag", MI);
}
@ -575,29 +575,30 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
void
MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
const MachineInstr *MI = MO->getParent();
const TargetInstrDesc &TI = MI->getDesc();
const TargetOperandInfo &TOI = TI.OpInfo[MONum];
const MCInstrDesc &MCID = MI->getDesc();
const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
// The first TI.NumDefs operands must be explicit register defines
if (MONum < TI.getNumDefs()) {
// The first MCID.NumDefs operands must be explicit register defines
if (MONum < MCID.getNumDefs()) {
if (!MO->isReg())
report("Explicit definition must be a register", MO, MONum);
else if (!MO->isDef())
report("Explicit definition marked as use", MO, MONum);
else if (MO->isImplicit())
report("Explicit definition marked as implicit", MO, MONum);
} else if (MONum < TI.getNumOperands()) {
} else if (MONum < MCID.getNumOperands()) {
// Don't check if it's the last operand in a variadic instruction. See,
// e.g., LDM_RET in the arm back end.
if (MO->isReg() && !(TI.isVariadic() && MONum == TI.getNumOperands()-1)) {
if (MO->isDef() && !TOI.isOptionalDef())
if (MO->isReg() &&
!(MCID.isVariadic() && MONum == MCID.getNumOperands()-1)) {
if (MO->isDef() && !MCOI.isOptionalDef())
report("Explicit operand marked as def", MO, MONum);
if (MO->isImplicit())
report("Explicit operand marked as implicit", MO, MONum);
}
} else {
// ARM adds %reg0 operands to indicate predicates. We'll allow that.
if (MO->isReg() && !MO->isImplicit() && !TI.isVariadic() && MO->getReg())
if (MO->isReg() && !MO->isImplicit() && !MCID.isVariadic() && MO->getReg())
report("Extra explicit operand on non-variadic instruction", MO, MONum);
}
@ -709,7 +710,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
}
// Check register classes.
if (MONum < TI.getNumOperands() && !MO->isImplicit()) {
if (MONum < MCID.getNumOperands() && !MO->isImplicit()) {
unsigned SubIdx = MO->getSubReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
@ -723,7 +724,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
}
sr = s;
}
if (const TargetRegisterClass *DRC = TII->getRegClass(TI, MONum, TRI)) {
if (const TargetRegisterClass *DRC = TII->getRegClass(MCID,MONum,TRI)) {
if (!DRC->contains(sr)) {
report("Illegal physical register for instruction", MO, MONum);
*OS << TRI->getName(sr) << " is not a "
@ -743,7 +744,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
}
RC = SRC;
}
if (const TargetRegisterClass *DRC = TII->getRegClass(TI, MONum, TRI)) {
if (const TargetRegisterClass *DRC = TII->getRegClass(MCID,MONum,TRI)) {
if (!RC->hasSuperClassEq(DRC)) {
report("Illegal virtual register for instruction", MO, MONum);
*OS << "Expected a " << DRC->getName() << " register, but got a "
@ -765,11 +766,11 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
LiveInts && !LiveInts->isNotInMIMap(MI)) {
LiveInterval &LI = LiveStks->getInterval(MO->getIndex());
SlotIndex Idx = LiveInts->getInstructionIndex(MI);
if (TI.mayLoad() && !LI.liveAt(Idx.getUseIndex())) {
if (MCID.mayLoad() && !LI.liveAt(Idx.getUseIndex())) {
report("Instruction loads from dead spill slot", MO, MONum);
*OS << "Live stack: " << LI << '\n';
}
if (TI.mayStore() && !LI.liveAt(Idx.getDefIndex())) {
if (MCID.mayStore() && !LI.liveAt(Idx.getDefIndex())) {
report("Instruction stores to dead spill slot", MO, MONum);
*OS << "Live stack: " << LI << '\n';
}


@ -353,10 +353,10 @@ bool PeepholeOptimizer::OptimizeCmpInstr(MachineInstr *MI,
bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
SmallSet<unsigned, 4> &ImmDefRegs,
DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.isMoveImmediate())
const MCInstrDesc &MCID = MI->getDesc();
if (!MCID.isMoveImmediate())
return false;
if (TID.getNumDefs() != 1)
if (MCID.getNumDefs() != 1)
return false;
unsigned Reg = MI->getOperand(0).getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
@ -429,16 +429,16 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
continue;
}
const TargetInstrDesc &TID = MI->getDesc();
const MCInstrDesc &MCID = MI->getDesc();
if (TID.isBitcast()) {
if (MCID.isBitcast()) {
if (OptimizeBitcastInstr(MI, MBB)) {
// MI is deleted.
Changed = true;
MII = First ? I->begin() : llvm::next(PMII);
continue;
}
} else if (TID.isCompare()) {
} else if (MCID.isCompare()) {
if (OptimizeCmpInstr(MI, MBB)) {
// MI is deleted.
Changed = true;


@ -118,7 +118,7 @@ namespace {
// SkippedInstrs - Descriptors of instructions whose clobber list was
// ignored because all registers were spilled. It is still necessary to
// mark all the clobbered registers as used by the function.
SmallPtrSet<const TargetInstrDesc*, 4> SkippedInstrs;
SmallPtrSet<const MCInstrDesc*, 4> SkippedInstrs;
// isBulkSpilling - This flag is set when LiveRegMap will be cleared
// completely after spilling all live registers. LiveRegMap entries should
@ -777,7 +777,7 @@ void RAFast::AllocateBasicBlock() {
// Otherwise, sequentially allocate each instruction in the MBB.
while (MII != MBB->end()) {
MachineInstr *MI = MII++;
const TargetInstrDesc &TID = MI->getDesc();
const MCInstrDesc &MCID = MI->getDesc();
DEBUG({
dbgs() << "\n>> " << *MI << "Regs:";
for (unsigned Reg = 1, E = TRI->getNumRegs(); Reg != E; ++Reg) {
@ -890,7 +890,7 @@ void RAFast::AllocateBasicBlock() {
VirtOpEnd = i+1;
if (MO.isUse()) {
hasTiedOps = hasTiedOps ||
TID.getOperandConstraint(i, TOI::TIED_TO) != -1;
MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1;
} else {
if (MO.isEarlyClobber())
hasEarlyClobbers = true;
@ -920,7 +920,7 @@ void RAFast::AllocateBasicBlock() {
// We didn't detect inline asm tied operands above, so just make this extra
// pass for all inline asm.
if (MI->isInlineAsm() || hasEarlyClobbers || hasPartialRedefs ||
(hasTiedOps && (hasPhysDefs || TID.getNumDefs() > 1))) {
(hasTiedOps && (hasPhysDefs || MCID.getNumDefs() > 1))) {
handleThroughOperands(MI, VirtDead);
// Don't attempt coalescing when we have funny stuff going on.
CopyDst = 0;
@ -965,7 +965,7 @@ void RAFast::AllocateBasicBlock() {
}
unsigned DefOpEnd = MI->getNumOperands();
if (TID.isCall()) {
if (MCID.isCall()) {
// Spill all virtregs before a call. This serves two purposes: 1. If an
// exception is thrown, the landing pad is going to expect to find
// registers in their spill slots, and 2. we don't have to wade through
@ -976,7 +976,7 @@ void RAFast::AllocateBasicBlock() {
// The imp-defs are skipped below, but we still need to mark those
// registers as used by the function.
SkippedInstrs.insert(&TID);
SkippedInstrs.insert(&MCID);
}
// Third scan.
@ -1062,7 +1062,7 @@ bool RAFast::runOnMachineFunction(MachineFunction &Fn) {
MRI->closePhysRegsUsed(*TRI);
// Add the clobber lists for all the instructions we skipped earlier.
for (SmallPtrSet<const TargetInstrDesc*, 4>::const_iterator
for (SmallPtrSet<const MCInstrDesc*, 4>::const_iterator
I = SkippedInstrs.begin(), E = SkippedInstrs.end(); I != E; ++I)
if (const unsigned *Defs = (*I)->getImplicitDefs())
while (*Defs)


@ -526,8 +526,8 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
MachineInstr *DefMI = li_->getInstructionFromIndex(AValNo->def);
if (!DefMI)
return false;
const TargetInstrDesc &TID = DefMI->getDesc();
if (!TID.isCommutable())
const MCInstrDesc &MCID = DefMI->getDesc();
if (!MCID.isCommutable())
return false;
// If DefMI is a two-address instruction then commuting it will change the
// destination register.
@ -687,21 +687,21 @@ bool RegisterCoalescer::ReMaterializeTrivialDef(LiveInterval &SrcInt,
if (!DefMI)
return false;
assert(DefMI && "Defining instruction disappeared");
const TargetInstrDesc &TID = DefMI->getDesc();
if (!TID.isAsCheapAsAMove())
const MCInstrDesc &MCID = DefMI->getDesc();
if (!MCID.isAsCheapAsAMove())
return false;
if (!tii_->isTriviallyReMaterializable(DefMI, AA))
return false;
bool SawStore = false;
if (!DefMI->isSafeToMove(tii_, AA, SawStore))
return false;
if (TID.getNumDefs() != 1)
if (MCID.getNumDefs() != 1)
return false;
if (!DefMI->isImplicitDef()) {
// Make sure the copy destination register class fits the instruction
// definition register class. The mismatch can happen as a result of earlier
// extract_subreg, insert_subreg, subreg_to_reg coalescing.
const TargetRegisterClass *RC = tii_->getRegClass(TID, 0, tri_);
const TargetRegisterClass *RC = tii_->getRegClass(MCID, 0, tri_);
if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
if (mri_->getRegClass(DstReg) != RC)
return false;
@ -712,13 +712,13 @@ bool RegisterCoalescer::ReMaterializeTrivialDef(LiveInterval &SrcInt,
// If destination register has a sub-register index on it, make sure it
// matches the instruction register class.
if (DstSubIdx) {
const TargetInstrDesc &TID = DefMI->getDesc();
if (TID.getNumDefs() != 1)
const MCInstrDesc &MCID = DefMI->getDesc();
if (MCID.getNumDefs() != 1)
return false;
const TargetRegisterClass *DstRC = mri_->getRegClass(DstReg);
const TargetRegisterClass *DstSubRC =
DstRC->getSubRegisterRegClass(DstSubIdx);
const TargetRegisterClass *DefRC = tii_->getRegClass(TID, 0, tri_);
const TargetRegisterClass *DefRC = tii_->getRegClass(MCID, 0, tri_);
if (DefRC == DstRC)
DstSubIdx = 0;
else if (DefRC != DstSubRC)


@ -45,7 +45,7 @@ ScheduleDAG::ScheduleDAG(MachineFunction &mf)
ScheduleDAG::~ScheduleDAG() {}
/// getInstrDesc helper to handle SDNodes.
const TargetInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
if (!Node || !Node->isMachineOpcode()) return NULL;
return &TII->get(Node->getMachineOpcode());
}


@ -236,13 +236,13 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
continue;
}
const TargetInstrDesc &TID = MI->getDesc();
assert(!TID.isTerminator() && !MI->isLabel() &&
const MCInstrDesc &MCID = MI->getDesc();
assert(!MCID.isTerminator() && !MI->isLabel() &&
"Cannot schedule terminators or labels!");
// Create the SUnit for this MI.
SUnit *SU = NewSUnit(MI);
SU->isCall = TID.isCall();
SU->isCommutable = TID.isCommutable();
SU->isCall = MCID.isCall();
SU->isCommutable = MCID.isCommutable();
// Assign the Latency field of SU using target-provided information.
if (UnitLatencies)
@ -309,13 +309,13 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
if (SpecialAddressLatency != 0 && !UnitLatencies &&
UseSU != &ExitSU) {
MachineInstr *UseMI = UseSU->getInstr();
const TargetInstrDesc &UseTID = UseMI->getDesc();
const MCInstrDesc &UseMCID = UseMI->getDesc();
int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
assert(RegUseIndex >= 0 && "UseMI doesn's use register!");
if (RegUseIndex >= 0 &&
(UseTID.mayLoad() || UseTID.mayStore()) &&
(unsigned)RegUseIndex < UseTID.getNumOperands() &&
UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
(UseMCID.mayLoad() || UseMCID.mayStore()) &&
(unsigned)RegUseIndex < UseMCID.getNumOperands() &&
UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
LDataLatency += SpecialAddressLatency;
}
// Adjust the dependence latency using operand def/use
@ -352,17 +352,17 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
unsigned Count = I->second.second;
const MachineInstr *UseMI = UseMO->getParent();
unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
const TargetInstrDesc &UseTID = UseMI->getDesc();
const MCInstrDesc &UseMCID = UseMI->getDesc();
// TODO: If we knew the total depth of the region here, we could
// handle the case where the whole loop is inside the region but
// is large enough that the isScheduleHigh trick isn't needed.
if (UseMOIdx < UseTID.getNumOperands()) {
if (UseMOIdx < UseMCID.getNumOperands()) {
// Currently, we only support scheduling regions consisting of
// single basic blocks. Check to see if the instruction is in
// the same region by checking to see if it has the same parent.
if (UseMI->getParent() != MI->getParent()) {
unsigned Latency = SU->Latency;
if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
if (UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass())
Latency += SpecialAddressLatency;
// This is a wild guess as to the portion of the latency which
// will be overlapped by work done outside the current
@ -374,7 +374,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
/*isMustAlias=*/false,
/*isArtificial=*/true));
} else if (SpecialAddressLatency > 0 &&
UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
// The entire loop body is within the current scheduling region
// and the latency of this operation is assumed to be greater
// than the latency of the loop.
@ -417,9 +417,9 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
// produce more precise dependence information.
#define STORE_LOAD_LATENCY 1
unsigned TrueMemOrderLatency = 0;
if (TID.isCall() || MI->hasUnmodeledSideEffects() ||
if (MCID.isCall() || MI->hasUnmodeledSideEffects() ||
(MI->hasVolatileMemoryRef() &&
(!TID.mayLoad() || !MI->isInvariantLoad(AA)))) {
(!MCID.mayLoad() || !MI->isInvariantLoad(AA)))) {
// Be conservative with these and add dependencies on all memory
// references, even those that are known to not alias.
for (std::map<const Value *, SUnit *>::iterator I =
@ -458,7 +458,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
PendingLoads.clear();
AliasMemDefs.clear();
AliasMemUses.clear();
} else if (TID.mayStore()) {
} else if (MCID.mayStore()) {
bool MayAlias = true;
TrueMemOrderLatency = STORE_LOAD_LATENCY;
if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
@ -514,7 +514,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
/*Reg=*/0, /*isNormalMemory=*/false,
/*isMustAlias=*/false,
/*isArtificial=*/true));
} else if (TID.mayLoad()) {
} else if (MCID.mayLoad()) {
bool MayAlias = true;
TrueMemOrderLatency = 0;
if (MI->isInvariantLoad(AA)) {


@ -115,12 +115,12 @@ ScoreboardHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
// Use the itinerary for the underlying instruction to check for
// free FU's in the scoreboard at the appropriate future cycles.
const TargetInstrDesc *TID = DAG->getInstrDesc(SU);
if (TID == NULL) {
const MCInstrDesc *MCID = DAG->getInstrDesc(SU);
if (MCID == NULL) {
// Don't check hazards for non-machineinstr Nodes.
return NoHazard;
}
unsigned idx = TID->getSchedClass();
unsigned idx = MCID->getSchedClass();
for (const InstrStage *IS = ItinData->beginStage(idx),
*E = ItinData->endStage(idx); IS != E; ++IS) {
// We must find one of the stage's units free for every cycle the
@ -173,16 +173,16 @@ void ScoreboardHazardRecognizer::EmitInstruction(SUnit *SU) {
// Use the itinerary for the underlying instruction to reserve FU's
// in the scoreboard at the appropriate future cycles.
const TargetInstrDesc *TID = DAG->getInstrDesc(SU);
assert(TID && "The scheduler must filter non-machineinstrs");
if (DAG->TII->isZeroCost(TID->Opcode))
const MCInstrDesc *MCID = DAG->getInstrDesc(SU);
assert(MCID && "The scheduler must filter non-machineinstrs");
if (DAG->TII->isZeroCost(MCID->Opcode))
return;
++IssueCount;
unsigned cycle = 0;
unsigned idx = TID->getSchedClass();
unsigned idx = MCID->getSchedClass();
for (const InstrStage *IS = ItinData->beginStage(idx),
*E = ItinData->endStage(idx); IS != E; ++IS) {
// We must reserve one of the stage's units for every cycle the

View File

@ -569,7 +569,7 @@ bool FastISel::SelectCall(const User *I) {
case Intrinsic::dbg_value: {
// This form of DBG_VALUE is target-independent.
const DbgValueInst *DI = cast<DbgValueInst>(Call);
const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
const Value *V = DI->getValue();
if (!V) {
// Currently the optimizer can produce this; insert an undef to
@ -1112,7 +1112,7 @@ unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
const TargetRegisterClass* RC) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
return ResultReg;
@ -1122,7 +1122,7 @@ unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@ -1142,7 +1142,7 @@ unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@ -1164,7 +1164,7 @@ unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
unsigned Op1, bool Op1IsKill,
unsigned Op2, bool Op2IsKill) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@ -1187,7 +1187,7 @@ unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
unsigned Op0, bool Op0IsKill,
uint64_t Imm) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@ -1208,7 +1208,7 @@ unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
unsigned Op0, bool Op0IsKill,
uint64_t Imm1, uint64_t Imm2) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@ -1231,7 +1231,7 @@ unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
unsigned Op0, bool Op0IsKill,
const ConstantFP *FPImm) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@ -1253,7 +1253,7 @@ unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
unsigned Op1, bool Op1IsKill,
uint64_t Imm) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@ -1275,7 +1275,7 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
uint64_t Imm) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
@ -1291,7 +1291,7 @@ unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
uint64_t Imm1, uint64_t Imm2) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)

View File

@ -106,7 +106,7 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
continue;
Match = false;
if (User->isMachineOpcode()) {
const TargetInstrDesc &II = TII->get(User->getMachineOpcode());
const MCInstrDesc &II = TII->get(User->getMachineOpcode());
const TargetRegisterClass *RC = 0;
if (i+II.getNumDefs() < II.getNumOperands())
RC = TII->getRegClass(II, i+II.getNumDefs(), TRI);
@ -178,7 +178,7 @@ unsigned InstrEmitter::getDstOfOnlyCopyToRegUse(SDNode *Node,
}
void InstrEmitter::CreateVirtualRegisters(SDNode *Node, MachineInstr *MI,
const TargetInstrDesc &II,
const MCInstrDesc &II,
bool IsClone, bool IsCloned,
DenseMap<SDValue, unsigned> &VRBaseMap) {
assert(Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF &&
@ -242,7 +242,7 @@ unsigned InstrEmitter::getVR(SDValue Op,
Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
// Add an IMPLICIT_DEF instruction before every use.
unsigned VReg = getDstOfOnlyCopyToRegUse(Op.getNode(), Op.getResNo());
// IMPLICIT_DEF can produce any type of result so its TargetInstrDesc
// IMPLICIT_DEF can produce any type of result so its MCInstrDesc
// does not include operand register class info.
if (!VReg) {
const TargetRegisterClass *RC = TLI->getRegClassFor(Op.getValueType());
@ -265,7 +265,7 @@ unsigned InstrEmitter::getVR(SDValue Op,
void
InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
unsigned IIOpNum,
const TargetInstrDesc *II,
const MCInstrDesc *II,
DenseMap<SDValue, unsigned> &VRBaseMap,
bool IsDebug, bool IsClone, bool IsCloned) {
assert(Op.getValueType() != MVT::Other &&
@ -275,9 +275,9 @@ InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
unsigned VReg = getVR(Op, VRBaseMap);
assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
const TargetInstrDesc &TID = MI->getDesc();
bool isOptDef = IIOpNum < TID.getNumOperands() &&
TID.OpInfo[IIOpNum].isOptionalDef();
const MCInstrDesc &MCID = MI->getDesc();
bool isOptDef = IIOpNum < MCID.getNumOperands() &&
MCID.OpInfo[IIOpNum].isOptionalDef();
// If the instruction requires a register in a different class, create
// a new virtual register and copy the value into it.
@ -286,7 +286,7 @@ InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
const TargetRegisterClass *DstRC = 0;
if (IIOpNum < II->getNumOperands())
DstRC = TII->getRegClass(*II, IIOpNum, TRI);
assert((DstRC || (TID.isVariadic() && IIOpNum >= TID.getNumOperands())) &&
assert((DstRC || (MCID.isVariadic() && IIOpNum >= MCID.getNumOperands())) &&
"Don't have operand info for this instruction!");
if (DstRC && !SrcRC->hasSuperClassEq(DstRC)) {
unsigned NewVReg = MRI->createVirtualRegister(DstRC);
@ -312,7 +312,7 @@ InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
while (Idx > 0 &&
MI->getOperand(Idx-1).isReg() && MI->getOperand(Idx-1).isImplicit())
--Idx;
bool isTied = MI->getDesc().getOperandConstraint(Idx, TOI::TIED_TO) != -1;
bool isTied = MI->getDesc().getOperandConstraint(Idx, MCOI::TIED_TO) != -1;
if (isTied)
isKill = false;
}
@ -330,7 +330,7 @@ InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
/// assertions only.
void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
unsigned IIOpNum,
const TargetInstrDesc *II,
const MCInstrDesc *II,
DenseMap<SDValue, unsigned> &VRBaseMap,
bool IsDebug, bool IsClone, bool IsCloned) {
if (Op.isMachineOpcode()) {
@ -556,7 +556,7 @@ void InstrEmitter::EmitRegSequence(SDNode *Node,
unsigned NumOps = Node->getNumOperands();
assert((NumOps & 1) == 1 &&
"REG_SEQUENCE must have an odd number of operands!");
const TargetInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
for (unsigned i = 1; i != NumOps; ++i) {
SDValue Op = Node->getOperand(i);
if ((i & 1) == 0) {
@ -597,7 +597,7 @@ InstrEmitter::EmitDbgValue(SDDbgValue *SD,
return TII->emitFrameIndexDebugValue(*MF, FrameIx, Offset, MDPtr, DL);
}
// Otherwise, we're going to create an instruction here.
const TargetInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);
const MCInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);
MachineInstrBuilder MIB = BuildMI(*MF, DL, II);
if (SD->getKind() == SDDbgValue::SDNODE) {
SDNode *Node = SD->getSDNode();
@ -668,7 +668,7 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
// We want a unique VR for each IMPLICIT_DEF use.
return;
const TargetInstrDesc &II = TII->get(Opc);
const MCInstrDesc &II = TII->get(Opc);
unsigned NumResults = CountResults(Node);
unsigned NodeOperands = CountOperands(Node);
bool HasPhysRegOuts = NumResults > II.getNumDefs() && II.getImplicitDefs()!=0;
@ -697,9 +697,9 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
else {
// Collect declared implicit uses.
const TargetInstrDesc &TID = TII->get(F->getMachineOpcode());
UsedRegs.append(TID.getImplicitUses(),
TID.getImplicitUses() + TID.getNumImplicitUses());
const MCInstrDesc &MCID = TII->get(F->getMachineOpcode());
UsedRegs.append(MCID.getImplicitUses(),
MCID.getImplicitUses() + MCID.getNumImplicitUses());
// In addition to declared implicit uses, we must also check for
// direct RegisterSDNode operands.
for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)

View File

@ -22,7 +22,7 @@
namespace llvm {
class TargetInstrDesc;
class MCInstrDesc;
class SDDbgValue;
class InstrEmitter {
@ -49,7 +49,7 @@ class InstrEmitter {
unsigned ResNo) const;
void CreateVirtualRegisters(SDNode *Node, MachineInstr *MI,
const TargetInstrDesc &II,
const MCInstrDesc &II,
bool IsClone, bool IsCloned,
DenseMap<SDValue, unsigned> &VRBaseMap);
@ -63,7 +63,7 @@ class InstrEmitter {
/// not in the required register class.
void AddRegisterOperand(MachineInstr *MI, SDValue Op,
unsigned IIOpNum,
const TargetInstrDesc *II,
const MCInstrDesc *II,
DenseMap<SDValue, unsigned> &VRBaseMap,
bool IsDebug, bool IsClone, bool IsCloned);
@ -73,7 +73,7 @@ class InstrEmitter {
/// assertions only.
void AddOperand(MachineInstr *MI, SDValue Op,
unsigned IIOpNum,
const TargetInstrDesc *II,
const MCInstrDesc *II,
DenseMap<SDValue, unsigned> &VRBaseMap,
bool IsDebug, bool IsClone, bool IsCloned);

View File

@ -249,14 +249,14 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NewSU->NodeNum);
const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
NewSU->isTwoAddress = true;
break;
}
}
if (TID.isCommutable())
if (MCID.isCommutable())
NewSU->isCommutable = true;
// LoadNode may already exist. This can happen when there is another
@ -422,10 +422,10 @@ void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
const TargetInstrInfo *TII) {
const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
unsigned NumRes = TID.getNumDefs();
for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
unsigned NumRes = MCID.getNumDefs();
for (const unsigned *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
if (Reg == *ImpDef)
break;
++NumRes;
@ -505,10 +505,10 @@ bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
}
if (!Node->isMachineOpcode())
continue;
const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
if (!TID.ImplicitDefs)
const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
if (!MCID.ImplicitDefs)
continue;
for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
for (const unsigned *Reg = MCID.ImplicitDefs; *Reg; ++Reg) {
CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
}
}

View File

@ -302,7 +302,7 @@ static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
}
unsigned Idx = RegDefPos.GetIdx();
const TargetInstrDesc Desc = TII->get(Opcode);
const MCInstrDesc Desc = TII->get(Opcode);
const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI);
RegClass = RC->getID();
// FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
@ -837,14 +837,14 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NewSU->NodeNum);
const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
NewSU->isTwoAddress = true;
break;
}
}
if (TID.isCommutable())
if (MCID.isCommutable())
NewSU->isCommutable = true;
InitNumRegDefsLeft(NewSU);
@ -1024,10 +1024,10 @@ void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
const TargetInstrInfo *TII) {
const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
unsigned NumRes = TID.getNumDefs();
for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
unsigned NumRes = MCID.getNumDefs();
for (const unsigned *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
if (Reg == *ImpDef)
break;
++NumRes;
@ -1108,10 +1108,10 @@ DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
if (!Node->isMachineOpcode())
continue;
const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
if (!TID.ImplicitDefs)
const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
if (!MCID.ImplicitDefs)
continue;
for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg)
for (const unsigned *Reg = MCID.ImplicitDefs; *Reg; ++Reg)
CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
}
@ -2606,11 +2606,11 @@ void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
if (SU->isTwoAddress) {
unsigned Opc = SU->getNode()->getMachineOpcode();
const TargetInstrDesc &TID = TII->get(Opc);
unsigned NumRes = TID.getNumDefs();
unsigned NumOps = TID.getNumOperands() - NumRes;
const MCInstrDesc &MCID = TII->get(Opc);
unsigned NumRes = MCID.getNumDefs();
unsigned NumOps = MCID.getNumOperands() - NumRes;
for (unsigned i = 0; i != NumOps; ++i) {
if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
SDNode *DU = SU->getNode()->getOperand(i).getNode();
if (DU->getNodeId() != -1 &&
Op->OrigNode == &(*SUnits)[DU->getNodeId()])
@ -2790,11 +2790,11 @@ void RegReductionPQBase::AddPseudoTwoAddrDeps() {
bool isLiveOut = hasOnlyLiveOutUses(SU);
unsigned Opc = Node->getMachineOpcode();
const TargetInstrDesc &TID = TII->get(Opc);
unsigned NumRes = TID.getNumDefs();
unsigned NumOps = TID.getNumOperands() - NumRes;
const MCInstrDesc &MCID = TII->get(Opc);
unsigned NumRes = MCID.getNumDefs();
unsigned NumOps = MCID.getNumOperands() - NumRes;
for (unsigned j = 0; j != NumOps; ++j) {
if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
continue;
SDNode *DU = SU->getNode()->getOperand(j).getNode();
if (DU->getNodeId() == -1)

View File

@ -111,7 +111,7 @@ static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
unsigned ResNo = User->getOperand(2).getResNo();
if (Def->isMachineOpcode()) {
const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
const MCInstrDesc &II = TII->get(Def->getMachineOpcode());
if (ResNo >= II.getNumDefs() &&
II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
PhysReg = Reg;
@ -255,8 +255,8 @@ void ScheduleDAGSDNodes::ClusterNodes() {
continue;
unsigned Opc = Node->getMachineOpcode();
const TargetInstrDesc &TID = TII->get(Opc);
if (TID.mayLoad())
const MCInstrDesc &MCID = TII->get(Opc);
if (MCID.mayLoad())
// Cluster loads from "near" addresses into combined SUnits.
ClusterNeighboringLoads(Node);
}
@ -390,14 +390,14 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
if (MainNode->isMachineOpcode()) {
unsigned Opc = MainNode->getMachineOpcode();
const TargetInstrDesc &TID = TII->get(Opc);
for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
const MCInstrDesc &MCID = TII->get(Opc);
for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
SU->isTwoAddress = true;
break;
}
}
if (TID.isCommutable())
if (MCID.isCommutable())
SU->isCommutable = true;
}

View File

@ -354,9 +354,9 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
const MachineBasicBlock *MBB = I;
for (MachineBasicBlock::const_iterator
II = MBB->begin(), IE = MBB->end(); II != IE; ++II) {
const TargetInstrDesc &TID = TM.getInstrInfo()->get(II->getOpcode());
const MCInstrDesc &MCID = TM.getInstrInfo()->get(II->getOpcode());
if ((TID.isCall() && !TID.isReturn()) ||
if ((MCID.isCall() && !MCID.isReturn()) ||
II->isStackAligningInlineAsm()) {
MFI->setHasCalls(true);
goto done;
@ -681,7 +681,7 @@ void SelectionDAGISel::PrepareEHLandingPad() {
// landing pad can thus be detected via the MachineModuleInfo.
MCSymbol *Label = MF->getMMI().addLandingPad(FuncInfo->MBB);
const TargetInstrDesc &II = TM.getInstrInfo()->get(TargetOpcode::EH_LABEL);
const MCInstrDesc &II = TM.getInstrInfo()->get(TargetOpcode::EH_LABEL);
BuildMI(*FuncInfo->MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
.addSym(Label);
@ -2613,9 +2613,9 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
if (EmitNodeInfo & OPFL_MemRefs) {
// Only attach load or store memory operands if the generated
// instruction may load or store.
const TargetInstrDesc &TID = TM.getInstrInfo()->get(TargetOpc);
bool mayLoad = TID.mayLoad();
bool mayStore = TID.mayStore();
const MCInstrDesc &MCID = TM.getInstrInfo()->get(TargetOpc);
bool mayLoad = MCID.mayLoad();
bool mayStore = MCID.mayStore();
unsigned NumMemRefs = 0;
for (SmallVector<MachineMemOperand*, 2>::const_iterator I =

View File

@ -504,7 +504,7 @@ bool StackSlotColoring::PropagateBackward(MachineBasicBlock::iterator MII,
bool FoundDef = false; // Not counting 2address def.
Uses.clear();
const TargetInstrDesc &TID = MII->getDesc();
const MCInstrDesc &MCID = MII->getDesc();
for (unsigned i = 0, e = MII->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MII->getOperand(i);
if (!MO.isReg())
@ -521,7 +521,7 @@ bool StackSlotColoring::PropagateBackward(MachineBasicBlock::iterator MII,
if (MO.getSubReg() || MII->isSubregToReg())
return false;
const TargetRegisterClass *RC = TII->getRegClass(TID, i, TRI);
const TargetRegisterClass *RC = TII->getRegClass(MCID, i, TRI);
if (RC && !RC->contains(NewReg))
return false;
@ -566,7 +566,7 @@ bool StackSlotColoring::PropagateForward(MachineBasicBlock::iterator MII,
SmallVector<MachineOperand*, 4> Uses;
while (++MII != MBB->end()) {
bool FoundKill = false;
const TargetInstrDesc &TID = MII->getDesc();
const MCInstrDesc &MCID = MII->getDesc();
for (unsigned i = 0, e = MII->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MII->getOperand(i);
if (!MO.isReg())
@ -583,7 +583,7 @@ bool StackSlotColoring::PropagateForward(MachineBasicBlock::iterator MII,
if (MO.getSubReg())
return false;
const TargetRegisterClass *RC = TII->getRegClass(TID, i, TRI);
const TargetRegisterClass *RC = TII->getRegClass(MCID, i, TRI);
if (RC && !RC->contains(NewReg))
return false;
if (MO.isKill())

View File

@ -529,8 +529,8 @@ TailDuplicatePass::shouldTailDuplicate(const MachineFunction &MF,
bool hasIndirectBR = false;
if (PreRegAlloc && !TailBB.empty()) {
const TargetInstrDesc &TID = TailBB.back().getDesc();
if (TID.isIndirectBranch()) {
const MCInstrDesc &MCID = TailBB.back().getDesc();
if (MCID.isIndirectBranch()) {
MaxDuplicateCount = 20;
hasIndirectBR = true;
}

View File

@ -59,8 +59,8 @@ TargetInstrInfoImpl::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
// the two operands returned by findCommutedOpIndices.
MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
bool NewMI) const {
const TargetInstrDesc &TID = MI->getDesc();
bool HasDef = TID.getNumDefs();
const MCInstrDesc &MCID = MI->getDesc();
bool HasDef = MCID.getNumDefs();
if (HasDef && !MI->getOperand(0).isReg())
// No idea how to commute this instruction. Target should implement its own.
return 0;
@ -81,7 +81,7 @@ MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
bool ChangeReg0 = false;
if (HasDef && MI->getOperand(0).getReg() == Reg1) {
// Must be two address instruction!
assert(MI->getDesc().getOperandConstraint(0, TOI::TIED_TO) &&
assert(MI->getDesc().getOperandConstraint(0, MCOI::TIED_TO) &&
"Expecting a two-address instruction!");
Reg2IsKill = false;
ChangeReg0 = true;
@ -119,12 +119,12 @@ MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
bool TargetInstrInfoImpl::findCommutedOpIndices(MachineInstr *MI,
unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const {
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.isCommutable())
const MCInstrDesc &MCID = MI->getDesc();
if (!MCID.isCommutable())
return false;
// This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
// is not true, then the target must implement this.
SrcOpIdx1 = TID.getNumDefs();
SrcOpIdx1 = MCID.getNumDefs();
SrcOpIdx2 = SrcOpIdx1 + 1;
if (!MI->getOperand(SrcOpIdx1).isReg() ||
!MI->getOperand(SrcOpIdx2).isReg())
@ -137,12 +137,12 @@ bool TargetInstrInfoImpl::findCommutedOpIndices(MachineInstr *MI,
bool TargetInstrInfoImpl::PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const {
bool MadeChange = false;
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.isPredicable())
const MCInstrDesc &MCID = MI->getDesc();
if (!MCID.isPredicable())
return false;
for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
if (TID.OpInfo[i].isPredicate()) {
if (MCID.OpInfo[i].isPredicate()) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg()) {
MO.setReg(Pred[j].getReg());
@ -332,10 +332,10 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
return true;
const TargetInstrDesc &TID = MI->getDesc();
const MCInstrDesc &MCID = MI->getDesc();
// Avoid instructions obviously unsafe for remat.
if (TID.isNotDuplicable() || TID.mayStore() ||
if (MCID.isNotDuplicable() || MCID.mayStore() ||
MI->hasUnmodeledSideEffects())
return false;
@ -345,7 +345,7 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
return false;
// Avoid instructions which load from potentially varying memory.
if (TID.mayLoad() && !MI->isInvariantLoad(AA))
if (MCID.mayLoad() && !MI->isInvariantLoad(AA))
return false;
// If any of the registers accessed are non-constant, conservatively assume

View File

@ -280,8 +280,8 @@ bool TwoAddressInstructionPass::Sink3AddrInstruction(MachineBasicBlock *MBB,
/// isTwoAddrUse - Return true if the specified MI is using the specified
/// register as a two-address operand.
static bool isTwoAddrUse(MachineInstr *UseMI, unsigned Reg) {
const TargetInstrDesc &TID = UseMI->getDesc();
for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i) {
const MCInstrDesc &MCID = UseMI->getDesc();
for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
MachineOperand &MO = UseMI->getOperand(i);
if (MO.isReg() && MO.getReg() == Reg &&
(MO.isDef() || UseMI->isRegTiedToDefOperand(i)))
@ -443,8 +443,9 @@ static bool isKilled(MachineInstr &MI, unsigned Reg,
/// isTwoAddrUse - Return true if the specified MI uses the specified register
/// as a two-address use. If so, return the destination register by reference.
static bool isTwoAddrUse(MachineInstr &MI, unsigned Reg, unsigned &DstReg) {
const TargetInstrDesc &TID = MI.getDesc();
unsigned NumOps = MI.isInlineAsm() ? MI.getNumOperands():TID.getNumOperands();
const MCInstrDesc &MCID = MI.getDesc();
unsigned NumOps = MI.isInlineAsm()
? MI.getNumOperands() : MCID.getNumOperands();
for (unsigned i = 0; i != NumOps; ++i) {
const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.isUse() || MO.getReg() != Reg)
@ -761,10 +762,10 @@ void TwoAddressInstructionPass::ProcessCopy(MachineInstr *MI,
static bool isSafeToDelete(MachineInstr *MI,
const TargetInstrInfo *TII,
SmallVector<unsigned, 4> &Kills) {
const TargetInstrDesc &TID = MI->getDesc();
if (TID.mayStore() || TID.isCall())
const MCInstrDesc &MCID = MI->getDesc();
if (MCID.mayStore() || MCID.isCall())
return false;
if (TID.isTerminator() || MI->hasUnmodeledSideEffects())
if (MCID.isTerminator() || MI->hasUnmodeledSideEffects())
return false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@ -854,7 +855,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
MachineFunction::iterator &mbbi,
unsigned SrcIdx, unsigned DstIdx, unsigned Dist,
SmallPtrSet<MachineInstr*, 8> &Processed) {
const TargetInstrDesc &TID = mi->getDesc();
const MCInstrDesc &MCID = mi->getDesc();
unsigned regA = mi->getOperand(DstIdx).getReg();
unsigned regB = mi->getOperand(SrcIdx).getReg();
@ -876,7 +877,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
unsigned regCIdx = ~0U;
bool TryCommute = false;
bool AggressiveCommute = false;
if (TID.isCommutable() && mi->getNumOperands() >= 3 &&
if (MCID.isCommutable() && mi->getNumOperands() >= 3 &&
TII->findCommutedOpIndices(mi, SrcOp1, SrcOp2)) {
if (SrcIdx == SrcOp1)
regCIdx = SrcOp2;
@ -907,7 +908,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
if (TargetRegisterInfo::isVirtualRegister(regA))
ScanUses(regA, &*mbbi, Processed);
if (TID.isConvertibleTo3Addr()) {
if (MCID.isConvertibleTo3Addr()) {
// This instruction is potentially convertible to a true
// three-address instruction. Check if it is profitable.
if (!regBKilled || isProfitableToConv3Addr(regA, regB)) {
@ -927,7 +928,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
// movq (%rax), %rcx
// addq %rdx, %rcx
// because it's preferable to schedule a load rather than a register copy.
if (TID.mayLoad() && !regBKilled) {
if (MCID.mayLoad() && !regBKilled) {
// Determine if a load can be unfolded.
unsigned LoadRegIndex;
unsigned NewOpc =
@ -936,14 +937,14 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
/*UnfoldStore=*/false,
&LoadRegIndex);
if (NewOpc != 0) {
const TargetInstrDesc &UnfoldTID = TII->get(NewOpc);
if (UnfoldTID.getNumDefs() == 1) {
const MCInstrDesc &UnfoldMCID = TII->get(NewOpc);
if (UnfoldMCID.getNumDefs() == 1) {
MachineFunction &MF = *mbbi->getParent();
// Unfold the load.
DEBUG(dbgs() << "2addr: UNFOLDING: " << *mi);
const TargetRegisterClass *RC =
TII->getRegClass(UnfoldTID, LoadRegIndex, TRI);
TII->getRegClass(UnfoldMCID, LoadRegIndex, TRI);
unsigned Reg = MRI->createVirtualRegister(RC);
SmallVector<MachineInstr *, 2> NewMIs;
if (!TII->unfoldMemoryOperand(MF, mi, Reg,
@ -1067,7 +1068,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
if (mi->isRegSequence())
RegSequences.push_back(&*mi);
const TargetInstrDesc &TID = mi->getDesc();
const MCInstrDesc &MCID = mi->getDesc();
bool FirstTied = true;
DistanceMap.insert(std::make_pair(mi, ++Dist));
@ -1077,7 +1078,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
// First scan through all the tied register uses in this instruction
// and record a list of pairs of tied operands for each register.
unsigned NumOps = mi->isInlineAsm()
? mi->getNumOperands() : TID.getNumOperands();
? mi->getNumOperands() : MCID.getNumOperands();
for (unsigned SrcIdx = 0; SrcIdx < NumOps; ++SrcIdx) {
unsigned DstIdx = 0;
if (!mi->isRegTiedToDefOperand(SrcIdx, &DstIdx))

View File

@ -679,8 +679,8 @@ static void ReMaterialize(MachineBasicBlock &MBB,
VirtRegMap &VRM) {
MachineInstr *ReMatDefMI = VRM.getReMaterializedMI(Reg);
#ifndef NDEBUG
const TargetInstrDesc &TID = ReMatDefMI->getDesc();
assert(TID.getNumDefs() == 1 &&
const MCInstrDesc &MCID = ReMatDefMI->getDesc();
assert(MCID.getNumDefs() == 1 &&
"Don't know how to remat instructions that define > 1 values!");
#endif
TII->reMaterialize(MBB, MII, DestReg, 0, ReMatDefMI, *TRI);
@ -1483,11 +1483,11 @@ OptimizeByUnfold(MachineBasicBlock::iterator &MII,
/// where SrcReg is r1 and it is tied to r0. Return true if after
/// commuting this instruction it will be r0 = op r2, r1.
static bool CommuteChangesDestination(MachineInstr *DefMI,
const TargetInstrDesc &TID,
const MCInstrDesc &MCID,
unsigned SrcReg,
const TargetInstrInfo *TII,
unsigned &DstIdx) {
if (TID.getNumDefs() != 1 && TID.getNumOperands() != 3)
if (MCID.getNumDefs() != 1 && MCID.getNumOperands() != 3)
return false;
if (!DefMI->getOperand(1).isReg() ||
DefMI->getOperand(1).getReg() != SrcReg)
@ -1527,11 +1527,11 @@ CommuteToFoldReload(MachineBasicBlock::iterator &MII,
MachineInstr &MI = *MII;
MachineBasicBlock::iterator DefMII = prior(MII);
MachineInstr *DefMI = DefMII;
const TargetInstrDesc &TID = DefMI->getDesc();
const MCInstrDesc &MCID = DefMI->getDesc();
unsigned NewDstIdx;
if (DefMII != MBB->begin() &&
TID.isCommutable() &&
CommuteChangesDestination(DefMI, TID, SrcReg, TII, NewDstIdx)) {
MCID.isCommutable() &&
CommuteChangesDestination(DefMI, MCID, SrcReg, TII, NewDstIdx)) {
MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
unsigned NewReg = NewDstMO.getReg();
if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
@ -1658,9 +1658,9 @@ SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
/// isSafeToDelete - Return true if this instruction doesn't produce any side
/// effect and all of its defs are dead.
static bool isSafeToDelete(MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
if (TID.mayLoad() || TID.mayStore() || TID.isTerminator() ||
TID.isCall() || TID.isBarrier() || TID.isReturn() ||
const MCInstrDesc &MCID = MI.getDesc();
if (MCID.mayLoad() || MCID.mayStore() || MCID.isTerminator() ||
MCID.isCall() || MCID.isBarrier() || MCID.isReturn() ||
MI.isLabel() || MI.isDebugValue() ||
MI.hasUnmodeledSideEffects())
return false;

View File

@ -136,9 +136,9 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineInstr *UpdateMI = NULL;
MachineInstr *MemMI = NULL;
unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
const TargetInstrDesc &TID = MI->getDesc();
unsigned NumOps = TID.getNumOperands();
bool isLoad = !TID.mayStore();
const MCInstrDesc &MCID = MI->getDesc();
unsigned NumOps = MCID.getNumOperands();
bool isLoad = !MCID.mayStore();
const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
const MachineOperand &Base = MI->getOperand(2);
const MachineOperand &Offset = MI->getOperand(NumOps-3);
@ -475,8 +475,8 @@ SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const {
// FIXME: This confuses implicit_def with optional CPSR def.
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
const MCInstrDesc &MCID = MI->getDesc();
if (!MCID.getImplicitDefs() && !MCID.hasOptionalDef())
return false;
bool Found = false;
@ -495,11 +495,11 @@ bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.isPredicable())
const MCInstrDesc &MCID = MI->getDesc();
if (!MCID.isPredicable())
return false;
if ((TID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
if ((MCID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
ARMFunctionInfo *AFI =
MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
return AFI->isThumb2Function();
@ -525,8 +525,8 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
// Basic size info comes from the TSFlags field.
const TargetInstrDesc &TID = MI->getDesc();
uint64_t TSFlags = TID.TSFlags;
const MCInstrDesc &MCID = MI->getDesc();
uint64_t TSFlags = MCID.TSFlags;
unsigned Opc = MI->getOpcode();
switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
@ -588,9 +588,9 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
// entry is one byte; TBH two bytes each.
unsigned EntrySize = (Opc == ARM::t2TBB_JT)
? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4);
unsigned NumOps = TID.getNumOperands();
unsigned NumOps = MCID.getNumOperands();
MachineOperand JTOP =
MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
MI->getOperand(NumOps - (MCID.isPredicable() ? 3 : 2));
unsigned JTI = JTOP.getIndex();
const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
assert(MJTI != 0);
@ -1363,7 +1363,7 @@ bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
unsigned FrameReg, int &Offset,
const ARMBaseInstrInfo &TII) {
unsigned Opcode = MI.getOpcode();
const TargetInstrDesc &Desc = MI.getDesc();
const MCInstrDesc &Desc = MI.getDesc();
unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
bool isSub = false;
@ -1803,7 +1803,7 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
if (!ItinData || ItinData->isEmpty())
return 1;
const TargetInstrDesc &Desc = MI->getDesc();
const MCInstrDesc &Desc = MI->getDesc();
unsigned Class = Desc.getSchedClass();
unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
if (UOps)
@ -1906,10 +1906,10 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
int
ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
const TargetInstrDesc &DefTID,
const MCInstrDesc &DefMCID,
unsigned DefClass,
unsigned DefIdx, unsigned DefAlign) const {
int RegNo = (int)(DefIdx+1) - DefTID.getNumOperands() + 1;
int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
if (RegNo <= 0)
// Def is the address writeback.
return ItinData->getOperandCycle(DefClass, DefIdx);
@ -1924,7 +1924,7 @@ ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
DefCycle = RegNo;
bool isSLoad = false;
switch (DefTID.getOpcode()) {
switch (DefMCID.getOpcode()) {
default: break;
case ARM::VLDMSIA:
case ARM::VLDMSIA_UPD:
@ -1947,10 +1947,10 @@ ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
int
ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
const TargetInstrDesc &DefTID,
const MCInstrDesc &DefMCID,
unsigned DefClass,
unsigned DefIdx, unsigned DefAlign) const {
int RegNo = (int)(DefIdx+1) - DefTID.getNumOperands() + 1;
int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
if (RegNo <= 0)
// Def is the address writeback.
return ItinData->getOperandCycle(DefClass, DefIdx);
@ -1982,10 +1982,10 @@ ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
int
ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
const TargetInstrDesc &UseTID,
const MCInstrDesc &UseMCID,
unsigned UseClass,
unsigned UseIdx, unsigned UseAlign) const {
int RegNo = (int)(UseIdx+1) - UseTID.getNumOperands() + 1;
int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
if (RegNo <= 0)
return ItinData->getOperandCycle(UseClass, UseIdx);
@ -1999,7 +1999,7 @@ ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
UseCycle = RegNo;
bool isSStore = false;
switch (UseTID.getOpcode()) {
switch (UseMCID.getOpcode()) {
default: break;
case ARM::VSTMSIA:
case ARM::VSTMSIA_UPD:
@ -2022,10 +2022,10 @@ ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
int
ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
const TargetInstrDesc &UseTID,
const MCInstrDesc &UseMCID,
unsigned UseClass,
unsigned UseIdx, unsigned UseAlign) const {
int RegNo = (int)(UseIdx+1) - UseTID.getNumOperands() + 1;
int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
if (RegNo <= 0)
return ItinData->getOperandCycle(UseClass, UseIdx);
@ -2051,14 +2051,14 @@ ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
const TargetInstrDesc &DefTID,
const MCInstrDesc &DefMCID,
unsigned DefIdx, unsigned DefAlign,
const TargetInstrDesc &UseTID,
const MCInstrDesc &UseMCID,
unsigned UseIdx, unsigned UseAlign) const {
unsigned DefClass = DefTID.getSchedClass();
unsigned UseClass = UseTID.getSchedClass();
unsigned DefClass = DefMCID.getSchedClass();
unsigned UseClass = UseMCID.getSchedClass();
if (DefIdx < DefTID.getNumDefs() && UseIdx < UseTID.getNumOperands())
if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands())
return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
// This may be a def / use of a variable_ops instruction, the operand
@ -2066,7 +2066,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
// figure it out.
int DefCycle = -1;
bool LdmBypass = false;
switch (DefTID.getOpcode()) {
switch (DefMCID.getOpcode()) {
default:
DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
break;
@ -2077,7 +2077,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
case ARM::VLDMSIA:
case ARM::VLDMSIA_UPD:
case ARM::VLDMSDB_UPD:
DefCycle = getVLDMDefCycle(ItinData, DefTID, DefClass, DefIdx, DefAlign);
DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
break;
case ARM::LDMIA_RET:
@ -2098,7 +2098,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
case ARM::t2LDMIA_UPD:
case ARM::t2LDMDB_UPD:
LdmBypass = 1;
DefCycle = getLDMDefCycle(ItinData, DefTID, DefClass, DefIdx, DefAlign);
DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
break;
}
@ -2107,7 +2107,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
DefCycle = 2;
int UseCycle = -1;
switch (UseTID.getOpcode()) {
switch (UseMCID.getOpcode()) {
default:
UseCycle = ItinData->getOperandCycle(UseClass, UseIdx);
break;
@ -2118,7 +2118,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
case ARM::VSTMSIA:
case ARM::VSTMSIA_UPD:
case ARM::VSTMSDB_UPD:
UseCycle = getVSTMUseCycle(ItinData, UseTID, UseClass, UseIdx, UseAlign);
UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
break;
case ARM::STMIA:
@ -2137,7 +2137,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
case ARM::t2STMDB:
case ARM::t2STMIA_UPD:
case ARM::t2STMDB_UPD:
UseCycle = getSTMUseCycle(ItinData, UseTID, UseClass, UseIdx, UseAlign);
UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
break;
}
@ -2150,7 +2150,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
if (LdmBypass) {
// It's a variable_ops instruction so we can't use DefIdx here. Just use
// first def operand.
if (ItinData->hasPipelineForwarding(DefClass, DefTID.getNumOperands()-1,
if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1,
UseClass, UseIdx))
--UseCycle;
} else if (ItinData->hasPipelineForwarding(DefClass, DefIdx,
@ -2170,11 +2170,11 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
DefMI->isRegSequence() || DefMI->isImplicitDef())
return 1;
const TargetInstrDesc &DefTID = DefMI->getDesc();
const MCInstrDesc &DefMCID = DefMI->getDesc();
if (!ItinData || ItinData->isEmpty())
return DefTID.mayLoad() ? 3 : 1;
return DefMCID.mayLoad() ? 3 : 1;
const TargetInstrDesc &UseTID = UseMI->getDesc();
const MCInstrDesc &UseMCID = UseMI->getDesc();
const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
if (DefMO.getReg() == ARM::CPSR) {
if (DefMI->getOpcode() == ARM::FMSTAT) {
@ -2183,7 +2183,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
}
// CPSR set and branch can be paired in the same cycle.
if (UseTID.isBranch())
if (UseMCID.isBranch())
return 0;
}
@ -2191,14 +2191,14 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
? (*DefMI->memoperands_begin())->getAlignment() : 0;
unsigned UseAlign = UseMI->hasOneMemOperand()
? (*UseMI->memoperands_begin())->getAlignment() : 0;
int Latency = getOperandLatency(ItinData, DefTID, DefIdx, DefAlign,
UseTID, UseIdx, UseAlign);
int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
UseMCID, UseIdx, UseAlign);
if (Latency > 1 &&
(Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
// variants are one cycle cheaper.
switch (DefTID.getOpcode()) {
switch (DefMCID.getOpcode()) {
default: break;
case ARM::LDRrs:
case ARM::LDRBrs: {
@ -2223,7 +2223,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
}
if (DefAlign < 8 && Subtarget.isCortexA9())
switch (DefTID.getOpcode()) {
switch (DefMCID.getOpcode()) {
default: break;
case ARM::VLD1q8:
case ARM::VLD1q16:
@ -2327,37 +2327,37 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
if (!DefNode->isMachineOpcode())
return 1;
const TargetInstrDesc &DefTID = get(DefNode->getMachineOpcode());
const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode());
if (isZeroCost(DefTID.Opcode))
if (isZeroCost(DefMCID.Opcode))
return 0;
if (!ItinData || ItinData->isEmpty())
return DefTID.mayLoad() ? 3 : 1;
return DefMCID.mayLoad() ? 3 : 1;
if (!UseNode->isMachineOpcode()) {
int Latency = ItinData->getOperandCycle(DefTID.getSchedClass(), DefIdx);
int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx);
if (Subtarget.isCortexA9())
return Latency <= 2 ? 1 : Latency - 1;
else
return Latency <= 3 ? 1 : Latency - 2;
}
const TargetInstrDesc &UseTID = get(UseNode->getMachineOpcode());
const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode);
unsigned DefAlign = !DefMN->memoperands_empty()
? (*DefMN->memoperands_begin())->getAlignment() : 0;
const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode);
unsigned UseAlign = !UseMN->memoperands_empty()
? (*UseMN->memoperands_begin())->getAlignment() : 0;
int Latency = getOperandLatency(ItinData, DefTID, DefIdx, DefAlign,
UseTID, UseIdx, UseAlign);
int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
UseMCID, UseIdx, UseAlign);
if (Latency > 1 &&
(Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
// variants are one cycle cheaper.
switch (DefTID.getOpcode()) {
switch (DefMCID.getOpcode()) {
default: break;
case ARM::LDRrs:
case ARM::LDRBrs: {
@ -2384,7 +2384,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
}
if (DefAlign < 8 && Subtarget.isCortexA9())
switch (DefTID.getOpcode()) {
switch (DefMCID.getOpcode()) {
default: break;
case ARM::VLD1q8Pseudo:
case ARM::VLD1q16Pseudo:
@ -2503,10 +2503,10 @@ int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
if (!ItinData || ItinData->isEmpty())
return 1;
const TargetInstrDesc &TID = MI->getDesc();
unsigned Class = TID.getSchedClass();
const MCInstrDesc &MCID = MI->getDesc();
unsigned Class = MCID.getSchedClass();
unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
if (PredCost && TID.hasImplicitDefOfPhysReg(ARM::CPSR))
if (PredCost && MCID.hasImplicitDefOfPhysReg(ARM::CPSR))
// When predicated, CPSR is an additional source operand for CPSR updating
// instructions; this apparently increases their latencies.
*PredCost = 1;

View File

@ -353,25 +353,25 @@ public:
SDNode *UseNode, unsigned UseIdx) const;
private:
int getVLDMDefCycle(const InstrItineraryData *ItinData,
const TargetInstrDesc &DefTID,
const MCInstrDesc &DefMCID,
unsigned DefClass,
unsigned DefIdx, unsigned DefAlign) const;
int getLDMDefCycle(const InstrItineraryData *ItinData,
const TargetInstrDesc &DefTID,
const MCInstrDesc &DefMCID,
unsigned DefClass,
unsigned DefIdx, unsigned DefAlign) const;
int getVSTMUseCycle(const InstrItineraryData *ItinData,
const TargetInstrDesc &UseTID,
const MCInstrDesc &UseMCID,
unsigned UseClass,
unsigned UseIdx, unsigned UseAlign) const;
int getSTMUseCycle(const InstrItineraryData *ItinData,
const TargetInstrDesc &UseTID,
const MCInstrDesc &UseMCID,
unsigned UseClass,
unsigned UseIdx, unsigned UseAlign) const;
int getOperandLatency(const InstrItineraryData *ItinData,
const TargetInstrDesc &DefTID,
const MCInstrDesc &DefMCID,
unsigned DefIdx, unsigned DefAlign,
const TargetInstrDesc &UseTID,
const MCInstrDesc &UseMCID,
unsigned UseIdx, unsigned UseAlign) const;
int getInstrLatency(const InstrItineraryData *ItinData,

View File

@ -960,7 +960,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
const TargetInstrDesc &Desc = MI->getDesc();
const MCInstrDesc &Desc = MI->getDesc();
unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
int64_t InstrOffs = 0;
int Scale = 1;
@ -1110,11 +1110,11 @@ materializeFrameBaseRegister(MachineBasicBlock *MBB,
if (Ins != MBB->end())
DL = Ins->getDebugLoc();
const TargetInstrDesc &TID = TII.get(ADDriOpc);
const MCInstrDesc &MCID = TII.get(ADDriOpc);
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
MRI.constrainRegClass(BaseReg, TII.getRegClass(TID, 0, this));
MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this));
MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, TID, BaseReg)
MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
.addFrameIndex(FrameIdx).addImm(Offset);
if (!AFI->isThumb1OnlyFunction())
@ -1150,7 +1150,7 @@ ARMBaseRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
int64_t Offset) const {
const TargetInstrDesc &Desc = MI->getDesc();
const MCInstrDesc &Desc = MI->getDesc();
unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
unsigned i = 0;

View File

@ -96,13 +96,13 @@ namespace {
void addPCLabel(unsigned LabelID);
void emitPseudoInstruction(const MachineInstr &MI);
unsigned getMachineSoRegOpValue(const MachineInstr &MI,
const TargetInstrDesc &TID,
const MCInstrDesc &MCID,
const MachineOperand &MO,
unsigned OpIdx);
unsigned getMachineSoImmOpValue(unsigned SoImm);
unsigned getAddrModeSBit(const MachineInstr &MI,
const TargetInstrDesc &TID) const;
const MCInstrDesc &MCID) const;
void emitDataProcessingInstruction(const MachineInstr &MI,
unsigned ImplicitRd = 0,
@ -443,9 +443,9 @@ unsigned ARMCodeEmitter::getMachineOpValue(const MachineInstr &MI,
else if (MO.isSymbol())
emitExternalSymbolAddress(MO.getSymbolName(), ARM::reloc_arm_branch);
else if (MO.isCPI()) {
const TargetInstrDesc &TID = MI.getDesc();
const MCInstrDesc &MCID = MI.getDesc();
// For VFP load, the immediate offset is multiplied by 4.
unsigned Reloc = ((TID.TSFlags & ARMII::FormMask) == ARMII::VFPLdStFrm)
unsigned Reloc = ((MCID.TSFlags & ARMII::FormMask) == ARMII::VFPLdStFrm)
? ARM::reloc_arm_vfp_cp_entry : ARM::reloc_arm_cp_entry;
emitConstPoolAddress(MO.getIndex(), Reloc);
} else if (MO.isJTI())
@ -757,7 +757,7 @@ void ARMCodeEmitter::emitMOVi2piecesInstruction(const MachineInstr &MI) {
void ARMCodeEmitter::emitLEApcrelJTInstruction(const MachineInstr &MI) {
// It's basically add r, pc, (LJTI - $+8)
const TargetInstrDesc &TID = MI.getDesc();
const MCInstrDesc &MCID = MI.getDesc();
// Emit the 'add' instruction.
unsigned Binary = 0x4 << 21; // add: Insts{24-21} = 0b0100
@ -766,7 +766,7 @@ void ARMCodeEmitter::emitLEApcrelJTInstruction(const MachineInstr &MI) {
Binary |= II->getPredicate(&MI) << ARMII::CondShift;
// Encode S bit if MI modifies CPSR.
Binary |= getAddrModeSBit(MI, TID);
Binary |= getAddrModeSBit(MI, MCID);
// Encode Rd.
Binary |= getMachineOpValue(MI, 0) << ARMII::RegRdShift;
@ -912,7 +912,7 @@ void ARMCodeEmitter::emitPseudoInstruction(const MachineInstr &MI) {
}
unsigned ARMCodeEmitter::getMachineSoRegOpValue(const MachineInstr &MI,
const TargetInstrDesc &TID,
const MCInstrDesc &MCID,
const MachineOperand &MO,
unsigned OpIdx) {
unsigned Binary = getMachineOpValue(MI, MO);
@ -982,8 +982,8 @@ unsigned ARMCodeEmitter::getMachineSoImmOpValue(unsigned SoImm) {
}
unsigned ARMCodeEmitter::getAddrModeSBit(const MachineInstr &MI,
const TargetInstrDesc &TID) const {
for (unsigned i = MI.getNumOperands(), e = TID.getNumOperands(); i >= e; --i){
const MCInstrDesc &MCID) const {
for (unsigned i = MI.getNumOperands(), e = MCID.getNumOperands(); i >= e; --i){
const MachineOperand &MO = MI.getOperand(i-1);
if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR)
return 1 << ARMII::S_BitShift;
@ -994,7 +994,7 @@ unsigned ARMCodeEmitter::getAddrModeSBit(const MachineInstr &MI,
void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
unsigned ImplicitRd,
unsigned ImplicitRn) {
const TargetInstrDesc &TID = MI.getDesc();
const MCInstrDesc &MCID = MI.getDesc();
// Part of binary is determined by TableGen.
unsigned Binary = getBinaryCodeForInstr(MI);
@ -1003,10 +1003,10 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
Binary |= II->getPredicate(&MI) << ARMII::CondShift;
// Encode S bit if MI modifies CPSR.
Binary |= getAddrModeSBit(MI, TID);
Binary |= getAddrModeSBit(MI, MCID);
// Encode register def if there is one.
unsigned NumDefs = TID.getNumDefs();
unsigned NumDefs = MCID.getNumDefs();
unsigned OpIdx = 0;
if (NumDefs)
Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift;
@ -1014,7 +1014,7 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
// Special handling for implicit use (e.g. PC).
Binary |= (getARMRegisterNumbering(ImplicitRd) << ARMII::RegRdShift);
if (TID.Opcode == ARM::MOVi16) {
if (MCID.Opcode == ARM::MOVi16) {
// Get immediate from MI.
unsigned Lo16 = getMovi32Value(MI, MI.getOperand(OpIdx),
ARM::reloc_arm_movw);
@ -1023,14 +1023,14 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
Binary |= ((Lo16 >> 12) & 0xF) << 16;
emitWordLE(Binary);
return;
} else if(TID.Opcode == ARM::MOVTi16) {
} else if(MCID.Opcode == ARM::MOVTi16) {
unsigned Hi16 = (getMovi32Value(MI, MI.getOperand(OpIdx),
ARM::reloc_arm_movt) >> 16);
Binary |= Hi16 & 0xFFF;
Binary |= ((Hi16 >> 12) & 0xF) << 16;
emitWordLE(Binary);
return;
} else if ((TID.Opcode == ARM::BFC) || (TID.Opcode == ARM::BFI)) {
} else if ((MCID.Opcode == ARM::BFC) || (MCID.Opcode == ARM::BFI)) {
uint32_t v = ~MI.getOperand(2).getImm();
int32_t lsb = CountTrailingZeros_32(v);
int32_t msb = (32 - CountLeadingZeros_32(v)) - 1;
@ -1039,7 +1039,7 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
Binary |= (lsb & 0x1F) << 7;
emitWordLE(Binary);
return;
} else if ((TID.Opcode == ARM::UBFX) || (TID.Opcode == ARM::SBFX)) {
} else if ((MCID.Opcode == ARM::UBFX) || (MCID.Opcode == ARM::SBFX)) {
// Encode Rn in Instr{0-3}
Binary |= getMachineOpValue(MI, OpIdx++);
@ -1054,11 +1054,11 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
}
// If this is a two-address operand, skip it. e.g. MOVCCr operand 1.
if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)
++OpIdx;
// Encode first non-shifter register operand if there is one.
bool isUnary = TID.TSFlags & ARMII::UnaryDP;
bool isUnary = MCID.TSFlags & ARMII::UnaryDP;
if (!isUnary) {
if (ImplicitRn)
// Special handling for implicit use (e.g. PC).
@ -1071,9 +1071,9 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
// Encode shifter operand.
const MachineOperand &MO = MI.getOperand(OpIdx);
if ((TID.TSFlags & ARMII::FormMask) == ARMII::DPSoRegFrm) {
if ((MCID.TSFlags & ARMII::FormMask) == ARMII::DPSoRegFrm) {
// Encode SoReg.
emitWordLE(Binary | getMachineSoRegOpValue(MI, TID, MO, OpIdx));
emitWordLE(Binary | getMachineSoRegOpValue(MI, MCID, MO, OpIdx));
return;
}
@ -1092,9 +1092,9 @@ void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
void ARMCodeEmitter::emitLoadStoreInstruction(const MachineInstr &MI,
unsigned ImplicitRd,
unsigned ImplicitRn) {
const TargetInstrDesc &TID = MI.getDesc();
unsigned Form = TID.TSFlags & ARMII::FormMask;
bool IsPrePost = (TID.TSFlags & ARMII::IndexModeMask) != 0;
const MCInstrDesc &MCID = MI.getDesc();
unsigned Form = MCID.TSFlags & ARMII::FormMask;
bool IsPrePost = (MCID.TSFlags & ARMII::IndexModeMask) != 0;
// Part of binary is determined by TableGen.
unsigned Binary = getBinaryCodeForInstr(MI);
@ -1134,7 +1134,7 @@ void ARMCodeEmitter::emitLoadStoreInstruction(const MachineInstr &MI,
Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRnShift;
// If this is a two-address operand, skip it. e.g. LDR_PRE.
if (!Skipped && TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
if (!Skipped && MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)
++OpIdx;
const MachineOperand &MO2 = MI.getOperand(OpIdx);
@ -1170,9 +1170,9 @@ void ARMCodeEmitter::emitLoadStoreInstruction(const MachineInstr &MI,
void ARMCodeEmitter::emitMiscLoadStoreInstruction(const MachineInstr &MI,
unsigned ImplicitRn) {
const TargetInstrDesc &TID = MI.getDesc();
unsigned Form = TID.TSFlags & ARMII::FormMask;
bool IsPrePost = (TID.TSFlags & ARMII::IndexModeMask) != 0;
const MCInstrDesc &MCID = MI.getDesc();
unsigned Form = MCID.TSFlags & ARMII::FormMask;
bool IsPrePost = (MCID.TSFlags & ARMII::IndexModeMask) != 0;
// Part of binary is determined by TableGen.
unsigned Binary = getBinaryCodeForInstr(MI);
@ -1194,7 +1194,7 @@ void ARMCodeEmitter::emitMiscLoadStoreInstruction(const MachineInstr &MI,
Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift;
// Skip LDRD and STRD's second operand.
if (TID.Opcode == ARM::LDRD || TID.Opcode == ARM::STRD)
if (MCID.Opcode == ARM::LDRD || MCID.Opcode == ARM::STRD)
++OpIdx;
// Set second operand
@ -1205,7 +1205,7 @@ void ARMCodeEmitter::emitMiscLoadStoreInstruction(const MachineInstr &MI,
Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRnShift;
// If this is a two-address operand, skip it. e.g. LDRH_POST.
if (!Skipped && TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
if (!Skipped && MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)
++OpIdx;
const MachineOperand &MO2 = MI.getOperand(OpIdx);
@ -1255,8 +1255,8 @@ static unsigned getAddrModeUPBits(unsigned Mode) {
}
void ARMCodeEmitter::emitLoadStoreMultipleInstruction(const MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
bool IsUpdating = (TID.TSFlags & ARMII::IndexModeMask) != 0;
const MCInstrDesc &MCID = MI.getDesc();
bool IsUpdating = (MCID.TSFlags & ARMII::IndexModeMask) != 0;
// Part of binary is determined by TableGen.
unsigned Binary = getBinaryCodeForInstr(MI);
@ -1295,7 +1295,7 @@ void ARMCodeEmitter::emitLoadStoreMultipleInstruction(const MachineInstr &MI) {
}
void ARMCodeEmitter::emitMulFrmInstruction(const MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
const MCInstrDesc &MCID = MI.getDesc();
// Part of binary is determined by TableGen.
unsigned Binary = getBinaryCodeForInstr(MI);
@ -1304,12 +1304,12 @@ void ARMCodeEmitter::emitMulFrmInstruction(const MachineInstr &MI) {
Binary |= II->getPredicate(&MI) << ARMII::CondShift;
// Encode S bit if MI modifies CPSR.
Binary |= getAddrModeSBit(MI, TID);
Binary |= getAddrModeSBit(MI, MCID);
// 32x32->64bit operations have two destination registers. The number
// of register definitions will tell us if that's what we're dealing with.
unsigned OpIdx = 0;
if (TID.getNumDefs() == 2)
if (MCID.getNumDefs() == 2)
Binary |= getMachineOpValue (MI, OpIdx++) << ARMII::RegRdLoShift;
// Encode Rd
@ -1323,16 +1323,16 @@ void ARMCodeEmitter::emitMulFrmInstruction(const MachineInstr &MI) {
// Many multiply instructions (e.g. MLA) have three src operands. Encode
// it as Rn (for multiply, that's in the same offset as RdLo).
if (TID.getNumOperands() > OpIdx &&
!TID.OpInfo[OpIdx].isPredicate() &&
!TID.OpInfo[OpIdx].isOptionalDef())
if (MCID.getNumOperands() > OpIdx &&
!MCID.OpInfo[OpIdx].isPredicate() &&
!MCID.OpInfo[OpIdx].isOptionalDef())
Binary |= getMachineOpValue(MI, OpIdx) << ARMII::RegRdLoShift;
emitWordLE(Binary);
}
void ARMCodeEmitter::emitExtendInstruction(const MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
const MCInstrDesc &MCID = MI.getDesc();
// Part of binary is determined by TableGen.
unsigned Binary = getBinaryCodeForInstr(MI);
@ -1361,15 +1361,15 @@ void ARMCodeEmitter::emitExtendInstruction(const MachineInstr &MI) {
// Encode rot imm (0, 8, 16, or 24) if it has a rotate immediate operand.
if (MI.getOperand(OpIdx).isImm() &&
!TID.OpInfo[OpIdx].isPredicate() &&
!TID.OpInfo[OpIdx].isOptionalDef())
!MCID.OpInfo[OpIdx].isPredicate() &&
!MCID.OpInfo[OpIdx].isOptionalDef())
Binary |= (getMachineOpValue(MI, OpIdx) / 8) << ARMII::ExtRotImmShift;
emitWordLE(Binary);
}
void ARMCodeEmitter::emitMiscArithInstruction(const MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
const MCInstrDesc &MCID = MI.getDesc();
// Part of binary is determined by TableGen.
unsigned Binary = getBinaryCodeForInstr(MI);
@ -1378,7 +1378,7 @@ void ARMCodeEmitter::emitMiscArithInstruction(const MachineInstr &MI) {
Binary |= II->getPredicate(&MI) << ARMII::CondShift;
// PKH instructions are finished at this point
if (TID.Opcode == ARM::PKHBT || TID.Opcode == ARM::PKHTB) {
if (MCID.Opcode == ARM::PKHBT || MCID.Opcode == ARM::PKHTB) {
emitWordLE(Binary);
return;
}
@ -1389,9 +1389,9 @@ void ARMCodeEmitter::emitMiscArithInstruction(const MachineInstr &MI) {
Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift;
const MachineOperand &MO = MI.getOperand(OpIdx++);
if (OpIdx == TID.getNumOperands() ||
TID.OpInfo[OpIdx].isPredicate() ||
TID.OpInfo[OpIdx].isOptionalDef()) {
if (OpIdx == MCID.getNumOperands() ||
MCID.OpInfo[OpIdx].isPredicate() ||
MCID.OpInfo[OpIdx].isOptionalDef()) {
// Encode Rm and it's done.
Binary |= getMachineOpValue(MI, MO);
emitWordLE(Binary);
@ -1406,7 +1406,7 @@ void ARMCodeEmitter::emitMiscArithInstruction(const MachineInstr &MI) {
// Encode shift_imm.
unsigned ShiftAmt = MI.getOperand(OpIdx).getImm();
if (TID.Opcode == ARM::PKHTB) {
if (MCID.Opcode == ARM::PKHTB) {
assert(ShiftAmt != 0 && "PKHTB shift_imm is 0!");
if (ShiftAmt == 32)
ShiftAmt = 0;
@ -1418,7 +1418,7 @@ void ARMCodeEmitter::emitMiscArithInstruction(const MachineInstr &MI) {
}
void ARMCodeEmitter::emitSaturateInstruction(const MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
const MCInstrDesc &MCID = MI.getDesc();
// Part of binary is determined by TableGen.
unsigned Binary = getBinaryCodeForInstr(MI);
@ -1431,11 +1431,11 @@ void ARMCodeEmitter::emitSaturateInstruction(const MachineInstr &MI) {
// Encode saturate bit position.
unsigned Pos = MI.getOperand(1).getImm();
if (TID.Opcode == ARM::SSAT || TID.Opcode == ARM::SSAT16)
if (MCID.Opcode == ARM::SSAT || MCID.Opcode == ARM::SSAT16)
Pos -= 1;
assert((Pos < 16 || (Pos < 32 &&
TID.Opcode != ARM::SSAT16 &&
TID.Opcode != ARM::USAT16)) &&
MCID.Opcode != ARM::SSAT16 &&
MCID.Opcode != ARM::USAT16)) &&
"saturate bit position out of range");
Binary |= Pos << 16;
@ -1443,7 +1443,7 @@ void ARMCodeEmitter::emitSaturateInstruction(const MachineInstr &MI) {
Binary |= getMachineOpValue(MI, 2);
// Encode shift_imm.
if (TID.getNumOperands() == 4) {
if (MCID.getNumOperands() == 4) {
unsigned ShiftOp = MI.getOperand(3).getImm();
ARM_AM::ShiftOpc Opc = ARM_AM::getSORegShOp(ShiftOp);
if (Opc == ARM_AM::asr)
@ -1459,9 +1459,9 @@ void ARMCodeEmitter::emitSaturateInstruction(const MachineInstr &MI) {
}
void ARMCodeEmitter::emitBranchInstruction(const MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
const MCInstrDesc &MCID = MI.getDesc();
if (TID.Opcode == ARM::TPsoft) {
if (MCID.Opcode == ARM::TPsoft) {
llvm_unreachable("ARM::TPsoft FIXME"); // FIXME
}
@ -1498,20 +1498,20 @@ void ARMCodeEmitter::emitInlineJumpTable(unsigned JTIndex) {
}
void ARMCodeEmitter::emitMiscBranchInstruction(const MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
const MCInstrDesc &MCID = MI.getDesc();
// Handle jump tables.
if (TID.Opcode == ARM::BR_JTr || TID.Opcode == ARM::BR_JTadd) {
if (MCID.Opcode == ARM::BR_JTr || MCID.Opcode == ARM::BR_JTadd) {
// First emit a ldr pc, [] instruction.
emitDataProcessingInstruction(MI, ARM::PC);
// Then emit the inline jump table.
unsigned JTIndex =
(TID.Opcode == ARM::BR_JTr)
(MCID.Opcode == ARM::BR_JTr)
? MI.getOperand(1).getIndex() : MI.getOperand(2).getIndex();
emitInlineJumpTable(JTIndex);
return;
} else if (TID.Opcode == ARM::BR_JTm) {
} else if (MCID.Opcode == ARM::BR_JTm) {
// First emit a ldr pc, [] instruction.
emitLoadStoreInstruction(MI, ARM::PC);
@ -1526,7 +1526,7 @@ void ARMCodeEmitter::emitMiscBranchInstruction(const MachineInstr &MI) {
// Set the conditional execution predicate
Binary |= II->getPredicate(&MI) << ARMII::CondShift;
if (TID.Opcode == ARM::BX_RET || TID.Opcode == ARM::MOVPCLR)
if (MCID.Opcode == ARM::BX_RET || MCID.Opcode == ARM::MOVPCLR)
// The return register is LR.
Binary |= getARMRegisterNumbering(ARM::LR);
else
@ -1579,7 +1579,7 @@ static unsigned encodeVFPRm(const MachineInstr &MI, unsigned OpIdx) {
}
void ARMCodeEmitter::emitVFPArithInstruction(const MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
const MCInstrDesc &MCID = MI.getDesc();
// Part of binary is determined by TableGen.
unsigned Binary = getBinaryCodeForInstr(MI);
@ -1596,16 +1596,16 @@ void ARMCodeEmitter::emitVFPArithInstruction(const MachineInstr &MI) {
Binary |= encodeVFPRd(MI, OpIdx++);
// If this is a two-address operand, skip it, e.g. FMACD.
if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)
++OpIdx;
// Encode Dn / Sn.
if ((TID.TSFlags & ARMII::FormMask) == ARMII::VFPBinaryFrm)
if ((MCID.TSFlags & ARMII::FormMask) == ARMII::VFPBinaryFrm)
Binary |= encodeVFPRn(MI, OpIdx++);
if (OpIdx == TID.getNumOperands() ||
TID.OpInfo[OpIdx].isPredicate() ||
TID.OpInfo[OpIdx].isOptionalDef()) {
if (OpIdx == MCID.getNumOperands() ||
MCID.OpInfo[OpIdx].isPredicate() ||
MCID.OpInfo[OpIdx].isOptionalDef()) {
// FCMPEZD etc. has only one operand.
emitWordLE(Binary);
return;
@ -1618,8 +1618,8 @@ void ARMCodeEmitter::emitVFPArithInstruction(const MachineInstr &MI) {
}
void ARMCodeEmitter::emitVFPConversionInstruction(const MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
unsigned Form = TID.TSFlags & ARMII::FormMask;
const MCInstrDesc &MCID = MI.getDesc();
unsigned Form = MCID.TSFlags & ARMII::FormMask;
// Part of binary is determined by TableGen.
unsigned Binary = getBinaryCodeForInstr(MI);
@ -1709,8 +1709,8 @@ void ARMCodeEmitter::emitVFPLoadStoreInstruction(const MachineInstr &MI) {
void
ARMCodeEmitter::emitVFPLoadStoreMultipleInstruction(const MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
bool IsUpdating = (TID.TSFlags & ARMII::IndexModeMask) != 0;
const MCInstrDesc &MCID = MI.getDesc();
bool IsUpdating = (MCID.TSFlags & ARMII::IndexModeMask) != 0;
// Part of binary is determined by TableGen.
unsigned Binary = getBinaryCodeForInstr(MI);
@ -1795,8 +1795,8 @@ void ARMCodeEmitter::emitNEONLaneInstruction(const MachineInstr &MI) {
unsigned Binary = getBinaryCodeForInstr(MI);
unsigned RegTOpIdx, RegNOpIdx, LnOpIdx;
const TargetInstrDesc &TID = MI.getDesc();
if ((TID.TSFlags & ARMII::FormMask) == ARMII::NGetLnFrm) {
const MCInstrDesc &MCID = MI.getDesc();
if ((MCID.TSFlags & ARMII::FormMask) == ARMII::NGetLnFrm) {
RegTOpIdx = 0;
RegNOpIdx = 1;
LnOpIdx = 2;
@ -1863,12 +1863,12 @@ void ARMCodeEmitter::emitNEON1RegModImmInstruction(const MachineInstr &MI) {
}
void ARMCodeEmitter::emitNEON2RegInstruction(const MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
const MCInstrDesc &MCID = MI.getDesc();
unsigned Binary = getBinaryCodeForInstr(MI);
// Destination register is encoded in Dd; source register in Dm.
unsigned OpIdx = 0;
Binary |= encodeNEONRd(MI, OpIdx++);
if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)
++OpIdx;
Binary |= encodeNEONRm(MI, OpIdx);
if (IsThumb)
@ -1878,15 +1878,15 @@ void ARMCodeEmitter::emitNEON2RegInstruction(const MachineInstr &MI) {
}
void ARMCodeEmitter::emitNEON3RegInstruction(const MachineInstr &MI) {
const TargetInstrDesc &TID = MI.getDesc();
const MCInstrDesc &MCID = MI.getDesc();
unsigned Binary = getBinaryCodeForInstr(MI);
// Destination register is encoded in Dd; source registers in Dn and Dm.
unsigned OpIdx = 0;
Binary |= encodeNEONRd(MI, OpIdx++);
if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)
++OpIdx;
Binary |= encodeNEONRn(MI, OpIdx++);
if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)
++OpIdx;
Binary |= encodeNEONRm(MI, OpIdx);
if (IsThumb)


@ -1692,9 +1692,9 @@ bool ARMConstantIslands::OptimizeThumb2JumpTables(MachineFunction &MF) {
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
MachineInstr *MI = T2JumpTables[i];
const TargetInstrDesc &TID = MI->getDesc();
unsigned NumOps = TID.getNumOperands();
unsigned JTOpIdx = NumOps - (TID.isPredicable() ? 3 : 2);
const MCInstrDesc &MCID = MI->getDesc();
unsigned NumOps = MCID.getNumOperands();
unsigned JTOpIdx = NumOps - (MCID.isPredicable() ? 3 : 2);
MachineOperand JTOP = MI->getOperand(JTOpIdx);
unsigned JTI = JTOP.getIndex();
assert(JTI < JT.size());
@ -1815,9 +1815,9 @@ bool ARMConstantIslands::ReorderThumb2JumpTables(MachineFunction &MF) {
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
MachineInstr *MI = T2JumpTables[i];
const TargetInstrDesc &TID = MI->getDesc();
unsigned NumOps = TID.getNumOperands();
unsigned JTOpIdx = NumOps - (TID.isPredicable() ? 3 : 2);
const MCInstrDesc &MCID = MI->getDesc();
unsigned NumOps = MCID.getNumOperands();
unsigned JTOpIdx = NumOps - (MCID.isPredicable() ? 3 : 2);
MachineOperand JTOP = MI->getOperand(JTOpIdx);
unsigned JTI = JTOP.getIndex();
assert(JTI < JT.size());


@ -68,7 +68,7 @@ namespace {
void ARMExpandPseudo::TransferImpOps(MachineInstr &OldMI,
MachineInstrBuilder &UseMI,
MachineInstrBuilder &DefMI) {
const TargetInstrDesc &Desc = OldMI.getDesc();
const MCInstrDesc &Desc = OldMI.getDesc();
for (unsigned i = Desc.getNumOperands(), e = OldMI.getNumOperands();
i != e; ++i) {
const MachineOperand &MO = OldMI.getOperand(i);


@ -219,8 +219,8 @@ class ARMFastISel : public FastISel {
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.hasOptionalDef())
const MCInstrDesc &MCID = MI->getDesc();
if (!MCID.hasOptionalDef())
return false;
// Look to see if our OptionalDef is defining CPSR or CCR.
@ -234,15 +234,15 @@ bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
}
bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
const TargetInstrDesc &TID = MI->getDesc();
const MCInstrDesc &MCID = MI->getDesc();
// If we're a thumb2 or not NEON function we were handled via isPredicable.
if ((TID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
AFI->isThumb2Function())
return false;
for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i)
if (TID.OpInfo[i].isPredicate())
for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
if (MCID.OpInfo[i].isPredicate())
return true;
return false;
@ -278,7 +278,7 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
const TargetRegisterClass* RC) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
return ResultReg;
@ -288,7 +288,7 @@ unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@ -308,7 +308,7 @@ unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@ -331,7 +331,7 @@ unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
unsigned Op1, bool Op1IsKill,
unsigned Op2, bool Op2IsKill) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@ -355,7 +355,7 @@ unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
unsigned Op0, bool Op0IsKill,
uint64_t Imm) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@ -377,7 +377,7 @@ unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
unsigned Op0, bool Op0IsKill,
const ConstantFP *FPImm) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@ -400,7 +400,7 @@ unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
unsigned Op1, bool Op1IsKill,
uint64_t Imm) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@ -423,7 +423,7 @@ unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
uint64_t Imm) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@ -442,7 +442,7 @@ unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
uint64_t Imm1, uint64_t Imm2) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
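
// Illustrative sketch, not from this commit: the FastEmitInst_* bodies above
// all share this shape once TII.get() hands back an MCInstrDesc. The helper
// name EmitUnaryOp is made up, and the code assumes it sits inside ARMFastISel
// (TII, FuncInfo, DL, createResultReg, AddOptionalDefs). The real helpers also
// cover descriptors with no register defs.
unsigned EmitUnaryOp(unsigned MachineInstOpcode, const TargetRegisterClass *RC,
                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);  // MC-level descriptor
  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                      .addReg(Op0, getKillRegState(Op0IsKill)));
  return ResultReg;
}
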


@ -19,11 +19,11 @@ using namespace llvm;
static bool hasRAWHazard(MachineInstr *DefMI, MachineInstr *MI,
const TargetRegisterInfo &TRI) {
// FIXME: Detect integer instructions properly.
const TargetInstrDesc &TID = MI->getDesc();
unsigned Domain = TID.TSFlags & ARMII::DomainMask;
if (TID.mayStore())
const MCInstrDesc &MCID = MI->getDesc();
unsigned Domain = MCID.TSFlags & ARMII::DomainMask;
if (MCID.mayStore())
return false;
unsigned Opcode = TID.getOpcode();
unsigned Opcode = MCID.getOpcode();
if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
return false;
if ((Domain & ARMII::DomainVFP) || (Domain & ARMII::DomainNEON))
@ -43,15 +43,15 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
// Look for special VMLA / VMLS hazards. A VMUL / VADD / VSUB following
// a VMLA / VMLS will cause a 4 cycle stall.
const TargetInstrDesc &TID = MI->getDesc();
if (LastMI && (TID.TSFlags & ARMII::DomainMask) != ARMII::DomainGeneral) {
const MCInstrDesc &MCID = MI->getDesc();
if (LastMI && (MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainGeneral) {
MachineInstr *DefMI = LastMI;
const TargetInstrDesc &LastTID = LastMI->getDesc();
const MCInstrDesc &LastMCID = LastMI->getDesc();
// Skip over one non-VFP / NEON instruction.
if (!LastTID.isBarrier() &&
if (!LastMCID.isBarrier() &&
// On A9, AGU and NEON/FPU are muxed.
!(STI.isCortexA9() && (LastTID.mayLoad() || LastTID.mayStore())) &&
(LastTID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) {
!(STI.isCortexA9() && (LastMCID.mayLoad() || LastMCID.mayStore())) &&
(LastMCID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) {
MachineBasicBlock::iterator I = LastMI;
if (I != LastMI->getParent()->begin()) {
I = llvm::prior(I);
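
// Illustrative sketch, not from this commit: the domain test used above and in
// hasRAWHazard(), written out once. The helper name is made up; the fields and
// masks (MCInstrDesc::TSFlags, ARMII::DomainMask, DomainVFP, DomainNEON) are
// the ones the hunks above already rely on.
static bool isVFPOrNEONInstr(const MCInstrDesc &MCID) {
  unsigned Domain = MCID.TSFlags & ARMII::DomainMask;
  return (Domain & ARMII::DomainVFP) || (Domain & ARMII::DomainNEON);
}
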


@ -329,10 +329,10 @@ bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
if (Use->getOpcode() == ISD::CopyToReg)
return true;
if (Use->isMachineOpcode()) {
const TargetInstrDesc &TID = TII->get(Use->getMachineOpcode());
if (TID.mayStore())
const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
if (MCID.mayStore())
return true;
unsigned Opcode = TID.getOpcode();
unsigned Opcode = MCID.getOpcode();
if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
return true;
// vmlx feeding into another vmlx. We actually want to unfold


@ -977,12 +977,12 @@ Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
// Loads are scheduled for latency even if the instruction itinerary
// is not available.
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
if (TID.getNumDefs() == 0)
if (MCID.getNumDefs() == 0)
return Sched::RegPressure;
if (!Itins->isEmpty() &&
Itins->getOperandCycle(TID.getSchedClass(), 0) > 2)
Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
return Sched::Latency;
return Sched::RegPressure;


@ -1461,19 +1461,19 @@ static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
while (++I != E) {
if (I->isDebugValue() || MemOps.count(&*I))
continue;
const TargetInstrDesc &TID = I->getDesc();
if (TID.isCall() || TID.isTerminator() || I->hasUnmodeledSideEffects())
const MCInstrDesc &MCID = I->getDesc();
if (MCID.isCall() || MCID.isTerminator() || I->hasUnmodeledSideEffects())
return false;
if (isLd && TID.mayStore())
if (isLd && MCID.mayStore())
return false;
if (!isLd) {
if (TID.mayLoad())
if (MCID.mayLoad())
return false;
// It's not safe to move the first 'str' down.
// str r1, [r0]
// strh r5, [r0]
// str r4, [r0, #+4]
if (TID.mayStore())
if (MCID.mayStore())
return false;
}
for (unsigned j = 0, NumOps = I->getNumOperands(); j != NumOps; ++j) {
@ -1672,14 +1672,14 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
Ops.pop_back();
Ops.pop_back();
const TargetInstrDesc &TID = TII->get(NewOpc);
const TargetRegisterClass *TRC = TII->getRegClass(TID, 0, TRI);
const MCInstrDesc &MCID = TII->get(NewOpc);
const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0, TRI);
MRI->constrainRegClass(EvenReg, TRC);
MRI->constrainRegClass(OddReg, TRC);
// Form the pair instruction.
if (isLd) {
MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, TID)
MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID)
.addReg(EvenReg, RegState::Define)
.addReg(OddReg, RegState::Define)
.addReg(BaseReg);
@ -1691,7 +1691,7 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
++NumLDRDFormed;
} else {
MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, TID)
MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID)
.addReg(EvenReg)
.addReg(OddReg)
.addReg(BaseReg);
@ -1742,8 +1742,8 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
while (MBBI != E) {
for (; MBBI != E; ++MBBI) {
MachineInstr *MI = MBBI;
const TargetInstrDesc &TID = MI->getDesc();
if (TID.isCall() || TID.isTerminator()) {
const MCInstrDesc &MCID = MI->getDesc();
if (MCID.isCall() || MCID.isTerminator()) {
// Stop at barriers.
++MBBI;
break;
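
// Illustrative sketch, not from this commit: the reordering checks above,
// gathered into a single predicate with a made-up name. An instruction with
// any of these MCInstrDesc properties ends the window in which the pass may
// reschedule loads and stores.
static bool StopsLdStReordering(const MachineInstr &MI, bool isLd) {
  const MCInstrDesc &MCID = MI.getDesc();
  if (MCID.isCall() || MCID.isTerminator() || MI.hasUnmodeledSideEffects())
    return true;
  // A load must not move across a store; a store must not move across any
  // other memory access.
  return isLd ? MCID.mayStore() : (MCID.mayLoad() || MCID.mayStore());
}
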


@ -1274,7 +1274,7 @@ void ARMMCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const {
// Pseudo instructions don't get encoded.
const TargetInstrDesc &Desc = TII.get(MI.getOpcode());
const MCInstrDesc &Desc = TII.get(MI.getOpcode());
uint64_t TSFlags = Desc.TSFlags;
if ((TSFlags & ARMII::FormMask) == ARMII::Pseudo)
return;


@ -24,8 +24,8 @@
//#define DEBUG(X) do { X; } while (0)
/// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const
/// TargetInstrDesc ARMInsts[] definition and the TargetOperandInfo[]'s
/// describing the operand info for each ARMInsts[i].
/// MCInstrDesc ARMInsts[] definition and the MCOperandInfo[]'s describing the
/// operand info for each ARMInsts[i].
///
/// Together with an instruction's encoding format, we can take advantage of the
/// NumOperands and the OpInfo fields of the target instruction description in
@ -46,10 +46,10 @@
/// dag DefaultOps = (ops (i32 14), (i32 zero_reg));
/// }
///
/// which is manifested by the TargetOperandInfo[] of:
/// which is manifested by the MCOperandInfo[] of:
///
/// { 0, 0|(1<<TOI::Predicate), 0 },
/// { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }
/// { 0, 0|(1<<MCOI::Predicate), 0 },
/// { ARM::CCRRegClassID, 0|(1<<MCOI::Predicate), 0 }
///
/// So the first predicate MCOperand corresponds to the immediate part of the
/// ARM condition field (Inst{31-28}), and the second predicate MCOperand
@ -66,9 +66,9 @@
/// dag DefaultOps = (ops (i32 zero_reg));
/// }
///
/// which is manifested by the one TargetOperandInfo of:
/// which is manifested by the one MCOperandInfo of:
///
/// { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }
/// { ARM::CCRRegClassID, 0|(1<<MCOI::OptionalDef), 0 }
///
/// And this maps to one MCOperand with the register kind of ARM::CPSR.
#include "ARMGenInstrInfo.inc"
@ -588,9 +588,9 @@ static bool BadRegsMulFrm(unsigned Opcode, uint32_t insn) {
static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
unsigned short NumDefs = TID.getNumDefs();
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
unsigned short NumDefs = MCID.getNumDefs();
const MCOperandInfo *OpInfo = MCID.OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -739,9 +739,9 @@ static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn,
if (PW) {
MI.addOperand(MCOperand::CreateReg(0));
ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
const TargetInstrDesc &TID = ARMInsts[Opcode];
const MCInstrDesc &MCID = ARMInsts[Opcode];
unsigned IndexMode =
(TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
(MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, slice(insn, 7, 0) << 2,
ARM_AM::no_shift, IndexMode);
MI.addOperand(MCOperand::CreateImm(Offset));
@ -802,7 +802,7 @@ static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
if (CoprocessorOpcode(Opcode))
return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded, B);
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
if (!OpInfo) return false;
// MRS and MRSsys take one GPR reg Rd.
@ -901,7 +901,7 @@ static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
if (!OpInfo) return false;
unsigned &OpIdx = NumOpsAdded;
@ -976,10 +976,10 @@ static bool BadRegsDPFrm(unsigned Opcode, uint32_t insn) {
static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
unsigned short NumDefs = TID.getNumDefs();
bool isUnary = isUnaryDP(TID.TSFlags);
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
unsigned short NumDefs = MCID.getNumDefs();
bool isUnary = isUnaryDP(MCID.TSFlags);
const MCOperandInfo *OpInfo = MCID.OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -1041,7 +1041,7 @@ static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
}
// If this is a two-address operand, skip it, e.g., MOVCCr operand 1.
if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
if (isUnary && (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)) {
MI.addOperand(MCOperand::CreateReg(0));
++OpIdx;
}
@ -1089,10 +1089,10 @@ static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
unsigned short NumDefs = TID.getNumDefs();
bool isUnary = isUnaryDP(TID.TSFlags);
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
unsigned short NumDefs = MCID.getNumDefs();
bool isUnary = isUnaryDP(MCID.TSFlags);
const MCOperandInfo *OpInfo = MCID.OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -1118,7 +1118,7 @@ static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
}
// If this is a two-address operand, skip it, e.g., MOVCCs operand 1.
if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
if (isUnary && (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)) {
MI.addOperand(MCOperand::CreateReg(0));
++OpIdx;
}
@ -1244,17 +1244,17 @@ static bool BadRegsLdStFrm(unsigned Opcode, uint32_t insn, bool Store, bool WBac
static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
bool isPrePost = isPrePostLdSt(TID.TSFlags);
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
bool isPrePost = isPrePostLdSt(MCID.TSFlags);
const MCOperandInfo *OpInfo = MCID.OpInfo;
if (!OpInfo) return false;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
assert(((!isStore && TID.getNumDefs() > 0) ||
(isStore && (TID.getNumDefs() == 0 || isPrePost)))
assert(((!isStore && MCID.getNumDefs() > 0) ||
(isStore && (MCID.getNumDefs() == 0 || isPrePost)))
&& "Invalid arguments");
// Operand 0 of a pre- and post-indexed store is the address base writeback.
@ -1291,7 +1291,7 @@ static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
"Reg operand expected");
assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
assert((!isPrePost || (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1))
&& "Index mode or tied_to operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
decodeRn(insn))));
@ -1308,7 +1308,7 @@ static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
unsigned IndexMode =
(TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
(MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
if (getIBit(insn) == 0) {
// For pre- and post-indexed case, add a reg0 operand (Addressing Mode #2).
// Otherwise, skip the reg operand since for addrmode_imm12, Rn has already
@ -1379,17 +1379,17 @@ static bool HasDualReg(unsigned Opcode) {
static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
bool isPrePost = isPrePostLdSt(TID.TSFlags);
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
bool isPrePost = isPrePostLdSt(MCID.TSFlags);
const MCOperandInfo *OpInfo = MCID.OpInfo;
if (!OpInfo) return false;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
assert(((!isStore && TID.getNumDefs() > 0) ||
(isStore && (TID.getNumDefs() == 0 || isPrePost)))
assert(((!isStore && MCID.getNumDefs() > 0) ||
(isStore && (MCID.getNumDefs() == 0 || isPrePost)))
&& "Invalid arguments");
// Operand 0 of a pre- and post-indexed store is the address base writeback.
@ -1433,7 +1433,7 @@ static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
"Reg operand expected");
assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
assert((!isPrePost || (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1))
&& "Offset mode or tied_to operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
decodeRn(insn))));
@ -1451,7 +1451,7 @@ static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
unsigned IndexMode =
(TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
(MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
if (getAM3IBit(insn) == 1) {
MI.addOperand(MCOperand::CreateReg(0));
@ -1539,7 +1539,7 @@ static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
if (!OpInfo) return false;
unsigned &OpIdx = NumOpsAdded;
@ -1591,7 +1591,7 @@ static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -1653,8 +1653,8 @@ static bool DisassembleSatFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
return false;
const TargetInstrDesc &TID = ARMInsts[Opcode];
NumOpsAdded = TID.getNumOperands() - 2; // ignore predicate operands
const MCInstrDesc &MCID = ARMInsts[Opcode];
NumOpsAdded = MCID.getNumOperands() - 2; // ignore predicate operands
// Disassemble register def.
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
@ -1696,7 +1696,7 @@ static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
return false;
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -1802,7 +1802,7 @@ static bool DisassembleVFPUnaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
assert(NumOps >= 1 && "VFPUnaryFrm expects NumOps >= 1");
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -1842,8 +1842,8 @@ static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3");
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -1858,7 +1858,7 @@ static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
++OpIdx;
// Skip tied_to operand constraint.
if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
assert(NumOps >= 4 && "Expect >=4 operands");
MI.addOperand(MCOperand::CreateReg(0));
++OpIdx;
@ -1886,8 +1886,8 @@ static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2");
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
if (!OpInfo) return false;
bool SP = slice(insn, 8, 8) == 0; // A8.6.295 & A8.6.297
@ -1903,7 +1903,7 @@ static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
getRegisterEnum(B, RegClassID,
decodeVFPRd(insn, SP))));
assert(TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
assert(MCID.getOperandConstraint(1, MCOI::TIED_TO) != -1 &&
"Tied to operand expected");
MI.addOperand(MI.getOperand(0));
@ -1961,7 +1961,7 @@ static bool DisassembleVFPConv3Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3");
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
unsigned &OpIdx = NumOpsAdded;
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
@ -2011,7 +2011,7 @@ static bool DisassembleVFPConv5Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3");
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -2136,7 +2136,7 @@ static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -2402,8 +2402,8 @@ static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced,
unsigned alignment, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
// At least one DPR register plus addressing mode #6.
assert(NumOps >= 3 && "Expect >= 3 operands");
@ -2507,7 +2507,7 @@ static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
}
while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 &&
assert(MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1 &&
"Tied to operand expected");
MI.addOperand(MCOperand::CreateReg(0));
++OpIdx;
@ -2757,8 +2757,8 @@ static bool DisassembleNLdSt(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode,
uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
assert(NumOps >= 2 &&
(OpInfo[0].RegClass == ARM::DPRRegClassID ||
@ -2848,8 +2848,8 @@ enum N2VFlag {
static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opc];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opc];
const MCOperandInfo *OpInfo = MCID.OpInfo;
assert(NumOps >= 2 &&
(OpInfo[0].RegClass == ARM::DPRRegClassID ||
@ -2878,7 +2878,7 @@ static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
++OpIdx;
// VPADAL...
if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
// TIED_TO operand.
MI.addOperand(MCOperand::CreateReg(0));
++OpIdx;
@ -2892,7 +2892,7 @@ static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
// VZIP and others have two TIED_TO reg operands.
int Idx;
while (OpIdx < NumOps &&
(Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
(Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) {
// Add TIED_TO operand.
MI.addOperand(MI.getOperand(Idx));
++OpIdx;
@ -2945,8 +2945,8 @@ static bool DisassembleNVecDupLnFrm(MCInst &MI, unsigned Opc, uint32_t insn,
static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, bool LeftShift, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
assert(NumOps >= 3 &&
(OpInfo[0].RegClass == ARM::DPRRegClassID ||
@ -2964,7 +2964,7 @@ static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
decodeNEONRd(insn))));
++OpIdx;
if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
// TIED_TO operand.
MI.addOperand(MCOperand::CreateReg(0));
++OpIdx;
@ -3044,8 +3044,8 @@ enum N3VFlag {
static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
// No checking for OpInfo[2] because of MOVDneon/MOVQ with only two regs.
assert(NumOps >= 3 &&
@ -3076,7 +3076,7 @@ static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
++OpIdx;
// VABA, VABAL, VBSLd, VBSLq, ...
if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
// TIED_TO operand.
MI.addOperand(MCOperand::CreateReg(0));
++OpIdx;
@ -3163,8 +3163,8 @@ static bool DisassembleNVecMulScalarFrm(MCInst &MI, unsigned Opcode,
static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
if (!OpInfo) return false;
assert(NumOps >= 3 &&
@ -3192,7 +3192,7 @@ static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
// Process tied_to operand constraint.
int Idx;
if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
if ((Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) {
MI.addOperand(MI.getOperand(Idx));
++OpIdx;
}
@ -3221,11 +3221,11 @@ static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
if (!OpInfo) return false;
assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
assert(MCID.getNumDefs() == 1 && NumOps >= 3 &&
OpInfo[0].RegClass == ARM::GPRRegClassID &&
OpInfo[1].RegClass == ARM::DPRRegClassID &&
OpInfo[2].RegClass < 0 &&
@ -3255,14 +3255,14 @@ static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
if (!OpInfo) return false;
assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
assert(MCID.getNumDefs() == 1 && NumOps >= 3 &&
OpInfo[0].RegClass == ARM::DPRRegClassID &&
OpInfo[1].RegClass == ARM::DPRRegClassID &&
TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
MCID.getOperandConstraint(1, MCOI::TIED_TO) != -1 &&
OpInfo[2].RegClass == ARM::GPRRegClassID &&
OpInfo[3].RegClass < 0 &&
"Expect >= 3 operands with one dst operand");
@ -3294,7 +3294,7 @@ static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleNDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
assert(NumOps >= 2 &&
(OpInfo[0].RegClass == ARM::DPRRegClassID ||
@ -3604,11 +3604,11 @@ bool ARMBasicMCBuilder::DoPredicateOperands(MCInst& MI, unsigned Opcode,
assert(NumOpsRemaining > 0 && "Invalid argument");
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
unsigned Idx = MI.getNumOperands();
// First, we check whether this instr specifies the PredicateOperand through
// a pair of TargetOperandInfos with isPredicate() property.
// a pair of MCOperandInfos with isPredicate() property.
if (NumOpsRemaining >= 2 &&
OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
OpInfo[Idx].RegClass < 0 &&
@ -3636,13 +3636,13 @@ bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode,
assert(NumOpsRemaining > 0 && "Invalid argument");
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const std::string &Name = ARMInsts[Opcode].Name;
unsigned Idx = MI.getNumOperands();
uint64_t TSFlags = ARMInsts[Opcode].TSFlags;
// First, we check whether this instr specifies the PredicateOperand through
// a pair of TargetOperandInfos with isPredicate() property.
// a pair of MCOperandInfos with isPredicate() property.
if (NumOpsRemaining >= 2 &&
OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
OpInfo[Idx].RegClass < 0 &&


@ -350,7 +350,7 @@ static inline unsigned decodeRotate(uint32_t insn) {
static bool DisassembleThumb1General(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -425,8 +425,8 @@ static bool DisassembleThumb1General(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleThumb1DP(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -454,7 +454,7 @@ static bool DisassembleThumb1DP(MCInst &MI, unsigned Opcode, uint32_t insn,
assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::tGPRRegClassID
&& "Thumb reg operand expected");
int Idx;
if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
if ((Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) {
// The reg operand is tied to the first reg operand.
MI.addOperand(MI.getOperand(Idx));
++OpIdx;
@ -511,8 +511,8 @@ static bool DisassembleThumb1Special(MCInst &MI, unsigned Opcode, uint32_t insn,
return true;
}
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -530,7 +530,7 @@ static bool DisassembleThumb1Special(MCInst &MI, unsigned Opcode, uint32_t insn,
assert(OpIdx < NumOps && "More operands expected");
int Idx;
if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
if ((Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) {
// The reg operand is tied to the first reg operand.
MI.addOperand(MI.getOperand(Idx));
++OpIdx;
@ -554,7 +554,7 @@ static bool DisassembleThumb1Special(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleThumb1LdPC(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
if (!OpInfo) return false;
assert(NumOps >= 2 && OpInfo[0].RegClass == ARM::tGPRRegClassID &&
@ -602,7 +602,7 @@ static bool DisassembleThumb1LdPC(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleThumb2Ldpci(MCInst &MI, unsigned Opcode,
uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
if (!OpInfo) return false;
assert(NumOps >= 2 &&
@ -630,8 +630,8 @@ static bool DisassembleThumb2Ldpci(MCInst &MI, unsigned Opcode,
static bool DisassembleThumb1LdSt(unsigned opA, MCInst &MI, unsigned Opcode,
uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
unsigned &OpIdx = NumOpsAdded;
assert(NumOps >= 2
@ -680,7 +680,7 @@ static bool DisassembleThumb1LdStSP(MCInst &MI, unsigned Opcode, uint32_t insn,
assert((Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi)
&& "Unexpected opcode");
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
if (!OpInfo) return false;
assert(NumOps >= 3 &&
@ -708,7 +708,7 @@ static bool DisassembleThumb1AddPCi(MCInst &MI, unsigned Opcode, uint32_t insn,
assert(Opcode == ARM::tADDrPCi && "Unexpected opcode");
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
if (!OpInfo) return false;
assert(NumOps >= 2 && OpInfo[0].RegClass == ARM::tGPRRegClassID &&
@ -733,7 +733,7 @@ static bool DisassembleThumb1AddSPi(MCInst &MI, unsigned Opcode, uint32_t insn,
assert(Opcode == ARM::tADDrSPi && "Unexpected opcode");
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
if (!OpInfo) return false;
assert(NumOps >= 3 &&
@ -810,7 +810,7 @@ static bool DisassembleThumb1Misc(MCInst &MI, unsigned Opcode, uint32_t insn,
if (Opcode == ARM::tPUSH || Opcode == ARM::tPOP)
return DisassembleThumb1PushPop(MI, Opcode, insn, NumOps, NumOpsAdded, B);
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
// Predicate operands are handled elsewhere.
if (NumOps == 2 &&
@ -958,7 +958,7 @@ static bool DisassembleThumb1CondBr(MCInst &MI, unsigned Opcode, uint32_t insn,
if (Opcode == ARM::tTRAP)
return true;
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
if (!OpInfo) return false;
assert(NumOps == 3 && OpInfo[0].RegClass < 0 &&
@ -989,7 +989,7 @@ static bool DisassembleThumb1CondBr(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleThumb1Br(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
if (!OpInfo) return false;
assert(NumOps == 1 && OpInfo[0].RegClass < 0 && "1 imm operand expected");
@ -1226,7 +1226,7 @@ static bool DisassembleThumb2LdStMul(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleThumb2LdStEx(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
if (!OpInfo) return false;
unsigned &OpIdx = NumOpsAdded;
@ -1316,7 +1316,7 @@ static bool DisassembleThumb2LdStEx(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleThumb2LdStDual(MCInst &MI, unsigned Opcode,
uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
if (!OpInfo) return false;
assert(NumOps >= 4
@ -1423,8 +1423,8 @@ static inline bool Thumb2ShiftOpcode(unsigned Opcode) {
static bool DisassembleThumb2DPSoReg(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
unsigned &OpIdx = NumOpsAdded;
// Special case handling.
@ -1467,7 +1467,7 @@ static bool DisassembleThumb2DPSoReg(MCInst &MI, unsigned Opcode, uint32_t insn,
if (ThreeReg) {
int Idx;
if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
if ((Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) {
// Process tied_to operand constraint.
MI.addOperand(MI.getOperand(Idx));
++OpIdx;
@ -1521,8 +1521,8 @@ static bool DisassembleThumb2DPSoReg(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleThumb2DPModImm(MCInst &MI, unsigned Opcode,
uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -1550,7 +1550,7 @@ static bool DisassembleThumb2DPModImm(MCInst &MI, unsigned Opcode,
return false;
}
int Idx;
if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
if ((Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) {
// The reg operand is tied to the first reg operand.
MI.addOperand(MI.getOperand(Idx));
} else {
@ -1590,8 +1590,8 @@ static inline bool Thumb2SaturateOpcode(unsigned Opcode) {
/// o t2SSAT16, t2USAT16: Rs sat_pos Rn
static bool DisassembleThumb2Sat(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned &NumOpsAdded, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
NumOpsAdded = TID.getNumOperands() - 2; // ignore predicate operands
const MCInstrDesc &MCID = ARMInsts[Opcode];
NumOpsAdded = MCID.getNumOperands() - 2; // ignore predicate operands
// Disassemble the register def.
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::rGPRRegClassID,
@ -1635,8 +1635,8 @@ static bool DisassembleThumb2Sat(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleThumb2DPBinImm(MCInst &MI, unsigned Opcode,
uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -1659,7 +1659,7 @@ static bool DisassembleThumb2DPBinImm(MCInst &MI, unsigned Opcode,
if (TwoReg) {
assert(NumOps >= 3 && "Expect >= 3 operands");
int Idx;
if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
if ((Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) {
// Process tied_to operand constraint.
MI.addOperand(MI.getOperand(Idx));
} else {
@ -1907,8 +1907,8 @@ static bool DisassembleThumb2PreLoad(MCInst &MI, unsigned Opcode, uint32_t insn,
// t2PLDs: Rn Rm imm2=Inst{5-4}
// Same pattern applies for t2PLDW* and t2PLI*.
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -2073,8 +2073,8 @@ static bool DisassembleThumb2LdSt(bool Load, MCInst &MI, unsigned Opcode,
// See, for example, A6.3.7 Load word: Table A6-18 Load word.
if (Load && Rn == 15)
return DisassembleThumb2Ldpci(MI, Opcode, insn, NumOps, NumOpsAdded, B);
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -2085,7 +2085,7 @@ static bool DisassembleThumb2LdSt(bool Load, MCInst &MI, unsigned Opcode,
"Expect >= 3 operands and first two as reg operands");
bool ThreeReg = (OpInfo[2].RegClass > 0);
bool TIED_TO = ThreeReg && TID.getOperandConstraint(2, TOI::TIED_TO) != -1;
bool TIED_TO = ThreeReg && MCID.getOperandConstraint(2, MCOI::TIED_TO) != -1;
bool Imm12 = !ThreeReg && slice(insn, 23, 23) == 1; // ARMInstrThumb2.td
// Build the register operands, followed by the immediate.
@ -2160,8 +2160,8 @@ static bool DisassembleThumb2LdSt(bool Load, MCInst &MI, unsigned Opcode,
static bool DisassembleThumb2DPReg(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
const MCInstrDesc &MCID = ARMInsts[Opcode];
const MCOperandInfo *OpInfo = MCID.OpInfo;
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
@ -2214,7 +2214,7 @@ static bool DisassembleThumb2DPReg(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleThumb2Mul(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
assert(NumOps >= 3 &&
OpInfo[0].RegClass == ARM::rGPRRegClassID &&
@ -2259,7 +2259,7 @@ static bool DisassembleThumb2Mul(MCInst &MI, unsigned Opcode, uint32_t insn,
static bool DisassembleThumb2LongMul(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
assert(NumOps >= 3 &&
OpInfo[0].RegClass == ARM::rGPRRegClassID &&


@ -137,11 +137,11 @@ unsigned MLxExpansion::getDefReg(MachineInstr *MI) const {
bool MLxExpansion::hasRAWHazard(unsigned Reg, MachineInstr *MI) const {
// FIXME: Detect integer instructions properly.
const TargetInstrDesc &TID = MI->getDesc();
unsigned Domain = TID.TSFlags & ARMII::DomainMask;
if (TID.mayStore())
const MCInstrDesc &MCID = MI->getDesc();
unsigned Domain = MCID.TSFlags & ARMII::DomainMask;
if (MCID.mayStore())
return false;
unsigned Opcode = TID.getOpcode();
unsigned Opcode = MCID.getOpcode();
if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
return false;
if ((Domain & ARMII::DomainVFP) || (Domain & ARMII::DomainNEON))
@ -218,18 +218,18 @@ MLxExpansion::ExpandFPMLxInstruction(MachineBasicBlock &MBB, MachineInstr *MI,
ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NextOp).getImm();
unsigned PredReg = MI->getOperand(++NextOp).getReg();
const TargetInstrDesc &TID1 = TII->get(MulOpc);
const TargetInstrDesc &TID2 = TII->get(AddSubOpc);
unsigned TmpReg = MRI->createVirtualRegister(TII->getRegClass(TID1, 0, TRI));
const MCInstrDesc &MCID1 = TII->get(MulOpc);
const MCInstrDesc &MCID2 = TII->get(AddSubOpc);
unsigned TmpReg = MRI->createVirtualRegister(TII->getRegClass(MCID1, 0, TRI));
MachineInstrBuilder MIB = BuildMI(MBB, *MI, MI->getDebugLoc(), TID1, TmpReg)
MachineInstrBuilder MIB = BuildMI(MBB, *MI, MI->getDebugLoc(), MCID1, TmpReg)
.addReg(Src1Reg, getKillRegState(Src1Kill))
.addReg(Src2Reg, getKillRegState(Src2Kill));
if (HasLane)
MIB.addImm(LaneImm);
MIB.addImm(Pred).addReg(PredReg);
MIB = BuildMI(MBB, *MI, MI->getDebugLoc(), TID2)
MIB = BuildMI(MBB, *MI, MI->getDebugLoc(), MCID2)
.addReg(DstReg, getDefRegState(true) | getDeadRegState(DstDead));
if (NegAcc) {
@ -273,15 +273,15 @@ bool MLxExpansion::ExpandFPMLxInstructions(MachineBasicBlock &MBB) {
continue;
}
const TargetInstrDesc &TID = MI->getDesc();
if (TID.isBarrier()) {
const MCInstrDesc &MCID = MI->getDesc();
if (MCID.isBarrier()) {
clearStack();
Skip = 0;
++MII;
continue;
}
unsigned Domain = TID.TSFlags & ARMII::DomainMask;
unsigned Domain = MCID.TSFlags & ARMII::DomainMask;
if (Domain == ARMII::DomainGeneral) {
if (++Skip == 2)
// Assume dual issues of non-VFP / NEON instructions.
@ -291,7 +291,7 @@ bool MLxExpansion::ExpandFPMLxInstructions(MachineBasicBlock &MBB) {
unsigned MulOpc, AddSubOpc;
bool NegAcc, HasLane;
if (!TII->isFpMLxInstruction(TID.getOpcode(),
if (!TII->isFpMLxInstruction(MCID.getOpcode(),
MulOpc, AddSubOpc, NegAcc, HasLane) ||
!FindMLxHazard(MI))
pushStack(MI);
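
The same descriptor queries recur across every backend touched by this change: the object returned by MachineInstr::getDesc() is now an MCInstrDesc, but target-specific TSFlags bits are read off it exactly as before. A minimal, hypothetical sketch of that pattern (the mask/value pair stands in for things like ARMII::DomainMask and is supplied by the caller):

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/DataTypes.h"

using namespace llvm;

// Hypothetical helper: test a target-specific TSFlags field on the
// MC-layer descriptor of a machine instruction.
static bool hasTSFlagValue(const MachineInstr *MI, uint64_t Mask,
                           uint64_t Value) {
  const MCInstrDesc &MCID = MI->getDesc();   // was TargetInstrDesc
  return (MCID.TSFlags & Mask) == Value;
}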


@ -239,9 +239,9 @@ void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
unsigned Chunk = (1 << 3) - 1;
unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
Bytes -= ThisVal;
const TargetInstrDesc &TID = TII.get(isSub ? ARM::tSUBi3 : ARM::tADDi3);
const MCInstrDesc &MCID = TII.get(isSub ? ARM::tSUBi3 : ARM::tADDi3);
const MachineInstrBuilder MIB =
AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TID, DestReg).setMIFlags(MIFlags));
AddDefaultT1CC(BuildMI(MBB, MBBI, dl, MCID, DestReg).setMIFlags(MIFlags));
AddDefaultPred(MIB.addReg(BaseReg, RegState::Kill).addImm(ThisVal));
} else {
BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
@ -291,8 +291,8 @@ void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
}
if (ExtraOpc) {
const TargetInstrDesc &TID = TII.get(ExtraOpc);
AddDefaultPred(AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TID, DestReg))
const MCInstrDesc &MCID = TII.get(ExtraOpc);
AddDefaultPred(AddDefaultT1CC(BuildMI(MBB, MBBI, dl, MCID, DestReg))
.addReg(DestReg, RegState::Kill)
.addImm(((unsigned)NumBytes) & 3)
.setMIFlags(MIFlags));
@ -360,8 +360,8 @@ static void emitThumbConstant(MachineBasicBlock &MBB,
if (Imm > 0)
emitThumbRegPlusImmediate(MBB, MBBI, dl, DestReg, DestReg, Imm, TII, MRI);
if (isSub) {
const TargetInstrDesc &TID = TII.get(ARM::tRSB);
AddDefaultPred(AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TID, DestReg))
const MCInstrDesc &MCID = TII.get(ARM::tRSB);
AddDefaultPred(AddDefaultT1CC(BuildMI(MBB, MBBI, dl, MCID, DestReg))
.addReg(DestReg, RegState::Kill));
}
}
@ -396,7 +396,7 @@ rewriteFrameIndex(MachineBasicBlock::iterator II, unsigned FrameRegIdx,
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc dl = MI.getDebugLoc();
unsigned Opcode = MI.getOpcode();
const TargetInstrDesc &Desc = MI.getDesc();
const MCInstrDesc &Desc = MI.getDesc();
unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
if (Opcode == ARM::tADDrSPi) {
@ -653,7 +653,7 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
assert(Offset && "This code isn't needed if offset already handled!");
unsigned Opcode = MI.getOpcode();
const TargetInstrDesc &Desc = MI.getDesc();
const MCInstrDesc &Desc = MI.getDesc();
// Remove predicate first.
int PIdx = MI.findFirstPredOperandIdx();


@ -396,7 +396,7 @@ bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
unsigned FrameReg, int &Offset,
const ARMBaseInstrInfo &TII) {
unsigned Opcode = MI.getOpcode();
const TargetInstrDesc &Desc = MI.getDesc();
const MCInstrDesc &Desc = MI.getDesc();
unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
bool isSub = false;


@ -189,8 +189,8 @@ Thumb2SizeReduce::Thumb2SizeReduce() : MachineFunctionPass(ID) {
}
}
static bool HasImplicitCPSRDef(const TargetInstrDesc &TID) {
for (const unsigned *Regs = TID.ImplicitDefs; *Regs; ++Regs)
static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) {
for (const unsigned *Regs = MCID.ImplicitDefs; *Regs; ++Regs)
if (*Regs == ARM::CPSR)
return true;
return false;
@ -484,8 +484,8 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
if (Entry.LowRegs1 && !VerifyLowRegs(MI))
return false;
const TargetInstrDesc &TID = MI->getDesc();
if (TID.mayLoad() || TID.mayStore())
const MCInstrDesc &MCID = MI->getDesc();
if (MCID.mayLoad() || MCID.mayStore())
return ReduceLoadStore(MBB, MI, Entry);
unsigned Opc = MI->getOpcode();
@ -576,23 +576,23 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
}
// Check if it's possible / necessary to transfer the predicate.
const TargetInstrDesc &NewTID = TII->get(Entry.NarrowOpc2);
const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc2);
unsigned PredReg = 0;
ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
bool SkipPred = false;
if (Pred != ARMCC::AL) {
if (!NewTID.isPredicable())
if (!NewMCID.isPredicable())
// Can't transfer predicate, fail.
return false;
} else {
SkipPred = !NewTID.isPredicable();
SkipPred = !NewMCID.isPredicable();
}
bool HasCC = false;
bool CCDead = false;
const TargetInstrDesc &TID = MI->getDesc();
if (TID.hasOptionalDef()) {
unsigned NumOps = TID.getNumOperands();
const MCInstrDesc &MCID = MI->getDesc();
if (MCID.hasOptionalDef()) {
unsigned NumOps = MCID.getNumOperands();
HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
if (HasCC && MI->getOperand(NumOps-1).isDead())
CCDead = true;
@ -602,15 +602,15 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
// Avoid adding a false dependency on partial flag update by some 16-bit
// instructions which have the 's' bit set.
if (Entry.PartFlag && NewTID.hasOptionalDef() && HasCC &&
if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
canAddPseudoFlagDep(CPSRDef, MI))
return false;
// Add the 16-bit instruction.
DebugLoc dl = MI->getDebugLoc();
MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, NewTID);
MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, NewMCID);
MIB.addOperand(MI->getOperand(0));
if (NewTID.hasOptionalDef()) {
if (NewMCID.hasOptionalDef()) {
if (HasCC)
AddDefaultT1CC(MIB, CCDead);
else
@ -618,11 +618,11 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
}
// Transfer the rest of operands.
unsigned NumOps = TID.getNumOperands();
unsigned NumOps = MCID.getNumOperands();
for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
if (i < NumOps && TID.OpInfo[i].isOptionalDef())
if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
continue;
if (SkipPred && TID.OpInfo[i].isPredicate())
if (SkipPred && MCID.OpInfo[i].isPredicate())
continue;
MIB.addOperand(MI->getOperand(i));
}
@ -649,9 +649,9 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
if (Entry.Imm1Limit)
Limit = ((1 << Entry.Imm1Limit) - 1) * Scale;
const TargetInstrDesc &TID = MI->getDesc();
for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i) {
if (TID.OpInfo[i].isPredicate())
const MCInstrDesc &MCID = MI->getDesc();
for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
if (MCID.OpInfo[i].isPredicate())
continue;
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg()) {
@ -663,29 +663,29 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
if (Entry.LowRegs1 && !isARMLowRegister(Reg))
return false;
} else if (MO.isImm() &&
!TID.OpInfo[i].isPredicate()) {
!MCID.OpInfo[i].isPredicate()) {
if (((unsigned)MO.getImm()) > Limit || (MO.getImm() & (Scale-1)) != 0)
return false;
}
}
// Check if it's possible / necessary to transfer the predicate.
const TargetInstrDesc &NewTID = TII->get(Entry.NarrowOpc1);
const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc1);
unsigned PredReg = 0;
ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
bool SkipPred = false;
if (Pred != ARMCC::AL) {
if (!NewTID.isPredicable())
if (!NewMCID.isPredicable())
// Can't transfer predicate, fail.
return false;
} else {
SkipPred = !NewTID.isPredicable();
SkipPred = !NewMCID.isPredicable();
}
bool HasCC = false;
bool CCDead = false;
if (TID.hasOptionalDef()) {
unsigned NumOps = TID.getNumOperands();
if (MCID.hasOptionalDef()) {
unsigned NumOps = MCID.getNumOperands();
HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
if (HasCC && MI->getOperand(NumOps-1).isDead())
CCDead = true;
@ -695,15 +695,15 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
// Avoid adding a false dependency on partial flag update by some 16-bit
// instructions which have the 's' bit set.
if (Entry.PartFlag && NewTID.hasOptionalDef() && HasCC &&
if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
canAddPseudoFlagDep(CPSRDef, MI))
return false;
// Add the 16-bit instruction.
DebugLoc dl = MI->getDebugLoc();
MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, NewTID);
MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, NewMCID);
MIB.addOperand(MI->getOperand(0));
if (NewTID.hasOptionalDef()) {
if (NewMCID.hasOptionalDef()) {
if (HasCC)
AddDefaultT1CC(MIB, CCDead);
else
@ -711,15 +711,15 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
}
// Transfer the rest of operands.
unsigned NumOps = TID.getNumOperands();
unsigned NumOps = MCID.getNumOperands();
for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
if (i < NumOps && TID.OpInfo[i].isOptionalDef())
if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
continue;
if ((TID.getOpcode() == ARM::t2RSBSri ||
TID.getOpcode() == ARM::t2RSBri) && i == 2)
if ((MCID.getOpcode() == ARM::t2RSBSri ||
MCID.getOpcode() == ARM::t2RSBri) && i == 2)
// Skip the zero immediate operand, it's now implicit.
continue;
bool isPred = (i < NumOps && TID.OpInfo[i].isPredicate());
bool isPred = (i < NumOps && MCID.OpInfo[i].isPredicate());
if (SkipPred && isPred)
continue;
const MachineOperand &MO = MI->getOperand(i);
@ -733,7 +733,7 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
MIB.addOperand(MO);
}
}
if (!TID.isPredicable() && NewTID.isPredicable())
if (!MCID.isPredicable() && NewMCID.isPredicable())
AddDefaultPred(MIB);
// Transfer MI flags.
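
A sketch of the operand-transfer idiom used by ReduceTo2Addr and ReduceToNarrow above, with the descriptor and operand-info types from the MC layer (the helper itself is illustrative, not part of this commit):

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

// Copy MI's operands onto MIB, skipping any operand the descriptor
// marks as a predicate or an optional definition.
static void copyPlainOperands(const MachineInstr *MI,
                              MachineInstrBuilder MIB) {
  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && (MCID.OpInfo[i].isPredicate() ||
                       MCID.OpInfo[i].isOptionalDef()))
      continue;
    MIB.addOperand(MI->getOperand(i));
  }
}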


@ -146,21 +146,21 @@ void BlackfinDAGToDAGISel::FixRegisterClasses(SelectionDAG &DAG) {
NI != DAG.allnodes_end(); ++NI) {
if (NI->use_empty() || !NI->isMachineOpcode())
continue;
const TargetInstrDesc &DefTID = TII.get(NI->getMachineOpcode());
const MCInstrDesc &DefMCID = TII.get(NI->getMachineOpcode());
for (SDNode::use_iterator UI = NI->use_begin(); !UI.atEnd(); ++UI) {
if (!UI->isMachineOpcode())
continue;
if (UI.getUse().getResNo() >= DefTID.getNumDefs())
if (UI.getUse().getResNo() >= DefMCID.getNumDefs())
continue;
const TargetRegisterClass *DefRC =
TII.getRegClass(DefTID, UI.getUse().getResNo(), TRI);
TII.getRegClass(DefMCID, UI.getUse().getResNo(), TRI);
const TargetInstrDesc &UseTID = TII.get(UI->getMachineOpcode());
if (UseTID.getNumDefs()+UI.getOperandNo() >= UseTID.getNumOperands())
const MCInstrDesc &UseMCID = TII.get(UI->getMachineOpcode());
if (UseMCID.getNumDefs()+UI.getOperandNo() >= UseMCID.getNumOperands())
continue;
const TargetRegisterClass *UseRC =
TII.getRegClass(UseTID, UseTID.getNumDefs()+UI.getOperandNo(), TRI);
TII.getRegClass(UseMCID, UseMCID.getNumDefs()+UI.getOperandNo(), TRI);
if (!DefRC || !UseRC)
continue;
// We cannot copy CC <-> !(CC/D)


@ -109,7 +109,7 @@ static bool delayHasHazard(MachineBasicBlock::iterator &candidate,
// Hazard check
MachineBasicBlock::iterator a = candidate;
MachineBasicBlock::iterator b = slot;
TargetInstrDesc desc = candidate->getDesc();
MCInstrDesc desc = candidate->getDesc();
// MBB layout:-
// candidate := a0 = operation(a1, a2)
@ -183,7 +183,7 @@ static bool isDelayFiller(MachineBasicBlock &MBB,
if (candidate == MBB.begin())
return false;
TargetInstrDesc brdesc = (--candidate)->getDesc();
MCInstrDesc brdesc = (--candidate)->getDesc();
return (brdesc.hasDelaySlot());
}
@ -211,7 +211,7 @@ findDelayInstr(MachineBasicBlock &MBB,MachineBasicBlock::iterator slot) {
break;
--I;
TargetInstrDesc desc = I->getDesc();
MCInstrDesc desc = I->getDesc();
if (desc.hasDelaySlot() || desc.isBranch() || isDelayFiller(MBB,I) ||
desc.isCall() || desc.isReturn() || desc.isBarrier() ||
hasUnknownSideEffects(I))


@ -179,7 +179,7 @@ void MBlazeMCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const {
unsigned Opcode = MI.getOpcode();
const TargetInstrDesc &Desc = TII.get(Opcode);
const MCInstrDesc &Desc = TII.get(Opcode);
uint64_t TSFlags = Desc.TSFlags;
// Keep track of the current byte being emitted.
unsigned CurByte = 0;


@ -158,13 +158,13 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
}
bool MSP430InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.isTerminator()) return false;
const MCInstrDesc &MCID = MI->getDesc();
if (!MCID.isTerminator()) return false;
// Conditional branch is a special case.
if (TID.isBranch() && !TID.isBarrier())
if (MCID.isBranch() && !MCID.isBarrier())
return true;
if (!TID.isPredicable())
if (!MCID.isPredicable())
return true;
return !isPredicated(MI);
}
@ -293,7 +293,7 @@ MSP430InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
/// instruction may be. This returns the maximum number of bytes.
///
unsigned MSP430InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
const TargetInstrDesc &Desc = MI->getDesc();
const MCInstrDesc &Desc = MI->getDesc();
switch (Desc.TSFlags & MSP430II::SizeMask) {
default:


@ -59,10 +59,10 @@ runOnMachineBasicBlock(MachineBasicBlock &MBB)
{
bool Changed = false;
for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I) {
const TargetInstrDesc& Tid = I->getDesc();
if (Tid.hasDelaySlot() &&
const MCInstrDesc& MCid = I->getDesc();
if (MCid.hasDelaySlot() &&
(TM.getSubtarget<MipsSubtarget>().isMips1() ||
Tid.isCall() || Tid.isBranch() || Tid.isReturn())) {
MCid.isCall() || MCid.isBranch() || MCid.isReturn())) {
MachineBasicBlock::iterator J = I;
++J;
BuildMI(MBB, J, I->getDebugLoc(), TII->get(Mips::NOP));
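
The delay-slot check above reads entirely off the MC descriptor; a compact, illustrative form of the same test (the Mips-1 subtarget check is left out):

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

// Illustrative predicate: does this instruction need a following NOP
// according to its descriptor flags?
static bool needsDelaySlotFill(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();
  return MCID.hasDelaySlot() &&
         (MCID.isCall() || MCID.isBranch() || MCID.isReturn());
}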


@ -61,9 +61,9 @@ bool MipsExpandPseudo::runOnMachineBasicBlock(MachineBasicBlock& MBB) {
bool Changed = false;
for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end();) {
const TargetInstrDesc& Tid = I->getDesc();
const MCInstrDesc& MCid = I->getDesc();
switch(Tid.getOpcode()) {
switch(MCid.getOpcode()) {
default:
++I;
continue;
@ -87,7 +87,7 @@ void MipsExpandPseudo::ExpandBuildPairF64(MachineBasicBlock& MBB,
MachineBasicBlock::iterator I) {
unsigned DstReg = I->getOperand(0).getReg();
unsigned LoReg = I->getOperand(1).getReg(), HiReg = I->getOperand(2).getReg();
const TargetInstrDesc& Mtc1Tdd = TII->get(Mips::MTC1);
const MCInstrDesc& Mtc1Tdd = TII->get(Mips::MTC1);
DebugLoc dl = I->getDebugLoc();
const unsigned* SubReg =
TM.getRegisterInfo()->getSubRegisters(DstReg);
@ -103,7 +103,7 @@ void MipsExpandPseudo::ExpandExtractElementF64(MachineBasicBlock& MBB,
unsigned DstReg = I->getOperand(0).getReg();
unsigned SrcReg = I->getOperand(1).getReg();
unsigned N = I->getOperand(2).getImm();
const TargetInstrDesc& Mfc1Tdd = TII->get(Mips::MFC1);
const MCInstrDesc& Mfc1Tdd = TII->get(Mips::MFC1);
DebugLoc dl = I->getDebugLoc();
const unsigned* SubReg = TM.getRegisterInfo()->getSubRegisters(SrcReg);


@ -341,8 +341,8 @@ void MipsInstrInfo::BuildCondBr(MachineBasicBlock &MBB,
const SmallVectorImpl<MachineOperand>& Cond)
const {
unsigned Opc = Cond[0].getImm();
const TargetInstrDesc &TID = get(Opc);
MachineInstrBuilder MIB = BuildMI(&MBB, DL, TID);
const MCInstrDesc &MCID = get(Opc);
MachineInstrBuilder MIB = BuildMI(&MBB, DL, MCID);
for (unsigned i = 1; i < Cond.size(); ++i)
MIB.addReg(Cond[i].getReg());


@ -47,8 +47,8 @@ void PTXInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
bool KillSrc) const {
for (int i = 0, e = sizeof(map)/sizeof(map[0]); i != e; ++ i) {
if (map[i].cls->contains(DstReg, SrcReg)) {
const TargetInstrDesc &TID = get(map[i].opcode);
MachineInstr *MI = BuildMI(MBB, I, DL, TID, DstReg).
const MCInstrDesc &MCID = get(map[i].opcode);
MachineInstr *MI = BuildMI(MBB, I, DL, MCID, DstReg).
addReg(SrcReg, getKillRegState(KillSrc));
AddDefaultPredicate(MI);
return;
@ -69,8 +69,8 @@ bool PTXInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
for (int i = 0, e = sizeof(map)/sizeof(map[0]); i != e; ++ i)
if (DstRC == map[i].cls) {
const TargetInstrDesc &TID = get(map[i].opcode);
MachineInstr *MI = BuildMI(MBB, I, DL, TID, DstReg).addReg(SrcReg);
const MCInstrDesc &MCID = get(map[i].opcode);
MachineInstr *MI = BuildMI(MBB, I, DL, MCID, DstReg).addReg(SrcReg);
AddDefaultPredicate(MI);
return true;
}
@ -178,13 +178,13 @@ AnalyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock::const_iterator iter = MBB.end();
const MachineInstr& instLast1 = *--iter;
const TargetInstrDesc &desc1 = instLast1.getDesc();
const MCInstrDesc &desc1 = instLast1.getDesc();
// for special case that MBB has only 1 instruction
const bool IsSizeOne = MBB.size() == 1;
// if IsSizeOne is true, *--iter and instLast2 are invalid
// we put a dummy value in instLast2 and desc2 since they are used
const MachineInstr& instLast2 = IsSizeOne ? instLast1 : *--iter;
const TargetInstrDesc &desc2 = IsSizeOne ? desc1 : instLast2.getDesc();
const MCInstrDesc &desc2 = IsSizeOne ? desc1 : instLast2.getDesc();
DEBUG(dbgs() << "\n");
DEBUG(dbgs() << "AnalyzeBranch: opcode: " << instLast1.getOpcode() << "\n");
@ -387,7 +387,7 @@ void PTXInstrInfo::AddDefaultPredicate(MachineInstr *MI) {
}
bool PTXInstrInfo::IsAnyKindOfBranch(const MachineInstr& inst) {
const TargetInstrDesc &desc = inst.getDesc();
const MCInstrDesc &desc = inst.getDesc();
return desc.isTerminator() || desc.isBranch() || desc.isIndirectBranch();
}
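
BuildMI now takes the MC descriptor directly, as the PTX copy routines above show. A self-contained sketch of that builder pattern (opcode, registers, and insertion point are assumed to come from the caller):

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

// Emit a simple reg-to-reg instruction from an MCInstrDesc.
static void emitRegToReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator I, DebugLoc DL,
                         const TargetInstrInfo *TII, unsigned Opc,
                         unsigned DstReg, unsigned SrcReg) {
  const MCInstrDesc &MCID = TII->get(Opc);
  BuildMI(MBB, I, DL, MCID, DstReg).addReg(SrcReg);
}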


@ -73,12 +73,12 @@ PPCHazardRecognizer970::GetInstrType(unsigned Opcode,
}
Opcode = ~Opcode;
const TargetInstrDesc &TID = TII.get(Opcode);
const MCInstrDesc &MCID = TII.get(Opcode);
isLoad = TID.mayLoad();
isStore = TID.mayStore();
isLoad = MCID.mayLoad();
isStore = MCID.mayStore();
uint64_t TSFlags = TID.TSFlags;
uint64_t TSFlags = MCID.TSFlags;
isFirst = TSFlags & PPCII::PPC970_First;
isSingle = TSFlags & PPCII::PPC970_Single;


@ -120,7 +120,7 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
// destination register as well.
if (Reg0 == Reg1) {
// Must be two address instruction!
assert(MI->getDesc().getOperandConstraint(0, TOI::TIED_TO) &&
assert(MI->getDesc().getOperandConstraint(0, MCOI::TIED_TO) &&
"Expecting a two-address instruction!");
Reg2IsKill = false;
ChangeReg0 = true;
@ -315,12 +315,12 @@ void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
else
llvm_unreachable("Impossible reg-to-reg copy");
const TargetInstrDesc &TID = get(Opc);
if (TID.getNumOperands() == 3)
BuildMI(MBB, I, DL, TID, DestReg)
const MCInstrDesc &MCID = get(Opc);
if (MCID.getNumOperands() == 3)
BuildMI(MBB, I, DL, MCID, DestReg)
.addReg(SrcReg).addReg(SrcReg, getKillRegState(KillSrc));
else
BuildMI(MBB, I, DL, TID, DestReg).addReg(SrcReg, getKillRegState(KillSrc));
BuildMI(MBB, I, DL, MCID, DestReg).addReg(SrcReg, getKillRegState(KillSrc));
}
bool


@ -298,7 +298,7 @@ bool Filler::isDelayFiller(MachineBasicBlock &MBB,
return false;
if (candidate->getOpcode() == SP::UNIMP)
return true;
const TargetInstrDesc &prevdesc = (--candidate)->getDesc();
const MCInstrDesc &prevdesc = (--candidate)->getDesc();
return prevdesc.hasDelaySlot();
}


@ -108,11 +108,11 @@ addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0) {
MachineInstr *MI = MIB;
MachineFunction &MF = *MI->getParent()->getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
const TargetInstrDesc &TID = MI->getDesc();
const MCInstrDesc &MCID = MI->getDesc();
unsigned Flags = 0;
if (TID.mayLoad())
if (MCID.mayLoad())
Flags |= MachineMemOperand::MOLoad;
if (TID.mayStore())
if (MCID.mayStore())
Flags |= MachineMemOperand::MOStore;
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo(


@ -199,13 +199,13 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
}
bool SystemZInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.isTerminator()) return false;
const MCInstrDesc &MCID = MI->getDesc();
if (!MCID.isTerminator()) return false;
// Conditional branch is a special case.
if (TID.isBranch() && !TID.isBarrier())
if (MCID.isBranch() && !MCID.isBarrier())
return true;
if (!TID.isPredicable())
if (!MCID.isPredicable())
return true;
return !isPredicated(MI);
}
@ -343,7 +343,7 @@ SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
return Count;
}
const TargetInstrDesc&
const MCInstrDesc&
SystemZInstrInfo::getBrCond(SystemZCC::CondCodes CC) const {
switch (CC) {
default:
@ -408,7 +408,7 @@ SystemZInstrInfo::getOppositeCondition(SystemZCC::CondCodes CC) const {
}
}
const TargetInstrDesc&
const MCInstrDesc&
SystemZInstrInfo::getLongDispOpc(unsigned Opc) const {
switch (Opc) {
default:


@ -94,10 +94,10 @@ public:
SystemZCC::CondCodes getOppositeCondition(SystemZCC::CondCodes CC) const;
SystemZCC::CondCodes getCondFromBranchOpc(unsigned Opc) const;
const TargetInstrDesc& getBrCond(SystemZCC::CondCodes CC) const;
const TargetInstrDesc& getLongDispOpc(unsigned Opc) const;
const MCInstrDesc& getBrCond(SystemZCC::CondCodes CC) const;
const MCInstrDesc& getLongDispOpc(unsigned Opc) const;
const TargetInstrDesc& getMemoryInstr(unsigned Opc, int64_t Offset = 0) const {
const MCInstrDesc& getMemoryInstr(unsigned Opc, int64_t Offset = 0) const {
if (Offset < 0 || Offset >= 4096)
return getLongDispOpc(Opc);
else


@ -24,22 +24,21 @@ using namespace llvm;
// TargetInstrInfo
//===----------------------------------------------------------------------===//
TargetInstrInfo::TargetInstrInfo(const TargetInstrDesc* Desc,
unsigned numOpcodes)
: Descriptors(Desc), NumOpcodes(numOpcodes) {
TargetInstrInfo::TargetInstrInfo(const MCInstrDesc* Desc, unsigned numOpcodes) {
InitMCInstrInfo(Desc, numOpcodes);
}
TargetInstrInfo::~TargetInstrInfo() {
}
const TargetRegisterClass*
TargetInstrInfo::getRegClass(const TargetInstrDesc &TID, unsigned OpNum,
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
const TargetRegisterInfo *TRI) const {
if (OpNum >= TID.getNumOperands())
if (OpNum >= MCID.getNumOperands())
return 0;
short RegClass = TID.OpInfo[OpNum].RegClass;
if (TID.OpInfo[OpNum].isLookupPtrRegClass())
short RegClass = MCID.OpInfo[OpNum].RegClass;
if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
return TRI->getPointerRegClass(RegClass);
// Instructions like INSERT_SUBREG do not have fixed register classes.
@ -135,13 +134,13 @@ void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.isTerminator()) return false;
const MCInstrDesc &MCID = MI->getDesc();
if (!MCID.isTerminator()) return false;
// Conditional branch is a special case.
if (TID.isBranch() && !TID.isBarrier())
if (MCID.isBranch() && !MCID.isBarrier())
return true;
if (!TID.isPredicable())
if (!MCID.isPredicable())
return true;
return !isPredicated(MI);
}
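
A hypothetical call site for the getRegClass query above: look up the register-class constraint of an instruction's first definition, if it has one.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

// Return the register class required for MI's first def operand, or
// 0 if the instruction defines nothing / has no fixed constraint.
static const TargetRegisterClass *
defRegClass(const MachineInstr *MI, const TargetInstrInfo *TII,
            const TargetRegisterInfo *TRI) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.getNumDefs() == 0)
    return 0;
  return TII->getRegClass(MCID, 0, TRI);
}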


@ -68,7 +68,7 @@ namespace {
return "X86 Machine Code Emitter";
}
void emitInstruction(MachineInstr &MI, const TargetInstrDesc *Desc);
void emitInstruction(MachineInstr &MI, const MCInstrDesc *Desc);
void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
@ -132,7 +132,7 @@ bool Emitter<CodeEmitter>::runOnMachineFunction(MachineFunction &MF) {
MCE.StartMachineBasicBlock(MBB);
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
I != E; ++I) {
const TargetInstrDesc &Desc = I->getDesc();
const MCInstrDesc &Desc = I->getDesc();
emitInstruction(*I, &Desc);
// MOVPC32r is basically a call plus a pop instruction.
if (Desc.getOpcode() == X86::MOVPC32r)
@ -150,7 +150,7 @@ bool Emitter<CodeEmitter>::runOnMachineFunction(MachineFunction &MF) {
/// size, and 3) use of X86-64 extended registers.
static unsigned determineREX(const MachineInstr &MI) {
unsigned REX = 0;
const TargetInstrDesc &Desc = MI.getDesc();
const MCInstrDesc &Desc = MI.getDesc();
// Pseudo instructions do not need REX prefix byte.
if ((Desc.TSFlags & X86II::FormMask) == X86II::Pseudo)
@ -161,7 +161,7 @@ static unsigned determineREX(const MachineInstr &MI) {
unsigned NumOps = Desc.getNumOperands();
if (NumOps) {
bool isTwoAddr = NumOps > 1 &&
Desc.getOperandConstraint(1, TOI::TIED_TO) != -1;
Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1;
// If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
unsigned i = isTwoAddr ? 1 : 0;
@ -598,7 +598,7 @@ void Emitter<CodeEmitter>::emitMemModRMByte(const MachineInstr &MI,
template<class CodeEmitter>
void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
const TargetInstrDesc *Desc) {
const MCInstrDesc *Desc) {
DEBUG(dbgs() << MI);
// If this is a pseudo instruction, lower it.
@ -708,9 +708,9 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
// If this is a two-address instruction, skip one of the register operands.
unsigned NumOps = Desc->getNumOperands();
unsigned CurOp = 0;
if (NumOps > 1 && Desc->getOperandConstraint(1, TOI::TIED_TO) != -1)
if (NumOps > 1 && Desc->getOperandConstraint(1, MCOI::TIED_TO) != -1)
++CurOp;
else if (NumOps > 2 && Desc->getOperandConstraint(NumOps-1, TOI::TIED_TO)== 0)
else if (NumOps > 2 && Desc->getOperandConstraint(NumOps-1,MCOI::TIED_TO)== 0)
// Skip the last source operand that is tied_to the dest reg. e.g. LXADD32
--NumOps;


@ -1393,7 +1393,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
assert(DI->getAddress() && "Null address should be checked earlier!");
if (!X86SelectAddress(DI->getAddress(), AM))
return false;
const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
// FIXME may need to add RegState::Debug to any registers produced,
// although ESP/EBP should be the only ones at the moment.
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II), AM).


@ -150,11 +150,11 @@ addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0) {
MachineInstr *MI = MIB;
MachineFunction &MF = *MI->getParent()->getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
const TargetInstrDesc &TID = MI->getDesc();
const MCInstrDesc &MCID = MI->getDesc();
unsigned Flags = 0;
if (TID.mayLoad())
if (MCID.mayLoad())
Flags |= MachineMemOperand::MOLoad;
if (TID.mayStore())
if (MCID.mayStore())
Flags |= MachineMemOperand::MOStore;
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI, Offset),
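
The addFrameReference helpers in both the SystemZ and X86 headers derive their memory-operand flags the same way; as a standalone sketch:

#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

// Translate the descriptor's may-load/may-store bits into
// MachineMemOperand flags.
static unsigned memOperandFlags(const MCInstrDesc &MCID) {
  unsigned Flags = 0;
  if (MCID.mayLoad())
    Flags |= MachineMemOperand::MOLoad;
  if (MCID.mayStore())
    Flags |= MachineMemOperand::MOStore;
  return Flags;
}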


@ -1689,13 +1689,13 @@ X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
}
bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.isTerminator()) return false;
const MCInstrDesc &MCID = MI->getDesc();
if (!MCID.isTerminator()) return false;
// Conditional branch is a special case.
if (TID.isBranch() && !TID.isBarrier())
if (MCID.isBranch() && !MCID.isBarrier())
return true;
if (!TID.isPredicable())
if (!MCID.isPredicable())
return true;
return !isPredicated(MI);
}
@ -2225,7 +2225,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
bool isTwoAddrFold = false;
unsigned NumOps = MI->getDesc().getNumOperands();
bool isTwoAddr = NumOps > 1 &&
MI->getDesc().getOperandConstraint(1, TOI::TIED_TO) != -1;
MI->getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
// FIXME: AsmPrinter doesn't know how to handle
// X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
@ -2543,7 +2543,7 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
unsigned Opc = MI->getOpcode();
unsigned NumOps = MI->getDesc().getNumOperands();
bool isTwoAddr = NumOps > 1 &&
MI->getDesc().getOperandConstraint(1, TOI::TIED_TO) != -1;
MI->getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
// Folding a memory location into the two-address part of a two-address
// instruction is different than folding it other places. It requires
@ -2589,8 +2589,8 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
return false;
UnfoldStore &= FoldedStore;
const TargetInstrDesc &TID = get(Opc);
const TargetRegisterClass *RC = getRegClass(TID, Index, &RI);
const MCInstrDesc &MCID = get(Opc);
const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI);
if (!MI->hasOneMemOperand() &&
RC == &X86::VR128RegClass &&
!TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
@ -2632,7 +2632,7 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
}
// Emit the data processing instruction.
MachineInstr *DataMI = MF.CreateMachineInstr(TID, MI->getDebugLoc(), true);
MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI->getDebugLoc(), true);
MachineInstrBuilder MIB(DataMI);
if (FoldedStore)
@ -2685,7 +2685,7 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
// Emit the store instruction.
if (UnfoldStore) {
const TargetRegisterClass *DstRC = getRegClass(TID, 0, &RI);
const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI);
std::pair<MachineInstr::mmo_iterator,
MachineInstr::mmo_iterator> MMOs =
MF.extractStoreMemRefs(MI->memoperands_begin(),
@ -2710,9 +2710,9 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
unsigned Index = I->second.second & 0xf;
bool FoldedLoad = I->second.second & (1 << 4);
bool FoldedStore = I->second.second & (1 << 5);
const TargetInstrDesc &TID = get(Opc);
const TargetRegisterClass *RC = getRegClass(TID, Index, &RI);
unsigned NumDefs = TID.NumDefs;
const MCInstrDesc &MCID = get(Opc);
const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI);
unsigned NumDefs = MCID.NumDefs;
std::vector<SDValue> AddrOps;
std::vector<SDValue> BeforeOps;
std::vector<SDValue> AfterOps;
@ -2756,13 +2756,13 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
// Emit the data processing instruction.
std::vector<EVT> VTs;
const TargetRegisterClass *DstRC = 0;
if (TID.getNumDefs() > 0) {
DstRC = getRegClass(TID, 0, &RI);
if (MCID.getNumDefs() > 0) {
DstRC = getRegClass(MCID, 0, &RI);
VTs.push_back(*DstRC->vt_begin());
}
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
EVT VT = N->getValueType(i);
if (VT != MVT::Other && i >= (unsigned)TID.getNumDefs())
if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs())
VTs.push_back(VT);
}
if (Load)
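
The unfolding code above also exercises the updated MachineFunction::CreateMachineInstr signature. A minimal sketch of creating a detached instruction from a descriptor (the opcode is assumed to be valid for the target):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

// Build a MachineInstr that is not yet inserted into any block.
static MachineInstr *createDetachedInstr(MachineFunction &MF,
                                         const TargetInstrInfo *TII,
                                         unsigned Opcode, DebugLoc DL) {
  const MCInstrDesc &MCID = TII->get(Opcode);
  return MF.CreateMachineInstr(MCID, DL, /*NoImp=*/true);
}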


@ -111,7 +111,7 @@ public:
SmallVectorImpl<MCFixup> &Fixups) const;
void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
const MCInst &MI, const TargetInstrDesc &Desc,
const MCInst &MI, const MCInstrDesc &Desc,
raw_ostream &OS) const;
void EmitSegmentOverridePrefix(uint64_t TSFlags, unsigned &CurByte,
@ -119,7 +119,7 @@ public:
raw_ostream &OS) const;
void EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
const MCInst &MI, const TargetInstrDesc &Desc,
const MCInst &MI, const MCInstrDesc &Desc,
raw_ostream &OS) const;
};
@ -379,7 +379,7 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
/// called VEX.
void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
int MemOperand, const MCInst &MI,
const TargetInstrDesc &Desc,
const MCInstrDesc &Desc,
raw_ostream &OS) const {
bool HasVEX_4V = false;
if ((TSFlags >> X86II::VEXShift) & X86II::VEX_4V)
@ -586,7 +586,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
/// size, and 3) use of X86-64 extended registers.
static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
const TargetInstrDesc &Desc) {
const MCInstrDesc &Desc) {
unsigned REX = 0;
if (TSFlags & X86II::REX_W)
REX |= 1 << 3; // set REX.W
@ -596,7 +596,7 @@ static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
unsigned NumOps = MI.getNumOperands();
// FIXME: MCInst should explicitize the two-addrness.
bool isTwoAddr = NumOps > 1 &&
Desc.getOperandConstraint(1, TOI::TIED_TO) != -1;
Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1;
// If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
unsigned i = isTwoAddr ? 1 : 0;
@ -713,7 +713,7 @@ void X86MCCodeEmitter::EmitSegmentOverridePrefix(uint64_t TSFlags,
/// Not present, it is -1.
void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
int MemOperand, const MCInst &MI,
const TargetInstrDesc &Desc,
const MCInstrDesc &Desc,
raw_ostream &OS) const {
// Emit the lock opcode prefix as needed.
@ -803,7 +803,7 @@ void X86MCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const {
unsigned Opcode = MI.getOpcode();
const TargetInstrDesc &Desc = TII.get(Opcode);
const MCInstrDesc &Desc = TII.get(Opcode);
uint64_t TSFlags = Desc.TSFlags;
// Pseudo instructions don't get encoded.
@ -814,9 +814,9 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
// FIXME: This should be handled during MCInst lowering.
unsigned NumOps = Desc.getNumOperands();
unsigned CurOp = 0;
if (NumOps > 1 && Desc.getOperandConstraint(1, TOI::TIED_TO) != -1)
if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1)
++CurOp;
else if (NumOps > 2 && Desc.getOperandConstraint(NumOps-1, TOI::TIED_TO)== 0)
else if (NumOps > 2 && Desc.getOperandConstraint(NumOps-1, MCOI::TIED_TO)== 0)
// Skip the last source operand that is tied_to the dest reg. e.g. LXADD32
--NumOps;
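
The two-address check in both X86 emitters now names the constraint through the MCOI namespace. An isolated sketch of that test:

#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

// Return the index of the first explicit source operand, skipping
// operand 1 when it is tied to the destination.
static unsigned firstSourceOperand(const MCInstrDesc &Desc) {
  unsigned NumOps = Desc.getNumOperands();
  unsigned CurOp = 0;
  if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1)
    ++CurOp;
  return CurOp;
}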


@ -94,17 +94,17 @@ InstrInfoEmitter::GetOperandInfo(const CodeGenInstruction &Inst) {
// Ptr value whose register class is resolved via callback.
if (OpR->isSubClassOf("PointerLikeRegClass"))
Res += "|(1<<TOI::LookupPtrRegClass)";
Res += "|(1<<MCOI::LookupPtrRegClass)";
// Predicate operands. Check to see if the original unexpanded operand
// was of type PredicateOperand.
if (Inst.Operands[i].Rec->isSubClassOf("PredicateOperand"))
Res += "|(1<<TOI::Predicate)";
Res += "|(1<<MCOI::Predicate)";
// Optional def operands. Check to see if the original unexpanded operand
// was of type OptionalDefOperand.
if (Inst.Operands[i].Rec->isSubClassOf("OptionalDefOperand"))
Res += "|(1<<TOI::OptionalDef)";
Res += "|(1<<MCOI::OptionalDef)";
// Fill in constraint info.
Res += ", ";
@ -114,11 +114,11 @@ InstrInfoEmitter::GetOperandInfo(const CodeGenInstruction &Inst) {
if (Constraint.isNone())
Res += "0";
else if (Constraint.isEarlyClobber())
Res += "(1 << TOI::EARLY_CLOBBER)";
Res += "(1 << MCOI::EARLY_CLOBBER)";
else {
assert(Constraint.isTied());
Res += "((" + utostr(Constraint.getTiedOperand()) +
" << 16) | (1 << TOI::TIED_TO))";
" << 16) | (1 << MCOI::TIED_TO))";
}
Result.push_back(Res);
@ -143,7 +143,7 @@ void InstrInfoEmitter::EmitOperandInfo(raw_ostream &OS,
if (N != 0) continue;
N = ++OperandListNum;
OS << "static const TargetOperandInfo OperandInfo" << N << "[] = { ";
OS << "static const MCOperandInfo OperandInfo" << N << "[] = { ";
for (unsigned i = 0, e = OperandInfo.size(); i != e; ++i)
OS << "{ " << OperandInfo[i] << " }, ";
OS << "};\n";
@ -190,9 +190,9 @@ void InstrInfoEmitter::run(raw_ostream &OS) {
// Emit all of the operand info records.
EmitOperandInfo(OS, OperandInfoIDs);
// Emit all of the TargetInstrDesc records in their ENUM ordering.
// Emit all of the MCInstrDesc records in their ENUM ordering.
//
OS << "\nstatic const TargetInstrDesc " << TargetName
OS << "\nstatic const MCInstrDesc " << TargetName
<< "Insts[] = {\n";
const std::vector<const CodeGenInstruction*> &NumberedInstructions =
Target.getInstructionsByEnumValue();
@ -221,31 +221,31 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
<< ",\t\"" << Inst.TheDef->getName() << "\", 0";
// Emit all of the target independent flags...
if (Inst.isReturn) OS << "|(1<<TID::Return)";
if (Inst.isBranch) OS << "|(1<<TID::Branch)";
if (Inst.isIndirectBranch) OS << "|(1<<TID::IndirectBranch)";
if (Inst.isCompare) OS << "|(1<<TID::Compare)";
if (Inst.isMoveImm) OS << "|(1<<TID::MoveImm)";
if (Inst.isBitcast) OS << "|(1<<TID::Bitcast)";
if (Inst.isBarrier) OS << "|(1<<TID::Barrier)";
if (Inst.hasDelaySlot) OS << "|(1<<TID::DelaySlot)";
if (Inst.isCall) OS << "|(1<<TID::Call)";
if (Inst.canFoldAsLoad) OS << "|(1<<TID::FoldableAsLoad)";
if (Inst.mayLoad) OS << "|(1<<TID::MayLoad)";
if (Inst.mayStore) OS << "|(1<<TID::MayStore)";
if (Inst.isPredicable) OS << "|(1<<TID::Predicable)";
if (Inst.isConvertibleToThreeAddress) OS << "|(1<<TID::ConvertibleTo3Addr)";
if (Inst.isCommutable) OS << "|(1<<TID::Commutable)";
if (Inst.isTerminator) OS << "|(1<<TID::Terminator)";
if (Inst.isReMaterializable) OS << "|(1<<TID::Rematerializable)";
if (Inst.isNotDuplicable) OS << "|(1<<TID::NotDuplicable)";
if (Inst.Operands.hasOptionalDef) OS << "|(1<<TID::HasOptionalDef)";
if (Inst.usesCustomInserter) OS << "|(1<<TID::UsesCustomInserter)";
if (Inst.Operands.isVariadic)OS << "|(1<<TID::Variadic)";
if (Inst.hasSideEffects) OS << "|(1<<TID::UnmodeledSideEffects)";
if (Inst.isAsCheapAsAMove) OS << "|(1<<TID::CheapAsAMove)";
if (Inst.hasExtraSrcRegAllocReq) OS << "|(1<<TID::ExtraSrcRegAllocReq)";
if (Inst.hasExtraDefRegAllocReq) OS << "|(1<<TID::ExtraDefRegAllocReq)";
if (Inst.isReturn) OS << "|(1<<MCID::Return)";
if (Inst.isBranch) OS << "|(1<<MCID::Branch)";
if (Inst.isIndirectBranch) OS << "|(1<<MCID::IndirectBranch)";
if (Inst.isCompare) OS << "|(1<<MCID::Compare)";
if (Inst.isMoveImm) OS << "|(1<<MCID::MoveImm)";
if (Inst.isBitcast) OS << "|(1<<MCID::Bitcast)";
if (Inst.isBarrier) OS << "|(1<<MCID::Barrier)";
if (Inst.hasDelaySlot) OS << "|(1<<MCID::DelaySlot)";
if (Inst.isCall) OS << "|(1<<MCID::Call)";
if (Inst.canFoldAsLoad) OS << "|(1<<MCID::FoldableAsLoad)";
if (Inst.mayLoad) OS << "|(1<<MCID::MayLoad)";
if (Inst.mayStore) OS << "|(1<<MCID::MayStore)";
if (Inst.isPredicable) OS << "|(1<<MCID::Predicable)";
if (Inst.isConvertibleToThreeAddress) OS << "|(1<<MCID::ConvertibleTo3Addr)";
if (Inst.isCommutable) OS << "|(1<<MCID::Commutable)";
if (Inst.isTerminator) OS << "|(1<<MCID::Terminator)";
if (Inst.isReMaterializable) OS << "|(1<<MCID::Rematerializable)";
if (Inst.isNotDuplicable) OS << "|(1<<MCID::NotDuplicable)";
if (Inst.Operands.hasOptionalDef) OS << "|(1<<MCID::HasOptionalDef)";
if (Inst.usesCustomInserter) OS << "|(1<<MCID::UsesCustomInserter)";
if (Inst.Operands.isVariadic)OS << "|(1<<MCID::Variadic)";
if (Inst.hasSideEffects) OS << "|(1<<MCID::UnmodeledSideEffects)";
if (Inst.isAsCheapAsAMove) OS << "|(1<<MCID::CheapAsAMove)";
if (Inst.hasExtraSrcRegAllocReq) OS << "|(1<<MCID::ExtraSrcRegAllocReq)";
if (Inst.hasExtraDefRegAllocReq) OS << "|(1<<MCID::ExtraDefRegAllocReq)";
// Emit all of the target-specific flags...
BitsInit *TSF = Inst.TheDef->getValueAsBitsInit("TSFlags");
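
The (1 << MCID::...) expressions emitted above end up in each descriptor's flag word; downstream code reads them back through the convenience accessors rather than testing bits by hand. For example:

#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

// Illustrative query over the generated flag bits: a "plain" opcode
// that neither branches, returns, nor acts as a scheduling barrier.
static bool isPlainOpcode(const MCInstrDesc &Desc) {
  return !Desc.isBranch() && !Desc.isReturn() && !Desc.isBarrier();
}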


@ -54,10 +54,6 @@ private:
// Operand information.
void EmitOperandInfo(raw_ostream &OS, OperandInfoMapTy &OperandInfoIDs);
std::vector<std::string> GetOperandInfo(const CodeGenInstruction &Inst);
void DetectRegisterClassBarriers(std::vector<Record*> &Defs,
const std::vector<CodeGenRegisterClass> &RCs,
std::vector<Record*> &Barriers);
};
} // End llvm namespace