Infrastructure for getting the machine code size of a function and an instruction. Implemented for X86, PowerPC, and ARM.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@49809 91177308-0d34-0410-b5e6-96231b3b80d8
Nicolas Geoffray 2008-04-16 20:10:13 +00:00
parent dc00858e11
commit 52e724ad7e
14 changed files with 636 additions and 208 deletions
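
A usage sketch of the new interface from the caller's side (a hypothetical helper, not part of this commit): any pass that holds a MachineFunction can ask the target for its emitted code size through TargetInstrInfo.

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

// Returns the worst-case byte size of the code that will be emitted for MF.
// GetFunctionSizeInBytes() simply sums GetInstSizeInBytes() over every
// MachineInstr; both hooks are introduced by this commit.
static unsigned estimateCodeSize(const MachineFunction &MF) {
  const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();
  return TII->GetFunctionSizeInBytes(MF);
}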

View File

@ -388,6 +388,18 @@ public:
abort();
return 0; // Must return a value in order to compile with VS 2005
}
/// GetInstSize - Returns the size of the specified Instruction.
///
virtual unsigned GetInstSizeInBytes(const MachineInstr *MI) const {
assert(0 && "Target didn't implement TargetInstrInfo::GetInstSize!");
return 0;
}
/// GetFunctionSizeInBytes - Returns the size of the specified MachineFunction.
///
virtual unsigned GetFunctionSizeInBytes(const MachineFunction &MF) const = 0;
};
/// TargetInstrInfoImpl - This is the default implementation of
@ -408,6 +420,7 @@ public:
MachineBasicBlock::iterator MI,
unsigned DestReg,
const MachineInstr *Orig) const;
virtual unsigned GetFunctionSizeInBytes(const MachineFunction &MF) const;
};
} // End llvm namespace

View File

@ -94,3 +94,14 @@ void TargetInstrInfoImpl::reMaterialize(MachineBasicBlock &MBB,
MBB.insert(I, MI);
}
unsigned
TargetInstrInfoImpl::GetFunctionSizeInBytes(const MachineFunction &MF) const {
unsigned FnSize = 0;
for (MachineFunction::const_iterator MBBI = MF.begin(), E = MF.end();
MBBI != E; ++MBBI) {
const MachineBasicBlock &MBB = *MBBI;
for (MachineBasicBlock::const_iterator I = MBB.begin(),E = MBB.end(); I != E; ++I)
FnSize += GetInstSizeInBytes(I);
}
return FnSize;
}
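
On the implementer's side, a target only has to override the per-instruction hook and let the default summation above do the rest. A minimal sketch for a hypothetical fixed-width target "Foo" (the class and its header are invented; the body mirrors the PowerPC implementation added later in this commit):

#include "FooInstrInfo.h"                 // hypothetical target header
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

unsigned FooInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  case TargetInstrInfo::INLINEASM: {      // inline asm: variable size
    const MachineFunction *MF = MI->getParent()->getParent();
    const char *AsmStr = MI->getOperand(0).getSymbolName();
    return MF->getTarget().getTargetAsmInfo()->getInlineAsmLength(AsmStr);
  }
  case TargetInstrInfo::LABEL:            // labels emit no machine code
    return 0;
  default:                                // every real instruction is 4 bytes
    return 4;
  }
}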

View File

@ -368,7 +368,7 @@ void ARMConstantIslands::InitialFunctionScan(MachineFunction &Fn,
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E; ++I) {
// Add instruction size to MBBSize.
MBBSize += ARM::GetInstSize(I);
MBBSize += TII->GetInstSizeInBytes(I);
int Opc = I->getOpcode();
if (I->getDesc().isBranch()) {
@ -519,7 +519,7 @@ unsigned ARMConstantIslands::GetOffsetOf(MachineInstr *MI) const {
for (MachineBasicBlock::iterator I = MBB->begin(); ; ++I) {
assert(I != MBB->end() && "Didn't find MI in its own basic block?");
if (&*I == MI) return Offset;
Offset += ARM::GetInstSize(I);
Offset += TII->GetInstSizeInBytes(I);
}
}
@ -617,7 +617,7 @@ MachineBasicBlock *ARMConstantIslands::SplitBlockBeforeInstr(MachineInstr *MI) {
unsigned NewBBSize = 0;
for (MachineBasicBlock::iterator I = NewBB->begin(), E = NewBB->end();
I != E; ++I)
NewBBSize += ARM::GetInstSize(I);
NewBBSize += TII->GetInstSizeInBytes(I);
unsigned OrigBBI = OrigBB->getNumber();
unsigned NewBBI = NewBB->getNumber();
@ -968,9 +968,9 @@ void ARMConstantIslands::CreateNewWater(unsigned CPUserIndex,
MachineBasicBlock::iterator MI = UserMI;
++MI;
unsigned CPUIndex = CPUserIndex+1;
for (unsigned Offset = UserOffset+ARM::GetInstSize(UserMI);
for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
Offset < BaseInsertOffset;
Offset += ARM::GetInstSize(MI),
Offset += TII->GetInstSizeInBytes(MI),
MI = next(MI)) {
if (CPUIndex < CPUsers.size() && CPUsers[CPUIndex].MI == MI) {
if (!OffsetIsInRange(Offset, EndInsertOffset,
@ -1225,7 +1225,7 @@ ARMConstantIslands::FixUpConditionalBr(MachineFunction &Fn, ImmBranch &Br) {
SplitBlockBeforeInstr(MI);
// No need for the branch to the next block. We're adding an unconditional
// branch to the destination.
int delta = ARM::GetInstSize(&MBB->back());
int delta = TII->GetInstSizeInBytes(&MBB->back());
BBSizes[MBB->getNumber()] -= delta;
MachineBasicBlock* SplitBB = next(MachineFunction::iterator(MBB));
AdjustBBOffsetsAfter(SplitBB, -delta);
@ -1243,18 +1243,18 @@ ARMConstantIslands::FixUpConditionalBr(MachineFunction &Fn, ImmBranch &Br) {
BuildMI(MBB, TII->get(MI->getOpcode())).addMBB(NextBB)
.addImm(CC).addReg(CCReg);
Br.MI = &MBB->back();
BBSizes[MBB->getNumber()] += ARM::GetInstSize(&MBB->back());
BBSizes[MBB->getNumber()] += TII->GetInstSizeInBytes(&MBB->back());
BuildMI(MBB, TII->get(Br.UncondBr)).addMBB(DestBB);
BBSizes[MBB->getNumber()] += ARM::GetInstSize(&MBB->back());
BBSizes[MBB->getNumber()] += TII->GetInstSizeInBytes(&MBB->back());
unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
// Remove the old conditional branch. It may or may not still be in MBB.
BBSizes[MI->getParent()->getNumber()] -= ARM::GetInstSize(MI);
BBSizes[MI->getParent()->getNumber()] -= TII->GetInstSizeInBytes(MI);
MI->eraseFromParent();
// The net size change is an addition of one unconditional branch.
int delta = ARM::GetInstSize(&MBB->back());
int delta = TII->GetInstSizeInBytes(&MBB->back());
AdjustBBOffsetsAfter(MBB, delta);
return true;
}

View File

@ -877,8 +877,8 @@ static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARM::GetInstSize(MachineInstr *MI) {
MachineBasicBlock &MBB = *MI->getParent();
unsigned ARMInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
const MachineBasicBlock &MBB = *MI->getParent();
const MachineFunction *MF = MBB.getParent();
const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();
@ -937,17 +937,3 @@ unsigned ARM::GetInstSize(MachineInstr *MI) {
}
return 0; // Not reached
}
/// GetFunctionSize - Returns the size of the specified MachineFunction.
///
unsigned ARM::GetFunctionSize(MachineFunction &MF) {
unsigned FnSize = 0;
for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
MBBI != E; ++MBBI) {
MachineBasicBlock &MBB = *MBBI;
for (MachineBasicBlock::iterator I = MBB.begin(),E = MBB.end(); I != E; ++I)
FnSize += ARM::GetInstSize(I);
}
return FnSize;
}

View File

@ -225,18 +225,12 @@ public:
virtual bool DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const;
/// GetInstSize - Returns the size of the specified MachineInstr.
///
virtual unsigned GetInstSizeInBytes(const MachineInstr* MI) const;
};
// Utility routines
namespace ARM {
/// GetInstSize - Returns the size of the specified MachineInstr.
///
unsigned GetInstSize(MachineInstr *MI);
/// GetFunctionSize - Returns the size of the specified MachineFunction.
///
unsigned GetFunctionSize(MachineFunction &MF);
}
}
#endif

View File

@ -948,7 +948,7 @@ ARMRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
bool ForceLRSpill = false;
if (!LRSpilled && AFI->isThumbFunction()) {
unsigned FnSize = ARM::GetFunctionSize(MF);
unsigned FnSize = TII.GetFunctionSizeInBytes(MF);
// Force LR to be spilled if the Thumb function size is > 2048. This enables
// use of BL to implement far jump. If it turns out that it's not needed
// then the branch fix up path will undo it.

View File

@ -22,7 +22,6 @@
#include "PPCPredicates.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
@ -54,25 +53,6 @@ FunctionPass *llvm::createPPCBranchSelectionPass() {
return new PPCBSel();
}
/// getNumBytesForInstruction - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
///
static unsigned getNumBytesForInstruction(MachineInstr *MI) {
switch (MI->getOpcode()) {
case PPC::INLINEASM: { // Inline Asm: Variable size.
MachineFunction *MF = MI->getParent()->getParent();
const char *AsmStr = MI->getOperand(0).getSymbolName();
return MF->getTarget().getTargetAsmInfo()->getInlineAsmLength(AsmStr);
}
case PPC::LABEL: {
return 0;
}
default:
return 4; // PowerPC instructions are all 4 bytes
}
}
bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo();
// Give the blocks of the function a dense, in-order, numbering.
@ -88,7 +68,7 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
unsigned BlockSize = 0;
for (MachineBasicBlock::iterator MBBI = MBB->begin(), EE = MBB->end();
MBBI != EE; ++MBBI)
BlockSize += getNumBytesForInstruction(MBBI);
BlockSize += TII->GetInstSizeInBytes(MBBI);
BlockSizes[MBB->getNumber()] = BlockSize;
FuncSize += BlockSize;
@ -124,7 +104,7 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E; ++I) {
if (I->getOpcode() != PPC::BCC || I->getOperand(2).isImmediate()) {
MBBStartOffset += getNumBytesForInstruction(I);
MBBStartOffset += TII->GetInstSizeInBytes(I);
continue;
}

View File

@ -20,6 +20,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetAsmInfo.h"
using namespace llvm;
extern cl::opt<bool> EnablePPC32RS; // FIXME (64-bit): See PPCRegisterInfo.cpp.
@ -724,3 +725,21 @@ ReverseBranchCondition(std::vector<MachineOperand> &Cond) const {
Cond[0].setImm(PPC::InvertPredicate((PPC::Predicate)Cond[0].getImm()));
return false;
}
/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
///
unsigned PPCInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
switch (MI->getOpcode()) {
case PPC::INLINEASM: { // Inline Asm: Variable size.
const MachineFunction *MF = MI->getParent()->getParent();
const char *AsmStr = MI->getOperand(0).getSymbolName();
return MF->getTarget().getTargetAsmInfo()->getInlineAsmLength(AsmStr);
}
case PPC::LABEL: {
return 0;
}
default:
return 4; // PowerPC instructions are all 4 bytes
}
}
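
As a quick sanity check on the sizes this hook returns (illustrative numbers only; the inline-asm figure depends on TargetAsmInfo::getInlineAsmLength):

// A block with five ordinary PPC instructions, one PPC::LABEL and one
// INLINEASM whose getInlineAsmLength() reports 8 bytes is counted by the
// PPCBranchSelector loop shown earlier in this commit as:
//   BlockSize = 5*4 + 0 + 8 = 28 bytes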

View File

@ -155,6 +155,11 @@ public:
virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
virtual bool ReverseBranchCondition(std::vector<MachineOperand> &Cond) const;
/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
///
virtual unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
};
}

View File

@ -92,8 +92,6 @@ namespace {
intptr_t PCAdj = 0);
unsigned getX86RegNum(unsigned RegNo) const;
bool isX86_64ExtendedReg(const MachineOperand &MO);
unsigned determineREX(const MachineInstr &MI);
bool gvNeedsLazyPtr(const GlobalValue *GV);
};
@ -405,139 +403,6 @@ void Emitter::emitMemModRMByte(const MachineInstr &MI,
}
}
static unsigned sizeOfImm(const TargetInstrDesc *Desc) {
switch (Desc->TSFlags & X86II::ImmMask) {
case X86II::Imm8: return 1;
case X86II::Imm16: return 2;
case X86II::Imm32: return 4;
case X86II::Imm64: return 8;
default: assert(0 && "Immediate size not set!");
return 0;
}
}
/// isX86_64ExtendedReg - Is the MachineOperand a x86-64 extended register?
/// e.g. r8, xmm8, etc.
bool Emitter::isX86_64ExtendedReg(const MachineOperand &MO) {
if (!MO.isRegister()) return false;
switch (MO.getReg()) {
default: break;
case X86::R8: case X86::R9: case X86::R10: case X86::R11:
case X86::R12: case X86::R13: case X86::R14: case X86::R15:
case X86::R8D: case X86::R9D: case X86::R10D: case X86::R11D:
case X86::R12D: case X86::R13D: case X86::R14D: case X86::R15D:
case X86::R8W: case X86::R9W: case X86::R10W: case X86::R11W:
case X86::R12W: case X86::R13W: case X86::R14W: case X86::R15W:
case X86::R8B: case X86::R9B: case X86::R10B: case X86::R11B:
case X86::R12B: case X86::R13B: case X86::R14B: case X86::R15B:
case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11:
case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
return true;
}
return false;
}
inline static bool isX86_64NonExtLowByteReg(unsigned reg) {
return (reg == X86::SPL || reg == X86::BPL ||
reg == X86::SIL || reg == X86::DIL);
}
/// determineREX - Determine if the MachineInstr has to be encoded with a X86-64
/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
/// size, and 3) use of X86-64 extended registers.
unsigned Emitter::determineREX(const MachineInstr &MI) {
unsigned REX = 0;
const TargetInstrDesc &Desc = MI.getDesc();
// Pseudo instructions do not need REX prefix byte.
if ((Desc.TSFlags & X86II::FormMask) == X86II::Pseudo)
return 0;
if (Desc.TSFlags & X86II::REX_W)
REX |= 1 << 3;
unsigned NumOps = Desc.getNumOperands();
if (NumOps) {
bool isTwoAddr = NumOps > 1 &&
Desc.getOperandConstraint(1, TOI::TIED_TO) != -1;
// If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
unsigned i = isTwoAddr ? 1 : 0;
for (unsigned e = NumOps; i != e; ++i) {
const MachineOperand& MO = MI.getOperand(i);
if (MO.isRegister()) {
unsigned Reg = MO.getReg();
if (isX86_64NonExtLowByteReg(Reg))
REX |= 0x40;
}
}
switch (Desc.TSFlags & X86II::FormMask) {
case X86II::MRMInitReg:
if (isX86_64ExtendedReg(MI.getOperand(0)))
REX |= (1 << 0) | (1 << 2);
break;
case X86II::MRMSrcReg: {
if (isX86_64ExtendedReg(MI.getOperand(0)))
REX |= 1 << 2;
i = isTwoAddr ? 2 : 1;
for (unsigned e = NumOps; i != e; ++i) {
const MachineOperand& MO = MI.getOperand(i);
if (isX86_64ExtendedReg(MO))
REX |= 1 << 0;
}
break;
}
case X86II::MRMSrcMem: {
if (isX86_64ExtendedReg(MI.getOperand(0)))
REX |= 1 << 2;
unsigned Bit = 0;
i = isTwoAddr ? 2 : 1;
for (; i != NumOps; ++i) {
const MachineOperand& MO = MI.getOperand(i);
if (MO.isRegister()) {
if (isX86_64ExtendedReg(MO))
REX |= 1 << Bit;
Bit++;
}
}
break;
}
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m:
case X86II::MRMDestMem: {
unsigned e = isTwoAddr ? 5 : 4;
i = isTwoAddr ? 1 : 0;
if (NumOps > e && isX86_64ExtendedReg(MI.getOperand(e)))
REX |= 1 << 2;
unsigned Bit = 0;
for (; i != e; ++i) {
const MachineOperand& MO = MI.getOperand(i);
if (MO.isRegister()) {
if (isX86_64ExtendedReg(MO))
REX |= 1 << Bit;
Bit++;
}
}
break;
}
default: {
if (isX86_64ExtendedReg(MI.getOperand(0)))
REX |= 1 << 0;
i = isTwoAddr ? 2 : 1;
for (unsigned e = NumOps; i != e; ++i) {
const MachineOperand& MO = MI.getOperand(i);
if (isX86_64ExtendedReg(MO))
REX |= 1 << 2;
}
break;
}
}
}
return REX;
}
void Emitter::emitInstruction(const MachineInstr &MI,
const TargetInstrDesc *Desc) {
DOUT << MI;
@ -584,7 +449,7 @@ void Emitter::emitInstruction(const MachineInstr &MI,
if (Is64BitMode) {
// REX prefix
unsigned REX = determineREX(MI);
unsigned REX = X86InstrInfo::determineREX(MI);
if (REX)
MCE.emitByte(0x40 | REX);
}
@ -632,7 +497,7 @@ void Emitter::emitInstruction(const MachineInstr &MI,
case X86::MOVPC32r: {
// This emits the "call" portion of this pseudo instruction.
MCE.emitByte(BaseOpcode);
emitConstant(0, sizeOfImm(Desc));
emitConstant(0, X86InstrInfo::sizeOfImm(Desc));
// Remember PIC base.
PICBaseOffset = MCE.getCurrentPCOffset();
X86JITInfo *JTI = dynamic_cast<X86JITInfo*>(TM.getJITInfo());
@ -657,7 +522,7 @@ void Emitter::emitInstruction(const MachineInstr &MI,
} else if (MO.isExternalSymbol()) {
emitExternalSymbolAddress(MO.getSymbolName(), X86::reloc_pcrel_word);
} else if (MO.isImmediate()) {
emitConstant(MO.getImm(), sizeOfImm(Desc));
emitConstant(MO.getImm(), X86InstrInfo::sizeOfImm(Desc));
} else {
assert(0 && "Unknown RawFrm operand!");
}
@ -669,7 +534,7 @@ void Emitter::emitInstruction(const MachineInstr &MI,
if (CurOp != NumOps) {
const MachineOperand &MO1 = MI.getOperand(CurOp++);
unsigned Size = sizeOfImm(Desc);
unsigned Size = X86InstrInfo::sizeOfImm(Desc);
if (MO1.isImmediate())
emitConstant(MO1.getImm(), Size);
else {
@ -698,7 +563,7 @@ void Emitter::emitInstruction(const MachineInstr &MI,
getX86RegNum(MI.getOperand(CurOp+1).getReg()));
CurOp += 2;
if (CurOp != NumOps)
emitConstant(MI.getOperand(CurOp++).getImm(), sizeOfImm(Desc));
emitConstant(MI.getOperand(CurOp++).getImm(), X86InstrInfo::sizeOfImm(Desc));
break;
}
case X86II::MRMDestMem: {
@ -706,7 +571,7 @@ void Emitter::emitInstruction(const MachineInstr &MI,
emitMemModRMByte(MI, CurOp, getX86RegNum(MI.getOperand(CurOp+4).getReg()));
CurOp += 5;
if (CurOp != NumOps)
emitConstant(MI.getOperand(CurOp++).getImm(), sizeOfImm(Desc));
emitConstant(MI.getOperand(CurOp++).getImm(), X86InstrInfo::sizeOfImm(Desc));
break;
}
@ -716,18 +581,18 @@ void Emitter::emitInstruction(const MachineInstr &MI,
getX86RegNum(MI.getOperand(CurOp).getReg()));
CurOp += 2;
if (CurOp != NumOps)
emitConstant(MI.getOperand(CurOp++).getImm(), sizeOfImm(Desc));
emitConstant(MI.getOperand(CurOp++).getImm(), X86InstrInfo::sizeOfImm(Desc));
break;
case X86II::MRMSrcMem: {
intptr_t PCAdj = (CurOp+5 != NumOps) ? sizeOfImm(Desc) : 0;
intptr_t PCAdj = (CurOp+5 != NumOps) ? X86InstrInfo::sizeOfImm(Desc) : 0;
MCE.emitByte(BaseOpcode);
emitMemModRMByte(MI, CurOp+1, getX86RegNum(MI.getOperand(CurOp).getReg()),
PCAdj);
CurOp += 5;
if (CurOp != NumOps)
emitConstant(MI.getOperand(CurOp++).getImm(), sizeOfImm(Desc));
emitConstant(MI.getOperand(CurOp++).getImm(), X86InstrInfo::sizeOfImm(Desc));
break;
}
@ -741,7 +606,7 @@ void Emitter::emitInstruction(const MachineInstr &MI,
if (CurOp != NumOps) {
const MachineOperand &MO1 = MI.getOperand(CurOp++);
unsigned Size = sizeOfImm(Desc);
unsigned Size = X86InstrInfo::sizeOfImm(Desc);
if (MO1.isImmediate())
emitConstant(MO1.getImm(), Size);
else {
@ -769,7 +634,7 @@ void Emitter::emitInstruction(const MachineInstr &MI,
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m: {
intptr_t PCAdj = (CurOp+4 != NumOps) ?
(MI.getOperand(CurOp+4).isImmediate() ? sizeOfImm(Desc) : 4) : 0;
(MI.getOperand(CurOp+4).isImmediate() ? X86InstrInfo::sizeOfImm(Desc) : 4) : 0;
MCE.emitByte(BaseOpcode);
emitMemModRMByte(MI, CurOp, (Desc->TSFlags & X86II::FormMask)-X86II::MRM0m,
@ -778,7 +643,7 @@ void Emitter::emitInstruction(const MachineInstr &MI,
if (CurOp != NumOps) {
const MachineOperand &MO = MI.getOperand(CurOp++);
unsigned Size = sizeOfImm(Desc);
unsigned Size = X86InstrInfo::sizeOfImm(Desc);
if (MO.isImmediate())
emitConstant(MO.getImm(), Size);
else {

View File

@ -25,6 +25,7 @@
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetAsmInfo.h"
using namespace llvm;
@ -2263,3 +2264,542 @@ const TargetRegisterClass *X86InstrInfo::getPointerRegClass() const {
else
return &X86::GR32RegClass;
}
unsigned X86InstrInfo::sizeOfImm(const TargetInstrDesc *Desc) {
switch (Desc->TSFlags & X86II::ImmMask) {
case X86II::Imm8: return 1;
case X86II::Imm16: return 2;
case X86II::Imm32: return 4;
case X86II::Imm64: return 8;
default: assert(0 && "Immediate size not set!");
return 0;
}
}
/// isX86_64ExtendedReg - Is the MachineOperand an x86-64 extended register?
/// e.g. r8, xmm8, etc.
bool X86InstrInfo::isX86_64ExtendedReg(const MachineOperand &MO) {
if (!MO.isRegister()) return false;
switch (MO.getReg()) {
default: break;
case X86::R8: case X86::R9: case X86::R10: case X86::R11:
case X86::R12: case X86::R13: case X86::R14: case X86::R15:
case X86::R8D: case X86::R9D: case X86::R10D: case X86::R11D:
case X86::R12D: case X86::R13D: case X86::R14D: case X86::R15D:
case X86::R8W: case X86::R9W: case X86::R10W: case X86::R11W:
case X86::R12W: case X86::R13W: case X86::R14W: case X86::R15W:
case X86::R8B: case X86::R9B: case X86::R10B: case X86::R11B:
case X86::R12B: case X86::R13B: case X86::R14B: case X86::R15B:
case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11:
case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
return true;
}
return false;
}
/// determineREX - Determine if the MachineInstr has to be encoded with an X86-64
/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
/// size, and 3) use of X86-64 extended registers.
unsigned X86InstrInfo::determineREX(const MachineInstr &MI) {
unsigned REX = 0;
const TargetInstrDesc &Desc = MI.getDesc();
// Pseudo instructions do not need REX prefix byte.
if ((Desc.TSFlags & X86II::FormMask) == X86II::Pseudo)
return 0;
if (Desc.TSFlags & X86II::REX_W)
REX |= 1 << 3;
unsigned NumOps = Desc.getNumOperands();
if (NumOps) {
bool isTwoAddr = NumOps > 1 &&
Desc.getOperandConstraint(1, TOI::TIED_TO) != -1;
// If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
unsigned i = isTwoAddr ? 1 : 0;
for (unsigned e = NumOps; i != e; ++i) {
const MachineOperand& MO = MI.getOperand(i);
if (MO.isRegister()) {
unsigned Reg = MO.getReg();
if (isX86_64NonExtLowByteReg(Reg))
REX |= 0x40;
}
}
switch (Desc.TSFlags & X86II::FormMask) {
case X86II::MRMInitReg:
if (isX86_64ExtendedReg(MI.getOperand(0)))
REX |= (1 << 0) | (1 << 2);
break;
case X86II::MRMSrcReg: {
if (isX86_64ExtendedReg(MI.getOperand(0)))
REX |= 1 << 2;
i = isTwoAddr ? 2 : 1;
for (unsigned e = NumOps; i != e; ++i) {
const MachineOperand& MO = MI.getOperand(i);
if (isX86_64ExtendedReg(MO))
REX |= 1 << 0;
}
break;
}
case X86II::MRMSrcMem: {
if (isX86_64ExtendedReg(MI.getOperand(0)))
REX |= 1 << 2;
unsigned Bit = 0;
i = isTwoAddr ? 2 : 1;
for (; i != NumOps; ++i) {
const MachineOperand& MO = MI.getOperand(i);
if (MO.isRegister()) {
if (isX86_64ExtendedReg(MO))
REX |= 1 << Bit;
Bit++;
}
}
break;
}
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m:
case X86II::MRMDestMem: {
unsigned e = isTwoAddr ? 5 : 4;
i = isTwoAddr ? 1 : 0;
if (NumOps > e && isX86_64ExtendedReg(MI.getOperand(e)))
REX |= 1 << 2;
unsigned Bit = 0;
for (; i != e; ++i) {
const MachineOperand& MO = MI.getOperand(i);
if (MO.isRegister()) {
if (isX86_64ExtendedReg(MO))
REX |= 1 << Bit;
Bit++;
}
}
break;
}
default: {
if (isX86_64ExtendedReg(MI.getOperand(0)))
REX |= 1 << 0;
i = isTwoAddr ? 2 : 1;
for (unsigned e = NumOps; i != e; ++i) {
const MachineOperand& MO = MI.getOperand(i);
if (isX86_64ExtendedReg(MO))
REX |= 1 << 2;
}
break;
}
}
}
return REX;
}
/// sizePCRelativeBlockAddress - This method returns the size of a PC
/// relative block address instruction
///
static unsigned sizePCRelativeBlockAddress() {
return 4;
}
/// sizeGlobalAddress - Give the size of the emission of this global address
///
static unsigned sizeGlobalAddress(bool dword) {
return dword ? 8 : 4;
}
/// sizeConstPoolAddress - Give the size of the emission of this constant
/// pool address
///
static unsigned sizeConstPoolAddress(bool dword) {
return dword ? 8 : 4;
}
/// sizeExternalSymbolAddress - Give the size of the emission of this external
/// symbol
///
static unsigned sizeExternalSymbolAddress(bool dword) {
return dword ? 8 : 4;
}
/// sizeJumpTableAddress - Give the size of the emission of this jump
/// table address
///
static unsigned sizeJumpTableAddress(bool dword) {
return dword ? 8 : 4;
}
static unsigned sizeConstant(unsigned Size) {
return Size;
}
static unsigned sizeRegModRMByte(){
return 1;
}
static unsigned sizeSIBByte(){
return 1;
}
static unsigned getDisplacementFieldSize(const MachineOperand *RelocOp) {
unsigned FinalSize = 0;
// If this is a simple integer displacement that doesn't require a relocation.
if (!RelocOp) {
FinalSize += sizeConstant(4);
return FinalSize;
}
// Otherwise, this is something that requires a relocation.
if (RelocOp->isGlobalAddress()) {
FinalSize += sizeGlobalAddress(false);
} else if (RelocOp->isConstantPoolIndex()) {
FinalSize += sizeConstPoolAddress(false);
} else if (RelocOp->isJumpTableIndex()) {
FinalSize += sizeJumpTableAddress(false);
} else {
assert(0 && "Unknown value to relocate!");
}
return FinalSize;
}
static unsigned getMemModRMByteSize(const MachineInstr &MI, unsigned Op,
bool IsPIC, bool Is64BitMode) {
const MachineOperand &Op3 = MI.getOperand(Op+3);
int DispVal = 0;
const MachineOperand *DispForReloc = 0;
unsigned FinalSize = 0;
// Figure out what sort of displacement we have to handle here.
if (Op3.isGlobalAddress()) {
DispForReloc = &Op3;
} else if (Op3.isConstantPoolIndex()) {
if (Is64BitMode || IsPIC) {
DispForReloc = &Op3;
} else {
DispVal = 1;
}
} else if (Op3.isJumpTableIndex()) {
if (Is64BitMode || IsPIC) {
DispForReloc = &Op3;
} else {
DispVal = 1;
}
} else {
DispVal = 1;
}
const MachineOperand &Base = MI.getOperand(Op);
const MachineOperand &IndexReg = MI.getOperand(Op+2);
unsigned BaseReg = Base.getReg();
// Is a SIB byte needed?
if (IndexReg.getReg() == 0 &&
(BaseReg == 0 || X86RegisterInfo::getX86RegNum(BaseReg) != N86::ESP)) {
if (BaseReg == 0) { // Just a displacement?
// Emit special case [disp32] encoding
++FinalSize;
FinalSize += getDisplacementFieldSize(DispForReloc);
} else {
unsigned BaseRegNo = X86RegisterInfo::getX86RegNum(BaseReg);
if (!DispForReloc && DispVal == 0 && BaseRegNo != N86::EBP) {
// Emit simple indirect register encoding... [EAX] f.e.
++FinalSize;
// Be pessimistic and assume it's a disp32, not a disp8
} else {
// Emit the most general non-SIB encoding: [REG+disp32]
++FinalSize;
FinalSize += getDisplacementFieldSize(DispForReloc);
}
}
} else { // We need a SIB byte, so start by outputting the ModR/M byte first
assert(IndexReg.getReg() != X86::ESP &&
IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");
bool ForceDisp32 = false;
if (BaseReg == 0 || DispForReloc) {
// Emit the normal disp32 encoding.
++FinalSize;
ForceDisp32 = true;
} else {
++FinalSize;
}
FinalSize += sizeSIBByte();
// Do we need to output a displacement?
if (DispVal != 0 || ForceDisp32) {
FinalSize += getDisplacementFieldSize(DispForReloc);
}
}
return FinalSize;
}
static unsigned GetInstSizeWithDesc(const MachineInstr &MI,
const TargetInstrDesc *Desc,
bool IsPIC, bool Is64BitMode) {
unsigned Opcode = Desc->Opcode;
unsigned FinalSize = 0;
// Emit the lock opcode prefix as needed.
if (Desc->TSFlags & X86II::LOCK) ++FinalSize;
// Emit the repeat opcode prefix as needed.
if ((Desc->TSFlags & X86II::Op0Mask) == X86II::REP) ++FinalSize;
// Emit the operand size opcode prefix as needed.
if (Desc->TSFlags & X86II::OpSize) ++FinalSize;
// Emit the address size opcode prefix as needed.
if (Desc->TSFlags & X86II::AdSize) ++FinalSize;
bool Need0FPrefix = false;
switch (Desc->TSFlags & X86II::Op0Mask) {
case X86II::TB: // Two-byte opcode prefix
case X86II::T8: // 0F 38
case X86II::TA: // 0F 3A
Need0FPrefix = true;
break;
case X86II::REP: break; // already handled.
case X86II::XS: // F3 0F
++FinalSize;
Need0FPrefix = true;
break;
case X86II::XD: // F2 0F
++FinalSize;
Need0FPrefix = true;
break;
case X86II::D8: case X86II::D9: case X86II::DA: case X86II::DB:
case X86II::DC: case X86II::DD: case X86II::DE: case X86II::DF:
++FinalSize;
break; // Two-byte opcode prefix
default: assert(0 && "Invalid prefix!");
case 0: break; // No prefix!
}
if (Is64BitMode) {
// REX prefix
unsigned REX = X86InstrInfo::determineREX(MI);
if (REX)
++FinalSize;
}
// 0x0F escape code must be emitted just before the opcode.
if (Need0FPrefix)
++FinalSize;
switch (Desc->TSFlags & X86II::Op0Mask) {
case X86II::T8: // 0F 38
++FinalSize;
break;
case X86II::TA: // 0F 3A
++FinalSize;
break;
}
// If this is a two-address instruction, skip one of the register operands.
unsigned NumOps = Desc->getNumOperands();
unsigned CurOp = 0;
if (NumOps > 1 && Desc->getOperandConstraint(1, TOI::TIED_TO) != -1)
CurOp++;
switch (Desc->TSFlags & X86II::FormMask) {
default: assert(0 && "Unknown FormMask value in X86 MachineCodeEmitter!");
case X86II::Pseudo:
// Remember the current PC offset, this is the PIC relocation
// base address.
switch (Opcode) {
default:
break;
case TargetInstrInfo::INLINEASM: {
const MachineFunction *MF = MI.getParent()->getParent();
const char *AsmStr = MI.getOperand(0).getSymbolName();
const TargetAsmInfo* AI = MF->getTarget().getTargetAsmInfo();
FinalSize += AI->getInlineAsmLength(AsmStr);
break;
}
case TargetInstrInfo::LABEL:
break;
case TargetInstrInfo::IMPLICIT_DEF:
case TargetInstrInfo::DECLARE:
case X86::DWARF_LOC:
case X86::FP_REG_KILL:
break;
case X86::MOVPC32r: {
// This emits the "call" portion of this pseudo instruction.
++FinalSize;
FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
break;
}
}
CurOp = NumOps;
break;
case X86II::RawFrm:
++FinalSize;
if (CurOp != NumOps) {
const MachineOperand &MO = MI.getOperand(CurOp++);
if (MO.isMachineBasicBlock()) {
FinalSize += sizePCRelativeBlockAddress();
} else if (MO.isGlobalAddress()) {
FinalSize += sizeGlobalAddress(false);
} else if (MO.isExternalSymbol()) {
FinalSize += sizeExternalSymbolAddress(false);
} else if (MO.isImmediate()) {
FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
} else {
assert(0 && "Unknown RawFrm operand!");
}
}
break;
case X86II::AddRegFrm:
++FinalSize;
if (CurOp != NumOps) {
const MachineOperand &MO1 = MI.getOperand(CurOp++);
unsigned Size = X86InstrInfo::sizeOfImm(Desc);
if (MO1.isImmediate())
FinalSize += sizeConstant(Size);
else {
bool dword = false;
if (Opcode == X86::MOV64ri)
dword = true;
if (MO1.isGlobalAddress()) {
FinalSize += sizeGlobalAddress(dword);
} else if (MO1.isExternalSymbol())
FinalSize += sizeExternalSymbolAddress(dword);
else if (MO1.isConstantPoolIndex())
FinalSize += sizeConstPoolAddress(dword);
else if (MO1.isJumpTableIndex())
FinalSize += sizeJumpTableAddress(dword);
}
}
break;
case X86II::MRMDestReg: {
++FinalSize;
FinalSize += sizeRegModRMByte();
CurOp += 2;
if (CurOp != NumOps)
FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
break;
}
case X86II::MRMDestMem: {
++FinalSize;
FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
CurOp += 5;
if (CurOp != NumOps)
FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
break;
}
case X86II::MRMSrcReg:
++FinalSize;
FinalSize += sizeRegModRMByte();
CurOp += 2;
if (CurOp != NumOps)
FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
break;
case X86II::MRMSrcMem: {
++FinalSize;
FinalSize += getMemModRMByteSize(MI, CurOp+1, IsPIC, Is64BitMode);
CurOp += 5;
if (CurOp != NumOps)
FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
break;
}
case X86II::MRM0r: case X86II::MRM1r:
case X86II::MRM2r: case X86II::MRM3r:
case X86II::MRM4r: case X86II::MRM5r:
case X86II::MRM6r: case X86II::MRM7r:
++FinalSize;
FinalSize += sizeRegModRMByte();
if (CurOp != NumOps) {
const MachineOperand &MO1 = MI.getOperand(CurOp++);
unsigned Size = X86InstrInfo::sizeOfImm(Desc);
if (MO1.isImmediate())
FinalSize += sizeConstant(Size);
else {
bool dword = false;
if (Opcode == X86::MOV64ri32)
dword = true;
if (MO1.isGlobalAddress()) {
FinalSize += sizeGlobalAddress(dword);
} else if (MO1.isExternalSymbol())
FinalSize += sizeExternalSymbolAddress(dword);
else if (MO1.isConstantPoolIndex())
FinalSize += sizeConstPoolAddress(dword);
else if (MO1.isJumpTableIndex())
FinalSize += sizeJumpTableAddress(dword);
}
}
break;
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m: {
++FinalSize;
FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
CurOp += 4;
if (CurOp != NumOps) {
const MachineOperand &MO = MI.getOperand(CurOp++);
unsigned Size = X86InstrInfo::sizeOfImm(Desc);
if (MO.isImmediate())
FinalSize += sizeConstant(Size);
else {
bool dword = false;
if (Opcode == X86::MOV64mi32)
dword = true;
if (MO.isGlobalAddress()) {
FinalSize += sizeGlobalAddress(dword);
} else if (MO.isExternalSymbol())
FinalSize += sizeExternalSymbolAddress(dword);
else if (MO.isConstantPoolIndex())
FinalSize += sizeConstPoolAddress(dword);
else if (MO.isJumpTableIndex())
FinalSize += sizeJumpTableAddress(dword);
}
}
break;
}
case X86II::MRMInitReg:
++FinalSize;
// Duplicate register, used by things like MOV8r0 (aka xor reg,reg).
FinalSize += sizeRegModRMByte();
++CurOp;
break;
}
if (!Desc->isVariadic() && CurOp != NumOps) {
cerr << "Cannot determine size: ";
MI.dump();
cerr << '\n';
abort();
}
return FinalSize;
}
unsigned X86InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
const TargetInstrDesc &Desc = MI->getDesc();
bool IsPIC = (TM.getRelocationModel() == Reloc::PIC_);
bool Is64BitMode = ((X86Subtarget*)TM.getSubtargetImpl())->is64Bit();
unsigned Size = GetInstSizeWithDesc(*MI, &Desc, IsPIC, Is64BitMode);
if (Desc.getOpcode() == X86::MOVPC32r) {
Size += GetInstSizeWithDesc(*MI, &get(X86::POP32r), IsPIC, Is64BitMode);
}
return Size;
}
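
To make the X86 computation concrete, a hand-worked decomposition for one simple case (illustrative; MOV32rr is the plain register-to-register move, described as X86II::MRMDestReg with no immediate):

// MOV32rr in 32-bit, non-PIC code:
//   lock/rep/opsize/adsize prefixes:   0 bytes
//   REX prefix (64-bit mode only):     0 bytes
//   opcode byte:                       1 byte
//   ModR/M byte (sizeRegModRMByte):    1 byte
//   GetInstSizeInBytes(MI):            2 bytes
// An instruction that also carries an X86II::Imm32 immediate would add
// sizeOfImm(Desc) = 4 more bytes; memory operands go through
// getMemModRMByteSize(), which may add a SIB byte and a displacement or
// relocation-sized field.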

View File

@ -15,6 +15,7 @@
#define X86INSTRUCTIONINFO_H
#include "llvm/Target/TargetInstrInfo.h"
#include "X86.h"
#include "X86RegisterInfo.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/Target/TargetRegisterInfo.h"
@ -380,6 +381,20 @@ public:
unsigned char getBaseOpcodeFor(unsigned Opcode) const {
return getBaseOpcodeFor(&get(Opcode));
}
static bool isX86_64NonExtLowByteReg(unsigned reg) {
return (reg == X86::SPL || reg == X86::BPL ||
reg == X86::SIL || reg == X86::DIL);
}
static unsigned sizeOfImm(const TargetInstrDesc *Desc);
static unsigned getX86RegNum(unsigned RegNo);
static bool isX86_64ExtendedReg(const MachineOperand &MO);
static unsigned determineREX(const MachineInstr &MI);
/// GetInstSize - Returns the size of the specified MachineInstr.
///
virtual unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
private:
MachineInstr* foldMemoryOperand(MachineInstr* MI,

View File

@ -84,7 +84,7 @@ int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
// getX86RegNum - This function maps LLVM register identifiers to their X86
// specific numbering, which is used in various places encoding instructions.
//
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) const {
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
switch(RegNo) {
case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;

View File

@ -84,7 +84,7 @@ public:
/// getX86RegNum - Returns the native X86 register number for the given LLVM
/// register identifier.
unsigned getX86RegNum(unsigned RegNo) const;
static unsigned getX86RegNum(unsigned RegNo);
unsigned getStackAlignment() const { return StackAlign; }