This patch makes the following changes necessary for MIPS' direct code emission.

- lower unaligned loads/stores.
- encode the size operand of instructions INS and EXT.
- emit relocation information needed for JAL (jump-and-link).

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@145113 91177308-0d34-0410-b5e6-96231b3b80d8

commit 421455f1ea (parent 84bfc2f090)
MipsMCCodeEmitter.cpp

@@ -173,11 +173,21 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
   } else if (MO.isExpr()) {
     const MCExpr *Expr = MO.getExpr();
     MCExpr::ExprKind Kind = Expr->getKind();
+    unsigned Ret = 0;
+
+    if (Kind == MCExpr::Binary) {
+      const MCBinaryExpr *BE = static_cast<const MCBinaryExpr*>(Expr);
+      Expr = BE->getLHS();
+      Kind = Expr->getKind();
+      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(BE->getRHS());
+      assert((Kind == MCExpr::SymbolRef) && CE &&
+             "Binary expression must be sym+const.");
+      Ret = CE->getValue();
+    }
+
     if (Kind == MCExpr::SymbolRef) {
-      Mips::Fixups FixupKind = Mips::fixup_Mips_NONE;
-      MCSymbolRefExpr::VariantKind SymRefKind =
-        cast<MCSymbolRefExpr>(Expr)->getKind();
-      switch(SymRefKind) {
+      Mips::Fixups FixupKind;
+      switch(cast<MCSymbolRefExpr>(Expr)->getKind()) {
       case MCSymbolRefExpr::VK_Mips_GPREL:
         FixupKind = Mips::fixup_Mips_GPREL16;
         break;
@@ -206,12 +216,12 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
         FixupKind = Mips::fixup_Mips_TPREL_LO;
         break;
       default:
-        return 0;
+        return Ret;
       } // switch
       Fixups.push_back(MCFixup::Create(0, Expr, MCFixupKind(FixupKind)));
     } // if SymbolRef
     // All of the information is in the fixup.
-    return 0;
+    return Ret;
   }
   llvm_unreachable("Unable to encode MCOperand!");
   // Not reached
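
The Binary branch added above is what lets an operand of the form "symbol + constant" (for example an address lowered as sym+3 by the unaligned-access expansion later in this patch) be encoded: the symbolic half still becomes a relocation fixup, while the constant addend is returned in Ret and written straight into the instruction's immediate field. The following is a minimal standalone sketch of that decomposition; it is illustrative only, not part of the patch, and assumes the MC expression API of this LLVM revision.

    // Sketch only: split "sym + const" into the symbolic part (which becomes
    // a fixup) and the constant addend (which plays the role of Ret above).
    #include "llvm/MC/MCExpr.h"
    #include "llvm/Support/Casting.h"
    #include <utility>

    static std::pair<const llvm::MCExpr *, int64_t>
    splitSymPlusConst(const llvm::MCExpr *Expr) {
      using namespace llvm;
      int64_t Addend = 0;
      if (Expr->getKind() == MCExpr::Binary) {
        const MCBinaryExpr *BE = cast<MCBinaryExpr>(Expr);
        if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(BE->getRHS())) {
          Addend = CE->getValue();   // encoded inline in the instruction
          Expr = BE->getLHS();       // only the symbol is left for the fixup
        }
      }
      return std::make_pair(Expr, Addend);
    }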
@@ -234,15 +244,22 @@ MipsMCCodeEmitter::getMemEncoding(const MCInst &MI, unsigned OpNo,
 unsigned
 MipsMCCodeEmitter::getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
-  // FIXME: implement
-  return 0;
+  assert(MI.getOperand(OpNo).isImm());
+  unsigned szEncoding = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups);
+  return szEncoding - 1;
 }
 
+// FIXME: should be called getMSBEncoding
+//
 unsigned
 MipsMCCodeEmitter::getSizeInsEncoding(const MCInst &MI, unsigned OpNo,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
-  // FIXME: implement
-  return 0;
+  assert(MI.getOperand(OpNo-1).isImm());
+  assert(MI.getOperand(OpNo).isImm());
+  unsigned pos = getMachineOpValue(MI, MI.getOperand(OpNo-1), Fixups);
+  unsigned sz = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups);
+
+  return pos + sz - 1;
 }
 
 #include "MipsGenMCCodeEmitter.inc"
MipsAsmPrinter.cpp

@@ -78,12 +78,19 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
 
   // Enclose unaligned load or store with .macro & .nomacro directives.
   if (isUnalignedLoadStore(Opc)) {
-    MCInst Directive;
-    Directive.setOpcode(Mips::MACRO);
-    OutStreamer.EmitInstruction(Directive);
-    OutStreamer.EmitInstruction(TmpInst0);
-    Directive.setOpcode(Mips::NOMACRO);
-    OutStreamer.EmitInstruction(Directive);
+    if (OutStreamer.hasRawTextSupport()) {
+      MCInst Directive;
+      Directive.setOpcode(Mips::MACRO);
+      OutStreamer.EmitInstruction(Directive);
+      OutStreamer.EmitInstruction(TmpInst0);
+      Directive.setOpcode(Mips::NOMACRO);
+      OutStreamer.EmitInstruction(Directive);
+    } else {
+      MCInstLowering.LowerUnalignedLoadStore(MI, MCInsts);
+      for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin(); I
+             != MCInsts.end(); ++I)
+        OutStreamer.EmitInstruction(*I);
+    }
     return;
   }
 
@@ -91,8 +98,8 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
   // Lower CPLOAD and CPRESTORE
   if (Opc == Mips::CPLOAD) {
     MCInstLowering.LowerCPLOAD(MI, MCInsts);
-    for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin();
-         I != MCInsts.end(); ++I)
+    for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin(); I
+           != MCInsts.end(); ++I)
       OutStreamer.EmitInstruction(*I);
     return;
   }
@@ -101,7 +108,7 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
     MCInstLowering.LowerCPRESTORE(MI, TmpInst0);
     OutStreamer.EmitInstruction(TmpInst0);
     return;
   }
-    }
+  }
 
   OutStreamer.EmitInstruction(TmpInst0);
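
The new branch on OutStreamer.hasRawTextSupport() is the heart of this change: when the output is assembly text, the ulw/ush/usw pseudo can simply be printed between .set macro / .set nomacro and the external assembler expands it; with direct code emission there is no assembler behind the streamer, so the compiler must expand the pseudo itself through LowerUnalignedLoadStore. A commented restatement of that dispatch follows; it is a sketch that assumes the surrounding declarations of OutStreamer, MCInstLowering, MCInsts and TmpInst0 shown in the hunk, not a drop-in replacement.

    if (OutStreamer.hasRawTextSupport()) {
      // Assembly output: print the unaligned pseudo and let the assembler's
      // macro expansion turn it into real instructions.
      MCInst Directive;
      Directive.setOpcode(Mips::MACRO);
      OutStreamer.EmitInstruction(Directive);
      OutStreamer.EmitInstruction(TmpInst0);
      Directive.setOpcode(Mips::NOMACRO);
      OutStreamer.EmitInstruction(Directive);
    } else {
      // Direct object emission: no assembler will see the pseudo, so expand
      // it here into lwl/lwr, swl/swr, lbu/ins or sb/srl/sb sequences.
      MCInstLowering.LowerUnalignedLoadStore(MI, MCInsts);
      for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin(),
             E = MCInsts.end(); I != E; ++I)
        OutStreamer.EmitInstruction(*I);
    }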
MipsAsmPrinter.h

@@ -27,9 +27,11 @@ class MachineBasicBlock;
 class Module;
 
 class LLVM_LIBRARY_VISIBILITY MipsAsmPrinter : public AsmPrinter {
-  const MipsSubtarget *Subtarget;
-
+
 public:
+
+  const MipsSubtarget *Subtarget;
+
   explicit MipsAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
     : AsmPrinter(TM, Streamer) {
     Subtarget = &TM.getSubtarget<MipsSubtarget>();
MipsInstrInfo.td

@@ -145,7 +145,9 @@ def brtarget : Operand<OtherVT> {
   let EncoderMethod = "getBranchTargetOpValue";
   let OperandType = "OPERAND_PCREL";
 }
-def calltarget : Operand<i32>;
+def calltarget : Operand<iPTR> {
+  let EncoderMethod = "getJumpTargetOpValue";
+}
 def calltarget64: Operand<i64>;
 def simm16 : Operand<i32>;
 def simm16_64 : Operand<i64>;
@@ -378,6 +380,22 @@ class StoreM<bits<6> op, string instr_asm, PatFrag OpNode, RegisterClass RC,
   let isPseudo = Pseudo;
 }
 
+// Memory Load/Store
+let canFoldAsLoad = 1 in
+class LoadX<bits<6> op, RegisterClass RC,
+            Operand MemOpnd>:
+  FMem<op, (outs RC:$rt), (ins MemOpnd:$addr),
+       "",
+       [], IILoad> {
+}
+
+class StoreX<bits<6> op, RegisterClass RC,
+             Operand MemOpnd>:
+  FMem<op, (outs), (ins RC:$rt, MemOpnd:$addr),
+       "",
+       [], IIStore> {
+}
+
 // 32-bit load.
 multiclass LoadM32<bits<6> op, string instr_asm, PatFrag OpNode,
                    bit Pseudo = 0> {
@@ -396,6 +414,13 @@ multiclass LoadM64<bits<6> op, string instr_asm, PatFrag OpNode,
              Requires<[IsN64]>;
 }
 
+// 32-bit load.
+multiclass LoadX32<bits<6> op> {
+  def #NAME# : LoadX<op, CPURegs, mem>,
+               Requires<[NotN64]>;
+  def _P8 : LoadX<op, CPURegs, mem64>,
+            Requires<[IsN64]>;
+}
 // 32-bit store.
 multiclass StoreM32<bits<6> op, string instr_asm, PatFrag OpNode,
                     bit Pseudo = 0> {
@@ -414,6 +439,14 @@ multiclass StoreM64<bits<6> op, string instr_asm, PatFrag OpNode,
              Requires<[IsN64]>;
 }
 
+// 32-bit store.
+multiclass StoreX32<bits<6> op> {
+  def #NAME# : StoreX<op, CPURegs, mem>,
+               Requires<[NotN64]>;
+  def _P8 : StoreX<op, CPURegs, mem64>,
+            Requires<[IsN64]>;
+}
+
 // Conditional Branch
 class CBranch<bits<6> op, string instr_asm, PatFrag cond_op, RegisterClass RC>:
   CBranchBase<op, (outs), (ins RC:$rs, RC:$rt, brtarget:$imm16),
@@ -761,6 +794,12 @@ defm ULW : LoadM32<0x23, "ulw", load_u, 1>;
 defm USH : StoreM32<0x29, "ush", truncstorei16_u, 1>;
 defm USW : StoreM32<0x2b, "usw", store_u, 1>;
 
+/// Primitives for unaligned
+defm LWL : LoadX32<0x22>;
+defm LWR : LoadX32<0x26>;
+defm SWL : StoreX32<0x2A>;
+defm SWR : StoreX32<0x2E>;
+
 let hasSideEffects = 1 in
 def SYNC : MipsInst<(outs), (ins i32imm:$stype), "sync $stype",
                     [(MipsSync imm:$stype)], NoItinerary, FrmOther>
MipsMCInstLower.cpp

@@ -23,6 +23,7 @@
 #include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
 #include "llvm/Target/Mangler.h"
 
 using namespace llvm;
 
 MipsMCInstLower::MipsMCInstLower(Mangler *mang, const MachineFunction &mf,
@@ -55,34 +56,34 @@ MCOperand MipsMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
   }
 
   switch (MOTy) {
-    case MachineOperand::MO_MachineBasicBlock:
-      Symbol = MO.getMBB()->getSymbol();
-      break;
+  case MachineOperand::MO_MachineBasicBlock:
+    Symbol = MO.getMBB()->getSymbol();
+    break;
 
-    case MachineOperand::MO_GlobalAddress:
-      Symbol = Mang->getSymbol(MO.getGlobal());
-      break;
+  case MachineOperand::MO_GlobalAddress:
+    Symbol = Mang->getSymbol(MO.getGlobal());
+    break;
 
-    case MachineOperand::MO_BlockAddress:
-      Symbol = AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress());
-      break;
+  case MachineOperand::MO_BlockAddress:
+    Symbol = AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress());
+    break;
 
-    case MachineOperand::MO_ExternalSymbol:
-      Symbol = AsmPrinter.GetExternalSymbolSymbol(MO.getSymbolName());
-      break;
+  case MachineOperand::MO_ExternalSymbol:
+    Symbol = AsmPrinter.GetExternalSymbolSymbol(MO.getSymbolName());
+    break;
 
-    case MachineOperand::MO_JumpTableIndex:
-      Symbol = AsmPrinter.GetJTISymbol(MO.getIndex());
-      break;
+  case MachineOperand::MO_JumpTableIndex:
+    Symbol = AsmPrinter.GetJTISymbol(MO.getIndex());
+    break;
 
-    case MachineOperand::MO_ConstantPoolIndex:
-      Symbol = AsmPrinter.GetCPISymbol(MO.getIndex());
-      if (MO.getOffset())
-        Offset += MO.getOffset();
-      break;
+  case MachineOperand::MO_ConstantPoolIndex:
+    Symbol = AsmPrinter.GetCPISymbol(MO.getIndex());
+    if (MO.getOffset())
+      Offset += MO.getOffset();
+    break;
 
-    default:
-      llvm_unreachable("<unknown operand type>");
+  default:
+    llvm_unreachable("<unknown operand type>");
   }
 
   const MCSymbolRefExpr *MCSym = MCSymbolRefExpr::Create(Symbol, Kind, Ctx);
@@ -145,8 +146,8 @@ void MipsMCInstLower::LowerCPRESTORE(const MachineInstr *MI, MCInst &OutMI) {
   OutMI.addOperand(MCOperand::CreateImm(MO.getImm()));
 }
 
-
-MCOperand MipsMCInstLower::LowerOperand(const MachineOperand& MO) const {
+MCOperand MipsMCInstLower::LowerOperand(const MachineOperand& MO,
+                                        unsigned offset) const {
   MachineOperandType MOTy = MO.getType();
 
   switch (MOTy) {
@@ -158,14 +159,14 @@ MCOperand MipsMCInstLower::LowerOperand(const MachineOperand& MO) const {
     if (MO.isImplicit()) break;
     return MCOperand::CreateReg(MO.getReg());
   case MachineOperand::MO_Immediate:
-    return MCOperand::CreateImm(MO.getImm());
+    return MCOperand::CreateImm(MO.getImm() + offset);
   case MachineOperand::MO_MachineBasicBlock:
  case MachineOperand::MO_GlobalAddress:
   case MachineOperand::MO_ExternalSymbol:
   case MachineOperand::MO_JumpTableIndex:
   case MachineOperand::MO_ConstantPoolIndex:
   case MachineOperand::MO_BlockAddress:
-    return LowerSymbolOperand(MO, MOTy, 0);
+    return LowerSymbolOperand(MO, MOTy, offset);
   }
 
   return MCOperand();
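
LowerOperand now takes an offset (defaulting to 0), and the unaligned-access lowering below uses it to request operands displaced by 1 or 3 bytes. For an immediate the displacement is simply added; for a symbolic address it presumably ends up folded into the MC expression as the "sym + const" form that the code-emitter hunk above asserts on. A hedged sketch of how such an operand can be built (an assumed shape for illustration, not code taken from this hunk):

    #include "llvm/MC/MCExpr.h"
    #include "llvm/MC/MCInst.h"

    // Build an operand for "symbol + offset" so that it reaches
    // getMachineOpValue as the MCBinaryExpr sym+const case handled there.
    static llvm::MCOperand
    makeSymbolPlusOffset(const llvm::MCSymbol *Symbol,
                         llvm::MCSymbolRefExpr::VariantKind Kind,
                         int64_t Offset, llvm::MCContext &Ctx) {
      using namespace llvm;
      const MCExpr *Expr = MCSymbolRefExpr::Create(Symbol, Kind, Ctx);
      if (Offset)
        Expr = MCBinaryExpr::CreateAdd(Expr,
                                       MCConstantExpr::Create(Offset, Ctx),
                                       Ctx);
      return MCOperand::CreateExpr(Expr);
    }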
@@ -182,3 +183,116 @@ void MipsMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
     OutMI.addOperand(MCOp);
   }
 }
+
+void MipsMCInstLower::LowerUnalignedLoadStore(const MachineInstr *MI,
+                                              SmallVector<MCInst,
+                                              4>& MCInsts) {
+  unsigned Opc = MI->getOpcode();
+  MCInst instr1, instr2, instr3, move;
+
+  bool two_instructions = false;
+
+  assert(MI->getNumOperands() == 3);
+  assert(MI->getOperand(0).isReg());
+  assert(MI->getOperand(1).isReg());
+
+  MCOperand target = LowerOperand(MI->getOperand(0));
+  MCOperand base = LowerOperand(MI->getOperand(1));
+  MCOperand atReg = MCOperand::CreateReg(Mips::AT);
+  MCOperand zeroReg = MCOperand::CreateReg(Mips::ZERO);
+
+  MachineOperand unloweredName = MI->getOperand(2);
+  MCOperand name = LowerOperand(unloweredName);
+
+  move.setOpcode(Mips::ADDu);
+  move.addOperand(target);
+  move.addOperand(atReg);
+  move.addOperand(zeroReg);
+
+  switch (Opc) {
+  case Mips::ULW: {
+    // FIXME: only works for little endian right now
+    MCOperand adj_name = LowerOperand(unloweredName, 3);
+    if (base.getReg() == (target.getReg())) {
+      instr1.setOpcode(Mips::LWL);
+      instr1.addOperand(atReg);
+      instr1.addOperand(base);
+      instr1.addOperand(adj_name);
+      instr2.setOpcode(Mips::LWR);
+      instr2.addOperand(atReg);
+      instr2.addOperand(base);
+      instr2.addOperand(name);
+      instr3 = move;
+    } else {
+      two_instructions = true;
+      instr1.setOpcode(Mips::LWL);
+      instr1.addOperand(target);
+      instr1.addOperand(base);
+      instr1.addOperand(adj_name);
+      instr2.setOpcode(Mips::LWR);
+      instr2.addOperand(target);
+      instr2.addOperand(base);
+      instr2.addOperand(name);
+    }
+    break;
+  }
+  case Mips::ULHu: {
+    // FIXME: only works for little endian right now
+    MCOperand adj_name = LowerOperand(unloweredName, 1);
+    instr1.setOpcode(Mips::LBu);
+    instr1.addOperand(atReg);
+    instr1.addOperand(base);
+    instr1.addOperand(adj_name);
+    instr2.setOpcode(Mips::LBu);
+    instr2.addOperand(target);
+    instr2.addOperand(base);
+    instr2.addOperand(name);
+    instr3.setOpcode(Mips::INS);
+    instr3.addOperand(target);
+    instr3.addOperand(atReg);
+    instr3.addOperand(MCOperand::CreateImm(0x8));
+    instr3.addOperand(MCOperand::CreateImm(0x18));
+    break;
+  }
+
+  case Mips::USW: {
+    // FIXME: only works for little endian right now
+    assert (base.getReg() != target.getReg());
+    two_instructions = true;
+    MCOperand adj_name = LowerOperand(unloweredName, 3);
+    instr1.setOpcode(Mips::SWL);
+    instr1.addOperand(target);
+    instr1.addOperand(base);
+    instr1.addOperand(adj_name);
+    instr2.setOpcode(Mips::SWR);
+    instr2.addOperand(target);
+    instr2.addOperand(base);
+    instr2.addOperand(name);
+    break;
+  }
+  case Mips::USH: {
+    MCOperand adj_name = LowerOperand(unloweredName, 1);
+    instr1.setOpcode(Mips::SB);
+    instr1.addOperand(target);
+    instr1.addOperand(base);
+    instr1.addOperand(name);
+    instr2.setOpcode(Mips::SRL);
+    instr2.addOperand(atReg);
+    instr2.addOperand(target);
+    instr2.addOperand(MCOperand::CreateImm(8));
+    instr3.setOpcode(Mips::SB);
+    instr3.addOperand(atReg);
+    instr3.addOperand(base);
+    instr3.addOperand(adj_name);
+    break;
+  }
+  default:
+    // FIXME: need to add others
+    assert(0 && "unaligned instruction not processed");
+  }
+
+  MCInsts.push_back(instr1);
+  MCInsts.push_back(instr2);
+  if (!two_instructions) MCInsts.push_back(instr3);
+}
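
The expansions above follow the classic MIPS unaligned-access idiom: for ULW on little endian, lwl reads from base+3 and lwr from base+0, and when the destination doubles as the base register the pair loads into $at first and the final ADDu-with-$zero move copies the result over. The standalone sketch below (illustrative only, not LLVM code) simulates the little-endian lwl/lwr merge to show why the two partial loads reconstruct the full word:

    #include <cassert>
    #include <cstdint>

    // Little-endian LWL: merge mem[ea], mem[ea-1], ... down to the start of
    // the aligned word, placing mem[ea] in the most significant byte lane.
    static uint32_t lwl_le(uint32_t rt, const uint8_t *mem, unsigned ea) {
      unsigned n = (ea & 3) + 1;                    // 1..4 bytes
      for (unsigned i = 0; i < n; ++i) {
        unsigned lane = 3 - i;                      // fill from the top down
        rt &= ~(0xFFu << (8 * lane));
        rt |= static_cast<uint32_t>(mem[ea - i]) << (8 * lane);
      }
      return rt;
    }

    // Little-endian LWR: merge mem[ea], mem[ea+1], ... up to the end of the
    // aligned word, placing mem[ea] in the least significant byte lane.
    static uint32_t lwr_le(uint32_t rt, const uint8_t *mem, unsigned ea) {
      unsigned n = 4 - (ea & 3);                    // 1..4 bytes
      for (unsigned i = 0; i < n; ++i) {
        rt &= ~(0xFFu << (8 * i));
        rt |= static_cast<uint32_t>(mem[ea + i]) << (8 * i);
      }
      return rt;
    }

    int main() {
      // Word 0x44332211 stored unaligned at address 1.
      const uint8_t mem[8] = {0xAA, 0x11, 0x22, 0x33, 0x44, 0xBB, 0xCC, 0xDD};
      uint32_t rt = 0;
      rt = lwl_le(rt, mem, 1 + 3);  // "lwl $rt, 3($base)", $base pointing at 1
      rt = lwr_le(rt, mem, 1);      // "lwr $rt, 0($base)"
      assert(rt == 0x44332211u);
      return 0;
    }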
MipsMCInstLower.h

@@ -37,10 +37,12 @@ public:
   void Lower(const MachineInstr *MI, MCInst &OutMI) const;
   void LowerCPLOAD(const MachineInstr *MI, SmallVector<MCInst, 4>& MCInsts);
   void LowerCPRESTORE(const MachineInstr *MI, MCInst &OutMI);
+  void LowerUnalignedLoadStore(const MachineInstr *MI,
+                               SmallVector<MCInst, 4>& MCInsts);
 private:
   MCOperand LowerSymbolOperand(const MachineOperand &MO,
                                MachineOperandType MOTy, unsigned Offset) const;
-  MCOperand LowerOperand(const MachineOperand& MO) const;
+  MCOperand LowerOperand(const MachineOperand& MO, unsigned offset = 0) const;
 };
 }