Change LEA to have 5 operands for its memory operand, just like all other instructions, even though a segment is not allowed. This resolves a bunch of gross hacks in the encoder and makes LEA more consistent with the rest of the instruction set. No functionality change.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@107934 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 99cfb69f17
commit 599b531a96
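For context while reading the hunks below: the x86 memory reference this commit standardizes LEA on is the same 5-tuple every other memory-using x86 instruction carries — base register, scale, index register, displacement, and segment register (which must be zero for LEA). The standalone C++ sketch below only illustrates that operand layout and the "Reg, 1, NoReg, 0, NoReg" expansion that addDirectMem performs in the updated X86InstrBuilder.h; the struct and helper names here are invented for the example and are not LLVM's MachineInstrBuilder API.

// Illustrative sketch only: mirrors the 5-value x86 memory operand layout
// (base, scale, index, displacement, segment) this commit standardizes on.
// The struct and helper are hypothetical, not the LLVM API used in the diff.
#include <cstdio>

struct X86MemOperand {
  unsigned Base;    // base register (0 = no register)
  int      Scale;   // 1, 2, 4, or 8
  unsigned Index;   // index register (0 = no register)
  int      Disp;    // displacement
  unsigned Segment; // segment register (0 = none; always 0 for LEA)
};

// Analogous to addDirectMem in the diff: a plain [Reg] reference expands to
// the five values Reg, 1, NoReg, 0, NoReg.
X86MemOperand directMem(unsigned Reg) {
  return X86MemOperand{Reg, 1, 0, 0, 0};
}

int main() {
  X86MemOperand M = directMem(7); // 7 is an arbitrary register number
  std::printf("base=%u scale=%d index=%u disp=%d seg=%u\n",
              M.Base, M.Scale, M.Index, M.Disp, M.Segment);
  return 0;
}

With that uniform layout in place, the code emitters no longer need LEA-specific operand counting, which is exactly what the hunks below remove.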
@@ -85,11 +85,18 @@ void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
}
}

void X86ATTInstPrinter::printLeaMemReference(const MCInst *MI, unsigned Op,
raw_ostream &O) {
void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
raw_ostream &O) {
const MCOperand &BaseReg = MI->getOperand(Op);
const MCOperand &IndexReg = MI->getOperand(Op+2);
const MCOperand &DispSpec = MI->getOperand(Op+3);
const MCOperand &SegReg = MI->getOperand(Op+4);

// If this has a segment register, print it.
if (SegReg.getReg()) {
printOperand(MI, Op+4, O);
O << ':';
}

if (DispSpec.isImm()) {
int64_t DispVal = DispSpec.getImm();

@@ -115,13 +122,3 @@ void X86ATTInstPrinter::printLeaMemReference(const MCInst *MI, unsigned Op,
O << ')';
}
}

void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
raw_ostream &O) {
// If this has a segment register, print it.
if (MI->getOperand(Op+4).getReg()) {
printOperand(MI, Op+4, O);
O << ':';
}
printLeaMemReference(MI, Op, O);
}

@@ -34,7 +34,6 @@ public:

void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
void printMemReference(const MCInst *MI, unsigned Op, raw_ostream &OS);
void printLeaMemReference(const MCInst *MI, unsigned Op, raw_ostream &OS);
void printSSECC(const MCInst *MI, unsigned Op, raw_ostream &OS);
void print_pcrel_imm(const MCInst *MI, unsigned OpNo, raw_ostream &OS);

@@ -69,15 +68,6 @@ public:
void printf128mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
void printlea32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printLeaMemReference(MI, OpNo, O);
}
void printlea64mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printLeaMemReference(MI, OpNo, O);
}
void printlea64_32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printLeaMemReference(MI, OpNo, O);
}
};

}

@@ -81,12 +81,19 @@ void X86IntelInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
}
}

void X86IntelInstPrinter::printLeaMemReference(const MCInst *MI, unsigned Op,
raw_ostream &O) {
void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
raw_ostream &O) {
const MCOperand &BaseReg = MI->getOperand(Op);
unsigned ScaleVal = MI->getOperand(Op+1).getImm();
const MCOperand &IndexReg = MI->getOperand(Op+2);
const MCOperand &DispSpec = MI->getOperand(Op+3);
const MCOperand &SegReg = MI->getOperand(Op+4);

// If this has a segment register, print it.
if (SegReg.getReg()) {
printOperand(MI, Op+4, O);
O << ':';
}

O << '[';

@@ -104,7 +111,7 @@ void X86IntelInstPrinter::printLeaMemReference(const MCInst *MI, unsigned Op,
NeedPlus = true;
}

if (!DispSpec.isImm()) {
if (NeedPlus) O << " + ";
assert(DispSpec.isExpr() && "non-immediate displacement for LEA?");

@@ -126,13 +133,3 @@ void X86IntelInstPrinter::printLeaMemReference(const MCInst *MI, unsigned Op,

O << ']';
}

void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
raw_ostream &O) {
// If this has a segment register, print it.
if (MI->getOperand(Op+4).getReg()) {
printOperand(MI, Op+4, O);
O << ':';
}
printLeaMemReference(MI, Op, O);
}

@@ -36,7 +36,6 @@ public:

void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printMemReference(const MCInst *MI, unsigned Op, raw_ostream &O);
void printLeaMemReference(const MCInst *MI, unsigned Op, raw_ostream &O);
void printSSECC(const MCInst *MI, unsigned Op, raw_ostream &O);
void print_pcrel_imm(const MCInst *MI, unsigned OpNo, raw_ostream &O);

@@ -81,18 +80,6 @@ public:
O << "XMMWORD PTR ";
printMemReference(MI, OpNo, O);
}
void printlea32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "DWORD PTR ";
printLeaMemReference(MI, OpNo, O);
}
void printlea64mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "QWORD PTR ";
printLeaMemReference(MI, OpNo, O);
}
void printlea64_32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
O << "QWORD PTR ";
printLeaMemReference(MI, OpNo, O);
}
};

}

@@ -349,6 +349,15 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
switch (OutMI.getOpcode()) {
case X86::LEA64_32r: // Handle 'subreg rewriting' for the lea64_32mem operand.
lower_lea64_32mem(&OutMI, 1);
// FALL THROUGH.
case X86::LEA64r:
case X86::LEA16r:
case X86::LEA32r:
// LEA should have a segment register, but it must be empty.
assert(OutMI.getNumOperands() == 1+X86::AddrNumOperands &&
"Unexpected # of LEA operands");
assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 &&
"LEA has segment specified!");
break;
case X86::MOVZX16rr8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rr8); break;
case X86::MOVZX16rm8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rm8); break;

@@ -750,13 +750,7 @@ void Emitter<CodeEmitter>::emitInstruction(const MachineInstr &MI,
break;

case X86II::MRMSrcMem: {
// FIXME: Maybe lea should have its own form?
int AddrOperands;
if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
Opcode == X86::LEA16r || Opcode == X86::LEA32r)
AddrOperands = X86::AddrNumOperands - 1; // No segment register
else
AddrOperands = X86::AddrNumOperands;
int AddrOperands = X86::AddrNumOperands;

intptr_t PCAdj = (CurOp + AddrOperands + 1 != NumOps) ?
X86II::getSizeOfImm(Desc->TSFlags) : 0;

@@ -1728,7 +1728,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
else
Opc = X86::LEA64r;
unsigned ResultReg = createResultReg(RC);
addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
return ResultReg;
}
return 0;

@@ -1781,7 +1781,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
unsigned ResultReg = createResultReg(RC);
addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
return ResultReg;
}

@@ -190,9 +190,11 @@ namespace {
SDValue &Scale, SDValue &Index, SDValue &Disp,
SDValue &Segment);
bool SelectLEAAddr(SDNode *Op, SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index, SDValue &Disp);
SDValue &Scale, SDValue &Index, SDValue &Disp,
SDValue &Segment);
bool SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index, SDValue &Disp);
SDValue &Scale, SDValue &Index, SDValue &Disp,
SDValue &Segment);
bool SelectScalarSSELoad(SDNode *Root, SDValue N,
SDValue &Base, SDValue &Scale,
SDValue &Index, SDValue &Disp,

@@ -1205,7 +1207,8 @@ bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
/// mode it matches can be cost effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDNode *Op, SDValue N,
SDValue &Base, SDValue &Scale,
SDValue &Index, SDValue &Disp) {
SDValue &Index, SDValue &Disp,
SDValue &Segment) {
X86ISelAddressMode AM;

// Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support

@@ -1259,7 +1262,6 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDNode *Op, SDValue N,
if (Complexity <= 2)
return false;

SDValue Segment;
getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
return true;
}

@@ -1267,7 +1269,7 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDNode *Op, SDValue N,
/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index,
SDValue &Disp) {
SDValue &Disp, SDValue &Segment) {
assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

@@ -1284,7 +1286,6 @@ bool X86DAGToDAGISel::SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
}

SDValue Segment;
getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
return true;
}

@@ -8580,7 +8580,7 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
MI->getOperand(3).getTargetFlags())
.addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
addDirectMem(MIB, X86::RDI).addReg(0);
addDirectMem(MIB, X86::RDI);
} else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
TII->get(X86::MOV32rm), X86::EAX)

@@ -8590,7 +8590,7 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
MI->getOperand(3).getTargetFlags())
.addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
addDirectMem(MIB, X86::EAX).addReg(0);
addDirectMem(MIB, X86::EAX);
} else {
MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
TII->get(X86::MOV32rm), X86::EAX)

@@ -8600,7 +8600,7 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
MI->getOperand(3).getTargetFlags())
.addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
addDirectMem(MIB, X86::EAX).addReg(0);
addDirectMem(MIB, X86::EAX);
}

MI->eraseFromParent(); // The pseudo instruction is gone now.

@@ -35,6 +35,14 @@ def i64i8imm : Operand<i64> {
let ParserMatchClass = ImmSExti64i8AsmOperand;
}

def lea64_32mem : Operand<i32> {
let PrintMethod = "printi32mem";
let AsmOperandLowerMethod = "lower_lea64_32mem";
let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm, i8imm);
let ParserMatchClass = X86MemAsmOperand;
}

// Special i64mem for addresses of load folding tail calls. These are not
// allowed to use callee-saved registers since they must be scheduled
// after callee-saved register are popped.

@@ -44,27 +52,14 @@ def i64mem_TC : Operand<i64> {
let ParserMatchClass = X86MemAsmOperand;
}

def lea64mem : Operand<i64> {
let PrintMethod = "printlea64mem";
let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm);
let ParserMatchClass = X86NoSegMemAsmOperand;
}

def lea64_32mem : Operand<i32> {
let PrintMethod = "printlea64_32mem";
let AsmOperandLowerMethod = "lower_lea64_32mem";
let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm);
let ParserMatchClass = X86NoSegMemAsmOperand;
}

//===----------------------------------------------------------------------===//
// Complex Pattern Definitions.
//
def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
def lea64addr : ComplexPattern<i64, 5, "SelectLEAAddr",
[add, sub, mul, X86mul_imm, shl, or, frameindex,
X86WrapperRIP], []>;

def tls64addr : ComplexPattern<i64, 4, "SelectTLSADDRAddr",
def tls64addr : ComplexPattern<i64, 5, "SelectTLSADDRAddr",
[tglobaltlsaddr], []>;

//===----------------------------------------------------------------------===//

@@ -289,7 +284,7 @@ def LEA64_32r : I<0x8D, MRMSrcMem,
[(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;

let isReMaterializable = 1 in
def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"lea{q}\t{$src|$dst}, {$dst|$src}",
[(set GR64:$dst, lea64addr:$src)]>;

@@ -1697,7 +1692,7 @@ let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
Uses = [RSP] in
def TLS_addr64 : I<0, Pseudo, (outs), (ins lea64mem:$sym),
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
".byte\t0x66; "
"leaq\t$sym(%rip), %rdi; "
".word\t0x6666; "

@@ -64,19 +64,15 @@ struct X86AddressMode {
///
static inline const MachineInstrBuilder &
addDirectMem(const MachineInstrBuilder &MIB, unsigned Reg) {
// Because memory references are always represented with four
// values, this adds: Reg, [1, NoReg, 0] to the instruction.
return MIB.addReg(Reg).addImm(1).addReg(0).addImm(0);
// Because memory references are always represented with five
// values, this adds: Reg, 1, NoReg, 0, NoReg to the instruction.
return MIB.addReg(Reg).addImm(1).addReg(0).addImm(0).addReg(0);
}

static inline const MachineInstrBuilder &
addLeaOffset(const MachineInstrBuilder &MIB, int Offset) {
return MIB.addImm(1).addReg(0).addImm(Offset);
}

static inline const MachineInstrBuilder &
addOffset(const MachineInstrBuilder &MIB, int Offset) {
return addLeaOffset(MIB, Offset).addReg(0);
return MIB.addImm(1).addReg(0).addImm(Offset).addReg(0);
}

/// addRegOffset - This function is used to add a memory reference of the form

@@ -89,25 +85,20 @@ addRegOffset(const MachineInstrBuilder &MIB,
return addOffset(MIB.addReg(Reg, getKillRegState(isKill)), Offset);
}

static inline const MachineInstrBuilder &
addLeaRegOffset(const MachineInstrBuilder &MIB,
unsigned Reg, bool isKill, int Offset) {
return addLeaOffset(MIB.addReg(Reg, getKillRegState(isKill)), Offset);
}

/// addRegReg - This function is used to add a memory reference of the form:
/// [Reg + Reg].
static inline const MachineInstrBuilder &addRegReg(const MachineInstrBuilder &MIB,
unsigned Reg1, bool isKill1,
unsigned Reg2, bool isKill2) {
return MIB.addReg(Reg1, getKillRegState(isKill1)).addImm(1)
.addReg(Reg2, getKillRegState(isKill2)).addImm(0);
.addReg(Reg2, getKillRegState(isKill2)).addImm(0).addReg(0);
}

static inline const MachineInstrBuilder &
addLeaAddress(const MachineInstrBuilder &MIB, const X86AddressMode &AM) {
assert (AM.Scale == 1 || AM.Scale == 2 || AM.Scale == 4 || AM.Scale == 8);

addFullAddress(const MachineInstrBuilder &MIB,
const X86AddressMode &AM) {
assert(AM.Scale == 1 || AM.Scale == 2 || AM.Scale == 4 || AM.Scale == 8);

if (AM.BaseType == X86AddressMode::RegBase)
MIB.addReg(AM.Base.Reg);
else if (AM.BaseType == X86AddressMode::FrameIndexBase)

@@ -116,15 +107,11 @@ addLeaAddress(const MachineInstrBuilder &MIB, const X86AddressMode &AM) {
assert (0);
MIB.addImm(AM.Scale).addReg(AM.IndexReg);
if (AM.GV)
return MIB.addGlobalAddress(AM.GV, AM.Disp, AM.GVOpFlags);
MIB.addGlobalAddress(AM.GV, AM.Disp, AM.GVOpFlags);
else
return MIB.addImm(AM.Disp);
}

static inline const MachineInstrBuilder &
addFullAddress(const MachineInstrBuilder &MIB,
const X86AddressMode &AM) {
return addLeaAddress(MIB, AM).addReg(0);
MIB.addImm(AM.Disp);

return MIB.addReg(0);
}

/// addFrameReference - This function is used to add a reference to the base of

@@ -1158,20 +1158,20 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
case X86::SHL16ri: {
unsigned ShAmt = MI->getOperand(2).getImm();
MIB.addReg(0).addImm(1 << ShAmt)
.addReg(leaInReg, RegState::Kill).addImm(0);
.addReg(leaInReg, RegState::Kill).addImm(0).addReg(0);
break;
}
case X86::INC16r:
case X86::INC64_16r:
addLeaRegOffset(MIB, leaInReg, true, 1);
addRegOffset(MIB, leaInReg, true, 1);
break;
case X86::DEC16r:
case X86::DEC64_16r:
addLeaRegOffset(MIB, leaInReg, true, -1);
addRegOffset(MIB, leaInReg, true, -1);
break;
case X86::ADD16ri:
case X86::ADD16ri8:
addLeaRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
break;
case X86::ADD16rr: {
unsigned Src2 = MI->getOperand(2).getReg();

@@ -1274,7 +1274,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
.addReg(Dest, RegState::Define | getDeadRegState(isDead))
.addReg(0).addImm(1 << ShAmt)
.addReg(Src, getKillRegState(isKill))
.addImm(0);
.addImm(0).addReg(0);
break;
}
case X86::SHL32ri: {

@@ -1288,7 +1288,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addReg(Dest, RegState::Define | getDeadRegState(isDead))
.addReg(0).addImm(1 << ShAmt)
.addReg(Src, getKillRegState(isKill)).addImm(0);
.addReg(Src, getKillRegState(isKill)).addImm(0).addReg(0);
break;
}
case X86::SHL16ri: {

@@ -1304,7 +1304,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
.addReg(Dest, RegState::Define | getDeadRegState(isDead))
.addReg(0).addImm(1 << ShAmt)
.addReg(Src, getKillRegState(isKill))
.addImm(0);
.addImm(0).addReg(0);
break;
}
default: {

@@ -1322,7 +1322,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
Src, isKill, 1);

@@ -1344,7 +1344,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
Src, isKill, -1);

@@ -1392,7 +1392,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD64ri32:
case X86::ADD64ri8:
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
Src, isKill, MI->getOperand(2).getImm());

@@ -1401,7 +1401,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD32ri8: {
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
Src, isKill, MI->getOperand(2).getImm());

@@ -1412,7 +1412,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (DisableLEA16)
return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
Src, isKill, MI->getOperand(2).getImm());

@@ -3589,16 +3589,9 @@ static unsigned GetInstSizeWithDesc(const MachineInstr &MI,
break;

case X86II::MRMSrcMem: {
int AddrOperands;
if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
Opcode == X86::LEA16r || Opcode == X86::LEA32r)
AddrOperands = X86::AddrNumOperands - 1; // No segment register
else
AddrOperands = X86::AddrNumOperands;

++FinalSize;
FinalSize += getMemModRMByteSize(MI, CurOp+1, IsPIC, Is64BitMode);
CurOp += AddrOperands + 1;
CurOp += X86::AddrNumOperands + 1;
if (CurOp != NumOps) {
++CurOp;
FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags));

@@ -202,13 +202,9 @@ def X86MemAsmOperand : AsmOperandClass {
let Name = "Mem";
let SuperClasses = [];
}
def X86NoSegMemAsmOperand : AsmOperandClass {
let Name = "NoSegMem";
let SuperClasses = [X86MemAsmOperand];
}
def X86AbsMemAsmOperand : AsmOperandClass {
let Name = "AbsMem";
let SuperClasses = [X86NoSegMemAsmOperand];
let SuperClasses = [X86MemAsmOperand];
}
class X86MemOperand<string printMethod> : Operand<iPTR> {
let PrintMethod = printMethod;

@@ -250,11 +246,6 @@ def i32mem_TC : Operand<i32> {
let ParserMatchClass = X86MemAsmOperand;
}

def lea32mem : Operand<i32> {
let PrintMethod = "printlea32mem";
let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm);
let ParserMatchClass = X86NoSegMemAsmOperand;
}

let ParserMatchClass = X86AbsMemAsmOperand,
PrintMethod = "print_pcrel_imm" in {

@@ -289,26 +280,31 @@ class ImmSExtAsmOperandClass : AsmOperandClass {
// 64-bit immediates, but for a 16-bit target value we want to accept both "-1"
// (which will be a -1ULL), and "0xFF" (-1 in 16-bits).

// [0, 0x7FFFFFFF] | [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
// [0, 0x7FFFFFFF] |
// [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
def ImmSExti64i32AsmOperand : ImmSExtAsmOperandClass {
let Name = "ImmSExti64i32";
}

// [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] | [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
// [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] |
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti16i8AsmOperand : ImmSExtAsmOperandClass {
let Name = "ImmSExti16i8";
let SuperClasses = [ImmSExti64i32AsmOperand];
}

// [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] | [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
// [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] |
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti32i8AsmOperand : ImmSExtAsmOperandClass {
let Name = "ImmSExti32i8";
}

// [0, 0x0000007F] | [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
// [0, 0x0000007F] |
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass {
let Name = "ImmSExti64i8";
let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand, ImmSExti64i32AsmOperand];
let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand,
ImmSExti64i32AsmOperand];
}

// A couple of more descriptive operand definitions.

@@ -327,10 +323,10 @@ def i32i8imm : Operand<i32> {

// Define X86 specific addressing mode.
def addr : ComplexPattern<iPTR, 5, "SelectAddr", [], []>;
def lea32addr : ComplexPattern<i32, 4, "SelectLEAAddr",
def lea32addr : ComplexPattern<i32, 5, "SelectLEAAddr",
[add, sub, mul, X86mul_imm, shl, or, frameindex],
[]>;
def tls32addr : ComplexPattern<i32, 4, "SelectTLSADDRAddr",
def tls32addr : ComplexPattern<i32, 5, "SelectTLSADDRAddr",
[tglobaltlsaddr], []>;

//===----------------------------------------------------------------------===//

@@ -878,11 +874,11 @@ def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),

let neverHasSideEffects = 1 in
def LEA16r : I<0x8D, MRMSrcMem,
(outs GR16:$dst), (ins lea32mem:$src),
(outs GR16:$dst), (ins i32mem:$src),
"lea{w}\t{$src|$dst}, {$dst|$src}", []>, OpSize;
let isReMaterializable = 1 in
def LEA32r : I<0x8D, MRMSrcMem,
(outs GR32:$dst), (ins lea32mem:$src),
(outs GR32:$dst), (ins i32mem:$src),
"lea{l}\t{$src|$dst}, {$dst|$src}",
[(set GR32:$dst, lea32addr:$src)]>, Requires<[In32BitMode]>;

@@ -3864,7 +3860,7 @@ let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
Uses = [ESP] in
def TLS_addr32 : I<0, Pseudo, (outs), (ins lea32mem:$sym),
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
"leal\t$sym, %eax; "
"call\t___tls_get_addr@PLT",
[(X86tlsaddr tls32addr:$sym)]>,

@@ -655,10 +655,7 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
default: assert(0 && "Invalid segment!");
case 0:
// No segment override, check for explicit one on memory operand.
if (MemOperand != -1 && // If the instruction has a memory operand.
// FIXME: This is disgusting.
MI.getOpcode() != X86::LEA64r && MI.getOpcode() != X86::LEA64_32r &&
MI.getOpcode() != X86::LEA16r && MI.getOpcode() != X86::LEA32r) {
if (MemOperand != -1) { // If the instruction has a memory operand.
switch (MI.getOperand(MemOperand+X86::AddrSegmentReg).getReg()) {
default: assert(0 && "Unknown segment register!");
case 0: break;

@@ -839,11 +836,6 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
}

// FIXME: Maybe lea should have its own form? This is a horrible hack.
if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
Opcode == X86::LEA16r || Opcode == X86::LEA32r)
--AddrOperands; // No segment register

EmitByte(BaseOpcode, CurByte, OS);

EmitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),

@@ -1217,8 +1217,8 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
if (CSSize) {
unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
MachineInstr *MI =
addLeaRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
FramePtr, false, -CSSize);
addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
FramePtr, false, -CSSize);
MBB.insert(MBBI, MI);
} else {
BuildMI(MBB, MBBI, DL,