Use the same CALL instructions for Windows as for everything else.
The different calling conventions and call-preserved registers are
represented with regmask operands that are added dynamically.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@150708 91177308-0d34-0410-b5e6-96231b3b80d8
commit 527a08b253
parent e3186774f3
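For context on the mechanism the message describes: rather than baking the Win64 call-preserved set into dedicated WINCALL64* opcodes, call lowering attaches a register-mask operand for the active calling convention to the call node, and the register allocator honors whatever mask it finds there. Below is a minimal sketch of that idea, not the literal patch; it assumes the 2012-era TargetRegisterInfo::getCallPreservedMask(CallingConv::ID) and SelectionDAG::getRegisterMask APIs (header paths and exact signatures have changed since).

#include <cassert>
#include <cstdint>
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/CallingConv.h"
#include "llvm/Target/TargetRegisterInfo.h"

using namespace llvm;

// Sketch: during call lowering, push one regmask operand describing the
// call-preserved registers of the active calling convention. Because the
// preserved set now travels with the call as an operand, a single CALL64*
// opcode can serve Win64 and SysV alike, and the WINCALL64* variants go away.
static void addCallPreservedMask(SelectionDAG &DAG,
                                 SmallVectorImpl<SDValue> &Ops,
                                 const TargetRegisterInfo *TRI,
                                 CallingConv::ID CallConv) {
  const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  // The mask becomes a MachineOperand regmask on the emitted call.
  Ops.push_back(DAG.getRegisterMask(Mask));
}

X86TargetLowering::LowerCall in the tree of that era does essentially this when building the call node's operand list, which is why the Win64-specific opcodes, predicates, and TCRETURN def-lists deleted in the hunks below are no longer needed.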
@@ -806,8 +806,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
     }
 
     assert(MO.isImm() && "Unknown RawFrm operand!");
-    if (Opcode == X86::CALLpcrel32 || Opcode == X86::CALL64pcrel32 ||
-        Opcode == X86::WINCALL64pcrel32) {
+    if (Opcode == X86::CALLpcrel32 || Opcode == X86::CALL64pcrel32) {
       // Fix up immediate operand for pc relative calls.
       intptr_t Imm = (intptr_t)MO.getImm();
       Imm = Imm - MCE.getCurrentPCValue() - 4;
@@ -1793,9 +1793,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
   if (CalleeOp) {
     // Register-indirect call.
     unsigned CallOpc;
-    if (Subtarget->isTargetWin64())
-      CallOpc = X86::WINCALL64r;
-    else if (Subtarget->is64Bit())
+    if (Subtarget->is64Bit())
       CallOpc = X86::CALL64r;
     else
       CallOpc = X86::CALL32r;
@@ -1806,9 +1804,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
     // Direct call.
     assert(GV && "Not a direct call");
     unsigned CallOpc;
-    if (Subtarget->isTargetWin64())
-      CallOpc = X86::WINCALL64pcrel32;
-    else if (Subtarget->is64Bit())
+    if (Subtarget->is64Bit())
       CallOpc = X86::CALL64pcrel32;
     else
       CallOpc = X86::CALLpcrel32;
@@ -540,7 +540,7 @@ void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
   const TargetInstrInfo *TII = TM.getInstrInfo();
   if (Subtarget->isTargetCygMing()) {
     unsigned CallOp =
-      Subtarget->is64Bit() ? X86::WINCALL64pcrel32 : X86::CALLpcrel32;
+      Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
     BuildMI(BB, DebugLoc(),
             TII->get(CallOp)).addExternalSymbol("__main");
   }
@@ -12391,22 +12391,6 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
   case X86::TCRETURNdi64:
   case X86::TCRETURNri64:
   case X86::TCRETURNmi64:
-    // Defs of TCRETURNxx64 has Win64's callee-saved registers, as subset.
-    // On AMD64, additional defs should be added before register allocation.
-    if (!Subtarget->isTargetWin64()) {
-      MI->addRegisterDefined(X86::RSI);
-      MI->addRegisterDefined(X86::RDI);
-      MI->addRegisterDefined(X86::XMM6);
-      MI->addRegisterDefined(X86::XMM7);
-      MI->addRegisterDefined(X86::XMM8);
-      MI->addRegisterDefined(X86::XMM9);
-      MI->addRegisterDefined(X86::XMM10);
-      MI->addRegisterDefined(X86::XMM11);
-      MI->addRegisterDefined(X86::XMM12);
-      MI->addRegisterDefined(X86::XMM13);
-      MI->addRegisterDefined(X86::XMM14);
-      MI->addRegisterDefined(X86::XMM15);
-    }
     return BB;
   case X86::WIN_ALLOCA:
     return EmitLoweredWinAlloca(MI, BB);
@@ -945,14 +945,9 @@ def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))),
 // Direct PC relative function call for small code model. 32-bit displacement
 // sign extended to 64-bit.
 def : Pat<(X86call (i64 tglobaladdr:$dst)),
-          (CALL64pcrel32 tglobaladdr:$dst)>, Requires<[NotWin64]>;
+          (CALL64pcrel32 tglobaladdr:$dst)>;
 def : Pat<(X86call (i64 texternalsym:$dst)),
-          (CALL64pcrel32 texternalsym:$dst)>, Requires<[NotWin64]>;
-
-def : Pat<(X86call (i64 tglobaladdr:$dst)),
-          (WINCALL64pcrel32 tglobaladdr:$dst)>, Requires<[IsWin64]>;
-def : Pat<(X86call (i64 texternalsym:$dst)),
-          (WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>;
+          (CALL64pcrel32 texternalsym:$dst)>;
 
 // tailcall stuff
 def : Pat<(X86tcret GR32_TC:$dst, imm:$off),
@@ -204,55 +204,30 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
 //===----------------------------------------------------------------------===//
 // Call Instructions...
 //
-let isCall = 1 in
-  // All calls clobber the non-callee saved registers. RSP is marked as
-  // a use to prevent stack-pointer assignments that appear immediately
-  // before calls from potentially appearing dead. Uses for argument
-  // registers are added manually.
-  let Uses = [RSP] in {
-
-    // NOTE: this pattern doesn't match "X86call imm", because we do not know
-    // that the offset between an arbitrary immediate and the call will fit in
-    // the 32-bit pcrel field that we have.
-    def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
-                          (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
-                          "call{q}\t$dst", [], IIC_CALL_RI>,
-                        Requires<[In64BitMode, NotWin64]>;
-    def CALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
-                          "call{q}\t{*}$dst", [(X86call GR64:$dst)],
-                          IIC_CALL_RI>,
-                        Requires<[In64BitMode, NotWin64]>;
-    def CALL64m       : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
-                          "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))],
-                          IIC_CALL_MEM>,
-                        Requires<[In64BitMode, NotWin64]>;
+
+// RSP is marked as a use to prevent stack-pointer assignments that appear
+// immediately before calls from potentially appearing dead. Uses for argument
+// registers are added manually.
+let isCall = 1, Uses = [RSP] in {
+  // NOTE: this pattern doesn't match "X86call imm", because we do not know
+  // that the offset between an arbitrary immediate and the call will fit in
+  // the 32-bit pcrel field that we have.
+  def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
+                        (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
+                        "call{q}\t$dst", [], IIC_CALL_RI>,
+                      Requires<[In64BitMode]>;
+  def CALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
+                        "call{q}\t{*}$dst", [(X86call GR64:$dst)],
+                        IIC_CALL_RI>,
+                      Requires<[In64BitMode]>;
+  def CALL64m       : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
+                        "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))],
+                        IIC_CALL_MEM>,
+                      Requires<[In64BitMode]>;
 
-    def FARCALL64   : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst),
-                         "lcall{q}\t{*}$dst", [], IIC_CALL_FAR_MEM>;
-  }
-
-  // FIXME: We need to teach codegen about single list of call-clobbered
-  // registers.
-let isCall = 1, isCodeGenOnly = 1 in
-  // All calls clobber the non-callee saved registers. RSP is marked as
-  // a use to prevent stack-pointer assignments that appear immediately
-  // before calls from potentially appearing dead. Uses for argument
-  // registers are added manually.
-  let Uses = [RSP] in {
-    def WINCALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
-                             (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
-                             "call{q}\t$dst", [], IIC_CALL_RI>,
-                           Requires<[IsWin64]>;
-    def WINCALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
-                             "call{q}\t{*}$dst",
-                             [(X86call GR64:$dst)], IIC_CALL_RI>,
-                           Requires<[IsWin64]>;
-    def WINCALL64m       : I<0xFF, MRM2m, (outs),
-                             (ins i64mem:$dst,variable_ops),
-                             "call{q}\t{*}$dst",
-                             [(X86call (loadi64 addr:$dst))], IIC_CALL_MEM>,
-                           Requires<[IsWin64]>;
-  }
+  def FARCALL64   : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst),
+                       "lcall{q}\t{*}$dst", [], IIC_CALL_FAR_MEM>;
+}
 
 let isCall = 1, isCodeGenOnly = 1 in
   // __chkstk(MSVC): clobber R10, R11 and EFLAGS.
@@ -274,7 +274,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
     { X86::BT64ri8,     X86::BT64mi8,     TB_FOLDED_LOAD },
     { X86::CALL32r,     X86::CALL32m,     TB_FOLDED_LOAD },
     { X86::CALL64r,     X86::CALL64m,     TB_FOLDED_LOAD },
-    { X86::WINCALL64r,  X86::WINCALL64m,  TB_FOLDED_LOAD },
     { X86::CMP16ri,     X86::CMP16mi,     TB_FOLDED_LOAD },
     { X86::CMP16ri8,    X86::CMP16mi8,    TB_FOLDED_LOAD },
     { X86::CMP16rr,     X86::CMP16mr,     TB_FOLDED_LOAD },
@@ -523,7 +523,6 @@ def In32BitMode : Predicate<"!Subtarget->is64Bit()">,
 def In64BitMode  : Predicate<"Subtarget->is64Bit()">,
                    AssemblerPredicate<"Mode64Bit">;
 def IsWin64      : Predicate<"Subtarget->isTargetWin64()">;
-def NotWin64     : Predicate<"!Subtarget->isTargetWin64()">;
 def IsNaCl       : Predicate<"Subtarget->isTargetNaCl()">;
 def NotNaCl      : Predicate<"!Subtarget->isTargetNaCl()">;
 def SmallCode    : Predicate<"TM.getCodeModel() == CodeModel::Small">;
@@ -389,14 +389,12 @@ ReSimplify:
     LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
     break;
 
-  // TAILJMPr64, [WIN]CALL64r, [WIN]CALL64pcrel32 - These instructions have
-  // register inputs modeled as normal uses instead of implicit uses. As such,
-  // truncate off all but the first operand (the callee). FIXME: Change isel.
+  // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have register
+  // inputs modeled as normal uses instead of implicit uses. As such, truncate
+  // off all but the first operand (the callee). FIXME: Change isel.
   case X86::TAILJMPr64:
   case X86::CALL64r:
-  case X86::CALL64pcrel32:
-  case X86::WINCALL64r:
-  case X86::WINCALL64pcrel32: {
+  case X86::CALL64pcrel32: {
     unsigned Opcode = OutMI.getOpcode();
     MCOperand Saved = OutMI.getOperand(0);
     OutMI = MCInst();