mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-01-29 13:32:33 +00:00
- Switch X86-64 JIT to large code size model.
- Re-enable some codegen niceties for X86-64 static relocation model codegen. - Clean ups, etc. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@32238 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
f2d9ceb5b9
commit
28b51439f3
@ -229,41 +229,6 @@ list so it will be passed in register:
|
|||||||
|
|
||||||
//===---------------------------------------------------------------------===//
|
//===---------------------------------------------------------------------===//
|
||||||
|
|
||||||
For this:
|
|
||||||
|
|
||||||
extern int dst[];
|
|
||||||
extern int* ptr;
|
|
||||||
|
|
||||||
void test(void) {
|
|
||||||
ptr = dst;
|
|
||||||
}
|
|
||||||
|
|
||||||
We generate this code for static relocation model:
|
|
||||||
|
|
||||||
_test:
|
|
||||||
leaq _dst(%rip), %rax
|
|
||||||
movq %rax, _ptr(%rip)
|
|
||||||
ret
|
|
||||||
|
|
||||||
If we are in small code model, then we can treat _dst as a 32-bit constant.
|
|
||||||
movq $_dst, _ptr(%rip)
|
|
||||||
|
|
||||||
Note, however, we should continue to use RIP relative addressing mode as much as
|
|
||||||
possible. The above is actually one byte shorter than
|
|
||||||
movq $_dst, _ptr
|
|
||||||
|
|
||||||
A better example is the code from PR1018. We are generating:
|
|
||||||
leaq xcalloc2(%rip), %rax
|
|
||||||
movq %rax, 8(%rsp)
|
|
||||||
when we should be generating:
|
|
||||||
movq $xcalloc2, 8(%rsp)
|
|
||||||
|
|
||||||
The reason the better codegen isn't done now is the need to support the static small
|
|
||||||
code model in JIT mode. The JIT cannot ensure that all GV's are placed in the
|
|
||||||
lower 4G so we are not treating GV labels as 32-bit values.
|
|
||||||
|
|
||||||
//===---------------------------------------------------------------------===//
|
|
||||||
|
|
||||||
Right now the asm printer assumes GlobalAddress are accessed via RIP relative
|
Right now the asm printer assumes GlobalAddress are accessed via RIP relative
|
||||||
addressing. Therefore, it is not possible to generate this:
|
addressing. Therefore, it is not possible to generate this:
|
||||||
movabsq $__ZTV10polynomialIdE+16, %rax
|
movabsq $__ZTV10polynomialIdE+16, %rax
|
||||||
|
@ -156,7 +156,7 @@ bool X86ATTAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
|
void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
|
||||||
const char *Modifier) {
|
const char *Modifier, bool NotRIPRel) {
|
||||||
const MachineOperand &MO = MI->getOperand(OpNo);
|
const MachineOperand &MO = MI->getOperand(OpNo);
|
||||||
const MRegisterInfo &RI = *TM.getRegisterInfo();
|
const MRegisterInfo &RI = *TM.getRegisterInfo();
|
||||||
switch (MO.getType()) {
|
switch (MO.getType()) {
|
||||||
@ -192,7 +192,7 @@ void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
|
|||||||
if (X86PICStyle == PICStyle::Stub &&
|
if (X86PICStyle == PICStyle::Stub &&
|
||||||
TM.getRelocationModel() == Reloc::PIC_)
|
TM.getRelocationModel() == Reloc::PIC_)
|
||||||
O << "-\"L" << getFunctionNumber() << "$pb\"";
|
O << "-\"L" << getFunctionNumber() << "$pb\"";
|
||||||
if (isMemOp && Subtarget->is64Bit())
|
if (isMemOp && Subtarget->is64Bit() && !NotRIPRel)
|
||||||
O << "(%rip)";
|
O << "(%rip)";
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -210,7 +210,7 @@ void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
|
|||||||
else if (Offset < 0)
|
else if (Offset < 0)
|
||||||
O << Offset;
|
O << Offset;
|
||||||
|
|
||||||
if (isMemOp && Subtarget->is64Bit())
|
if (isMemOp && Subtarget->is64Bit() && !NotRIPRel)
|
||||||
O << "(%rip)";
|
O << "(%rip)";
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -267,8 +267,12 @@ void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
|
|||||||
|
|
||||||
if (isMemOp && Subtarget->is64Bit()) {
|
if (isMemOp && Subtarget->is64Bit()) {
|
||||||
if (isExt && TM.getRelocationModel() != Reloc::Static)
|
if (isExt && TM.getRelocationModel() != Reloc::Static)
|
||||||
O << "@GOTPCREL";
|
O << "@GOTPCREL(%rip)";
|
||||||
O << "(%rip)";
|
else if (!NotRIPRel)
|
||||||
|
// Use rip when possible to reduce code size, except when index or
|
||||||
|
// base register are also part of the address. e.g.
|
||||||
|
// foo(%rip)(%rcx,%rax,4) is not legal
|
||||||
|
O << "(%rip)";
|
||||||
}
|
}
|
||||||
|
|
||||||
return;
|
return;
|
||||||
@ -329,10 +333,11 @@ void X86ATTAsmPrinter::printMemReference(const MachineInstr *MI, unsigned Op,
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool NotRIPRel = IndexReg.getReg() || BaseReg.getReg();
|
||||||
if (DispSpec.isGlobalAddress() ||
|
if (DispSpec.isGlobalAddress() ||
|
||||||
DispSpec.isConstantPoolIndex() ||
|
DispSpec.isConstantPoolIndex() ||
|
||||||
DispSpec.isJumpTableIndex()) {
|
DispSpec.isJumpTableIndex()) {
|
||||||
printOperand(MI, Op+3, "mem");
|
printOperand(MI, Op+3, "mem", NotRIPRel);
|
||||||
} else {
|
} else {
|
||||||
int DispVal = DispSpec.getImmedValue();
|
int DispVal = DispSpec.getImmedValue();
|
||||||
if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg()))
|
if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg()))
|
||||||
|
@ -35,7 +35,7 @@ struct X86ATTAsmPrinter : public X86SharedAsmPrinter {
|
|||||||
|
|
||||||
// These methods are used by the tablegen'erated instruction printer.
|
// These methods are used by the tablegen'erated instruction printer.
|
||||||
void printOperand(const MachineInstr *MI, unsigned OpNo,
|
void printOperand(const MachineInstr *MI, unsigned OpNo,
|
||||||
const char *Modifier = 0);
|
const char *Modifier = 0, bool NotRIPRel = false);
|
||||||
void printi8mem(const MachineInstr *MI, unsigned OpNo) {
|
void printi8mem(const MachineInstr *MI, unsigned OpNo) {
|
||||||
printMemReference(MI, OpNo);
|
printMemReference(MI, OpNo);
|
||||||
}
|
}
|
||||||
|
@ -595,44 +595,43 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
|
|||||||
// Under X86-64 non-small code model, GV (and friends) are 64-bits.
|
// Under X86-64 non-small code model, GV (and friends) are 64-bits.
|
||||||
if (is64Bit && TM.getCodeModel() != CodeModel::Small)
|
if (is64Bit && TM.getCodeModel() != CodeModel::Small)
|
||||||
break;
|
break;
|
||||||
|
if (AM.GV != 0 || AM.CP != 0 || AM.ES != 0 || AM.JT != -1)
|
||||||
|
break;
|
||||||
// If value is available in a register both base and index components have
|
// If value is available in a register both base and index components have
|
||||||
// been picked, we can't fit the result available in the register in the
|
// been picked, we can't fit the result available in the register in the
|
||||||
// addressing mode. Duplicate GlobalAddress or ConstantPool as displacement.
|
// addressing mode. Duplicate GlobalAddress or ConstantPool as displacement.
|
||||||
if (!Available || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
|
if (!Available || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
|
||||||
// For X86-64 PIC code, only allow GV / CP + displacement so we can use
|
bool isStatic = TM.getRelocationModel() == Reloc::Static;
|
||||||
// RIP relative addressing mode.
|
SDOperand N0 = N.getOperand(0);
|
||||||
if (is64Bit &&
|
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
|
||||||
(AM.Base.Reg.Val || AM.Scale > 1 || AM.IndexReg.Val ||
|
GlobalValue *GV = G->getGlobal();
|
||||||
AM.BaseType == X86ISelAddressMode::FrameIndexBase))
|
bool isAbs32 = !is64Bit ||
|
||||||
break;
|
(isStatic && !(GV->isExternal() || GV->hasWeakLinkage() ||
|
||||||
if (ConstantPoolSDNode *CP =
|
GV->hasLinkOnceLinkage()));
|
||||||
dyn_cast<ConstantPoolSDNode>(N.getOperand(0))) {
|
if (isAbs32 || isRoot) {
|
||||||
if (AM.CP == 0) {
|
AM.GV = G->getGlobal();
|
||||||
|
AM.Disp += G->getOffset();
|
||||||
|
AM.isRIPRel = !isAbs32;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
} else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
|
||||||
|
if (!is64Bit || isStatic || isRoot) {
|
||||||
AM.CP = CP->getConstVal();
|
AM.CP = CP->getConstVal();
|
||||||
AM.Align = CP->getAlignment();
|
AM.Align = CP->getAlignment();
|
||||||
AM.Disp += CP->getOffset();
|
AM.Disp += CP->getOffset();
|
||||||
AM.isRIPRel = is64Bit;
|
AM.isRIPRel = !isStatic;
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
} else if (GlobalAddressSDNode *G =
|
} else if (ExternalSymbolSDNode *S =dyn_cast<ExternalSymbolSDNode>(N0)) {
|
||||||
dyn_cast<GlobalAddressSDNode>(N.getOperand(0))) {
|
if (isStatic || isRoot) {
|
||||||
if (AM.GV == 0) {
|
|
||||||
AM.GV = G->getGlobal();
|
|
||||||
AM.Disp += G->getOffset();
|
|
||||||
AM.isRIPRel = is64Bit;
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
} else if (isRoot && is64Bit) {
|
|
||||||
if (ExternalSymbolSDNode *S =
|
|
||||||
dyn_cast<ExternalSymbolSDNode>(N.getOperand(0))) {
|
|
||||||
AM.ES = S->getSymbol();
|
AM.ES = S->getSymbol();
|
||||||
AM.isRIPRel = true;
|
AM.isRIPRel = !isStatic;
|
||||||
return false;
|
return false;
|
||||||
} else if (JumpTableSDNode *J =
|
}
|
||||||
dyn_cast<JumpTableSDNode>(N.getOperand(0))) {
|
} else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
|
||||||
|
if (isStatic || isRoot) {
|
||||||
AM.JT = J->getIndex();
|
AM.JT = J->getIndex();
|
||||||
AM.isRIPRel = true;
|
AM.isRIPRel = !isStatic;
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -908,7 +907,7 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDOperand Op, SDOperand N,
|
|||||||
if (AM.GV || AM.CP || AM.ES || AM.JT != -1) {
|
if (AM.GV || AM.CP || AM.ES || AM.JT != -1) {
|
||||||
// For X86-64, we should always use lea to materialize RIP relative
|
// For X86-64, we should always use lea to materialize RIP relative
|
||||||
// addresses.
|
// addresses.
|
||||||
if (Subtarget->is64Bit())
|
if (Subtarget->is64Bit() && TM.getRelocationModel() != Reloc::Static)
|
||||||
Complexity = 4;
|
Complexity = 4;
|
||||||
else
|
else
|
||||||
Complexity += 2;
|
Complexity += 2;
|
||||||
|
@ -163,15 +163,16 @@ def MRMInitReg : Format<32>;
|
|||||||
|
|
||||||
//===----------------------------------------------------------------------===//
|
//===----------------------------------------------------------------------===//
|
||||||
// X86 Instruction Predicate Definitions.
|
// X86 Instruction Predicate Definitions.
|
||||||
def HasMMX : Predicate<"Subtarget->hasMMX()">;
|
def HasMMX : Predicate<"Subtarget->hasMMX()">;
|
||||||
def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
|
def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
|
||||||
def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
|
def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
|
||||||
def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
|
def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
|
||||||
def FPStack : Predicate<"!Subtarget->hasSSE2()">;
|
def FPStack : Predicate<"!Subtarget->hasSSE2()">;
|
||||||
def In32BitMode : Predicate<"!Subtarget->is64Bit()">;
|
def In32BitMode : Predicate<"!Subtarget->is64Bit()">;
|
||||||
def In64BitMode : Predicate<"Subtarget->is64Bit()">;
|
def In64BitMode : Predicate<"Subtarget->is64Bit()">;
|
||||||
def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
|
def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
|
||||||
def NotSmallCode :Predicate<"TM.getCodeModel() != CodeModel::Small">;
|
def NotSmallCode : Predicate<"TM.getCodeModel() != CodeModel::Small">;
|
||||||
|
def IsStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">;
|
||||||
|
|
||||||
//===----------------------------------------------------------------------===//
|
//===----------------------------------------------------------------------===//
|
||||||
// X86 specific pattern fragments.
|
// X86 specific pattern fragments.
|
||||||
|
@ -1031,12 +1031,18 @@ def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
|
|||||||
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
|
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
|
||||||
(MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;
|
(MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;
|
||||||
|
|
||||||
/*
|
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
|
||||||
|
(MOV64mi32 addr:$dst, tconstpool:$src)>,
|
||||||
|
Requires<[SmallCode, IsStatic]>;
|
||||||
|
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
|
||||||
|
(MOV64mi32 addr:$dst, tjumptable:$src)>,
|
||||||
|
Requires<[SmallCode, IsStatic]>;
|
||||||
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
|
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
|
||||||
(MOV64mi32 addr:$dst, tglobaladdr:$src)>, Requires<[SmallCode]>;
|
(MOV64mi32 addr:$dst, tglobaladdr:$src)>,
|
||||||
|
Requires<[SmallCode, IsStatic]>;
|
||||||
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
|
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
|
||||||
(MOV64mi32 addr:$dst, texternalsym:$src)>, Requires<[SmallCode]>;
|
(MOV64mi32 addr:$dst, texternalsym:$src)>,
|
||||||
*/
|
Requires<[SmallCode, IsStatic]>;
|
||||||
|
|
||||||
// Calls
|
// Calls
|
||||||
// Direct PC relative function call for small code model. 32-bit displacement
|
// Direct PC relative function call for small code model. 32-bit displacement
|
||||||
|
@ -158,6 +158,9 @@ bool X86TargetMachine::addCodeEmitter(FunctionPassManager &PM, bool Fast,
|
|||||||
MachineCodeEmitter &MCE) {
|
MachineCodeEmitter &MCE) {
|
||||||
// FIXME: Move this to TargetJITInfo!
|
// FIXME: Move this to TargetJITInfo!
|
||||||
setRelocationModel(Reloc::Static);
|
setRelocationModel(Reloc::Static);
|
||||||
|
// JIT cannot ensure globals are placed in the lower 4G of address.
|
||||||
|
if (Subtarget.is64Bit())
|
||||||
|
setCodeModel(CodeModel::Large);
|
||||||
|
|
||||||
PM.add(createX86CodeEmitterPass(*this, MCE));
|
PM.add(createX86CodeEmitterPass(*this, MCE));
|
||||||
return false;
|
return false;
|
||||||
|
Loading…
x
Reference in New Issue
Block a user