- Switch X86-64 JIT to large code size model.

- Re-enable some codegen niceties for X86-64 static relocation model codegen.
- Clean ups, etc.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@32238 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Evan Cheng 2006-12-05 19:50:18 +00:00
parent f2d9ceb5b9
commit 28b51439f3
7 changed files with 61 additions and 82 deletions

View File

@ -229,41 +229,6 @@ list so it will be passed in register:
//===---------------------------------------------------------------------===//
For this:
extern int dst[];
extern int* ptr;
void test(void) {
ptr = dst;
}
We generate this code for static relocation model:
_test:
leaq _dst(%rip), %rax
movq %rax, _ptr(%rip)
ret
If we are in small code model, then we can treat _dst as a 32-bit constant.
movq $_dst, _ptr(%rip)
Note, however, we should continue to use RIP relative addressing mode as much as
possible. The above is actually one byte shorter than
movq $_dst, _ptr
A better example is the code from PR1018. We are generating:
leaq xcalloc2(%rip), %rax
movq %rax, 8(%rsp)
when we should be generating:
movq $xcalloc2, 8(%rsp)
The reason the better codegen isn't done now is support for static small
code model in JIT mode. The JIT cannot ensure that all GV's are placed in the
lower 4G so we are not treating GV labels as 32-bit values.
//===---------------------------------------------------------------------===//
Right now the asm printer assumes GlobalAddress are accessed via RIP relative
addressing. Therefore, it is not possible to generate this:
movabsq $__ZTV10polynomialIdE+16, %rax

View File

@ -156,7 +156,7 @@ bool X86ATTAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
}
void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
const char *Modifier) {
const char *Modifier, bool NotRIPRel) {
const MachineOperand &MO = MI->getOperand(OpNo);
const MRegisterInfo &RI = *TM.getRegisterInfo();
switch (MO.getType()) {
@ -192,7 +192,7 @@ void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
if (X86PICStyle == PICStyle::Stub &&
TM.getRelocationModel() == Reloc::PIC_)
O << "-\"L" << getFunctionNumber() << "$pb\"";
if (isMemOp && Subtarget->is64Bit())
if (isMemOp && Subtarget->is64Bit() && !NotRIPRel)
O << "(%rip)";
return;
}
@ -210,7 +210,7 @@ void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
else if (Offset < 0)
O << Offset;
if (isMemOp && Subtarget->is64Bit())
if (isMemOp && Subtarget->is64Bit() && !NotRIPRel)
O << "(%rip)";
return;
}
@ -267,8 +267,12 @@ void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
if (isMemOp && Subtarget->is64Bit()) {
if (isExt && TM.getRelocationModel() != Reloc::Static)
O << "@GOTPCREL";
O << "(%rip)";
O << "@GOTPCREL(%rip)";
else if (!NotRIPRel)
// Use rip when possible to reduce code size, except when index or
// base register are also part of the address. e.g.
// foo(%rip)(%rcx,%rax,4) is not legal
O << "(%rip)";
}
return;
@ -329,10 +333,11 @@ void X86ATTAsmPrinter::printMemReference(const MachineInstr *MI, unsigned Op,
return;
}
bool NotRIPRel = IndexReg.getReg() || BaseReg.getReg();
if (DispSpec.isGlobalAddress() ||
DispSpec.isConstantPoolIndex() ||
DispSpec.isJumpTableIndex()) {
printOperand(MI, Op+3, "mem");
printOperand(MI, Op+3, "mem", NotRIPRel);
} else {
int DispVal = DispSpec.getImmedValue();
if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg()))

View File

@ -35,7 +35,7 @@ struct X86ATTAsmPrinter : public X86SharedAsmPrinter {
// These methods are used by the tablegen'erated instruction printer.
void printOperand(const MachineInstr *MI, unsigned OpNo,
const char *Modifier = 0);
const char *Modifier = 0, bool NotRIPRel = false);
void printi8mem(const MachineInstr *MI, unsigned OpNo) {
printMemReference(MI, OpNo);
}

View File

@ -595,44 +595,43 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
// Under X86-64 non-small code model, GV (and friends) are 64-bits.
if (is64Bit && TM.getCodeModel() != CodeModel::Small)
break;
if (AM.GV != 0 || AM.CP != 0 || AM.ES != 0 || AM.JT != -1)
break;
// If value is available in a register both base and index components have
// been picked, we can't fit the result available in the register in the
// addressing mode. Duplicate GlobalAddress or ConstantPool as displacement.
if (!Available || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
// For X86-64 PIC code, only allow GV / CP + displacement so we can use
// RIP relative addressing mode.
if (is64Bit &&
(AM.Base.Reg.Val || AM.Scale > 1 || AM.IndexReg.Val ||
AM.BaseType == X86ISelAddressMode::FrameIndexBase))
break;
if (ConstantPoolSDNode *CP =
dyn_cast<ConstantPoolSDNode>(N.getOperand(0))) {
if (AM.CP == 0) {
bool isStatic = TM.getRelocationModel() == Reloc::Static;
SDOperand N0 = N.getOperand(0);
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
GlobalValue *GV = G->getGlobal();
bool isAbs32 = !is64Bit ||
(isStatic && !(GV->isExternal() || GV->hasWeakLinkage() ||
GV->hasLinkOnceLinkage()));
if (isAbs32 || isRoot) {
AM.GV = G->getGlobal();
AM.Disp += G->getOffset();
AM.isRIPRel = !isAbs32;
return false;
}
} else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
if (!is64Bit || isStatic || isRoot) {
AM.CP = CP->getConstVal();
AM.Align = CP->getAlignment();
AM.Disp += CP->getOffset();
AM.isRIPRel = is64Bit;
AM.isRIPRel = !isStatic;
return false;
}
} else if (GlobalAddressSDNode *G =
dyn_cast<GlobalAddressSDNode>(N.getOperand(0))) {
if (AM.GV == 0) {
AM.GV = G->getGlobal();
AM.Disp += G->getOffset();
AM.isRIPRel = is64Bit;
return false;
}
} else if (isRoot && is64Bit) {
if (ExternalSymbolSDNode *S =
dyn_cast<ExternalSymbolSDNode>(N.getOperand(0))) {
} else if (ExternalSymbolSDNode *S =dyn_cast<ExternalSymbolSDNode>(N0)) {
if (isStatic || isRoot) {
AM.ES = S->getSymbol();
AM.isRIPRel = true;
AM.isRIPRel = !isStatic;
return false;
} else if (JumpTableSDNode *J =
dyn_cast<JumpTableSDNode>(N.getOperand(0))) {
}
} else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
if (isStatic || isRoot) {
AM.JT = J->getIndex();
AM.isRIPRel = true;
AM.isRIPRel = !isStatic;
return false;
}
}
@ -908,7 +907,7 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDOperand Op, SDOperand N,
if (AM.GV || AM.CP || AM.ES || AM.JT != -1) {
// For X86-64, we should always use lea to materialize RIP relative
// addresses.
if (Subtarget->is64Bit())
if (Subtarget->is64Bit() && TM.getRelocationModel() != Reloc::Static)
Complexity = 4;
else
Complexity += 2;

View File

@ -163,15 +163,16 @@ def MRMInitReg : Format<32>;
//===----------------------------------------------------------------------===//
// X86 Instruction Predicate Definitions.
def HasMMX : Predicate<"Subtarget->hasMMX()">;
def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
def FPStack : Predicate<"!Subtarget->hasSSE2()">;
def In32BitMode : Predicate<"!Subtarget->is64Bit()">;
def In64BitMode : Predicate<"Subtarget->is64Bit()">;
def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
def NotSmallCode :Predicate<"TM.getCodeModel() != CodeModel::Small">;
def HasMMX : Predicate<"Subtarget->hasMMX()">;
def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
def FPStack : Predicate<"!Subtarget->hasSSE2()">;
def In32BitMode : Predicate<"!Subtarget->is64Bit()">;
def In64BitMode : Predicate<"Subtarget->is64Bit()">;
def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
def NotSmallCode : Predicate<"TM.getCodeModel() != CodeModel::Small">;
def IsStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">;
//===----------------------------------------------------------------------===//
// X86 specific pattern fragments.

View File

@ -1031,12 +1031,18 @@ def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
(MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;
/*
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tconstpool:$src)>,
Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tjumptable:$src)>,
Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tglobaladdr:$src)>, Requires<[SmallCode]>;
(MOV64mi32 addr:$dst, tglobaladdr:$src)>,
Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
(MOV64mi32 addr:$dst, texternalsym:$src)>, Requires<[SmallCode]>;
*/
(MOV64mi32 addr:$dst, texternalsym:$src)>,
Requires<[SmallCode, IsStatic]>;
// Calls
// Direct PC relative function call for small code model. 32-bit displacement

View File

@ -158,6 +158,9 @@ bool X86TargetMachine::addCodeEmitter(FunctionPassManager &PM, bool Fast,
MachineCodeEmitter &MCE) {
// FIXME: Move this to TargetJITInfo!
setRelocationModel(Reloc::Static);
// JIT cannot ensure globals are placed in the lower 4G of address.
if (Subtarget.is64Bit())
setCodeModel(CodeModel::Large);
PM.add(createX86CodeEmitterPass(*this, MCE));
return false;