Mirror of https://github.com/c64scene-ar/llvm-6502.git
Better handle kernel code model. Also, generalize things and fix one subtle bug with the small code model.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@78255 91177308-0d34-0410-b5e6-96231b3b80d8
parent 44af10943c
commit b5e0172405
lib/Target/X86/X86ISelDAGToDAG.cpp

@@ -705,7 +705,7 @@ bool X86DAGToDAGISel::MatchLoad(SDValue N, X86ISelAddressMode &AM) {
 /// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
 /// into an addressing mode. These wrap things that will resolve down into a
 /// symbol reference. If no match is possible, this returns true, otherwise it
 /// returns false.
 bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
   // If the addressing mode already has a symbol as the displacement, we can
   // never match another symbol.
@@ -713,28 +713,27 @@ bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
     return true;

   SDValue N0 = N.getOperand(0);
+  CodeModel::Model M = TM.getCodeModel();

   // Handle X86-64 rip-relative addresses. We check this before checking direct
   // folding because RIP is preferable to non-RIP accesses.
   if (Subtarget->is64Bit() &&
       // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
       // they cannot be folded into immediate fields.
       // FIXME: This can be improved for kernel and other models?
-      TM.getCodeModel() == CodeModel::Small &&
+      (M == CodeModel::Small || M == CodeModel::Kernel) &&

       // Base and index reg must be 0 in order to use %rip as base and lowering
       // must allow RIP.
       !AM.hasBaseOrIndexReg() && N.getOpcode() == X86ISD::WrapperRIP) {

     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
       int64_t Offset = AM.Disp + G->getOffset();
-      if (!isInt32(Offset)) return true;
+      if (!X86::isOffsetSuitableForCodeModel(Offset, M)) return true;
       AM.GV = G->getGlobal();
       AM.Disp = Offset;
       AM.SymbolFlags = G->getTargetFlags();
     } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
       int64_t Offset = AM.Disp + CP->getOffset();
-      if (!isInt32(Offset)) return true;
+      if (!X86::isOffsetSuitableForCodeModel(Offset, M)) return true;
       AM.CP = CP->getConstVal();
       AM.Align = CP->getAlignment();
       AM.Disp = Offset;
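Note: the Kernel case can share Small's WrapperRIP path because the kernel code
model links everything into the top 2GB of the address space, so any symbol is
still reachable from %rip through a sign-extended 32-bit displacement. A minimal
sketch of that reachability argument, with hypothetical addresses (not part of
the commit):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Two hypothetical addresses inside the kernel model's top-2GB window,
      // i.e. [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF].
      uint64_t Rip    = 0xFFFFFFFF81000000ULL;  // some instruction
      uint64_t Symbol = 0xFFFFFFFF81ABCDEFULL;  // some global nearby
      int64_t Disp = (int64_t)(Symbol - Rip);
      // Within one 2GB window the difference always fits the disp32 field.
      assert(Disp >= INT32_MIN && Disp <= INT32_MAX);
      return 0;
    }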
@@ -747,7 +746,7 @@ bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
       AM.JT = J->getIndex();
       AM.SymbolFlags = J->getTargetFlags();
     }

     if (N.getOpcode() == X86ISD::WrapperRIP)
       AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
     return false;
@@ -757,7 +756,7 @@ bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
   // X86-32 always and X86-64 when in -static -mcmodel=small mode. In 64-bit
   // mode, this results in a non-RIP-relative computation.
   if (!Subtarget->is64Bit() ||
-      (TM.getCodeModel() == CodeModel::Small &&
+      ((M == CodeModel::Small || M == CodeModel::Kernel) &&
        TM.getRelocationModel() == Reloc::Static)) {
     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
       AM.GV = G->getGlobal();
@@ -809,7 +808,9 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
   // Limit recursion.
   if (Depth > 5)
     return MatchAddressBase(N, AM);

+  CodeModel::Model M = TM.getCodeModel();
+
   // If this is already a %rip relative address, we can only merge immediates
   // into it. Instead of handling this in every case, we handle it here.
   // RIP relative addressing: %rip + 32-bit displacement!
@@ -818,10 +819,11 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
   // displacements. It isn't very important, but this should be fixed for
   // consistency.
   if (!AM.ES && AM.JT != -1) return true;

   if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N)) {
     int64_t Val = AM.Disp + Cst->getSExtValue();
-    if (isInt32(Val)) {
+    if (X86::isOffsetSuitableForCodeModel(Val, M,
+                                          AM.hasSymbolicDisplacement())) {
       AM.Disp = Val;
       return false;
     }
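Note: the new hasSymbolicDisplacement argument is what makes this constant
merging model-aware. A bare constant displacement only has to fit in 32 bits,
but once a symbol is folded in, the encoded address is symbol + offset, so the
offset must also keep that sum inside the code model's window. A small sketch of
the failure mode the kernel rule guards against, with hypothetical values (not
part of the commit):

    #include <cassert>
    #include <cstdint>

    int main() {
      // A symbol at the very bottom of the kernel's top-2GB window: -2^31 when
      // viewed as a sign-extended 64-bit value.
      int64_t Symbol = INT32_MIN;
      int64_t Offset = -8;             // fits in 32 bits on its own...
      int64_t Addr = Symbol + Offset;  // ...but the sum leaves [-2^31, 2^31)
      assert(Addr < INT32_MIN);        // no longer encodable as a disp32
      return 0;
    }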
@@ -833,7 +835,9 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
   default: break;
   case ISD::Constant: {
     uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
-    if (!is64Bit || isInt32(AM.Disp + Val)) {
+    if (!is64Bit ||
+        X86::isOffsetSuitableForCodeModel(AM.Disp + Val, M,
+                                          AM.hasSymbolicDisplacement())) {
       AM.Disp += Val;
       return false;
     }
@@ -889,7 +893,9 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
       ConstantSDNode *AddVal =
         cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
       uint64_t Disp = AM.Disp + (AddVal->getSExtValue() << Val);
-      if (!is64Bit || isInt32(Disp))
+      if (!is64Bit ||
+          X86::isOffsetSuitableForCodeModel(Disp, M,
+                                            AM.hasSymbolicDisplacement()))
         AM.Disp = Disp;
       else
         AM.IndexReg = ShVal;
@@ -931,7 +937,9 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
           cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
         uint64_t Disp = AM.Disp + AddVal->getSExtValue() *
                                   CN->getZExtValue();
-        if (!is64Bit || isInt32(Disp))
+        if (!is64Bit ||
+            X86::isOffsetSuitableForCodeModel(Disp, M,
+                                              AM.hasSymbolicDisplacement()))
           AM.Disp = Disp;
         else
           Reg = N.getNode()->getOperand(0);
@@ -1050,7 +1058,9 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
         // Address could not have picked a GV address for the displacement.
         AM.GV == NULL &&
         // On x86-64, the resultant disp must fit in 32-bits.
-        (!is64Bit || isInt32(AM.Disp + Offset)) &&
+        (!is64Bit ||
+         X86::isOffsetSuitableForCodeModel(AM.Disp + Offset, M,
+                                           AM.hasSymbolicDisplacement())) &&
         // Check to see if the LHS & C is zero.
         CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
       AM.Disp += Offset;
lib/Target/X86/X86ISelLowering.cpp

@@ -2126,6 +2126,36 @@ SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
 }

+
+bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
+                                       bool hasSymbolicDisplacement) {
+  // Offset should fit into 32 bit immediate field.
+  if (!isInt32(Offset))
+    return false;
+
+  // If we don't have a symbolic displacement - we don't have any extra
+  // restrictions.
+  if (!hasSymbolicDisplacement)
+    return true;
+
+  // FIXME: Some tweaks might be needed for medium code model.
+  if (M != CodeModel::Small && M != CodeModel::Kernel)
+    return false;
+
+  // For the small code model, we assume that the latest object is 16MB below
+  // the end of the 31-bit boundary. We may also accept pretty large negative
+  // constants knowing that all objects are in the positive half of address space.
+  if (M == CodeModel::Small && Offset < 16*1024*1024)
+    return true;
+
+  // For the kernel code model, we know that all objects reside in the negative
+  // half of the 32-bit address space. We must not accept negative offsets, since
+  // they may be just off, but we may accept pretty large positive ones.
+  if (M == CodeModel::Kernel && Offset > 0)
+    return true;
+
+  return false;
+}
+
 /// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86
 /// specific condition code, returning the condition code and the LHS/RHS of the
 /// comparison to make.
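Note: a standalone model of the predicate above, handy for checking the boundary
cases it encodes. This is a sketch that mirrors the committed logic under the
same assumptions; it is not LLVM API:

    #include <cassert>
    #include <cstdint>

    enum Model { Small, Kernel, Other };

    static bool offsetOK(int64_t Offset, Model M, bool HasSymbolicDisp = true) {
      if (Offset < INT32_MIN || Offset > INT32_MAX)
        return false;                           // must fit a signed 32-bit field
      if (!HasSymbolicDisp)
        return true;                            // plain constants: no extra rules
      if (M != Small && M != Kernel)
        return false;
      if (M == Small && Offset < 16*1024*1024)
        return true;                            // sum stays below the 2^31 boundary
      if (M == Kernel && Offset > 0)
        return true;                            // sum stays inside the top 2GB
      return false;
    }

    int main() {
      assert(offsetOK(-4096, Small));           // large negative: fine in small model
      assert(!offsetOK(32*1024*1024, Small));   // could cross the 2^31 boundary
      assert(offsetOK(4096, Kernel));           // positive: fine in kernel model
      assert(!offsetOK(-1, Kernel));            // negative may fall out of the window
      return 0;
    }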
@@ -4440,9 +4470,10 @@ X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
   // global base reg.
   unsigned char OpFlag = 0;
   unsigned WrapperKind = X86ISD::Wrapper;
+  CodeModel::Model M = getTargetMachine().getCodeModel();

   if (Subtarget->isPICStyleRIPRel() &&
-      getTargetMachine().getCodeModel() == CodeModel::Small)
+      (M == CodeModel::Small || M == CodeModel::Kernel))
     WrapperKind = X86ISD::WrapperRIP;
   else if (Subtarget->isPICStyleGOT())
     OpFlag = X86II::MO_GOTOFF;
@@ -4472,9 +4503,10 @@ SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
   // global base reg.
   unsigned char OpFlag = 0;
   unsigned WrapperKind = X86ISD::Wrapper;
+  CodeModel::Model M = getTargetMachine().getCodeModel();

   if (Subtarget->isPICStyleRIPRel() &&
-      getTargetMachine().getCodeModel() == CodeModel::Small)
+      (M == CodeModel::Small || M == CodeModel::Kernel))
     WrapperKind = X86ISD::WrapperRIP;
   else if (Subtarget->isPICStyleGOT())
     OpFlag = X86II::MO_GOTOFF;
@@ -4505,8 +4537,10 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) {
   // global base reg.
   unsigned char OpFlag = 0;
   unsigned WrapperKind = X86ISD::Wrapper;
+  CodeModel::Model M = getTargetMachine().getCodeModel();
+
   if (Subtarget->isPICStyleRIPRel() &&
-      getTargetMachine().getCodeModel() == CodeModel::Small)
+      (M == CodeModel::Small || M == CodeModel::Kernel))
     WrapperKind = X86ISD::WrapperRIP;
   else if (Subtarget->isPICStyleGOT())
     OpFlag = X86II::MO_GOTOFF;
@@ -4540,8 +4574,10 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
   // offset if it is legal.
   unsigned char OpFlags =
     Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
+  CodeModel::Model M = getTargetMachine().getCodeModel();
   SDValue Result;
-  if (OpFlags == X86II::MO_NO_FLAG && isInt32(Offset)) {
+  if (OpFlags == X86II::MO_NO_FLAG &&
+      X86::isOffsetSuitableForCodeModel(Offset, M)) {
     // A direct static reference to a global.
     Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), Offset);
     Offset = 0;
@@ -4550,7 +4586,7 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
   }

   if (Subtarget->isPICStyleRIPRel() &&
-      getTargetMachine().getCodeModel() == CodeModel::Small)
+      (M == CodeModel::Small || M == CodeModel::Kernel))
     Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
   else
     Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
@@ -7049,32 +7085,28 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
 bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                               const Type *Ty) const {
   // X86 supports extremely general addressing modes.
+  CodeModel::Model M = getTargetMachine().getCodeModel();
+
   // X86 allows a sign-extended 32-bit immediate field as a displacement.
-  if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1)
+  if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL))
     return false;

   if (AM.BaseGV) {
     unsigned GVFlags =
       Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());

     // If a reference to this global requires an extra load, we can't fold it.
     if (isGlobalStubReference(GVFlags))
       return false;

     // If BaseGV requires a register for the PIC base, we cannot also have a
     // BaseReg specified.
     if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
       return false;

-    // X86-64 only supports addr of globals in small code model.
-    if (Subtarget->is64Bit()) {
-      if (getTargetMachine().getCodeModel() != CodeModel::Small)
-        return false;
-
-      // If lower 4G is not available, then we must use rip-relative addressing.
-      if (AM.BaseOffs || AM.Scale > 1)
-        return false;
-    }
+    // If lower 4G is not available, then we must use rip-relative addressing.
+    if (Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
+      return false;
   }

   switch (AM.Scale) {
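Note: this hunk is the most plausible home of the commit message's "subtle bug
with small code model": the old bound only rejected BaseOffs outside roughly
±2^32, which still admitted displacements too large for a sign-extended 32-bit
immediate. A quick comparison of the two bounds, using the new predicate's first
gate for the new side (a sketch, not part of the commit):

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t Off = 3LL << 30;  // 3GB
      bool OldOK = !(Off <= -(1LL << 32) || Off >= (1LL << 32) - 1);
      bool NewOK = (Off >= INT32_MIN && Off <= INT32_MAX);
      assert(OldOK && !NewOK);  // the old check accepted an unencodable value
      return 0;
    }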
lib/Target/X86/X86ISelLowering.h

@@ -336,6 +336,11 @@ namespace llvm {
   /// isZeroNode - Returns true if Elt is a constant zero or a floating point
   /// constant +0.0.
   bool isZeroNode(SDValue Elt);
+
+  /// isOffsetSuitableForCodeModel - Returns true if the given offset fits into
+  /// the displacement field of the instruction.
+  bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
+                                    bool hasSymbolicDisplacement = true);
 }

 //===--------------------------------------------------------------------===//