mirror of https://github.com/c64scene-ar/llvm-6502.git
synced 2025-01-14 00:32:55 +00:00
Better handle the kernel code model. Also, generalize things and fix one
subtle bug with the small code model.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@78255 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 44af10943c
commit b5e0172405
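The heart of the change is a single predicate: instead of checking isInt32(Offset) at every call site, displacement checks now go through X86::isOffsetSuitableForCodeModel, which also understands the kernel code model. Below is a minimal standalone sketch of the rule being centralized, assuming a stand-in CodeModel enum and namespace (not LLVM's actual headers):

#include <cstdint>

namespace sketch {

// Stand-in for LLVM's CodeModel::Model.
enum class CodeModel { Small, Kernel, Medium, Large };

// Mirrors the predicate added by this commit: the displacement must fit a
// sign-extended 32-bit immediate, and symbolic displacements get extra
// code-model-specific bounds.
inline bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel M,
                                         bool hasSymbolicDisplacement = true) {
  // Must fit the 32-bit immediate field (what isInt32 checked before).
  if (Offset < INT32_MIN || Offset > INT32_MAX)
    return false;

  // No symbolic displacement: no extra restrictions.
  if (!hasSymbolicDisplacement)
    return true;

  // Only the small and kernel models make guarantees about object placement.
  if (M != CodeModel::Small && M != CodeModel::Kernel)
    return false;

  // Small: objects sit in the low 2GB, so anything below 16MB short of the
  // 31-bit boundary is safe, including large negative offsets.
  if (M == CodeModel::Small && Offset < 16 * 1024 * 1024)
    return true;

  // Kernel: objects sit in the negative half of the 32-bit address space,
  // so only positive offsets are safe.
  return M == CodeModel::Kernel && Offset > 0;
}

} // namespace sketch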
@@ -705,7 +705,7 @@ bool X86DAGToDAGISel::MatchLoad(SDValue N, X86ISelAddressMode &AM) {
 /// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
 /// into an addressing mode. These wrap things that will resolve down into a
 /// symbol reference. If no match is possible, this returns true, otherwise it
-/// returns false.
+/// returns false.
 bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
   // If the addressing mode already has a symbol as the displacement, we can
   // never match another symbol.
@@ -713,28 +713,27 @@ bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
     return true;

   SDValue N0 = N.getOperand(0);

+  CodeModel::Model M = TM.getCodeModel();
+
   // Handle X86-64 rip-relative addresses. We check this before checking direct
   // folding because RIP is preferable to non-RIP accesses.
   if (Subtarget->is64Bit() &&
-      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
-      // they cannot be folded into immediate fields.
-      // FIXME: This can be improved for kernel and other models?
-      TM.getCodeModel() == CodeModel::Small &&
+      (M == CodeModel::Small || M == CodeModel::Kernel) &&
       // Base and index reg must be 0 in order to use %rip as base and lowering
       // must allow RIP.
       !AM.hasBaseOrIndexReg() && N.getOpcode() == X86ISD::WrapperRIP) {
     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
       int64_t Offset = AM.Disp + G->getOffset();
-      if (!isInt32(Offset)) return true;
+      if (!X86::isOffsetSuitableForCodeModel(Offset, M)) return true;
       AM.GV = G->getGlobal();
       AM.Disp = Offset;
       AM.SymbolFlags = G->getTargetFlags();
     } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
       int64_t Offset = AM.Disp + CP->getOffset();
-      if (!isInt32(Offset)) return true;
+      if (!X86::isOffsetSuitableForCodeModel(Offset, M)) return true;
       AM.CP = CP->getConstVal();
       AM.Align = CP->getAlignment();
       AM.Disp = Offset;
@@ -747,7 +746,7 @@ bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
     AM.JT = J->getIndex();
     AM.SymbolFlags = J->getTargetFlags();
   }
-
+
   if (N.getOpcode() == X86ISD::WrapperRIP)
     AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
   return false;
@@ -757,7 +756,7 @@ bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
   // X86-32 always and X86-64 when in -static -mcmodel=small mode. In 64-bit
   // mode, this results in a non-RIP-relative computation.
   if (!Subtarget->is64Bit() ||
-      (TM.getCodeModel() == CodeModel::Small &&
+      ((M == CodeModel::Small || M == CodeModel::Kernel) &&
       TM.getRelocationModel() == Reloc::Static)) {
     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
       AM.GV = G->getGlobal();
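In these two hunks, MatchWrapper first tries RIP-relative folding (preferred on x86-64) and then falls back to direct 32-bit absolute addressing. Condensing the gate condition from the diff into one hedged sketch, reusing the stand-in types from the sketch at the top of this page:

// Simplified view of MatchWrapper's RIP-relative gate, for illustration only:
// [%rip + disp32] needs the base/index slots free, a WrapperRIP node, and a
// code model under which the final displacement can be proven to fit.
inline bool canFoldRIPRelative(bool is64Bit, sketch::CodeModel M,
                               bool hasBaseOrIndexReg, bool isWrapperRIP) {
  return is64Bit &&
         (M == sketch::CodeModel::Small || M == sketch::CodeModel::Kernel) &&
         !hasBaseOrIndexReg && isWrapperRIP;
}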
@@ -809,7 +808,9 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
   // Limit recursion.
   if (Depth > 5)
     return MatchAddressBase(N, AM);

+  CodeModel::Model M = TM.getCodeModel();
+
   // If this is already a %rip relative address, we can only merge immediates
   // into it. Instead of handling this in every case, we handle it here.
   // RIP relative addressing: %rip + 32-bit displacement!
@@ -818,10 +819,11 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
   // displacements. It isn't very important, but this should be fixed for
   // consistency.
   if (!AM.ES && AM.JT != -1) return true;

   if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N)) {
     int64_t Val = AM.Disp + Cst->getSExtValue();
-    if (isInt32(Val)) {
+    if (X86::isOffsetSuitableForCodeModel(Val, M,
+                                          AM.hasSymbolicDisplacement())) {
       AM.Disp = Val;
       return false;
     }
@@ -833,7 +835,9 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
   default: break;
   case ISD::Constant: {
     uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
-    if (!is64Bit || isInt32(AM.Disp + Val)) {
+    if (!is64Bit ||
+        X86::isOffsetSuitableForCodeModel(AM.Disp + Val, M,
+                                          AM.hasSymbolicDisplacement())) {
       AM.Disp += Val;
       return false;
     }
@@ -889,7 +893,9 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
       ConstantSDNode *AddVal =
         cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
       uint64_t Disp = AM.Disp + (AddVal->getSExtValue() << Val);
-      if (!is64Bit || isInt32(Disp))
+      if (!is64Bit ||
+          X86::isOffsetSuitableForCodeModel(Disp, M,
+                                            AM.hasSymbolicDisplacement()))
         AM.Disp = Disp;
       else
         AM.IndexReg = ShVal;
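To see what the guarded fold above does, take an address expression like (x + 5) << 3: the constant part becomes a displacement of 40 while x becomes the index register with scale 8, but only if 40 passes the code-model check. A worked sketch with hypothetical values, reusing the helper from the sketch at the top of this page:

#include <cassert>

inline void shiftFoldExample() {
  const int64_t AddVal = 5;                  // constant addend inside the shift
  const int64_t Val = 3;                     // shift amount, i.e. scale = 1 << 3 = 8
  const int64_t Disp = 0 + (AddVal << Val);  // candidate displacement: 40
  // The fold keeps Disp only if the displacement suits the code model;
  // 40 is positive and fits in 32 bits, so even the kernel model accepts it.
  assert(sketch::isOffsetSuitableForCodeModel(Disp, sketch::CodeModel::Kernel,
                                              /*hasSymbolicDisplacement=*/true));
}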
@@ -931,7 +937,9 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
           cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
         uint64_t Disp = AM.Disp + AddVal->getSExtValue() *
                                   CN->getZExtValue();
-        if (!is64Bit || isInt32(Disp))
+        if (!is64Bit ||
+            X86::isOffsetSuitableForCodeModel(Disp, M,
+                                              AM.hasSymbolicDisplacement()))
          AM.Disp = Disp;
        else
          Reg = N.getNode()->getOperand(0);
@@ -1050,7 +1058,9 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
         // Address could not have picked a GV address for the displacement.
         AM.GV == NULL &&
         // On x86-64, the resultant disp must fit in 32-bits.
-        (!is64Bit || isInt32(AM.Disp + Offset)) &&
+        (!is64Bit ||
+         X86::isOffsetSuitableForCodeModel(AM.Disp + Offset, M,
+                                           AM.hasSymbolicDisplacement())) &&
         // Check to see if the LHS & C is zero.
         CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
       AM.Disp += Offset;
@@ -2126,6 +2126,36 @@ SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
 }

+bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
+                                       bool hasSymbolicDisplacement) {
+  // Offset should fit into the 32-bit immediate field.
+  if (!isInt32(Offset))
+    return false;
+
+  // If we don't have a symbolic displacement, we don't have any extra
+  // restrictions.
+  if (!hasSymbolicDisplacement)
+    return true;
+
+  // FIXME: Some tweaks might be needed for the medium code model.
+  if (M != CodeModel::Small && M != CodeModel::Kernel)
+    return false;
+
+  // For the small code model we assume the last object ends 16MB before the
+  // 31-bit boundary. We may also accept pretty large negative constants,
+  // knowing that all objects are in the positive half of the address space.
+  if (M == CodeModel::Small && Offset < 16*1024*1024)
+    return true;
+
+  // For the kernel code model we know that all objects reside in the negative
+  // half of the 32-bit address space. We must not accept negative offsets,
+  // since they may be just below the boundary, but large positive ones are fine.
+  if (M == CodeModel::Kernel && Offset > 0)
+    return true;
+
+  return false;
+}
+
 /// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86
 /// specific condition code, returning the condition code and the LHS/RHS of the
 /// comparison to make.
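To make the boundaries concrete, here is a quick check of the new rules with sample offsets, reusing the stand-in sketch from the top of this page (the comments show the expected results):

#include <cstdio>

int main() {
  using sketch::CodeModel;
  using sketch::isOffsetSuitableForCodeModel;
  // Small model: large negative offsets pass, 16MB and above do not.
  std::printf("%d\n", isOffsetSuitableForCodeModel(-100000000, CodeModel::Small));       // 1
  std::printf("%d\n", isOffsetSuitableForCodeModel(16 * 1024 * 1024, CodeModel::Small)); // 0
  // Kernel model: only positive offsets pass.
  std::printf("%d\n", isOffsetSuitableForCodeModel(-8, CodeModel::Kernel));              // 0
  std::printf("%d\n", isOffsetSuitableForCodeModel(1 << 20, CodeModel::Kernel));         // 1
  // Without a symbolic displacement, any 32-bit value is fine.
  std::printf("%d\n", isOffsetSuitableForCodeModel(-8, CodeModel::Kernel, false));       // 1
  return 0;
}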
@@ -4440,9 +4470,10 @@ X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
   // global base reg.
   unsigned char OpFlag = 0;
   unsigned WrapperKind = X86ISD::Wrapper;
+  CodeModel::Model M = getTargetMachine().getCodeModel();

   if (Subtarget->isPICStyleRIPRel() &&
-      getTargetMachine().getCodeModel() == CodeModel::Small)
+      (M == CodeModel::Small || M == CodeModel::Kernel))
     WrapperKind = X86ISD::WrapperRIP;
   else if (Subtarget->isPICStyleGOT())
     OpFlag = X86II::MO_GOTOFF;
@@ -4472,9 +4503,10 @@ SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
   // global base reg.
   unsigned char OpFlag = 0;
   unsigned WrapperKind = X86ISD::Wrapper;
+  CodeModel::Model M = getTargetMachine().getCodeModel();

   if (Subtarget->isPICStyleRIPRel() &&
-      getTargetMachine().getCodeModel() == CodeModel::Small)
+      (M == CodeModel::Small || M == CodeModel::Kernel))
     WrapperKind = X86ISD::WrapperRIP;
   else if (Subtarget->isPICStyleGOT())
     OpFlag = X86II::MO_GOTOFF;
@@ -4505,8 +4537,10 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) {
   // global base reg.
   unsigned char OpFlag = 0;
   unsigned WrapperKind = X86ISD::Wrapper;
+  CodeModel::Model M = getTargetMachine().getCodeModel();
+
   if (Subtarget->isPICStyleRIPRel() &&
-      getTargetMachine().getCodeModel() == CodeModel::Small)
+      (M == CodeModel::Small || M == CodeModel::Kernel))
     WrapperKind = X86ISD::WrapperRIP;
   else if (Subtarget->isPICStyleGOT())
     OpFlag = X86II::MO_GOTOFF;
@@ -4540,8 +4574,10 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
   // offset if it is legal.
   unsigned char OpFlags =
     Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
+  CodeModel::Model M = getTargetMachine().getCodeModel();
   SDValue Result;
-  if (OpFlags == X86II::MO_NO_FLAG && isInt32(Offset)) {
+  if (OpFlags == X86II::MO_NO_FLAG &&
+      X86::isOffsetSuitableForCodeModel(Offset, M)) {
     // A direct static reference to a global.
     Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), Offset);
     Offset = 0;
@@ -4550,7 +4586,7 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
   }

   if (Subtarget->isPICStyleRIPRel() &&
-      getTargetMachine().getCodeModel() == CodeModel::Small)
+      (M == CodeModel::Small || M == CodeModel::Kernel))
     Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
   else
     Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
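The four lowering hunks above all make the same change: the RIP wrapper is now chosen for the kernel code model too, not just small. A condensed sketch of that selection, again with stand-in types rather than LLVM's:

enum class WrapperKindSketch { Wrapper, WrapperRIP };

// Shared shape of the checks in LowerConstantPool, LowerJumpTable,
// LowerExternalSymbol and LowerGlobalAddress after this commit.
inline WrapperKindSketch pickWrapper(bool picStyleRIPRel, sketch::CodeModel M) {
  if (picStyleRIPRel &&
      (M == sketch::CodeModel::Small || M == sketch::CodeModel::Kernel))
    return WrapperKindSketch::WrapperRIP;  // address becomes %rip + disp32
  return WrapperKindSketch::Wrapper;       // absolute or GOT-relative wrapper
}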
@@ -7049,32 +7085,28 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
 bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                               const Type *Ty) const {
   // X86 supports extremely general addressing modes.
+  CodeModel::Model M = getTargetMachine().getCodeModel();

   // X86 allows a sign-extended 32-bit immediate field as a displacement.
-  if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1)
+  if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL))
     return false;

   if (AM.BaseGV) {
     unsigned GVFlags =
       Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());

     // If a reference to this global requires an extra load, we can't fold it.
     if (isGlobalStubReference(GVFlags))
       return false;

     // If BaseGV requires a register for the PIC base, we cannot also have a
     // BaseReg specified.
     if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
       return false;

-    // X86-64 only supports addr of globals in small code model.
-    if (Subtarget->is64Bit()) {
-      if (getTargetMachine().getCodeModel() != CodeModel::Small)
-        return false;
-      // If lower 4G is not available, then we must use rip-relative addressing.
-      if (AM.BaseOffs || AM.Scale > 1)
-        return false;
-    }
+    // If lower 4G is not available, then we must use rip-relative addressing.
+    if (Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
+      return false;
   }

   switch (AM.Scale) {
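isLegalAddressingMode now delegates the displacement range check to the same predicate, and the x86-64 global-base restriction is reduced to the rip-relative constraint. A hedged condensation of the new rule, with a hypothetical struct mirroring only the AddrMode fields used here:

struct AddrModeSketch {
  int64_t BaseOffs = 0;    // constant displacement
  bool HasBaseGV = false;  // a global is the base
  int64_t Scale = 0;       // index register scale
};

// Sketch of the post-commit x86-64 checks: the displacement must suit the
// code model, and a global base on x86-64 forbids any extra offset or scale,
// since the access has to stay rip-relative when the low 4G is unavailable.
inline bool isLegalX8664GlobalAddr(const AddrModeSketch &AM,
                                   sketch::CodeModel M) {
  if (!sketch::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.HasBaseGV))
    return false;
  if (AM.HasBaseGV && (AM.BaseOffs || AM.Scale > 1))
    return false;
  return true;
}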
@@ -336,6 +336,11 @@ namespace llvm {
   /// isZeroNode - Returns true if Elt is a constant zero or a floating point
   /// constant +0.0.
   bool isZeroNode(SDValue Elt);

+  /// isOffsetSuitableForCodeModel - Returns true if the given offset fits
+  /// into the displacement field of the instruction.
+  bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
+                                    bool hasSymbolicDisplacement = true);
 }

 //===--------------------------------------------------------------------===//