Replace explicit pointer-size constants with TargetData queries.
No functionality change.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@55996 91177308-0d34-0410-b5e6-96231b3b80d8
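The change follows one pattern throughout: X86TargetLowering caches a const TargetData pointer (TD) once in its constructor and asks it for the pointer size, instead of branching on Subtarget->is64Bit() and hard-coding 8 or 4 at each use. A minimal standalone sketch of that refactoring, not LLVM code; the type and member names below are illustrative only:

#include <cstdio>

// Stand-in for TargetData: the one place that knows the pointer size.
struct ExampleTargetData {
  unsigned PointerSizeInBytes;
  unsigned getPointerSize() const { return PointerSizeInBytes; }
};

struct ExampleLowering {
  const ExampleTargetData *TD;   // cached once, like the new TD member in this commit
  bool Is64Bit;

  // Before: the size was recomputed from the subtarget at every use.
  unsigned slotSizeOld() const { return Is64Bit ? 8 : 4; }
  // After: every use asks the target description instead.
  unsigned slotSizeNew() const { return TD->getPointerSize(); }
};

int main() {
  ExampleTargetData TD64{8}, TD32{4};
  ExampleLowering L64{&TD64, true}, L32{&TD32, false};
  std::printf("%u %u %u %u\n", L64.slotSizeOld(), L64.slotSizeNew(),
              L32.slotSizeOld(), L32.slotSizeNew());   // prints: 8 8 4 4
  return 0;
}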
@@ -49,10 +49,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
   X86ScalarSSEf64 = Subtarget->hasSSE2();
   X86ScalarSSEf32 = Subtarget->hasSSE1();
   X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
 
   bool Fast = false;
 
   RegInfo = TM.getRegisterInfo();
+  TD = getTargetData();
 
   // Set up the TargetLowering object.
 
@@ -812,7 +813,7 @@ static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
 unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
   if (Subtarget->is64Bit()) {
     // Max of 8 and alignment of type.
-    unsigned TyAlign = getTargetData()->getABITypeAlignment(Ty);
+    unsigned TyAlign = TD->getABITypeAlignment(Ty);
     if (TyAlign > 8)
       return TyAlign;
     return 8;
@@ -1832,7 +1833,7 @@ unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
   unsigned StackAlignment = TFI.getStackAlignment();
   uint64_t AlignMask = StackAlignment - 1;
   int64_t Offset = StackSize;
-  unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4;
+  uint64_t SlotSize = TD->getPointerSize();
   if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
     // Number smaller than 12 so just add the difference.
     Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
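For context, GetAlignedArgumentStackSize pads the argument area so that the area plus one return-address slot ends on a stack-alignment boundary; the hunk above only changes where SlotSize comes from. A small standalone sketch of the branch visible in the hunk, using assumed example values (16-byte stack alignment, 8-byte slot); the other branch of the real function is outside the quoted hunk:

#include <cstdint>
#include <cstdio>

uint64_t alignArgStackSize(uint64_t StackSize, uint64_t StackAlignment,
                           uint64_t SlotSize) {
  uint64_t AlignMask = StackAlignment - 1;
  int64_t Offset = StackSize;
  if ((Offset & AlignMask) <= (StackAlignment - SlotSize))
    // Pad so that Offset + SlotSize lands exactly on the alignment boundary.
    Offset += (StackAlignment - SlotSize) - (Offset & AlignMask);
  // (The else case of the real function is not shown in this hunk.)
  return Offset;
}

int main() {
  // 20 bytes of arguments, 16-byte alignment, 8-byte return-address slot:
  // padded to 24, so 24 + 8 == 32 stays 16-byte aligned.
  std::printf("%llu\n", (unsigned long long)alignArgStackSize(20, 16, 8));
  return 0;
}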
@@ -1894,14 +1895,11 @@ SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
   MachineFunction &MF = DAG.getMachineFunction();
   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
   int ReturnAddrIndex = FuncInfo->getRAIndex();
-
+  uint64_t SlotSize = TD->getPointerSize();
 
   if (ReturnAddrIndex == 0) {
     // Set up a frame object for the return address.
-    if (Subtarget->is64Bit())
-      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
-    else
-      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
+    ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize);
     FuncInfo->setRAIndex(ReturnAddrIndex);
   }
 
@@ -5092,7 +5090,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG,
   if (const char *bzeroEntry =
       V && V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
     MVT IntPtr = getPointerTy();
-    const Type *IntPtrTy = getTargetData()->getIntPtrType();
+    const Type *IntPtrTy = TD->getIntPtrType();
     TargetLowering::ArgListTy Args;
     TargetLowering::ArgListEntry Entry;
     Entry.Node = Dst;
@@ -5595,12 +5593,12 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
 
   SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
   return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI,
-                     DAG.getIntPtrConstant(Subtarget->is64Bit() ? 8 : 4));
+                     DAG.getIntPtrConstant(TD->getPointerSize()));
 }
 
 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
                                                      SelectionDAG &DAG) {
-  return DAG.getIntPtrConstant(Subtarget->is64Bit() ? 16 : 8);
+  return DAG.getIntPtrConstant(2*TD->getPointerSize());
 }
 
 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
@@ -5615,8 +5613,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
   unsigned StoreAddrReg = (Subtarget->is64Bit() ? X86::RCX : X86::ECX);
 
   SDValue StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame,
-                                  DAG.getIntPtrConstant(Subtarget->is64Bit() ?
-                                                        -8ULL: -4ULL));
+                                  DAG.getIntPtrConstant(-TD->getPointerSize()));
   StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset);
   Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0);
   Chain = DAG.getCopyToReg(Chain, StoreAddrReg, StoreAddr);
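Taken together, every replaced constant is recoverable from the pointer size, which is why the commit claims no functionality change: the slot size was 8 or 4, the frame-to-args offset 16 or 8 (two pointer-sized slots), and the EH store offset -8 or -4. A small standalone check under the assumption that getPointerSize() reports 8 bytes for x86-64 and 4 bytes for x86-32, exactly what the removed is64Bit() branches encoded:

#include <cassert>
#include <cstdint>

int main() {
  for (bool Is64Bit : {false, true}) {
    uint64_t PtrSize = Is64Bit ? 8 : 4;   // assumed TD->getPointerSize() value

    // Return-address slot / CreateFixedObject: was is64Bit() ? (8, -8) : (4, -4).
    assert(PtrSize == (Is64Bit ? 8u : 4u));
    assert(-(int64_t)PtrSize == (Is64Bit ? -8 : -4));

    // LowerFRAME_TO_ARGS_OFFSET: was is64Bit() ? 16 : 8.
    assert(2 * PtrSize == (Is64Bit ? 16u : 8u));

    // LowerEH_RETURN store address: was is64Bit() ? -8ULL : -4ULL.
    assert(-PtrSize == (Is64Bit ? -8ULL : -4ULL));
  }
  return 0;
}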
@@ -5712,7 +5709,7 @@ SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op,
              E = FTy->param_end(); I != E; ++I, ++Idx)
         if (Attrs.paramHasAttr(Idx, ParamAttr::InReg))
           // FIXME: should only count parameters that are lowered to integers.
-          InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32;
+          InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
 
       if (InRegCount > 2) {
         cerr << "Nest register in use - reduce number of inreg parameters!\n";
@@ -480,6 +480,7 @@ namespace llvm {
     /// make the right decision when generating code for different targets.
     const X86Subtarget *Subtarget;
     const X86RegisterInfo *RegInfo;
+    const TargetData *TD;
 
     /// X86StackPtr - X86 physical register used as stack ptr.
     unsigned X86StackPtr;