Reuse a bunch of cached subtargets and remove getSubtarget calls without a Function argument.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@227814 91177308-0d34-0410-b5e6-96231b3b80d8
Eric Christopher 2015-02-02 17:38:43 +00:00
parent aa6be3f734
commit 8115b6b867
11 changed files with 147 additions and 199 deletions
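
The pattern applied throughout the hunks below is to query the per-function cached X86Subtarget once, via MF.getSubtarget<X86Subtarget>(), and to derive TargetInstrInfo, X86RegisterInfo, and TargetFrameLowering from it, instead of re-querying MF.getTarget().getSubtarget<X86Subtarget>() or getSubtargetImpl() and static_cast-ing the results. A condensed before/after sketch of that pattern follows; it is not standalone-compilable (it assumes the usual LLVM X86 backend headers), and the function name emitExample is made up for illustration, but the accessors are the ones exercised in the diff.

// Before: several independent lookups, some routed through the TargetMachine,
// plus a static_cast to recover the X86-specific register info.
void emitExample(MachineFunction &MF) {
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
  // ... uses of TII, RegInfo, STI ...
}

// After: one query of the cached subtarget; everything else is derived from
// it, and the X86Subtarget accessors already return the X86-specific types.
void emitExample(MachineFunction &MF) {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  const X86RegisterInfo *RegInfo = STI.getRegisterInfo();
  // ... uses of TII, RegInfo, STI ...
}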

lib/Target/X86/X86FrameLowering.cpp

@@ -429,8 +429,8 @@ void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
                                           MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MBBI,
                                           DebugLoc DL) {
-  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
-  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+  const TargetInstrInfo &TII = *STI.getInstrInfo();
   bool Is64Bit = STI.is64Bit();
   bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
@@ -573,15 +573,14 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
   MachineBasicBlock::iterator MBBI = MBB.begin();
   MachineFrameInfo *MFI = MF.getFrameInfo();
   const Function *Fn = MF.getFunction();
-  const X86RegisterInfo *RegInfo =
-      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
-  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+  const X86RegisterInfo *RegInfo = STI.getRegisterInfo();
+  const TargetInstrInfo &TII = *STI.getInstrInfo();
   MachineModuleInfo &MMI = MF.getMMI();
   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
   uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
   uint64_t StackSize = MFI->getStackSize();   // Number of bytes to allocate.
   bool HasFP = hasFP(MF);
-  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
   bool Is64Bit = STI.is64Bit();
   // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
   const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
@@ -595,8 +594,10 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
   unsigned StackAlign = getStackAlignment();
   unsigned SlotSize = RegInfo->getSlotSize();
   unsigned FramePtr = RegInfo->getFrameRegister(MF);
-  const unsigned MachineFramePtr = STI.isTarget64BitILP32() ?
-                 getX86SubSuperRegister(FramePtr, MVT::i64, false) : FramePtr;
+  const unsigned MachineFramePtr =
+      STI.isTarget64BitILP32()
+          ? getX86SubSuperRegister(FramePtr, MVT::i64, false)
+          : FramePtr;
   unsigned StackPtr = RegInfo->getStackRegister();
   unsigned BasePtr = RegInfo->getBaseRegister();
   DebugLoc DL;
@@ -973,14 +974,13 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                     MachineBasicBlock &MBB) const {
   const MachineFrameInfo *MFI = MF.getFrameInfo();
   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
-  const X86RegisterInfo *RegInfo =
-      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
-  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+  const X86RegisterInfo *RegInfo = STI.getRegisterInfo();
+  const TargetInstrInfo &TII = *STI.getInstrInfo();
   MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
   assert(MBBI != MBB.end() && "Returning block has no instructions");
   unsigned RetOpcode = MBBI->getOpcode();
   DebugLoc DL = MBBI->getDebugLoc();
-  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
   bool Is64Bit = STI.is64Bit();
   // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
   const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
@@ -989,8 +989,9 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
   unsigned StackAlign = getStackAlignment();
   unsigned SlotSize = RegInfo->getSlotSize();
   unsigned FramePtr = RegInfo->getFrameRegister(MF);
-  unsigned MachineFramePtr = Is64BitILP32 ?
-             getX86SubSuperRegister(FramePtr, MVT::i64, false) : FramePtr;
+  unsigned MachineFramePtr =
+      Is64BitILP32 ? getX86SubSuperRegister(FramePtr, MVT::i64, false)
+                   : FramePtr;
   unsigned StackPtr = RegInfo->getStackRegister();
   bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
@@ -1192,7 +1193,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
 int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
                                           int FI) const {
   const X86RegisterInfo *RegInfo =
-      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+      MF.getSubtarget<X86Subtarget>().getRegisterInfo();
   const MachineFrameInfo *MFI = MF.getFrameInfo();
   int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
   uint64_t StackSize = MFI->getStackSize();
@@ -1235,7 +1236,7 @@ int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
 int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                              unsigned &FrameReg) const {
   const X86RegisterInfo *RegInfo =
-      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+      MF.getSubtarget<X86Subtarget>().getRegisterInfo();
   // We can't calculate offset from frame pointer if the stack is realigned,
   // so enforce usage of stack/base pointer. The base pointer is used when we
   // have dynamic allocas in addition to dynamic realignment.
@@ -1256,7 +1257,7 @@ int X86FrameLowering::getFrameIndexOffsetFromSP(const MachineFunction &MF, int F
 {
 #ifndef NDEBUG
   const X86RegisterInfo *RegInfo =
-      static_cast<const X86RegisterInfo*>(MF.getSubtarget().getRegisterInfo());
+      MF.getSubtarget<X86Subtarget>().getRegisterInfo();
   // Note: LLVM arranges the stack as:
   // Args > Saved RetPC (<--FP) > CSRs > dynamic alignment (<--BP)
   //      > "Stack Slots" (<--SP)
@@ -1310,11 +1311,11 @@ int X86FrameLowering::getFrameIndexOffsetFromSP(const MachineFunction &MF, int F
   return Offset + StackSize;
 }
 // Simplified from getFrameIndexReference keeping only StackPointer cases
-int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF, int FI,
-                                                   unsigned &FrameReg) const {
+int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
+                                                   int FI,
+                                                   unsigned &FrameReg) const {
   const X86RegisterInfo *RegInfo =
-      static_cast<const X86RegisterInfo*>(MF.getSubtarget().getRegisterInfo());
+      MF.getSubtarget<X86Subtarget>().getRegisterInfo();
   assert(!RegInfo->hasBasePointer(MF) && "we don't handle this case");
   FrameReg = RegInfo->getStackRegister();
@@ -1326,7 +1327,7 @@ bool X86FrameLowering::assignCalleeSavedSpillSlots(
     std::vector<CalleeSavedInfo> &CSI) const {
   MachineFrameInfo *MFI = MF.getFrameInfo();
   const X86RegisterInfo *RegInfo =
-      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+      MF.getSubtarget<X86Subtarget>().getRegisterInfo();
   unsigned SlotSize = RegInfo->getSlotSize();
   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
@@ -1393,8 +1394,8 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
   DebugLoc DL = MBB.findDebugLoc(MI);
   MachineFunction &MF = *MBB.getParent();
-  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
-  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+  const TargetInstrInfo &TII = *STI.getInstrInfo();
   // Push GPRs. It increases frame size.
   unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
@@ -1441,8 +1442,8 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
   DebugLoc DL = MBB.findDebugLoc(MI);
   MachineFunction &MF = *MBB.getParent();
-  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
-  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+  const TargetInstrInfo &TII = *STI.getInstrInfo();
   // Reload XMMs from stack frame.
   for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
@@ -1473,7 +1474,7 @@ X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                        RegScavenger *RS) const {
   MachineFrameInfo *MFI = MF.getFrameInfo();
   const X86RegisterInfo *RegInfo =
-      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+      MF.getSubtarget<X86Subtarget>().getRegisterInfo();
   unsigned SlotSize = RegInfo->getSlotSize();
   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
@@ -1554,9 +1555,9 @@ void
 X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
   MachineBasicBlock &prologueMBB = MF.front();
   MachineFrameInfo *MFI = MF.getFrameInfo();
-  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+  const TargetInstrInfo &TII = *STI.getInstrInfo();
   uint64_t StackSize;
-  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
   bool Is64Bit = STI.is64Bit();
   const bool IsLP64 = STI.isTarget64BitLP64();
   unsigned TlsReg, TlsOffset;
@@ -1802,12 +1803,10 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
 ///       temp0 = sp - MaxStack
 ///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
 void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
-  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+  const TargetInstrInfo &TII = *STI.getInstrInfo();
   MachineFrameInfo *MFI = MF.getFrameInfo();
-  const unsigned SlotSize =
-      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo())
-          ->getSlotSize();
-  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+  const unsigned SlotSize = STI.getRegisterInfo()->getSlotSize();
   const bool Is64Bit = STI.is64Bit();
   const bool IsLP64 = STI.isTarget64BitLP64();
   DebugLoc DL;
@@ -1937,14 +1936,13 @@ void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
 void X86FrameLowering::
 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I) const {
-  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
-  const X86RegisterInfo &RegInfo = *static_cast<const X86RegisterInfo *>(
-                                       MF.getSubtarget().getRegisterInfo());
+  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+  const TargetInstrInfo &TII = *STI.getInstrInfo();
+  const X86RegisterInfo &RegInfo = *STI.getRegisterInfo();
   unsigned StackPtr = RegInfo.getStackRegister();
   bool reserveCallFrame = hasReservedCallFrame(MF);
   int Opcode = I->getOpcode();
   bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
-  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
   bool IsLP64 = STI.isTarget64BitLP64();
   DebugLoc DL = I->getDebugLoc();
   uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
@@ -1961,10 +1959,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
     // We need to keep the stack aligned properly. To do this, we round the
     // amount of space needed for the outgoing arguments up to the next
     // alignment boundary.
-    unsigned StackAlign = MF.getTarget()
-                              .getSubtargetImpl()
-                              ->getFrameLowering()
-                              ->getStackAlignment();
+    unsigned StackAlign = STI.getFrameLowering()->getStackAlignment();
     Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
     MachineInstr *New = nullptr;

lib/Target/X86/X86ISelDAGToDAG.cpp

@@ -156,9 +156,7 @@ namespace {
   public:
     explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
-        : SelectionDAGISel(tm, OptLevel),
-          Subtarget(&tm.getSubtarget<X86Subtarget>()),
-          OptForSize(false) {}
+        : SelectionDAGISel(tm, OptLevel), OptForSize(false) {}
 
     const char *getPassName() const override {
       return "X86 DAG->DAG Instruction Selection";
@@ -166,7 +164,7 @@ namespace {
     bool runOnMachineFunction(MachineFunction &MF) override {
       // Reset the subtarget each time through.
-      Subtarget = &TM.getSubtarget<X86Subtarget>();
+      Subtarget = &MF.getSubtarget<X86Subtarget>();
       SelectionDAGISel::runOnMachineFunction(MF);
       return true;
     }
@@ -298,7 +296,7 @@ namespace {
     /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
     /// to the target-specific type.
     const X86InstrInfo *getInstrInfo() const {
-      return getTargetMachine().getSubtargetImpl()->getInstrInfo();
+      return Subtarget->getInstrInfo();
     }
 
     /// \brief Address-mode matching performs shift-of-and to and-of-shift
@@ -573,7 +571,7 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
 /// the main function.
 void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                              MachineFrameInfo *MFI) {
-  const TargetInstrInfo *TII = TM.getSubtargetImpl()->getInstrInfo();
+  const TargetInstrInfo *TII = getInstrInfo();
   if (Subtarget->isTargetCygMing()) {
     unsigned CallOp =
       Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;

lib/Target/X86/X86ISelLowering.cpp

@@ -202,9 +202,9 @@ static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
   return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
 }
 
-X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM)
-    : TargetLowering(TM) {
-  Subtarget = &TM.getSubtarget<X86Subtarget>();
+X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
+                                     const X86Subtarget &STI)
+    : TargetLowering(TM), Subtarget(&STI) {
   X86ScalarSSEf64 = Subtarget->hasSSE2();
   X86ScalarSSEf32 = Subtarget->hasSSE1();
   TD = getDataLayout();
@@ -226,8 +226,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM)
     setSchedulingPreference(Sched::ILP);
   else
     setSchedulingPreference(Sched::RegPressure);
-  const X86RegisterInfo *RegInfo =
-      TM.getSubtarget<X86Subtarget>().getRegisterInfo();
+  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
   setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
 
   // Bypass expensive divides on Atom when compiling with O2.
@@ -2607,7 +2606,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
     }
 
     if (IsWin64) {
-      const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
+      const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
       // Get to the caller-allocated home save location. Add 8 to account
       // for the return address.
      int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
@@ -2896,8 +2895,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   // Walk the register/memloc assignments, inserting copies/loads. In the case
   // of tail call optimization arguments are handle later.
-  const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
-      DAG.getSubtarget().getRegisterInfo());
+  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     // Skip inalloca arguments, they have already been written.
     ISD::ArgFlagsTy Flags = Outs[i].Flags;
@@ -3176,7 +3174,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
                                          OpFlags);
-  } else if (Subtarget->isTarget64BitILP32() && Callee->getValueType(0) == MVT::i32) {
+  } else if (Subtarget->isTarget64BitILP32() &&
+             Callee->getValueType(0) == MVT::i32) {
     // Zero-extend the 32-bit Callee address into a 64-bit according to x32 ABI
     Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
   }
@@ -3205,7 +3204,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                   RegsToPass[i].second.getValueType()));
 
   // Add a register mask operand representing the call-preserved registers.
-  const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
+  const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
   const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
   assert(Mask && "Missing call preserved mask for calling convention");
   Ops.push_back(DAG.getRegisterMask(Mask));
@@ -3294,11 +3293,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
 unsigned
 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
                                                SelectionDAG& DAG) const {
-  MachineFunction &MF = DAG.getMachineFunction();
-  const TargetMachine &TM = MF.getTarget();
-  const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
-      TM.getSubtargetImpl()->getRegisterInfo());
-  const TargetFrameLowering &TFI = *TM.getSubtargetImpl()->getFrameLowering();
+  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+  const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
   unsigned StackAlignment = TFI.getStackAlignment();
   uint64_t AlignMask = StackAlignment - 1;
   int64_t Offset = StackSize;
@@ -3412,8 +3408,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
   // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
   // emit a special epilogue.
-  const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
-      DAG.getSubtarget().getRegisterInfo());
+  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
   if (RegInfo->needsStackRealignment(MF))
     return false;
@@ -3525,8 +3520,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
     // the caller's fixed stack objects.
     MachineFrameInfo *MFI = MF.getFrameInfo();
     const MachineRegisterInfo *MRI = &MF.getRegInfo();
-    const X86InstrInfo *TII =
-        static_cast<const X86InstrInfo *>(DAG.getSubtarget().getInstrInfo());
+    const X86InstrInfo *TII = Subtarget->getInstrInfo();
     for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
       CCValAssign &VA = ArgLocs[i];
       SDValue Arg = OutVals[i];
@@ -3680,8 +3674,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
   MachineFunction &MF = DAG.getMachineFunction();
-  const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
-      DAG.getSubtarget().getRegisterInfo());
+  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
   int ReturnAddrIndex = FuncInfo->getRAIndex();
@@ -7178,7 +7171,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
       return Sh;
 
     // For SSE 4.1, use insertps to put the high elements into the low element.
-    if (getSubtarget()->hasSSE41()) {
+    if (Subtarget->hasSSE41()) {
       SDValue Result;
       if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
         Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
@@ -16602,7 +16595,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
     SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
     Chain = SP.getValue(1);
     unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
-    const TargetFrameLowering &TFI = *DAG.getSubtarget().getFrameLowering();
+    const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
     unsigned StackAlign = TFI.getStackAlignment();
     Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
     if (Align > StackAlign)
@@ -16660,8 +16653,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
     Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
 
-    const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
-        DAG.getSubtarget().getRegisterInfo());
+    const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
     unsigned SPReg = RegInfo->getStackRegister();
     SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
     Chain = SP.getValue(1);
@@ -16919,7 +16911,7 @@ static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
   }
 
   const X86Subtarget &Subtarget =
-      DAG.getTarget().getSubtarget<X86Subtarget>();
+      static_cast<const X86Subtarget &>(DAG.getSubtarget());
   if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
       ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
     // Let the shuffle legalizer expand this shift amount node.
@@ -17709,8 +17701,7 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
   if (Depth > 0) {
     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
-    const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
-        DAG.getSubtarget().getRegisterInfo());
+    const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
     SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                        DAG.getNode(ISD::ADD, dl, PtrVT,
@@ -17731,8 +17722,7 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
   EVT VT = Op.getValueType();
   SDLoc dl(Op);  // FIXME probably not meaningful
   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
-  const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
-      DAG.getSubtarget().getRegisterInfo());
+  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
   unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(
       DAG.getMachineFunction());
   assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
@@ -17761,8 +17751,7 @@ unsigned X86TargetLowering::getRegisterByName(const char* RegName,
 
 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
                                                      SelectionDAG &DAG) const {
-  const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
-      DAG.getSubtarget().getRegisterInfo());
+  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
   return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
 }
@@ -17773,8 +17762,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
   SDLoc dl (Op);
 
   EVT PtrVT = getPointerTy();
-  const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
-      DAG.getSubtarget().getRegisterInfo());
+  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
   unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
   assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
           (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
@@ -17821,7 +17809,7 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
   SDLoc dl (Op);
   const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
-  const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
+  const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
 
   if (Subtarget->is64Bit()) {
     SDValue OutChains[6];
@@ -17984,8 +17972,7 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
   */
 
   MachineFunction &MF = DAG.getMachineFunction();
-  const TargetMachine &TM = MF.getTarget();
-  const TargetFrameLowering &TFI = *TM.getSubtargetImpl()->getFrameLowering();
+  const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
   unsigned StackAlignment = TFI.getStackAlignment();
   MVT VT = Op.getSimpleValueType();
   SDLoc DL(Op);
@@ -19049,14 +19036,12 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
 bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
-  const X86Subtarget &Subtarget =
-      getTargetMachine().getSubtarget<X86Subtarget>();
   unsigned OpWidth = MemType->getPrimitiveSizeInBits();
 
   if (OpWidth == 64)
-    return !Subtarget.is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
+    return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
   else if (OpWidth == 128)
-    return Subtarget.hasCmpxchg16b();
+    return Subtarget->hasCmpxchg16b();
   else
     return false;
 }
@@ -19073,9 +19058,7 @@ bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
 }
 
 bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
-  const X86Subtarget &Subtarget =
-      getTargetMachine().getSubtarget<X86Subtarget>();
-  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
+  unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
   const Type *MemType = AI->getType();
 
   // If the operand is too big, we must see if cmpxchg8/16b is available
@@ -19118,9 +19101,7 @@ static bool hasMFENCE(const X86Subtarget& Subtarget) {
 
 LoadInst *
 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
-  const X86Subtarget &Subtarget =
-      getTargetMachine().getSubtarget<X86Subtarget>();
-  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
+  unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
   const Type *MemType = AI->getType();
 
   // Accesses larger than the native width are turned into cmpxchg/libcalls, so
   // there is no benefit in turning such RMWs into loads, and it is actually
@@ -19156,7 +19137,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
     // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
    // the IR level, so we must wrap it in an intrinsic.
     return nullptr;
-  } else if (hasMFENCE(Subtarget)) {
+  } else if (hasMFENCE(*Subtarget)) {
    Function *MFence = llvm::Intrinsic::getDeclaration(M,
                                                        Intrinsic::x86_sse2_mfence);
    Builder.CreateCall(MFence);
@@ -20433,11 +20414,10 @@ static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
   return BB;
 }
 
-static MachineBasicBlock * EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
-                                       const TargetInstrInfo *TII,
-                                       const X86Subtarget* Subtarget) {
+static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
+                                      const X86Subtarget *Subtarget) {
   DebugLoc dl = MI->getDebugLoc();
+  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
 
   // Address into RAX/EAX, other two args into ECX, EDX.
   unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
   unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
@@ -20459,9 +20439,8 @@ static MachineBasicBlock * EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
 }
 
 MachineBasicBlock *
-X86TargetLowering::EmitVAARG64WithCustomInserter(
-                   MachineInstr *MI,
-                   MachineBasicBlock *MBB) const {
+X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
+                                                 MachineBasicBlock *MBB) const {
   // Emit va_arg instruction on X86-64.
 
   // Operands to this pseudo-instruction:
@@ -20491,7 +20470,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(
   MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
 
   // Machine Information
-  const TargetInstrInfo *TII = MBB->getParent()->getSubtarget().getInstrInfo();
+  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
   const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
   const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
@@ -20747,7 +20726,7 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
   XMMSaveMBB->addSuccessor(EndMBB);
 
   // Now add the instructions.
-  const TargetInstrInfo *TII = MBB->getParent()->getSubtarget().getInstrInfo();
+  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
   DebugLoc DL = MI->getDebugLoc();
 
   unsigned CountReg = MI->getOperand(0).getReg();
@@ -20830,7 +20809,7 @@ static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
 MachineBasicBlock *
 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
                                      MachineBasicBlock *BB) const {
-  const TargetInstrInfo *TII = BB->getParent()->getSubtarget().getInstrInfo();
+  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
   DebugLoc DL = MI->getDebugLoc();
 
   // To "insert" a SELECT_CC instruction, we actually have to insert the
@@ -20856,8 +20835,7 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
   // If the EFLAGS register isn't dead in the terminator, then claim that it's
   // live into the sink and copy blocks.
-  const TargetRegisterInfo *TRI =
-      BB->getParent()->getSubtarget().getRegisterInfo();
+  const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
   if (!MI->killsRegister(X86::EFLAGS) &&
       !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
     copy0MBB->addLiveIn(X86::EFLAGS);
@@ -20899,7 +20877,7 @@ MachineBasicBlock *
 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
                                         MachineBasicBlock *BB) const {
   MachineFunction *MF = BB->getParent();
-  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
+  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
   DebugLoc DL = MI->getDebugLoc();
   const BasicBlock *LLVM_BB = BB->getBasicBlock();
@@ -20972,10 +20950,8 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
   BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
 
   // Calls into a routine in libgcc to allocate more space from the heap.
-  const uint32_t *RegMask = MF->getTarget()
-                                .getSubtargetImpl()
-                                ->getRegisterInfo()
-                                ->getCallPreservedMask(CallingConv::C);
+  const uint32_t *RegMask =
+      Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
   if (IsLP64) {
     BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
       .addReg(sizeVReg);
@@ -21050,8 +21026,7 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
   // or EAX and doing an indirect call. The return value will then
   // be in the normal return register.
   MachineFunction *F = BB->getParent();
-  const X86InstrInfo *TII =
-      static_cast<const X86InstrInfo *>(F->getSubtarget().getInstrInfo());
+  const X86InstrInfo *TII = Subtarget->getInstrInfo();
   DebugLoc DL = MI->getDebugLoc();
 
   assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
@@ -21060,10 +21035,8 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
   // Get a register mask for the lowered call.
   // FIXME: The 32-bit calls have non-standard calling conventions. Use a
   // proper register mask.
-  const uint32_t *RegMask = F->getTarget()
-                                .getSubtargetImpl()
-                                ->getRegisterInfo()
-                                ->getCallPreservedMask(CallingConv::C);
+  const uint32_t *RegMask =
+      Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
   if (Subtarget->is64Bit()) {
     MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                       TII->get(X86::MOV64rm), X86::RDI)
@@ -21108,7 +21081,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
                                     MachineBasicBlock *MBB) const {
   DebugLoc DL = MI->getDebugLoc();
   MachineFunction *MF = MBB->getParent();
-  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
+  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
   MachineRegisterInfo &MRI = MF->getRegInfo();
 
   const BasicBlock *BB = MBB->getBasicBlock();
@@ -21215,8 +21188,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
   MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
           .addMBB(restoreMBB);
 
-  const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
-      MF->getSubtarget().getRegisterInfo());
+  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
   MIB.addRegMask(RegInfo->getNoPreservedMask());
   thisMBB->addSuccessor(mainMBB);
   thisMBB->addSuccessor(restoreMBB);
@@ -21234,8 +21206,8 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
   // restoreMBB:
   if (RegInfo->hasBasePointer(*MF)) {
-    const X86Subtarget &STI = MF->getTarget().getSubtarget<X86Subtarget>();
-    const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
+    const bool Uses64BitFramePtr =
+        Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
     X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
     X86FI->setRestoreBasePointer(MF);
     unsigned FramePtr = RegInfo->getFrameRegister(*MF);
@@ -21258,7 +21230,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
                                      MachineBasicBlock *MBB) const {
   DebugLoc DL = MI->getDebugLoc();
   MachineFunction *MF = MBB->getParent();
-  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
+  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
   MachineRegisterInfo &MRI = MF->getRegInfo();
 
   // Memory Reference
@@ -21273,8 +21245,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
     (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
   unsigned Tmp = MRI.createVirtualRegister(RC);
   // Since FP is only updated here but NOT referenced, it's treated as GPR.
-  const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
-      MF->getSubtarget().getRegisterInfo());
+  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
   unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
   unsigned SP = RegInfo->getStackRegister();
@@ -21393,7 +21364,7 @@ X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
     default: llvm_unreachable("Unrecognized FMA variant.");
   }
 
-  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
   MachineInstrBuilder MIB =
       BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
           .addOperand(MI->getOperand(0))
@@ -21461,7 +21432,7 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
   case X86::FP80_TO_INT32_IN_MEM:
   case X86::FP80_TO_INT64_IN_MEM: {
     MachineFunction *F = BB->getParent();
-    const TargetInstrInfo *TII = F->getSubtarget().getInstrInfo();
+    const TargetInstrInfo *TII = Subtarget->getInstrInfo();
     DebugLoc DL = MI->getDebugLoc();
 
     // Change the floating point control register to use "round towards zero"
@@ -21545,7 +21516,7 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
   case X86::VPCMPESTRM128MEM:
     assert(Subtarget->hasSSE42() &&
            "Target must have SSE4.2 or AVX features enabled");
-    return EmitPCMPSTRM(MI, BB, BB->getParent()->getSubtarget().getInstrInfo());
+    return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
 
   // String/text processing lowering.
   case X86::PCMPISTRIREG:
@@ -21558,16 +21529,15 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
   case X86::VPCMPESTRIMEM:
     assert(Subtarget->hasSSE42() &&
           "Target must have SSE4.2 or AVX features enabled");
-    return EmitPCMPSTRI(MI, BB, BB->getParent()->getSubtarget().getInstrInfo());
+    return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
 
   // Thread synchronization.
   case X86::MONITOR:
-    return EmitMonitor(MI, BB, BB->getParent()->getSubtarget().getInstrInfo(),
-                       Subtarget);
+    return EmitMonitor(MI, BB, Subtarget);
 
   // xbegin
   case X86::XBEGIN:
-    return EmitXBegin(MI, BB, BB->getParent()->getSubtarget().getInstrInfo());
+    return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
 
   case X86::VASTART_SAVE_XMM_REGS:
     return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
@@ -25682,7 +25652,7 @@ static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
 }
 
 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
-                                        const X86TargetLowering *XTLI) {
+                                        const X86Subtarget *Subtarget) {
   // First try to optimize away the conversion entirely when it's
   // conditionally from a constant. Vectors only.
   SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
@@ -25708,10 +25678,9 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
     EVT VT = Ld->getValueType(0);
     if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
         ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
-        !XTLI->getSubtarget()->is64Bit() &&
-        VT == MVT::i64) {
-      SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0),
-                                          Ld->getChain(), Op0, DAG);
+        !Subtarget->is64Bit() && VT == MVT::i64) {
+      SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
+          SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
       DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
       return FILDChain;
     }
@@ -25925,7 +25894,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
   case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
   case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
-  case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this);
+  case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
   case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
   case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
   case X86ISD::FXOR:

lib/Target/X86/X86ISelLowering.h

@@ -554,7 +554,8 @@ namespace llvm {
   //  X86 Implementation of the TargetLowering interface
   class X86TargetLowering final : public TargetLowering {
   public:
-    explicit X86TargetLowering(const X86TargetMachine &TM);
+    explicit X86TargetLowering(const X86TargetMachine &TM,
+                               const X86Subtarget &STI);
 
     unsigned getJumpTableEncoding() const override;
@@ -782,10 +783,6 @@ namespace llvm {
     bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                                EVT NewVT) const override;
 
-    const X86Subtarget* getSubtarget() const {
-      return Subtarget;
-    }
-
     /// Return true if the specified scalar FP type is computed in an SSE
     /// register, not on the X87 floating point stack.
     bool isScalarFPTypeInSSEReg(EVT VT) const {

lib/Target/X86/X86InstrInfo.cpp

@ -3700,11 +3700,9 @@ void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
assert(MF.getFrameInfo()->getObjectSize(FrameIdx) >= RC->getSize() && assert(MF.getFrameInfo()->getObjectSize(FrameIdx) >= RC->getSize() &&
"Stack slot too small for store"); "Stack slot too small for store");
unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16); unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
bool isAligned = (MF.getTarget() bool isAligned =
.getSubtargetImpl() (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) ||
->getFrameLowering() RI.canRealignStack(MF);
->getStackAlignment() >= Alignment) ||
RI.canRealignStack(MF);
unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget); unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
DebugLoc DL = MBB.findDebugLoc(MI); DebugLoc DL = MBB.findDebugLoc(MI);
addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIdx) addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIdx)
@ -3739,11 +3737,9 @@ void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
const TargetRegisterInfo *TRI) const { const TargetRegisterInfo *TRI) const {
const MachineFunction &MF = *MBB.getParent(); const MachineFunction &MF = *MBB.getParent();
unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16); unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
bool isAligned = (MF.getTarget() bool isAligned =
.getSubtargetImpl() (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) ||
->getFrameLowering() RI.canRealignStack(MF);
->getStackAlignment() >= Alignment) ||
RI.canRealignStack(MF);
unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget); unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
DebugLoc DL = MBB.findDebugLoc(MI); DebugLoc DL = MBB.findDebugLoc(MI);
addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx); addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
@ -4875,10 +4871,8 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
// If the function stack isn't realigned we don't want to fold instructions // If the function stack isn't realigned we don't want to fold instructions
// that need increased alignment. // that need increased alignment.
if (!RI.needsStackRealignment(MF)) if (!RI.needsStackRealignment(MF))
Alignment = std::min(Alignment, MF.getTarget() Alignment =
.getSubtargetImpl() std::min(Alignment, Subtarget.getFrameLowering()->getStackAlignment());
->getFrameLowering()
->getStackAlignment());
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
unsigned NewOpc = 0; unsigned NewOpc = 0;
unsigned RCSize = 0; unsigned RCSize = 0;
@ -5955,10 +5949,11 @@ namespace {
bool runOnMachineFunction(MachineFunction &MF) override { bool runOnMachineFunction(MachineFunction &MF) override {
const X86TargetMachine *TM = const X86TargetMachine *TM =
static_cast<const X86TargetMachine *>(&MF.getTarget()); static_cast<const X86TargetMachine *>(&MF.getTarget());
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
// Don't do anything if this is 64-bit as 64-bit PIC // Don't do anything if this is 64-bit as 64-bit PIC
// uses RIP relative addressing. // uses RIP relative addressing.
if (TM->getSubtarget<X86Subtarget>().is64Bit()) if (STI.is64Bit())
return false; return false;
// Only emit a global base reg in PIC mode. // Only emit a global base reg in PIC mode.
@ -5977,10 +5972,10 @@ namespace {
MachineBasicBlock::iterator MBBI = FirstMBB.begin(); MachineBasicBlock::iterator MBBI = FirstMBB.begin();
DebugLoc DL = FirstMBB.findDebugLoc(MBBI); DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
MachineRegisterInfo &RegInfo = MF.getRegInfo(); MachineRegisterInfo &RegInfo = MF.getRegInfo();
const X86InstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo(); const X86InstrInfo *TII = STI.getInstrInfo();
unsigned PC; unsigned PC;
if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT()) if (STI.isPICStyleGOT())
PC = RegInfo.createVirtualRegister(&X86::GR32RegClass); PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
else else
PC = GlobalBaseReg; PC = GlobalBaseReg;
@ -5991,7 +5986,7 @@ namespace {
// If we're using vanilla 'GOT' PIC style, we should use relative addressing // If we're using vanilla 'GOT' PIC style, we should use relative addressing
// not to pc, but to _GLOBAL_OFFSET_TABLE_ external. // not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT()) { if (STI.isPICStyleGOT()) {
// Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg) BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
.addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_", .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
@@ -6072,10 +6067,9 @@ namespace {
     MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr *I,
                                          unsigned TLSBaseAddrReg) {
       MachineFunction *MF = I->getParent()->getParent();
-      const X86TargetMachine *TM =
-          static_cast<const X86TargetMachine *>(&MF->getTarget());
-      const bool is64Bit = TM->getSubtarget<X86Subtarget>().is64Bit();
-      const X86InstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo();
+      const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
+      const bool is64Bit = STI.is64Bit();
+      const X86InstrInfo *TII = STI.getInstrInfo();

       // Insert a Copy from TLSBaseAddrReg to RAX/EAX.
       MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
@@ -6093,10 +6087,9 @@ namespace {
     // inserting a copy instruction after I. Returns the new instruction.
     MachineInstr *SetRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
       MachineFunction *MF = I->getParent()->getParent();
-      const X86TargetMachine *TM =
-          static_cast<const X86TargetMachine *>(&MF->getTarget());
-      const bool is64Bit = TM->getSubtarget<X86Subtarget>().is64Bit();
-      const X86InstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo();
+      const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
+      const bool is64Bit = STI.is64Bit();
+      const X86InstrInfo *TII = STI.getInstrInfo();

       // Create a virtual register for the TLS base address.
       MachineRegisterInfo &RegInfo = MF->getRegInfo();


@@ -76,9 +76,8 @@ namespace llvm {
   void
   X86AsmPrinter::StackMapShadowTracker::startFunction(MachineFunction &MF) {
     CodeEmitter.reset(TM.getTarget().createMCCodeEmitter(
-        *TM.getSubtargetImpl()->getInstrInfo(),
-        *TM.getSubtargetImpl()->getRegisterInfo(), *TM.getSubtargetImpl(),
-        MF.getContext()));
+        *MF.getSubtarget().getInstrInfo(), *MF.getSubtarget().getRegisterInfo(),
+        MF.getSubtarget(), MF.getContext()));
   }

   void X86AsmPrinter::StackMapShadowTracker::count(MCInst &Inst,
@@ -112,8 +111,8 @@ namespace llvm {
 X86MCInstLower::X86MCInstLower(const MachineFunction &mf,
                                X86AsmPrinter &asmprinter)
-    : Ctx(mf.getContext()), MF(mf), TM(mf.getTarget()),
-      MAI(*TM.getMCAsmInfo()), AsmPrinter(asmprinter) {}
+    : Ctx(mf.getContext()), MF(mf), TM(mf.getTarget()), MAI(*TM.getMCAsmInfo()),
+      AsmPrinter(asmprinter) {}

 MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
   return MF.getMMI().getObjFileInfo<MachineModuleInfoMachO>();
@@ -989,8 +988,7 @@ static std::string getShuffleComment(const MachineOperand &DstOp,
 void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
   X86MCInstLower MCInstLowering(*MF, *this);
-  const X86RegisterInfo *RI = static_cast<const X86RegisterInfo *>(
-      TM.getSubtargetImpl()->getRegisterInfo());
+  const X86RegisterInfo *RI = MF->getSubtarget<X86Subtarget>().getRegisterInfo();

   switch (MI->getOpcode()) {
   case TargetOpcode::DBG_VALUE:


@@ -51,7 +51,7 @@ namespace {
   struct PadShortFunc : public MachineFunctionPass {
     static char ID;
     PadShortFunc() : MachineFunctionPass(ID)
-                   , Threshold(4), TM(nullptr), TII(nullptr) {}
+                   , Threshold(4), STI(nullptr), TII(nullptr) {}

     bool runOnMachineFunction(MachineFunction &MF) override;
@@ -79,7 +79,7 @@ namespace {
     // VisitedBBs - Cache of previously visited BBs.
     DenseMap<MachineBasicBlock*, VisitedBBInfo> VisitedBBs;

-    const TargetMachine *TM;
+    const X86Subtarget *STI;
     const TargetInstrInfo *TII;
   };
@@ -101,11 +101,11 @@ bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
     return false;
   }

-  TM = &MF.getTarget();
-  if (!TM->getSubtarget<X86Subtarget>().padShortFunctions())
+  STI = &MF.getSubtarget<X86Subtarget>();
+  if (!STI->padShortFunctions())
     return false;

-  TII = TM->getSubtargetImpl()->getInstrInfo();
+  TII = STI->getInstrInfo();

   // Search through basic blocks and mark the ones that have early returns
   ReturnBBs.clear();
@@ -195,8 +195,7 @@ bool PadShortFunc::cyclesUntilReturn(MachineBasicBlock *MBB,
       return true;
     }

-    CyclesToEnd += TII->getInstrLatency(
-        TM->getSubtargetImpl()->getInstrItineraryData(), MI);
+    CyclesToEnd += TII->getInstrLatency(STI->getInstrItineraryData(), MI);
   }

   VisitedBBs[MBB] = VisitedBBInfo(false, CyclesToEnd);


@@ -325,7 +325,7 @@ def GR8 : RegisterClass<"X86", [i8], 8,
                         R8B, R9B, R10B, R11B, R14B, R15B, R12B, R13B)> {
   let AltOrders = [(sub GR8, AH, BH, CH, DH)];
   let AltOrderSelect = [{
-    return MF.getTarget().getSubtarget<X86Subtarget>().is64Bit();
+    return MF.getSubtarget<X86Subtarget>().is64Bit();
   }];
 }
@@ -377,7 +377,7 @@ def GR8_NOREX : RegisterClass<"X86", [i8], 8,
                               (add AL, CL, DL, AH, CH, DH, BL, BH)> {
   let AltOrders = [(sub GR8_NOREX, AH, BH, CH, DH)];
   let AltOrderSelect = [{
-    return MF.getTarget().getSubtarget<X86Subtarget>().is64Bit();
+    return MF.getSubtarget<X86Subtarget>().is64Bit();
   }];
 }
 // GR16_NOREX - GR16 registers which do not require a REX prefix.


@@ -57,7 +57,8 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
                                              bool isVolatile,
                                              MachinePointerInfo DstPtrInfo) const {
   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
-  const X86Subtarget &Subtarget = DAG.getTarget().getSubtarget<X86Subtarget>();
+  const X86Subtarget &Subtarget =
+      DAG.getMachineFunction().getSubtarget<X86Subtarget>();

 #ifndef NDEBUG
   // If the base register might conflict with our physical registers, bail out.
@@ -199,17 +200,15 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
   return Chain;
 }

-SDValue
-X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
-                                             SDValue Chain, SDValue Dst, SDValue Src,
-                                             SDValue Size, unsigned Align,
-                                             bool isVolatile, bool AlwaysInline,
-                                             MachinePointerInfo DstPtrInfo,
-                                             MachinePointerInfo SrcPtrInfo) const {
+SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
+    SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src,
+    SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline,
+    MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
   // This requires the copy size to be a constant, preferably
   // within a subtarget-specific limit.
   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
-  const X86Subtarget &Subtarget = DAG.getTarget().getSubtarget<X86Subtarget>();
+  const X86Subtarget &Subtarget =
+      DAG.getMachineFunction().getSubtarget<X86Subtarget>();
   if (!ConstantSize)
     return SDValue();
   uint64_t SizeVal = ConstantSize->getZExtValue();


@@ -301,7 +301,7 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
       In16BitMode(TargetTriple.getArch() == Triple::x86 &&
                   TargetTriple.getEnvironment() == Triple::CODE16),
       TSInfo(*TM.getDataLayout()),
-      InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM),
+      InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
       FrameLowering(TargetFrameLowering::StackGrowsDown, getStackAlignment(),
                     is64Bit() ? -8 : -4) {
   // Determine the PICStyle based on the target selected.


@@ -247,10 +247,10 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
 /// runOnMachineFunction - Loop over all of the basic blocks, inserting
 /// vzero upper instructions before function calls.
 bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
-  const X86Subtarget &ST = MF.getTarget().getSubtarget<X86Subtarget>();
+  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
   if (!ST.hasAVX() || ST.hasAVX512())
     return false;
-  TII = MF.getSubtarget().getInstrInfo();
+  TII = ST.getInstrInfo();
   MachineRegisterInfo &MRI = MF.getRegInfo();
   EverMadeChange = false;