	[X86] Refactor the prologue emission to prepare for shrink-wrapping.
- Add a late pass to expand pseudo instructions (tail calls and EH returns)
  instead of doing it during prologue emission.
- Factor some helpers into static methods of X86FrameLowering to ease code
  sharing.

NFC.

Related to <rdar://problem/20821487>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@237977 91177308-0d34-0410-b5e6-96231b3b80d8
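For orientation, here is a minimal sketch of the transformation the new late pass performs on a direct tail-call pseudo: fold any pending stack-pointer adjustment, emit it, then emit the real tail jump in place of the pseudo. This is an editor's illustration, not LLVM code; the struct, the function, and the textual x86 output are stand-ins only.

#include <cstdio>
#include <string>

// Stand-in for a TCRETURNdi64-style pseudo: a callee plus a stack adjustment.
struct TailCallPseudo {
  std::string Callee;
  int StackAdjustBytes; // bytes to release before jumping; 0 means none
};

// Conceptual expansion, performed after prologue/epilogue insertion:
//   TCRETURNdi64 @f, N   ==>   [add rsp, N] ; jmp f
void expandTailCall(const TailCallPseudo &P) {
  if (P.StackAdjustBytes)
    std::printf("  add rsp, %d\n", P.StackAdjustBytes); // emitSPUpdate's role
  std::printf("  jmp %s\n", P.Callee.c_str());          // TAILJMPd64's role
}

int main() { expandTailCall({"callee", 40}); }
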
lib/Target/X86/CMakeLists.txt
@@ -15,6 +15,7 @@ add_public_tablegen_target(X86CommonTableGen)
 set(sources
   X86AsmPrinter.cpp
   X86CallFrameOptimization.cpp
+  X86ExpandPseudo.cpp
   X86FastISel.cpp
   X86FloatingPoint.cpp
   X86FrameLowering.cpp

lib/Target/X86/X86.h
@@ -75,6 +75,11 @@ FunctionPass *createX86CallFrameOptimization();
 /// preparation.
 FunctionPass *createX86WinEHStatePass();
 
+/// Return a Machine IR pass that expands X86-specific pseudo
+/// instructions into a sequence of actual instructions. This pass
+/// must run after prologue/epilogue insertion and before lowering
+/// the MachineInstr to MC.
+FunctionPass *createX86ExpandPseudoPass();
 } // End llvm namespace
 
 #endif

lib/Target/X86/X86ExpandPseudo.cpp (new file, 188 lines)
@@ -0,0 +1,188 @@
//===------- X86ExpandPseudo.cpp - Expand pseudo instructions -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that expands pseudo instructions into target
// instructions to allow proper scheduling, if-conversion, other late
// optimizations, or simply the encoding of the instructions.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/Passes.h" // For IDs of passes that are preserved.
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/GlobalValue.h"
using namespace llvm;

#define DEBUG_TYPE "x86-pseudo"

namespace {
class X86ExpandPseudo : public MachineFunctionPass {
public:
  static char ID;
  X86ExpandPseudo() : MachineFunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addPreservedID(MachineLoopInfoID);
    AU.addPreservedID(MachineDominatorsID);
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  const X86Subtarget *STI;
  const X86InstrInfo *TII;
  const X86RegisterInfo *TRI;
  const X86FrameLowering *X86FrameLowering;

  bool runOnMachineFunction(MachineFunction &Fn) override;

  const char *getPassName() const override {
    return "X86 pseudo instruction expansion pass";
  }

private:
  bool ExpandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
  bool ExpandMBB(MachineBasicBlock &MBB);
};
char X86ExpandPseudo::ID = 0;
} // End anonymous namespace.

/// If \p MBBI is a pseudo instruction, this method expands
/// it to the corresponding (sequence of) actual instruction(s).
/// \returns true if \p MBBI has been expanded.
bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MBBI) {
  MachineInstr &MI = *MBBI;
  unsigned Opcode = MI.getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();
  switch (Opcode) {
  default:
    return false;
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64: {
    bool isMem = Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64;
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();

    if (StackAdj) {
      bool Is64Bit = STI->is64Bit();
      // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
      const bool Uses64BitFramePtr =
          STI->isTarget64BitLP64() || STI->isTargetNaCl64();
      bool UseLEAForSP =
          X86FrameLowering->useLEAForSPInProlog(*MBB.getParent());
      unsigned StackPtr = TRI->getStackRegister();
      // Check for possible merge with preceding ADD instruction.
      StackAdj += X86FrameLowering::mergeSPUpdates(MBB, MBBI, StackPtr, true);
      X86FrameLowering::emitSPUpdate(MBB, MBBI, StackPtr, StackAdj, Is64Bit,
                                     Uses64BitFramePtr, UseLEAForSP, *TII,
                                     *TRI);
    }

    // Jump to label or value in register.
    bool IsWin64 = STI->isTargetWin64();
    if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdi64) {
      unsigned Op = (Opcode == X86::TCRETURNdi)
                        ? X86::TAILJMPd
                        : (IsWin64 ? X86::TAILJMPd64_REX : X86::TAILJMPd64);
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
      if (JumpTarget.isGlobal())
        MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                             JumpTarget.getTargetFlags());
      else {
        assert(JumpTarget.isSymbol());
        MIB.addExternalSymbol(JumpTarget.getSymbolName(),
                              JumpTarget.getTargetFlags());
      }
    } else if (Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64) {
      unsigned Op = (Opcode == X86::TCRETURNmi)
                        ? X86::TAILJMPm
                        : (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (Opcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL,
              TII->get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
          .addReg(JumpTarget.getReg(), RegState::Kill);
    } else {
      BuildMI(MBB, MBBI, DL, TII->get(X86::TAILJMPr))
          .addReg(JumpTarget.getReg(), RegState::Kill);
    }

    MachineInstr *NewMI = std::prev(MBBI);
    NewMI->copyImplicitOps(*MBBI->getParent()->getParent(), MBBI);

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);

    return true;
  }
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    const bool Uses64BitFramePtr =
        STI->isTarget64BitLP64() || STI->isTargetNaCl64();
    unsigned StackPtr = TRI->getStackRegister();
    BuildMI(MBB, MBBI, DL,
            TII->get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), StackPtr)
        .addReg(DestAddr.getReg());
    // The EH_RETURN pseudo is really removed during the MC Lowering.
    return true;
  }
  }
  llvm_unreachable("Previous switch has a fallthrough?");
}

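Aside (editor's note, not part of the diff): the isMem ? 5 : 1 operand index and the i != 5 copy loop above reflect x86's five-part memory operands (base, scale, index, displacement, segment). A small restatement of that assumption; the names here are illustrative, the real constant in LLVM is X86::AddrNumOperands.

// Illustrative only; not LLVM API.
enum : unsigned { X86MemOperandParts = 5 };

// For TCRETURNmi/TCRETURNmi64 the jump target occupies operands 0..4, so the
// stack-adjust immediate sits at index 5; register/immediate forms keep it at 1.
inline unsigned stackAdjustOperandIndex(bool IsMemoryForm) {
  return IsMemoryForm ? X86MemOperandParts : 1;
}
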
/// Expand all pseudo instructions contained in \p MBB.
/// \returns true if any expansion occurred for \p MBB.
bool X86ExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  // MBBI may be invalidated by the expansion.
  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
    Modified |= ExpandMI(MBB, MBBI);
    MBBI = NMBBI;
  }

  return Modified;
}

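Aside (editor's note, not from the commit): ExpandMBB computes std::next(MBBI) before calling ExpandMI because a successful expansion erases the instruction MBBI points at. The same erase-safe walk on a plain std::list, for illustration:

#include <iostream>
#include <iterator>
#include <list>

int main() {
  std::list<int> Insts = {1, 2, 3, 4};   // stand-ins for machine instructions
  for (auto It = Insts.begin(), E = Insts.end(); It != E;) {
    auto Next = std::next(It);           // like NMBBI above
    if (*It % 2 == 0)                    // "expanding" erases the element here
      Insts.erase(It);                   // invalidates It but not Next
    It = Next;                           // safe to keep walking
  }
  for (int V : Insts)
    std::cout << V << ' ';               // prints: 1 3
  std::cout << '\n';
}
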
bool X86ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
  STI = &static_cast<const X86Subtarget &>(MF.getSubtarget());
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  X86FrameLowering = STI->getFrameLowering();

  bool Modified = false;
  for (MachineBasicBlock &MBB : MF)
    Modified |= ExpandMBB(MBB);
  return Modified;
}

/// Returns an instance of the pseudo instruction expansion pass.
FunctionPass *llvm::createX86ExpandPseudoPass() {
  return new X86ExpandPseudo();
}

lib/Target/X86/X86FrameLowering.cpp
@@ -205,11 +205,12 @@ static bool isEAXLiveIn(MachineFunction &MF) {
 
 /// emitSPUpdate - Emit a series of instructions to increment / decrement the
 /// stack pointer by a constant value.
-static
-void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
-                  unsigned StackPtr, int64_t NumBytes,
-                  bool Is64BitTarget, bool Is64BitStackPtr, bool UseLEA,
-                  const TargetInstrInfo &TII, const TargetRegisterInfo &TRI) {
+void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
+                                    MachineBasicBlock::iterator &MBBI,
+                                    unsigned StackPtr, int64_t NumBytes,
+                                    bool Is64BitTarget, bool Is64BitStackPtr,
+                                    bool UseLEA, const TargetInstrInfo &TII,
+                                    const TargetRegisterInfo &TRI) {
   bool isSub = NumBytes < 0;
   uint64_t Offset = isSub ? -NumBytes : NumBytes;
   unsigned Opc;
@@ -312,12 +313,9 @@ void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
   }
 }
 
-/// mergeSPUpdates - Checks the instruction before/after the passed
-/// instruction. If it is an ADD/SUB/LEA instruction it is deleted argument and
-/// the stack adjustment is returned as a positive value for ADD/LEA and a
-/// negative for SUB.
-static int mergeSPUpdates(MachineBasicBlock &MBB,
-                          MachineBasicBlock::iterator &MBBI, unsigned StackPtr,
-                          bool doMergeWithPrevious) {
+int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
+                                     MachineBasicBlock::iterator &MBBI,
+                                     unsigned StackPtr,
+                                     bool doMergeWithPrevious) {
   if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
       (!doMergeWithPrevious && MBBI == MBB.end()))
@@ -967,6 +965,17 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
   }
 }
 
+bool X86FrameLowering::useLEAForSPInProlog(const MachineFunction &MF) const {
+  // We can't use LEA instructions for adjusting the stack pointer if this is a
+  // leaf function in the Win64 ABI.  Only ADD instructions may be used to
+  // deallocate the stack.
+  // This means that we can use LEA for SP in two situations:
+  // 1. We *aren't* using the Win64 ABI which means we are free to use LEA.
+  // 2. We *have* a frame pointer which means we are permitted to use LEA.
+  return MF.getSubtarget<X86Subtarget>().useLeaForSP() &&
+         (!MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF));
+}
+
 void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                     MachineBasicBlock &MBB) const {
   const MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -974,14 +983,12 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
   const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
   const X86RegisterInfo *RegInfo = STI.getRegisterInfo();
   const TargetInstrInfo &TII = *STI.getInstrInfo();
-  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
   assert(MBBI != MBB.end() && "Returning block has no instructions");
-  unsigned RetOpcode = MBBI->getOpcode();
   DebugLoc DL = MBBI->getDebugLoc();
   bool Is64Bit = STI.is64Bit();
   // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
   const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
-  bool HasFP = hasFP(MF);
   const bool Is64BitILP32 = STI.isTarget64BitILP32();
   unsigned SlotSize = RegInfo->getSlotSize();
   unsigned FramePtr = RegInfo->getFrameRegister(MF);
@@ -992,22 +999,9 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
 
   bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
   bool NeedsWinEH = IsWinEH && MF.getFunction()->needsUnwindTableEntry();
-  bool UseLEAForSP = false;
+  bool UseLEAForSP = useLEAForSPInProlog(MF);
 
-  // We can't use LEA instructions for adjusting the stack pointer if this is a
-  // leaf function in the Win64 ABI.  Only ADD instructions may be used to
-  // deallocate the stack.
-  if (STI.useLeaForSP()) {
-    if (!IsWinEH) {
-      // We *aren't* using the Win64 ABI which means we are free to use LEA.
-      UseLEAForSP = true;
-    } else if (HasFP) {
-      // We *have* a frame pointer which means we are permitted to use LEA.
-      UseLEAForSP = true;
-    }
-  }
-
-  switch (RetOpcode) {
+  switch (MBBI->getOpcode()) {
   default:
     llvm_unreachable("Can only insert epilogue into returning blocks");
   case X86::RETQ:
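Aside (editor's note, not part of the diff): the new useLEAForSPInProlog helper collapses the nested ifs removed in this hunk into a single boolean expression. A dependency-free check that the two formulations agree over all inputs:

#include <cassert>

// Old shape (the epilogue code removed above): nested ifs.
static bool useLeaOld(bool UseLeaForSP, bool IsWinEH, bool HasFP) {
  bool Result = false;
  if (UseLeaForSP) {
    if (!IsWinEH)
      Result = true;        // not the Win64 ABI: LEA is always allowed
    else if (HasFP)
      Result = true;        // Win64 ABI, but a frame pointer is present
  }
  return Result;
}

// New shape (useLEAForSPInProlog): a single expression.
static bool useLeaNew(bool UseLeaForSP, bool IsWinEH, bool HasFP) {
  return UseLeaForSP && (!IsWinEH || HasFP);
}

int main() {
  for (int A = 0; A < 2; ++A)
    for (int B = 0; B < 2; ++B)
      for (int C = 0; C < 2; ++C)
        assert(useLeaOld(A, B, C) == useLeaNew(A, B, C));
}
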
@@ -1112,90 +1106,17 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
   if (NeedsWinEH)
     BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
 
-  // We're returning from function via eh_return.
-  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
-    MBBI = MBB.getLastNonDebugInstr();
-    MachineOperand &DestAddr  = MBBI->getOperand(0);
-    assert(DestAddr.isReg() && "Offset should be in register!");
-    BuildMI(MBB, MBBI, DL,
-            TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
-            StackPtr).addReg(DestAddr.getReg());
-  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
-             RetOpcode == X86::TCRETURNmi ||
-             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
-             RetOpcode == X86::TCRETURNmi64) {
-    bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
-    // Tail call return: adjust the stack pointer and jump to callee.
-    MBBI = MBB.getLastNonDebugInstr();
-    MachineOperand &JumpTarget = MBBI->getOperand(0);
-    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
-    assert(StackAdjust.isImm() && "Expecting immediate value.");
-
-    // Adjust stack pointer.
-    int StackAdj = StackAdjust.getImm();
-    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
-    int Offset = 0;
-    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");
-
-    // Incoporate the retaddr area.
-    Offset = StackAdj-MaxTCDelta;
-    assert(Offset >= 0 && "Offset should never be negative");
-
-
-    // Jump to label or value in register.
-    bool IsWin64 = STI.isTargetWin64();
-    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
-      unsigned Op = (RetOpcode == X86::TCRETURNdi)
-                        ? X86::TAILJMPd
-                        : (IsWin64 ? X86::TAILJMPd64_REX : X86::TAILJMPd64);
-      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(Op));
-      if (JumpTarget.isGlobal())
-        MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
-                             JumpTarget.getTargetFlags());
-      else {
-        assert(JumpTarget.isSymbol());
-        MIB.addExternalSymbol(JumpTarget.getSymbolName(),
-                              JumpTarget.getTargetFlags());
-      }
-    } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
-      unsigned Op = (RetOpcode == X86::TCRETURNmi)
-                        ? X86::TAILJMPm
-                        : (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);
-      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(Op));
-      for (unsigned i = 0; i != 5; ++i)
-        MIB.addOperand(MBBI->getOperand(i));
-    } else if (RetOpcode == X86::TCRETURNri64) {
-      BuildMI(MBB, MBBI, DL,
-              TII.get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
-          .addReg(JumpTarget.getReg(), RegState::Kill);
-    } else {
-      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
-        addReg(JumpTarget.getReg(), RegState::Kill);
-    }
-
-    MachineInstr *NewMI = std::prev(MBBI);
-    NewMI->copyImplicitOps(MF, MBBI);
-
-    // Delete the pseudo instruction TCRETURN.
-    MBB.erase(MBBI);
-  } else if ((RetOpcode == X86::RETQ || RetOpcode == X86::RETL ||
-              RetOpcode == X86::RETIQ || RetOpcode == X86::RETIL) &&
-             (X86FI->getTCReturnAddrDelta() < 0)) {
-    // Add the return addr area delta back since we are not tail calling.
-    int delta = -1*X86FI->getTCReturnAddrDelta();
-    MBBI = MBB.getLastNonDebugInstr();
-
-    // Check for possible merge with preceding ADD instruction.
-    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
-    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, Uses64BitFramePtr,
-                 UseLEAForSP, TII, *RegInfo);
-  }
+  // Add the return addr area delta back since we are not tail calling.
+  int Offset = -1 * X86FI->getTCReturnAddrDelta();
+  assert(Offset >= 0 && "TCDelta should never be positive");
+  if (Offset) {
+    MBBI = MBB.getFirstTerminator();
+
+    // Check for possible merge with preceding ADD instruction.
+    Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
+    emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, Uses64BitFramePtr,
+                 UseLEAForSP, TII, *RegInfo);
+  }
 }
 
 int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,

lib/Target/X86/X86FrameLowering.h
@@ -79,6 +79,26 @@ public:
                                  MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI) const override;
 
+  /// Check the instruction before/after the passed instruction. If
+  /// it is an ADD/SUB/LEA instruction it is deleted argument and the
+  /// stack adjustment is returned as a positive value for ADD/LEA and
+  /// a negative for SUB.
+  static int mergeSPUpdates(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator &MBBI,
+                            unsigned StackPtr, bool doMergeWithPrevious);
+
+  /// Emit a series of instructions to increment / decrement the stack
+  /// pointer by a constant value.
+  static void emitSPUpdate(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator &MBBI, unsigned StackPtr,
+                           int64_t NumBytes, bool Is64BitTarget,
+                           bool Is64BitStackPtr, bool UseLEA,
+                           const TargetInstrInfo &TII,
+                           const TargetRegisterInfo &TRI);
+
+  /// Check that LEA can be use on SP in a prologue sequence for \p MF.
+  bool useLEAForSPInProlog(const MachineFunction &MF) const;
+
 private:
   /// convertArgMovsToPushes - This method tries to convert a call sequence
   /// that uses sub and mov instructions to put the argument onto the stack

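Aside (editor's note, not from the commit): exposing mergeSPUpdates and emitSPUpdate as public static members, as declared above, is what lets the new X86ExpandPseudo pass call them from another file without holding an X86FrameLowering object. The same pattern in a dependency-free toy, with made-up names:

#include <iostream>

struct FrameLowering {
  // Before: a file-local "static int mergeSPUpdates(...)" in the .cpp,
  // invisible to other translation units. After: a static member in the header.
  static int mergeSPUpdates(int PendingAdjust, int NeighbourAdd) {
    return PendingAdjust + NeighbourAdd; // fold the neighbouring ADD/SUB/LEA
  }
};

// Another pass (another .cpp) can call it by qualified name, no instance needed.
int main() { std::cout << FrameLowering::mergeSPUpdates(40, 8) << '\n'; }
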
lib/Target/X86/X86RegisterInfo.cpp
@@ -492,7 +492,8 @@ X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   unsigned BasePtr;
 
   unsigned Opc = MI.getOpcode();
-  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
+  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm ||
+                    Opc == X86::TCRETURNmi || Opc == X86::TCRETURNmi64;
   if (hasBasePointer(MF))
     BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
   else if (needsStackRealignment(MF))

lib/Target/X86/X86TargetMachine.cpp
@@ -187,6 +187,7 @@ public:
   void addPreRegAlloc() override;
   void addPostRegAlloc() override;
   void addPreEmitPass() override;
+  void addPreSched2() override;
 };
 } // namespace
 
@@ -235,6 +236,8 @@ void X86PassConfig::addPostRegAlloc() {
   addPass(createX86FloatingPointStackifierPass());
 }
 
+void X86PassConfig::addPreSched2() { addPass(createX86ExpandPseudoPass()); }
+
 void X86PassConfig::addPreEmitPass() {
   if (getOptLevel() != CodeGenOpt::None)
     addPass(createExecutionDependencyFixPass(&X86::VR128RegClass));