Matches MachineInstr changes.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@31712 91177308-0d34-0410-b5e6-96231b3b80d8
parent d7de496b23
commit 7ce4578353
Changed paths:
  include/llvm/CodeGen
  lib/CodeGen/SelectionDAG
  lib/Target/Alpha
  lib/Target/IA64
  lib/Target/PowerPC
  lib/Target/Sparc
  lib/Target/X86
@@ -18,6 +18,8 @@
 #define LLVM_CODEGEN_MACHINEINSTRBUILDER_H
 
 #include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Target/TargetMachine.h"
 
 namespace llvm {
 
@@ -33,8 +35,9 @@ public:
 
   /// addReg - Add a new virtual register operand...
   ///
-  const MachineInstrBuilder &addReg(int RegNo, bool isDef = false,
-                                    bool isImp = false) const {
+  const
+  MachineInstrBuilder &addReg(int RegNo, bool isDef = false, bool isImp = false,
+                              bool isKill = false, bool isDead = false) const {
     MI->addRegOperand(RegNo, isDef, isImp);
     return *this;
   }
@@ -77,28 +80,24 @@ public:
     MI->addExternalSymbolOperand(FnName);
     return *this;
   }
-
-  const MachineInstrBuilder &addImplicitDefsUses() const {
-    MI->addImplicitDefUseOperands();
-    return *this;
-  }
 };
 
 /// BuildMI - Builder interface. Specify how to create the initial instruction
 /// itself. NumOperands is the number of operands to the machine instruction to
 /// allow for memory efficient representation of machine instructions.
 ///
-inline MachineInstrBuilder BuildMI(int Opcode, unsigned NumOperands) {
-  return MachineInstrBuilder(new MachineInstr(Opcode, NumOperands));
+inline MachineInstrBuilder BuildMI(const TargetInstrInfo &TII, int Opcode,
+                                   unsigned NumOperands) {
+  return MachineInstrBuilder(new MachineInstr(TII, Opcode, NumOperands));
 }
 
 /// BuildMI - This version of the builder sets up the first operand as a
 /// destination virtual register. NumOperands is the number of additional add*
 /// calls that are expected, not including the destination register.
 ///
-inline MachineInstrBuilder
-BuildMI(int Opcode, unsigned NumOperands, unsigned DestReg) {
-  return MachineInstrBuilder(new MachineInstr(Opcode, NumOperands+1))
+inline MachineInstrBuilder BuildMI(const TargetInstrInfo &TII, int Opcode,
+                                   unsigned NumOperands, unsigned DestReg) {
+  return MachineInstrBuilder(new MachineInstr(TII, Opcode, NumOperands+1))
     .addReg(DestReg, true);
 }
 
@@ -112,7 +111,8 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                                    MachineBasicBlock::iterator I,
                                    int Opcode, unsigned NumOperands,
                                    unsigned DestReg) {
-  MachineInstr *MI = new MachineInstr(Opcode, NumOperands+1);
+  MachineInstr *MI = new MachineInstr(*BB.getParent()->getTarget().
+                                      getInstrInfo(), Opcode, NumOperands+1);
   BB.insert(I, MI);
   return MachineInstrBuilder(MI).addReg(DestReg, true);
 }
@@ -124,7 +124,8 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
 inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                                    MachineBasicBlock::iterator I,
                                    int Opcode, unsigned NumOperands) {
-  MachineInstr *MI = new MachineInstr(Opcode, NumOperands);
+  MachineInstr *MI = new MachineInstr(*BB.getParent()->getTarget().
+                                      getInstrInfo(), Opcode, NumOperands);
   BB.insert(I, MI);
   return MachineInstrBuilder(MI);
 }
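Every form of BuildMI now has to be handed the TargetInstrInfo so the new MachineInstr can be created from its instruction descriptor, and addReg grows isKill/isDead flags. A minimal sketch of how a call site migrates (not part of the diff; TII, MBB, MBBI and DestReg stand for whatever the enclosing pass already has in scope):

    // Old style: the opcode alone was enough.
    //   BuildMI(X86::MOV32ri, 2, DestReg).addImm(42);
    // New style: pass the TargetInstrInfo explicitly...
    BuildMI(TII, X86::MOV32ri, 2, DestReg).addImm(42);
    // ...or use a MachineBasicBlock overload, which now fetches it itself
    // via BB.getParent()->getTarget().getInstrInfo().
    BuildMI(MBB, MBBI, X86::MOV32ri, 2, DestReg).addImm(42);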
@@ -395,7 +395,7 @@ void ScheduleDAG::EmitNode(SDNode *Node,
 #endif
 
     // Create the new machine instruction.
-    MachineInstr *MI = new MachineInstr(Opc, NumMIOperands);
+    MachineInstr *MI = new MachineInstr(*TII, Opc, NumMIOperands);
 
     // Add result register values for things that are defined by this
     // instruction.
@@ -441,9 +441,6 @@ void ScheduleDAG::EmitNode(SDNode *Node,
       }
     }
 
-    // Emit implicit def / use operands.
-    MI->addImplicitDefUseOperands();
-
     // Now that we have emitted all operands, emit this instruction itself.
     if ((II.Flags & M_USES_CUSTOM_DAG_SCHED_INSERTION) == 0) {
       BB->insert(BB->end(), MI);
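The explicit MI->addImplicitDefUseOperands() call can go away here because, once the constructor is given the TargetInstrInfo, the MachineInstr can presumably consult its opcode's descriptor and append the implicit physical-register defs and uses itself (the companion MachineInstr change this commit "matches"). Roughly, the emitter path becomes (sketch, not the literal code):

    // The TII-aware constructor pulls implicit operands from the descriptor.
    MachineInstr *MI = new MachineInstr(*TII, Opc, NumMIOperands);
    // ... add the explicit result and use operands as before ...
    BB->insert(BB->end(), MI);   // no MI->addImplicitDefUseOperands() needed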
@@ -19,7 +19,8 @@
 using namespace llvm;
 
 AlphaInstrInfo::AlphaInstrInfo()
-  : TargetInstrInfo(AlphaInsts, sizeof(AlphaInsts)/sizeof(AlphaInsts[0])) { }
+  : TargetInstrInfo(AlphaInsts, sizeof(AlphaInsts)/sizeof(AlphaInsts[0])),
+    RI(*this) { }
 
 
 bool AlphaInstrInfo::isMoveInstr(const MachineInstr& MI,
@@ -51,8 +51,9 @@ static long getLower16(long l)
   return l - h * IMM_MULT;
 }
 
-AlphaRegisterInfo::AlphaRegisterInfo()
-  : AlphaGenRegisterInfo(Alpha::ADJUSTSTACKDOWN, Alpha::ADJUSTSTACKUP)
+AlphaRegisterInfo::AlphaRegisterInfo(const TargetInstrInfo &tii)
+  : AlphaGenRegisterInfo(Alpha::ADJUSTSTACKDOWN, Alpha::ADJUSTSTACKUP),
+    TII(tii)
 {
 }
 
@@ -114,13 +115,13 @@ MachineInstr *AlphaRegisterInfo::foldMemoryOperand(MachineInstr *MI,
       unsigned InReg = MI->getOperand(1).getReg();
       Opc = (Opc == Alpha::BISr) ? Alpha::STQ :
         ((Opc == Alpha::CPYSS) ? Alpha::STS : Alpha::STT);
-      return BuildMI(Opc, 3).addReg(InReg).addFrameIndex(FrameIndex)
+      return BuildMI(TII, Opc, 3).addReg(InReg).addFrameIndex(FrameIndex)
         .addReg(Alpha::F31);
     } else {           // load -> move
       unsigned OutReg = MI->getOperand(0).getReg();
       Opc = (Opc == Alpha::BISr) ? Alpha::LDQ :
         ((Opc == Alpha::CPYSS) ? Alpha::LDS : Alpha::LDT);
-      return BuildMI(Opc, 2, OutReg).addFrameIndex(FrameIndex)
+      return BuildMI(TII, Opc, 2, OutReg).addFrameIndex(FrameIndex)
         .addReg(Alpha::F31);
     }
   }
@@ -205,11 +206,11 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
 
     MachineInstr *New;
     if (Old->getOpcode() == Alpha::ADJUSTSTACKDOWN) {
-      New=BuildMI(Alpha::LDA, 2, Alpha::R30)
+      New=BuildMI(TII, Alpha::LDA, 2, Alpha::R30)
         .addImm(-Amount).addReg(Alpha::R30);
     } else {
       assert(Old->getOpcode() == Alpha::ADJUSTSTACKUP);
-      New=BuildMI(Alpha::LDA, 2, Alpha::R30)
+      New=BuildMI(TII, Alpha::LDA, 2, Alpha::R30)
         .addImm(Amount).addReg(Alpha::R30);
     }
 
@@ -266,7 +267,7 @@ AlphaRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II) const {
     MI.getOperand(i + 1).ChangeToRegister(Alpha::R28, false);
     MI.getOperand(i).ChangeToImmediate(getLower16(Offset));
     //insert the new
-    MachineInstr* nMI=BuildMI(Alpha::LDAH, 2, Alpha::R28)
+    MachineInstr* nMI=BuildMI(TII, Alpha::LDAH, 2, Alpha::R28)
       .addImm(getUpper16(Offset)).addReg(FP ? Alpha::R15 : Alpha::R30);
     MBB.insert(II, nMI);
   } else {
@@ -22,7 +22,9 @@ namespace llvm {
 class Type;
 
 struct AlphaRegisterInfo : public AlphaGenRegisterInfo {
-  AlphaRegisterInfo();
+  const TargetInstrInfo &TII;
+
+  AlphaRegisterInfo(const TargetInstrInfo &tii);
 
   /// Code Generation virtual methods...
   void storeRegToStackSlot(MachineBasicBlock &MBB,
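The Alpha hunks above show the wiring pattern most targets repeat in this commit: the RegisterInfo keeps a const TargetInstrInfo reference for its own BuildMI calls, and the target's InstrInfo constructor passes itself in. A generic sketch (hypothetical "Foo" target, not code from the diff):

    struct FooRegisterInfo : public FooGenRegisterInfo {
      const TargetInstrInfo &TII;   // used by foldMemoryOperand, prologue code, ...
      FooRegisterInfo(const TargetInstrInfo &tii);
      // ...
    };

    FooInstrInfo::FooInstrInfo()
      : TargetInstrInfo(FooInsts, sizeof(FooInsts)/sizeof(FooInsts[0])),
        RI(*this) { }               // hand this InstrInfo to its RegisterInfo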
@@ -121,7 +121,7 @@ static bool hasFP(const MachineFunction &MF) {
 void IA64RegisterInfo::
 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I) const {
-
+  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
   if (hasFP(MF)) {
     // If we have a frame pointer, turn the adjcallstackup instruction into a
     // 'sub SP, <amt>' and the adjcallstackdown instruction into 'add SP,
@@ -137,11 +137,11 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
 
     MachineInstr *New;
     if (Old->getOpcode() == IA64::ADJUSTCALLSTACKDOWN) {
-      New=BuildMI(IA64::ADDIMM22, 2, IA64::r12).addReg(IA64::r12)
+      New=BuildMI(TII, IA64::ADDIMM22, 2, IA64::r12).addReg(IA64::r12)
         .addImm(-Amount);
     } else {
       assert(Old->getOpcode() == IA64::ADJUSTCALLSTACKUP);
-      New=BuildMI(IA64::ADDIMM22, 2, IA64::r12).addReg(IA64::r12)
+      New=BuildMI(TII, IA64::ADDIMM22, 2, IA64::r12).addReg(IA64::r12)
         .addImm(Amount);
     }
 
@@ -158,6 +158,7 @@ void IA64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II)const{
   MachineInstr &MI = *II;
   MachineBasicBlock &MBB = *MI.getParent();
   MachineFunction &MF = *MBB.getParent();
+  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
 
   bool FP = hasFP(MF);
 
@@ -186,16 +187,16 @@ void IA64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II)const{
     // Fix up the old:
     MI.getOperand(i).ChangeToRegister(IA64::r22, false);
     //insert the new
-    MachineInstr* nMI=BuildMI(IA64::ADDIMM22, 2, IA64::r22)
+    MachineInstr* nMI=BuildMI(TII, IA64::ADDIMM22, 2, IA64::r22)
       .addReg(BaseRegister).addImm(Offset);
     MBB.insert(II, nMI);
   } else { // it's big
     //fix up the old:
     MI.getOperand(i).ChangeToRegister(IA64::r22, false);
     MachineInstr* nMI;
-    nMI=BuildMI(IA64::MOVLIMM64, 1, IA64::r22).addImm(Offset);
+    nMI=BuildMI(TII, IA64::MOVLIMM64, 1, IA64::r22).addImm(Offset);
     MBB.insert(II, nMI);
-    nMI=BuildMI(IA64::ADD, 2, IA64::r22).addReg(BaseRegister)
+    nMI=BuildMI(TII, IA64::ADD, 2, IA64::r22).addReg(BaseRegister)
       .addReg(IA64::r22);
     MBB.insert(II, nMI);
   }
@@ -206,6 +207,7 @@ void IA64RegisterInfo::emitPrologue(MachineFunction &MF) const {
   MachineBasicBlock &MBB = MF.front();   // Prolog goes in entry BB
   MachineBasicBlock::iterator MBBI = MBB.begin();
   MachineFrameInfo *MFI = MF.getFrameInfo();
+  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
   MachineInstr *MI;
   bool FP = hasFP(MF);
 
@@ -250,7 +252,7 @@ void IA64RegisterInfo::emitPrologue(MachineFunction &MF) const {
     }
   }
 
-  MI=BuildMI(IA64::ALLOC,5).addReg(dstRegOfPseudoAlloc).addImm(0).\
+  MI=BuildMI(TII, IA64::ALLOC,5).addReg(dstRegOfPseudoAlloc).addImm(0). \
     addImm(numStackedGPRsUsed).addImm(numOutRegsUsed).addImm(0);
   MBB.insert(MBBI, MI);
 
@@ -282,22 +284,23 @@ void IA64RegisterInfo::emitPrologue(MachineFunction &MF) const {
 
   // adjust stack pointer: r12 -= numbytes
   if (NumBytes <= 8191) {
-    MI=BuildMI(IA64::ADDIMM22,2,IA64::r12).addReg(IA64::r12).addImm(-NumBytes);
+    MI=BuildMI(TII, IA64::ADDIMM22,2,IA64::r12).addReg(IA64::r12).
+      addImm(-NumBytes);
     MBB.insert(MBBI, MI);
   } else { // we use r22 as a scratch register here
-    MI=BuildMI(IA64::MOVLIMM64, 1, IA64::r22).addImm(-NumBytes);
+    MI=BuildMI(TII, IA64::MOVLIMM64, 1, IA64::r22).addImm(-NumBytes);
     // FIXME: MOVLSI32 expects a _u_32imm
     MBB.insert(MBBI, MI);  // first load the decrement into r22
-    MI=BuildMI(IA64::ADD, 2, IA64::r12).addReg(IA64::r12).addReg(IA64::r22);
+    MI=BuildMI(TII,IA64::ADD, 2, IA64::r12).addReg(IA64::r12).addReg(IA64::r22);
     MBB.insert(MBBI, MI);  // then add (subtract) it to r12 (stack ptr)
   }
 
   // now if we need to, save the old FP and set the new
   if (FP) {
-    MI = BuildMI(IA64::ST8, 2).addReg(IA64::r12).addReg(IA64::r5);
+    MI = BuildMI(TII, IA64::ST8, 2).addReg(IA64::r12).addReg(IA64::r5);
     MBB.insert(MBBI, MI);
     // this must be the last instr in the prolog ?  (XXX: why??)
-    MI = BuildMI(IA64::MOV, 1, IA64::r5).addReg(IA64::r12);
+    MI = BuildMI(TII, IA64::MOV, 1, IA64::r5).addReg(IA64::r12);
     MBB.insert(MBBI, MI);
   }
 
@@ -306,6 +309,7 @@ void IA64RegisterInfo::emitPrologue(MachineFunction &MF) const {
 void IA64RegisterInfo::emitEpilogue(MachineFunction &MF,
                                     MachineBasicBlock &MBB) const {
   const MachineFrameInfo *MFI = MF.getFrameInfo();
+  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
   MachineBasicBlock::iterator MBBI = prior(MBB.end());
   MachineInstr *MI;
   assert(MBBI->getOpcode() == IA64::RET &&
@@ -320,22 +324,24 @@ void IA64RegisterInfo::emitEpilogue(MachineFunction &MF,
   if (FP)
   {
     //copy the FP into the SP (discards allocas)
-    MI=BuildMI(IA64::MOV, 1, IA64::r12).addReg(IA64::r5);
+    MI=BuildMI(TII, IA64::MOV, 1, IA64::r12).addReg(IA64::r5);
     MBB.insert(MBBI, MI);
     //restore the FP
-    MI=BuildMI(IA64::LD8, 1, IA64::r5).addReg(IA64::r5);
+    MI=BuildMI(TII, IA64::LD8, 1, IA64::r5).addReg(IA64::r5);
     MBB.insert(MBBI, MI);
   }
 
   if (NumBytes != 0)
   {
     if (NumBytes <= 8191) {
-      MI=BuildMI(IA64::ADDIMM22,2,IA64::r12).addReg(IA64::r12).addImm(NumBytes);
+      MI=BuildMI(TII, IA64::ADDIMM22,2,IA64::r12).addReg(IA64::r12).
+        addImm(NumBytes);
      MBB.insert(MBBI, MI);
    } else {
-      MI=BuildMI(IA64::MOVLIMM64, 1, IA64::r22).addImm(NumBytes);
+      MI=BuildMI(TII, IA64::MOVLIMM64, 1, IA64::r22).addImm(NumBytes);
      MBB.insert(MBBI, MI);
-      MI=BuildMI(IA64::ADD, 2, IA64::r12).addReg(IA64::r12).addReg(IA64::r22);
+      MI=BuildMI(TII, IA64::ADD, 2, IA64::r12).addReg(IA64::r12).
+        addReg(IA64::r22);
      MBB.insert(MBBI, MI);
    }
  }
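Unlike the Alpha/PPC/Sparc register-info classes above, IA64RegisterInfo does not appear to gain a TII member in this commit, so each entry point that only receives a MachineFunction recovers the instruction info from the target before building. The recurring idiom, shown here out of context (MF, MBB, MBBI and NumBytes come from the enclosing function):

    const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
    MI = BuildMI(TII, IA64::ADDIMM22, 2, IA64::r12).addReg(IA64::r12).
      addImm(-NumBytes);
    MBB.insert(MBBI, MI);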
@@ -20,7 +20,7 @@ using namespace llvm;
 
 PPCInstrInfo::PPCInstrInfo(PPCTargetMachine &tm)
   : TargetInstrInfo(PPCInsts, sizeof(PPCInsts)/sizeof(PPCInsts[0])), TM(tm),
-    RI(*TM.getSubtargetImpl()) {}
+    RI(*TM.getSubtargetImpl(), *this) {}
 
 /// getPointerRegClass - Return the register class to use to hold pointers.
 /// This is used for addressing modes.
@@ -80,9 +80,10 @@ unsigned PPCRegisterInfo::getRegisterNumbering(unsigned RegEnum) {
   }
 }
 
-PPCRegisterInfo::PPCRegisterInfo(const PPCSubtarget &ST)
+PPCRegisterInfo::PPCRegisterInfo(const PPCSubtarget &ST,
+                                 const TargetInstrInfo &tii)
   : PPCGenRegisterInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP),
-    Subtarget(ST) {
+    Subtarget(ST), TII(tii) {
   ImmToIdxMap[PPC::LD] = PPC::LDX;    ImmToIdxMap[PPC::STD] = PPC::STDX;
   ImmToIdxMap[PPC::LBZ] = PPC::LBZX;  ImmToIdxMap[PPC::STB] = PPC::STBX;
   ImmToIdxMap[PPC::LHZ] = PPC::LHZX;  ImmToIdxMap[PPC::LHA] = PPC::LHAX;
@@ -322,39 +323,39 @@ MachineInstr *PPCRegisterInfo::foldMemoryOperand(MachineInstr *MI,
              MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
     if (OpNum == 0) {  // move -> store
       unsigned InReg = MI->getOperand(1).getReg();
-      return addFrameReference(BuildMI(PPC::STW,
+      return addFrameReference(BuildMI(TII, PPC::STW,
                                        3).addReg(InReg), FrameIndex);
     } else {           // move -> load
       unsigned OutReg = MI->getOperand(0).getReg();
-      return addFrameReference(BuildMI(PPC::LWZ, 2, OutReg), FrameIndex);
+      return addFrameReference(BuildMI(TII, PPC::LWZ, 2, OutReg), FrameIndex);
     }
   } else if ((Opc == PPC::OR8 &&
               MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
     if (OpNum == 0) {  // move -> store
       unsigned InReg = MI->getOperand(1).getReg();
-      return addFrameReference(BuildMI(PPC::STD,
+      return addFrameReference(BuildMI(TII, PPC::STD,
                                        3).addReg(InReg), FrameIndex);
     } else {           // move -> load
       unsigned OutReg = MI->getOperand(0).getReg();
-      return addFrameReference(BuildMI(PPC::LD, 2, OutReg), FrameIndex);
+      return addFrameReference(BuildMI(TII, PPC::LD, 2, OutReg), FrameIndex);
     }
   } else if (Opc == PPC::FMRD) {
     if (OpNum == 0) {  // move -> store
       unsigned InReg = MI->getOperand(1).getReg();
-      return addFrameReference(BuildMI(PPC::STFD,
+      return addFrameReference(BuildMI(TII, PPC::STFD,
                                        3).addReg(InReg), FrameIndex);
     } else {           // move -> load
       unsigned OutReg = MI->getOperand(0).getReg();
-      return addFrameReference(BuildMI(PPC::LFD, 2, OutReg), FrameIndex);
+      return addFrameReference(BuildMI(TII, PPC::LFD, 2, OutReg), FrameIndex);
     }
   } else if (Opc == PPC::FMRS) {
     if (OpNum == 0) {  // move -> store
       unsigned InReg = MI->getOperand(1).getReg();
-      return addFrameReference(BuildMI(PPC::STFS,
+      return addFrameReference(BuildMI(TII, PPC::STFS,
                                        3).addReg(InReg), FrameIndex);
     } else {           // move -> load
       unsigned OutReg = MI->getOperand(0).getReg();
-      return addFrameReference(BuildMI(PPC::LFS, 2, OutReg), FrameIndex);
+      return addFrameReference(BuildMI(TII, PPC::LFS, 2, OutReg), FrameIndex);
     }
   }
   return 0;
@@ -25,8 +25,9 @@ class Type;
 class PPCRegisterInfo : public PPCGenRegisterInfo {
   std::map<unsigned, unsigned> ImmToIdxMap;
   const PPCSubtarget &Subtarget;
+  const TargetInstrInfo &TII;
 public:
-  PPCRegisterInfo(const PPCSubtarget &SubTarget);
+  PPCRegisterInfo(const PPCSubtarget &SubTarget, const TargetInstrInfo &tii);
 
   /// getRegisterNumbering - Given the enum value for some register, e.g.
   /// PPC::F14, return the number that it corresponds to (e.g. 14).
@@ -19,7 +19,7 @@ using namespace llvm;
 
 SparcInstrInfo::SparcInstrInfo(SparcSubtarget &ST)
   : TargetInstrInfo(SparcInsts, sizeof(SparcInsts)/sizeof(SparcInsts[0])),
-    RI(ST) {
+    RI(ST, *this) {
 }
 
 static bool isZeroImm(const MachineOperand &op) {
@@ -23,9 +23,10 @@
 #include <iostream>
 using namespace llvm;
 
-SparcRegisterInfo::SparcRegisterInfo(SparcSubtarget &st)
+SparcRegisterInfo::SparcRegisterInfo(SparcSubtarget &st,
+                                     const TargetInstrInfo &tii)
   : SparcGenRegisterInfo(SP::ADJCALLSTACKDOWN, SP::ADJCALLSTACKUP),
-    Subtarget(st) {
+    Subtarget(st), TII(tii) {
 }
 
 void SparcRegisterInfo::
@@ -81,10 +82,10 @@ MachineInstr *SparcRegisterInfo::foldMemoryOperand(MachineInstr* MI,
     if (MI->getOperand(1).isRegister() && MI->getOperand(1).getReg() == SP::G0&&
         MI->getOperand(0).isRegister() && MI->getOperand(2).isRegister()) {
       if (OpNum == 0)    // COPY -> STORE
-        return BuildMI(SP::STri, 3).addFrameIndex(FI).addImm(0)
+        return BuildMI(TII, SP::STri, 3).addFrameIndex(FI).addImm(0)
           .addReg(MI->getOperand(2).getReg());
       else               // COPY -> LOAD
-        return BuildMI(SP::LDri, 2, MI->getOperand(0).getReg())
+        return BuildMI(TII, SP::LDri, 2, MI->getOperand(0).getReg())
           .addFrameIndex(FI).addImm(0);
     }
     break;
@@ -93,10 +94,10 @@ MachineInstr *SparcRegisterInfo::foldMemoryOperand(MachineInstr* MI,
     // FALLTHROUGH
   case SP::FMOVD:
     if (OpNum == 0)  // COPY -> STORE
-      return BuildMI(isFloat ? SP::STFri : SP::STDFri, 3)
+      return BuildMI(TII, isFloat ? SP::STFri : SP::STDFri, 3)
         .addFrameIndex(FI).addImm(0).addReg(MI->getOperand(1).getReg());
     else             // COPY -> LOAD
-      return BuildMI(isFloat ? SP::LDFri : SP::LDDFri, 2,
+      return BuildMI(TII, isFloat ? SP::LDFri : SP::LDDFri, 2,
                      MI->getOperand(0).getReg()).addFrameIndex(FI).addImm(0);
     break;
   }
@@ -24,8 +24,9 @@ class Type;
 
 struct SparcRegisterInfo : public SparcGenRegisterInfo {
   SparcSubtarget &Subtarget;
+  const TargetInstrInfo &TII;
 
-  SparcRegisterInfo(SparcSubtarget &st);
+  SparcRegisterInfo(SparcSubtarget &st, const TargetInstrInfo &tii);
 
   /// Code Generation virtual methods...
   void storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -40,6 +40,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/STLExtras.h"
 #include <algorithm>
@@ -213,20 +214,12 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
 
     // Get dead variables list now because the MI pointer may be deleted as part
     // of processing!
-    LiveVariables::killed_iterator IB, IE;
-    tie(IB, IE) = LV->dead_range(MI);
-
-    DEBUG(
-      const MRegisterInfo *MRI = MF.getTarget().getRegisterInfo();
-      LiveVariables::killed_iterator I = LV->killed_begin(MI);
-      LiveVariables::killed_iterator E = LV->killed_end(MI);
-      if (I != E) {
-        std::cerr << "Killed Operands:";
-        for (; I != E; ++I)
-          std::cerr << " %" << MRI->getName(*I);
-        std::cerr << "\n";
-      }
-    );
+    SmallVector<unsigned, 8> DeadRegs;
+    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+      const MachineOperand &MO = MI->getOperand(i);
+      if (MO.isReg() && MO.isDead())
+        DeadRegs.push_back(MO.getReg());
+    }
 
     switch (Flags & X86II::FPTypeMask) {
     case X86II::ZeroArgFP:  handleZeroArgFP(I); break;
@@ -241,8 +234,8 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
 
     // Check to see if any of the values defined by this instruction are dead
    // after definition.  If so, pop them.
-    for (; IB != IE; ++IB) {
-      unsigned Reg = *IB;
+    for (unsigned i = 0, e = DeadRegs.size(); i != e; ++i) {
+      unsigned Reg = DeadRegs[i];
       if (Reg >= X86::FP0 && Reg <= X86::FP6) {
         DEBUG(std::cerr << "Register FP#" << Reg-X86::FP0 << " is dead!\n");
         freeStackSlotAfter(I, Reg-X86::FP0);
@@ -762,6 +755,7 @@ void FPS::handleCondMovFP(MachineBasicBlock::iterator &I) {
 
   unsigned Op0 = getFPReg(MI->getOperand(0));
   unsigned Op1 = getFPReg(MI->getOperand(2));
+  bool KillsOp1 = LV->KillsRegister(MI, X86::FP0+Op1);
 
   // The first operand *must* be on the top of the stack.
   moveToTop(Op0, I);
@@ -773,9 +767,8 @@ void FPS::handleCondMovFP(MachineBasicBlock::iterator &I) {
   MI->getOperand(0).setReg(getSTReg(Op1));
   MI->setOpcode(getConcreteOpcode(MI->getOpcode()));
 
-
   // If we kill the second operand, make sure to pop it from the stack.
-  if (Op0 != Op1 && LV->KillsRegister(MI, X86::FP0+Op1)) {
+  if (Op0 != Op1 && KillsOp1) {
     // Get this value off of the register stack.
     freeStackSlotAfter(I, Op1);
   }
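Read together, the processBasicBlock hunks drop the LiveVariables::dead_range query (and the killed-operand DEBUG dump) in favour of the dead flags now carried on the operands themselves. In rough outline, not the literal code:

    // Collect registers this instruction defines that are already marked dead...
    SmallVector<unsigned, 8> DeadRegs;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isDead())
        DeadRegs.push_back(MO.getReg());
    }
    // ...handle the instruction, then pop any dead FP-stack values:
    for (unsigned i = 0, e = DeadRegs.size(); i != e; ++i)
      if (DeadRegs[i] >= X86::FP0 && DeadRegs[i] <= X86::FP6)
        freeStackSlotAfter(I, DeadRegs[i] - X86::FP0);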
@@ -525,8 +525,7 @@ void X86DAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
 
   // Finally, if we found any FP code, emit the FP_REG_KILL instruction.
   if (ContainsFPCode) {
-    BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0).
-      addImplicitDefsUses();
+    BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
     ++NumFPKill;
   }
 }
@@ -537,8 +536,7 @@ void X86DAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
 void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                              MachineFrameInfo *MFI) {
   if (Subtarget->isTargetCygwin())
-    BuildMI(BB, X86::CALLpcrel32, 1).addExternalSymbol("__main").
-      addImplicitDefsUses();
+    BuildMI(BB, X86::CALLpcrel32, 1).addExternalSymbol("__main");
 
   // Switch the FPU to 64-bit precision mode for better compatibility and speed.
   int CWFrameIdx = MFI->CreateStackObject(2, 2);
@@ -949,8 +947,7 @@ SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
     // type of register here.
     GlobalBaseReg = RegMap->createVirtualRegister(X86::GR32RegisterClass);
     BuildMI(FirstMBB, MBBI, X86::MovePCtoStack, 0);
-    BuildMI(FirstMBB, MBBI, X86::POP32r, 1, GlobalBaseReg).
-      addImplicitDefsUses();
+    BuildMI(FirstMBB, MBBI, X86::POP32r, 1, GlobalBaseReg);
   }
   return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).Val;
 }
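With implicit defs and uses presumably supplied by the TII-aware MachineInstr constructor, the trailing .addImplicitDefsUses() at these X86 call sites becomes redundant (the method itself is deleted in the header hunk above), and the builder chains simply get shorter, e.g.:

    // before
    //   BuildMI(BB, X86::CALLpcrel32, 1).addExternalSymbol("__main").addImplicitDefsUses();
    // after
    BuildMI(BB, X86::CALLpcrel32, 1).addExternalSymbol("__main");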
@@ -5076,7 +5076,7 @@ X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
     MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
     unsigned Opc =
       X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
-    BuildMI(BB, Opc, 1).addMBB(sinkMBB).addImplicitDefsUses();
+    BuildMI(BB, Opc, 1).addMBB(sinkMBB);
     MachineFunction *F = BB->getParent();
     F->getBasicBlockList().insert(It, copy0MBB);
     F->getBasicBlockList().insert(It, sinkMBB);
@@ -140,7 +140,7 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr *MI) const {
     unsigned C = MI->getOperand(2).getReg();
     unsigned M = MI->getOperand(3).getImmedValue();
     if (!Subtarget->hasSSE2() || B != C) return 0;
-    return BuildMI(X86::PSHUFDri, 2, A).addReg(B).addImm(M);
+    return BuildMI(*this, X86::PSHUFDri, 2, A).addReg(B).addImm(M);
   }
   }
 
@@ -157,35 +157,35 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr *MI) const {
   case X86::INC32r:
   case X86::INC64_32r:
     assert(MI->getNumOperands() == 2 && "Unknown inc instruction!");
-    return addRegOffset(BuildMI(X86::LEA32r, 5, Dest), Src, 1);
+    return addRegOffset(BuildMI(*this, X86::LEA32r, 5, Dest), Src, 1);
   case X86::INC16r:
   case X86::INC64_16r:
     if (DisableLEA16) return 0;
     assert(MI->getNumOperands() == 2 && "Unknown inc instruction!");
-    return addRegOffset(BuildMI(X86::LEA16r, 5, Dest), Src, 1);
+    return addRegOffset(BuildMI(*this, X86::LEA16r, 5, Dest), Src, 1);
   case X86::DEC32r:
   case X86::DEC64_32r:
     assert(MI->getNumOperands() == 2 && "Unknown dec instruction!");
-    return addRegOffset(BuildMI(X86::LEA32r, 5, Dest), Src, -1);
+    return addRegOffset(BuildMI(*this, X86::LEA32r, 5, Dest), Src, -1);
   case X86::DEC16r:
   case X86::DEC64_16r:
     if (DisableLEA16) return 0;
     assert(MI->getNumOperands() == 2 && "Unknown dec instruction!");
-    return addRegOffset(BuildMI(X86::LEA16r, 5, Dest), Src, -1);
+    return addRegOffset(BuildMI(*this, X86::LEA16r, 5, Dest), Src, -1);
   case X86::ADD32rr:
     assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
-    return addRegReg(BuildMI(X86::LEA32r, 5, Dest), Src,
+    return addRegReg(BuildMI(*this, X86::LEA32r, 5, Dest), Src,
                      MI->getOperand(2).getReg());
   case X86::ADD16rr:
     if (DisableLEA16) return 0;
     assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
-    return addRegReg(BuildMI(X86::LEA16r, 5, Dest), Src,
+    return addRegReg(BuildMI(*this, X86::LEA16r, 5, Dest), Src,
                      MI->getOperand(2).getReg());
   case X86::ADD32ri:
   case X86::ADD32ri8:
     assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
     if (MI->getOperand(2).isImmediate())
-      return addRegOffset(BuildMI(X86::LEA32r, 5, Dest), Src,
+      return addRegOffset(BuildMI(*this, X86::LEA32r, 5, Dest), Src,
                           MI->getOperand(2).getImmedValue());
     return 0;
   case X86::ADD16ri:
@@ -193,7 +193,7 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr *MI) const {
     if (DisableLEA16) return 0;
     assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
     if (MI->getOperand(2).isImmediate())
-      return addRegOffset(BuildMI(X86::LEA16r, 5, Dest), Src,
+      return addRegOffset(BuildMI(*this, X86::LEA16r, 5, Dest), Src,
                           MI->getOperand(2).getImmedValue());
     break;
 
@@ -208,7 +208,7 @@ MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr *MI) const {
     AM.Scale = 1 << ShAmt;
     AM.IndexReg = Src;
     unsigned Opc = MI->getOpcode() == X86::SHL32ri ? X86::LEA32r :X86::LEA16r;
-    return addFullAddress(BuildMI(Opc, 5, Dest), AM);
+    return addFullAddress(BuildMI(*this, Opc, 5, Dest), AM);
   }
   break;
   }
@@ -239,7 +239,7 @@ MachineInstr *X86InstrInfo::commuteInstruction(MachineInstr *MI) const {
     unsigned A = MI->getOperand(0).getReg();
     unsigned B = MI->getOperand(1).getReg();
     unsigned C = MI->getOperand(2).getReg();
-    return BuildMI(Opc, 3, A).addReg(C).addReg(B).addImm(Size-Amt);
+    return BuildMI(*this, Opc, 3, A).addReg(C).addReg(B).addImm(Size-Amt);
   }
   default:
     return TargetInstrInfo::commuteInstruction(MI);
@@ -165,7 +165,7 @@ static MachineInstr *FuseTwoAddrInst(unsigned Opcode, unsigned FrameIndex,
                                      const TargetInstrInfo &TII) {
   unsigned NumOps = TII.getNumOperands(MI->getOpcode())-2;
   // Create the base instruction with the memory operand as the first part.
-  MachineInstrBuilder MIB = addFrameReference(BuildMI(Opcode, 4+NumOps),
+  MachineInstrBuilder MIB = addFrameReference(BuildMI(TII, Opcode, 4+NumOps),
                                               FrameIndex);
 
   // Loop over the rest of the ri operands, converting them over.
@@ -188,7 +188,7 @@ static MachineInstr *FuseTwoAddrInst(unsigned Opcode, unsigned FrameIndex,
 static MachineInstr *FuseInst(unsigned Opcode, unsigned OpNo,
                               unsigned FrameIndex, MachineInstr *MI,
                               const TargetInstrInfo &TII) {
-  MachineInstrBuilder MIB = BuildMI(Opcode, MI->getNumOperands()+3);
+  MachineInstrBuilder MIB = BuildMI(TII, Opcode, MI->getNumOperands()+3);
 
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     MachineOperand &MO = MI->getOperand(i);
@@ -209,9 +209,10 @@ static MachineInstr *FuseInst(unsigned Opcode, unsigned OpNo,
   return MIB;
 }
 
-static MachineInstr *MakeM0Inst(unsigned Opcode, unsigned FrameIndex,
+static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII,
+                                unsigned Opcode, unsigned FrameIndex,
                                 MachineInstr *MI) {
-  return addFrameReference(BuildMI(Opcode, 5), FrameIndex).addImm(0);
+  return addFrameReference(BuildMI(TII, Opcode, 5), FrameIndex).addImm(0);
 }
 
 
@@ -464,13 +465,13 @@ MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
     isTwoAddrFold = true;
   } else if (i == 0) { // If operand 0
     if (MI->getOpcode() == X86::MOV16r0)
-      return MakeM0Inst(X86::MOV16mi, FrameIndex, MI);
+      return MakeM0Inst(TII, X86::MOV16mi, FrameIndex, MI);
     else if (MI->getOpcode() == X86::MOV32r0)
-      return MakeM0Inst(X86::MOV32mi, FrameIndex, MI);
+      return MakeM0Inst(TII, X86::MOV32mi, FrameIndex, MI);
     else if (MI->getOpcode() == X86::MOV64r0)
-      return MakeM0Inst(X86::MOV64mi32, FrameIndex, MI);
+      return MakeM0Inst(TII, X86::MOV64mi32, FrameIndex, MI);
     else if (MI->getOpcode() == X86::MOV8r0)
-      return MakeM0Inst(X86::MOV8mi, FrameIndex, MI);
+      return MakeM0Inst(TII, X86::MOV8mi, FrameIndex, MI);
 
     static const TableEntry OpcodeTable[] = {
       { X86::CMP16ri, X86::CMP16mi },
@@ -899,7 +900,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
 
     MachineInstr *New = 0;
     if (Old->getOpcode() == X86::ADJCALLSTACKDOWN) {
-      New=BuildMI(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri, 1, StackPtr)
+      New=BuildMI(TII, Is64Bit ? X86::SUB64ri32 : X86::SUB32ri, 1, StackPtr)
         .addReg(StackPtr).addImm(Amount);
     } else {
       assert(Old->getOpcode() == X86::ADJCALLSTACKUP);
@@ -910,7 +911,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
       unsigned Opc = (Amount < 128) ?
         (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
         (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
-      New = BuildMI(Opc, 1, StackPtr).addReg(StackPtr).addImm(Amount);
+      New = BuildMI(TII, Opc, 1, StackPtr).addReg(StackPtr).addImm(Amount);
     }
   }
 
@@ -926,7 +927,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
       (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
       (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
     MachineInstr *New =
-      BuildMI(Opc, 1, StackPtr).addReg(StackPtr).addImm(CalleeAmt);
+      BuildMI(TII, Opc, 1, StackPtr).addReg(StackPtr).addImm(CalleeAmt);
     MBB.insert(I, New);
   }
 }
@@ -1003,15 +1004,15 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
     // more than 4k bytes in one go. Touching the stack at 4K increments is
     // necessary to ensure that the guard pages used by the OS virtual memory
    // manager are allocated in correct sequence.
-    MI = BuildMI(X86::MOV32ri, 2, X86::EAX).addImm(NumBytes);
+    MI = BuildMI(TII, X86::MOV32ri, 2, X86::EAX).addImm(NumBytes);
     MBB.insert(MBBI, MI);
-    MI = BuildMI(X86::CALLpcrel32, 1).addExternalSymbol("_alloca");
+    MI = BuildMI(TII, X86::CALLpcrel32, 1).addExternalSymbol("_alloca");
     MBB.insert(MBBI, MI);
   } else {
     unsigned Opc = (NumBytes < 128) ?
       (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
       (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
-    MI= BuildMI(Opc, 1, StackPtr).addReg(StackPtr).addImm(NumBytes);
+    MI= BuildMI(TII, Opc, 1, StackPtr).addReg(StackPtr).addImm(NumBytes);
     MBB.insert(MBBI, MI);
   }
 }
@@ -1023,16 +1024,16 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
 
   // Save EBP into the appropriate stack slot...
   // mov [ESP-<offset>], EBP
-  MI = addRegOffset(BuildMI(Is64Bit ? X86::MOV64mr : X86::MOV32mr, 5),
+  MI = addRegOffset(BuildMI(TII, Is64Bit ? X86::MOV64mr : X86::MOV32mr, 5),
                     StackPtr, EBPOffset+NumBytes).addReg(FramePtr);
   MBB.insert(MBBI, MI);
 
   // Update EBP with the new base value...
   if (NumBytes == SlotSize) // mov EBP, ESP
-    MI = BuildMI(Is64Bit ? X86::MOV64rr : X86::MOV32rr, 2, FramePtr).
+    MI = BuildMI(TII, Is64Bit ? X86::MOV64rr : X86::MOV32rr, 2, FramePtr).
      addReg(StackPtr);
  else                      // lea EBP, [ESP+StackSize]
-    MI = addRegOffset(BuildMI(Is64Bit ? X86::LEA64r : X86::LEA32r,
+    MI = addRegOffset(BuildMI(TII, Is64Bit ? X86::LEA64r : X86::LEA32r,
                               5, FramePtr), StackPtr, NumBytes-SlotSize);
 
   MBB.insert(MBBI, MI);
@@ -1041,13 +1042,13 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
   // If it's main() on Cygwin\Mingw32 we should align stack as well
   if (Fn->hasExternalLinkage() && Fn->getName() == "main" &&
       Subtarget->isTargetCygwin()) {
-    MI = BuildMI(X86::AND32ri, 2, X86::ESP).addReg(X86::ESP).addImm(-Align);
+    MI= BuildMI(TII, X86::AND32ri, 2, X86::ESP).addReg(X86::ESP).addImm(-Align);
     MBB.insert(MBBI, MI);
 
     // Probe the stack
-    MI = BuildMI(X86::MOV32ri, 2, X86::EAX).addImm(Align);
+    MI = BuildMI(TII, X86::MOV32ri, 2, X86::EAX).addImm(Align);
     MBB.insert(MBBI, MI);
-    MI = BuildMI(X86::CALLpcrel32, 1).addExternalSymbol("_alloca");
+    MI = BuildMI(TII, X86::CALLpcrel32, 1).addExternalSymbol("_alloca");
     MBB.insert(MBBI, MI);
   }
 }
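One design note on the X86 folding helpers: rather than turning them into member functions, the commit keeps FuseTwoAddrInst, FuseInst and MakeM0Inst static and threads the TargetInstrInfo through as a parameter. FuseTwoAddrInst and FuseInst already received a TII argument and now simply forward it into BuildMI; MakeM0Inst gains the parameter, and foldMemoryOperand passes the TII reference it already has down to it, e.g.:

    return MakeM0Inst(TII, X86::MOV16mi, FrameIndex, MI);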