Split x86's ADJCALLSTACK instructions into 32-bit and 64-bit forms.

This allows the 64-bit forms to use and def RSP instead of ESP. This
doesn't fix any real bugs today, but it is more precise, and it makes
the debug dumps on x86-64 look more consistent.

Also, add some comments describing the CALL instructions' physreg
operand uses and defs.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@56925 91177308-0d34-0410-b5e6-96231b3b80d8
Dan Gohman, 2008-10-01 18:28:06 +0000
commit 6d4b052579 (parent efd30ba798)
4 changed files with 45 additions and 14 deletions
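
For orientation, here is the heart of the change, condensed from the
X86RegisterInfo constructor hunk below (a sketch, not a quote of the patch):

// Pick the call-frame pseudo opcodes once, based on the subtarget's mode.
bool Is64Bit = tm.getSubtarget<X86Subtarget>().is64Bit();
unsigned CallFrameSetupOp   = Is64Bit ? X86::ADJCALLSTACKDOWN64
                                      : X86::ADJCALLSTACKDOWN32;
unsigned CallFrameDestroyOp = Is64Bit ? X86::ADJCALLSTACKUP64
                                      : X86::ADJCALLSTACKUP32;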

@@ -960,7 +960,8 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
   unsigned NumBytes = CCInfo.getNextStackOffset();

   // Issue CALLSEQ_START
-  BuildMI(MBB, TII.get(X86::ADJCALLSTACKDOWN)).addImm(NumBytes);
+  unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
+  BuildMI(MBB, TII.get(AdjStackDown)).addImm(NumBytes);

   // Process argument: walk the register/memloc assignments, inserting
   // copies / loads.
@ -1051,7 +1052,8 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
}
// Issue CALLSEQ_END
BuildMI(MBB, TII.get(X86::ADJCALLSTACKUP)).addImm(NumBytes).addImm(0);
unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
BuildMI(MBB, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);
// Now handle call return value (if any).
if (RetVT.getSimpleVT() != MVT::isVoid) {
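
With both hunks applied, fast isel no longer names the pseudos directly. A
minimal sketch of the resulting emission pattern, using only the calls visible
in the hunks above (argument copies and the CALL itself elided):

unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
BuildMI(MBB, TII.get(AdjStackDown)).addImm(NumBytes);          // CALLSEQ_START
// ... materialize arguments and emit the CALL ...
unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
BuildMI(MBB, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);  // CALLSEQ_END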

@@ -86,11 +86,30 @@ def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
 // Instruction list...
 //

+// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
+// a stack adjustment and the codegen must know that they may modify the stack
+// pointer before prolog-epilog rewriting occurs.
+// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
+// sub / add which can clobber EFLAGS.
+let Defs = [RSP, EFLAGS], Uses = [RSP] in {
+def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
+                           "#ADJCALLSTACKDOWN",
+                           [(X86callseq_start imm:$amt)]>,
+                          Requires<[In64BitMode]>;
+def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
+                           "#ADJCALLSTACKUP",
+                           [(X86callseq_end imm:$amt1, imm:$amt2)]>,
+                          Requires<[In64BitMode]>;
+}
+
 //===----------------------------------------------------------------------===//
 //  Call Instructions...
 //
 let isCall = 1 in
-  // All calls clobber the non-callee saved registers...
+  // All calls clobber the non-callee saved registers. RSP is marked as
+  // a use to prevent stack-pointer assignments that appear immediately
+  // before calls from potentially appearing dead. Uses for argument
+  // registers are added manually.
   let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
               FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
               MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
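
The implicit operands declared in the let block become part of the
instruction's static description, so later passes see the RSP use/def without
special-casing. A sketch of how that surfaces, assuming TargetInstrDesc's
null-terminated implicit-operand lists as they exist in this era's API:

const TargetInstrDesc &TID = TII.get(X86::ADJCALLSTACKDOWN64);
// The 64-bit pseudo's implicit defs are now RSP and EFLAGS, not ESP.
for (const unsigned *ImpDef = TID.getImplicitDefs(); ImpDef && *ImpDef; ++ImpDef)
  assert(*ImpDef == X86::RSP || *ImpDef == X86::EFLAGS);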

@@ -321,12 +321,14 @@ def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
 // Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
 // sub / add which can clobber EFLAGS.
 let Defs = [ESP, EFLAGS], Uses = [ESP] in {
-def ADJCALLSTACKDOWN : I<0, Pseudo, (outs), (ins i32imm:$amt),
-                         "#ADJCALLSTACKDOWN",
-                         [(X86callseq_start imm:$amt)]>;
-def ADJCALLSTACKUP   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
-                         "#ADJCALLSTACKUP",
-                         [(X86callseq_end imm:$amt1, imm:$amt2)]>;
+def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
+                           "#ADJCALLSTACKDOWN",
+                           [(X86callseq_start imm:$amt)]>,
+                          Requires<[In32BitMode]>;
+def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
+                           "#ADJCALLSTACKUP",
+                           [(X86callseq_end imm:$amt1, imm:$amt2)]>,
+                          Requires<[In32BitMode]>;
 }

 // Nop
@@ -411,7 +413,10 @@ def JNO : IBr<0x81, (ins brtarget:$dst), "jno\t$dst",
 //  Call Instructions...
 //
 let isCall = 1 in
-  // All calls clobber the non-callee saved registers...
+  // All calls clobber the non-callee saved registers. ESP is marked as
+  // a use to prevent stack-pointer assignments that appear immediately
+  // before calls from potentially appearing dead. Uses for argument
+  // registers are added manually.
   let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
               MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
               XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, EFLAGS],
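
The "appearing dead" concern in the new comment is concrete: a def of ESP just
before a call has no explicit reader, so only the CALL's implicit ESP use keeps
a liveness-based cleanup from deleting it. A sketch of the kind of query such a
pass could make (readsReg is a hypothetical helper; the implicit-use list and
X86::CALL32r are real):

// Returns true if the instruction description lists Reg as an implicit use.
static bool readsReg(const TargetInstrDesc &TID, unsigned Reg) {
  for (const unsigned *U = TID.getImplicitUses(); U && *U; ++U)
    if (*U == Reg)
      return true;
  return false;
}
// With ESP in the CALL's Uses list, a preceding write to ESP is visibly live:
//   readsReg(TII.get(X86::CALL32r), X86::ESP) == true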

@@ -42,7 +42,12 @@ using namespace llvm;

 X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                  const TargetInstrInfo &tii)
-  : X86GenRegisterInfo(X86::ADJCALLSTACKDOWN, X86::ADJCALLSTACKUP),
+  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
+                         X86::ADJCALLSTACKDOWN64 :
+                         X86::ADJCALLSTACKDOWN32,
+                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
+                         X86::ADJCALLSTACKUP64 :
+                         X86::ADJCALLSTACKUP32),
     TM(tm), TII(tii) {
   // Cache some information.
   const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
@@ -367,11 +372,11 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
     Amount = (Amount+StackAlign-1)/StackAlign*StackAlign;

     MachineInstr *New = 0;
-    if (Old->getOpcode() == X86::ADJCALLSTACKDOWN) {
+    if (Old->getOpcode() == getCallFrameSetupOpcode()) {
       New = BuildMI(MF, TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
                     StackPtr).addReg(StackPtr).addImm(Amount);
     } else {
-      assert(Old->getOpcode() == X86::ADJCALLSTACKUP);
+      assert(Old->getOpcode() == getCallFrameDestroyOpcode());
       // factor out the amount the callee already popped.
       uint64_t CalleeAmt = Old->getOperand(1).getImm();
       Amount -= CalleeAmt;
@@ -387,7 +392,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
       // Replace the pseudo instruction with a new instruction...
       if (New) MBB.insert(I, New);
     }
-  } else if (I->getOpcode() == X86::ADJCALLSTACKUP) {
+  } else if (I->getOpcode() == getCallFrameDestroyOpcode()) {
     // If we are performing frame pointer elimination and if the callee pops
     // something off the stack pointer, add it back. We do this until we have
     // more advanced stack pointer tracking ability.
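
For completeness, a sketch of the caller side, modeled on how prolog/epilog
insertion walks a block looking for these pseudos (the loop body is
illustrative, not a quote of that pass):

unsigned SetupOp   = getCallFrameSetupOpcode();
unsigned DestroyOp = getCallFrameDestroyOpcode();
for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) {
  MachineBasicBlock::iterator Cur = I++;  // Cur may be erased by the callee.
  if (Cur->getOpcode() == SetupOp || Cur->getOpcode() == DestroyOp)
    eliminateCallFramePseudoInstr(MF, MBB, Cur);
}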