Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-12-14 11:32:34 +00:00
Simplify X86CompilationCallback: always align to 16-byte boundary; don't save EAX/EDX if unnecessary.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@28910 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 89d67faf30
commit da08d2c39a
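
The commit message summarizes both changes in the diff below: on Darwin the compilation callback now always realigns the stack with "andl $-16, %esp" instead of the marker-based arithmetic it used before, and the EAX/EDX saves are only assembled when FASTCC_NUM_INT_ARGS_INREGS is non-zero. As a rough, self-contained illustration of the alignment step (a sketch, not code from this commit), masking off the low four bits rounds an address down to a 16-byte boundary:

    #include <cassert>
    #include <cstdint>

    // Sketch of what "andl $-16, %esp" computes: round a stack pointer down
    // to the nearest 16-byte boundary. Illustrative only, not LLVM source.
    static std::uintptr_t AlignDown16(std::uintptr_t SP) {
      return SP & ~static_cast<std::uintptr_t>(15); // same bit pattern as SP & -16
    }

    int main() {
      assert(AlignDown16(0x1003) == 0x1000); // unaligned pointer rounds down
      assert(AlignDown16(0x1010) == 0x1010); // aligned pointer is unchanged
      return 0;
    }
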
@@ -762,26 +762,6 @@ SDOperand X86TargetLowering::LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG) {
// (when we have a global fp allocator) and do other tricks.
//

// FASTCC_NUM_INT_ARGS_INREGS - This is the max number of integer arguments
// to pass in registers. 0 is none, 1 is "use EAX", 2 is "use EAX and
// EDX". Anything more is illegal.
//
// FIXME: The linscan register allocator currently has problems with
// coalescing. At the time of this writing, whenever it decides to coalesce
// a physreg with a virtreg, this increases the size of the physreg's live
// range, and the live range cannot ever be reduced. This causes problems if
// too many physregs are coalesced with virtregs, which can cause the register
// allocator to wedge itself.
//
// This code triggers this problem more often if we pass args in registers,
// so disable it until this is fixed.
//
// NOTE: this isn't marked const, so that GCC doesn't emit annoying warnings
// about code being dead.
//
static unsigned FASTCC_NUM_INT_ARGS_INREGS = 0;

/// HowToPassFastCCArgument - Returns how a formal argument of the specified
/// type should be passed. If it is through stack, returns the size of the stack
/// slot; if it is through integer or XMM register, returns the number of
@@ -798,30 +778,38 @@ HowToPassFastCCArgument(MVT::ValueType ObjectVT,
  switch (ObjectVT) {
  default: assert(0 && "Unhandled argument type!");
  case MVT::i8:
#if FASTCC_NUM_INT_ARGS_INREGS > 0
    if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
      ObjIntRegs = 1;
    else
#endif
      ObjSize = 1;
    break;
  case MVT::i16:
#if FASTCC_NUM_INT_ARGS_INREGS > 0
    if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
      ObjIntRegs = 1;
    else
#endif
      ObjSize = 2;
    break;
  case MVT::i32:
#if FASTCC_NUM_INT_ARGS_INREGS > 0
    if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
      ObjIntRegs = 1;
    else
#endif
      ObjSize = 4;
    break;
  case MVT::i64:
#if FASTCC_NUM_INT_ARGS_INREGS > 0
    if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
      ObjIntRegs = 2;
    } else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
      ObjIntRegs = 1;
      ObjSize = 4;
    } else
#endif
      ObjSize = 8;
  case MVT::f32:
    ObjSize = 4;
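
The doc comment and switch above encode a small decision table: an i8/i16/i32 argument takes one register if one is still free and a stack slot of its own size otherwise, while an i64 can take two registers, one register plus a 4-byte slot, or an 8-byte slot; with FASTCC_NUM_INT_ARGS_INREGS defined as 0, every case falls through to the stack. A standalone sketch of the i64 rule (hypothetical helper, not the LLVM function itself; MaxIntRegs stands in for FASTCC_NUM_INT_ARGS_INREGS):

    #include <cassert>

    // Illustrative only: registers and stack bytes an i64 fastcc argument
    // would consume, given how many integer registers are already used.
    static void SketchPassI64(unsigned NumIntRegsUsed, unsigned MaxIntRegs,
                              unsigned &ObjIntRegs, unsigned &ObjSize) {
      ObjIntRegs = 0;
      ObjSize = 0;
      if (NumIntRegsUsed + 2 <= MaxIntRegs) {
        ObjIntRegs = 2;      // both 32-bit halves travel in registers
      } else if (NumIntRegsUsed + 1 <= MaxIntRegs) {
        ObjIntRegs = 1;      // low half in a register,
        ObjSize = 4;         // high half in a 4-byte stack slot
      } else {
        ObjSize = 8;         // whole value in an 8-byte stack slot
      }
    }

    int main() {
      unsigned Regs, Bytes;
      SketchPassI64(0, 2, Regs, Bytes); // two registers free (EAX and EDX)
      assert(Regs == 2 && Bytes == 0);
      SketchPassI64(0, 0, Regs, Bytes); // register passing disabled (the default)
      assert(Regs == 0 && Bytes == 8);
      return 0;
    }
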
@@ -1027,10 +1015,12 @@ SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG)
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
#if FASTCC_NUM_INT_ARGS_INREGS > 0
      if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
        ++NumIntRegs;
        break;
      }
#endif
      // Fall through
    case MVT::f32:
      NumBytes += 4;
@@ -1076,6 +1066,7 @@ SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG)
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
#if FASTCC_NUM_INT_ARGS_INREGS > 0
      if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
        RegsToPass.push_back(
          std::make_pair(GPRArgRegs[Arg.getValueType()-MVT::i8][NumIntRegs],
@@ -1083,6 +1074,7 @@ SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG)
        ++NumIntRegs;
        break;
      }
#endif
      // Fall through
    case MVT::f32: {
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
@@ -370,4 +370,20 @@ namespace llvm {
  };
}

// FASTCC_NUM_INT_ARGS_INREGS - This is the max number of integer arguments
// to pass in registers. 0 is none, 1 is "use EAX", 2 is "use EAX and
// EDX". Anything more is illegal.
//
// FIXME: The linscan register allocator currently has problems with
// coalescing. At the time of this writing, whenever it decides to coalesce
// a physreg with a virtreg, this increases the size of the physreg's live
// range, and the live range cannot ever be reduced. This causes problems if
// too many physregs are coalesced with virtregs, which can cause the register
// allocator to wedge itself.
//
// This code triggers this problem more often if we pass args in registers,
// so disable it until this is fixed.
//
#define FASTCC_NUM_INT_ARGS_INREGS 0

#endif // X86ISELLOWERING_H
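
With the limit now a preprocessor #define in the header instead of a static variable in the .cpp file, the register-argument paths can be stripped out at compile time, which is what the #if FASTCC_NUM_INT_ARGS_INREGS > 0 guards in the lowering hunks above do. A minimal sketch of the same pattern (illustrative names, not the real call-lowering code):

    #include <cassert>

    #define FASTCC_NUM_INT_ARGS_INREGS 0 // 0 = none, 1 = EAX, 2 = EAX and EDX

    // Illustrative only: stack bytes needed for an i32 argument once any
    // in-register slots are used up; with the macro at 0, the preprocessor
    // removes the register branch entirely.
    unsigned StackBytesForI32(unsigned NumIntRegsUsed) {
    #if FASTCC_NUM_INT_ARGS_INREGS > 0
      if (NumIntRegsUsed < FASTCC_NUM_INT_ARGS_INREGS)
        return 0;                   // argument is passed in a register
    #endif
      (void)NumIntRegsUsed;         // silence unused-parameter warnings
      return 4;                     // otherwise it occupies a 4-byte slot
    }

    int main() {
      assert(StackBytesForI32(0) == 4); // with the default of 0, always on the stack
      return 0;
    }
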
@@ -57,26 +57,28 @@ extern "C" {
#endif
    "pushl %ebp\n"
    "movl %esp, %ebp\n"    // Standard prologue
#if FASTCC_NUM_INT_ARGS_INREGS > 0
    "pushl %eax\n"
    "pushl %edx\n"         // save EAX/EDX
#if defined(__CYGWIN__) || defined(__MINGW32__)
    "pushl %edx\n"         // Save EAX/EDX
#endif
#if defined(__APPLE__)
    "andl $-16, %esp\n"    // Align ESP on 16-byte boundary
#endif
#if defined(__CYGWIN__) || defined(__MINGW32__) || defined(__APPLE__)
    "call _X86CompilationCallback2\n"
#elif defined(__APPLE__)
    "movl 4(%ebp), %eax\n" // load the address of return address
    "movl $24, %edx\n"     // if the opcode of the instruction at the
    "cmpb $-51, (%eax)\n"  // return address is our 0xCD marker, then
    "movl $12, %eax\n"     // subtract 24 from %esp to realign it to 16
    "cmovne %eax, %edx\n"  // bytes after the push of edx, the amount to.
    "subl %edx, %esp\n"    // the push of edx to keep it aligned.
    "pushl %edx\n"         // subtract. Otherwise, subtract 12 bytes after
    "call _X86CompilationCallback2\n"
    "popl %edx\n"
    "addl %edx, %esp\n"
#else
    "call X86CompilationCallback2\n"
"call X86CompilationCallback2\n"
|
||||
#endif
#if defined(__APPLE__)
    "movl %ebp, %esp\n"    // Restore ESP
#endif
#if FASTCC_NUM_INT_ARGS_INREGS > 0
#if defined(__APPLE__)
    "subl $8, %esp\n"
#endif
    "popl %edx\n"
    "popl %eax\n"
#endif
    "popl %ebp\n"
    "ret\n");
#else
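
In the Darwin branch above, the marker-based sequence (the code this commit replaces, per the summary line) inspected the return address for the 0xCD marker to pick how many bytes to subtract, pushed that amount, and added it back after the call. With the frame pointer saved by the standard prologue, that bookkeeping is unnecessary: "movl %ebp, %esp" restores the stack pointer in one step, no matter how "andl $-16, %esp" or the optional EAX/EDX pushes moved it. A small numeric sketch of that invariant (made-up addresses, not real machine state):

    #include <cassert>
    #include <cstdint>

    int main() {
      std::uintptr_t esp = 0x0000BFFC;           // ESP right after "pushl %ebp"
      const std::uintptr_t ebp = esp;            // "movl %esp, %ebp"
      esp -= 8;                                  // optional "pushl %eax" / "pushl %edx"
      esp &= ~static_cast<std::uintptr_t>(15);   // "andl $-16, %esp"
      assert(esp % 16 == 0);                     // stack now on a 16-byte boundary
      // ... "call X86CompilationCallback2" would happen here ...
      esp = ebp;                                 // "movl %ebp, %esp" undoes all of it
      assert(esp == 0x0000BFFC);                 // back where the prologue left it
      return 0;
    }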