mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-01-14 16:33:28 +00:00
musttail: Only set the inreg flag for fastcall and vectorcall
Otherwise we'll attempt to forward ECX, EDX, and EAX for cdecl and stdcall thunks, leaving us with no scratch registers for indirect call targets. Fixes PR22052. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@225729 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
549b6dbbb7
commit
1ec250a32f
@ -181,15 +181,28 @@ void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
|
||||
}
|
||||
}
|
||||
|
||||
/// Return true if an argument of value type \p VT may be passed in an integer
/// or SSE register under calling convention \p CC, meaning the 'inreg' flag
/// should be set when probing register assignment for it.
static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
  // Vector types can end up in SSE registers; assume -msse-regparm might be
  // in effect.
  if (VT.isVector())
    return true;
  // Non-integer scalar types are never register parameters here.
  if (!VT.isInteger())
    return false;
  // Only fastcall and vectorcall pass leading integer arguments in registers;
  // cdecl/stdcall must not, or thunks would have no scratch registers left
  // for indirect call targets (PR22052).
  return CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall;
}
|
||||
|
||||
void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
|
||||
MVT VT, CCAssignFn Fn) {
|
||||
unsigned SavedStackOffset = StackOffset;
|
||||
unsigned NumLocs = Locs.size();
|
||||
|
||||
// Allocate something of this value type repeatedly with just the inreg flag
|
||||
// set until we get assigned a location in memory.
|
||||
// Set the 'inreg' flag if it is used for this calling convention.
|
||||
ISD::ArgFlagsTy Flags;
|
||||
Flags.setInReg();
|
||||
if (isValueTypeInRegForCC(CallingConv, VT))
|
||||
Flags.setInReg();
|
||||
|
||||
// Allocate something of this value type repeatedly until we get assigned a
|
||||
// location in memory.
|
||||
bool HaveRegParm = true;
|
||||
while (HaveRegParm) {
|
||||
if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
|
||||
|
@ -1,5 +1,6 @@
|
||||
; RUN: llc < %s -enable-tail-merge=0 -mtriple=x86_64-linux | FileCheck %s --check-prefix=LINUX
|
||||
; RUN: llc < %s -enable-tail-merge=0 -mtriple=x86_64-windows | FileCheck %s --check-prefix=WINDOWS
|
||||
; RUN: llc < %s -enable-tail-merge=0 -mtriple=i686-windows | FileCheck %s --check-prefix=X86
|
||||
|
||||
; Test that we actually spill and reload all arguments in the variadic argument
|
||||
; pack. Doing a normal call will clobber all argument registers, and we will
|
||||
@ -72,6 +73,12 @@ define void @f_thunk(i8* %this, ...) {
|
||||
; WINDOWS-NOT: mov{{.}}ps
|
||||
; WINDOWS: jmpq *{{.*}} # TAILCALL
|
||||
|
||||
; No regparms on normal x86 conventions.
|
||||
|
||||
; X86-LABEL: _f_thunk:
|
||||
; X86: calll _get_f
|
||||
; X86: jmpl *{{.*}} # TAILCALL
|
||||
|
||||
; This thunk shouldn't require any spills and reloads, assuming the register
|
||||
; allocator knows what it's doing.
|
||||
|
||||
@ -89,6 +96,9 @@ define void @g_thunk(i8* %fptr_i8, ...) {
|
||||
; WINDOWS-NOT: movq
|
||||
; WINDOWS: jmpq *%rcx # TAILCALL
|
||||
|
||||
; X86-LABEL: _g_thunk:
|
||||
; X86: jmpl *%eax # TAILCALL
|
||||
|
||||
; Do a simple multi-exit multi-bb test.
|
||||
|
||||
%struct.Foo = type { i1, i8*, i8* }
|
||||
@ -124,3 +134,7 @@ else:
|
||||
; WINDOWS: jne
|
||||
; WINDOWS: jmpq *{{.*}} # TAILCALL
|
||||
; WINDOWS: jmpq *{{.*}} # TAILCALL
|
||||
; X86-LABEL: _h_thunk:
|
||||
; X86: jne
|
||||
; X86: jmpl *{{.*}} # TAILCALL
|
||||
; X86: jmpl *{{.*}} # TAILCALL
|
||||
|
Loading…
x
Reference in New Issue
Block a user