Check for stack space more intelligently.

libgcc sets the stack limit field in the TCB to 256 bytes above the actual
allocated stack limit.  This means that if the function's stack frame needs
less than 256 bytes, we can simply compare the stack pointer with the
stack limit.  This should result in fewer calls to __morestack.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@145766 91177308-0d34-0410-b5e6-96231b3b80d8
Sanjoy Das 2011-12-03 09:32:07 +00:00
parent 40f8222e1e
commit 199ce33b3b
2 changed files with 43 additions and 13 deletions
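The change is mechanical: when the statically known frame size is below the 256-byte slack that libgcc leaves in the TCB field, the backend skips the LEA that used to materialize sp - StackSize in a scratch register and compares the stack pointer against the TLS stack-limit slot directly. A sketch of the x86-64 check for test_basic (24-byte frame), following the CHECK lines in the updated test below; the AT&T syntax and register choices mirror those lines:

    # Before: compute rsp - StackSize into the scratch register, then compare.
    leaq    -24(%rsp), %r11
    cmpq    %fs:112, %r11        # %fs:0x70 holds the stack limit + 256

    # After: 24 < 256, so %rsp itself can be compared against the limit.
    cmpq    %fs:112, %rsp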

lib/Target/X86/X86FrameLowering.cpp

@@ -1306,6 +1306,10 @@ GetScratchRegister(bool Is64Bit, const MachineFunction &MF) {
   }
 }
 
+// The stack limit in the TCB is set to this many bytes above the actual stack
+// limit.
+static const uint64_t kSplitStackAvailable = 256;
+
 void
 X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
   MachineBasicBlock &prologueMBB = MF.front();
@@ -1360,16 +1364,24 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
     TlsReg = X86::FS;
     TlsOffset = 0x70;
 
-    BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
-      .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
+    if (StackSize < kSplitStackAvailable)
+      ScratchReg = X86::RSP;
+    else
+      BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
+        .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
+
     BuildMI(checkMBB, DL, TII.get(X86::CMP64rm)).addReg(ScratchReg)
       .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
   } else {
     TlsReg = X86::GS;
     TlsOffset = 0x30;
 
-    BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
-      .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
+    if (StackSize < kSplitStackAvailable)
+      ScratchReg = X86::ESP;
+    else
+      BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
+        .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
+
     BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
       .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
   }
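For reference, the memory operand on the CMP*rm instructions above is the standard five-part x86 form (base, scale, index, displacement, segment); with base and index both zero it reduces to a pure segment-relative access at TlsOffset. What the 64-bit pair assembles to, with StackSize standing in for the concrete frame size (112 == 0x70 == TlsOffset):

    leaq    -StackSize(%rsp), %r11    # emitted only when StackSize >= kSplitStackAvailable
    cmpq    %fs:112, %r11             # in the small-frame case ScratchReg is %rsp itself

Large frames still take the old two-instruction path, which the new test_large case below pins down.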

test/CodeGen/X86/segmented-stacks.ll

@@ -20,8 +20,7 @@ false:
 ; X32: test_basic:
 
-; X32: leal -12(%esp), %ecx
-; X32-NEXT: cmpl %gs:48, %ecx
+; X32: cmpl %gs:48, %esp
 
 ; X32: pushl $4
 ; X32-NEXT: pushl $12
@@ -41,8 +40,7 @@ false:
 ; X64: test_basic:
 
-; X64: leaq -24(%rsp), %r11
-; X64-NEXT: cmpq %fs:112, %r11
+; X64: cmpq %fs:112, %rsp
 
 ; X64: movabsq $24, %r10
 ; X64-NEXT: movabsq $0, %r11
@@ -66,17 +64,14 @@ define i32 @test_nested(i32 * nest %closure, i32 %other) {
   %result = add i32 %other, %addend
   ret i32 %result
 
-; X32: leal (%esp), %edx
-; X32-NEXT: cmpl %gs:48, %edx
+; X32: cmpl %gs:48, %esp
 
 ; X32: pushl $4
 ; X32-NEXT: pushl $0
 ; X32-NEXT: calll __morestack
 ; X32-NEXT: ret
 
-; X64: leaq (%rsp), %r11
-; X64-NEXT: cmpq %fs:112, %r11
+; X64: cmpq %fs:112, %rsp
 
 ; X64: movq %r10, %rax
 ; X64-NEXT: movabsq $0, %r10
@@ -86,3 +81,26 @@ define i32 @test_nested(i32 * nest %closure, i32 %other) {
 ; X64-NEXT: movq %rax, %r10
 
 }
+
+define void @test_large() {
+  %mem = alloca i32, i32 10000
+  call void @dummy_use (i32* %mem, i32 0)
+  ret void
+
+; X32: leal -40012(%esp), %ecx
+; X32-NEXT: cmpl %gs:48, %ecx
+
+; X32: pushl $0
+; X32-NEXT: pushl $40012
+; X32-NEXT: calll __morestack
+; X32-NEXT: ret
+
+; X64: leaq -40008(%rsp), %r11
+; X64-NEXT: cmpq %fs:112, %r11
+
+; X64: movabsq $40008, %r10
+; X64-NEXT: movabsq $0, %r11
+; X64-NEXT: callq __morestack
+; X64-NEXT: ret
+
+}