From 199ce33b3bfa0b6293946c1b835da2a1fbc8cab4 Mon Sep 17 00:00:00 2001
From: Sanjoy Das
Date: Sat, 3 Dec 2011 09:32:07 +0000
Subject: [PATCH] Check for stack space more intelligently.

libgcc sets the stack limit field in the TCB to 256 bytes above the
actual allocated stack limit. This means that if a function's stack
frame needs less than 256 bytes, we can compare the stack pointer
directly against the stack limit. This should result in fewer calls
to __morestack.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@145766 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86FrameLowering.cpp  | 20 ++++++++++++----
 test/CodeGen/X86/segmented-stacks.ll | 36 +++++++++++++++++++++-------
 2 files changed, 43 insertions(+), 13 deletions(-)

diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index 182af84f7ca..27b762af69f 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -1306,6 +1306,10 @@ GetScratchRegister(bool Is64Bit, const MachineFunction &MF) {
   }
 }
 
+// The stack limit in the TCB is set to this many bytes above the actual stack
+// limit.
+static const uint64_t kSplitStackAvailable = 256;
+
 void
 X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
   MachineBasicBlock &prologueMBB = MF.front();
@@ -1360,16 +1364,24 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
     TlsReg = X86::FS;
     TlsOffset = 0x70;
 
-    BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
-      .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
+    if (StackSize < kSplitStackAvailable)
+      ScratchReg = X86::RSP;
+    else
+      BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
+        .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
+
     BuildMI(checkMBB, DL, TII.get(X86::CMP64rm)).addReg(ScratchReg)
       .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
   } else {
     TlsReg = X86::GS;
     TlsOffset = 0x30;
 
-    BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
-      .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
+    if (StackSize < kSplitStackAvailable)
+      ScratchReg = X86::ESP;
+    else
+      BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
+        .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
+
     BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
       .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
   }
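
For readers of the diff, a minimal C++ sketch (not part of the patch) of the
check the emitted prologue now performs. tcb_stack_limit() is a hypothetical
stand-in for the load from the TCB slot (%fs:0x70 on x86-64, %gs:0x30 on
x86-32); only the comparison logic mirrors the code above:

    #include <cstdint>

    // The limit stored in the TCB sits kSplitStackAvailable bytes above the
    // real stack limit, so frames smaller than that can test SP directly.
    static const uint64_t kSplitStackAvailable = 256;

    uint64_t tcb_stack_limit();  // hypothetical: models the TCB limit load

    // True when the prologue must call __morestack.
    bool needsMoreStack(uint64_t StackPtr, uint64_t StackSize) {
      uint64_t Scratch = StackPtr;            // small frame: compare SP as-is
      if (StackSize >= kSplitStackAvailable)  // large frame: LEA -StackSize(SP)
        Scratch = StackPtr - StackSize;
      return Scratch < tcb_stack_limit();     // below the limit => __morestack
    }

Because the TCB value already includes the 256-byte cushion, the small-frame
path elides the LEA entirely, which is what the test updates below check.
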
diff --git a/test/CodeGen/X86/segmented-stacks.ll b/test/CodeGen/X86/segmented-stacks.ll
index c28c31911d2..4f529c119f0 100644
--- a/test/CodeGen/X86/segmented-stacks.ll
+++ b/test/CodeGen/X86/segmented-stacks.ll
@@ -20,8 +20,7 @@ false:
 
 ; X32: test_basic:
 
-; X32: leal -12(%esp), %ecx
-; X32-NEXT: cmpl %gs:48, %ecx
+; X32: cmpl %gs:48, %esp
 
 ; X32: pushl $4
 ; X32-NEXT: pushl $12
@@ -41,8 +40,7 @@ false:
 
 ; X64: test_basic:
 
-; X64: leaq -24(%rsp), %r11
-; X64-NEXT: cmpq %fs:112, %r11
+; X64: cmpq %fs:112, %rsp
 
 ; X64: movabsq $24, %r10
 ; X64-NEXT: movabsq $0, %r11
@@ -66,17 +64,14 @@ define i32 @test_nested(i32 * nest %closure, i32 %other) {
 
   %result = add i32 %other, %addend
   ret i32 %result
 
-; X32: leal (%esp), %edx
-; X32-NEXT: cmpl %gs:48, %edx
-
+; X32: cmpl %gs:48, %esp
; X32: pushl $4
 ; X32-NEXT: pushl $0
 ; X32-NEXT: calll __morestack
 ; X32-NEXT: ret
 
-; X64: leaq (%rsp), %r11
-; X64-NEXT: cmpq %fs:112, %r11
+; X64: cmpq %fs:112, %rsp
 
 ; X64: movq %r10, %rax
 ; X64-NEXT: movabsq $0, %r10
@@ -86,3 +81,26 @@ define i32 @test_nested(i32 * nest %closure, i32 %other) {
 ; X64-NEXT: movq %rax, %r10
 
 }
+
+define void @test_large() {
+  %mem = alloca i32, i32 10000
+  call void @dummy_use (i32* %mem, i32 0)
+  ret void
+
+; X32: leal -40012(%esp), %ecx
+; X32-NEXT: cmpl %gs:48, %ecx
+
+; X32: pushl $0
+; X32-NEXT: pushl $40012
+; X32-NEXT: calll __morestack
+; X32-NEXT: ret
+
+; X64: leaq -40008(%rsp), %r11
+; X64-NEXT: cmpq %fs:112, %r11
+
+; X64: movabsq $40008, %r10
+; X64-NEXT: movabsq $0, %r11
+; X64-NEXT: callq __morestack
+; X64-NEXT: ret
+
+}
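
As a cross-check on the constants in test_large (a sketch; the 12- and 8-byte
overheads are inferred from the expected output above, not read out of the
backend):

    #include <cstdint>

    // %mem = alloca i32, i32 10000 reserves 10000 * 4 = 40000 bytes.
    static const uint64_t AllocaBytes = 10000 * 4;

    static_assert(AllocaBytes + 12 == 40012, "x86-32 frame size checked above");
    static_assert(AllocaBytes + 8  == 40008, "x86-64 frame size checked above");
    // Both sizes far exceed kSplitStackAvailable (256), so test_large keeps
    // the leal/leaq path, while test_basic and test_nested now compare
    // %esp/%rsp directly against the TLS stack limit.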