llvm-6502/test/CodeGen/X86/tailcallbyval64.ll

; RUN: llc < %s -mtriple=x86_64-linux -tailcallopt | FileCheck %s
; FIXME: Win64 does not support byval.
; Expect the entry point.
; CHECK: tailcaller:
; Expect 2 rep;movs because of tail call byval lowering.
; CHECK: rep;
; CHECK: rep;
; Byval lowering happens after the other arguments have already been moved
; into registers, so a sequence of copy-to/copy-from virtual registers is used
; to carry them across it. The following two checks verify that the register
; allocator turns those sequences into direct moves into the argument
; registers where it can, i.e. for registers that are not used by the byval
; lowering itself (not rsi, rdi, or rcx).
; Expect argument 4 to be moved directly to register edx.
; CHECK: movl $7, %edx
; Expect argument 6 to be moved directly to register r8.
; CHECK: movl $17, %r8d
; Expect a jmp to @tailcallee rather than a call.
; CHECK: jmp tailcallee
; Expect the trailer.
; CHECK: .size tailcaller
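;
; A rough sketch of the shape of output these checks are meant to match
; (assumed here for illustration, not verbatim llc output):
;
;   tailcaller:
;     ...
;     rep;movsq               # first byval copy
;     ...
;     rep;movsq               # second byval copy
;     ...
;     movl  $7, %edx          # argument 4 placed directly in edx
;     movl  $17, %r8d         # argument 6 placed directly in r8
;     jmp   tailcallee        # tail call lowered to jmp, not call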
%struct.s = type { i64, i64, i64, i64, i64, i64, i64, i64,
                   i64, i64, i64, i64, i64, i64, i64, i64,
                   i64, i64, i64, i64, i64, i64, i64, i64 }
declare fastcc i64 @tailcallee(%struct.s* byval %a, i64 %val, i64 %val2, i64 %val3, i64 %val4, i64 %val5)
define fastcc i64 @tailcaller(i64 %b, %struct.s* byval %a) {
entry:
  %tmp2 = getelementptr %struct.s* %a, i32 0, i32 1
  %tmp3 = load i64* %tmp2, align 8
  %tmp4 = tail call fastcc i64 @tailcallee(%struct.s* %a byval, i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
  ret i64 %tmp4
}
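
For readers used to current LLVM IR, the body of this test would look roughly
as follows in modern opaque-pointer syntax. This is a sketch for comparison
only, not part of the release_29 file: byval now carries an explicit pointee
type, and load/getelementptr spell out the pointee type instead of relying on
typed pointers.

; Sketch only: modern-syntax equivalent of the test above, not the original file.
%struct.s = type { i64, i64, i64, i64, i64, i64, i64, i64,
                   i64, i64, i64, i64, i64, i64, i64, i64,
                   i64, i64, i64, i64, i64, i64, i64, i64 }

declare fastcc i64 @tailcallee(ptr byval(%struct.s) %a, i64 %val, i64 %val2, i64 %val3, i64 %val4, i64 %val5)

define fastcc i64 @tailcaller(i64 %b, ptr byval(%struct.s) %a) {
entry:
  %tmp2 = getelementptr %struct.s, ptr %a, i32 0, i32 1
  %tmp3 = load i64, ptr %tmp2, align 8
  %tmp4 = tail call fastcc i64 @tailcallee(ptr byval(%struct.s) %a, i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
  ret i64 %tmp4
}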