901 Commits

Chris Lattner
31805bf2b6 Implement MEMCPY natively in terms of rep movs*
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19468 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-11 06:19:26 +00:00
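A minimal C++ model of what the rep movs* lowering boils down to, assuming an x86 target and GCC/Clang inline-assembly syntax (the helper name is illustrative; the selector can also pick the word or dword forms when size and alignment allow):

    #include <cstddef>

    // Copy n bytes from src to dst with a single 'rep movsb':
    // ESI/RSI = source, EDI/RDI = destination, ECX/RCX = count.
    void rep_movsb_copy(void *dst, const void *src, std::size_t n) {
        asm volatile("rep movsb"
                     : "+D"(dst), "+S"(src), "+c"(n)
                     :
                     : "memory");
    }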
Chris Lattner
989de030c4 Implement memset -> rep stos*
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19467 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-11 06:14:36 +00:00
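The memset counterpart, again as a hedged C++ sketch using GCC/Clang inline assembly: rep stosb repeatedly stores AL, so the fill value goes in EAX/RAX and the count in ECX/RCX.

    #include <cstddef>

    // Fill n bytes at dst with 'value' using a single 'rep stosb'.
    void rep_stosb_fill(void *dst, unsigned char value, std::size_t n) {
        asm volatile("rep stosb"
                     : "+D"(dst), "+c"(n)
                     : "a"(value)
                     : "memory");
    }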
Chris Lattner
795069dd38 Announce that we don't support mem ops yet.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19466 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-11 05:57:36 +00:00
Chris Lattner
a95589be3f Teach the address selector to make 'reg+reg' addressing modes.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19457 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-11 04:40:19 +00:00
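A toy C++ sketch of the matching idea: when an address is an add of two already-available values, fold both into a single 'base+index' addressing mode instead of emitting a separate add. The structures below are illustrative only, not LLVM's actual X86 address-mode code.

    // Simplified expression node: either a register or an add of two nodes.
    struct ToyNode {
        enum Kind { Reg, Add } kind;
        int reg = 0;                                   // valid when kind == Reg
        const ToyNode *lhs = nullptr, *rhs = nullptr;  // valid when kind == Add
    };

    struct ToyAddrMode {
        int BaseReg  = 0;   // 0 means "no register"
        int IndexReg = 0;
    };

    // Returns true if the address fits a 'reg' or 'reg+reg' mode.
    bool selectAddr(const ToyNode &N, ToyAddrMode &AM) {
        if (N.kind == ToyNode::Add &&
            N.lhs->kind == ToyNode::Reg && N.rhs->kind == ToyNode::Reg) {
            AM.BaseReg  = N.lhs->reg;   // becomes (%base,%index) in the load/store
            AM.IndexReg = N.rhs->reg;
            return true;
        }
        if (N.kind == ToyNode::Reg) {   // plain 'reg' address still matches
            AM.BaseReg = N.reg;
            return true;
        }
        return false;                   // caller materializes the address itself
    }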
Chris Lattner
d4dab929d8 Emit NOT instructions.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19455 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-11 04:31:30 +00:00
Chris Lattner
6c07aee7c9 Fix a bug emitting branches that broke a lot of programs.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19452 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-11 04:06:27 +00:00
Chris Lattner
ef7ba0756b Be more careful where we set ContainsFPCode. We were missing a set in the
int -> FP casting code.  Note that we don't have to set it for FP operations
that take FP values as operands: whatever produces the FP value will set the
flag.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19451 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-11 03:50:45 +00:00
Chris Lattner
a3aa2e2882 Fix a major bug in setcc/cmov folding, where we accidentally
inverted the sense of the comparison.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19450 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-11 03:37:59 +00:00
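For reference, folding a setcc into a cmov has to invert the condition exactly once, and the bug above got the sense backwards. A small C++ table of x86 condition codes and their logical inverses (the enum names are illustrative):

    enum CondCode { E, NE, L, GE, LE, G, B, AE, BE, A };

    // Logical inverse of each condition code.
    CondCode invert(CondCode CC) {
        switch (CC) {
        case E:  return NE;   case NE: return E;
        case L:  return GE;   case GE: return L;
        case LE: return G;    case G:  return LE;
        case B:  return AE;   case AE: return B;
        case BE: return A;    case A:  return BE;
        }
        return CC;   // not reached
    }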
Chris Lattner
1133309e57 Take register pressure into account when we have to decide whether to
evaluate the LHS or the RHS of an operation first.  This causes good things
to happen.  For example, instead of compiling a loop to this:

.LBBstrength_result7_1: # loopentry
        movl 16(%esp), %edi
        movl (%edi), %edi             ;;; LOAD
        movl (%ecx), %ebx
        movl $2, (%eax,%ebx,4)
        movl (%edx), %ebx
        movl %esi, %ebp
        addl $21, %ebp
        addl $42, %esi
        cmpl $0, %edi                 ;;; USE
        cmovne %esi, %ebp
        cmpl %ebp, %ebx
        movl %ebp, %esi
        jg .LBBstrength_result7_1

We now compile it to this:

.LBBstrength_result7_1: # loopentry
        movl %edi, %ebx
        addl $42, %ebx
        addl $21, %edi
        movl (%ecx), %ebp              ;; LOAD
        cmpl $0, %ebp                  ;; USE
        cmovne %ebx, %edi
        movl (%edx), %ebx
        movl $2, (%eax,%ebx,4)
        movl (%esi), %ebx
        cmpl %edi, %ebx
        jg .LBBstrength_result7_1

Which reduces register pressure enough (in this case) to avoid spilling in the
loop.

As another example, consider the CodeGen/X86/regpressure.ll testcase.  We
used to generate this code for both cases:

regpressure1:
        subl $32, %esp
        movl %esi, 12(%esp)
        movl %edi, 8(%esp)
        movl %ebx, 4(%esp)
        movl %ebp, (%esp)
        movl 36(%esp), %ecx
        movl (%ecx), %eax
        movl 4(%ecx), %edx
        movl %edx, 24(%esp)
        movl 8(%ecx), %edx
        movl %edx, 16(%esp)
        movl 12(%ecx), %edx
        movl 16(%ecx), %esi
        movl 20(%ecx), %edi
        movl 24(%ecx), %ebx
        movl %ebx, 28(%esp)
        movl 28(%ecx), %ebx
        movl 32(%ecx), %ebp
        movl %ebp, 20(%esp)
        movl 36(%ecx), %ecx
        imull 24(%esp), %eax
        imull 16(%esp), %eax
        imull %edx, %eax
        imull %esi, %eax
        imull %edi, %eax
        imull 28(%esp), %eax
        imull %ebx, %eax
        imull 20(%esp), %eax
        imull %ecx, %eax
        movl (%esp), %ebp
        movl 4(%esp), %ebx
        movl 8(%esp), %edi
        movl 12(%esp), %esi
        addl $32, %esp
        ret

This code is basically trying to do all of the loads first, then execute all
of the multiplies.  Because we run out of registers, lots of spill code happens.
We now generate this code for both cases:

regpressure1:
        movl 4(%esp), %ecx
        movl (%ecx), %eax
        movl 4(%ecx), %edx
        imull %edx, %eax
        movl 8(%ecx), %edx
        imull %edx, %eax
        movl 12(%ecx), %edx
        imull %edx, %eax
        movl 16(%ecx), %edx
        imull %edx, %eax
        movl 20(%ecx), %edx
        imull %edx, %eax
        movl 24(%ecx), %edx
        imull %edx, %eax
        movl 28(%ecx), %edx
        imull %edx, %eax
        movl 32(%ecx), %edx
        imull %edx, %eax
        movl 36(%ecx), %ecx
        imull %ecx, %eax
        ret

which is much nicer (when we fold loads into the muls it will be even better).
The old instruction selector used to produce the good code for regpressure1
but not for regpressure2, as it depended on the order of operations in the
LLVM code.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19449 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-11 03:11:44 +00:00
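The ordering heuristic is in the spirit of classic Sethi-Ullman labeling: evaluate the operand subtree that needs more registers first, so only its single result stays live while the cheaper subtree is computed. A minimal C++ sketch on a toy expression tree (an illustration of the idea, not the selector's actual code):

    #include <algorithm>

    struct Expr {
        const Expr *lhs = nullptr, *rhs = nullptr;   // both null for leaves
    };

    // Registers needed to evaluate E without spilling (binary ops only).
    int regsNeeded(const Expr *E) {
        if (!E->lhs && !E->rhs)
            return 1;                                // leaf: one register
        int L = regsNeeded(E->lhs), R = regsNeeded(E->rhs);
        return L == R ? L + 1 : std::max(L, R);      // classic SU label
    }

    // Emit the hungrier operand first.
    const Expr *evaluateFirst(const Expr *E) {
        return regsNeeded(E->lhs) >= regsNeeded(E->rhs) ? E->lhs : E->rhs;
    }

In the regpressure1 example this is what turns "load everything, then multiply" into the interleaved load/imull sequence, keeping only the running product live.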
Chris Lattner
24aad1b0c1 Fold setcc instructions into selects.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19438 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-10 22:10:13 +00:00
Chris Lattner
57fbfb5879 Add conditional moves for the parity flag.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19437 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-10 22:09:33 +00:00
Chris Lattner
a13d3236d1 Implement 8-bit multiply for X86.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19435 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-10 20:55:48 +00:00
Chris Lattner
bd9f0eefb2 Codegen (Reg|imm)+&GV as an LEA, because we cannot put it into the immediate field
of an ADDri (due to current restrictions on MachineOperand :( ).  This allows
us to generate:

        leal Data+16000, %edx

instead of:

        movl $Data, %edx
        addl $16000, %edx


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19420 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-09 20:20:29 +00:00
Chris Lattner
250208587d Fix copy-and-paste errors for FP -> Int. This fixes fldry

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19418 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-09 19:49:59 +00:00
Chris Lattner
590d800a12 Initial implementation of FP->INT and INT->FP casts
Also, fix zero_extend from bool to i8, which fixes Shootout/objinst.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19414 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-09 18:52:44 +00:00
Chris Lattner
6e7c47c12d Fix a subtle bug involving constant expr casts from int to fp
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19410 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-09 01:49:29 +00:00
Chris Lattner
1482458b23 Implement varargs and returnaddress/frameaddress intrinsics. With this
patch, all of SingleSource/UnitTests passes.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19408 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-09 00:01:27 +00:00
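At the source level, the two address intrinsics correspond to the GCC/Clang builtins, and llvm.vastart and friends correspond to the usual <cstdarg> machinery. A small C++ usage sketch:

    #include <cstdio>

    void show_frame() {
        void *ret   = __builtin_return_address(0);   // -> llvm.returnaddress
        void *frame = __builtin_frame_address(0);    // -> llvm.frameaddress
        std::printf("return address %p, frame %p\n", ret, frame);
    }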
Chris Lattner
b0802651bf Okay 15th time is the charm. Looking at the vector size is useless as it
gets clobbered by a previous statement.  This fixes all calls finally.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19399 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-08 20:51:36 +00:00
Chris Lattner
6cc70ef0f9 Okay, my off by one was actually off by two. This fixes Generic/2003-07-07-BadLongConst.ll
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19398 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-08 20:39:31 +00:00
Chris Lattner
ce45d65d9a Fix off by one error
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19396 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-08 20:31:34 +00:00
Chris Lattner
5188ad735c Adjust to changes in LowerCallTo interface
Minor bugfixes


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19376 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-08 19:28:19 +00:00
Chris Lattner
8c92628d1c Wrap long line.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19367 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-08 06:59:50 +00:00
Chris Lattner
7ab65934a0 The X86 instruction selector already handles codegen of:
store float 123.45, float* %P

as an integer store.  This adds handling of float immediate stores as integers
for arguments passed to function calls.

This is now tested by CodeGen/X86/store-fp-constant.ll


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19364 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-08 05:45:24 +00:00
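A portable C++ model of what "store the float as an integer" means: take the 32-bit pattern of the constant and emit a plain integer store (e.g. movl $imm32, (%reg)) rather than round-tripping through the FP stack. The helper name is illustrative.

    #include <cstdint>
    #include <cstring>

    void store_float_as_int(float *P, float V) {
        std::uint32_t bits;
        std::memcpy(&bits, &V, sizeof bits);   // reinterpret the bits, no conversion
        std::memcpy(P, &bits, sizeof bits);    // the 'integer' store
    }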
Chris Lattner
18ad19488d Allow the selection-dag based selector to be disabled with -disable-pattern-isel.
For now, this is the default, as the current selector is missing some big pieces.
To enable the new selector, pass -disable-pattern-isel=false to llc or lli.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19335 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-07 07:50:50 +00:00
Chris Lattner
8acb1ba04d Reimplementation of the X86 pattern isel. This is still missing many large
pieces, but can already do amazing things in some cases.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19334 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-07 07:49:41 +00:00
Chris Lattner
d844d0128a This file is now dead.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19333 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-07 07:49:05 +00:00
Chris Lattner
95cdb36714 Add a new prototype
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19332 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-07 07:48:33 +00:00
Chris Lattner
6ac95f9679 Codegen -1 and -0.0 more efficiently. This implements CodeGen/X86/negatize_zero.ll
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19313 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-06 21:19:16 +00:00
Chris Lattner
5384b38ccc 1. If a double FP constant must be put into a constant pool, but it can be
precisely represented as a float, put it into the constant pool as a
   float.
2. Use the cbw/cwd/cdq instructions instead of an explicit SAR for signed
   division.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19291 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-05 16:30:14 +00:00
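A hedged C++ sketch of the test behind change 1: a double can be placed in the constant pool as a float exactly when converting to float and back reproduces the original value (NaNs compare unequal and so conservatively stay as doubles). Change 2 simply uses the one-byte sign-extension instructions to set up AX / DX:AX / EDX:EAX before the divide instead of an explicit shift.

    bool fitsInFloat(double D) {
        return static_cast<double>(static_cast<float>(D)) == D;
    }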
Chris Lattner
0539313fe5 Minor optimization to allocate R8 registers in a better order.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19289 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-05 16:09:16 +00:00
Jeff Cohen
1c32f79a82 Revert elimination of global variable hack... still needed.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19273 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-03 16:34:19 +00:00
Chris Lattner
10197ff5cd ADC and IMUL are also commutable.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19264 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-03 01:27:59 +00:00
Jeff Cohen
40296bdf27 Eliminate the use of the global variable hack in the X86 target that was used
to get Visual Studio to link in X86.lib to the executables that need it.  There
is another way of doing it.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19252 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-02 04:23:12 +00:00
Chris Lattner
5aee0b97aa Disable 2->3 address promotion of add and inc instructions to LEAs. In
addition to being three-address, LEAs don't set the flags.

This fixes 186.crafty.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19251 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-02 04:18:17 +00:00
Chris Lattner
5dd350defd Add a new method.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19249 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-02 02:38:18 +00:00
Chris Lattner
eb96ec52ff Add support for SETNPr to lower to memory form.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19248 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-02 02:37:46 +00:00
Chris Lattner
bcea4d6f28 Implement the convertToThreeAddress method, add support for inverting JP/JNP
branches.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19247 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-02 02:37:07 +00:00
Chris Lattner
cc65beeb39 Two changes here:
1. Add new instructions for checking parity flags: JP, JNP, SETP, SETNP.
2. Set the isCommutable and isPromotableTo3Address bits on several
   instructions.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19246 91177308-0d34-0410-b5e6-96231b3b80d8
2005-01-02 02:35:46 +00:00
Chris Lattner
6b4f501470 Remove unused enum value
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19024 91177308-0d34-0410-b5e6-96231b3b80d8
2004-12-17 22:41:46 +00:00
Chris Lattner
8cdbc35216 Change the sentinel
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19007 91177308-0d34-0410-b5e6-96231b3b80d8
2004-12-17 00:46:51 +00:00
Chris Lattner
11cf7aa775 Create a stack slot for the return address lazily instead of eagerly. This
saves a small amount of time for functions that don't call llvm.returnaddress
or llvm.frameaddress (which is almost all functions).


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19006 91177308-0d34-0410-b5e6-96231b3b80d8
2004-12-17 00:07:46 +00:00
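A toy C++ sketch of the lazy-creation pattern described above (not the real frame-info API): the slot is only materialized the first time one of the intrinsics asks for it, and later uses reuse it.

    struct ToyFrameInfo {
        int ReturnAddrSlot = -1;   // -1 = not created yet
        int NumSlots = 0;

        int getReturnAddrSlot() {
            if (ReturnAddrSlot < 0)            // first use: create it now
                ReturnAddrSlot = NumSlots++;
            return ReturnAddrSlot;             // later uses share the slot
        }
    };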
Chris Lattner
84b85c8262 Adjust to changes in asmwriter filenames
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@18987 91177308-0d34-0410-b5e6-96231b3b80d8
2004-12-16 17:33:24 +00:00
Chris Lattner
c0354c904b Set the rounding mode for the X86 FPU to 64-bits instead of 80-bits. We
don't support long double anyway, and this gives us FP results closer to
other targets.

This also speeds up 179.art from 41.4s to 18.32s, by eliminating a problem
with extra precision that causes an FP == comparison to fail (leading to
extra loop iterations).


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@18895 91177308-0d34-0410-b5e6-96231b3b80d8
2004-12-13 17:23:11 +00:00
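A sketch of how such a 64-bit setting is applied, assuming what is being changed is the precision-control field of the x87 control word (PC = 10b selects a 53-bit significand, i.e. 64-bit double results instead of 80-bit extended ones). This requires x86 with GCC/Clang inline assembly; the helper name is illustrative.

    #include <cstdint>

    void set_x87_double_precision() {
        std::uint16_t cw;
        asm volatile("fnstcw %0" : "=m"(cw));   // read the control word
        cw = (cw & ~0x0300) | 0x0200;           // PC = 10b: double precision
        asm volatile("fldcw %0" : : "m"(cw));   // write it back
    }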
Chris Lattner
3ea78c4276 Use the target triple to pick this target.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@18830 91177308-0d34-0410-b5e6-96231b3b80d8
2004-12-12 17:40:28 +00:00
Chris Lattner
223d4c4b3a Fix a regression caused by the previous patch
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@18449 91177308-0d34-0410-b5e6-96231b3b80d8
2004-12-03 05:13:15 +00:00
Chris Lattner
45de191b0b Spill/restore X86 floating point stack registers with 64-bits of precision
instead of 80-bits of precision.  This fixes PR467.

This change speeds up fldry on X86 with LLC from 7.32s on apoc to 4.68s.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@18433 91177308-0d34-0410-b5e6-96231b3b80d8
2004-12-02 18:17:31 +00:00
Chris Lattner
3986924e0b Consider 64-bit registers to be FP as well.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@18432 91177308-0d34-0410-b5e6-96231b3b80d8
2004-12-02 17:57:21 +00:00
Tanya Lattner
9855b84469 Reverting this patch:
http://mail.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20041122/021428.html

It broke MultiSource/Applications/obsequi


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@18407 91177308-0d34-0410-b5e6-96231b3b80d8
2004-12-01 18:27:03 +00:00
Chris Lattner
928b47ae2e Revamp long/ulong comparisons to use a much more efficient sequence (thanks
to Brian and the Sun compiler for pointing out that the obvious works :)

This also enables folding all long comparisons into setcc and branch
instructions: before we could only do == and !=

For example, for:
void test(unsigned long long A, unsigned long long B) {
   if (A < B) foo();
 }

We now generate:

test:
        subl $4, %esp
        movl %esi, (%esp)
        movl 8(%esp), %eax
        movl 12(%esp), %ecx
        movl 16(%esp), %edx
        movl 20(%esp), %esi
        subl %edx, %eax
        sbbl %esi, %ecx
        jae .LBBtest_2  # UnifiedReturnBlock
.LBBtest_1:     # then
        call foo
        movl (%esp), %esi
        addl $4, %esp
        ret
.LBBtest_2:     # UnifiedReturnBlock
        movl (%esp), %esi
        addl $4, %esp
        ret

Instead of:

test:
        subl $12, %esp
        movl %esi, 8(%esp)
        movl %ebx, 4(%esp)
        movl 16(%esp), %eax
        movl 20(%esp), %ecx
        movl 24(%esp), %edx
        movl 28(%esp), %esi
        cmpl %edx, %eax
        setb %al
        cmpl %esi, %ecx
        setb %bl
        cmove %ax, %bx
        testb %bl, %bl
        je .LBBtest_2   # UnifiedReturnBlock
.LBBtest_1:     # then
        call foo
        movl 4(%esp), %ebx
        movl 8(%esp), %esi
        addl $12, %esp
        ret
.LBBtest_2:     # UnifiedReturnBlock
        movl 4(%esp), %ebx
        movl 8(%esp), %esi
        addl $12, %esp
        ret


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@18330 91177308-0d34-0410-b5e6-96231b3b80d8
2004-11-29 05:55:24 +00:00
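A C++ model of why the sub/sbb pair in the new sequence works: subtract the low halves and record the borrow, then subtract the high halves minus that borrow; the final borrow is exactly the unsigned '<' result, so a single jb/jae can branch on it. The function name is ours.

    #include <cstdint>

    bool ult64_via_sbb(std::uint64_t A, std::uint64_t B) {
        std::uint32_t alo = static_cast<std::uint32_t>(A), ahi = A >> 32;
        std::uint32_t blo = static_cast<std::uint32_t>(B), bhi = B >> 32;
        bool borrow_lo = alo < blo;                               // carry out of subl
        bool borrow_hi = ahi < bhi || (ahi == bhi && borrow_lo);  // carry out of sbbl
        return borrow_hi;                                         // A < B, unsigned
    }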
Chris Lattner
90b1b457ef Do not push two return addresses on the stack when we call external functions that have their addresses taken. This fixes test-call.ll
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@18134 91177308-0d34-0410-b5e6-96231b3b80d8
2004-11-22 22:25:30 +00:00