Commit Graph

10693 Commits

Chris Lattner
aa96ae780a Use a new helper to split critical edges, making the code simpler.
Do not claim to not change the CFG.  We do change the CFG to split critical
edges.  This isn't causing us a problem now, but could well do so in the
future.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22824 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-17 06:35:16 +00:00
Chris Lattner
fae59b99b8 Fix a regression on X86, where FP values can be promoted too.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22822 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-17 06:06:25 +00:00
Chris Lattner
6d9aed4f8f Fix a few small typos I noticed when converting this over to the DAG->DAG
selector.  Also, there is no difference between addSImm and addImm, so just
use addImm, folding some branches.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22819 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-17 01:25:14 +00:00
Jim Laskey
a033b4d8eb Removed UINT_TO_FP and SINT_TO_FP from ISel outright.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22818 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-17 01:14:38 +00:00
Andrew Lenharth
035b8abdab Fix a thinko.  Should fix the s4addl.ll regression
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22817 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-17 00:47:24 +00:00
Jim Laskey
361ca5c927 Remove ISel code generation for UINT_TO_FP and SINT_TO_FP. Now asserts if
marked as legal.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22816 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-17 00:41:40 +00:00
Jim Laskey
ad23c9d4f2 Make UINT_TO_FP and SINT_TO_FP use generic expansion.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22815 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-17 00:40:22 +00:00
Jim Laskey
6269ed125f Added generic code expansion for [signed|unsigned] i32 to [f32|f64] casts in the
legalizer.  PowerPC now uses this expansion instead of the ISel version.

Example:

// signed integer to double conversion
double f1(signed x) {
  return (double)x;
}

// unsigned integer to double conversion
double f2(unsigned x) {
  return (double)x;
}

// signed integer to float conversion
float f3(signed x) {
  return (float)x;
}

// unsigned integer to float conversion
float f4(unsigned x) {
  return (float)x;
}


Bytecode:

internal fastcc double %_Z2f1i(int %x) {
entry:
        %tmp.1 = cast int %x to double          ; <double> [#uses=1]
        ret double %tmp.1
}

internal fastcc double %_Z2f2j(uint %x) {
entry:
        %tmp.1 = cast uint %x to double         ; <double> [#uses=1]
        ret double %tmp.1
}

internal fastcc float %_Z2f3i(int %x) {
entry:
        %tmp.1 = cast int %x to float           ; <float> [#uses=1]
        ret float %tmp.1
}

internal fastcc float %_Z2f4j(uint %x) {
entry:
        %tmp.1 = cast uint %x to float          ; <float> [#uses=1]
        ret float %tmp.1
}

internal fastcc double %_Z2g1i(int %x) {
entry:
        %buffer = alloca [2 x uint]             ; <[2 x uint]*> [#uses=3]
        %tmp.0 = getelementptr [2 x uint]* %buffer, int 0, int 0                ; <uint*> [#uses=1]
        store uint 1127219200, uint* %tmp.0
        %tmp.2 = cast int %x to uint            ; <uint> [#uses=1]
        %tmp.3 = xor uint %tmp.2, 2147483648            ; <uint> [#uses=1]
        %tmp.5 = getelementptr [2 x uint]* %buffer, int 0, int 1                ; <uint*> [#uses=1]
        store uint %tmp.3, uint* %tmp.5
        %tmp.9 = cast [2 x uint]* %buffer to double*            ; <double*> [#uses=1]
        %tmp.10 = load double* %tmp.9           ; <double> [#uses=1]
        %tmp.13 = load double* cast (long* %signed_bias to double*)             ; <double> [#uses=1]
        %tmp.14 = sub double %tmp.10, %tmp.13           ; <double> [#uses=1]
        ret double %tmp.14
}

internal fastcc double %_Z2g2j(uint %x) {
entry:
        %buffer = alloca [2 x uint]             ; <[2 x uint]*> [#uses=3]
        %tmp.0 = getelementptr [2 x uint]* %buffer, int 0, int 0                ; <uint*> [#uses=1]
        store uint 1127219200, uint* %tmp.0
        %tmp.1 = getelementptr [2 x uint]* %buffer, int 0, int 1                ; <uint*> [#uses=1]
        store uint %x, uint* %tmp.1
        %tmp.4 = cast [2 x uint]* %buffer to double*            ; <double*> [#uses=1]
        %tmp.5 = load double* %tmp.4            ; <double> [#uses=1]
        %tmp.8 = load double* cast (long* %unsigned_bias to double*)            ; <double> [#uses=1]
        %tmp.9 = sub double %tmp.5, %tmp.8              ; <double> [#uses=1]
        ret double %tmp.9
}

internal fastcc float %_Z2g3i(int %x) {
entry:
        %buffer = alloca [2 x uint]             ; <[2 x uint]*> [#uses=3]
        %tmp.0 = getelementptr [2 x uint]* %buffer, int 0, int 0                ; <uint*> [#uses=1]
        store uint 1127219200, uint* %tmp.0
        %tmp.2 = cast int %x to uint            ; <uint> [#uses=1]
        %tmp.3 = xor uint %tmp.2, 2147483648            ; <uint> [#uses=1]
        %tmp.5 = getelementptr [2 x uint]* %buffer, int 0, int 1                ; <uint*> [#uses=1]
        store uint %tmp.3, uint* %tmp.5
        %tmp.9 = cast [2 x uint]* %buffer to double*            ; <double*> [#uses=1]
        %tmp.10 = load double* %tmp.9           ; <double> [#uses=1]
        %tmp.13 = load double* cast (long* %signed_bias to double*)             ; <double> [#uses=1]
        %tmp.14 = sub double %tmp.10, %tmp.13           ; <double> [#uses=1]
        %tmp.16 = cast double %tmp.14 to float          ; <float> [#uses=1]
        ret float %tmp.16
}

internal fastcc float %_Z2g4j(uint %x) {
entry:
        %buffer = alloca [2 x uint]             ; <[2 x uint]*> [#uses=3]
        %tmp.0 = getelementptr [2 x uint]* %buffer, int 0, int 0                ; <uint*> [#uses=1]
        store uint 1127219200, uint* %tmp.0
        %tmp.1 = getelementptr [2 x uint]* %buffer, int 0, int 1                ; <uint*> [#uses=1]
        store uint %x, uint* %tmp.1
        %tmp.4 = cast [2 x uint]* %buffer to double*            ; <double*> [#uses=1]
        %tmp.5 = load double* %tmp.4            ; <double> [#uses=1]
        %tmp.8 = load double* cast (long* %unsigned_bias to double*)            ; <double> [#uses=1]
        %tmp.9 = sub double %tmp.5, %tmp.8              ; <double> [#uses=1]
        %tmp.11 = cast double %tmp.9 to float           ; <float> [#uses=1]
        ret float %tmp.11
}


PowerPC Code:

        .machine ppc970


        .const
        .align  2
.CPIl1__Z2f1i_0:                                        ; float 0x4330000080000000
        .long   1501560836      ; float 4.5036e+15
        .text
        .align  2
        .globl  l1__Z2f1i
l1__Z2f1i:
.LBBl1__Z2f1i_0:        ; entry
        xoris r2, r3, 32768
        stw r2, -4(r1)
        lis r2, 17200
        stw r2, -8(r1)
        lfd f0, -8(r1)
        lis r2, ha16(.CPIl1__Z2f1i_0)
        lfs f1, lo16(.CPIl1__Z2f1i_0)(r2)
        fsub f1, f0, f1
        blr


        .const
        .align  2
.CPIl2__Z2f2j_0:                                        ; float 0x4330000000000000
        .long   1501560832      ; float 4.5036e+15
        .text
        .align  2
        .globl  l2__Z2f2j
l2__Z2f2j:
.LBBl2__Z2f2j_0:        ; entry
        stw r3, -4(r1)
        lis r2, 17200
        stw r2, -8(r1)
        lfd f0, -8(r1)
        lis r2, ha16(.CPIl2__Z2f2j_0)
        lfs f1, lo16(.CPIl2__Z2f2j_0)(r2)
        fsub f1, f0, f1
        blr


        .const
        .align  2
.CPIl3__Z2f3i_0:                                        ; float 0x4330000080000000
        .long   1501560836      ; float 4.5036e+15
        .text
        .align  2
        .globl  l3__Z2f3i
l3__Z2f3i:
.LBBl3__Z2f3i_0:        ; entry
        xoris r2, r3, 32768
        stw r2, -4(r1)
        lis r2, 17200
        stw r2, -8(r1)
        lfd f0, -8(r1)
        lis r2, ha16(.CPIl3__Z2f3i_0)
        lfs f1, lo16(.CPIl3__Z2f3i_0)(r2)
        fsub f0, f0, f1
        frsp f1, f0
        blr


        .const
        .align  2
.CPIl4__Z2f4j_0:                                        ; float 0x4330000000000000
        .long   1501560832      ; float 4.5036e+15
        .text
        .align  2
        .globl  l4__Z2f4j
l4__Z2f4j:
.LBBl4__Z2f4j_0:        ; entry
        stw r3, -4(r1)
        lis r2, 17200
        stw r2, -8(r1)
        lfd f0, -8(r1)
        lis r2, ha16(.CPIl4__Z2f4j_0)
        lfs f1, lo16(.CPIl4__Z2f4j_0)(r2)
        fsub f0, f0, f1
        frsp f1, f0
        blr
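
For reference, here is a minimal C sketch of the expansion shown above (helper
names are illustrative, not from the tree): build the double bit pattern
0x43300000_xxxxxxxx in memory, reload it, and subtract a bias.  Big-endian
word order is assumed, as on PowerPC.

#include <stdint.h>
#include <string.h>

/* Word order here is big-endian, as on PowerPC; a little-endian host would
 * swap the two array elements. */
double u32_to_f64(uint32_t x) {
    uint32_t words[2] = { 0x43300000u, x };
    double d;
    memcpy(&d, words, sizeof d);            /* bit pattern 0x43300000_xxxxxxxx */
    return d - 4503599627370496.0;          /* 2^52: the unsigned bias */
}

double s32_to_f64(int32_t x) {
    uint32_t words[2] = { 0x43300000u, (uint32_t)x ^ 0x80000000u };
    double d;
    memcpy(&d, words, sizeof d);
    return d - 4503601774854144.0;          /* 2^52 + 2^31: the signed bias */
}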


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22814 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-17 00:39:29 +00:00
Chris Lattner
37bfbb47de Add a new TargetConstant node
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22813 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-17 00:34:06 +00:00
Nate Begeman
456f1e890c Implement a couple improvements:
Remove dead code in ISD::Constant handling
Add support for add long, imm16

We now codegen 'long long foo(long long a) { return ++a; }'
as:
addic r4, r4, 1
addze r3, r3
blr

instead of:
li r2, 1
li r5, 0
addc r2, r4, r2
adde r3, r3, r5
blr
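
The addic/addze pair adds one to the low word, recording the carry, and then
folds the carry into the high word; a rough C equivalent (function name is
illustrative):

/* Increment a 64-bit value held in two 32-bit halves. */
void inc64(unsigned *hi, unsigned *lo) {
    unsigned old_lo = *lo;
    *lo = old_lo + 1;            /* addic r4, r4, 1 */
    *hi += (*lo < old_lo);       /* addze r3, r3    */
}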


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22811 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-17 00:20:08 +00:00
Chris Lattner
fea493030b This is a dummy; it doesn't matter what the ValueType is
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22809 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-16 21:59:52 +00:00
Chris Lattner
a8cd01524f Updates for changes in nodes
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22808 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-16 21:58:15 +00:00
Chris Lattner
707ebc5dd6 Update the backends to work with the new CopyFromReg/CopyToReg/ImplicitDef nodes
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22807 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-16 21:56:37 +00:00
Chris Lattner
d5d0f9bd20 Eliminate the RegSDNode class, which 3 nodes (CopyFromReg/CopyToReg/ImplicitDef)
used to tack a register number onto the node.

Instead of doing this, make a new node, RegisterSDNode, which is a leaf
containing a register number.  These three operations just become normal
DAG nodes now, instead of requiring special handling.

Note that with this change, it is no longer correct to make illegal
CopyFromReg/CopyToReg nodes.  The legalizer will not touch them, and this
is bad, so don't do it. :)


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22806 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-16 21:55:35 +00:00
Nate Begeman
7cbd525ba8 Implement BR_CC and BRTWOWAY_CC. This allows the removal of a rather nasty
FIXME from the PowerPC backend.  Emit slightly better code for legalizing
select_cc.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22805 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-16 19:49:35 +00:00
Chris Lattner
f3e133a35f Allow passing a DAG into dump and getOperationName.  If one is available
when printing a node, use it to render target operations with their
target instruction name instead of "<<unknown>>".


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22804 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-16 18:33:07 +00:00
Chris Lattner
ad95d6ab20 Use an extant helper to do this.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22802 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-16 18:31:23 +00:00
Chris Lattner
149c58ce0b Add some methods for DAG->DAG ISel.
Split RemoveNodeFromCSEMaps out of DeleteNodesIfDead to do it.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22801 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-16 18:17:10 +00:00
Chris Lattner
7c5a3d390a Pull the LLVM -> DAG lowering code out of the pattern selector so that it
can be shared with the DAG->DAG selector.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22799 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-16 17:14:42 +00:00
Chris Lattner
80b32b3aab Fix a bad case in gzip where we put lots of things in registers across the
loop, because an IV-dependent value was used outside of the loop and didn't
have immediate-folding capability.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22798 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-16 00:38:11 +00:00
Chris Lattner
ff2006aa74 Fix Transforms/LoopStrengthReduce/2005-08-15-AddRecIV.ll
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22797 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-16 00:37:01 +00:00
Chris Lattner
b0096bd19d Turn loop strength reduction on by default.
Only run createLowerConstantExpressionsPass for the simple ISel.  The DAG
ISel has no need for it.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22794 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-15 23:47:04 +00:00
Chris Lattner
db25de496c Teach LLVM to know how many times a loop executes when constructed with
a < expression, e.g.: for (i = m; i < n; ++i)
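
For the unit-stride case the count is just the distance between the bounds; a
sketch, assuming the induction variable does not overflow (helper name is
illustrative):

/* Trip count of `for (i = m; i < n; ++i)`: n - m iterations when n > m,
 * zero otherwise. */
unsigned trip_count_lt(int m, int n) {
    return n > m ? (unsigned)n - (unsigned)m : 0u;
}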


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22793 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-15 23:33:51 +00:00
Jim Laskey
ea0617a023 Broke the 80-column rule.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22792 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-15 17:35:26 +00:00
Jim Laskey
30679e691d Changed code gen for int to f32 to use rounding. This makes FP results
consistent with gcc.
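
In source terms the net effect is that the i32 is converted exactly to double
and then rounded once to float, as the frsp in the listings above does; a
sketch of the intended result:

/* The conversion to double is exact; the only rounding happens in the final
 * double -> float step, matching gcc's results. */
float s32_to_f32(int x) {
    return (float)(double)x;
}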


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22791 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-15 17:14:19 +00:00
Andrew Lenharth
d228427f87 isIntImmediate is a good idea.  Add a flavor that checks bounds while it's at it
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22790 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-15 14:31:37 +00:00
Nate Begeman
2d56e72f09 Fix last night's PPC32 regressions by:
1. Not selecting the false value of a select_cc in the false arm, which
   isn't legal for nested selects.
2. Actually returning the node we created and legalized in the FP_TO_UINT
   expander.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22789 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-14 18:38:32 +00:00
Nate Begeman
889f2c1acf Fix last night's X86 regressions by putting the code for SSE inside the
if(SSE) block.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22788 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-14 18:37:02 +00:00
Andrew Lenharth
9ca65c7110 Only build the .a library on Alpha
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22787 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-14 15:14:34 +00:00
Nate Begeman
875f360e22 Fix FP_TO_UINT with Scalar SSE2 now that the legalizer can handle it. We
now generate the relatively good code sequences:
unsigned short foo(float a) { return a; }
_foo:
        movss 4(%esp), %xmm0
        cvttss2si %xmm0, %eax
        movzwl %ax, %eax
        ret

and
unsigned bar(float a) { return a; }
_bar:
        movss .CPI_bar_0, %xmm0
        movss 4(%esp), %xmm1
        movapd %xmm1, %xmm2
        subss %xmm0, %xmm2
        cvttss2si %xmm2, %eax
        xorl $-2147483648, %eax
        cvttss2si %xmm1, %ecx
        ucomiss %xmm0, %xmm1
        cmovb %ecx, %eax
        ret
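
A C sketch of what _bar computes (names are illustrative): inputs below 2^31
take the plain signed conversion, larger ones convert a - 2^31 and flip the
top bit; the generated code computes both and lets ucomiss/cmovb pick.

/* Sketch only; assumes the input is in unsigned 32-bit range. */
unsigned f32_to_u32(float a) {
    const float two31 = 2147483648.0f;          /* the .CPI_bar_0 constant */
    if (a < two31)
        return (unsigned)(int)a;                /* fits a signed cvttss2si */
    return (unsigned)(int)(a - two31) ^ 0x80000000u;  /* rebase, set top bit */
}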


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22786 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-14 04:36:51 +00:00
Nate Begeman
d2558e3e4a Teach the legalizer how to legalize FP_TO_UINT.
Teach the legalizer to promote FP_TO_UINT to FP_TO_SINT if the wider
  FP_TO_UINT is also illegal.  This allows us on PPC to codegen
  unsigned short foo(float a) { return a; }

as:
_foo:
.LBB_foo_0:     ; entry
        fctiwz f0, f1
        stfd f0, -8(r1)
        lwz r2, -4(r1)
        rlwinm r3, r2, 0, 16, 31
        blr

instead of:
_foo:
.LBB_foo_0:     ; entry
        fctiwz f0, f1
        stfd f0, -8(r1)
        lwz r2, -4(r1)
        lis r3, ha16(.CPI_foo_0)
        lfs f0, lo16(.CPI_foo_0)(r3)
        fcmpu cr0, f1, f0
        blt .LBB_foo_2  ; entry
.LBB_foo_1:     ; entry
        fsubs f0, f1, f0
        fctiwz f0, f0
        stfd f0, -16(r1)
        lwz r2, -12(r1)
        xoris r2, r2, 32768
.LBB_foo_2:     ; entry
        rlwinm r3, r2, 0, 16, 31
        blr
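
The promotion is valid because any u16 result also fits a signed i32
conversion; a one-line sketch of the source-level equivalent:

/* FP_TO_UINT with an i16 result becomes FP_TO_SINT to i32 plus a mask,
 * i.e. the fctiwz + rlwinm pair above. */
unsigned short f32_to_u16(float a) {
    return (unsigned short)(int)a;
}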


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22785 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-14 01:20:53 +00:00
Nate Begeman
5a01481585 Make FP_TO_UINT Illegal.  This allows us to generate significantly better
code for FP_TO_UINT by using the legalizer's SELECT variant.

Implement a codegen improvement for SELECT_CC, selecting the false node in
the MBB that feeds the phi node.  This allows us to codegen:
void foo(int *a, int b, int c) { int d = (a < b) ? 5 : 9; *a = d; }
as:
_foo:
        li r2, 5
        cmpw cr0, r4, r3
        bgt .LBB_foo_2  ; entry
.LBB_foo_1:     ; entry
        li r2, 9
.LBB_foo_2:     ; entry
        stw r2, 0(r3)
        blr

instead of:
_foo:
        li r2, 5
        li r5, 9
        cmpw cr0, r4, r3
        bgt .LBB_foo_2  ; entry
.LBB_foo_1:     ; entry
        or r2, r5, r5
.LBB_foo_2:     ; entry
        stw r2, 0(r3)
        blr


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22784 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-14 01:17:16 +00:00
Andrew Lenharth
40b37cc28d Testing a variable before it is defined doesn't work so well.  It is a fairly small thing, so just let everyone build the .a file.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22783 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-13 14:58:23 +00:00
Chris Lattner
27e5142309 Oops, don't forget to clear this.  The real inner loop is now:
.LBB_foo_3:     ; no_exit.1
        lfd f2, 0(r9)
        lfd f3, 8(r9)
        fmul f4, f1, f2
        fmadd f4, f0, f3, f4
        stfd f4, 8(r9)
        fmul f3, f1, f3
        fmsub f2, f0, f2, f3
        stfd f2, 0(r9)
        addi r9, r9, 16
        addi r8, r8, 1
        cmpw cr0, r8, r4
        ble .LBB_foo_3  ; no_exit.1


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22782 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-13 07:42:01 +00:00
Chris Lattner
934520a747 Recursively scan SCEV expressions for common subexpressions.  This allows us
to handle nested loops much better, for example, by being able to tell that
these two expressions:

{( 8 + ( 16 * ( 1 +  %Tmp11 +  %Tmp12)) +  %c_),+,( 16 *  %Tmp12)}<loopentry.1>

{(( 16 * ( 1 +  %Tmp11 +  %Tmp12)) +  %c_),+,( 16 *  %Tmp12)}<loopentry.1>

have the following common part that can be shared:
{(( 16 * ( 1 +  %Tmp11 +  %Tmp12)) +  %c_),+,( 16 *  %Tmp12)}<loopentry.1>
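
For reference, an add recurrence {Start,+,Step}<loop> stands for
Start + i*Step at iteration i of that loop, so the two expressions above
differ only by the loop-invariant constant 8; a trivial sketch of the
notation (helper name is illustrative):

/* Value of the recurrence {Start,+,Step}<L> at iteration i of loop L. */
long addrec_value(long start, long step, long i) {
    return start + i * step;
}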

This allows us to codegen an important inner loop in 168.wupwise as:

.LBB_foo_4:     ; no_exit.1
        lfd f2, 16(r9)
        fmul f3, f0, f2
        fmul f2, f1, f2
        fadd f4, f3, f2
        stfd f4, 8(r9)
        fsub f2, f3, f2
        stfd f2, 16(r9)
        addi r8, r8, 1
        addi r9, r9, 16
        cmpw cr0, r8, r4
        ble .LBB_foo_4  ; no_exit.1

instead of:

.LBB_foo_3:     ; no_exit.1
        lfdx f2, r6, r9
        add r10, r6, r9
        lfd f3, 8(r10)
        fmul f4, f1, f2
        fmadd f4, f0, f3, f4
        stfd f4, 8(r10)
        fmul f3, f1, f3
        fmsub f2, f0, f2, f3
        stfdx f2, r6, r9
        addi r9, r9, 16
        addi r8, r8, 1
        cmpw cr0, r8, r4
        ble .LBB_foo_3  ; no_exit.1


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22781 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-13 07:27:18 +00:00
Nate Begeman
ff66368a5f Remove an unnecessary argument to SimplifySelectCC and add an additional
assert when creating a select_cc node.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22780 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-13 06:14:17 +00:00
Nate Begeman
32c392a3a5 Fix the fabs regression on x86 by abstracting the select_cc optimization
out into SimplifySelectCC.  This allows both ISD::SELECT and ISD::SELECT_CC
to use the same set of simplifying folds.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22779 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-13 06:00:21 +00:00
Nate Begeman
8f331325a2 Remove support for 64-bit PPC; it's been broken for a long time.  It'll be
back once a DAG->DAG ISel exists.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22778 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-13 05:59:16 +00:00
Andrew Lenharth
7ac17529d2 Fix the oversized-GOT problem with gcc-4 on Alpha
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22777 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-13 05:09:50 +00:00
Chris Lattner
0ae380a8ac Teach SplitCriticalEdge to update LoopInfo if it is alive.  This fixes
a problem in LoopStrengthReduction, where it would split critical edges
and then confuse itself with outdated loop information.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22776 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-13 01:38:43 +00:00
Chris Lattner
8385393dc8 Remove dead code.  The exit block list is computed on demand and thus does
not need to be updated.  This code is a relic from when it did.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22775 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-13 01:30:36 +00:00
Chris Lattner
3e27b1f5c4 Implement a couple of simple shift foldings.
e.g.  (X & 7) >> 3   -> 0
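
The fold applies whenever every bit kept by the mask is shifted out; a sketch
of the condition (helper name is illustrative):

/* (X & Mask) >> Shift is zero for every X when no mask bit survives the
 * shift, e.g. Mask = 7, Shift = 3. */
int srl_of_and_is_zero(unsigned mask, unsigned shift) {
    return (mask >> shift) == 0;
}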


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22774 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-12 23:54:58 +00:00
Jim Laskey
cf083e312c Fix for 2005-08-12-rlwimi-crash.ll. Make allowance for masks being shifted to
zero.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22773 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-12 23:52:46 +00:00
Jim Laskey
847c3a976b This change handles the cases of (~x)&y and x&(~y) yielding ANDC, and
(~x)|y and x|(~y) yielding ORC.
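
These patterns map directly onto the PPC instructions, which complement their
second operand for free; in C terms:

/* andc computes x & ~y and orc computes x | ~y in a single instruction. */
unsigned andc(unsigned x, unsigned y) { return x & ~y; }
unsigned orc (unsigned x, unsigned y) { return x | ~y; }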


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22771 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-12 23:38:02 +00:00
Chris Lattner
c60fb08f7e When splitting critical edges, make sure not to leave the new block in the
middle of the loop.  This turns a critical loop in gzip into this:

.LBB_test_1:    ; loopentry
        or r27, r28, r28
        add r28, r3, r27
        lhz r28, 3(r28)
        add r26, r4, r27
        lhz r26, 3(r26)
        cmpw cr0, r28, r26
        bne .LBB_test_8 ; loopentry.loopexit_crit_edge
.LBB_test_2:    ; shortcirc_next.0
        add r28, r3, r27
        lhz r28, 5(r28)
        add r26, r4, r27
        lhz r26, 5(r26)
        cmpw cr0, r28, r26
        bne .LBB_test_7 ; shortcirc_next.0.loopexit_crit_edge
.LBB_test_3:    ; shortcirc_next.1
        add r28, r3, r27
        lhz r28, 7(r28)
        add r26, r4, r27
        lhz r26, 7(r26)
        cmpw cr0, r28, r26
        bne .LBB_test_6 ; shortcirc_next.1.loopexit_crit_edge
.LBB_test_4:    ; shortcirc_next.2
        add r28, r3, r27
        lhz r26, 9(r28)
        add r28, r4, r27
        lhz r25, 9(r28)
        addi r28, r27, 8
        cmpw cr7, r26, r25
        mfcr r26, 1
        rlwinm r26, r26, 31, 31, 31
        add r25, r8, r27
        cmpw cr7, r25, r7
        mfcr r25, 1
        rlwinm r25, r25, 29, 31, 31
        and. r26, r26, r25
        bne .LBB_test_1 ; loopentry

instead of this:

.LBB_test_1:    ; loopentry
        or r27, r28, r28
        add r28, r3, r27
        lhz r28, 3(r28)
        add r26, r4, r27
        lhz r26, 3(r26)
        cmpw cr0, r28, r26
        beq .LBB_test_3 ; shortcirc_next.0
.LBB_test_2:    ; loopentry.loopexit_crit_edge
        add r2, r30, r27
        add r8, r29, r27
        b .LBB_test_9   ; loopexit
.LBB_test_3:    ; shortcirc_next.0
        add r28, r3, r27
        lhz r28, 5(r28)
        add r26, r4, r27
        lhz r26, 5(r26)
        cmpw cr0, r28, r26
        beq .LBB_test_5 ; shortcirc_next.1
.LBB_test_4:    ; shortcirc_next.0.loopexit_crit_edge
        add r2, r11, r27
        add r8, r12, r27
        b .LBB_test_9   ; loopexit
.LBB_test_5:    ; shortcirc_next.1
        add r28, r3, r27
        lhz r28, 7(r28)
        add r26, r4, r27
        lhz r26, 7(r26)
        cmpw cr0, r28, r26
        beq .LBB_test_7 ; shortcirc_next.2
.LBB_test_6:    ; shortcirc_next.1.loopexit_crit_edge
        add r2, r9, r27
        add r8, r10, r27
        b .LBB_test_9   ; loopexit
.LBB_test_7:    ; shortcirc_next.2
        add r28, r3, r27
        lhz r26, 9(r28)
        add r28, r4, r27
        lhz r25, 9(r28)
        addi r28, r27, 8
        cmpw cr7, r26, r25
        mfcr r26, 1
        rlwinm r26, r26, 31, 31, 31
        add r25, r8, r27
        cmpw cr7, r25, r7
        mfcr r25, 1
        rlwinm r25, r25, 29, 31, 31
        and. r26, r26, r25
        bne .LBB_test_1 ; loopentry

Next up, improve the code for the loop.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22769 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-12 22:22:17 +00:00
Chris Lattner
6a13aed525 Add a helper method
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22768 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-12 22:14:06 +00:00
Chris Lattner
e0391beda8 Fix a FIXME: if we are inserting code for a PHI argument, split the critical
edge so that the code is not always executed for both operands.  This
prevents LSR from inserting code into loops whose exit blocks contain
PHI uses of IV expressions (which are outside of loops).  On gzip, for
example, we turn this ugly code:

.LBB_test_1:    ; loopentry
        add r27, r3, r28
        lhz r27, 3(r27)
        add r26, r4, r28
        lhz r26, 3(r26)
        add r25, r30, r28    ;; Only live if exiting the loop
        add r24, r29, r28    ;; Only live if exiting the loop
        cmpw cr0, r27, r26
        bne .LBB_test_5 ; loopexit

into this:

.LBB_test_1:    ; loopentry
        or r27, r28, r28
        add r28, r3, r27
        lhz r28, 3(r28)
        add r26, r4, r27
        lhz r26, 3(r26)
        cmpw cr0, r28, r26
        beq .LBB_test_3 ; shortcirc_next.0
.LBB_test_2:    ; loopentry.loopexit_crit_edge
        add r2, r30, r27
        add r8, r29, r27
        b .LBB_test_9   ; loopexit
.LBB_test_2:    ; shortcirc_next.0
        ...
        blt .LBB_test_1


and then into this:

.LBB_test_1:    ; loopentry
        or r27, r28, r28
        add r28, r3, r27
        lhz r28, 3(r28)
        add r26, r4, r27
        lhz r26, 3(r26)
        cmpw cr0, r28, r26
        beq .LBB_test_3 ; shortcirc_next.0
.LBB_test_2:    ; loopentry.loopexit_crit_edge
        add r2, r30, r27
        add r8, r29, r27
        b .LBB_t_3:    ; shortcirc_next.0
.LBB_test_3:    ; shortcirc_next.0
        ...
        blt .LBB_test_1


Next step: get the block out of the loop so that the loop is all
fall-throughs again.
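
As a reminder of the terminology, an edge is critical when its source has more
than one successor and its destination has more than one predecessor, so code
placed on the edge needs a block of its own; a tiny sketch (helper name is
illustrative):

/* An edge src -> dst is critical iff src has more than one successor and dst
 * has more than one predecessor; splitting it inserts a new block in between. */
int is_critical_edge(unsigned num_succs_of_src, unsigned num_preds_of_dst) {
    return num_succs_of_src > 1 && num_preds_of_dst > 1;
}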


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22766 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-12 22:06:11 +00:00
Chris Lattner
b01bfd49c3 Change break-critical-edges to not remove, then re-insert, PHI node entries.
Instead, just update the BB in place.  This is both faster, and it prevents
split-critical-edges from shuffling the PHI argument list unnecessarily.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22765 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-12 21:58:07 +00:00
Andrew Lenharth
1f3e808ebf Match gcc's use of tabs; it makes diffs easier
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22764 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-12 16:14:08 +00:00
Andrew Lenharth
21e786bdb4 .section cleanup; patch from Nicholas Riley
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22763 91177308-0d34-0410-b5e6-96231b3b80d8
2005-08-12 16:13:43 +00:00