llvm-6502/test/CodeGen/X86/stride-reuse.ll
Dan Gohman 02e4fa7d5f Strength reduction improvements.
- Avoid attempting stride-reuse when there are users that aren't
   addresses; in that case there will be places where the
   multiplications won't be folded away, so it's better to
   strength-reduce them (see the first sketch after this list).

 - Several SSE intrinsics have operands that strength-reduction can
   treat as addresses (second sketch below). The previous item makes
   this more visible, as any non-address use of an IV can inhibit
   stride-reuse.

 - Make ValidStride aware of whether there's likely to be a base
   register in the address computation. This prevents it from thinking
   that things like stride 9 are valid on x86 when the base register is
   already occupied (third sketch below).
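
A minimal sketch of the first point (hypothetical function @f, not part
of this commit): %i feeds both an address and a stored value, so reusing
the address stride for 2*%i would leave a multiply that can't be folded
into an addressing mode, and strength-reducing it to an add-by-2
recurrence of its own is cheaper.

define void @f(i32* %p, i32 %n) {
entry:
  br label %bb
bb:
  %i = phi i32 [ 0, %entry ], [ %i.next, %bb ]
  %addr = getelementptr i32* %p, i32 %i       ; address use of %i
  %twice = mul i32 %i, 2                      ; non-address use of %i
  store i32 %twice, i32* %addr
  %i.next = add i32 %i, 1
  %exitcond = icmp eq i32 %i.next, %n
  br i1 %exitcond, label %return, label %bb
return:
  ret void
}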
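A sketch of the second point; the 2007-era @llvm.x86.sse2.loadu.dq
intrinsic and function @g are used here purely as an illustration of an
intrinsic whose i8* operand strength reduction can treat as an address,
letting the GEP that feeds it participate in stride-reuse like an
ordinary load:

declare <16 x i8> @llvm.x86.sse2.loadu.dq(i8*)

define <16 x i8> @g(i8* %p, i32 %i) {
entry:
  %addr = getelementptr i8* %p, i32 %i        ; address operand of the call
  %v = call <16 x i8> @llvm.x86.sse2.loadu.dq(i8* %addr)
  ret <16 x i8> %v
}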
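And a sketch of the third point: an x86 address has the form
disp + base + index*scale with scale limited to 1, 2, 4, or 8, so a
stride of 9 over a byte array folds only as reg + reg*8 when the
base-register slot is free. That holds for a global (its address is the
displacement) but not for a pointer already occupying the base register.
@G, @h, and %q are hypothetical names.

@G = external global [1000 x i8]

define void @h(i8* %q, i32 %i) {
entry:
  %i9 = mul i32 %i, 9
  ; Foldable: &G[9*i] = G(disp) + reg + reg*8; the base slot can hold
  ; the index a second time.
  %a = getelementptr [1000 x i8]* @G, i32 0, i32 %i9
  store i8 0, i8* %a
  ; Not foldable: %q already occupies the base register, leaving no slot
  ; for the extra index term, so 9 is not a valid stride here.
  %b = getelementptr i8* %q, i32 %i9
  store i8 1, i8* %b
  ret void
}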

Also, XFAIL the 2007-08-10-LEA16Use32.ll test; the new logic to avoid
stride-reuse eliminates the LEA in the loop, so the test is no longer
testing what it was intended to test.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@43231 91177308-0d34-0410-b5e6-96231b3b80d8
2007-10-22 20:40:42 +00:00

; RUN: llvm-as < %s | llc -march=x86 | not grep lea
; RUN: llvm-as < %s | llc -march=x86-64 | not grep lea
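
; The shl/add pair below computes 2*i+64, a non-address use of the
; induction variable; with this change it is strength-reduced to its own
; add recurrence instead of reusing an address stride, so no lea should
; appear on either target.
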
@B = external global [1000 x float], align 32
@A = external global [1000 x float], align 32
@P = external global [1000 x i32], align 32

define void @foo(i32 %m) {
entry:
  %tmp1 = icmp sgt i32 %m, 0
  br i1 %tmp1, label %bb, label %return

bb:
  %i.019.0 = phi i32 [ %indvar.next, %bb ], [ 0, %entry ]
  %tmp2 = getelementptr [1000 x float]* @B, i32 0, i32 %i.019.0
  %tmp3 = load float* %tmp2, align 4
  %tmp4 = mul float %tmp3, 2.000000e+00
  %tmp5 = getelementptr [1000 x float]* @A, i32 0, i32 %i.019.0
  store float %tmp4, float* %tmp5, align 4
  %tmp8 = shl i32 %i.019.0, 1
  %tmp9 = add i32 %tmp8, 64
  %tmp10 = getelementptr [1000 x i32]* @P, i32 0, i32 %i.019.0
  store i32 %tmp9, i32* %tmp10, align 4
  %indvar.next = add i32 %i.019.0, 1
  %exitcond = icmp eq i32 %indvar.next, %m
  br i1 %exitcond, label %return, label %bb

return:
  ret void
}