From 9bfcc624bad51270cc033f09a0a23bfc9cabe094 Mon Sep 17 00:00:00 2001
From: Chris Lattner
Date: Fri, 28 Dec 2007 21:50:40 +0000
Subject: [PATCH] add a note.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@45387 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/README.txt | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/lib/Target/X86/README.txt b/lib/Target/X86/README.txt
index ad15b14f166..3bb92d1cad5 100644
--- a/lib/Target/X86/README.txt
+++ b/lib/Target/X86/README.txt
@@ -1585,3 +1585,35 @@ movl $0, 124(%esp)
 if the flags of the xor are dead.
 
 //===---------------------------------------------------------------------===//
+
+This testcase misses a read/modify/write opportunity (from PR1425):
+
+void vertical_decompose97iH1(int *b0, int *b1, int *b2, int width){
+    int i;
+    for(i=0; i<width; i++)
+        b1[i] += (1*(b0[i] + b2[i])+0)>>0;
+}
+
+We compile it down to:
+
+LBB1_2:        # bb
+        movl    (%esi,%edi,4), %ebx
+        addl    (%ecx,%edi,4), %ebx
+        addl    (%edx,%edi,4), %ebx
+        movl    %ebx, (%ecx,%edi,4)
+        incl    %edi
+        cmpl    %eax, %edi
+        jne     LBB1_2        # bb
+
+The inner loop should add to the memory location (%ecx,%edi,4), saving
+a mov.  Something like:
+
+        movl    (%esi,%edi,4), %ebx
+        addl    (%edx,%edi,4), %ebx
+        addl    %ebx, (%ecx,%edi,4)
+
+Additionally, LSR should rewrite the exit condition of the loop to use
+a stride-4 IV, which would allow all the scales in the loop to go away.
+This would result in smaller code and more efficient microops.
+
+//===---------------------------------------------------------------------===//
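
A rough C-level sketch of what the two rewrites amount to (hypothetical source,
not compiler output, and not part of the patch above; the _lsr suffix and the
'end' variable are invented for illustration): the b1[i] update becomes a
single read/modify/write, and the scaled index is replaced by pointers that
advance by one int (4 bytes) per iteration, so the exit test is a pointer
compare and no scaled addressing is left in the loop body.

/* Hypothetical sketch only; names are invented for illustration. */
void vertical_decompose97iH1_lsr(int *b0, int *b1, int *b2, int width){
    int *end = b1 + width;        /* exit condition becomes a pointer compare */
    while (b1 < end) {
        *b1 += *b0 + *b2;         /* single read/modify/write of *b1 */
        b0++; b1++; b2++;         /* stride-4 (one int) induction variables */
    }
}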