Instead of generating this:
mov %ECX, %EAX
add %ECX, 32768
mov %SI, WORD PTR [2*%ECX + l13_prev]
Generate this:
mov %SI, WORD PTR [2*%ECX + l13_prev + 65536]
This occurs when you have a GEP instruction where an index is
"something + imm".
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19472 91177308-0d34-0410-b5e6-96231b3b80d8
Set the flag for int -> FP casting code. Note that we don't have to set it for FP operations
that take FP values as operands: whatever produces the FP value will set the
flag.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19451 91177308-0d34-0410-b5e6-96231b3b80d8
Choose whether to evaluate the LHS or the RHS of an operation first. This
noticeably reduces register pressure. For example, instead of compiling a loop
to this:
.LBBstrength_result7_1: # loopentry
movl 16(%esp), %edi
movl (%edi), %edi ;;; LOAD
movl (%ecx), %ebx
movl $2, (%eax,%ebx,4)
movl (%edx), %ebx
movl %esi, %ebp
addl $21, %ebp
addl $42, %esi
cmpl $0, %edi ;;; USE
cmovne %esi, %ebp
cmpl %ebp, %ebx
movl %ebp, %esi
jg .LBBstrength_result7_1
We now compile it to this:
.LBBstrength_result7_1: # loopentry
movl %edi, %ebx
addl $42, %ebx
addl $21, %edi
movl (%ecx), %ebp ;; LOAD
cmpl $0, %ebp ;; USE
cmovne %ebx, %edi
movl (%edx), %ebx
movl $2, (%eax,%ebx,4)
movl (%esi), %ebx
cmpl %edi, %ebx
jg .LBBstrength_result7_1
Which reduces register pressure enough (in this case) to avoid spilling in the
loop.
As another example, consider the CodeGen/X86/regpressure.ll testcase. We
used to generate this code for both cases:
regpressure1:
subl $32, %esp
movl %esi, 12(%esp)
movl %edi, 8(%esp)
movl %ebx, 4(%esp)
movl %ebp, (%esp)
movl 36(%esp), %ecx
movl (%ecx), %eax
movl 4(%ecx), %edx
movl %edx, 24(%esp)
movl 8(%ecx), %edx
movl %edx, 16(%esp)
movl 12(%ecx), %edx
movl 16(%ecx), %esi
movl 20(%ecx), %edi
movl 24(%ecx), %ebx
movl %ebx, 28(%esp)
movl 28(%ecx), %ebx
movl 32(%ecx), %ebp
movl %ebp, 20(%esp)
movl 36(%ecx), %ecx
imull 24(%esp), %eax
imull 16(%esp), %eax
imull %edx, %eax
imull %esi, %eax
imull %edi, %eax
imull 28(%esp), %eax
imull %ebx, %eax
imull 20(%esp), %eax
imull %ecx, %eax
movl (%esp), %ebp
movl 4(%esp), %ebx
movl 8(%esp), %edi
movl 12(%esp), %esi
addl $32, %esp
ret
This code does all of the loads first, then executes all of the multiplies.
Because this runs out of registers, we generate lots of spill code.
We now generate this code for both cases:
regpressure1:
movl 4(%esp), %ecx
movl (%ecx), %eax
movl 4(%ecx), %edx
imull %edx, %eax
movl 8(%ecx), %edx
imull %edx, %eax
movl 12(%ecx), %edx
imull %edx, %eax
movl 16(%ecx), %edx
imull %edx, %eax
movl 20(%ecx), %edx
imull %edx, %eax
movl 24(%ecx), %edx
imull %edx, %eax
movl 28(%ecx), %edx
imull %edx, %eax
movl 32(%ecx), %edx
imull %edx, %eax
movl 36(%ecx), %ecx
imull %ecx, %eax
ret
which is much nicer (when we fold loads into the muls it will be even better).
The old instruction selector used to produce the good code for regpressure1
but not for regpressure2, as it depended on the order of operations in the
LLVM code.
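Judging from the asm above, regpressure1 computes the product of ten ints
loaded from its argument. A C sketch (that regpressure2 merely associates
the multiplies differently is an assumption):

int regpressure1(int *P) {
    /* ten loads feeding a chain of multiplies; the new selector
       schedules each load immediately before its use */
    return P[0] * P[1] * P[2] * P[3] * P[4]
         * P[5] * P[6] * P[7] * P[8] * P[9];
}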
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19449 91177308-0d34-0410-b5e6-96231b3b80d8
Allow a global address to be used as the immediate operand of an ADDri (due
to current restrictions on MachineOperand :( ). This allows
us to generate:
leal Data+16000, %edx
instead of:
movl $Data, %edx
addl $16000, %edx
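A C-level sketch of the pattern (Data's element type is an assumption;
16000 bytes is 4000 ints):

int Data[8000];   /* hypothetical global; only the symbol name appears above */

int *element_4000(void) {
    /* &Data[4000] is Data + 16000 bytes: one leal computes symbol+offset */
    return &Data[4000];
}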
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19420 91177308-0d34-0410-b5e6-96231b3b80d8
Codegen a store such as:
store float 123.45, float* %P
as an integer store. This adds handling of float immediate stores as integers
for arguments passed to function calls.
This is now tested by CodeGen/X86/store-fp-constant.ll
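The trick, rendered in C (a sketch; assumes IEEE-754 single precision, with
the bit pattern computed ahead of time):

#include <stdint.h>
#include <string.h>

void store_fp_constant(float *P) {
    uint32_t bits = 0x42F6E666;     /* bit pattern of 123.45f */
    memcpy(P, &bits, sizeof bits);  /* one 32-bit integer store, no FP code */
}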
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19364 91177308-0d34-0410-b5e6-96231b3b80d8
For now, the old selector remains the default, as the new pattern selector is
still missing some big pieces.
To enable the new selector, pass -disable-pattern-isel=false to llc or lli.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19335 91177308-0d34-0410-b5e6-96231b3b80d8
1. If a double constant can be precisely represented as a float, put it into
the constant pool as a float.
2. Use the cbw/cwd/cdq instructions instead of an explicit SAR for signed
division (a sketch follows this list).
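A C-level sketch of item 2 (hypothetical function; the instruction sequences
in the comment are the point):

/* Signed 32-bit division: idivl divides %edx:%eax, so the dividend's sign
 * must first be extended into %edx.  Instead of the explicit shift:
 *     movl %eax, %edx
 *     sarl $31, %edx
 * the one-byte cdq performs the same extension (cbw and cwd are the 8- and
 * 16-bit analogues). */
int sdiv(int a, int b) {
    return a / b;
}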
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19291 91177308-0d34-0410-b5e6-96231b3b80d8
- unsigned TrueValue = getReg(TrueVal, BB, BB->begin());
+ unsigned TrueValue = getReg(TrueVal);
Fixes the PPC regressions from last night.
The other hunk is just a clarity improvement.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19263 91177308-0d34-0410-b5e6-96231b3b80d8
to get Visual Studio to link in X86.lib to the executables that need it. There
is another way of doing it.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19252 91177308-0d34-0410-b5e6-96231b3b80d8
1. Add new instructions for checking the parity flag: JP, JNP, SETP, SETNP
(needed for unordered FP comparisons; see the sketch after this list).
2. Set the isCommutable and isPromotableTo3Address bits on several
instructions.
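A C sketch of where the parity instructions come in (the lowering described
in the comment is illustrative): x86 FP compares set PF when the operands are
unordered, i.e. when a NaN is involved:

/* Float equality must treat NaN as unequal.  The FP compare sets PF on an
 * unordered result, so this lowers to a compare followed by an
 * "equal AND ordered" check, e.g. sete combined with setnp. */
int feq(float a, float b) {
    return a == b;
}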
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19246 91177308-0d34-0410-b5e6-96231b3b80d8
While we're at it, improve codegen of select instructions. For this
testcase:
int %test(bool %C, int %A, int %B) {
%D = select bool %C, int %A, int %B
ret int %D
}
We used to generate this code:
_test:
cmpwi cr0, r3, 0
bne .LBB_test_2 ;
.LBB_test_1: ;
b .LBB_test_3 ;
.LBB_test_2: ;
or r5, r4, r4
.LBB_test_3: ;
or r3, r5, r5
blr
Now we emit:
_test:
cmpwi cr0, r3, 0
bne .LBB_test_2 ;
.LBB_test_1: ;
or r4, r5, r5
.LBB_test_2: ;
or r3, r4, r4
blr
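For reference, the testcase is just a conditional move at the source level
(the "or rX, rY, rY" pairs above are PPC's register-move idiom). A C
rendering:

/* C equivalent of the select testcase above */
int test(int C, int A, int B) {
    return C ? A : B;
}

The new code drops the third basic block and its unconditional branch: the
false case now falls through.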
-Chris
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19214 91177308-0d34-0410-b5e6-96231b3b80d8
This should save small amounts of time for functions that don't call llvm.returnaddress
or llvm.frameaddress (which is almost all functions).
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19006 91177308-0d34-0410-b5e6-96231b3b80d8