using Function::arg_{iterator|begin|end}. Likewise Module::g* -> Module::global_*.
This patch is contributed by Gabor Greif, thanks!
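As a hedged sketch of the new spellings (illustrative only, assuming valid Function* F and Module* M; header paths are the current ones, not the 2005 layout):
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Touch every argument and every global using the renamed iterators.
static void visitArgsAndGlobals(Function *F, Module *M) {
  // Function::arg_{iterator,begin,end} replace the old argument accessors.
  for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end(); AI != AE; ++AI)
    AI->setName("arg");
  // Module::global_{iterator,begin,end} replace the old Module::g* names.
  for (Module::global_iterator GI = M->global_begin(), GE = M->global_end(); GI != GE; ++GI)
    GI->setName("gv");
}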
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@20597 91177308-0d34-0410-b5e6-96231b3b80d8
store float 123.45, float* %P
as an integer store. This adds handling of float immediate stores as integers
for arguments passed to function calls.
This is now tested by CodeGen/X86/store-fp-constant.ll
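A hedged sketch of the idea in source form (hypothetical helper; the backend does this during instruction selection, not in user code): the float immediate's 32-bit pattern is written with an ordinary integer store, so no FP stack traffic is needed.
#include <cstdint>
#include <cstring>

// Store a float constant through P by reinterpreting its bit pattern and
// issuing a single 32-bit integer store (mov DWORD PTR [P], imm32).
static void storeFloatAsInt(float *P, float Imm) {
  uint32_t Bits;
  std::memcpy(&Bits, &Imm, sizeof Bits);   // bit pattern of the immediate
  std::memcpy(P, &Bits, sizeof Bits);      // plain integer store
}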
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19364 91177308-0d34-0410-b5e6-96231b3b80d8
precisely represented as a float, put it into the constant pool as a
float.
2. Use the cbw/cwd/cdq instructions instead of an explicit SAR for signed
division.
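A minimal sketch of the check behind item 1 (the exact test used is an assumption on my part): a double constant goes into the pool as a float only if it round-trips through float unchanged.
// True if D can be kept in the constant pool as a 4-byte float without
// changing its value. Ignores NaN and out-of-range corner cases.
static bool fitsInFloat(double D) {
  return static_cast<double>(static_cast<float>(D)) == D;
}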
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19291 91177308-0d34-0410-b5e6-96231b3b80d8
save small amounts of time for functions that don't call llvm.returnaddress
or llvm.frameaddress (which is almost all functions).
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19006 91177308-0d34-0410-b5e6-96231b3b80d8
don't support long double anyway, and this gives us FP results closer to
other targets.
This also speeds up 179.art from 41.4s to 18.32s, by eliminating a problem
with extra precision that causes an FP == comparison to fail (leading to
extra loop iterations).
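A hedged illustration of that failure mode (assumes an x87-style target where long double is wider than double; where the two types are identical the comparison succeeds):
// A value kept at extended precision compares unequal to the same value
// after it has been rounded to 64 bits (e.g. spilled to memory).
static bool extraPrecisionBreaksEquality() {
  long double Extended = 1.0L / 3.0L;              // what the FP stack holds
  double Rounded = static_cast<double>(Extended);  // what a 64-bit spill holds
  return Extended != Rounded;                      // true with 80-bit precision
}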
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@18895 91177308-0d34-0410-b5e6-96231b3b80d8
to Brian and the Sun compiler for pointing out that the obvious works :)
This also enables folding all long comparisons into setcc and branch
instructions: before we could only do == and !=
For example, for:
void test(unsigned long long A, unsigned long long B) {
  if (A < B) foo();
}
We now generate:
test:
subl $4, %esp
movl %esi, (%esp)
movl 8(%esp), %eax
movl 12(%esp), %ecx
movl 16(%esp), %edx
movl 20(%esp), %esi
subl %edx, %eax
sbbl %esi, %ecx
jae .LBBtest_2 # UnifiedReturnBlock
.LBBtest_1: # then
call foo
movl (%esp), %esi
addl $4, %esp
ret
.LBBtest_2: # UnifiedReturnBlock
movl (%esp), %esi
addl $4, %esp
ret
Instead of:
test:
subl $12, %esp
movl %esi, 8(%esp)
movl %ebx, 4(%esp)
movl 16(%esp), %eax
movl 20(%esp), %ecx
movl 24(%esp), %edx
movl 28(%esp), %esi
cmpl %edx, %eax
setb %al
cmpl %esi, %ecx
setb %bl
cmove %ax, %bx
testb %bl, %bl
je .LBBtest_2 # UnifiedReturnBlock
.LBBtest_1: # then
call foo
movl 4(%esp), %ebx
movl 8(%esp), %esi
addl $12, %esp
ret
.LBBtest_2: # UnifiedReturnBlock
movl 4(%esp), %ebx
movl 8(%esp), %esi
addl $12, %esp
ret
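A sketch of why the sub/sbb pair is sufficient (hypothetical helper; the hardware keeps the borrow in the carry flag, modeled here as an explicit variable):
#include <cstdint>

// Unsigned 64-bit A < B using only 32-bit subtract and subtract-with-borrow,
// mirroring the subl/sbbl/jae sequence above.
static bool ulongLess(uint32_t ALo, uint32_t AHi, uint32_t BLo, uint32_t BHi) {
  uint32_t Borrow = ALo < BLo;                 // carry out of "subl %edx, %eax"
  uint64_t Hi = (uint64_t)AHi - BHi - Borrow;  // "sbbl %esi, %ecx"
  return (Hi >> 32) & 1;                       // final borrow is exactly A < B
}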
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@18330 91177308-0d34-0410-b5e6-96231b3b80d8
two or three, open code the equivalent operation, which is faster on Athlon
and P4 (by a substantial margin).
For example, instead of compiling this:
long long X2(long long Y) { return Y << 2; }
to:
X2:
movl 4(%esp), %eax
movl 8(%esp), %edx
shldl $2, %eax, %edx
shll $2, %eax
ret
Compile it to:
X2:
movl 4(%esp), %eax
movl 8(%esp), %ecx
movl %eax, %edx
shrl $30, %edx
leal (%edx,%ecx,4), %edx
shll $2, %eax
ret
Likewise, for << 3, compile to:
X3:
movl 4(%esp), %eax
movl 8(%esp), %ecx
movl %eax, %edx
shrl $29, %edx
leal (%edx,%ecx,8), %edx
shll $3, %eax
ret
This matches icc, except that icc open codes the shifts as adds on the P4.
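A sketch of the decomposition (hypothetical helper): for Y = (Hi,Lo), Y << 2 has low word Lo << 2 and high word (Lo >> 30) + Hi*4, which is exactly what the shrl/leal/shll triple computes; << 3 is the same with 29 and a scale of 8.
#include <cstdint>

// Open-coded 64-bit shift-left by 2 using only 32-bit operations.
static uint64_t shiftLeft2(uint32_t Lo, uint32_t Hi) {
  uint32_t NewLo = Lo << 2;              // shll $2, %eax
  uint32_t NewHi = (Lo >> 30) + Hi * 4;  // shrl $30, %edx + leal (%edx,%ecx,4), %edx
  return ((uint64_t)NewHi << 32) | NewLo;
}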
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@17707 91177308-0d34-0410-b5e6-96231b3b80d8
double %test(uint %X) {
        %tmp.1 = cast uint %X to double         ; <double> [#uses=1]
        ret double %tmp.1
}
into:
test:
sub %ESP, 8
mov %EAX, DWORD PTR [%ESP + 12]
mov %ECX, 0
mov DWORD PTR [%ESP], %EAX
mov DWORD PTR [%ESP + 4], %ECX
fild QWORD PTR [%ESP]
add %ESP, 8
ret
... which basically zero-extends to 8 bytes, then does an fild of an
8-byte signed int.
Now we generate this:
test:
sub %ESP, 4
mov %EAX, DWORD PTR [%ESP + 8]
mov DWORD PTR [%ESP], %EAX
fild DWORD PTR [%ESP]
shr %EAX, 31
fadd DWORD PTR [.CPItest_0 + 4*%EAX]
add %ESP, 4
ret
.section .rodata
.align 4
.CPItest_0:
.quad 5728578726015270912
This does a 32-bit signed integer load, then adds in an offset if the sign
bit of the integer was set.
It turns out that this is substantially faster than the preceding sequence.
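A hedged C++ rendering of the new sequence (my reading of .CPItest_0 is two packed floats, 0.0f and 2^32; the signed cast assumes two's-complement wraparound, which is what the 32-bit fild sees):
#include <cstdint>

// 0.0f and 4294967296.0f, selected by the sign bit of the input.
static const float SignAdjust[2] = { 0.0f, 4294967296.0f };

// Convert unsigned -> double via a signed 32-bit load plus a correction,
// mirroring the fild/shr/fadd sequence above.
static double uintToDouble(uint32_t X) {
  double D = (double)(int32_t)X;    // fild DWORD PTR: 32-bit *signed* convert
  return D + SignAdjust[X >> 31];   // add back 2^32 if the sign bit was set
}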
Consider this testcase:
unsigned a[2] = {1, 2};
volatile double G;
int main() {
  int i;
  for (i = 0; i < 100000000; ++i)
    G += a[i&1];
}
On zion (a P4 Xeon, 3GHz), this patch speeds up the testcase from 2.140s
to 0.94s.
On apoc, an Athlon MP 2100+, this patch speeds up the testcase from 1.72s
to 1.34s.
Note that the program takes 2.5s/1.97s on zion/apoc with GCC 3.3 -O3
-fomit-frame-pointer.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@17083 91177308-0d34-0410-b5e6-96231b3b80d8
%X = and Y, constantint
%Z = setcc %X, 0
instead of emitting:
and %EAX, 3
test %EAX, %EAX
je .LBBfoo2_2 # UnifiedReturnBlock
We now emit:
test %EAX, 3
je .LBBfoo2_2 # UnifiedReturnBlock
This triggers 581 times on 176.gcc for example.
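A source-level pattern that produces this sequence (a hypothetical example, not taken from 176.gcc):
void bar();   // assumed external

void foo2(unsigned Y) {
  if ((Y & 3) == 0)   // and Y, 3; setcc vs. 0  -->  a single "test %EAX, 3"
    bar();
}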
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@17080 91177308-0d34-0410-b5e6-96231b3b80d8
case:
int C[100];
int foo() {
  return C[4];
}
We now codegen:
foo:
mov %EAX, DWORD PTR [C + 16]
ret
instead of:
foo:
mov %EAX, OFFSET C
mov %EAX, DWORD PTR [%EAX + 16]
ret
Other impressive features may be coming later.
This patch is contributed by Jeff Cohen!
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@17011 91177308-0d34-0410-b5e6-96231b3b80d8
which prevented setccs from being folded into branches. It appears that
a conditional BranchInst's CC operand is actually operand(2), not operand(0)
as we might expect. :(
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@16859 91177308-0d34-0410-b5e6-96231b3b80d8
t:
mov %EDX, DWORD PTR [%ESP + 4]
mov %ECX, 2
mov %EAX, %EDX
sar %EDX, 31
idiv %ECX
mov %EAX, %EDX
ret
Generate:
t:
mov %ECX, DWORD PTR [%ESP + 4]
*** mov %EAX, %ECX
cdq
and %ECX, 1
xor %ECX, %EDX
sub %ECX, %EDX
*** mov %EAX, %ECX
ret
Note that the two marked moves are redundant, and should be eliminated by the
register allocator, but aren't.
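What the new sequence computes, as a hedged sketch (the arithmetic right shift stands in for cdq and assumes sign-extending >> on negative int, which holds on the targets in question):
// Signed X % 2 without idiv: conditionally negate the low bit.
static int signedRem2(int X) {
  int S = X >> 31;            // cdq: S is all ones when X is negative, else 0
  return ((X & 1) ^ S) - S;   // and $1 / xor / sub from the listing above
}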
Compare this to GCC, which generates:
t:
mov %eax, DWORD PTR [%esp+4]
mov %edx, %eax
shr %edx, 31
lea %ecx, [%edx+%eax]
and %ecx, -2
sub %eax, %ecx
ret
or ICC 8.0, which generates:
t:
movl 4(%esp), %ecx #3.5
movl $-2147483647, %eax #3.25
imull %ecx #3.25
movl %ecx, %eax #3.25
sarl $31, %eax #3.25
addl %ecx, %edx #3.25
subl %edx, %eax #3.25
addl %eax, %eax #3.25
negl %eax #3.25
subl %eax, %ecx #3.25
movl %ecx, %eax #3.25
ret #3.25
We would be in great shape if not for the moves.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@16763 91177308-0d34-0410-b5e6-96231b3b80d8
Move include/Config and include/Support into include/llvm/Config,
include/llvm/ADT and include/llvm/Support. From here on out, all LLVM
public header files must be under include/llvm/.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@16137 91177308-0d34-0410-b5e6-96231b3b80d8
improvements to instruction selection that account for register and frame
index bases.
Patch contributed by Jeff Cohen. Thanks Jeff!
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@16110 91177308-0d34-0410-b5e6-96231b3b80d8
this LLVM function:
int %foo() {
        ret int cast (int** getelementptr (int** null, int 1) to int)
}
into:
foo:
mov %EAX, 0
lea %EAX, DWORD PTR [%EAX + 4]
ret
now we compile it into:
foo:
mov %EAX, 4
ret
This sequence is frequently generated by the MSIL front-end, and soon will be by the malloc
lowering pass and Java front-end as well.
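The source-level idiom behind that constant expression, as a hedged sketch (illustrative only; pointer arithmetic on null is the classic compute-sizeof-by-address trick, and real code should just write sizeof):
#include <cstddef>

// "getelementptr (int** null, int 1)" is one int* past address zero, so its
// integer value is sizeof(int*): 4 on 32-bit x86.
static size_t pointerSizeViaNullGEP() {
  return (size_t)&((int **)0)[1];
}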
-Chris
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@14834 91177308-0d34-0410-b5e6-96231b3b80d8