Remove some README.txt entries which are now implemented.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@92511 91177308-0d34-0410-b5e6-96231b3b80d8
parent 44c92e5826
commit 3c02c22ef9
@@ -530,7 +530,7 @@ We should inline lrintf and probably other libc functions.
 
 //===---------------------------------------------------------------------===//
 
-Start using the flags more. For example, compile:
+Use the FLAGS values from arithmetic instructions more. For example, compile:
 
 int add_zf(int *x, int y, int a, int b) {
      if ((*x += y) == 0)
@@ -554,31 +554,8 @@ _add_zf:
         movl %ecx, %eax
         ret
 
-and:
-
-int add_zf(int *x, int y, int a, int b) {
-     if ((*x + y) < 0)
-          return a;
-     else
-          return b;
-}
-
-to:
-add_zf:
-        addl (%rdi), %esi
-        movl %edx, %eax
-        cmovns %ecx, %eax
-        ret
-
-instead of:
-_add_zf:
-        addl (%rdi), %esi
-        testl %esi, %esi
-        cmovs %edx, %ecx
-        movl %ecx, %eax
-        ret
 
 As another example, compile function f2 in test/CodeGen/X86/cmp-test.ll
 without a test instruction.
 
 //===---------------------------------------------------------------------===//
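The f2 mentioned above lives in test/CodeGen/X86/cmp-test.ll and is not
shown in this diff, so the C stand-in below is an assumption about the
pattern being tested, not a copy of the actual function:

/* Hypothetical f2-like pattern: the addl that updates *x already sets
 * SF/ZF, so codegen should branch on those flags directly instead of
 * emitting a separate test instruction. */
int f2_like(int *x, int y) {
  if ((*x += y) < 0)   /* SF from the addl decides this branch */
    return 1;
  return 0;
}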
@@ -685,55 +662,6 @@ Though this probably isn't worth it.
 
 //===---------------------------------------------------------------------===//
 
-We need to teach the codegen to convert two-address INC instructions to LEA
-when the flags are dead (likewise dec). For example, on X86-64, compile:
-
-int foo(int A, int B) {
-  return A+1;
-}
-
-to:
-
-_foo:
-        leal 1(%edi), %eax
-        ret
-
-instead of:
-
-_foo:
-        incl %edi
-        movl %edi, %eax
-        ret
-
-Another example is:
-
-;; X's live range extends beyond the shift, so the register allocator
-;; cannot coalesce it with Y. Because of this, a copy needs to be
-;; emitted before the shift to save the register value before it is
-;; clobbered. However, this copy is not needed if the register
-;; allocator turns the shift into an LEA. This also occurs for ADD.
-
-; Check that the shift gets turned into an LEA.
-; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | \
-; RUN: not grep {mov E.X, E.X}
-
-@G = external global i32 ; <i32*> [#uses=3]
-
-define i32 @test1(i32 %X, i32 %Y) {
-        %Z = add i32 %X, %Y ; <i32> [#uses=1]
-        volatile store i32 %Y, i32* @G
-        volatile store i32 %Z, i32* @G
-        ret i32 %X
-}
-
-define i32 @test2(i32 %X) {
-        %Z = add i32 %X, 1 ; <i32> [#uses=1]
-        volatile store i32 %Z, i32* @G
-        ret i32 %X
-}
-
-//===---------------------------------------------------------------------===//
-
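A gloss on why LEA helps in the entry just removed (mine, not the original
author's): inc and add are two-address on x86, so the result must land in
the source register, forcing a copy when the result is needed elsewhere,
while lea acts as a three-address add that also leaves EFLAGS alone, which
is exactly why it is only legal when the flags are dead. A minimal sketch,
assuming the SysV x86-64 convention (A in %edi, B in %esi):

/* The result must land in %eax while the operands arrive in %edi and
 * %esi, so a two-address add needs a copy first:
 *   movl %edi, %eax
 *   addl %esi, %eax
 * whereas the three-address form is a single instruction:
 *   leal (%rdi,%rsi), %eax
 */
int add_keep_operands(int A, int B) {
  return A + B;
}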
 Sometimes it is better to codegen subtractions from a constant (e.g. 7-x) with
 a neg instead of a sub instruction. Consider:
 
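The entry's own example falls outside this hunk; what follows is a sketch
of the two instruction shapes, under the assumption that the point is the
constant-minus-variable operand order:

/* 7 - x, with x arriving in %edi (x86-64):
 *
 *   sub form:                     neg form:
 *     movl $7, %eax                 negl %edi
 *     subl %edi, %eax               leal 7(%edi), %eax
 *
 * The neg form never materializes the constant in a register, since
 * the 7 folds into the lea displacement; which form is cheaper
 * depends on the surrounding code. */
int sub_from_const(int x) {
  return 7 - x;
}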
@@ -852,11 +780,6 @@ __Z11no_overflowjj:
         ret
 
-
-//===---------------------------------------------------------------------===//
-
-Re-materialize MOV32r0 etc. with xor instead of changing them to moves if the
-condition register is dead. xor reg reg is shorter than mov reg, #0.
 
 //===---------------------------------------------------------------------===//
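The size claim in the entry just removed can be made concrete; the byte
counts below are the standard 32-bit x86 encodings, and the catch is that
xor writes EFLAGS, hence the dead-condition-register requirement:

/* Two ways to materialize zero in %eax:
 *   xorl %eax, %eax    -> 31 C0            (2 bytes, clobbers EFLAGS)
 *   movl $0, %eax      -> B8 00 00 00 00   (5 bytes, EFLAGS untouched)
 */
int zero(void) {
  return 0;
}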
 
 The following code: