Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-11-19 01:13:25 +00:00
2efbbb38ba
Optimize addressing modes. This allows us to optimize things like isel-sink2.ll into:

        movl    4(%esp), %eax
        cmpb    $0, 4(%eax)
        jne     LBB1_2          ## F
LBB1_1:                         ## TB
        movl    $4, %eax
        ret
LBB1_2:                         ## F
        movzbl  7(%eax), %eax
        ret

instead of:

_test:
        movl    4(%esp), %eax
        cmpb    $0, 4(%eax)
        leal    4(%eax), %eax
        jne     LBB1_2          ## F
LBB1_1:                         ## TB
        movl    $4, %eax
        ret
LBB1_2:                         ## F
        movzbl  3(%eax), %eax
        ret

This shrinks (e.g.) 403.gcc from 1133510 to 1128345 lines of .s.

Note that the 2008-10-16-SpillerBug.ll testcase is dubious at best; I doubt it is really testing what it thinks it is.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@60068 91177308-0d34-0410-b5e6-96231b3b80d8
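As an illustrative sketch (not code from the commit), the pattern in isel-sink2.ll corresponds roughly to the following C:

    /* Illustrative C analogue of isel-sink2.ll; names mirror the IR blocks.
       R points 4 bytes past P, and the F path loads R[3], i.e. byte 7 of P. */
    char test(int *P) {
        char *R = (char *)(P + 1);  /* byte offset 4 from P */
        if (*R == 0)
            return 4;               /* block TB */
        return R[3];                /* block F: byte offset 4 + 3 = 7 */
    }

With the improved addressing-mode selection, the full 7-byte displacement is folded into the load itself (movzbl 7(%eax)) rather than materializing R with a separate leal on a path that may never use it.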
17 lines · 378 B · LLVM
; RUN: llvm-as < %s | llc -march=x86 | grep {movzbl.7(%...)}
; RUN: llvm-as < %s | llc -march=x86 | not grep leal
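; The first RUN line checks that the combined 4+3 byte offset was folded
; into the load's addressing mode (movzbl 7(...)); the second checks that
; no separate leal was emitted to compute the intermediate address.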
define i8 @test(i32 *%P) nounwind {
  %Q = getelementptr i32* %P, i32 1
  %R = bitcast i32* %Q to i8*
  %S = load i8* %R
  %T = icmp eq i8 %S, 0
  br i1 %T, label %TB, label %F
TB:
  ret i8 4
F:
  %U = getelementptr i8* %R, i32 3
  %V = load i8* %U
  ret i8 %V
}