Mirror of https://github.com/c64scene-ar/llvm-6502.git
Commit 10c0d9179e

This compiles getelementptr-seteq.ll into:

    define i1 @test(i64 %X, %S* %P) {
        %C = icmp eq i64 %X, -1                 ; <i1> [#uses=1]
        ret i1 %C
    }

instead of:

    define i1 @test(i64 %X, %S* %P) {
        %A.idx.mask = and i64 %X, 4611686018427387903           ; <i64> [#uses=1]
        %C = icmp eq i64 %A.idx.mask, 4611686018427387903       ; <i1> [#uses=1]
        ret i1 %C
    }

This fixes the second half of PR2235 and speeds up the insertion sort case by 45%, from 1.12s to 0.77s. In practice, it will significantly speed up for loops structured like:

    for (double *P = Base + N; P != Base; --P)
      ...

which happens frequently with C++ iterators.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@50079 91177308-0d34-0410-b5e6-96231b3b80d8
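As a concrete illustration of that loop shape, here is a minimal, hypothetical C++ function (the name sum_backwards and its body are not from the commit): the exit test P != Base compares a pointer derived from Base against Base itself, which is the pointer-vs-getelementptr comparison this fold rewrites into an integer index comparison.

    // Hypothetical example, assuming a plain contiguous array of doubles.
    #include <cstddef>

    double sum_backwards(const double *Base, std::size_t N) {
      double Sum = 0.0;
      // The exit test "P != Base" is the comparison the fold simplifies.
      for (const double *P = Base + N; P != Base; --P)
        Sum += P[-1];   // visit elements from the end down to Base
      return Sum;
    }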
14 lines, 353 B, LLVM
; Test folding of constantexpr geps into normal geps.
; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {icmp eq i64 %X, -1}
; PR2235

%S = type { i32, [ 100 x i32] }

define i1 @test(i64 %X, %S* %P) {
        %A = getelementptr %S* %P, i32 0, i32 1, i64 %X
        %B = getelementptr %S* %P, i32 0, i32 0
        %C = icmp eq i32* %A, %B
        ret i1 %C
}
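For readers less familiar with the IR, a hypothetical C++ analogue of @test (not part of the LLVM test suite) is sketched below. It assumes a 4-byte int so the struct layout matches %S: %A corresponds to &P->Arr[X] at byte offset 4 + 4*X from P, and %B to &P->Field at offset 0, so the two addresses coincide exactly when X is -1, which is the icmp the RUN line expects instcombine to produce.

    #include <cstdint>

    // Mirrors %S = type { i32, [ 100 x i32] }: an int at offset 0
    // followed by an array of 100 ints starting at offset 4.
    struct S {
      int Field;     // %S field 0, byte offset 0
      int Arr[100];  // %S field 1, byte offset 4
    };

    // Ignoring C++'s out-of-range indexing rules, &P->Arr[X] == &P->Field
    // holds exactly when 4 + 4*X == 0, i.e. X == -1, matching the folded
    // "icmp eq i64 %X, -1".
    bool test(std::int64_t X, S *P) {
      return &P->Arr[X] == &P->Field;
    }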