; RUN: opt < %s -gvn -S | FileCheck %s
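; These tests exercise GVN's use of PHI translation in memory dependence
; analysis: when a load's pointer operand is a phi (possibly behind a bitcast
; or getelementptr), the load should be replaced by a phi of the values that
; are already available in the predecessor blocks.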
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin7"
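; test1: both loads in bb2 go through pointer phis; they should be PHI
; translated into the predecessors and replaced with phis of the values
; already available there, so no load remains.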
define i32 @test1(i32* %b, i32* %c) nounwind {
; CHECK: @test1
entry:
  %g = alloca i32
  %t1 = icmp eq i32* %b, null
  br i1 %t1, label %bb, label %bb1

bb:
  %t2 = load i32* %c, align 4
  %t3 = add i32 %t2, 1
  store i32 %t3, i32* %g, align 4
  br label %bb2

bb1:        ; preds = %entry
  %t5 = load i32* %b, align 4
  %t6 = add i32 %t5, 1
  store i32 %t6, i32* %g, align 4
  br label %bb2

bb2:        ; preds = %bb1, %bb
  %c_addr.0 = phi i32* [ %g, %bb1 ], [ %c, %bb ]
  %b_addr.0 = phi i32* [ %b, %bb1 ], [ %g, %bb ]
  %cv = load i32* %c_addr.0, align 4
  %bv = load i32* %b_addr.0, align 4
; CHECK: %bv = phi i32
; CHECK: %cv = phi i32
; CHECK-NOT: load
; CHECK: ret i32
  %ret = add i32 %cv, %bv
  ret i32 %ret
}
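; test2: the loaded pointer is a bitcast of a phi; PHI translation must look
; through the bitcast to find the store in each predecessor.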
define i8 @test2(i1 %cond, i32* %b, i32* %c) nounwind {
; CHECK: @test2
entry:
  br i1 %cond, label %bb, label %bb1

bb:
  %b1 = bitcast i32* %b to i8*
  store i8 4, i8* %b1
  br label %bb2

bb1:
  %c1 = bitcast i32* %c to i8*
  store i8 92, i8* %c1
  br label %bb2

bb2:
  %d = phi i32* [ %c, %bb1 ], [ %b, %bb ]
  %d1 = bitcast i32* %d to i8*
  %dv = load i8* %d1
; CHECK: %dv = phi i8 [ 92, %bb1 ], [ 4, %bb ]
; CHECK-NOT: load
; CHECK: ret i8 %dv
  ret i8 %dv
}
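; test3: the loaded pointer is a gep of a phi with a phi'd index; PHI
; translation must translate both the base pointer and the index to find the
; store in each predecessor.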
define i32 @test3(i1 %cond, i32* %b, i32* %c) nounwind {
; CHECK: @test3
entry:
  br i1 %cond, label %bb, label %bb1

bb:
  %b1 = getelementptr i32* %b, i32 17
  store i32 4, i32* %b1
  br label %bb2

bb1:
  %c1 = getelementptr i32* %c, i32 7
  store i32 82, i32* %c1
  br label %bb2

bb2:
  %d = phi i32* [ %c, %bb1 ], [ %b, %bb ]
  %i = phi i32 [ 7, %bb1 ], [ 17, %bb ]
  %d1 = getelementptr i32* %d, i32 %i
  %dv = load i32* %d1
; CHECK: %dv = phi i32 [ 82, %bb1 ], [ 4, %bb ]
; CHECK-NOT: load
; CHECK: ret i32 %dv
  ret i32 %dv
}
; PR5313
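; test4: same as test3, but in %bb the translated gep has index 0 and must be
; simplified down to %b itself to match the store in that predecessor.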
define i32 @test4(i1 %cond, i32* %b, i32* %c) nounwind {
; CHECK: @test4
entry:
  br i1 %cond, label %bb, label %bb1

bb:
  store i32 4, i32* %b
  br label %bb2

bb1:
  %c1 = getelementptr i32* %c, i32 7
  store i32 82, i32* %c1
  br label %bb2

bb2:
  %d = phi i32* [ %c, %bb1 ], [ %b, %bb ]
  %i = phi i32 [ 7, %bb1 ], [ 0, %bb ]
  %d1 = getelementptr i32* %d, i32 %i
  %dv = load i32* %d1
; CHECK: %dv = phi i32 [ 82, %bb1 ], [ 4, %bb ]
; CHECK-NOT: load
; CHECK: ret i32 %dv
  ret i32 %dv
}