Turn a memcpy from a double* into a load/store of double instead of a load/store of i64. The latter prevents promotion/scalarrepl of the source and dest in many cases. This fixes the 300% performance regression of the byval stuff on stepanov_v1p2.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@45945 91177308-0d34-0410-b5e6-96231b3b80d8
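For context, instcombine already rewrote a small constant-size memcpy into a single load/store pair, but it chose an integer type sized to the copy. For an 8-byte copy between double pointers that old behaviour looks roughly like the sketch below (names hypothetical, not the verbatim output of any revision):

	%src.i = bitcast double* %src to i64*
	%dst.i = bitcast double* %dst to i64*
	%val = load i64* %src.i          ; 8-byte copy performed as an i64
	store i64 %val, i64* %dst.i

Because the i64 access type does not match the underlying double, promotion/scalarrepl often gives up on the source and destination; emitting load double/store double on the original pointers avoids that mismatch.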
test/Transforms/InstCombine/memcpy-to-load.ll (new file, 14 lines added)
@@ -0,0 +1,14 @@
; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {load double}
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i686-apple-darwin8"

define void @foo(double* %X, double* %Y) {
entry:
	%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
	%tmp2 = bitcast double* %X to i8*		; <i8*> [#uses=1]
	%tmp13 = bitcast double* %Y to i8*		; <i8*> [#uses=1]
	call void @llvm.memcpy.i32( i8* %tmp2, i8* %tmp13, i32 8, i32 1 )
	ret void
}

declare void @llvm.memcpy.i32(i8*, i8*, i32, i32) nounwind
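Given the transformation described in the commit message, the grep {load double} in the RUN line is expected to match because @foo should come out of -instcombine looking roughly like this (a sketch of the intended result, not verbatim output):

	define void @foo(double* %X, double* %Y) {
	entry:
		%tmp = load double* %Y		; the 8-byte memcpy becomes a double load from the source %Y...
		store double %tmp, double* %X	; ...and a double store to the destination %X
		ret void
	}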