Commit 3ebb05d9a6 (llvm.org trunk r133338): "... type's bitwidth matches the
(allocated) size of the alloca. This severely pessimizes vector scalar
replacement when the only vector type being used is something like <3 x float>
on x86 or ARM whose allocated size matches a <4 x float>. I hope to fix some of
the flawed assumptions about allocated size throughout scalar replacement and
reenable this in most cases."
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@133338 91177308-0d34-0410-b5e6-96231b3b80d8
; RUN: opt < %s -scalarrepl -S | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
target triple = "thumbv7-apple-darwin10"

; CHECK: f
; CHECK-NOT: alloca
; CHECK: %[[A:[a-z0-9]*]] = and i128 undef, -16777216
; CHECK: %[[B:[a-z0-9]*]] = bitcast i128 %[[A]] to <4 x float>
; CHECK: %[[C:[a-z0-9]*]] = extractelement <4 x float> %[[B]], i32 0
; CHECK: ret float %[[C]]
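; @f zeroes only the first 3 bytes of the 16-byte <4 x float> alloca. The
; patterns above expect scalarrepl to promote the alloca to a single i128,
; model the partial memset as clearing the low 24 bits of that integer
; (-16777216 is all-ones except for the low 24 bits on this little-endian
; target), and then recover lane 0 via a bitcast and extractelement, leaving
; no alloca behind.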
define float @f() nounwind ssp {
entry:
  %a = alloca <4 x float>, align 16
  %p = bitcast <4 x float>* %a to i8*
  call void @llvm.memset.p0i8.i32(i8* %p, i8 0, i32 3, i32 16, i1 false)
  %vec = load <4 x float>* %a, align 8
  %val = extractelement <4 x float> %vec, i32 0
  ret float %val
}

; CHECK: g
; CHECK-NOT: alloca
; CHECK: and i128
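; @g fully zeroes a { <4 x float> } and then stores a <2 x float> through a
; re-bitcast [2 x <2 x float>] view of the same memory. The alloca should
; still disappear; merging that partial vector store into the promoted i128
; is what the `and i128` pattern above is looking for.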
define void @g() nounwind ssp {
entry:
  %a = alloca { <4 x float> }, align 16
  %p = bitcast { <4 x float> }* %a to i8*
  call void @llvm.memset.p0i8.i32(i8* %p, i8 0, i32 16, i32 16, i1 false)
  %q = bitcast { <4 x float> }* %a to [2 x <2 x float>]*
  %arrayidx = getelementptr inbounds [2 x <2 x float>]* %q, i32 0, i32 0
  store <2 x float> undef, <2 x float>* %arrayidx, align 8
  ret void
}
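
; A minimal sketch, not part of the original test and not matched by any of
; the patterns above, of the kind of code the commit message refers to: the
; only vector type in use is <3 x float>, whose 96-bit value is narrower than
; its rounded-up allocated size (the message cites x86 and ARM, where that
; size matches a <4 x float>), so the bitwidth-must-match-allocated-size
; restriction blocks scalarrepl's vector promotion here. The function name @h
; and its body are illustrative only.
define float @h() nounwind ssp {
entry:
  %a = alloca <3 x float>, align 16
  %p = bitcast <3 x float>* %a to i8*
  call void @llvm.memset.p0i8.i32(i8* %p, i8 0, i32 12, i32 16, i1 false)
  %vec = load <3 x float>* %a, align 16
  %val = extractelement <3 x float> %vec, i32 0
  ret float %val
}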
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind