Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-11-02 07:11:49 +00:00. Commit 2e0d5f8432:
be able to handle *ANY* alloca that is poked by loads and stores of bitcasts and GEPs with constant offsets. Before the code had a number of annoying limitations and caused it to miss cases such as storing into holes in structs and complex casts (as in bitfield-sroa) where we had unions of bitfields etc. This also handles a number of important cases that are exposed due to the ABI lowering stuff we do to pass stuff by value. One case that is pretty great is that we compile 2006-11-07-InvalidArrayPromote.ll into: define i32 @func(<4 x float> %v0, <4 x float> %v1) nounwind { %tmp10 = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %v1) %tmp105 = bitcast <4 x i32> %tmp10 to i128 %tmp1056 = zext i128 %tmp105 to i256 %tmp.upgrd.43 = lshr i256 %tmp1056, 96 %tmp.upgrd.44 = trunc i256 %tmp.upgrd.43 to i32 ret i32 %tmp.upgrd.44 } which turns into: _func: subl $28, %esp cvttps2dq %xmm1, %xmm0 movaps %xmm0, (%esp) movl 12(%esp), %eax addl $28, %esp ret Which is pretty good code all things considering :). One effect of this is that SROA will start generating arbitrary bitwidth integers that are a multiple of 8 bits. In the case above, we got a 256 bit integer, but the codegen guys assure me that it can handle the simple and/or/shift/zext stuff that we're doing on these operations. This addresses rdar://6532315 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@63469 91177308-0d34-0410-b5e6-96231b3b80d8
17 lines
517 B
LLVM
; RUN: llvm-as < %s | opt -scalarrepl | llvm-dis | not grep alloca
; rdar://6532315
;
; SROA (scalarrepl) must eliminate the alloca in @foo entirely, even though
; it is accessed through bitcasts at widths that do not match any single
; field of the struct. The RUN line checks that no alloca survives.

; 8-byte aggregate: i32 at offset 0, i16 at offset 4, i8 at offsets 6 and 7.
; Its size matches the i64 store in @foo, so the whole object is initialized.
%t = type { { i32, i16, i8, i8 } }
; i8 @foo(i64 %A)
; Stores the full 8-byte argument through an i64 view of the alloca, then
; reads it back through two differently-typed views. SROA should replace
; the alloca with integer shift/trunc arithmetic on %A.
define i8 @foo(i64 %A) {
  %ALL = alloca %t, align 8
  ; Initialize the entire aggregate via a whole-object i64 store.
  %tmp59172 = bitcast %t* %ALL to i64*
  store i64 %A, i64* %tmp59172, align 8
  ; %C points at the i16 field (byte offset 4). Loading it as i32 straddles
  ; the i16 field and both trailing i8 fields -- a cross-field access that
  ; older SROA could not handle. The result is intentionally unused; the
  ; load exists only to exercise that case.
  %C = getelementptr %t* %ALL, i32 0, i32 0, i32 1
  %D = bitcast i16* %C to i32*
  %E = load i32* %D, align 4
  ; Load the first byte of the aggregate (low byte of %A on little-endian).
  %F = bitcast %t* %ALL to i8*
  %G = load i8* %F, align 8
  ret i8 %G
}