Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-12-15 20:29:48 +00:00, at commit b76f5ba103.
Currently the VSX support enables use of lxvd2x and stxvd2x for 2x64 types, but does not yet use lxvw4x and stxvw4x for 4x32 types. This patch adds that support. As with lxvd2x/stxvd2x, this involves straightforward overriding of the patterns normally recognized for lvx/stvx, with preference given to the VSX patterns when VSX is enabled. In addition, the logic for permitting misaligned memory accesses is modified so that v4r32 and v4i32 are treated the same as v2f64 and v2i64 when VSX is enabled. Finally, the DAG generation for unaligned loads is changed to just use a normal LOAD (which will become lxvw4x) on P8 and later hardware, where unaligned loads are preferred over lvsl/lvx/lvx/vperm. A number of tests now generate the VSX loads/stores instead of lvx/stvx, so this patch adds VSX variants to those tests. I've also added <4 x float> tests to the vsx.ll test case, and created a vsx-p8.ll test case to be used for testing code generation for the P8Vector feature. For now, that simply tests the unaligned load/store behavior. This has been tested along with a temporary patch to enable the VSX and P8Vector features, with no new regressions encountered with or without the temporary patch applied. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@220047 91177308-0d34-0410-b5e6-96231b3b80d8
46 lines · 1.9 KiB · LLVM
; Test that a 16-byte vector load with insufficient (4-byte) alignment is
; lowered to the AltiVec permute-based unaligned-load sequence, on both
; big-endian and little-endian PowerPC. VSX and P8Vector are explicitly
; disabled in the second RUN line so the lvsl/lvx/vperm expansion (rather
; than a VSX load) is exercised there.
; RUN: llc < %s -march=ppc32 -mcpu=g5 | FileCheck %s
; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mattr=+altivec -mattr=-vsx -mattr=-power8-vector | FileCheck %s
; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec | FileCheck %s -check-prefix=CHECK-LE

; Big-endian 32-bit Darwin layout: v128 types are naturally 128-bit
; aligned, so the "align 4" vector accesses in @foo below are misaligned.
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
target triple = "powerpc-apple-darwin8"

; A struct wrapping a single <16 x i8> vector; passed via varargs below.
%struct.S2203 = type { %struct.u16qi }
%struct.u16qi = type { <16 x i8> }

; Destination global that @foo copies the vararg vector into.
@s = weak global %struct.S2203 zeroinitializer		; <%struct.S2203*> [#uses=1]
|
; Pulls one %struct.S2203 (a <16 x i8> vector) out of the va_list and
; copies it into @s. Both the load from the vararg area and the store to
; @s claim only 4-byte alignment, so codegen must use the AltiVec
; unaligned-access expansion: lvsl + vperm on big-endian targets,
; lvsr + vperm on little-endian (see CHECK lines below).
define void @foo(i32 %x, ...) {
entry:
; CHECK: foo:
; CHECK-LE: foo:
%x_addr = alloca i32		; <i32*> [#uses=1]
%ap = alloca i8*		; <i8**> [#uses=3]
%ap.0 = alloca i8*		; <i8**> [#uses=3]
%"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
store i32 %x, i32* %x_addr
%ap1 = bitcast i8** %ap to i8* ; <i8*> [#uses=1]
call void @llvm.va_start( i8* %ap1 )
; Manually expanded va_arg: snapshot the current arg pointer into %ap.0,
; then bump %ap past the 16-byte argument.
%tmp = load i8** %ap, align 4		; <i8*> [#uses=1]
store i8* %tmp, i8** %ap.0, align 4
%tmp2 = load i8** %ap.0, align 4		; <i8*> [#uses=1]
%tmp3 = getelementptr i8* %tmp2, i64 16		; <i8*> [#uses=1]
store i8* %tmp3, i8** %ap, align 4
; Reload the saved arg pointer and view it as a %struct.S2203*, then
; drill down to the inner <16 x i8> on both source and destination.
%tmp4 = load i8** %ap.0, align 4		; <i8*> [#uses=1]
%tmp45 = bitcast i8* %tmp4 to %struct.S2203*		; <%struct.S2203*> [#uses=1]
%tmp6 = getelementptr %struct.S2203* @s, i32 0, i32 0		; <%struct.u16qi*> [#uses=1]
%tmp7 = getelementptr %struct.S2203* %tmp45, i32 0, i32 0		; <%struct.u16qi*> [#uses=1]
%tmp8 = getelementptr %struct.u16qi* %tmp6, i32 0, i32 0		; <<16 x i8>*> [#uses=1]
%tmp9 = getelementptr %struct.u16qi* %tmp7, i32 0, i32 0		; <<16 x i8>*> [#uses=1]
; align 4 is below the vector's natural 16-byte alignment, so a plain
; lvx is not sufficient: expect the permute-based unaligned sequence.
%tmp10 = load <16 x i8>* %tmp9, align 4		; <<16 x i8>> [#uses=1]
; CHECK: lvsl
; CHECK: vperm
; CHECK-LE: lvsr
; CHECK-LE: vperm
store <16 x i8> %tmp10, <16 x i8>* %tmp8, align 4
br label %return

return:		; preds = %entry
ret void
}
|
|
|
|
; Intrinsic declaration for the va_start call in @foo.
declare void @llvm.va_start(i8*) nounwind