; Vectors were being manually scalarized by the backend. Instead, let the
; target-independent code do all of the work. The manual scalarization was
; from a time before good target-independent support for scalarization in
; LLVM. However, this forces us to specially handle vector loads and stores,
; which we can turn into PTX instructions that produce/consume multiple
; operands.

; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s

; Even though general vector types are not supported in PTX, we can still
; optimize loads/stores with pseudo-vector instructions of the form:
;
; ld.v2.f32 {%f0, %f1}, [%r0]
;
; which will load two floats at once into scalar registers.
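;
; The v4 form works the same way: ld.v4.f32 {%f0, %f1, %f2, %f3}, [%r0]
; loads four floats in one instruction, and the stores are combined into
; the matching st.v2.f32 / st.v4.f32 forms.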
define void @foo(<2 x float>* %a) {
; CHECK: .func foo
; CHECK: ld.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}, [%r{{[0-9]+}}];
  %t1 = load <2 x float>* %a
  %t2 = fmul <2 x float> %t1, %t1
  store <2 x float> %t2, <2 x float>* %a
  ret void
}

define void @foo2(<4 x float>* %a) {
; CHECK: .func foo2
; CHECK: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%r{{[0-9]+}}];
  %t1 = load <4 x float>* %a
  %t2 = fmul <4 x float> %t1, %t1
  store <4 x float> %t2, <4 x float>* %a
  ret void
}
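
; <8 x float> is not a legal PTX vector size, so the 32-byte access below is
; split into two v4 loads; the second one reads at byte offset +16
; (4 floats x 4 bytes).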
define void @foo3(<8 x float>* %a) {
; CHECK: .func foo3
; CHECK: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%r{{[0-9]+}}];
; CHECK-NEXT: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%r{{[0-9]+}}+16];
  %t1 = load <8 x float>* %a
  %t2 = fmul <8 x float> %t1, %t1
  store <8 x float> %t2, <8 x float>* %a
  ret void
}
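
; The same combining applies to integer vectors. The loads are printed with
; .u32 regardless of the IR type's signedness; a plain 32-bit load is
; sign-agnostic.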
define void @foo4(<2 x i32>* %a) {
; CHECK: .func foo4
; CHECK: ld.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}, [%r{{[0-9]+}}];
  %t1 = load <2 x i32>* %a
  %t2 = mul <2 x i32> %t1, %t1
  store <2 x i32> %t2, <2 x i32>* %a
  ret void
}

define void @foo5(<4 x i32>* %a) {
; CHECK: .func foo5
; CHECK: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%r{{[0-9]+}}];
  %t1 = load <4 x i32>* %a
  %t2 = mul <4 x i32> %t1, %t1
  store <4 x i32> %t2, <4 x i32>* %a
  ret void
}
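
; As with <8 x float> above, the <8 x i32> access is split into two v4 loads
; 16 bytes apart.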
define void @foo6(<8 x i32>* %a) {
; CHECK: .func foo6
; CHECK: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%r{{[0-9]+}}];
; CHECK-NEXT: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%r{{[0-9]+}}+16];
  %t1 = load <8 x i32>* %a
  %t2 = mul <8 x i32> %t1, %t1
  store <8 x i32> %t2, <8 x i32>* %a
  ret void
}