; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
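; The tests below check that a vector multiply feeding an add (or sub) of a
; third operand is selected as a single NEON mla (or mls) instruction for
; each vector arrangement.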

define <8 x i8> @mla8xi8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C) {
;CHECK: mla {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp1 = mul <8 x i8> %A, %B
  %tmp2 = add <8 x i8> %C, %tmp1
  ret <8 x i8> %tmp2
}

define <16 x i8> @mla16xi8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) {
;CHECK: mla {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp1 = mul <16 x i8> %A, %B
  %tmp2 = add <16 x i8> %C, %tmp1
  ret <16 x i8> %tmp2
}

define <4 x i16> @mla4xi16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C) {
;CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %tmp1 = mul <4 x i16> %A, %B
  %tmp2 = add <4 x i16> %C, %tmp1
  ret <4 x i16> %tmp2
}

define <8 x i16> @mla8xi16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C) {
;CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %tmp1 = mul <8 x i16> %A, %B
  %tmp2 = add <8 x i16> %C, %tmp1
  ret <8 x i16> %tmp2
}

define <2 x i32> @mla2xi32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C) {
;CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %tmp1 = mul <2 x i32> %A, %B
  %tmp2 = add <2 x i32> %C, %tmp1
  ret <2 x i32> %tmp2
}

define <4 x i32> @mla4xi32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C) {
;CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %tmp1 = mul <4 x i32> %A, %B
  %tmp2 = add <4 x i32> %C, %tmp1
  ret <4 x i32> %tmp2
}
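; The mls tests mirror the mla tests above: mul followed by sub should be
; selected as a single mls (multiply-subtract) instruction.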

define <8 x i8> @mls8xi8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C) {
;CHECK: mls {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp1 = mul <8 x i8> %A, %B
  %tmp2 = sub <8 x i8> %C, %tmp1
  ret <8 x i8> %tmp2
}

define <16 x i8> @mls16xi8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) {
;CHECK: mls {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp1 = mul <16 x i8> %A, %B
  %tmp2 = sub <16 x i8> %C, %tmp1
  ret <16 x i8> %tmp2
}

define <4 x i16> @mls4xi16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C) {
;CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %tmp1 = mul <4 x i16> %A, %B
  %tmp2 = sub <4 x i16> %C, %tmp1
  ret <4 x i16> %tmp2
}

define <8 x i16> @mls8xi16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C) {
;CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %tmp1 = mul <8 x i16> %A, %B
  %tmp2 = sub <8 x i16> %C, %tmp1
  ret <8 x i16> %tmp2
}

define <2 x i32> @mls2xi32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C) {
;CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %tmp1 = mul <2 x i32> %A, %B
  %tmp2 = sub <2 x i32> %C, %tmp1
  ret <2 x i32> %tmp2
}

define <4 x i32> @mls4xi32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C) {
;CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %tmp1 = mul <4 x i32> %A, %B
  %tmp2 = sub <4 x i32> %C, %tmp1
  ret <4 x i32> %tmp2
}