; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
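
; This test checks codegen for the AArch64 NEON saturating add and subtract
; intrinsics: each function wraps a single intrinsic call, and FileCheck
; verifies that llc selects the matching UQADD/SQADD/UQSUB/SQSUB instruction.
; Saturating arithmetic clamps to the range of the element type instead of
; wrapping (e.g. for unsigned i8 lanes, 200 + 100 yields 255, not 44). The
; vector arrangement suffix encodes lane count and width: .8b is eight 8-bit
; lanes in a 64-bit register, .4s is four 32-bit lanes in a 128-bit register.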

declare <8 x i8> @llvm.arm.neon.vqaddu.v8i8(<8 x i8>, <8 x i8>)
declare <8 x i8> @llvm.arm.neon.vqadds.v8i8(<8 x i8>, <8 x i8>)

define <8 x i8> @test_uqadd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
; CHECK-LABEL: test_uqadd_v8i8:
  %tmp1 = call <8 x i8> @llvm.arm.neon.vqaddu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
; CHECK: uqadd v0.8b, v0.8b, v1.8b
  ret <8 x i8> %tmp1
}

define <8 x i8> @test_sqadd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
; CHECK-LABEL: test_sqadd_v8i8:
  %tmp1 = call <8 x i8> @llvm.arm.neon.vqadds.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
; CHECK: sqadd v0.8b, v0.8b, v1.8b
  ret <8 x i8> %tmp1
}

declare <16 x i8> @llvm.arm.neon.vqaddu.v16i8(<16 x i8>, <16 x i8>)
declare <16 x i8> @llvm.arm.neon.vqadds.v16i8(<16 x i8>, <16 x i8>)

define <16 x i8> @test_uqadd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
; CHECK-LABEL: test_uqadd_v16i8:
  %tmp1 = call <16 x i8> @llvm.arm.neon.vqaddu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
; CHECK: uqadd v0.16b, v0.16b, v1.16b
  ret <16 x i8> %tmp1
}

define <16 x i8> @test_sqadd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
; CHECK-LABEL: test_sqadd_v16i8:
  %tmp1 = call <16 x i8> @llvm.arm.neon.vqadds.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
; CHECK: sqadd v0.16b, v0.16b, v1.16b
  ret <16 x i8> %tmp1
}

declare <4 x i16> @llvm.arm.neon.vqaddu.v4i16(<4 x i16>, <4 x i16>)
declare <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16>, <4 x i16>)

define <4 x i16> @test_uqadd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
; CHECK-LABEL: test_uqadd_v4i16:
  %tmp1 = call <4 x i16> @llvm.arm.neon.vqaddu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
; CHECK: uqadd v0.4h, v0.4h, v1.4h
  ret <4 x i16> %tmp1
}

define <4 x i16> @test_sqadd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
; CHECK-LABEL: test_sqadd_v4i16:
  %tmp1 = call <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
; CHECK: sqadd v0.4h, v0.4h, v1.4h
  ret <4 x i16> %tmp1
}

declare <8 x i16> @llvm.arm.neon.vqaddu.v8i16(<8 x i16>, <8 x i16>)
declare <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16>, <8 x i16>)

define <8 x i16> @test_uqadd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
; CHECK-LABEL: test_uqadd_v8i16:
  %tmp1 = call <8 x i16> @llvm.arm.neon.vqaddu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
; CHECK: uqadd v0.8h, v0.8h, v1.8h
  ret <8 x i16> %tmp1
}

define <8 x i16> @test_sqadd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
; CHECK-LABEL: test_sqadd_v8i16:
  %tmp1 = call <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
; CHECK: sqadd v0.8h, v0.8h, v1.8h
  ret <8 x i16> %tmp1
}

declare <2 x i32> @llvm.arm.neon.vqaddu.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.arm.neon.vqadds.v2i32(<2 x i32>, <2 x i32>)

define <2 x i32> @test_uqadd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; CHECK-LABEL: test_uqadd_v2i32:
  %tmp1 = call <2 x i32> @llvm.arm.neon.vqaddu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
; CHECK: uqadd v0.2s, v0.2s, v1.2s
  ret <2 x i32> %tmp1
}

define <2 x i32> @test_sqadd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; CHECK-LABEL: test_sqadd_v2i32:
  %tmp1 = call <2 x i32> @llvm.arm.neon.vqadds.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
; CHECK: sqadd v0.2s, v0.2s, v1.2s
  ret <2 x i32> %tmp1
}

declare <4 x i32> @llvm.arm.neon.vqaddu.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32>, <4 x i32>)

define <4 x i32> @test_uqadd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK-LABEL: test_uqadd_v4i32:
  %tmp1 = call <4 x i32> @llvm.arm.neon.vqaddu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
; CHECK: uqadd v0.4s, v0.4s, v1.4s
  ret <4 x i32> %tmp1
}

define <4 x i32> @test_sqadd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK-LABEL: test_sqadd_v4i32:
  %tmp1 = call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
; CHECK: sqadd v0.4s, v0.4s, v1.4s
  ret <4 x i32> %tmp1
}
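
; A <1 x i64> value lives in a single d register, so the v1i64 variants are
; expected to select the scalar form (uqadd d0, d0, d1) rather than a vector
; arrangement.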

declare <1 x i64> @llvm.arm.neon.vqaddu.v1i64(<1 x i64>, <1 x i64>)
declare <1 x i64> @llvm.arm.neon.vqadds.v1i64(<1 x i64>, <1 x i64>)

define <1 x i64> @test_uqadd_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
; CHECK-LABEL: test_uqadd_v1i64:
  %tmp1 = call <1 x i64> @llvm.arm.neon.vqaddu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
; CHECK: uqadd d0, d0, d1
  ret <1 x i64> %tmp1
}

define <1 x i64> @test_sqadd_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
; CHECK-LABEL: test_sqadd_v1i64:
  %tmp1 = call <1 x i64> @llvm.arm.neon.vqadds.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
; CHECK: sqadd d0, d0, d1
  ret <1 x i64> %tmp1
}

declare <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64>, <2 x i64>)
declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>)

define <2 x i64> @test_uqadd_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; CHECK-LABEL: test_uqadd_v2i64:
  %tmp1 = call <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
; CHECK: uqadd v0.2d, v0.2d, v1.2d
  ret <2 x i64> %tmp1
}

define <2 x i64> @test_sqadd_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; CHECK-LABEL: test_sqadd_v2i64:
  %tmp1 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
; CHECK: sqadd v0.2d, v0.2d, v1.2d
  ret <2 x i64> %tmp1
}
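
; Saturating subtraction: results clamp at the type's minimum, so for
; unsigned lanes 10 - 20 yields 0 rather than wrapping to a large value.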

declare <8 x i8> @llvm.arm.neon.vqsubu.v8i8(<8 x i8>, <8 x i8>)
declare <8 x i8> @llvm.arm.neon.vqsubs.v8i8(<8 x i8>, <8 x i8>)

define <8 x i8> @test_uqsub_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
; CHECK-LABEL: test_uqsub_v8i8:
  %tmp1 = call <8 x i8> @llvm.arm.neon.vqsubu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
; CHECK: uqsub v0.8b, v0.8b, v1.8b
  ret <8 x i8> %tmp1
}

define <8 x i8> @test_sqsub_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
; CHECK-LABEL: test_sqsub_v8i8:
  %tmp1 = call <8 x i8> @llvm.arm.neon.vqsubs.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
; CHECK: sqsub v0.8b, v0.8b, v1.8b
  ret <8 x i8> %tmp1
}

declare <16 x i8> @llvm.arm.neon.vqsubu.v16i8(<16 x i8>, <16 x i8>)
declare <16 x i8> @llvm.arm.neon.vqsubs.v16i8(<16 x i8>, <16 x i8>)

define <16 x i8> @test_uqsub_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
; CHECK-LABEL: test_uqsub_v16i8:
  %tmp1 = call <16 x i8> @llvm.arm.neon.vqsubu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
; CHECK: uqsub v0.16b, v0.16b, v1.16b
  ret <16 x i8> %tmp1
}

define <16 x i8> @test_sqsub_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
; CHECK-LABEL: test_sqsub_v16i8:
  %tmp1 = call <16 x i8> @llvm.arm.neon.vqsubs.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
; CHECK: sqsub v0.16b, v0.16b, v1.16b
  ret <16 x i8> %tmp1
}

declare <4 x i16> @llvm.arm.neon.vqsubu.v4i16(<4 x i16>, <4 x i16>)
declare <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16>, <4 x i16>)

define <4 x i16> @test_uqsub_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
; CHECK-LABEL: test_uqsub_v4i16:
  %tmp1 = call <4 x i16> @llvm.arm.neon.vqsubu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
; CHECK: uqsub v0.4h, v0.4h, v1.4h
  ret <4 x i16> %tmp1
}

define <4 x i16> @test_sqsub_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
; CHECK-LABEL: test_sqsub_v4i16:
  %tmp1 = call <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
; CHECK: sqsub v0.4h, v0.4h, v1.4h
  ret <4 x i16> %tmp1
}

declare <8 x i16> @llvm.arm.neon.vqsubu.v8i16(<8 x i16>, <8 x i16>)
declare <8 x i16> @llvm.arm.neon.vqsubs.v8i16(<8 x i16>, <8 x i16>)

define <8 x i16> @test_uqsub_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
; CHECK-LABEL: test_uqsub_v8i16:
  %tmp1 = call <8 x i16> @llvm.arm.neon.vqsubu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
; CHECK: uqsub v0.8h, v0.8h, v1.8h
  ret <8 x i16> %tmp1
}

define <8 x i16> @test_sqsub_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
; CHECK-LABEL: test_sqsub_v8i16:
  %tmp1 = call <8 x i16> @llvm.arm.neon.vqsubs.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
; CHECK: sqsub v0.8h, v0.8h, v1.8h
  ret <8 x i16> %tmp1
}

declare <2 x i32> @llvm.arm.neon.vqsubu.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.arm.neon.vqsubs.v2i32(<2 x i32>, <2 x i32>)

define <2 x i32> @test_uqsub_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; CHECK-LABEL: test_uqsub_v2i32:
  %tmp1 = call <2 x i32> @llvm.arm.neon.vqsubu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
; CHECK: uqsub v0.2s, v0.2s, v1.2s
  ret <2 x i32> %tmp1
}

define <2 x i32> @test_sqsub_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; CHECK-LABEL: test_sqsub_v2i32:
  %tmp1 = call <2 x i32> @llvm.arm.neon.vqsubs.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
; CHECK: sqsub v0.2s, v0.2s, v1.2s
  ret <2 x i32> %tmp1
}

declare <4 x i32> @llvm.arm.neon.vqsubu.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32>, <4 x i32>)

define <4 x i32> @test_uqsub_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK-LABEL: test_uqsub_v4i32:
  %tmp1 = call <4 x i32> @llvm.arm.neon.vqsubu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
; CHECK: uqsub v0.4s, v0.4s, v1.4s
  ret <4 x i32> %tmp1
}

define <4 x i32> @test_sqsub_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK-LABEL: test_sqsub_v4i32:
  %tmp1 = call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
; CHECK: sqsub v0.4s, v0.4s, v1.4s
  ret <4 x i32> %tmp1
}

declare <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64>, <2 x i64>)
declare <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64>, <2 x i64>)

define <2 x i64> @test_uqsub_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; CHECK-LABEL: test_uqsub_v2i64:
  %tmp1 = call <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
; CHECK: uqsub v0.2d, v0.2d, v1.2d
  ret <2 x i64> %tmp1
}

define <2 x i64> @test_sqsub_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; CHECK-LABEL: test_sqsub_v2i64:
  %tmp1 = call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
; CHECK: sqsub v0.2d, v0.2d, v1.2d
  ret <2 x i64> %tmp1
}
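
; As with the saturating adds above, the <1 x i64> subtracts should use the
; scalar d-register form.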

declare <1 x i64> @llvm.arm.neon.vqsubu.v1i64(<1 x i64>, <1 x i64>)
declare <1 x i64> @llvm.arm.neon.vqsubs.v1i64(<1 x i64>, <1 x i64>)

define <1 x i64> @test_uqsub_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
; CHECK-LABEL: test_uqsub_v1i64:
  %tmp1 = call <1 x i64> @llvm.arm.neon.vqsubu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
; CHECK: uqsub d0, d0, d1
  ret <1 x i64> %tmp1
}

define <1 x i64> @test_sqsub_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
; CHECK-LABEL: test_sqsub_v1i64:
  %tmp1 = call <1 x i64> @llvm.arm.neon.vqsubs.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
; CHECK: sqsub d0, d0, d1
  ret <1 x i64> %tmp1
}