AArch64/ARM64: port some NEON tests to ARM64

These tests used completely different sets of intrinsics, so the only way to
port them was to create a separate ARM64 copy and change all the intrinsics.

Other than that, CodeGen was straightforward; no deficiencies were detected.
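
As one concrete example of the renaming (taken from the diffs below), the
AArch64 test declares

    declare <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8>, <8 x i8>)

while the ARM64 copy uses

    declare <8 x i8> @llvm.arm64.neon.uabd.v8i8(<8 x i8>, <8 x i8>)

so every call site changes along with the declaration.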

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@206392 91177308-0d34-0410-b5e6-96231b3b80d8
Tim Northover 2014-04-16 15:28:02 +00:00
parent 9a8aff0062
commit f539725734
15 changed files with 6064 additions and 1 deletion


@@ -1,5 +1,5 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
; arm64 has a copy of this test in its own directory due to differing intrinsics.
declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)


@@ -1,4 +1,5 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
; arm64 has a copy of this test in its own directory due to differing intrinsics.
declare <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double>, <2 x double>)


@@ -1,4 +1,5 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
; arm64 has its own copy of this test in its directory.
declare <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8>, <8 x i8>)


@@ -1,4 +1,5 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
; arm64 has a copy of this test in its own directory (different intrinsic names).
declare <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8>, <8 x i8>)
declare <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8>, <8 x i8>)


@@ -1,4 +1,5 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
; arm64 has a copy of this test in its own directory.
declare float @llvm.aarch64.neon.vminnmv(<4 x float>)


@@ -1,4 +1,5 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
; arm64 has a copy of this test in its own directory.
declare <8 x i8> @llvm.arm.neon.vpadd.v8i8(<8 x i8>, <8 x i8>)


@@ -1,4 +1,5 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
; arm64 has its own copy of this test.
define <8 x i8> @add8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: add {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b


@@ -1,4 +1,5 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=+neon -verify-machineinstrs < %s | FileCheck %s
; From <8 x i8>


@@ -0,0 +1,341 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
declare <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32>, <2 x i32>)
declare <2 x i64> @llvm.arm64.neon.sqsub.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16>, <4 x i16>)
declare <4 x i32> @llvm.arm64.neon.sqsub.v4i32(<4 x i32>, <4 x i32>)
declare <2 x i64> @llvm.arm64.neon.sqadd.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i32> @llvm.arm64.neon.sqadd.v4i32(<4 x i32>, <4 x i32>)
declare <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16>, <4 x i16>)
declare <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16>, <4 x i16>)
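; The tests below share one pattern: the scalar operand is splatted with a
; chain of insertelement instructions, the high half of the vector operand is
; extracted with shufflevector, and both feed a widening intrinsic. CodeGen is
; expected to fold this into a dup of the scalar followed by the matching
; high-half ("2") instruction, as the CHECK lines spell out.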
define <4 x i32> @test_vmull_high_n_s16(<8 x i16> %a, i16 %b) {
; CHECK-LABEL: test_vmull_high_n_s16:
; CHECK: dup [[REPLICATE:v[0-9]+]].8h, w0
; CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
entry:
%shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%vecinit.i.i = insertelement <4 x i16> undef, i16 %b, i32 0
%vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %b, i32 1
%vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %b, i32 2
%vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %b, i32 3
%vmull15.i.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
ret <4 x i32> %vmull15.i.i
}
define <2 x i64> @test_vmull_high_n_s32(<4 x i32> %a, i32 %b) {
; CHECK-LABEL: test_vmull_high_n_s32:
; CHECK: dup [[REPLICATE:v[0-9]+]].4s, w0
; CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
entry:
%shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%vecinit.i.i = insertelement <2 x i32> undef, i32 %b, i32 0
%vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %b, i32 1
%vmull9.i.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
ret <2 x i64> %vmull9.i.i
}
define <4 x i32> @test_vmull_high_n_u16(<8 x i16> %a, i16 %b) {
; CHECK-LABEL: test_vmull_high_n_u16:
; CHECK: dup [[REPLICATE:v[0-9]+]].8h, w0
; CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
entry:
%shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%vecinit.i.i = insertelement <4 x i16> undef, i16 %b, i32 0
%vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %b, i32 1
%vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %b, i32 2
%vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %b, i32 3
%vmull15.i.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
ret <4 x i32> %vmull15.i.i
}
define <2 x i64> @test_vmull_high_n_u32(<4 x i32> %a, i32 %b) {
; CHECK-LABEL: test_vmull_high_n_u32:
; CHECK: dup [[REPLICATE:v[0-9]+]].4s, w0
; CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
entry:
%shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%vecinit.i.i = insertelement <2 x i32> undef, i32 %b, i32 0
%vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %b, i32 1
%vmull9.i.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
ret <2 x i64> %vmull9.i.i
}
define <4 x i32> @test_vqdmull_high_n_s16(<8 x i16> %a, i16 %b) {
; CHECK-LABEL: test_vqdmull_high_n_s16:
; CHECK: dup [[REPLICATE:v[0-9]+]].8h, w0
; CHECK: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
entry:
%shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%vecinit.i.i = insertelement <4 x i16> undef, i16 %b, i32 0
%vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %b, i32 1
%vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %b, i32 2
%vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %b, i32 3
%vqdmull15.i.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
ret <4 x i32> %vqdmull15.i.i
}
define <2 x i64> @test_vqdmull_high_n_s32(<4 x i32> %a, i32 %b) {
; CHECK-LABEL: test_vqdmull_high_n_s32:
; CHECK: dup [[REPLICATE:v[0-9]+]].4s, w0
; CHECK: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
entry:
%shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%vecinit.i.i = insertelement <2 x i32> undef, i32 %b, i32 0
%vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %b, i32 1
%vqdmull9.i.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
ret <2 x i64> %vqdmull9.i.i
}
define <4 x i32> @test_vmlal_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
; CHECK-LABEL: test_vmlal_high_n_s16:
; CHECK: dup [[REPLICATE:v[0-9]+]].8h, w0
; CHECK: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
entry:
%shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
%vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
%vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
%vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
%vmull2.i.i.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
%add.i.i = add <4 x i32> %vmull2.i.i.i, %a
ret <4 x i32> %add.i.i
}
define <2 x i64> @test_vmlal_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
; CHECK-LABEL: test_vmlal_high_n_s32:
; CHECK: dup [[REPLICATE:v[0-9]+]].4s, w0
; CHECK: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
entry:
%shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
%vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
%vmull2.i.i.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
%add.i.i = add <2 x i64> %vmull2.i.i.i, %a
ret <2 x i64> %add.i.i
}
define <4 x i32> @test_vmlal_high_n_u16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
; CHECK-LABEL: test_vmlal_high_n_u16:
; CHECK: dup [[REPLICATE:v[0-9]+]].8h, w0
; CHECK: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
entry:
%shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
%vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
%vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
%vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
%vmull2.i.i.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
%add.i.i = add <4 x i32> %vmull2.i.i.i, %a
ret <4 x i32> %add.i.i
}
define <2 x i64> @test_vmlal_high_n_u32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
; CHECK-LABEL: test_vmlal_high_n_u32:
; CHECK: dup [[REPLICATE:v[0-9]+]].4s, w0
; CHECK: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
entry:
%shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
%vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
%vmull2.i.i.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
%add.i.i = add <2 x i64> %vmull2.i.i.i, %a
ret <2 x i64> %add.i.i
}
define <4 x i32> @test_vqdmlal_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
; CHECK-LABEL: test_vqdmlal_high_n_s16:
; CHECK: sqdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
%vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
%vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
%vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
%vqdmlal15.i.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
%vqdmlal17.i.i = tail call <4 x i32> @llvm.arm64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal15.i.i)
ret <4 x i32> %vqdmlal17.i.i
}
define <2 x i64> @test_vqdmlal_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
; CHECK-LABEL: test_vqdmlal_high_n_s32:
; CHECK: sqdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
%vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
%vqdmlal9.i.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
%vqdmlal11.i.i = tail call <2 x i64> @llvm.arm64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal9.i.i)
ret <2 x i64> %vqdmlal11.i.i
}
define <4 x i32> @test_vmlsl_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
; CHECK-LABEL: test_vmlsl_high_n_s16:
; CHECK: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
%vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
%vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
%vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
%vmull2.i.i.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
%sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
ret <4 x i32> %sub.i.i
}
define <2 x i64> @test_vmlsl_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
; CHECK-LABEL: test_vmlsl_high_n_s32:
; CHECK: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
%vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
%vmull2.i.i.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
%sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
ret <2 x i64> %sub.i.i
}
define <4 x i32> @test_vmlsl_high_n_u16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
; CHECK-LABEL: test_vmlsl_high_n_u16:
; CHECK: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
%vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
%vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
%vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
%vmull2.i.i.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
%sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
ret <4 x i32> %sub.i.i
}
define <2 x i64> @test_vmlsl_high_n_u32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
; CHECK-LABEL: test_vmlsl_high_n_u32:
; CHECK: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
%vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
%vmull2.i.i.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
%sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
ret <2 x i64> %sub.i.i
}
define <4 x i32> @test_vqdmlsl_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
; CHECK-LABEL: test_vqdmlsl_high_n_s16:
; CHECK: sqdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
entry:
%shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
%vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
%vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
%vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
%vqdmlsl15.i.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
%vqdmlsl17.i.i = tail call <4 x i32> @llvm.arm64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl15.i.i)
ret <4 x i32> %vqdmlsl17.i.i
}
define <2 x i64> @test_vqdmlsl_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
; CHECK-LABEL: test_vqdmlsl_high_n_s32:
; CHECK: sqdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
entry:
%shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
%vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
%vqdmlsl9.i.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
%vqdmlsl11.i.i = tail call <2 x i64> @llvm.arm64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl9.i.i)
ret <2 x i64> %vqdmlsl11.i.i
}
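; The floating-point vmul_n tests use no intrinsic at all: a plain fmul
; against the splatted scalar should select the by-element multiply
; (fmul ... .s[0] / .d[0]) instead of a full vector-by-vector multiply.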
define <2 x float> @test_vmul_n_f32(<2 x float> %a, float %b) {
; CHECK-LABEL: test_vmul_n_f32:
; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
entry:
%vecinit.i = insertelement <2 x float> undef, float %b, i32 0
%vecinit1.i = insertelement <2 x float> %vecinit.i, float %b, i32 1
%mul.i = fmul <2 x float> %vecinit1.i, %a
ret <2 x float> %mul.i
}
define <4 x float> @test_vmulq_n_f32(<4 x float> %a, float %b) {
; CHECK-LABEL: test_vmulq_n_f32:
; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
entry:
%vecinit.i = insertelement <4 x float> undef, float %b, i32 0
%vecinit1.i = insertelement <4 x float> %vecinit.i, float %b, i32 1
%vecinit2.i = insertelement <4 x float> %vecinit1.i, float %b, i32 2
%vecinit3.i = insertelement <4 x float> %vecinit2.i, float %b, i32 3
%mul.i = fmul <4 x float> %vecinit3.i, %a
ret <4 x float> %mul.i
}
define <2 x double> @test_vmulq_n_f64(<2 x double> %a, double %b) {
; CHECK-LABEL: test_vmulq_n_f64:
; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
entry:
%vecinit.i = insertelement <2 x double> undef, double %b, i32 0
%vecinit1.i = insertelement <2 x double> %vecinit.i, double %b, i32 1
%mul.i = fmul <2 x double> %vecinit1.i, %a
ret <2 x double> %mul.i
}
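; The fused vfma_n/vfms_n variants feed the splat into @llvm.fma directly;
; the fms cases first negate one operand via fsub from -0.0. The expected
; selection is the indexed fmla/fmls instruction.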
define <2 x float> @test_vfma_n_f32(<2 x float> %a, <2 x float> %b, float %n) {
; CHECK-LABEL: test_vfma_n_f32:
; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[{{[0-9]+}}]
entry:
%vecinit.i = insertelement <2 x float> undef, float %n, i32 0
%vecinit1.i = insertelement <2 x float> %vecinit.i, float %n, i32 1
%0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %b, <2 x float> %vecinit1.i, <2 x float> %a)
ret <2 x float> %0
}
define <4 x float> @test_vfmaq_n_f32(<4 x float> %a, <4 x float> %b, float %n) {
; CHECK-LABEL: test_vfmaq_n_f32:
; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
entry:
%vecinit.i = insertelement <4 x float> undef, float %n, i32 0
%vecinit1.i = insertelement <4 x float> %vecinit.i, float %n, i32 1
%vecinit2.i = insertelement <4 x float> %vecinit1.i, float %n, i32 2
%vecinit3.i = insertelement <4 x float> %vecinit2.i, float %n, i32 3
%0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %b, <4 x float> %vecinit3.i, <4 x float> %a)
ret <4 x float> %0
}
define <2 x float> @test_vfms_n_f32(<2 x float> %a, <2 x float> %b, float %n) {
; CHECK-LABEL: test_vfms_n_f32:
; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[{{[0-9]+}}]
entry:
%vecinit.i = insertelement <2 x float> undef, float %n, i32 0
%vecinit1.i = insertelement <2 x float> %vecinit.i, float %n, i32 1
%0 = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %b
%1 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %0, <2 x float> %vecinit1.i, <2 x float> %a)
ret <2 x float> %1
}
define <4 x float> @test_vfmsq_n_f32(<4 x float> %a, <4 x float> %b, float %n) {
; CHECK-LABEL: test_vfmsq_n_f32:
; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
entry:
%vecinit.i = insertelement <4 x float> undef, float %n, i32 0
%vecinit1.i = insertelement <4 x float> %vecinit.i, float %n, i32 1
%vecinit2.i = insertelement <4 x float> %vecinit1.i, float %n, i32 2
%vecinit3.i = insertelement <4 x float> %vecinit2.i, float %n, i32 3
%0 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %b
%1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %0, <4 x float> %vecinit3.i, <4 x float> %a)
ret <4 x float> %1
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,236 @@
; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
declare <8 x i8> @llvm.arm64.neon.uabd.v8i8(<8 x i8>, <8 x i8>)
declare <8 x i8> @llvm.arm64.neon.sabd.v8i8(<8 x i8>, <8 x i8>)
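; There is no dedicated intrinsic for the accumulating uaba/saba forms:
; each test_uaba/test_saba below adds the uabd/sabd result back into an
; operand and expects the add to be merged into a single uaba/saba.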
define <8 x i8> @test_uabd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
; CHECK: test_uabd_v8i8:
%abd = call <8 x i8> @llvm.arm64.neon.uabd.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
; CHECK: uabd v0.8b, v0.8b, v1.8b
ret <8 x i8> %abd
}
define <8 x i8> @test_uaba_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
; CHECK: test_uaba_v8i8:
%abd = call <8 x i8> @llvm.arm64.neon.uabd.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
%aba = add <8 x i8> %lhs, %abd
; CHECK: uaba v0.8b, v0.8b, v1.8b
ret <8 x i8> %aba
}
define <8 x i8> @test_sabd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
; CHECK: test_sabd_v8i8:
%abd = call <8 x i8> @llvm.arm64.neon.sabd.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
; CHECK: sabd v0.8b, v0.8b, v1.8b
ret <8 x i8> %abd
}
define <8 x i8> @test_saba_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
; CHECK: test_saba_v8i8:
%abd = call <8 x i8> @llvm.arm64.neon.sabd.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
%aba = add <8 x i8> %lhs, %abd
; CHECK: saba v0.8b, v0.8b, v1.8b
ret <8 x i8> %aba
}
declare <16 x i8> @llvm.arm64.neon.uabd.v16i8(<16 x i8>, <16 x i8>)
declare <16 x i8> @llvm.arm64.neon.sabd.v16i8(<16 x i8>, <16 x i8>)
define <16 x i8> @test_uabd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
; CHECK: test_uabd_v16i8:
%abd = call <16 x i8> @llvm.arm64.neon.uabd.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
; CHECK: uabd v0.16b, v0.16b, v1.16b
ret <16 x i8> %abd
}
define <16 x i8> @test_uaba_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
; CHECK: test_uaba_v16i8:
%abd = call <16 x i8> @llvm.arm64.neon.uabd.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
%aba = add <16 x i8> %lhs, %abd
; CHECK: uaba v0.16b, v0.16b, v1.16b
ret <16 x i8> %aba
}
define <16 x i8> @test_sabd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
; CHECK: test_sabd_v16i8:
%abd = call <16 x i8> @llvm.arm64.neon.sabd.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
; CHECK: sabd v0.16b, v0.16b, v1.16b
ret <16 x i8> %abd
}
define <16 x i8> @test_saba_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
; CHECK: test_saba_v16i8:
%abd = call <16 x i8> @llvm.arm64.neon.sabd.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
%aba = add <16 x i8> %lhs, %abd
; CHECK: saba v0.16b, v0.16b, v1.16b
ret <16 x i8> %aba
}
declare <4 x i16> @llvm.arm64.neon.uabd.v4i16(<4 x i16>, <4 x i16>)
declare <4 x i16> @llvm.arm64.neon.sabd.v4i16(<4 x i16>, <4 x i16>)
define <4 x i16> @test_uabd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
; CHECK: test_uabd_v4i16:
%abd = call <4 x i16> @llvm.arm64.neon.uabd.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
; CHECK: uabd v0.4h, v0.4h, v1.4h
ret <4 x i16> %abd
}
define <4 x i16> @test_uaba_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
; CHECK: test_uaba_v4i16:
%abd = call <4 x i16> @llvm.arm64.neon.uabd.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
%aba = add <4 x i16> %lhs, %abd
; CHECK: uaba v0.4h, v0.4h, v1.4h
ret <4 x i16> %aba
}
define <4 x i16> @test_sabd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
; CHECK: test_sabd_v4i16:
%abd = call <4 x i16> @llvm.arm64.neon.sabd.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
; CHECK: sabd v0.4h, v0.4h, v1.4h
ret <4 x i16> %abd
}
define <4 x i16> @test_saba_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
; CHECK: test_saba_v4i16:
%abd = call <4 x i16> @llvm.arm64.neon.sabd.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
%aba = add <4 x i16> %lhs, %abd
; CHECK: saba v0.4h, v0.4h, v1.4h
ret <4 x i16> %aba
}
declare <8 x i16> @llvm.arm64.neon.uabd.v8i16(<8 x i16>, <8 x i16>)
declare <8 x i16> @llvm.arm64.neon.sabd.v8i16(<8 x i16>, <8 x i16>)
define <8 x i16> @test_uabd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
; CHECK: test_uabd_v8i16:
%abd = call <8 x i16> @llvm.arm64.neon.uabd.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
; CHECK: uabd v0.8h, v0.8h, v1.8h
ret <8 x i16> %abd
}
define <8 x i16> @test_uaba_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
; CHECK: test_uaba_v8i16:
%abd = call <8 x i16> @llvm.arm64.neon.uabd.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
%aba = add <8 x i16> %lhs, %abd
; CHECK: uaba v0.8h, v0.8h, v1.8h
ret <8 x i16> %aba
}
define <8 x i16> @test_sabd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
; CHECK: test_sabd_v8i16:
%abd = call <8 x i16> @llvm.arm64.neon.sabd.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
; CHECK: sabd v0.8h, v0.8h, v1.8h
ret <8 x i16> %abd
}
define <8 x i16> @test_saba_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
; CHECK: test_saba_v8i16:
%abd = call <8 x i16> @llvm.arm64.neon.sabd.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
%aba = add <8 x i16> %lhs, %abd
; CHECK: saba v0.8h, v0.8h, v1.8h
ret <8 x i16> %aba
}
declare <2 x i32> @llvm.arm64.neon.uabd.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.arm64.neon.sabd.v2i32(<2 x i32>, <2 x i32>)
define <2 x i32> @test_uabd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; CHECK: test_uabd_v2i32:
%abd = call <2 x i32> @llvm.arm64.neon.uabd.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
; CHECK: uabd v0.2s, v0.2s, v1.2s
ret <2 x i32> %abd
}
define <2 x i32> @test_uaba_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; CHECK: test_uaba_v2i32:
%abd = call <2 x i32> @llvm.arm64.neon.uabd.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
%aba = add <2 x i32> %lhs, %abd
; CHECK: uaba v0.2s, v0.2s, v1.2s
ret <2 x i32> %aba
}
define <2 x i32> @test_sabd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; CHECK: test_sabd_v2i32:
%abd = call <2 x i32> @llvm.arm64.neon.sabd.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
; CHECK: sabd v0.2s, v0.2s, v1.2s
ret <2 x i32> %abd
}
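; With constant operands, one input is expected to be materialised with a
; movi and the sabd still emitted, i.e. the intrinsic call must not be
; folded away.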
define <2 x i32> @test_sabd_v2i32_const() {
; CHECK: test_sabd_v2i32_const:
; CHECK: movi d1, #0x00ffffffff0000
; CHECK-NEXT: sabd v0.2s, v0.2s, v1.2s
%1 = tail call <2 x i32> @llvm.arm64.neon.sabd.v2i32(
<2 x i32> <i32 -2147483648, i32 2147450880>,
<2 x i32> <i32 -65536, i32 65535>)
ret <2 x i32> %1
}
define <2 x i32> @test_saba_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; CHECK: test_saba_v2i32:
%abd = call <2 x i32> @llvm.arm64.neon.sabd.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
%aba = add <2 x i32> %lhs, %abd
; CHECK: saba v0.2s, v0.2s, v1.2s
ret <2 x i32> %aba
}
declare <4 x i32> @llvm.arm64.neon.uabd.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm64.neon.sabd.v4i32(<4 x i32>, <4 x i32>)
define <4 x i32> @test_uabd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK: test_uabd_v4i32:
%abd = call <4 x i32> @llvm.arm64.neon.uabd.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
; CHECK: uabd v0.4s, v0.4s, v1.4s
ret <4 x i32> %abd
}
define <4 x i32> @test_uaba_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK: test_uaba_v4i32:
%abd = call <4 x i32> @llvm.arm64.neon.uabd.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
%aba = add <4 x i32> %lhs, %abd
; CHECK: uaba v0.4s, v0.4s, v1.4s
ret <4 x i32> %aba
}
define <4 x i32> @test_sabd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK: test_sabd_v4i32:
%abd = call <4 x i32> @llvm.arm64.neon.sabd.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
; CHECK: sabd v0.4s, v0.4s, v1.4s
ret <4 x i32> %abd
}
define <4 x i32> @test_saba_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK: test_saba_v4i32:
%abd = call <4 x i32> @llvm.arm64.neon.sabd.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
%aba = add <4 x i32> %lhs, %abd
; CHECK: saba v0.4s, v0.4s, v1.4s
ret <4 x i32> %aba
}
declare <2 x float> @llvm.arm64.neon.fabd.v2f32(<2 x float>, <2 x float>)
define <2 x float> @test_fabd_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
; CHECK: test_fabd_v2f32:
%abd = call <2 x float> @llvm.arm64.neon.fabd.v2f32(<2 x float> %lhs, <2 x float> %rhs)
; CHECK: fabd v0.2s, v0.2s, v1.2s
ret <2 x float> %abd
}
declare <4 x float> @llvm.arm64.neon.fabd.v4f32(<4 x float>, <4 x float>)
define <4 x float> @test_fabd_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
; CHECK: test_fabd_v4f32:
%abd = call <4 x float> @llvm.arm64.neon.fabd.v4f32(<4 x float> %lhs, <4 x float> %rhs)
; CHECK: fabd v0.4s, v0.4s, v1.4s
ret <4 x float> %abd
}
declare <2 x double> @llvm.arm64.neon.fabd.v2f64(<2 x double>, <2 x double>)
define <2 x double> @test_fabd_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
; CHECK: test_fabd_v2f64:
%abd = call <2 x double> @llvm.arm64.neon.fabd.v2f64(<2 x double> %lhs, <2 x double> %rhs)
; CHECK: fabd v0.2d, v0.2d, v1.2d
ret <2 x double> %abd
}


@@ -0,0 +1,460 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s
declare float @llvm.arm64.neon.fminnmv.f32.v4f32(<4 x float>)
declare float @llvm.arm64.neon.fmaxnmv.f32.v4f32(<4 x float>)
declare float @llvm.arm64.neon.fminv.f32.v4f32(<4 x float>)
declare float @llvm.arm64.neon.fmaxv.f32.v4f32(<4 x float>)
declare i32 @llvm.arm64.neon.saddv.i32.v4i32(<4 x i32>)
declare i32 @llvm.arm64.neon.saddv.i32.v8i16(<8 x i16>)
declare i32 @llvm.arm64.neon.saddv.i32.v16i8(<16 x i8>)
declare i32 @llvm.arm64.neon.saddv.i32.v4i16(<4 x i16>)
declare i32 @llvm.arm64.neon.saddv.i32.v8i8(<8 x i8>)
declare i32 @llvm.arm64.neon.uminv.i32.v4i32(<4 x i32>)
declare i32 @llvm.arm64.neon.uminv.i32.v8i16(<8 x i16>)
declare i32 @llvm.arm64.neon.uminv.i32.v16i8(<16 x i8>)
declare i32 @llvm.arm64.neon.sminv.i32.v4i32(<4 x i32>)
declare i32 @llvm.arm64.neon.sminv.i32.v8i16(<8 x i16>)
declare i32 @llvm.arm64.neon.sminv.i32.v16i8(<16 x i8>)
declare i32 @llvm.arm64.neon.uminv.i32.v4i16(<4 x i16>)
declare i32 @llvm.arm64.neon.uminv.i32.v8i8(<8 x i8>)
declare i32 @llvm.arm64.neon.sminv.i32.v4i16(<4 x i16>)
declare i32 @llvm.arm64.neon.sminv.i32.v8i8(<8 x i8>)
declare i32 @llvm.arm64.neon.umaxv.i32.v4i32(<4 x i32>)
declare i32 @llvm.arm64.neon.umaxv.i32.v8i16(<8 x i16>)
declare i32 @llvm.arm64.neon.umaxv.i32.v16i8(<16 x i8>)
declare i32 @llvm.arm64.neon.smaxv.i32.v4i32(<4 x i32>)
declare i32 @llvm.arm64.neon.smaxv.i32.v8i16(<8 x i16>)
declare i32 @llvm.arm64.neon.smaxv.i32.v16i8(<16 x i8>)
declare i32 @llvm.arm64.neon.umaxv.i32.v4i16(<4 x i16>)
declare i32 @llvm.arm64.neon.umaxv.i32.v8i8(<8 x i8>)
declare i32 @llvm.arm64.neon.smaxv.i32.v4i16(<4 x i16>)
declare i32 @llvm.arm64.neon.smaxv.i32.v8i8(<8 x i8>)
declare i64 @llvm.arm64.neon.uaddlv.i64.v4i32(<4 x i32>)
declare i32 @llvm.arm64.neon.uaddlv.i32.v8i16(<8 x i16>)
declare i32 @llvm.arm64.neon.uaddlv.i32.v16i8(<16 x i8>)
declare i64 @llvm.arm64.neon.saddlv.i64.v4i32(<4 x i32>)
declare i32 @llvm.arm64.neon.saddlv.i32.v8i16(<8 x i16>)
declare i32 @llvm.arm64.neon.saddlv.i32.v16i8(<16 x i8>)
declare i32 @llvm.arm64.neon.uaddlv.i32.v4i16(<4 x i16>)
declare i32 @llvm.arm64.neon.uaddlv.i32.v8i8(<8 x i8>)
declare i32 @llvm.arm64.neon.saddlv.i32.v4i16(<4 x i16>)
declare i32 @llvm.arm64.neon.saddlv.i32.v8i8(<8 x i8>)
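; These across-lane intrinsics all return i32 (or i64 for the v4i32 forms of
; [su]addlv); for narrower element types the tests truncate the result back
; down, and the CHECK lines only look for the reduction instruction with the
; right destination register size.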
define i16 @test_vaddlv_s8(<8 x i8> %a) {
; CHECK: test_vaddlv_s8:
; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
%saddlvv.i = tail call i32 @llvm.arm64.neon.saddlv.i32.v8i8(<8 x i8> %a)
%0 = trunc i32 %saddlvv.i to i16
ret i16 %0
}
define i32 @test_vaddlv_s16(<4 x i16> %a) {
; CHECK: test_vaddlv_s16:
; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
%saddlvv.i = tail call i32 @llvm.arm64.neon.saddlv.i32.v4i16(<4 x i16> %a)
ret i32 %saddlvv.i
}
define i16 @test_vaddlv_u8(<8 x i8> %a) {
; CHECK: test_vaddlv_u8:
; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
%uaddlvv.i = tail call i32 @llvm.arm64.neon.uaddlv.i32.v8i8(<8 x i8> %a)
%0 = trunc i32 %uaddlvv.i to i16
ret i16 %0
}
define i32 @test_vaddlv_u16(<4 x i16> %a) {
; CHECK: test_vaddlv_u16:
; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
%uaddlvv.i = tail call i32 @llvm.arm64.neon.uaddlv.i32.v4i16(<4 x i16> %a)
ret i32 %uaddlvv.i
}
define i16 @test_vaddlvq_s8(<16 x i8> %a) {
; CHECK: test_vaddlvq_s8:
; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
%saddlvv.i = tail call i32 @llvm.arm64.neon.saddlv.i32.v16i8(<16 x i8> %a)
%0 = trunc i32 %saddlvv.i to i16
ret i16 %0
}
define i32 @test_vaddlvq_s16(<8 x i16> %a) {
; CHECK: test_vaddlvq_s16:
; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
%saddlvv.i = tail call i32 @llvm.arm64.neon.saddlv.i32.v8i16(<8 x i16> %a)
ret i32 %saddlvv.i
}
define i64 @test_vaddlvq_s32(<4 x i32> %a) {
; CHECK: test_vaddlvq_s32:
; CHECK: saddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
%saddlvv.i = tail call i64 @llvm.arm64.neon.saddlv.i64.v4i32(<4 x i32> %a)
ret i64 %saddlvv.i
}
define i16 @test_vaddlvq_u8(<16 x i8> %a) {
; CHECK: test_vaddlvq_u8:
; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
%uaddlvv.i = tail call i32 @llvm.arm64.neon.uaddlv.i32.v16i8(<16 x i8> %a)
%0 = trunc i32 %uaddlvv.i to i16
ret i16 %0
}
define i32 @test_vaddlvq_u16(<8 x i16> %a) {
; CHECK: test_vaddlvq_u16:
; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
%uaddlvv.i = tail call i32 @llvm.arm64.neon.uaddlv.i32.v8i16(<8 x i16> %a)
ret i32 %uaddlvv.i
}
define i64 @test_vaddlvq_u32(<4 x i32> %a) {
; CHECK: test_vaddlvq_u32:
; CHECK: uaddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
%uaddlvv.i = tail call i64 @llvm.arm64.neon.uaddlv.i64.v4i32(<4 x i32> %a)
ret i64 %uaddlvv.i
}
define i8 @test_vmaxv_s8(<8 x i8> %a) {
; CHECK: test_vmaxv_s8:
; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
%smaxv.i = tail call i32 @llvm.arm64.neon.smaxv.i32.v8i8(<8 x i8> %a)
%0 = trunc i32 %smaxv.i to i8
ret i8 %0
}
define i16 @test_vmaxv_s16(<4 x i16> %a) {
; CHECK: test_vmaxv_s16:
; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
%smaxv.i = tail call i32 @llvm.arm64.neon.smaxv.i32.v4i16(<4 x i16> %a)
%0 = trunc i32 %smaxv.i to i16
ret i16 %0
}
define i8 @test_vmaxv_u8(<8 x i8> %a) {
; CHECK: test_vmaxv_u8:
; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
%umaxv.i = tail call i32 @llvm.arm64.neon.umaxv.i32.v8i8(<8 x i8> %a)
%0 = trunc i32 %umaxv.i to i8
ret i8 %0
}
define i16 @test_vmaxv_u16(<4 x i16> %a) {
; CHECK: test_vmaxv_u16:
; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
%umaxv.i = tail call i32 @llvm.arm64.neon.umaxv.i32.v4i16(<4 x i16> %a)
%0 = trunc i32 %umaxv.i to i16
ret i16 %0
}
define i8 @test_vmaxvq_s8(<16 x i8> %a) {
; CHECK: test_vmaxvq_s8:
; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
%smaxv.i = tail call i32 @llvm.arm64.neon.smaxv.i32.v16i8(<16 x i8> %a)
%0 = trunc i32 %smaxv.i to i8
ret i8 %0
}
define i16 @test_vmaxvq_s16(<8 x i16> %a) {
; CHECK: test_vmaxvq_s16:
; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
%smaxv.i = tail call i32 @llvm.arm64.neon.smaxv.i32.v8i16(<8 x i16> %a)
%0 = trunc i32 %smaxv.i to i16
ret i16 %0
}
define i32 @test_vmaxvq_s32(<4 x i32> %a) {
; CHECK: test_vmaxvq_s32:
; CHECK: smaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
%smaxv.i = tail call i32 @llvm.arm64.neon.smaxv.i32.v4i32(<4 x i32> %a)
ret i32 %smaxv.i
}
define i8 @test_vmaxvq_u8(<16 x i8> %a) {
; CHECK: test_vmaxvq_u8:
; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
%umaxv.i = tail call i32 @llvm.arm64.neon.umaxv.i32.v16i8(<16 x i8> %a)
%0 = trunc i32 %umaxv.i to i8
ret i8 %0
}
define i16 @test_vmaxvq_u16(<8 x i16> %a) {
; CHECK: test_vmaxvq_u16:
; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
%umaxv.i = tail call i32 @llvm.arm64.neon.umaxv.i32.v8i16(<8 x i16> %a)
%0 = trunc i32 %umaxv.i to i16
ret i16 %0
}
define i32 @test_vmaxvq_u32(<4 x i32> %a) {
; CHECK: test_vmaxvq_u32:
; CHECK: umaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
%umaxv.i = tail call i32 @llvm.arm64.neon.umaxv.i32.v4i32(<4 x i32> %a)
ret i32 %umaxv.i
}
define i8 @test_vminv_s8(<8 x i8> %a) {
; CHECK: test_vminv_s8:
; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
%sminv.i = tail call i32 @llvm.arm64.neon.sminv.i32.v8i8(<8 x i8> %a)
%0 = trunc i32 %sminv.i to i8
ret i8 %0
}
define i16 @test_vminv_s16(<4 x i16> %a) {
; CHECK: test_vminv_s16:
; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
%sminv.i = tail call i32 @llvm.arm64.neon.sminv.i32.v4i16(<4 x i16> %a)
%0 = trunc i32 %sminv.i to i16
ret i16 %0
}
define i8 @test_vminv_u8(<8 x i8> %a) {
; CHECK: test_vminv_u8:
; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
%uminv.i = tail call i32 @llvm.arm64.neon.uminv.i32.v8i8(<8 x i8> %a)
%0 = trunc i32 %uminv.i to i8
ret i8 %0
}
define i16 @test_vminv_u16(<4 x i16> %a) {
; CHECK: test_vminv_u16:
; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
%uminv.i = tail call i32 @llvm.arm64.neon.uminv.i32.v4i16(<4 x i16> %a)
%0 = trunc i32 %uminv.i to i16
ret i16 %0
}
define i8 @test_vminvq_s8(<16 x i8> %a) {
; CHECK: test_vminvq_s8:
; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
%sminv.i = tail call i32 @llvm.arm64.neon.sminv.i32.v16i8(<16 x i8> %a)
%0 = trunc i32 %sminv.i to i8
ret i8 %0
}
define i16 @test_vminvq_s16(<8 x i16> %a) {
; CHECK: test_vminvq_s16:
; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
%sminv.i = tail call i32 @llvm.arm64.neon.sminv.i32.v8i16(<8 x i16> %a)
%0 = trunc i32 %sminv.i to i16
ret i16 %0
}
define i32 @test_vminvq_s32(<4 x i32> %a) {
; CHECK: test_vminvq_s32:
; CHECK: sminv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
%sminv.i = tail call i32 @llvm.arm64.neon.sminv.i32.v4i32(<4 x i32> %a)
ret i32 %sminv.i
}
define i8 @test_vminvq_u8(<16 x i8> %a) {
; CHECK: test_vminvq_u8:
; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
%uminv.i = tail call i32 @llvm.arm64.neon.uminv.i32.v16i8(<16 x i8> %a)
%0 = trunc i32 %uminv.i to i8
ret i8 %0
}
define i16 @test_vminvq_u16(<8 x i16> %a) {
; CHECK: test_vminvq_u16:
; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
%uminv.i = tail call i32 @llvm.arm64.neon.uminv.i32.v8i16(<8 x i16> %a)
%0 = trunc i32 %uminv.i to i16
ret i16 %0
}
define i32 @test_vminvq_u32(<4 x i32> %a) {
; CHECK: test_vminvq_u32:
; CHECK: uminv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
%uminv.i = tail call i32 @llvm.arm64.neon.uminv.i32.v4i32(<4 x i32> %a)
ret i32 %uminv.i
}
define i8 @test_vaddv_s8(<8 x i8> %a) {
; CHECK: test_vaddv_s8:
; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
%vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v8i8(<8 x i8> %a)
%0 = trunc i32 %vaddv.i to i8
ret i8 %0
}
define i16 @test_vaddv_s16(<4 x i16> %a) {
; CHECK: test_vaddv_s16:
; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
%vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v4i16(<4 x i16> %a)
%0 = trunc i32 %vaddv.i to i16
ret i16 %0
}
define i8 @test_vaddv_u8(<8 x i8> %a) {
; CHECK: test_vaddv_u8:
; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
%vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v8i8(<8 x i8> %a)
%0 = trunc i32 %vaddv.i to i8
ret i8 %0
}
define i16 @test_vaddv_u16(<4 x i16> %a) {
; CHECK: test_vaddv_u16:
; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
%vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v4i16(<4 x i16> %a)
%0 = trunc i32 %vaddv.i to i16
ret i16 %0
}
define i8 @test_vaddvq_s8(<16 x i8> %a) {
; CHECK: test_vaddvq_s8:
; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
%vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v16i8(<16 x i8> %a)
%0 = trunc i32 %vaddv.i to i8
ret i8 %0
}
define i16 @test_vaddvq_s16(<8 x i16> %a) {
; CHECK: test_vaddvq_s16:
; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
%vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v8i16(<8 x i16> %a)
%0 = trunc i32 %vaddv.i to i16
ret i16 %0
}
define i32 @test_vaddvq_s32(<4 x i32> %a) {
; CHECK: test_vaddvq_s32:
; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
%vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v4i32(<4 x i32> %a)
ret i32 %vaddv.i
}
define i8 @test_vaddvq_u8(<16 x i8> %a) {
; CHECK: test_vaddvq_u8:
; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
%vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v16i8(<16 x i8> %a)
%0 = trunc i32 %vaddv.i to i8
ret i8 %0
}
define i16 @test_vaddvq_u16(<8 x i16> %a) {
; CHECK: test_vaddvq_u16:
; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
%vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v8i16(<8 x i16> %a)
%0 = trunc i32 %vaddv.i to i16
ret i16 %0
}
define i32 @test_vaddvq_u32(<4 x i32> %a) {
; CHECK: test_vaddvq_u32:
; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
%vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v4i32(<4 x i32> %a)
ret i32 %vaddv.i
}
define float @test_vmaxvq_f32(<4 x float> %a) {
; CHECK: test_vmaxvq_f32:
; CHECK: fmaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
%0 = call float @llvm.arm64.neon.fmaxv.f32.v4f32(<4 x float> %a)
ret float %0
}
define float @test_vminvq_f32(<4 x float> %a) {
; CHECK: test_vminvq_f32:
; CHECK: fminv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
%0 = call float @llvm.arm64.neon.fminv.f32.v4f32(<4 x float> %a)
ret float %0
}
define float @test_vmaxnmvq_f32(<4 x float> %a) {
; CHECK: test_vmaxnmvq_f32:
; CHECK: fmaxnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
%0 = call float @llvm.arm64.neon.fmaxnmv.f32.v4f32(<4 x float> %a)
ret float %0
}
define float @test_vminnmvq_f32(<4 x float> %a) {
; CHECK: test_vminnmvq_f32:
; CHECK: fminnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
%0 = call float @llvm.arm64.neon.fminnmv.f32.v4f32(<4 x float> %a)
ret float %0
}


@@ -0,0 +1,100 @@
; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
declare <8 x i8> @llvm.arm64.neon.addp.v8i8(<8 x i8>, <8 x i8>)
define <8 x i8> @test_addp_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
; Using registers other than v0, v1 is possible, but would be odd.
; CHECK: test_addp_v8i8:
%tmp1 = call <8 x i8> @llvm.arm64.neon.addp.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
; CHECK: addp v0.8b, v0.8b, v1.8b
ret <8 x i8> %tmp1
}
declare <16 x i8> @llvm.arm64.neon.addp.v16i8(<16 x i8>, <16 x i8>)
define <16 x i8> @test_addp_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
; CHECK: test_addp_v16i8:
%tmp1 = call <16 x i8> @llvm.arm64.neon.addp.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
; CHECK: addp v0.16b, v0.16b, v1.16b
ret <16 x i8> %tmp1
}
declare <4 x i16> @llvm.arm64.neon.addp.v4i16(<4 x i16>, <4 x i16>)
define <4 x i16> @test_addp_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
; CHECK: test_addp_v4i16:
%tmp1 = call <4 x i16> @llvm.arm64.neon.addp.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
; CHECK: addp v0.4h, v0.4h, v1.4h
ret <4 x i16> %tmp1
}
declare <8 x i16> @llvm.arm64.neon.addp.v8i16(<8 x i16>, <8 x i16>)
define <8 x i16> @test_addp_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
; CHECK: test_addp_v8i16:
%tmp1 = call <8 x i16> @llvm.arm64.neon.addp.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
; CHECK: addp v0.8h, v0.8h, v1.8h
ret <8 x i16> %tmp1
}
declare <2 x i32> @llvm.arm64.neon.addp.v2i32(<2 x i32>, <2 x i32>)
define <2 x i32> @test_addp_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; CHECK: test_addp_v2i32:
%tmp1 = call <2 x i32> @llvm.arm64.neon.addp.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
; CHECK: addp v0.2s, v0.2s, v1.2s
ret <2 x i32> %tmp1
}
declare <4 x i32> @llvm.arm64.neon.addp.v4i32(<4 x i32>, <4 x i32>)
define <4 x i32> @test_addp_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; CHECK: test_addp_v4i32:
%tmp1 = call <4 x i32> @llvm.arm64.neon.addp.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
; CHECK: addp v0.4s, v0.4s, v1.4s
ret <4 x i32> %tmp1
}
declare <2 x i64> @llvm.arm64.neon.addp.v2i64(<2 x i64>, <2 x i64>)
define <2 x i64> @test_addp_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
; CHECK: test_addp_v2i64:
%val = call <2 x i64> @llvm.arm64.neon.addp.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
; CHECK: addp v0.2d, v0.2d, v1.2d
ret <2 x i64> %val
}
declare <2 x float> @llvm.arm64.neon.addp.v2f32(<2 x float>, <2 x float>)
declare <4 x float> @llvm.arm64.neon.addp.v4f32(<4 x float>, <4 x float>)
declare <2 x double> @llvm.arm64.neon.addp.v2f64(<2 x double>, <2 x double>)
define <2 x float> @test_faddp_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
; CHECK: test_faddp_v2f32:
%val = call <2 x float> @llvm.arm64.neon.addp.v2f32(<2 x float> %lhs, <2 x float> %rhs)
; CHECK: faddp v0.2s, v0.2s, v1.2s
ret <2 x float> %val
}
define <4 x float> @test_faddp_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
; CHECK: test_faddp_v4f32:
%val = call <4 x float> @llvm.arm64.neon.addp.v4f32(<4 x float> %lhs, <4 x float> %rhs)
; CHECK: faddp v0.4s, v0.4s, v1.4s
ret <4 x float> %val
}
define <2 x double> @test_faddp_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
; CHECK: test_faddp_v2f64:
%val = call <2 x double> @llvm.arm64.neon.addp.v2f64(<2 x double> %lhs, <2 x double> %rhs)
; CHECK: faddp v0.2d, v0.2d, v1.2d
ret <2 x double> %val
}
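; addv has no 2s form, so the v2i32 across-vector add below is expected to
; be selected as a single addp of the two lanes.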
define i32 @test_vaddv.v2i32(<2 x i32> %a) {
; CHECK-LABEL: test_vaddv.v2i32
; CHECK: addp {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%1 = tail call i32 @llvm.arm64.neon.saddv.i32.v2i32(<2 x i32> %a)
ret i32 %1
}
declare i32 @llvm.arm64.neon.saddv.i32.v2i32(<2 x i32>)


@@ -0,0 +1,237 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -arm64-simd-scalar | FileCheck %s
define <8 x i8> @add8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: add {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = add <8 x i8> %A, %B;
ret <8 x i8> %tmp3
}
define <16 x i8> @add16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: add {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = add <16 x i8> %A, %B;
ret <16 x i8> %tmp3
}
define <4 x i16> @add4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: add {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = add <4 x i16> %A, %B;
ret <4 x i16> %tmp3
}
define <8 x i16> @add8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: add {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = add <8 x i16> %A, %B;
ret <8 x i16> %tmp3
}
define <2 x i32> @add2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: add {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = add <2 x i32> %A, %B;
ret <2 x i32> %tmp3
}
define <4 x i32> @add4x32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: add {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = add <4 x i32> %A, %B;
ret <4 x i32> %tmp3
}
define <2 x i64> @add2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: add {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = add <2 x i64> %A, %B;
ret <2 x i64> %tmp3
}
define <2 x float> @add2xfloat(<2 x float> %A, <2 x float> %B) {
;CHECK: fadd {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = fadd <2 x float> %A, %B;
ret <2 x float> %tmp3
}
define <4 x float> @add4xfloat(<4 x float> %A, <4 x float> %B) {
;CHECK: fadd {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = fadd <4 x float> %A, %B;
ret <4 x float> %tmp3
}
define <2 x double> @add2xdouble(<2 x double> %A, <2 x double> %B) {
;CHECK: add {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = fadd <2 x double> %A, %B;
ret <2 x double> %tmp3
}
define <8 x i8> @sub8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: sub {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = sub <8 x i8> %A, %B;
ret <8 x i8> %tmp3
}
define <16 x i8> @sub16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: sub {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = sub <16 x i8> %A, %B;
ret <16 x i8> %tmp3
}
define <4 x i16> @sub4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: sub {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
%tmp3 = sub <4 x i16> %A, %B;
ret <4 x i16> %tmp3
}
define <8 x i16> @sub8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: sub {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
%tmp3 = sub <8 x i16> %A, %B;
ret <8 x i16> %tmp3
}
define <2 x i32> @sub2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: sub {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = sub <2 x i32> %A, %B;
ret <2 x i32> %tmp3
}
define <4 x i32> @sub4x32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: sub {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = sub <4 x i32> %A, %B;
ret <4 x i32> %tmp3
}
define <2 x i64> @sub2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: sub {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = sub <2 x i64> %A, %B;
ret <2 x i64> %tmp3
}
define <2 x float> @sub2xfloat(<2 x float> %A, <2 x float> %B) {
;CHECK: fsub {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
%tmp3 = fsub <2 x float> %A, %B;
ret <2 x float> %tmp3
}
define <4 x float> @sub4xfloat(<4 x float> %A, <4 x float> %B) {
;CHECK: fsub {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
%tmp3 = fsub <4 x float> %A, %B;
ret <4 x float> %tmp3
}
define <2 x double> @sub2xdouble(<2 x double> %A, <2 x double> %B) {
;CHECK: sub {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
%tmp3 = fsub <2 x double> %A, %B;
ret <2 x double> %tmp3
}
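; The <1 x double> tests that follow should use the plain scalar d-register
; instructions (fadd, fmul, fdiv, ... on d registers) rather than any vector
; encoding.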
define <1 x double> @test_vadd_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vadd_f64
; CHECK: fadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
%1 = fadd <1 x double> %a, %b
ret <1 x double> %1
}
define <1 x double> @test_vmul_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vmul_f64
; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
%1 = fmul <1 x double> %a, %b
ret <1 x double> %1
}
define <1 x double> @test_vdiv_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vdiv_f64
; CHECK: fdiv d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
%1 = fdiv <1 x double> %a, %b
ret <1 x double> %1
}
define <1 x double> @test_vmla_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
; CHECK-LABEL: test_vmla_f64
; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
; CHECK: fadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
%1 = fmul <1 x double> %b, %c
%2 = fadd <1 x double> %1, %a
ret <1 x double> %2
}
define <1 x double> @test_vmls_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
; CHECK-LABEL: test_vmls_f64
; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
; CHECK: fsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
%1 = fmul <1 x double> %b, %c
%2 = fsub <1 x double> %a, %1
ret <1 x double> %2
}
define <1 x double> @test_vfms_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
; CHECK-LABEL: test_vfms_f64
; CHECK: fmsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
%1 = fsub <1 x double> <double -0.000000e+00>, %b
%2 = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %1, <1 x double> %c, <1 x double> %a)
ret <1 x double> %2
}
define <1 x double> @test_vfma_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
; CHECK-LABEL: test_vfma_f64
; CHECK: fmadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
%1 = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %b, <1 x double> %c, <1 x double> %a)
ret <1 x double> %1
}
define <1 x double> @test_vsub_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vsub_f64
; CHECK: fsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
%1 = fsub <1 x double> %a, %b
ret <1 x double> %1
}
define <1 x double> @test_vabd_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vabd_f64
; CHECK: fabd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
%1 = tail call <1 x double> @llvm.arm64.neon.fabd.v1f64(<1 x double> %a, <1 x double> %b)
ret <1 x double> %1
}
define <1 x double> @test_vmax_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vmax_f64
; CHECK: fmax d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
%1 = tail call <1 x double> @llvm.arm64.neon.fmax.v1f64(<1 x double> %a, <1 x double> %b)
ret <1 x double> %1
}
define <1 x double> @test_vmin_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vmin_f64
; CHECK: fmin d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
%1 = tail call <1 x double> @llvm.arm64.neon.fmin.v1f64(<1 x double> %a, <1 x double> %b)
ret <1 x double> %1
}
define <1 x double> @test_vmaxnm_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vmaxnm_f64
; CHECK: fmaxnm d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
%1 = tail call <1 x double> @llvm.arm64.neon.fmaxnm.v1f64(<1 x double> %a, <1 x double> %b)
ret <1 x double> %1
}
define <1 x double> @test_vminnm_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vminnm_f64
; CHECK: fminnm d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
%1 = tail call <1 x double> @llvm.arm64.neon.fminnm.v1f64(<1 x double> %a, <1 x double> %b)
ret <1 x double> %1
}
define <1 x double> @test_vabs_f64(<1 x double> %a) {
; CHECK-LABEL: test_vabs_f64
; CHECK: fabs d{{[0-9]+}}, d{{[0-9]+}}
%1 = tail call <1 x double> @llvm.fabs.v1f64(<1 x double> %a)
ret <1 x double> %1
}
define <1 x double> @test_vneg_f64(<1 x double> %a) {
; CHECK-LABEL: test_vneg_f64
; CHECK: fneg d{{[0-9]+}}, d{{[0-9]+}}
%1 = fsub <1 x double> <double -0.000000e+00>, %a
ret <1 x double> %1
}
declare <1 x double> @llvm.fabs.v1f64(<1 x double>)
declare <1 x double> @llvm.arm64.neon.fminnm.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.arm64.neon.fmaxnm.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.arm64.neon.fmin.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.arm64.neon.fmax.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.arm64.neon.fabd.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.fma.v1f64(<1 x double>, <1 x double>, <1 x double>)