From f539725734a15ffcc7018cb101d6b5aa207792aa Mon Sep 17 00:00:00 2001
From: Tim Northover
Date: Wed, 16 Apr 2014 15:28:02 +0000
Subject: [PATCH] AArch64/ARM64: port some NEON tests to ARM64

These tests used completely different sets of intrinsics, so the only
way to port them is to create a separate ARM64 copy and change them
all. Other than that, CodeGen was straightforward; no deficiencies
detected here.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@206392 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/AArch64/neon-2velem-high.ll      |    2 +-
 test/CodeGen/AArch64/neon-2velem.ll           |    1 +
 test/CodeGen/AArch64/neon-3vdiff.ll           |    1 +
 test/CodeGen/AArch64/neon-aba-abd.ll          |    1 +
 test/CodeGen/AArch64/neon-across.ll           |    1 +
 test/CodeGen/AArch64/neon-add-pairwise.ll     |    1 +
 test/CodeGen/AArch64/neon-add-sub.ll          |    1 +
 test/CodeGen/AArch64/neon-bitcast.ll          |    1 +
 .../CodeGen/ARM64/aarch64-neon-2velem-high.ll |  341 ++
 test/CodeGen/ARM64/aarch64-neon-2velem.ll     | 2853 +++++++++++++++++
 test/CodeGen/ARM64/aarch64-neon-3vdiff.ll     | 1829 +++++++++++
 test/CodeGen/ARM64/aarch64-neon-aba-abd.ll    |  236 ++
 test/CodeGen/ARM64/aarch64-neon-across.ll     |  460 +++
 .../ARM64/aarch64-neon-add-pairwise.ll        |  100 +
 test/CodeGen/ARM64/aarch64-neon-add-sub.ll    |  237 ++
 15 files changed, 6064 insertions(+), 1 deletion(-)
 create mode 100644 test/CodeGen/ARM64/aarch64-neon-2velem-high.ll
 create mode 100644 test/CodeGen/ARM64/aarch64-neon-2velem.ll
 create mode 100644 test/CodeGen/ARM64/aarch64-neon-3vdiff.ll
 create mode 100644 test/CodeGen/ARM64/aarch64-neon-aba-abd.ll
 create mode 100644 test/CodeGen/ARM64/aarch64-neon-across.ll
 create mode 100644 test/CodeGen/ARM64/aarch64-neon-add-pairwise.ll
 create mode 100644 test/CodeGen/ARM64/aarch64-neon-add-sub.ll

diff --git a/test/CodeGen/AArch64/neon-2velem-high.ll b/test/CodeGen/AArch64/neon-2velem-high.ll
index 97031d98b7c..ebdb5b7132d 100644
--- a/test/CodeGen/AArch64/neon-2velem-high.ll
+++ b/test/CodeGen/AArch64/neon-2velem-high.ll
@@ -1,5 +1,5 @@
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
-
+; arm64 has a copy of this test in its directory due to differing intrinsics.
 declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
 
 declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
diff --git a/test/CodeGen/AArch64/neon-2velem.ll b/test/CodeGen/AArch64/neon-2velem.ll
index acffb14edf5..b9d0e84f16c 100644
--- a/test/CodeGen/AArch64/neon-2velem.ll
+++ b/test/CodeGen/AArch64/neon-2velem.ll
@@ -1,4 +1,5 @@
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+; arm64 has a copy of this test in its directory due to differing intrinsics.
 
 declare <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double>, <2 x double>)
 
diff --git a/test/CodeGen/AArch64/neon-3vdiff.ll b/test/CodeGen/AArch64/neon-3vdiff.ll
index 96400eb3039..dbe2a726b90 100644
--- a/test/CodeGen/AArch64/neon-3vdiff.ll
+++ b/test/CodeGen/AArch64/neon-3vdiff.ll
@@ -1,4 +1,5 @@
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+; arm64 has its own copy of this test in its directory.
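+; For example, where the AArch64 neon-2velem.ll suite above declares
+;   declare <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double>, <2 x double>)
+; the ARM64 copy (aarch64-neon-2velem.ll below) instead declares
+;   declare <2 x double> @llvm.arm64.neon.fmulx.v2f64(<2 x double>, <2 x double>)
+; i.e. both the prefix and, for some intrinsics, the base name differ, while
+; the test bodies and CHECK lines carry over largely unchanged.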
 declare <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8>, <8 x i8>)
diff --git a/test/CodeGen/AArch64/neon-aba-abd.ll b/test/CodeGen/AArch64/neon-aba-abd.ll
index 54009849ef6..1fe52565afe 100644
--- a/test/CodeGen/AArch64/neon-aba-abd.ll
+++ b/test/CodeGen/AArch64/neon-aba-abd.ll
@@ -1,4 +1,5 @@
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+; arm64 has a copy of this test in its own directory (different intrinsic names).
 
 declare <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8>, <8 x i8>)
 declare <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8>, <8 x i8>)
diff --git a/test/CodeGen/AArch64/neon-across.ll b/test/CodeGen/AArch64/neon-across.ll
index 6d30c953022..98444d29a01 100644
--- a/test/CodeGen/AArch64/neon-across.ll
+++ b/test/CodeGen/AArch64/neon-across.ll
@@ -1,4 +1,5 @@
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+; arm64 has a copy of this test in its own directory.
 
 declare float @llvm.aarch64.neon.vminnmv(<4 x float>)
 
diff --git a/test/CodeGen/AArch64/neon-add-pairwise.ll b/test/CodeGen/AArch64/neon-add-pairwise.ll
index 26f91477485..d304094adb4 100644
--- a/test/CodeGen/AArch64/neon-add-pairwise.ll
+++ b/test/CodeGen/AArch64/neon-add-pairwise.ll
@@ -1,4 +1,5 @@
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+; arm64 has a copy of this test in its own directory.
 
 declare <8 x i8> @llvm.arm.neon.vpadd.v8i8(<8 x i8>, <8 x i8>)
 
diff --git a/test/CodeGen/AArch64/neon-add-sub.ll b/test/CodeGen/AArch64/neon-add-sub.ll
index 25d2dcb7ceb..eebad4df106 100644
--- a/test/CodeGen/AArch64/neon-add-sub.ll
+++ b/test/CodeGen/AArch64/neon-add-sub.ll
@@ -1,4 +1,5 @@
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+; arm64 has its own copy of this test.
 
 define <8 x i8> @add8xi8(<8 x i8> %A, <8 x i8> %B) {
 ;CHECK: add {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
diff --git a/test/CodeGen/AArch64/neon-bitcast.ll b/test/CodeGen/AArch64/neon-bitcast.ll
index 61099d48fdd..25819b37932 100644
--- a/test/CodeGen/AArch64/neon-bitcast.ll
+++ b/test/CodeGen/AArch64/neon-bitcast.ll
@@ -1,4 +1,5 @@
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=+neon -verify-machineinstrs < %s | FileCheck %s
 
 ; From <8 x i8>
 
diff --git a/test/CodeGen/ARM64/aarch64-neon-2velem-high.ll b/test/CodeGen/ARM64/aarch64-neon-2velem-high.ll
new file mode 100644
index 00000000000..2013747713b
--- /dev/null
+++ b/test/CodeGen/ARM64/aarch64-neon-2velem-high.ll
@@ -0,0 +1,341 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
+
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
+
+declare <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <2 x i64> @llvm.arm64.neon.sqsub.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <4 x i32> @llvm.arm64.neon.sqsub.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.arm64.neon.sqadd.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.arm64.neon.sqadd.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16>, <4 x i16>)
+
+define <4 x i32> @test_vmull_high_n_s16(<8 x i16> %a, i16 %b) {
+; CHECK-LABEL: test_vmull_high_n_s16:
+; CHECK: dup [[REPLICATE:v[0-9]+]].8h, w0
+; CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vecinit.i.i = insertelement <4 x i16> undef, i16 %b, i32 0
+  %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %b, i32 1
+  %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %b, i32 2
+  %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %b, i32 3
+  %vmull15.i.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+  ret <4 x i32> %vmull15.i.i
+}
+
+define <2 x i64> @test_vmull_high_n_s32(<4 x i32> %a, i32 %b) {
+; CHECK-LABEL: test_vmull_high_n_s32:
+; CHECK: dup [[REPLICATE:v[0-9]+]].4s, w0
+; CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vecinit.i.i = insertelement <2 x i32> undef, i32 %b, i32 0
+  %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %b, i32 1
+  %vmull9.i.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+  ret <2 x i64> %vmull9.i.i
+}
+
+define <4 x i32> @test_vmull_high_n_u16(<8 x i16> %a, i16 %b) {
+; CHECK-LABEL: test_vmull_high_n_u16:
+; CHECK: dup [[REPLICATE:v[0-9]+]].8h, w0
+; CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vecinit.i.i = insertelement <4 x i16> undef, i16 %b, i32 0
+  %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %b, i32 1
+  %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %b, i32 2
+  %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %b, i32 3
+  %vmull15.i.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+  ret <4 x i32> %vmull15.i.i
+}
+
+define <2 x i64> @test_vmull_high_n_u32(<4 x i32> %a, i32 %b) {
+; CHECK-LABEL: test_vmull_high_n_u32:
+; CHECK: dup [[REPLICATE:v[0-9]+]].4s, w0
+; CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vecinit.i.i = insertelement <2 x i32> undef, i32 %b, i32 0
+  %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %b, i32 1
+  %vmull9.i.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+  ret <2 x i64> %vmull9.i.i
+}
+
+define <4 x i32> @test_vqdmull_high_n_s16(<8 x i16> %a, i16 %b) {
+; CHECK-LABEL: test_vqdmull_high_n_s16:
+; CHECK: dup [[REPLICATE:v[0-9]+]].8h, w0
+; CHECK: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vecinit.i.i = insertelement <4 x i16> undef, i16 %b, i32 0
+  %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %b, i32 1
+  %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %b, i32 2
+  %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %b, i32 3
+  %vqdmull15.i.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+  ret <4 x i32> %vqdmull15.i.i
+}
+
+define <2 x i64> @test_vqdmull_high_n_s32(<4 x i32> %a, i32 %b) {
+; CHECK-LABEL: test_vqdmull_high_n_s32:
+; CHECK: dup [[REPLICATE:v[0-9]+]].4s, w0
+; CHECK: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vecinit.i.i = insertelement <2 x i32> undef, i32 %b, i32 0
+  %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %b, i32 1
+  %vqdmull9.i.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+  ret <2 x i64> %vqdmull9.i.i
+}
+
+define <4 x i32> @test_vmlal_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK-LABEL: test_vmlal_high_n_s16:
+; CHECK: dup [[REPLICATE:v[0-9]+]].8h, w0
+; CHECK: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+  %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+  %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+  %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+  %vmull2.i.i.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+  %add.i.i = add <4 x i32> %vmull2.i.i.i, %a
+  ret <4 x i32> %add.i.i
+}
+
+define <2 x i64> @test_vmlal_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: test_vmlal_high_n_s32:
+; CHECK: dup [[REPLICATE:v[0-9]+]].4s, w0
+; CHECK: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+  %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+  %vmull2.i.i.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+  %add.i.i = add <2 x i64> %vmull2.i.i.i, %a
+  ret <2 x i64> %add.i.i
+}
+
+define <4 x i32> @test_vmlal_high_n_u16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK-LABEL: test_vmlal_high_n_u16:
+; CHECK: dup [[REPLICATE:v[0-9]+]].8h, w0
+; CHECK: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+  %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+  %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+  %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+  %vmull2.i.i.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+  %add.i.i = add <4 x i32> %vmull2.i.i.i, %a
+  ret <4 x i32> %add.i.i
+}
+
+define <2 x i64> @test_vmlal_high_n_u32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: test_vmlal_high_n_u32:
+; CHECK: dup [[REPLICATE:v[0-9]+]].4s, w0
+; CHECK: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+  %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+  %vmull2.i.i.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+  %add.i.i = add <2 x i64> %vmull2.i.i.i, %a
+  ret <2 x i64> %add.i.i
+}
+
+define <4 x i32> @test_vqdmlal_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK-LABEL: test_vqdmlal_high_n_s16:
+; CHECK: sqdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+  %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+  %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+  %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+  %vqdmlal15.i.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+  %vqdmlal17.i.i = tail call <4 x i32> @llvm.arm64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal15.i.i)
+  ret <4 x i32> %vqdmlal17.i.i
+}
+
+define <2 x i64> @test_vqdmlal_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: test_vqdmlal_high_n_s32:
+; CHECK: sqdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+  %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+  %vqdmlal9.i.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+  %vqdmlal11.i.i = tail call <2 x i64> @llvm.arm64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal9.i.i)
+  ret <2 x i64> %vqdmlal11.i.i
+}
+
+define <4 x i32> @test_vmlsl_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK-LABEL: test_vmlsl_high_n_s16:
+; CHECK: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+  %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+  %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+  %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+  %vmull2.i.i.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+  %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
+  ret <4 x i32> %sub.i.i
+}
+
+define <2 x i64> @test_vmlsl_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: test_vmlsl_high_n_s32:
+; CHECK: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+  %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+  %vmull2.i.i.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+  %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
+  ret <2 x i64> %sub.i.i
+}
+
+define <4 x i32> @test_vmlsl_high_n_u16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK-LABEL: test_vmlsl_high_n_u16:
+; CHECK: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+  %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+  %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+  %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+  %vmull2.i.i.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+  %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
+  ret <4 x i32> %sub.i.i
+}
+
+define <2 x i64> @test_vmlsl_high_n_u32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: test_vmlsl_high_n_u32:
+; CHECK: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+  %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+  %vmull2.i.i.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+  %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
+  ret <2 x i64> %sub.i.i
+}
+
+define <4 x i32> @test_vqdmlsl_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK-LABEL: test_vqdmlsl_high_n_s16:
+; CHECK: sqdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+  %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+  %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+  %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+  %vqdmlsl15.i.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+  %vqdmlsl17.i.i = tail call <4 x i32> @llvm.arm64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl15.i.i)
+  ret <4 x i32> %vqdmlsl17.i.i
+}
+
+define <2 x i64> @test_vqdmlsl_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: test_vqdmlsl_high_n_s32:
+; CHECK: sqdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+  %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+  %vqdmlsl9.i.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+  %vqdmlsl11.i.i = tail call <2 x i64> @llvm.arm64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl9.i.i)
+  ret <2 x i64> %vqdmlsl11.i.i
+}
+
+define <2 x float> @test_vmul_n_f32(<2 x float> %a, float %b) {
+; CHECK-LABEL: test_vmul_n_f32:
+; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+  %vecinit.i = insertelement <2 x float> undef, float %b, i32 0
+  %vecinit1.i = insertelement <2 x float> %vecinit.i, float %b, i32 1
+  %mul.i = fmul <2 x float> %vecinit1.i, %a
+  ret <2 x float> %mul.i
+}
+
+define <4 x float> @test_vmulq_n_f32(<4 x float> %a, float %b) {
+; CHECK-LABEL: test_vmulq_n_f32:
+; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+  %vecinit.i = insertelement <4 x float> undef, float %b, i32 0
+  %vecinit1.i = insertelement <4 x float> %vecinit.i, float %b, i32 1
+  %vecinit2.i = insertelement <4 x float> %vecinit1.i, float %b, i32 2
+  %vecinit3.i = insertelement <4 x float> %vecinit2.i, float %b, i32 3
+  %mul.i = fmul <4 x float> %vecinit3.i, %a
+  ret <4 x float> %mul.i
+}
+
+define <2 x double> @test_vmulq_n_f64(<2 x double> %a, double %b) {
+; CHECK-LABEL: test_vmulq_n_f64:
+; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+entry:
+  %vecinit.i = insertelement <2 x double> undef, double %b, i32 0
+  %vecinit1.i = insertelement <2 x double> %vecinit.i, double %b, i32 1
+  %mul.i = fmul <2 x double> %vecinit1.i, %a
+  ret <2 x double> %mul.i
+}
+
+define <2 x float> @test_vfma_n_f32(<2 x float> %a, <2 x float> %b, float %n) {
+; CHECK-LABEL: test_vfma_n_f32:
+; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+  %vecinit.i = insertelement <2 x float> undef, float %n, i32 0
+  %vecinit1.i = insertelement <2 x float> %vecinit.i, float %n, i32 1
+  %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %b, <2 x float> %vecinit1.i, <2 x float> %a)
+  ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmaq_n_f32(<4 x float> %a, <4 x float> %b, float %n) {
+; CHECK-LABEL: test_vfmaq_n_f32:
+; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+  %vecinit.i = insertelement <4 x float> undef, float %n, i32 0
+  %vecinit1.i = insertelement <4 x float> %vecinit.i, float %n, i32 1
+  %vecinit2.i = insertelement <4 x float> %vecinit1.i, float %n, i32 2
+  %vecinit3.i = insertelement <4 x float> %vecinit2.i, float %n, i32 3
+  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %b, <4 x float> %vecinit3.i, <4 x float> %a)
+  ret <4 x float> %0
+}
+
+define <2 x float> @test_vfms_n_f32(<2 x float> %a, <2 x float> %b, float %n) {
+; CHECK-LABEL: test_vfms_n_f32:
+; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+  %vecinit.i = insertelement <2 x float> undef, float %n, i32 0
+  %vecinit1.i = insertelement <2 x float> %vecinit.i, float %n, i32 1
+  %0 = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %b
+  %1 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %0, <2 x float> %vecinit1.i, <2 x float> %a)
+  ret <2 x float> %1
+}
+
+define <4 x float> @test_vfmsq_n_f32(<4 x float> %a, <4 x float> %b, float %n) {
+; CHECK-LABEL: test_vfmsq_n_f32:
+; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+  %vecinit.i = insertelement <4 x float> undef, float %n, i32 0
+  %vecinit1.i = insertelement <4 x float> %vecinit.i, float %n, i32 1
+  %vecinit2.i = insertelement <4 x float> %vecinit1.i, float %n, i32 2
+  %vecinit3.i = insertelement <4 x float> %vecinit2.i, float %n, i32 3
+  %0 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %b
+  %1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %0, <4 x float> %vecinit3.i, <4 x float> %a)
+  ret <4 x float> %1
+}
diff --git a/test/CodeGen/ARM64/aarch64-neon-2velem.ll b/test/CodeGen/ARM64/aarch64-neon-2velem.ll
new file mode 100644
index 00000000000..4c6b72d55c3
--- /dev/null
+++ b/test/CodeGen/ARM64/aarch64-neon-2velem.ll
@@ -0,0 +1,2853 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+declare <2 x double> @llvm.arm64.neon.fmulx.v2f64(<2 x double>, <2 x double>)
+
+declare <4 x float> @llvm.arm64.neon.fmulx.v4f32(<4 x float>, <4 x float>)
+
+declare <2 x float> @llvm.arm64.neon.fmulx.v2f32(<2 x float>, <2 x float>)
+
+declare <4 x i32> @llvm.arm64.neon.sqrdmulh.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i32> @llvm.arm64.neon.sqrdmulh.v2i32(<2 x i32>, <2 x i32>)
+
+declare <8 x i16> @llvm.arm64.neon.sqrdmulh.v8i16(<8 x i16>, <8 x i16>)
+
+declare <4 x i16> @llvm.arm64.neon.sqrdmulh.v4i16(<4 x i16>, <4 x i16>)
+
+declare <4 x i32> @llvm.arm64.neon.sqdmulh.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i32> @llvm.arm64.neon.sqdmulh.v2i32(<2 x i32>, <2 x i32>)
+
+declare <8 x i16> @llvm.arm64.neon.sqdmulh.v8i16(<8 x i16>, <8 x i16>)
+
+declare <4 x i16> @llvm.arm64.neon.sqdmulh.v4i16(<4 x i16>, <4 x i16>)
+
+declare <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <2 x i64> @llvm.arm64.neon.sqsub.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.arm64.neon.sqsub.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.arm64.neon.sqadd.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.arm64.neon.sqadd.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_vmla_lane_s16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmla_lane_s16:
+; CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %mul = mul <4 x i16> %shuffle, %b
+  %add = add <4 x i16> %mul, %a
+  ret <4 x i16> %add
+}
+
+define <8 x i16> @test_vmlaq_lane_s16(<8 x i16> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlaq_lane_s16:
+; CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+  %mul = mul <8 x i16> %shuffle, %b
+  %add = add <8 x i16> %mul, %a
+  ret <8 x i16> %add
+}
+
+define <2 x i32> @test_vmla_lane_s32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmla_lane_s32:
+; CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %mul = mul <2 x i32> %shuffle, %b
+  %add = add <2 x i32> %mul, %a
+  ret <2 x i32> %add
+}
+
+define <4 x i32> @test_vmlaq_lane_s32(<4 x i32> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlaq_lane_s32:
+; CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %mul = mul <4 x i32> %shuffle, %b
+  %add = add <4 x i32> %mul, %a
+  ret <4 x i32> %add
+}
+
+define <4 x i16> @test_vmla_laneq_s16(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmla_laneq_s16:
+; CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  %mul = mul <4 x i16> %shuffle, %b
+  %add = add <4 x i16> %mul, %a
+  ret <4 x i16> %add
+}
+
+define <8 x i16> @test_vmlaq_laneq_s16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlaq_laneq_s16:
+; CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+  %mul = mul <8 x i16> %shuffle, %b
+  %add = add <8 x i16> %mul, %a
+  ret <8 x i16> %add
+}
+
+define <2 x i32> @test_vmla_laneq_s32(<2 x i32> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmla_laneq_s32:
+; CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+  %mul = mul <2 x i32> %shuffle, %b
+  %add = add <2 x i32> %mul, %a
+  ret <2 x i32> %add
+}
+
+define <4 x i32> @test_vmlaq_laneq_s32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlaq_laneq_s32:
+; CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %mul = mul <4 x i32> %shuffle, %b
+  %add = add <4 x i32> %mul, %a
+  ret <4 x i32> %add
+}
+
+define <4 x i16> @test_vmls_lane_s16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmls_lane_s16:
+; CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %mul = mul <4 x i16> %shuffle, %b
+  %sub = sub <4 x i16> %a, %mul
+  ret <4 x i16> %sub
+}
+
+define <8 x i16> @test_vmlsq_lane_s16(<8 x i16> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlsq_lane_s16:
+; CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+  %mul = mul <8 x i16> %shuffle, %b
+  %sub = sub <8 x i16> %a, %mul
+  ret <8 x i16> %sub
+}
+
+define <2 x i32> @test_vmls_lane_s32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmls_lane_s32:
+; CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %mul = mul <2 x i32> %shuffle, %b
+  %sub = sub <2 x i32> %a, %mul
+  ret <2 x i32> %sub
+}
+
+define <4 x i32> @test_vmlsq_lane_s32(<4 x i32> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlsq_lane_s32:
+; CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %mul = mul <4 x i32> %shuffle, %b
+  %sub = sub <4 x i32> %a, %mul
+  ret <4 x i32> %sub
+}
+
+define <4 x i16> @test_vmls_laneq_s16(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmls_laneq_s16:
+; CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  %mul = mul <4 x i16> %shuffle, %b
+  %sub = sub <4 x i16> %a, %mul
+  ret <4 x i16> %sub
+}
+
+define <8 x i16> @test_vmlsq_laneq_s16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsq_laneq_s16:
+; CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+  %mul = mul <8 x i16> %shuffle, %b
+  %sub = sub <8 x i16> %a, %mul
+  ret <8 x i16> %sub
+}
+
+define <2 x i32> @test_vmls_laneq_s32(<2 x i32> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmls_laneq_s32:
+; CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+  %mul = mul <2 x i32> %shuffle, %b
+  %sub = sub <2 x i32> %a, %mul
+  ret <2 x i32> %sub
+}
+
+define <4 x i32> @test_vmlsq_laneq_s32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsq_laneq_s32:
+; CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %mul = mul <4 x i32> %shuffle, %b
+  %sub = sub <4 x i32> %a, %mul
+  ret <4 x i32> %sub
+}
+
+define <4 x i16> @test_vmul_lane_s16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmul_lane_s16:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %mul = mul <4 x i16> %shuffle, %a
+  ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_lane_s16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmulq_lane_s16:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+  %mul = mul <8 x i16> %shuffle, %a
+  ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_lane_s32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmul_lane_s32:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %mul = mul <2 x i32> %shuffle, %a
+  ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_lane_s32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmulq_lane_s32:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %mul = mul <4 x i32> %shuffle, %a
+  ret <4 x i32> %mul
+}
+
+define <4 x i16> @test_vmul_lane_u16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmul_lane_u16:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %mul = mul <4 x i16> %shuffle, %a
+  ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_lane_u16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmulq_lane_u16:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+  %mul = mul <8 x i16> %shuffle, %a
+  ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_lane_u32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmul_lane_u32:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %mul = mul <2 x i32> %shuffle, %a
+  ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_lane_u32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmulq_lane_u32:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %mul = mul <4 x i32> %shuffle, %a
+  ret <4 x i32> %mul
+}
+
+define <4 x i16> @test_vmul_laneq_s16(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmul_laneq_s16:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  %mul = mul <4 x i16> %shuffle, %a
+  ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_laneq_s16(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmulq_laneq_s16:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+  %mul = mul <8 x i16> %shuffle, %a
+  ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_laneq_s32(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmul_laneq_s32:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+  %mul = mul <2 x i32> %shuffle, %a
+  ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_laneq_s32(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmulq_laneq_s32:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %mul = mul <4 x i32> %shuffle, %a
+  ret <4 x i32> %mul
+}
+
+define <4 x i16> @test_vmul_laneq_u16(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmul_laneq_u16:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  %mul = mul <4 x i16> %shuffle, %a
+  ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_laneq_u16(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmulq_laneq_u16:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+  %mul = mul <8 x i16> %shuffle, %a
+  ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_laneq_u32(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmul_laneq_u32:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+  %mul = mul <2 x i32> %shuffle, %a
+  ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_laneq_u32(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmulq_laneq_u32:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %mul = mul <4 x i32> %shuffle, %a
+  ret <4 x i32> %mul
+}
+
+define <2 x float> @test_vfma_lane_f32(<2 x float> %a, <2 x float> %b, <2 x float> %v) {
+; CHECK-LABEL: test_vfma_lane_f32:
+; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %lane = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> <i32 1, i32 1>
+  %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+  ret <2 x float> %0
+}
+
+declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
+
+define <4 x float> @test_vfmaq_lane_f32(<4 x float> %a, <4 x float> %b, <2 x float> %v) {
+; CHECK-LABEL: test_vfmaq_lane_f32:
+; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %lane = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+  ret <4 x float> %0
+}
+
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
+
+define <2 x float> @test_vfma_laneq_f32(<2 x float> %a, <2 x float> %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfma_laneq_f32:
+; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %lane = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> <i32 3, i32 3>
+  %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+  ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmaq_laneq_f32(<4 x float> %a, <4 x float> %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfmaq_laneq_f32:
+; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %lane = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+  ret <4 x float> %0
+}
+
+define <2 x float> @test_vfms_lane_f32(<2 x float> %a, <2 x float> %b, <2 x float> %v) {
+; CHECK-LABEL: test_vfms_lane_f32:
+; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
+  %lane = shufflevector <2 x float> %sub, <2 x float> undef, <2 x i32> <i32 1, i32 1>
+  %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+  ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmsq_lane_f32(<4 x float> %a, <4 x float> %b, <2 x float> %v) {
+; CHECK-LABEL: test_vfmsq_lane_f32:
+; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
+  %lane = shufflevector <2 x float> %sub, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+  ret <4 x float> %0
+}
+
+define <2 x float> @test_vfms_laneq_f32(<2 x float> %a, <2 x float> %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfms_laneq_f32:
+; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %v
+  %lane = shufflevector <4 x float> %sub, <4 x float> undef, <2 x i32> <i32 3, i32 3>
+  %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+  ret <2 x float> %0
+}
+
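+; Note on the vfms patterns in this file: the IR negates one multiplicand
+; with an fsub from -0.0, e.g.
+;   %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
+; and then feeds the negated lane into llvm.fma; the backend is expected to
+; fold that negation into the fmls lane form checked above.
+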
+define <4 x float> @test_vfmsq_laneq_f32(<4 x float> %a, <4 x float> %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfmsq_laneq_f32:
+; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %v
+  %lane = shufflevector <4 x float> %sub, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+  ret <4 x float> %0
+}
+
+define <2 x double> @test_vfmaq_lane_f64(<2 x double> %a, <2 x double> %b, <1 x double> %v) {
+; CHECK-LABEL: test_vfmaq_lane_f64:
+; CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+  %lane = shufflevector <1 x double> %v, <1 x double> undef, <2 x i32> zeroinitializer
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+  ret <2 x double> %0
+}
+
+declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
+
+define <2 x double> @test_vfmaq_laneq_f64(<2 x double> %a, <2 x double> %b, <2 x double> %v) {
+; CHECK-LABEL: test_vfmaq_laneq_f64:
+; CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
+; CHECK-NEXT: ret
+entry:
+  %lane = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+  ret <2 x double> %0
+}
+
+define <2 x double> @test_vfmsq_lane_f64(<2 x double> %a, <2 x double> %b, <1 x double> %v) {
+; CHECK-LABEL: test_vfmsq_lane_f64:
+; CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+  %sub = fsub <1 x double> <double -0.000000e+00>, %v
+  %lane = shufflevector <1 x double> %sub, <1 x double> undef, <2 x i32> zeroinitializer
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+  ret <2 x double> %0
+}
+
+define <2 x double> @test_vfmsq_laneq_f64(<2 x double> %a, <2 x double> %b, <2 x double> %v) {
+; CHECK-LABEL: test_vfmsq_laneq_f64:
+; CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
+; CHECK-NEXT: ret
+entry:
+  %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %v
+  %lane = shufflevector <2 x double> %sub, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+  ret <2 x double> %0
+}
+
+define float @test_vfmas_laneq_f32(float %a, float %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfmas_laneq_f32
+; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %extract = extractelement <4 x float> %v, i32 3
+  %0 = tail call float @llvm.fma.f32(float %b, float %extract, float %a)
+  ret float %0
+}
+
+declare float @llvm.fma.f32(float, float, float)
+
+define double @test_vfmsd_lane_f64(double %a, double %b, <1 x double> %v) {
+; CHECK-LABEL: test_vfmsd_lane_f64
+; CHECK: fmsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK-NEXT: ret
+entry:
+  %extract.rhs = extractelement <1 x double> %v, i32 0
+  %extract = fsub double -0.000000e+00, %extract.rhs
+  %0 = tail call double @llvm.fma.f64(double %b, double %extract, double %a)
+  ret double %0
+}
+
+declare double @llvm.fma.f64(double, double, double)
+
+define float @test_vfmss_laneq_f32(float %a, float %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfmss_laneq_f32
+; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %extract.rhs = extractelement <4 x float> %v, i32 3
+  %extract = fsub float -0.000000e+00, %extract.rhs
+  %0 = tail call float @llvm.fma.f32(float %b, float %extract, float %a)
+  ret float %0
+}
+
+define double @test_vfmsd_laneq_f64(double %a, double %b, <2 x double> %v) {
+; CHECK-LABEL: test_vfmsd_laneq_f64
+; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK-NEXT: ret
+entry:
+  %extract.rhs = extractelement <2 x double> %v, i32 1
+  %extract = fsub double -0.000000e+00, %extract.rhs
+  %0 = tail call double @llvm.fma.f64(double %b, double %extract, double %a)
+  ret double %0
+}
+
+define <4 x i32> @test_vmlal_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlal_lane_s16:
+; CHECK: smlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+  %add = add <4 x i32> %vmull2.i, %a
+  ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_lane_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlal_lane_s32:
+; CHECK: smlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+  %add = add <2 x i64> %vmull2.i, %a
+  ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_laneq_s16(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlal_laneq_s16:
+; CHECK: smlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+  %add = add <4 x i32> %vmull2.i, %a
+  ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_laneq_s32(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlal_laneq_s32:
+; CHECK: smlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+  %add = add <2 x i64> %vmull2.i, %a
+  ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_lane_s16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlal_high_lane_s16:
+; CHECK: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  %add = add <4 x i32> %vmull2.i, %a
+  ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_lane_s32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlal_high_lane_s32:
+; CHECK: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  %add = add <2 x i64> %vmull2.i, %a
+  ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_laneq_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlal_high_laneq_s16:
+; CHECK: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  %add = add <4 x i32> %vmull2.i, %a
+  ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_laneq_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlal_high_laneq_s32:
+; CHECK: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  %add = add <2 x i64> %vmull2.i, %a
+  ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlsl_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_lane_s16:
+; CHECK: smlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+  %sub = sub <4 x i32> %a, %vmull2.i
+  ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_lane_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_lane_s32:
+; CHECK: smlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+  %sub = sub <2 x i64> %a, %vmull2.i
+  ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_laneq_s16(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_laneq_s16:
+; CHECK: smlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+  %sub = sub <4 x i32> %a, %vmull2.i
+  ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_laneq_s32(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_laneq_s32:
+; CHECK: smlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+  %sub = sub <2 x i64> %a, %vmull2.i
+  ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_lane_s16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_high_lane_s16:
+; CHECK: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  %sub = sub <4 x i32> %a, %vmull2.i
+  ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_lane_s32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_high_lane_s32:
+; CHECK: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  %sub = sub <2 x i64> %a, %vmull2.i
+  ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_laneq_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_high_laneq_s16:
+; CHECK: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  %sub = sub <4 x i32> %a, %vmull2.i
+  ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_laneq_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_high_laneq_s32:
+; CHECK: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  %sub = sub <2 x i64> %a, %vmull2.i
+  ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlal_lane_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlal_lane_u16:
+; CHECK: umlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+  %add = add <4 x i32> %vmull2.i, %a
+  ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_lane_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlal_lane_u32:
+; CHECK: umlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+  %add = add <2 x i64> %vmull2.i, %a
+  ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_laneq_u16(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlal_laneq_u16:
+; CHECK: umlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+  %add = add <4 x i32> %vmull2.i, %a
+  ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_laneq_u32(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlal_laneq_u32:
+; CHECK: umlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+  %add = add <2 x i64> %vmull2.i, %a
+  ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_lane_u16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlal_high_lane_u16:
+; CHECK: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  %add = add <4 x i32> %vmull2.i, %a
+  ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_lane_u32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlal_high_lane_u32:
+; CHECK: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  %add = add <2 x i64> %vmull2.i, %a
+  ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_laneq_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlal_high_laneq_u16:
+; CHECK: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  %add = add <4 x i32> %vmull2.i, %a
+  ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_laneq_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlal_high_laneq_u32:
+; CHECK: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  %add = add <2 x i64> %vmull2.i, %a
+  ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlsl_lane_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_lane_u16:
+; CHECK: umlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+  %sub = sub <4 x i32> %a, %vmull2.i
+  ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_lane_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_lane_u32:
+; CHECK: umlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+  %sub = sub <2 x i64> %a, %vmull2.i
+  ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_laneq_u16(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_laneq_u16:
+; CHECK: umlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+  %sub = sub <4 x i32> %a, %vmull2.i
+  ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_laneq_u32(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_laneq_u32:
+; CHECK: umlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+  %sub = sub <2 x i64> %a, %vmull2.i
+  ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_lane_u16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_high_lane_u16:
+; CHECK: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  %sub = sub <4 x i32> %a, %vmull2.i
+  ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_lane_u32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_high_lane_u32:
+; CHECK: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  %sub = sub <2 x i64> %a, %vmull2.i
+  ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_laneq_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_high_laneq_u16:
+; CHECK: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  %sub = sub <4 x i32> %a, %vmull2.i
+  ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_laneq_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_high_laneq_u32:
+; CHECK: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  %sub = sub <2 x i64> %a, %vmull2.i
+  ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmull_lane_s16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmull_lane_s16:
+; CHECK: smull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+  ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_lane_s32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmull_lane_s32:
+; CHECK: smull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+  ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_lane_u16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmull_lane_u16:
+; CHECK: umull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+  ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_lane_u32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmull_lane_u32:
+; CHECK: umull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+  ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_lane_s16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmull_high_lane_s16:
+; CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_lane_s32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmull_high_lane_s32:
+; CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_lane_u16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmull_high_lane_u16:
+; CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_lane_u32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmull_high_lane_u32:
+; CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_laneq_s16(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmull_laneq_s16:
+; CHECK: smull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+  ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_laneq_s32(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmull_laneq_s32:
+; CHECK: smull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+  ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_laneq_u16(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmull_laneq_u16:
+; CHECK: umull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+  ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_laneq_u32(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmull_laneq_u32:
+; CHECK: umull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+  ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_laneq_s16(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmull_high_laneq_s16:
+; CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_laneq_s32(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmull_high_laneq_s32:
+; CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_laneq_u16(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmull_high_laneq_u16:
+; CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_laneq_u32(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmull_high_laneq_u32:
+; CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vqdmlal_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmlal_lane_s16:
+; CHECK: sqdmlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %vqdmlal2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+  %vqdmlal4.i = tail call <4 x i32> @llvm.arm64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
+  ret <4 x i32> %vqdmlal4.i
+}
+
+define <2 x i64> @test_vqdmlal_lane_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmlal_lane_s32:
+; CHECK: sqdmlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  %vqdmlal2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+  %vqdmlal4.i = tail call <2 x i64> @llvm.arm64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
+  ret <2 x i64> %vqdmlal4.i
+}
+
+define <4 x i32> @test_vqdmlal_high_lane_s16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmlal_high_lane_s16:
+; CHECK: sqdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+  %vqdmlal2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  %vqdmlal4.i = tail call <4 x i32> @llvm.arm64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
+  ret <4 x i32> %vqdmlal4.i
+}
+
+define <2 x i64> @test_vqdmlal_high_lane_s32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmlal_high_lane_s32:
+; CHECK: sqdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4
x i32> %b, <4 x i32> undef, <2 x i32> + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> + %vqdmlal2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle) + %vqdmlal4.i = tail call <2 x i64> @llvm.arm64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i) + ret <2 x i64> %vqdmlal4.i +} + +define <4 x i32> @test_vqdmlsl_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) { +; CHECK-LABEL: test_vqdmlsl_lane_s16: +; CHECK: qdmlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> + %vqdmlsl2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %b, <4 x i16> %shuffle) + %vqdmlsl4.i = tail call <4 x i32> @llvm.arm64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i) + ret <4 x i32> %vqdmlsl4.i +} + +define <2 x i64> @test_vqdmlsl_lane_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) { +; CHECK-LABEL: test_vqdmlsl_lane_s32: +; CHECK: qdmlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> + %vqdmlsl2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %b, <2 x i32> %shuffle) + %vqdmlsl4.i = tail call <2 x i64> @llvm.arm64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i) + ret <2 x i64> %vqdmlsl4.i +} + +define <4 x i32> @test_vqdmlsl_high_lane_s16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) { +; CHECK-LABEL: test_vqdmlsl_high_lane_s16: +; CHECK: qdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> + %vqdmlsl2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle) + %vqdmlsl4.i = tail call <4 x i32> @llvm.arm64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i) + ret <4 x i32> %vqdmlsl4.i +} + +define <2 x i64> @test_vqdmlsl_high_lane_s32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) { +; CHECK-LABEL: test_vqdmlsl_high_lane_s32: +; CHECK: qdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> + %vqdmlsl2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle) + %vqdmlsl4.i = tail call <2 x i64> @llvm.arm64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i) + ret <2 x i64> %vqdmlsl4.i +} + +define <4 x i32> @test_vqdmull_lane_s16(<4 x i16> %a, <4 x i16> %v) { +; CHECK-LABEL: test_vqdmull_lane_s16: +; CHECK: qdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> + %vqdmull2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %a, <4 x i16> %shuffle) + ret <4 x i32> %vqdmull2.i +} + +define <2 x i64> @test_vqdmull_lane_s32(<2 x i32> %a, <2 x i32> %v) { +; CHECK-LABEL: test_vqdmull_lane_s32: +; CHECK: qdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> + %vqdmull2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %a, <2 x i32> %shuffle) + ret <2 x i64> %vqdmull2.i +} + +define <4 x i32> @test_vqdmull_laneq_s16(<4 x i16> %a, <8 x i16> %v) { +; CHECK-LABEL: test_vqdmull_laneq_s16: +; CHECK: qdmull 
{{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> + %vqdmull2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %a, <4 x i16> %shuffle) + ret <4 x i32> %vqdmull2.i +} + +define <2 x i64> @test_vqdmull_laneq_s32(<2 x i32> %a, <4 x i32> %v) { +; CHECK-LABEL: test_vqdmull_laneq_s32: +; CHECK: qdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> + %vqdmull2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %a, <2 x i32> %shuffle) + ret <2 x i64> %vqdmull2.i +} + +define <4 x i32> @test_vqdmull_high_lane_s16(<8 x i16> %a, <4 x i16> %v) { +; CHECK-LABEL: test_vqdmull_high_lane_s16: +; CHECK: qdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> + %vqdmull2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle) + ret <4 x i32> %vqdmull2.i +} + +define <2 x i64> @test_vqdmull_high_lane_s32(<4 x i32> %a, <2 x i32> %v) { +; CHECK-LABEL: test_vqdmull_high_lane_s32: +; CHECK: qdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> + %vqdmull2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle) + ret <2 x i64> %vqdmull2.i +} + +define <4 x i32> @test_vqdmull_high_laneq_s16(<8 x i16> %a, <8 x i16> %v) { +; CHECK-LABEL: test_vqdmull_high_laneq_s16: +; CHECK: qdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> + %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> + %vqdmull2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle) + ret <4 x i32> %vqdmull2.i +} + +define <2 x i64> @test_vqdmull_high_laneq_s32(<4 x i32> %a, <4 x i32> %v) { +; CHECK-LABEL: test_vqdmull_high_laneq_s32: +; CHECK: qdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> + %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> + %vqdmull2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle) + ret <2 x i64> %vqdmull2.i +} + +define <4 x i16> @test_vqdmulh_lane_s16(<4 x i16> %a, <4 x i16> %v) { +; CHECK-LABEL: test_vqdmulh_lane_s16: +; CHECK: qdmulh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> + %vqdmulh2.i = tail call <4 x i16> @llvm.arm64.neon.sqdmulh.v4i16(<4 x i16> %a, <4 x i16> %shuffle) + ret <4 x i16> %vqdmulh2.i +} + +define <8 x i16> @test_vqdmulhq_lane_s16(<8 x i16> %a, <4 x i16> %v) { +; CHECK-LABEL: test_vqdmulhq_lane_s16: +; CHECK: qdmulh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> + %vqdmulh2.i = tail call <8 x i16> @llvm.arm64.neon.sqdmulh.v8i16(<8 x i16> %a, <8 x i16> %shuffle) + ret <8 x i16> %vqdmulh2.i +} + +define <2 x i32> @test_vqdmulh_lane_s32(<2 x i32> %a, <2 x i32> %v) { +; 
CHECK-LABEL: test_vqdmulh_lane_s32: +; CHECK: qdmulh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> + %vqdmulh2.i = tail call <2 x i32> @llvm.arm64.neon.sqdmulh.v2i32(<2 x i32> %a, <2 x i32> %shuffle) + ret <2 x i32> %vqdmulh2.i +} + +define <4 x i32> @test_vqdmulhq_lane_s32(<4 x i32> %a, <2 x i32> %v) { +; CHECK-LABEL: test_vqdmulhq_lane_s32: +; CHECK: qdmulh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> + %vqdmulh2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmulh.v4i32(<4 x i32> %a, <4 x i32> %shuffle) + ret <4 x i32> %vqdmulh2.i +} + +define <4 x i16> @test_vqrdmulh_lane_s16(<4 x i16> %a, <4 x i16> %v) { +; CHECK-LABEL: test_vqrdmulh_lane_s16: +; CHECK: qrdmulh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> + %vqrdmulh2.i = tail call <4 x i16> @llvm.arm64.neon.sqrdmulh.v4i16(<4 x i16> %a, <4 x i16> %shuffle) + ret <4 x i16> %vqrdmulh2.i +} + +define <8 x i16> @test_vqrdmulhq_lane_s16(<8 x i16> %a, <4 x i16> %v) { +; CHECK-LABEL: test_vqrdmulhq_lane_s16: +; CHECK: qrdmulh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> + %vqrdmulh2.i = tail call <8 x i16> @llvm.arm64.neon.sqrdmulh.v8i16(<8 x i16> %a, <8 x i16> %shuffle) + ret <8 x i16> %vqrdmulh2.i +} + +define <2 x i32> @test_vqrdmulh_lane_s32(<2 x i32> %a, <2 x i32> %v) { +; CHECK-LABEL: test_vqrdmulh_lane_s32: +; CHECK: qrdmulh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> + %vqrdmulh2.i = tail call <2 x i32> @llvm.arm64.neon.sqrdmulh.v2i32(<2 x i32> %a, <2 x i32> %shuffle) + ret <2 x i32> %vqrdmulh2.i +} + +define <4 x i32> @test_vqrdmulhq_lane_s32(<4 x i32> %a, <2 x i32> %v) { +; CHECK-LABEL: test_vqrdmulhq_lane_s32: +; CHECK: qrdmulh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> + %vqrdmulh2.i = tail call <4 x i32> @llvm.arm64.neon.sqrdmulh.v4i32(<4 x i32> %a, <4 x i32> %shuffle) + ret <4 x i32> %vqrdmulh2.i +} + +define <2 x float> @test_vmul_lane_f32(<2 x float> %a, <2 x float> %v) { +; CHECK-LABEL: test_vmul_lane_f32: +; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> + %mul = fmul <2 x float> %shuffle, %a + ret <2 x float> %mul +} + +define <1 x double> @test_vmul_lane_f64(<1 x double> %a, <1 x double> %v) { +; CHECK-LABEL: test_vmul_lane_f64: +; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}} +; CHECK-NEXT: ret +entry: + %0 = bitcast <1 x double> %a to <8 x i8> + %1 = bitcast <8 x i8> %0 to double + %extract = extractelement <1 x double> %v, i32 0 + %2 = fmul double %1, %extract + %3 = insertelement <1 x double> undef, double %2, i32 0 + ret <1 x double> %3 +} + +define <4 x float> @test_vmulq_lane_f32(<4 x float> %a, <2 x float> %v) { +; CHECK-LABEL: test_vmulq_lane_f32: +; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> + %mul = fmul <4 x float> %shuffle, %a + ret <4 x float> %mul +} + +define <2 x double> @test_vmulq_lane_f64(<2 
x double> %a, <1 x double> %v) { +; CHECK-LABEL: test_vmulq_lane_f64: +; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <1 x double> %v, <1 x double> undef, <2 x i32> zeroinitializer + %mul = fmul <2 x double> %shuffle, %a + ret <2 x double> %mul +} + +define <2 x float> @test_vmul_laneq_f32(<2 x float> %a, <4 x float> %v) { +; CHECK-LABEL: test_vmul_laneq_f32: +; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> + %mul = fmul <2 x float> %shuffle, %a + ret <2 x float> %mul +} + +define <1 x double> @test_vmul_laneq_f64(<1 x double> %a, <2 x double> %v) { +; CHECK-LABEL: test_vmul_laneq_f64: +; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1] +; CHECK-NEXT: ret +entry: + %0 = bitcast <1 x double> %a to <8 x i8> + %1 = bitcast <8 x i8> %0 to double + %extract = extractelement <2 x double> %v, i32 1 + %2 = fmul double %1, %extract + %3 = insertelement <1 x double> undef, double %2, i32 0 + ret <1 x double> %3 +} + +define <4 x float> @test_vmulq_laneq_f32(<4 x float> %a, <4 x float> %v) { +; CHECK-LABEL: test_vmulq_laneq_f32: +; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> + %mul = fmul <4 x float> %shuffle, %a + ret <4 x float> %mul +} + +define <2 x double> @test_vmulq_laneq_f64(<2 x double> %a, <2 x double> %v) { +; CHECK-LABEL: test_vmulq_laneq_f64: +; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> + %mul = fmul <2 x double> %shuffle, %a + ret <2 x double> %mul +} + +define <2 x float> @test_vmulx_lane_f32(<2 x float> %a, <2 x float> %v) { +; CHECK-LABEL: test_vmulx_lane_f32: +; CHECK: mulx {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> + %vmulx2.i = tail call <2 x float> @llvm.arm64.neon.fmulx.v2f32(<2 x float> %a, <2 x float> %shuffle) + ret <2 x float> %vmulx2.i +} + +define <4 x float> @test_vmulxq_lane_f32(<4 x float> %a, <2 x float> %v) { +; CHECK-LABEL: test_vmulxq_lane_f32: +; CHECK: mulx {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> + %vmulx2.i = tail call <4 x float> @llvm.arm64.neon.fmulx.v4f32(<4 x float> %a, <4 x float> %shuffle) + ret <4 x float> %vmulx2.i +} + +define <2 x double> @test_vmulxq_lane_f64(<2 x double> %a, <1 x double> %v) { +; CHECK-LABEL: test_vmulxq_lane_f64: +; CHECK: mulx {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <1 x double> %v, <1 x double> undef, <2 x i32> zeroinitializer + %vmulx2.i = tail call <2 x double> @llvm.arm64.neon.fmulx.v2f64(<2 x double> %a, <2 x double> %shuffle) + ret <2 x double> %vmulx2.i +} + +define <2 x float> @test_vmulx_laneq_f32(<2 x float> %a, <4 x float> %v) { +; CHECK-LABEL: test_vmulx_laneq_f32: +; CHECK: mulx {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> + %vmulx2.i = tail call <2 x float> @llvm.arm64.neon.fmulx.v2f32(<2 x float> %a, <2 x float> %shuffle) + ret <2 x float> %vmulx2.i +} + +define <4 x float> @test_vmulxq_laneq_f32(<4 x float> %a, <4 x float> %v) 
{ +; CHECK-LABEL: test_vmulxq_laneq_f32: +; CHECK: mulx {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> + %vmulx2.i = tail call <4 x float> @llvm.arm64.neon.fmulx.v4f32(<4 x float> %a, <4 x float> %shuffle) + ret <4 x float> %vmulx2.i +} + +define <2 x double> @test_vmulxq_laneq_f64(<2 x double> %a, <2 x double> %v) { +; CHECK-LABEL: test_vmulxq_laneq_f64: +; CHECK: mulx {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> + %vmulx2.i = tail call <2 x double> @llvm.arm64.neon.fmulx.v2f64(<2 x double> %a, <2 x double> %shuffle) + ret <2 x double> %vmulx2.i +} + +define <4 x i16> @test_vmla_lane_s16_0(<4 x i16> %a, <4 x i16> %b, <4 x i16> %v) { +; CHECK-LABEL: test_vmla_lane_s16_0: +; CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer + %mul = mul <4 x i16> %shuffle, %b + %add = add <4 x i16> %mul, %a + ret <4 x i16> %add +} + +define <8 x i16> @test_vmlaq_lane_s16_0(<8 x i16> %a, <8 x i16> %b, <4 x i16> %v) { +; CHECK-LABEL: test_vmlaq_lane_s16_0: +; CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer + %mul = mul <8 x i16> %shuffle, %b + %add = add <8 x i16> %mul, %a + ret <8 x i16> %add +} + +define <2 x i32> @test_vmla_lane_s32_0(<2 x i32> %a, <2 x i32> %b, <2 x i32> %v) { +; CHECK-LABEL: test_vmla_lane_s32_0: +; CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer + %mul = mul <2 x i32> %shuffle, %b + %add = add <2 x i32> %mul, %a + ret <2 x i32> %add +} + +define <4 x i32> @test_vmlaq_lane_s32_0(<4 x i32> %a, <4 x i32> %b, <2 x i32> %v) { +; CHECK-LABEL: test_vmlaq_lane_s32_0: +; CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer + %mul = mul <4 x i32> %shuffle, %b + %add = add <4 x i32> %mul, %a + ret <4 x i32> %add +} + +define <4 x i16> @test_vmla_laneq_s16_0(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) { +; CHECK-LABEL: test_vmla_laneq_s16_0: +; CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer + %mul = mul <4 x i16> %shuffle, %b + %add = add <4 x i16> %mul, %a + ret <4 x i16> %add +} + +define <8 x i16> @test_vmlaq_laneq_s16_0(<8 x i16> %a, <8 x i16> %b, <8 x i16> %v) { +; CHECK-LABEL: test_vmlaq_laneq_s16_0: +; CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> zeroinitializer + %mul = mul <8 x i16> %shuffle, %b + %add = add <8 x i16> %mul, %a + ret <8 x i16> %add +} + +define <2 x i32> @test_vmla_laneq_s32_0(<2 x i32> %a, <2 x i32> %b, <4 x i32> %v) { +; CHECK-LABEL: test_vmla_laneq_s32_0: +; CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer + %mul = mul <2 x i32> %shuffle, %b + %add = add <2 x i32> %mul, %a + ret <2 x i32> %add +} + +define <4 x i32> @test_vmlaq_laneq_s32_0(<4 x 
+; CHECK-LABEL: test_vmlaq_laneq_s32_0:
+; CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
+  %mul = mul <4 x i32> %shuffle, %b
+  %add = add <4 x i32> %mul, %a
+  ret <4 x i32> %add
+}
+
+define <4 x i16> @test_vmls_lane_s16_0(<4 x i16> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmls_lane_s16_0:
+; CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+  %mul = mul <4 x i16> %shuffle, %b
+  %sub = sub <4 x i16> %a, %mul
+  ret <4 x i16> %sub
+}
+
+define <8 x i16> @test_vmlsq_lane_s16_0(<8 x i16> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlsq_lane_s16_0:
+; CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
+  %mul = mul <8 x i16> %shuffle, %b
+  %sub = sub <8 x i16> %a, %mul
+  ret <8 x i16> %sub
+}
+
+define <2 x i32> @test_vmls_lane_s32_0(<2 x i32> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmls_lane_s32_0:
+; CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+  %mul = mul <2 x i32> %shuffle, %b
+  %sub = sub <2 x i32> %a, %mul
+  ret <2 x i32> %sub
+}
+
+define <4 x i32> @test_vmlsq_lane_s32_0(<4 x i32> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlsq_lane_s32_0:
+; CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
+  %mul = mul <4 x i32> %shuffle, %b
+  %sub = sub <4 x i32> %a, %mul
+  ret <4 x i32> %sub
+}
+
+define <4 x i16> @test_vmls_laneq_s16_0(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmls_laneq_s16_0:
+; CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+  %mul = mul <4 x i16> %shuffle, %b
+  %sub = sub <4 x i16> %a, %mul
+  ret <4 x i16> %sub
+}
+
+define <8 x i16> @test_vmlsq_laneq_s16_0(<8 x i16> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsq_laneq_s16_0:
+; CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> zeroinitializer
+  %mul = mul <8 x i16> %shuffle, %b
+  %sub = sub <8 x i16> %a, %mul
+  ret <8 x i16> %sub
+}
+
+define <2 x i32> @test_vmls_laneq_s32_0(<2 x i32> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmls_laneq_s32_0:
+; CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+  %mul = mul <2 x i32> %shuffle, %b
+  %sub = sub <2 x i32> %a, %mul
+  ret <2 x i32> %sub
+}
+
+define <4 x i32> @test_vmlsq_laneq_s32_0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsq_laneq_s32_0:
+; CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
+  %mul = mul <4 x i32> %shuffle, %b
+  %sub = sub <4 x i32> %a, %mul
+  ret <4 x i32> %sub
+}
+
+define <4 x i16> @test_vmul_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmul_lane_s16_0: +; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer + %mul = mul <4 x i16> %shuffle, %a + ret <4 x i16> %mul +} + +define <8 x i16> @test_vmulq_lane_s16_0(<8 x i16> %a, <4 x i16> %v) { +; CHECK-LABEL: test_vmulq_lane_s16_0: +; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer + %mul = mul <8 x i16> %shuffle, %a + ret <8 x i16> %mul +} + +define <2 x i32> @test_vmul_lane_s32_0(<2 x i32> %a, <2 x i32> %v) { +; CHECK-LABEL: test_vmul_lane_s32_0: +; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer + %mul = mul <2 x i32> %shuffle, %a + ret <2 x i32> %mul +} + +define <4 x i32> @test_vmulq_lane_s32_0(<4 x i32> %a, <2 x i32> %v) { +; CHECK-LABEL: test_vmulq_lane_s32_0: +; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer + %mul = mul <4 x i32> %shuffle, %a + ret <4 x i32> %mul +} + +define <4 x i16> @test_vmul_lane_u16_0(<4 x i16> %a, <4 x i16> %v) { +; CHECK-LABEL: test_vmul_lane_u16_0: +; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer + %mul = mul <4 x i16> %shuffle, %a + ret <4 x i16> %mul +} + +define <8 x i16> @test_vmulq_lane_u16_0(<8 x i16> %a, <4 x i16> %v) { +; CHECK-LABEL: test_vmulq_lane_u16_0: +; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer + %mul = mul <8 x i16> %shuffle, %a + ret <8 x i16> %mul +} + +define <2 x i32> @test_vmul_lane_u32_0(<2 x i32> %a, <2 x i32> %v) { +; CHECK-LABEL: test_vmul_lane_u32_0: +; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer + %mul = mul <2 x i32> %shuffle, %a + ret <2 x i32> %mul +} + +define <4 x i32> @test_vmulq_lane_u32_0(<4 x i32> %a, <2 x i32> %v) { +; CHECK-LABEL: test_vmulq_lane_u32_0: +; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer + %mul = mul <4 x i32> %shuffle, %a + ret <4 x i32> %mul +} + +define <4 x i16> @test_vmul_laneq_s16_0(<4 x i16> %a, <8 x i16> %v) { +; CHECK-LABEL: test_vmul_laneq_s16_0: +; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer + %mul = mul <4 x i16> %shuffle, %a + ret <4 x i16> %mul +} + +define <8 x i16> @test_vmulq_laneq_s16_0(<8 x i16> %a, <8 x i16> %v) { +; CHECK-LABEL: test_vmulq_laneq_s16_0: +; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> zeroinitializer + %mul = mul <8 x i16> %shuffle, %a + ret <8 x i16> %mul +} + +define <2 x i32> @test_vmul_laneq_s32_0(<2 x i32> %a, <4 x i32> %v) { +; CHECK-LABEL: test_vmul_laneq_s32_0: +; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, 
{{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer + %mul = mul <2 x i32> %shuffle, %a + ret <2 x i32> %mul +} + +define <4 x i32> @test_vmulq_laneq_s32_0(<4 x i32> %a, <4 x i32> %v) { +; CHECK-LABEL: test_vmulq_laneq_s32_0: +; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer + %mul = mul <4 x i32> %shuffle, %a + ret <4 x i32> %mul +} + +define <4 x i16> @test_vmul_laneq_u16_0(<4 x i16> %a, <8 x i16> %v) { +; CHECK-LABEL: test_vmul_laneq_u16_0: +; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer + %mul = mul <4 x i16> %shuffle, %a + ret <4 x i16> %mul +} + +define <8 x i16> @test_vmulq_laneq_u16_0(<8 x i16> %a, <8 x i16> %v) { +; CHECK-LABEL: test_vmulq_laneq_u16_0: +; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> zeroinitializer + %mul = mul <8 x i16> %shuffle, %a + ret <8 x i16> %mul +} + +define <2 x i32> @test_vmul_laneq_u32_0(<2 x i32> %a, <4 x i32> %v) { +; CHECK-LABEL: test_vmul_laneq_u32_0: +; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer + %mul = mul <2 x i32> %shuffle, %a + ret <2 x i32> %mul +} + +define <4 x i32> @test_vmulq_laneq_u32_0(<4 x i32> %a, <4 x i32> %v) { +; CHECK-LABEL: test_vmulq_laneq_u32_0: +; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer + %mul = mul <4 x i32> %shuffle, %a + ret <4 x i32> %mul +} + +define <2 x float> @test_vfma_lane_f32_0(<2 x float> %a, <2 x float> %b, <2 x float> %v) { +; CHECK-LABEL: test_vfma_lane_f32_0: +; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %lane = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> zeroinitializer + %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a) + ret <2 x float> %0 +} + +define <4 x float> @test_vfmaq_lane_f32_0(<4 x float> %a, <4 x float> %b, <2 x float> %v) { +; CHECK-LABEL: test_vfmaq_lane_f32_0: +; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %lane = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> zeroinitializer + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a) + ret <4 x float> %0 +} + +define <2 x float> @test_vfma_laneq_f32_0(<2 x float> %a, <2 x float> %b, <4 x float> %v) { +; CHECK-LABEL: test_vfma_laneq_f32_0: +; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %lane = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> zeroinitializer + %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a) + ret <2 x float> %0 +} + +define <4 x float> @test_vfmaq_laneq_f32_0(<4 x float> %a, <4 x float> %b, <4 x float> %v) { +; CHECK-LABEL: test_vfmaq_laneq_f32_0: +; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %lane = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer + %0 = tail call <4 x float> 
@llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+  ret <4 x float> %0
+}
+
+define <2 x float> @test_vfms_lane_f32_0(<2 x float> %a, <2 x float> %b, <2 x float> %v) {
+; CHECK-LABEL: test_vfms_lane_f32_0:
+; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
+  %lane = shufflevector <2 x float> %sub, <2 x float> undef, <2 x i32> zeroinitializer
+  %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+  ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmsq_lane_f32_0(<4 x float> %a, <4 x float> %b, <2 x float> %v) {
+; CHECK-LABEL: test_vfmsq_lane_f32_0:
+; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
+  %lane = shufflevector <2 x float> %sub, <2 x float> undef, <4 x i32> zeroinitializer
+  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+  ret <4 x float> %0
+}
+
+define <2 x float> @test_vfms_laneq_f32_0(<2 x float> %a, <2 x float> %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfms_laneq_f32_0:
+; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %v
+  %lane = shufflevector <4 x float> %sub, <4 x float> undef, <2 x i32> zeroinitializer
+  %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+  ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmsq_laneq_f32_0(<4 x float> %a, <4 x float> %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfmsq_laneq_f32_0:
+; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %v
+  %lane = shufflevector <4 x float> %sub, <4 x float> undef, <4 x i32> zeroinitializer
+  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+  ret <4 x float> %0
+}
+
+define <2 x double> @test_vfmaq_laneq_f64_0(<2 x double> %a, <2 x double> %b, <2 x double> %v) {
+; CHECK-LABEL: test_vfmaq_laneq_f64_0:
+; CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+  %lane = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> zeroinitializer
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+  ret <2 x double> %0
+}
+
+define <2 x double> @test_vfmsq_laneq_f64_0(<2 x double> %a, <2 x double> %b, <2 x double> %v) {
+; CHECK-LABEL: test_vfmsq_laneq_f64_0:
+; CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+  %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %v
+  %lane = shufflevector <2 x double> %sub, <2 x double> undef, <2 x i32> zeroinitializer
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+  ret <2 x double> %0
+}
+
+define <4 x i32> @test_vmlal_lane_s16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vmlal_lane_s16_0:
+; CHECK: smlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+  %add = add <4 x i32> %vmull2.i, %a
+  ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_lane_s32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vmlal_lane_s32_0:
+; CHECK: smlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle =
shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer + %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle) + %add = add <2 x i64> %vmull2.i, %a + ret <2 x i64> %add +} + +define <4 x i32> @test_vmlal_laneq_s16_0(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) { +; CHECK-LABEL: test_vmlal_laneq_s16_0: +; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer + %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle) + %add = add <4 x i32> %vmull2.i, %a + ret <4 x i32> %add +} + +define <2 x i64> @test_vmlal_laneq_s32_0(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) { +; CHECK-LABEL: test_vmlal_laneq_s32_0: +; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer + %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle) + %add = add <2 x i64> %vmull2.i, %a + ret <2 x i64> %add +} + +define <4 x i32> @test_vmlal_high_lane_s16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) { +; CHECK-LABEL: test_vmlal_high_lane_s16_0: +; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer + %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle) + %add = add <4 x i32> %vmull2.i, %a + ret <4 x i32> %add +} + +define <2 x i64> @test_vmlal_high_lane_s32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) { +; CHECK-LABEL: test_vmlal_high_lane_s32_0: +; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer + %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle) + %add = add <2 x i64> %vmull2.i, %a + ret <2 x i64> %add +} + +define <4 x i32> @test_vmlal_high_laneq_s16_0(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) { +; CHECK-LABEL: test_vmlal_high_laneq_s16_0: +; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> + %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer + %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle) + %add = add <4 x i32> %vmull2.i, %a + ret <4 x i32> %add +} + +define <2 x i64> @test_vmlal_high_laneq_s32_0(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) { +; CHECK-LABEL: test_vmlal_high_laneq_s32_0: +; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> + %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer + %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle) + %add = add <2 x i64> %vmull2.i, %a + ret <2 x i64> %add +} + +define <4 x i32> @test_vmlsl_lane_s16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) { +; CHECK-LABEL: test_vmlsl_lane_s16_0: +; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: 
ret +entry: + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer + %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle) + %sub = sub <4 x i32> %a, %vmull2.i + ret <4 x i32> %sub +} + +define <2 x i64> @test_vmlsl_lane_s32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) { +; CHECK-LABEL: test_vmlsl_lane_s32_0: +; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer + %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle) + %sub = sub <2 x i64> %a, %vmull2.i + ret <2 x i64> %sub +} + +define <4 x i32> @test_vmlsl_laneq_s16_0(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) { +; CHECK-LABEL: test_vmlsl_laneq_s16_0: +; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer + %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %shuffle) + %sub = sub <4 x i32> %a, %vmull2.i + ret <4 x i32> %sub +} + +define <2 x i64> @test_vmlsl_laneq_s32_0(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) { +; CHECK-LABEL: test_vmlsl_laneq_s32_0: +; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer + %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %shuffle) + %sub = sub <2 x i64> %a, %vmull2.i + ret <2 x i64> %sub +} + +define <4 x i32> @test_vmlsl_high_lane_s16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) { +; CHECK-LABEL: test_vmlsl_high_lane_s16_0: +; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer + %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle) + %sub = sub <4 x i32> %a, %vmull2.i + ret <4 x i32> %sub +} + +define <2 x i64> @test_vmlsl_high_lane_s32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) { +; CHECK-LABEL: test_vmlsl_high_lane_s32_0: +; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer + %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle) + %sub = sub <2 x i64> %a, %vmull2.i + ret <2 x i64> %sub +} + +define <4 x i32> @test_vmlsl_high_laneq_s16_0(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) { +; CHECK-LABEL: test_vmlsl_high_laneq_s16_0: +; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> + %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer + %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle) + %sub = sub <4 x i32> %a, %vmull2.i + ret <4 x i32> %sub +} + +define <2 x i64> @test_vmlsl_high_laneq_s32_0(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) { +; CHECK-LABEL: test_vmlsl_high_laneq_s32_0: +; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <4 x i32> %b, 
<4 x i32> undef, <2 x i32> + %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer + %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle) + %sub = sub <2 x i64> %a, %vmull2.i + ret <2 x i64> %sub +} + +define <4 x i32> @test_vmlal_lane_u16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) { +; CHECK-LABEL: test_vmlal_lane_u16_0: +; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer + %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %shuffle) + %add = add <4 x i32> %vmull2.i, %a + ret <4 x i32> %add +} + +define <2 x i64> @test_vmlal_lane_u32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) { +; CHECK-LABEL: test_vmlal_lane_u32_0: +; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer + %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %shuffle) + %add = add <2 x i64> %vmull2.i, %a + ret <2 x i64> %add +} + +define <4 x i32> @test_vmlal_laneq_u16_0(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) { +; CHECK-LABEL: test_vmlal_laneq_u16_0: +; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer + %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %shuffle) + %add = add <4 x i32> %vmull2.i, %a + ret <4 x i32> %add +} + +define <2 x i64> @test_vmlal_laneq_u32_0(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) { +; CHECK-LABEL: test_vmlal_laneq_u32_0: +; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer + %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %shuffle) + %add = add <2 x i64> %vmull2.i, %a + ret <2 x i64> %add +} + +define <4 x i32> @test_vmlal_high_lane_u16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) { +; CHECK-LABEL: test_vmlal_high_lane_u16_0: +; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer + %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle) + %add = add <4 x i32> %vmull2.i, %a + ret <4 x i32> %add +} + +define <2 x i64> @test_vmlal_high_lane_u32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) { +; CHECK-LABEL: test_vmlal_high_lane_u32_0: +; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer + %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle) + %add = add <2 x i64> %vmull2.i, %a + ret <2 x i64> %add +} + +define <4 x i32> @test_vmlal_high_laneq_u16_0(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) { +; CHECK-LABEL: test_vmlal_high_laneq_u16_0: +; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> + %shuffle = shufflevector <8 x i16> %v, 
<8 x i16> undef, <4 x i32> zeroinitializer + %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle) + %add = add <4 x i32> %vmull2.i, %a + ret <4 x i32> %add +} + +define <2 x i64> @test_vmlal_high_laneq_u32_0(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) { +; CHECK-LABEL: test_vmlal_high_laneq_u32_0: +; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> + %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer + %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle) + %add = add <2 x i64> %vmull2.i, %a + ret <2 x i64> %add +} + +define <4 x i32> @test_vmlsl_lane_u16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) { +; CHECK-LABEL: test_vmlsl_lane_u16_0: +; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer + %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %shuffle) + %sub = sub <4 x i32> %a, %vmull2.i + ret <4 x i32> %sub +} + +define <2 x i64> @test_vmlsl_lane_u32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) { +; CHECK-LABEL: test_vmlsl_lane_u32_0: +; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer + %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %shuffle) + %sub = sub <2 x i64> %a, %vmull2.i + ret <2 x i64> %sub +} + +define <4 x i32> @test_vmlsl_laneq_u16_0(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) { +; CHECK-LABEL: test_vmlsl_laneq_u16_0: +; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer + %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %shuffle) + %sub = sub <4 x i32> %a, %vmull2.i + ret <4 x i32> %sub +} + +define <2 x i64> @test_vmlsl_laneq_u32_0(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) { +; CHECK-LABEL: test_vmlsl_laneq_u32_0: +; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer + %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %shuffle) + %sub = sub <2 x i64> %a, %vmull2.i + ret <2 x i64> %sub +} + +define <4 x i32> @test_vmlsl_high_lane_u16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) { +; CHECK-LABEL: test_vmlsl_high_lane_u16_0: +; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> + %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer + %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle) + %sub = sub <4 x i32> %a, %vmull2.i + ret <4 x i32> %sub +} + +define <2 x i64> @test_vmlsl_high_lane_u32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) { +; CHECK-LABEL: test_vmlsl_high_lane_u32_0: +; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0] +; CHECK-NEXT: ret +entry: + %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> + %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer + %vmull2.i = tail call 
<2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  %sub = sub <2 x i64> %a, %vmull2.i
+  ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_laneq_u16_0(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK-LABEL: test_vmlsl_high_laneq_u16_0:
+; CHECK: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  %sub = sub <4 x i32> %a, %vmull2.i
+  ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_laneq_u32_0(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK-LABEL: test_vmlsl_high_laneq_u32_0:
+; CHECK: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  %sub = sub <2 x i64> %a, %vmull2.i
+  ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmull_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmull_lane_s16_0:
+; CHECK: smull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+  ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmull_lane_s32_0:
+; CHECK: smull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+  ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_lane_u16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmull_lane_u16_0:
+; CHECK: umull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+  ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_lane_u32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmull_lane_u32_0:
+; CHECK: umull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+  ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmull_high_lane_s16_0:
+; CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmull_high_lane_s32_0:
+; CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_lane_u16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vmull_high_lane_u16_0:
+; CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_lane_u32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vmull_high_lane_u32_0:
+; CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_laneq_s16_0(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmull_laneq_s16_0:
+; CHECK: smull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+  ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_laneq_s32_0(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmull_laneq_s32_0:
+; CHECK: smull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+  ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_laneq_u16_0(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmull_laneq_u16_0:
+; CHECK: umull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+  ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_laneq_u32_0(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmull_laneq_u32_0:
+; CHECK: umull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+  ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_laneq_s16_0(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmull_high_laneq_s16_0:
+; CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_laneq_s32_0(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmull_high_laneq_s32_0:
+; CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_laneq_u16_0(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vmull_high_laneq_u16_0:
+; CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+  %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_laneq_u32_0(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vmull_high_laneq_u32_0:
+; CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+  %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vqdmlal_lane_s16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmlal_lane_s16_0:
+; CHECK: sqdmlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+  %vqdmlal2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+  %vqdmlal4.i = tail call <4 x i32> @llvm.arm64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
+  ret <4 x i32> %vqdmlal4.i
+}
+
+define <2 x i64> @test_vqdmlal_lane_s32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmlal_lane_s32_0:
+; CHECK: sqdmlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+  %vqdmlal2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+  %vqdmlal4.i = tail call <2 x i64> @llvm.arm64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
+  ret <2 x i64> %vqdmlal4.i
+}
+
+define <4 x i32> @test_vqdmlal_high_lane_s16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmlal_high_lane_s16_0:
+; CHECK: sqdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+  %vqdmlal2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  %vqdmlal4.i = tail call <4 x i32> @llvm.arm64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
+  ret <4 x i32> %vqdmlal4.i
+}
+
+define <2 x i64> @test_vqdmlal_high_lane_s32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmlal_high_lane_s32_0:
+; CHECK: sqdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+  %vqdmlal2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  %vqdmlal4.i = tail call <2 x i64> @llvm.arm64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
+  ret <2 x i64> %vqdmlal4.i
+}
+
+define <4 x i32> @test_vqdmlsl_lane_s16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmlsl_lane_s16_0:
+; CHECK: sqdmlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+  %vqdmlsl2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+  %vqdmlsl4.i = tail call <4 x i32> @llvm.arm64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
+  ret <4 x i32> %vqdmlsl4.i
+}
+
+define <2 x i64> @test_vqdmlsl_lane_s32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmlsl_lane_s32_0:
+; CHECK: sqdmlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+  %vqdmlsl2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+  %vqdmlsl4.i = tail call <2 x i64> @llvm.arm64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
+  ret <2 x i64> %vqdmlsl4.i
+}
+
+define <4 x i32> @test_vqdmlsl_high_lane_s16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmlsl_high_lane_s16_0:
+; CHECK: sqdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+  %vqdmlsl2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  %vqdmlsl4.i = tail call <4 x i32> @llvm.arm64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
+  ret <4 x i32> %vqdmlsl4.i
+}
+
+define <2 x i64> @test_vqdmlsl_high_lane_s32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmlsl_high_lane_s32_0:
+; CHECK: sqdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+  %vqdmlsl2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  %vqdmlsl4.i = tail call <2 x i64> @llvm.arm64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
+  ret <2 x i64> %vqdmlsl4.i
+}
+
+define <4 x i32> @test_vqdmull_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmull_lane_s16_0:
+; CHECK: sqdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+  %vqdmull2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+  ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmull_lane_s32_0:
+; CHECK: sqdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+  %vqdmull2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+  ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i32> @test_vqdmull_laneq_s16_0(<4 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vqdmull_laneq_s16_0:
+; CHECK: sqdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+  %vqdmull2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+  ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_laneq_s32_0(<2 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vqdmull_laneq_s32_0:
+; CHECK: sqdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+  %vqdmull2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+  ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i32> @test_vqdmull_high_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmull_high_lane_s16_0:
+; CHECK: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+  %vqdmull2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_high_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmull_high_lane_s32_0:
+; CHECK: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+  %vqdmull2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i32> @test_vqdmull_high_laneq_s16_0(<8 x i16> %a, <8 x i16> %v) {
+; CHECK-LABEL: test_vqdmull_high_laneq_s16_0:
+; CHECK: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+  %vqdmull2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+  ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_high_laneq_s32_0(<4 x i32> %a, <4 x i32> %v) {
+; CHECK-LABEL: test_vqdmull_high_laneq_s32_0:
+; CHECK: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+  %vqdmull2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+  ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i16> @test_vqdmulh_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmulh_lane_s16_0:
+; CHECK: sqdmulh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+  %vqdmulh2.i = tail call <4 x i16> @llvm.arm64.neon.sqdmulh.v4i16(<4 x i16> %a, <4 x i16> %shuffle)
+  ret <4 x i16> %vqdmulh2.i
+}
+
+define <8 x i16> @test_vqdmulhq_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqdmulhq_lane_s16_0:
+; CHECK: sqdmulh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
+  %vqdmulh2.i = tail call <8 x i16> @llvm.arm64.neon.sqdmulh.v8i16(<8 x i16> %a, <8 x i16> %shuffle)
+  ret <8 x i16> %vqdmulh2.i
+}
+
+define <2 x i32> @test_vqdmulh_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmulh_lane_s32_0:
+; CHECK: sqdmulh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+  %vqdmulh2.i = tail call <2 x i32> @llvm.arm64.neon.sqdmulh.v2i32(<2 x i32> %a, <2 x i32> %shuffle)
+  ret <2 x i32> %vqdmulh2.i
+}
+
+define <4 x i32> @test_vqdmulhq_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqdmulhq_lane_s32_0:
+; CHECK: sqdmulh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
+  %vqdmulh2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmulh.v4i32(<4 x i32> %a, <4 x i32> %shuffle)
+  ret <4 x i32> %vqdmulh2.i
+}
+
+define <4 x i16> @test_vqrdmulh_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqrdmulh_lane_s16_0:
+; CHECK: sqrdmulh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+  %vqrdmulh2.i = tail call <4 x i16> @llvm.arm64.neon.sqrdmulh.v4i16(<4 x i16> %a, <4 x i16> %shuffle)
+  ret <4 x i16> %vqrdmulh2.i
+}
+
+define <8 x i16> @test_vqrdmulhq_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK-LABEL: test_vqrdmulhq_lane_s16_0:
+; CHECK: sqrdmulh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
+  %vqrdmulh2.i = tail call <8 x i16> @llvm.arm64.neon.sqrdmulh.v8i16(<8 x i16> %a, <8 x i16> %shuffle)
+  ret <8 x i16> %vqrdmulh2.i
+}
+
+define <2 x i32> @test_vqrdmulh_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqrdmulh_lane_s32_0:
+; CHECK: sqrdmulh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+  %vqrdmulh2.i = tail call <2 x i32> @llvm.arm64.neon.sqrdmulh.v2i32(<2 x i32> %a, <2 x i32> %shuffle)
+  ret <2 x i32> %vqrdmulh2.i
+}
+
+define <4 x i32> @test_vqrdmulhq_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK-LABEL: test_vqrdmulhq_lane_s32_0:
+; CHECK: sqrdmulh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
+  %vqrdmulh2.i = tail call <4 x i32> @llvm.arm64.neon.sqrdmulh.v4i32(<4 x i32> %a, <4 x i32> %shuffle)
+  ret <4 x i32> %vqrdmulh2.i
+}
+
+define <2 x float> @test_vmul_lane_f32_0(<2 x float> %a, <2 x float> %v) {
+; CHECK-LABEL: test_vmul_lane_f32_0:
+; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> zeroinitializer
+  %mul = fmul <2 x float> %shuffle, %a
+  ret <2 x float> %mul
+}
+
+define <4 x float> @test_vmulq_lane_f32_0(<4 x float> %a, <2 x float> %v) {
+; CHECK-LABEL: test_vmulq_lane_f32_0:
+; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> zeroinitializer
+  %mul = fmul <4 x float> %shuffle, %a
+  ret <4 x float> %mul
+}
+
+define <2 x float> @test_vmul_laneq_f32_0(<2 x float> %a, <4 x float> %v) {
+; CHECK-LABEL: test_vmul_laneq_f32_0:
+; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> zeroinitializer
+  %mul = fmul <2 x float> %shuffle, %a
+  ret <2 x float> %mul
+}
+
+define <1 x double> @test_vmul_laneq_f64_0(<1 x double> %a, <2 x double> %v) {
+; CHECK-LABEL: test_vmul_laneq_f64_0:
+; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+  %0 = bitcast <1 x double> %a to <8 x i8>
+  %1 = bitcast <8 x i8> %0 to double
+  %extract = extractelement <2 x double> %v, i32 0
+  %2 = fmul double %1, %extract
+  %3 = insertelement <1 x double> undef, double %2, i32 0
+  ret <1 x double> %3
+}
+
+define <4 x float> @test_vmulq_laneq_f32_0(<4 x float> %a, <4 x float> %v) {
+; CHECK-LABEL: test_vmulq_laneq_f32_0:
+; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
+  %mul = fmul <4 x float> %shuffle, %a
+  ret <4 x float> %mul
+}
+
+define <2 x double> @test_vmulq_laneq_f64_0(<2 x double> %a, <2 x double> %v) {
+; CHECK-LABEL: test_vmulq_laneq_f64_0:
+; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> zeroinitializer
+  %mul = fmul <2 x double> %shuffle, %a
+  ret <2 x double> %mul
+}
+
+define <2 x float> @test_vmulx_lane_f32_0(<2 x float> %a, <2 x float> %v) {
+; CHECK-LABEL: test_vmulx_lane_f32_0:
+; CHECK: fmulx {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> zeroinitializer
+  %vmulx2.i = tail call <2 x float> @llvm.arm64.neon.fmulx.v2f32(<2 x float> %a, <2 x float> %shuffle)
+  ret <2 x float> %vmulx2.i
+}
+
+define <4 x float> @test_vmulxq_lane_f32_0(<4 x float> %a, <2 x float> %v) {
+; CHECK-LABEL: test_vmulxq_lane_f32_0:
+; CHECK: fmulx {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> zeroinitializer
+  %vmulx2.i = tail call <4 x float> @llvm.arm64.neon.fmulx.v4f32(<4 x float> %a, <4 x float> %shuffle)
+  ret <4 x float> %vmulx2.i
+}
+
+define <2 x double> @test_vmulxq_lane_f64_0(<2 x double> %a, <1 x double> %v) {
+; CHECK-LABEL: test_vmulxq_lane_f64_0:
+; CHECK: fmulx {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <1 x double> %v, <1 x double> undef, <2 x i32> zeroinitializer
+  %vmulx2.i = tail call <2 x double> @llvm.arm64.neon.fmulx.v2f64(<2 x double> %a, <2 x double> %shuffle)
+  ret <2 x double> %vmulx2.i
+}
+
+define <2 x float> @test_vmulx_laneq_f32_0(<2 x float> %a, <4 x float> %v) {
+; CHECK-LABEL: test_vmulx_laneq_f32_0:
+; CHECK: fmulx {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> zeroinitializer
+  %vmulx2.i = tail call <2 x float> @llvm.arm64.neon.fmulx.v2f32(<2 x float> %a, <2 x float> %shuffle)
+  ret <2 x float> %vmulx2.i
+}
+
+define <4 x float> @test_vmulxq_laneq_f32_0(<4 x float> %a, <4 x float> %v) {
+; CHECK-LABEL: test_vmulxq_laneq_f32_0:
+; CHECK: fmulx {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
+  %vmulx2.i = tail call <4 x float> @llvm.arm64.neon.fmulx.v4f32(<4 x float> %a, <4 x float> %shuffle)
+  ret <4 x float> %vmulx2.i
+}
+
+define <2 x double> @test_vmulxq_laneq_f64_0(<2 x double> %a, <2 x double> %v) {
+; CHECK-LABEL: test_vmulxq_laneq_f64_0:
+; CHECK: fmulx {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+; CHECK-NEXT: ret
+entry:
+  %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> zeroinitializer
+  %vmulx2.i = tail call <2 x double> @llvm.arm64.neon.fmulx.v2f64(<2 x double> %a, <2 x double> %shuffle)
+  ret <2 x double> %vmulx2.i
+}
+
diff --git a/test/CodeGen/ARM64/aarch64-neon-3vdiff.ll b/test/CodeGen/ARM64/aarch64-neon-3vdiff.ll
new file mode 100644
index 00000000000..a479844de8d
--- /dev/null
+++ b/test/CodeGen/ARM64/aarch64-neon-3vdiff.ll
@@ -0,0 +1,1829 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+declare <8 x i16> @llvm.arm64.neon.pmull.v8i16(<8 x i8>, <8 x i8>)
+
+declare <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <2 x i64> @llvm.arm64.neon.sqsub.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <4 x i32> @llvm.arm64.neon.sqsub.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.arm64.neon.sqadd.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.arm64.neon.sqadd.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <8 x i16> @llvm.arm64.neon.umull.v8i16(<8 x i8>, <8 x i8>)
+
+declare <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <8 x i16> @llvm.arm64.neon.smull.v8i16(<8 x i8>, <8 x i8>)
+
+declare <2 x i32> @llvm.arm64.neon.uabd.v2i32(<2 x i32>, <2 x i32>)
+
+declare <4 x i16> @llvm.arm64.neon.uabd.v4i16(<4 x i16>, <4 x i16>)
+
+declare <8 x i8> @llvm.arm64.neon.uabd.v8i8(<8 x i8>, <8 x i8>)
+
+declare <2 x i32> @llvm.arm64.neon.sabd.v2i32(<2 x i32>, <2 x i32>)
+
+declare <4 x i16> @llvm.arm64.neon.sabd.v4i16(<4 x i16>, <4 x i16>)
+
+declare <8 x i8> @llvm.arm64.neon.sabd.v8i8(<8 x i8>, <8 x i8>)
+
+declare <2 x i32> @llvm.arm64.neon.rsubhn.v2i32(<2 x i64>, <2 x i64>)
+
+declare <4 x i16> @llvm.arm64.neon.rsubhn.v4i16(<4 x i32>, <4 x i32>)
+
+declare <8 x i8> @llvm.arm64.neon.rsubhn.v8i8(<8 x i16>, <8 x i16>)
+
+declare <2 x i32> @llvm.arm64.neon.raddhn.v2i32(<2 x i64>, <2 x i64>)
+
+declare <4 x i16> @llvm.arm64.neon.raddhn.v4i16(<4 x i32>, <4 x i32>)
+
+declare <8 x i8> @llvm.arm64.neon.raddhn.v8i8(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_vaddl_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vaddl_s8:
+; CHECK: saddl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+  %vmovl.i.i = sext <8 x i8> %a to <8 x i16>
+  %vmovl.i2.i = sext <8 x i8> %b to <8 x i16>
+  %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i
+  ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddl_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vaddl_s16:
+; CHECK: saddl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+  %vmovl.i.i = sext <4 x i16> %a to <4 x i32>
+  %vmovl.i2.i = sext <4 x i16> %b to <4 x i32>
+  %add.i = add <4 x i32> %vmovl.i.i, %vmovl.i2.i
+  ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddl_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vaddl_s32:
+; CHECK: saddl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+  %vmovl.i.i = sext <2 x i32> %a to <2 x i64>
+  %vmovl.i2.i = sext <2 x i32> %b to <2 x i64>
+  %add.i = add <2 x i64> %vmovl.i.i, %vmovl.i2.i
+  ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddl_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vaddl_u8:
+; CHECK: uaddl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+  %vmovl.i.i = zext <8 x i8> %a to <8 x i16>
+  %vmovl.i2.i = zext <8 x i8> %b to <8 x i16>
+  %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i
+  ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddl_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vaddl_u16:
+; CHECK: uaddl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+  %vmovl.i.i = zext <4 x i16> %a to <4 x i32>
+  %vmovl.i2.i = zext <4 x i16> %b to <4 x i32>
+  %add.i = add <4 x i32> %vmovl.i.i, %vmovl.i2.i
+  ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddl_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vaddl_u32:
+; CHECK: uaddl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+  %vmovl.i.i = zext <2 x i32> %a to <2 x i64>
+  %vmovl.i2.i = zext <2 x i32> %b to <2 x i64>
+  %add.i = add <2 x i64> %vmovl.i.i, %vmovl.i2.i
+  ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddl_high_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vaddl_high_s8:
+; CHECK: saddl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+  %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %0 = sext <8 x i8> %shuffle.i.i.i to <8 x i16>
+  %shuffle.i.i2.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %1 = sext <8 x i8> %shuffle.i.i2.i to <8 x i16>
+  %add.i = add <8 x i16> %0, %1
+  ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddl_high_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vaddl_high_s16:
+; CHECK: saddl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %0 = sext <4 x i16> %shuffle.i.i.i to <4 x i32>
+  %shuffle.i.i2.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %1 = sext <4 x i16> %shuffle.i.i2.i to <4 x i32>
+  %add.i = add <4 x i32> %0, %1
+  ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddl_high_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vaddl_high_s32:
+; CHECK: saddl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %0 = sext <2 x i32> %shuffle.i.i.i to <2 x i64>
+  %shuffle.i.i2.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %1 = sext <2 x i32> %shuffle.i.i2.i to <2 x i64>
+  %add.i = add <2 x i64> %0, %1
+  ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddl_high_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vaddl_high_u8:
+; CHECK: uaddl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+  %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %0 = zext <8 x i8> %shuffle.i.i.i to <8 x i16>
+  %shuffle.i.i2.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %1 = zext <8 x i8> %shuffle.i.i2.i to <8 x i16>
+  %add.i = add <8 x i16> %0, %1
+  ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddl_high_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vaddl_high_u16:
+; CHECK: uaddl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %0 = zext <4 x i16> %shuffle.i.i.i to <4 x i32>
+  %shuffle.i.i2.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %1 = zext <4 x i16> %shuffle.i.i2.i to <4 x i32>
+  %add.i = add <4 x i32> %0, %1
+  ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddl_high_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vaddl_high_u32:
+; CHECK: uaddl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %0 = zext <2 x i32> %shuffle.i.i.i to <2 x i64>
+  %shuffle.i.i2.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %1 = zext <2 x i32> %shuffle.i.i2.i to <2 x i64>
+  %add.i = add <2 x i64> %0, %1
+  ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddw_s8(<8 x i16> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vaddw_s8:
+; CHECK: saddw {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8b
+entry:
+  %vmovl.i.i = sext <8 x i8> %b to <8 x i16>
+  %add.i = add <8 x i16> %vmovl.i.i, %a
+  ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddw_s16(<4 x i32> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vaddw_s16:
+; CHECK: saddw {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4h
+entry:
+  %vmovl.i.i = sext <4 x i16> %b to <4 x i32>
+  %add.i = add <4 x i32> %vmovl.i.i, %a
+  ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddw_s32(<2 x i64> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vaddw_s32:
+; CHECK: saddw {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2s
+entry:
+  %vmovl.i.i = sext <2 x i32> %b to <2 x i64>
+  %add.i = add <2 x i64> %vmovl.i.i, %a
+  ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddw_u8(<8 x i16> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vaddw_u8:
+; CHECK: uaddw {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8b
+entry:
+  %vmovl.i.i = zext <8 x i8> %b to <8 x i16>
+  %add.i = add <8 x i16> %vmovl.i.i, %a
+  ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddw_u16(<4 x i32> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vaddw_u16:
+; CHECK: uaddw {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4h
+entry:
+  %vmovl.i.i = zext <4 x i16> %b to <4 x i32>
+  %add.i = add <4 x i32> %vmovl.i.i, %a
+  ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddw_u32(<2 x i64> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vaddw_u32:
+; CHECK: uaddw {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2s
+entry:
+  %vmovl.i.i = zext <2 x i32> %b to <2 x i64>
+  %add.i = add <2 x i64> %vmovl.i.i, %a
+  ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddw_high_s8(<8 x i16> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vaddw_high_s8:
+; CHECK: saddw2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.16b
+entry:
+  %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %0 = sext <8 x i8> %shuffle.i.i.i to <8 x i16>
+  %add.i = add <8 x i16> %0, %a
+  ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddw_high_s16(<4 x i32> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vaddw_high_s16:
+; CHECK: saddw2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.8h
+entry:
+  %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %0 = sext <4 x i16> %shuffle.i.i.i to <4 x i32>
+  %add.i = add <4 x i32> %0, %a
+  ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddw_high_s32(<2 x i64> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vaddw_high_s32:
+; CHECK: saddw2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.4s
+entry:
+  %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %0 = sext <2 x i32> %shuffle.i.i.i to <2 x i64>
+  %add.i = add <2 x i64> %0, %a
+  ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddw_high_u8(<8 x i16> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vaddw_high_u8:
+; CHECK: uaddw2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.16b
+entry:
+  %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %0 = zext <8 x i8> %shuffle.i.i.i to <8 x i16>
+  %add.i = add <8 x i16> %0, %a
+  ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddw_high_u16(<4 x i32> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vaddw_high_u16:
+; CHECK: uaddw2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.8h
+entry:
+  %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %0 = zext <4 x i16> %shuffle.i.i.i to <4 x i32>
+  %add.i = add <4 x i32> %0, %a
+  ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddw_high_u32(<2 x i64> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vaddw_high_u32:
+; CHECK: uaddw2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.4s
+entry:
+  %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %0 = zext <2 x i32> %shuffle.i.i.i to <2 x i64>
+  %add.i = add <2 x i64> %0, %a
+  ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vsubl_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vsubl_s8:
+; CHECK: ssubl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+  %vmovl.i.i = sext <8 x i8> %a to <8 x i16>
+  %vmovl.i2.i = sext <8 x i8> %b to <8 x i16>
+  %sub.i = sub <8 x i16> %vmovl.i.i, %vmovl.i2.i
+  ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubl_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vsubl_s16:
+; CHECK: ssubl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+  %vmovl.i.i = sext <4 x i16> %a to <4 x i32>
+  %vmovl.i2.i = sext <4 x i16> %b to <4 x i32>
+  %sub.i = sub <4 x i32> %vmovl.i.i, %vmovl.i2.i
+  ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubl_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vsubl_s32:
+; CHECK: ssubl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+  %vmovl.i.i = sext <2 x i32> %a to <2 x i64>
+  %vmovl.i2.i = sext <2 x i32> %b to <2 x i64>
+  %sub.i = sub <2 x i64> %vmovl.i.i, %vmovl.i2.i
+  ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubl_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vsubl_u8:
+; CHECK: usubl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+  %vmovl.i.i = zext <8 x i8> %a to <8 x i16>
+  %vmovl.i2.i = zext <8 x i8> %b to <8 x i16>
+  %sub.i = sub <8 x i16> %vmovl.i.i, %vmovl.i2.i
+  ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubl_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vsubl_u16:
+; CHECK: usubl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+  %vmovl.i.i = zext <4 x i16> %a to <4 x i32>
+  %vmovl.i2.i = zext <4 x i16> %b to <4 x i32>
+  %sub.i = sub <4 x i32> %vmovl.i.i, %vmovl.i2.i
+  ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubl_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vsubl_u32:
+; CHECK: usubl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+  %vmovl.i.i = zext <2 x i32> %a to <2 x i64>
+  %vmovl.i2.i = zext <2 x i32> %b to <2 x i64>
+  %sub.i = sub <2 x i64> %vmovl.i.i, %vmovl.i2.i
+  ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubl_high_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vsubl_high_s8:
+; CHECK: ssubl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+  %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %0 = sext <8 x i8> %shuffle.i.i.i to <8 x i16>
+  %shuffle.i.i2.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %1 = sext <8 x i8> %shuffle.i.i2.i to <8 x i16>
+  %sub.i = sub <8 x i16> %0, %1
+  ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubl_high_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vsubl_high_s16:
+; CHECK: ssubl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %0 = sext <4 x i16> %shuffle.i.i.i to <4 x i32>
+  %shuffle.i.i2.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %1 = sext <4 x i16> %shuffle.i.i2.i to <4 x i32>
+  %sub.i = sub <4 x i32> %0, %1
+  ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubl_high_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vsubl_high_s32:
+; CHECK: ssubl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %0 = sext <2 x i32> %shuffle.i.i.i to <2 x i64>
+  %shuffle.i.i2.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %1 = sext <2 x i32> %shuffle.i.i2.i to <2 x i64>
+  %sub.i = sub <2 x i64> %0, %1
+  ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubl_high_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vsubl_high_u8:
+; CHECK: usubl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+  %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %0 = zext <8 x i8> %shuffle.i.i.i to <8 x i16>
+  %shuffle.i.i2.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %1 = zext <8 x i8> %shuffle.i.i2.i to <8 x i16>
+  %sub.i = sub <8 x i16> %0, %1
+  ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubl_high_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vsubl_high_u16:
+; CHECK: usubl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %0 = zext <4 x i16> %shuffle.i.i.i to <4 x i32>
+  %shuffle.i.i2.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %1 = zext <4 x i16> %shuffle.i.i2.i to <4 x i32>
+  %sub.i = sub <4 x i32> %0, %1
+  ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubl_high_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vsubl_high_u32:
+; CHECK: usubl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %0 = zext <2 x i32> %shuffle.i.i.i to <2 x i64>
+  %shuffle.i.i2.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %1 = zext <2 x i32> %shuffle.i.i2.i to <2 x i64>
+  %sub.i = sub <2 x i64> %0, %1
+  ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubw_s8(<8 x i16> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vsubw_s8:
+; CHECK: ssubw {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8b
+entry:
+  %vmovl.i.i = sext <8 x i8> %b to <8 x i16>
+  %sub.i = sub <8 x i16> %a, %vmovl.i.i
+  ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubw_s16(<4 x i32> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vsubw_s16:
+; CHECK: ssubw {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4h
+entry:
+  %vmovl.i.i = sext <4 x i16> %b to <4 x i32>
+  %sub.i = sub <4 x i32> %a, %vmovl.i.i
+  ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubw_s32(<2 x i64> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vsubw_s32:
+; CHECK: ssubw {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2s
+entry:
+  %vmovl.i.i = sext <2 x i32> %b to <2 x i64>
+  %sub.i = sub <2 x i64> %a, %vmovl.i.i
+  ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubw_u8(<8 x i16> %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vsubw_u8:
+; CHECK: usubw {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8b
+entry:
+  %vmovl.i.i = zext <8 x i8> %b to <8 x i16>
+  %sub.i = sub <8 x i16> %a, %vmovl.i.i
+  ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubw_u16(<4 x i32> %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vsubw_u16:
+; CHECK: usubw {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4h
+entry:
+  %vmovl.i.i = zext <4 x i16> %b to <4 x i32>
+  %sub.i = sub <4 x i32> %a, %vmovl.i.i
+  ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubw_u32(<2 x i64> %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vsubw_u32:
+; CHECK: usubw {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2s
+entry:
+  %vmovl.i.i = zext <2 x i32> %b to <2 x i64>
+  %sub.i = sub <2 x i64> %a, %vmovl.i.i
+  ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubw_high_s8(<8 x i16> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vsubw_high_s8:
+; CHECK: ssubw2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.16b
+entry:
+  %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %0 = sext <8 x i8> %shuffle.i.i.i to <8 x i16>
+  %sub.i = sub <8 x i16> %a, %0
+  ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubw_high_s16(<4 x i32> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vsubw_high_s16:
+; CHECK: ssubw2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.8h
+entry:
+  %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %0 = sext <4 x i16> %shuffle.i.i.i to <4 x i32>
+  %sub.i = sub <4 x i32> %a, %0
+  ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubw_high_s32(<2 x i64> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vsubw_high_s32:
+; CHECK: ssubw2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.4s
+entry:
+  %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %0 = sext <2 x i32> %shuffle.i.i.i to <2 x i64>
+  %sub.i = sub <2 x i64> %a, %0
+  ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubw_high_u8(<8 x i16> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vsubw_high_u8:
+; CHECK: usubw2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.16b
+entry:
+  %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %0 = zext <8 x i8> %shuffle.i.i.i to <8 x i16>
+  %sub.i = sub <8 x i16> %a, %0
+  ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubw_high_u16(<4 x i32> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vsubw_high_u16:
+; CHECK: usubw2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.8h
+entry:
+  %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %0 = zext <4 x i16> %shuffle.i.i.i to <4 x i32>
+  %sub.i = sub <4 x i32> %a, %0
+  ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubw_high_u32(<2 x i64> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vsubw_high_u32:
+; CHECK: usubw2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.4s
+entry:
+  %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %0 = zext <2 x i32> %shuffle.i.i.i to <2 x i64>
+  %sub.i = sub <2 x i64> %a, %0
+  ret <2 x i64> %sub.i
+}
+
+define <8 x i8> @test_vaddhn_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vaddhn_s16:
+; CHECK: addhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %vaddhn.i = add <8 x i16> %a, %b
+  %vaddhn1.i = lshr <8 x i16> %vaddhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %vaddhn2.i = trunc <8 x i16> %vaddhn1.i to <8 x i8>
+  ret <8 x i8> %vaddhn2.i
+}
+
+define <4 x i16> @test_vaddhn_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vaddhn_s32:
+; CHECK: addhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %vaddhn.i = add <4 x i32> %a, %b
+  %vaddhn1.i = lshr <4 x i32> %vaddhn.i, <i32 16, i32 16, i32 16, i32 16>
+  %vaddhn2.i = trunc <4 x i32> %vaddhn1.i to <4 x i16>
+  ret <4 x i16> %vaddhn2.i
+}
+
+define <2 x i32> @test_vaddhn_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vaddhn_s64:
+; CHECK: addhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+  %vaddhn.i = add <2 x i64> %a, %b
+  %vaddhn1.i = lshr <2 x i64> %vaddhn.i, <i64 32, i64 32>
+  %vaddhn2.i = trunc <2 x i64> %vaddhn1.i to <2 x i32>
+  ret <2 x i32> %vaddhn2.i
+}
+
+define <8 x i8> @test_vaddhn_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vaddhn_u16:
+; CHECK: addhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %vaddhn.i = add <8 x i16> %a, %b
+  %vaddhn1.i = lshr <8 x i16> %vaddhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %vaddhn2.i = trunc <8 x i16> %vaddhn1.i to <8 x i8>
+  ret <8 x i8> %vaddhn2.i
+}
+
+define <4 x i16> @test_vaddhn_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vaddhn_u32:
+; CHECK: addhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %vaddhn.i = add <4 x i32> %a, %b
+  %vaddhn1.i = lshr <4 x i32> %vaddhn.i, <i32 16, i32 16, i32 16, i32 16>
+  %vaddhn2.i = trunc <4 x i32> %vaddhn1.i to <4 x i16>
+  ret <4 x i16> %vaddhn2.i
+}
+
+define <2 x i32> @test_vaddhn_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vaddhn_u64:
+; CHECK: addhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+  %vaddhn.i = add <2 x i64> %a, %b
+  %vaddhn1.i = lshr <2 x i64> %vaddhn.i, <i64 32, i64 32>
+  %vaddhn2.i = trunc <2 x i64> %vaddhn1.i to <2 x i32>
+  ret <2 x i32> %vaddhn2.i
+}
+
+define <16 x i8> @test_vaddhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vaddhn_high_s16:
+; CHECK: addhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %vaddhn.i.i = add <8 x i16> %a, %b
+  %vaddhn1.i.i = lshr <8 x i16> %vaddhn.i.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %vaddhn2.i.i = trunc <8 x i16> %vaddhn1.i.i to <8 x i8>
+  %0 = bitcast <8 x i8> %r to <1 x i64>
+  %1 = bitcast <8 x i8> %vaddhn2.i.i to <1 x i64>
+  %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+  %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+  ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vaddhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vaddhn_high_s32:
+; CHECK: addhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %vaddhn.i.i = add <4 x i32> %a, %b
+  %vaddhn1.i.i = lshr <4 x i32> %vaddhn.i.i, <i32 16, i32 16, i32 16, i32 16>
+  %vaddhn2.i.i = trunc <4 x i32> %vaddhn1.i.i to <4 x i16>
+  %0 = bitcast <4 x i16> %r to <1 x i64>
+  %1 = bitcast <4 x i16> %vaddhn2.i.i to <1 x i64>
+  %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+  %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+  ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vaddhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vaddhn_high_s64:
+; CHECK: addhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+  %vaddhn.i.i = add <2 x i64> %a, %b
+  %vaddhn1.i.i = lshr <2 x i64> %vaddhn.i.i, <i64 32, i64 32>
+  %vaddhn2.i.i = trunc <2 x i64> %vaddhn1.i.i to <2 x i32>
+  %0 = bitcast <2 x i32> %r to <1 x i64>
+  %1 = bitcast <2 x i32> %vaddhn2.i.i to <1 x i64>
+  %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+  %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+  ret <4 x i32> %2
+}
+
+define <16 x i8> @test_vaddhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vaddhn_high_u16:
+; CHECK: addhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %vaddhn.i.i = add <8 x i16> %a, %b
+  %vaddhn1.i.i = lshr <8 x i16> %vaddhn.i.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %vaddhn2.i.i = trunc <8 x i16> %vaddhn1.i.i to <8 x i8>
+  %0 = bitcast <8 x i8> %r to <1 x i64>
+  %1 = bitcast <8 x i8> %vaddhn2.i.i to <1 x i64>
+  %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+  %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+  ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vaddhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vaddhn_high_u32:
+; CHECK: addhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %vaddhn.i.i = add <4 x i32> %a, %b
+  %vaddhn1.i.i = lshr <4 x i32> %vaddhn.i.i, <i32 16, i32 16, i32 16, i32 16>
+  %vaddhn2.i.i = trunc <4 x i32> %vaddhn1.i.i to <4 x i16>
+  %0 = bitcast <4 x i16> %r to <1 x i64>
+  %1 = bitcast <4 x i16> %vaddhn2.i.i to <1 x i64>
+  %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+  %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+  ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vaddhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vaddhn_high_u64:
+; CHECK: addhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+  %vaddhn.i.i = add <2 x i64> %a, %b
+  %vaddhn1.i.i = lshr <2 x i64> %vaddhn.i.i, <i64 32, i64 32>
+  %vaddhn2.i.i = trunc <2 x i64> %vaddhn1.i.i to <2 x i32>
+  %0 = bitcast <2 x i32> %r to <1 x i64>
+  %1 = bitcast <2 x i32> %vaddhn2.i.i to <1 x i64>
+  %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+  %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+  ret <4 x i32> %2
+}
+
+define <8 x i8> @test_vraddhn_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vraddhn_s16:
+; CHECK: raddhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %vraddhn2.i = tail call <8 x i8> @llvm.arm64.neon.raddhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i8> %vraddhn2.i
+}
+
+define <4 x i16> @test_vraddhn_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vraddhn_s32:
+; CHECK: raddhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %vraddhn2.i = tail call <4 x i16> @llvm.arm64.neon.raddhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+  ret <4 x i16> %vraddhn2.i
+}
+
+define <2 x i32> @test_vraddhn_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vraddhn_s64:
+; CHECK: raddhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+  %vraddhn2.i = tail call <2 x i32> @llvm.arm64.neon.raddhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+  ret <2 x i32> %vraddhn2.i
+}
+
+define <8 x i8> @test_vraddhn_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vraddhn_u16:
+; CHECK: raddhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %vraddhn2.i = tail call <8 x i8> @llvm.arm64.neon.raddhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i8> %vraddhn2.i
+}
+
+define <4 x i16> @test_vraddhn_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vraddhn_u32:
+; CHECK: raddhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %vraddhn2.i = tail call <4 x i16> @llvm.arm64.neon.raddhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+  ret <4 x i16> %vraddhn2.i
+}
+
+define <2 x i32> @test_vraddhn_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vraddhn_u64:
+; CHECK: raddhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+  %vraddhn2.i = tail call <2 x i32> @llvm.arm64.neon.raddhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+  ret <2 x i32> %vraddhn2.i
+}
+
+define <16 x i8> @test_vraddhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vraddhn_high_s16:
+; CHECK: raddhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %vraddhn2.i.i = tail call <8 x i8> @llvm.arm64.neon.raddhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+  %0 = bitcast <8 x i8> %r to <1 x i64>
+  %1 = bitcast <8 x i8> %vraddhn2.i.i to <1 x i64>
+  %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+  %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+  ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vraddhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vraddhn_high_s32:
+; CHECK: raddhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %vraddhn2.i.i = tail call <4 x i16> @llvm.arm64.neon.raddhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+  %0 = bitcast <4 x i16> %r to <1 x i64>
+  %1 = bitcast <4 x i16> %vraddhn2.i.i to <1 x i64>
+  %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+  %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+  ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vraddhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vraddhn_high_s64:
+; CHECK: raddhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+  %vraddhn2.i.i = tail call <2 x i32> @llvm.arm64.neon.raddhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+  %0 = bitcast <2 x i32> %r to <1 x i64>
+  %1 = bitcast <2 x i32> %vraddhn2.i.i to <1 x i64>
+  %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+  %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+  ret <4 x i32> %2
+}
+
+define <16 x i8> @test_vraddhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vraddhn_high_u16:
+; CHECK: raddhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %vraddhn2.i.i = tail call <8 x i8> @llvm.arm64.neon.raddhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+  %0 = bitcast <8 x i8> %r to <1 x i64>
+  %1 = bitcast <8 x i8> %vraddhn2.i.i to <1 x i64>
+  %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+  %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+  ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vraddhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vraddhn_high_u32:
+; CHECK: raddhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %vraddhn2.i.i = tail call <4 x i16> @llvm.arm64.neon.raddhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+  %0 = bitcast <4 x i16> %r to <1 x i64>
+  %1 = bitcast <4 x i16> %vraddhn2.i.i to <1 x i64>
+  %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+  %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+  ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vraddhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vraddhn_high_u64:
+; CHECK: raddhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+  %vraddhn2.i.i = tail call <2 x i32> @llvm.arm64.neon.raddhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+  %0 = bitcast <2 x i32> %r to <1 x i64>
+  %1 = bitcast <2 x i32> %vraddhn2.i.i to <1 x i64>
+  %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+  %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+  ret <4 x i32> %2
+}
+
+define <8 x i8> @test_vsubhn_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vsubhn_s16:
+; CHECK: subhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %vsubhn.i = sub <8 x i16> %a, %b
+  %vsubhn1.i = lshr <8 x i16> %vsubhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %vsubhn2.i = trunc <8 x i16> %vsubhn1.i to <8 x i8>
+  ret <8 x i8> %vsubhn2.i
+}
+
+define <4 x i16> @test_vsubhn_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vsubhn_s32:
+; CHECK: subhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %vsubhn.i = sub <4 x i32> %a, %b
+  %vsubhn1.i = lshr <4 x i32> %vsubhn.i, <i32 16, i32 16, i32 16, i32 16>
+  %vsubhn2.i = trunc <4 x i32> %vsubhn1.i to <4 x i16>
+  ret <4 x i16> %vsubhn2.i
+}
+
+define <2 x i32> @test_vsubhn_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vsubhn_s64:
+; CHECK: subhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+  %vsubhn.i = sub <2 x i64> %a, %b
+  %vsubhn1.i = lshr <2 x i64> %vsubhn.i, <i64 32, i64 32>
+  %vsubhn2.i = trunc <2 x i64> %vsubhn1.i to <2 x i32>
+  ret <2 x i32> %vsubhn2.i
+}
+
+define <8 x i8> @test_vsubhn_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vsubhn_u16:
+; CHECK: subhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+  %vsubhn.i = sub <8 x i16> %a, %b
+  %vsubhn1.i = lshr <8 x i16> %vsubhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %vsubhn2.i = trunc <8 x i16> %vsubhn1.i to <8 x i8>
+  ret <8 x i8> %vsubhn2.i
+}
+
+define <4 x i16> @test_vsubhn_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vsubhn_u32:
+; CHECK: subhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
{{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %vsubhn.i = sub <4 x i32> %a, %b + %vsubhn1.i = lshr <4 x i32> %vsubhn.i, + %vsubhn2.i = trunc <4 x i32> %vsubhn1.i to <4 x i16> + ret <4 x i16> %vsubhn2.i +} + +define <2 x i32> @test_vsubhn_u64(<2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: test_vsubhn_u64: +; CHECK: subhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d +entry: + %vsubhn.i = sub <2 x i64> %a, %b + %vsubhn1.i = lshr <2 x i64> %vsubhn.i, + %vsubhn2.i = trunc <2 x i64> %vsubhn1.i to <2 x i32> + ret <2 x i32> %vsubhn2.i +} + +define <16 x i8> @test_vsubhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: test_vsubhn_high_s16: +; CHECK: subhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %vsubhn.i.i = sub <8 x i16> %a, %b + %vsubhn1.i.i = lshr <8 x i16> %vsubhn.i.i, + %vsubhn2.i.i = trunc <8 x i16> %vsubhn1.i.i to <8 x i8> + %0 = bitcast <8 x i8> %r to <1 x i64> + %1 = bitcast <8 x i8> %vsubhn2.i.i to <1 x i64> + %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> + %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8> + ret <16 x i8> %2 +} + +define <8 x i16> @test_vsubhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) { +; CHECK-LABEL: test_vsubhn_high_s32: +; CHECK: subhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %vsubhn.i.i = sub <4 x i32> %a, %b + %vsubhn1.i.i = lshr <4 x i32> %vsubhn.i.i, + %vsubhn2.i.i = trunc <4 x i32> %vsubhn1.i.i to <4 x i16> + %0 = bitcast <4 x i16> %r to <1 x i64> + %1 = bitcast <4 x i16> %vsubhn2.i.i to <1 x i64> + %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> + %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16> + ret <8 x i16> %2 +} + +define <4 x i32> @test_vsubhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: test_vsubhn_high_s64: +; CHECK: subhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d +entry: + %vsubhn.i.i = sub <2 x i64> %a, %b + %vsubhn1.i.i = lshr <2 x i64> %vsubhn.i.i, + %vsubhn2.i.i = trunc <2 x i64> %vsubhn1.i.i to <2 x i32> + %0 = bitcast <2 x i32> %r to <1 x i64> + %1 = bitcast <2 x i32> %vsubhn2.i.i to <1 x i64> + %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> + %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32> + ret <4 x i32> %2 +} + +define <16 x i8> @test_vsubhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: test_vsubhn_high_u16: +; CHECK: subhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %vsubhn.i.i = sub <8 x i16> %a, %b + %vsubhn1.i.i = lshr <8 x i16> %vsubhn.i.i, + %vsubhn2.i.i = trunc <8 x i16> %vsubhn1.i.i to <8 x i8> + %0 = bitcast <8 x i8> %r to <1 x i64> + %1 = bitcast <8 x i8> %vsubhn2.i.i to <1 x i64> + %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> + %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8> + ret <16 x i8> %2 +} + +define <8 x i16> @test_vsubhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) { +; CHECK-LABEL: test_vsubhn_high_u32: +; CHECK: subhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %vsubhn.i.i = sub <4 x i32> %a, %b + %vsubhn1.i.i = lshr <4 x i32> %vsubhn.i.i, + %vsubhn2.i.i = trunc <4 x i32> %vsubhn1.i.i to <4 x i16> + %0 = bitcast <4 x i16> %r to <1 x i64> + %1 = bitcast <4 x i16> %vsubhn2.i.i to <1 x i64> + %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> + %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16> + ret <8 x i16> %2 +} + +define <4 x i32> @test_vsubhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: test_vsubhn_high_u64: +; CHECK: subhn2 {{v[0-9]+}}.4s, 
{{v[0-9]+}}.2d, {{v[0-9]+}}.2d +entry: + %vsubhn.i.i = sub <2 x i64> %a, %b + %vsubhn1.i.i = lshr <2 x i64> %vsubhn.i.i, + %vsubhn2.i.i = trunc <2 x i64> %vsubhn1.i.i to <2 x i32> + %0 = bitcast <2 x i32> %r to <1 x i64> + %1 = bitcast <2 x i32> %vsubhn2.i.i to <1 x i64> + %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> + %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32> + ret <4 x i32> %2 +} + +define <8 x i8> @test_vrsubhn_s16(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: test_vrsubhn_s16: +; CHECK: rsubhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %vrsubhn2.i = tail call <8 x i8> @llvm.arm64.neon.rsubhn.v8i8(<8 x i16> %a, <8 x i16> %b) + ret <8 x i8> %vrsubhn2.i +} + +define <4 x i16> @test_vrsubhn_s32(<4 x i32> %a, <4 x i32> %b) { +; CHECK-LABEL: test_vrsubhn_s32: +; CHECK: rsubhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %vrsubhn2.i = tail call <4 x i16> @llvm.arm64.neon.rsubhn.v4i16(<4 x i32> %a, <4 x i32> %b) + ret <4 x i16> %vrsubhn2.i +} + +define <2 x i32> @test_vrsubhn_s64(<2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: test_vrsubhn_s64: +; CHECK: rsubhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d +entry: + %vrsubhn2.i = tail call <2 x i32> @llvm.arm64.neon.rsubhn.v2i32(<2 x i64> %a, <2 x i64> %b) + ret <2 x i32> %vrsubhn2.i +} + +define <8 x i8> @test_vrsubhn_u16(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: test_vrsubhn_u16: +; CHECK: rsubhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %vrsubhn2.i = tail call <8 x i8> @llvm.arm64.neon.rsubhn.v8i8(<8 x i16> %a, <8 x i16> %b) + ret <8 x i8> %vrsubhn2.i +} + +define <4 x i16> @test_vrsubhn_u32(<4 x i32> %a, <4 x i32> %b) { +; CHECK-LABEL: test_vrsubhn_u32: +; CHECK: rsubhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %vrsubhn2.i = tail call <4 x i16> @llvm.arm64.neon.rsubhn.v4i16(<4 x i32> %a, <4 x i32> %b) + ret <4 x i16> %vrsubhn2.i +} + +define <2 x i32> @test_vrsubhn_u64(<2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: test_vrsubhn_u64: +; CHECK: rsubhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d +entry: + %vrsubhn2.i = tail call <2 x i32> @llvm.arm64.neon.rsubhn.v2i32(<2 x i64> %a, <2 x i64> %b) + ret <2 x i32> %vrsubhn2.i +} + +define <16 x i8> @test_vrsubhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: test_vrsubhn_high_s16: +; CHECK: rsubhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %vrsubhn2.i.i = tail call <8 x i8> @llvm.arm64.neon.rsubhn.v8i8(<8 x i16> %a, <8 x i16> %b) + %0 = bitcast <8 x i8> %r to <1 x i64> + %1 = bitcast <8 x i8> %vrsubhn2.i.i to <1 x i64> + %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> + %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8> + ret <16 x i8> %2 +} + +define <8 x i16> @test_vrsubhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) { +; CHECK-LABEL: test_vrsubhn_high_s32: +; CHECK: rsubhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %vrsubhn2.i.i = tail call <4 x i16> @llvm.arm64.neon.rsubhn.v4i16(<4 x i32> %a, <4 x i32> %b) + %0 = bitcast <4 x i16> %r to <1 x i64> + %1 = bitcast <4 x i16> %vrsubhn2.i.i to <1 x i64> + %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> + %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16> + ret <8 x i16> %2 +} + +define <4 x i32> @test_vrsubhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: test_vrsubhn_high_s64: +; CHECK: rsubhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d +entry: + %vrsubhn2.i.i = tail call <2 x i32> @llvm.arm64.neon.rsubhn.v2i32(<2 x i64> %a, <2 x i64> %b) + 
%0 = bitcast <2 x i32> %r to <1 x i64> + %1 = bitcast <2 x i32> %vrsubhn2.i.i to <1 x i64> + %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> + %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32> + ret <4 x i32> %2 +} + +define <16 x i8> @test_vrsubhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: test_vrsubhn_high_u16: +; CHECK: rsubhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %vrsubhn2.i.i = tail call <8 x i8> @llvm.arm64.neon.rsubhn.v8i8(<8 x i16> %a, <8 x i16> %b) + %0 = bitcast <8 x i8> %r to <1 x i64> + %1 = bitcast <8 x i8> %vrsubhn2.i.i to <1 x i64> + %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> + %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8> + ret <16 x i8> %2 +} + +define <8 x i16> @test_vrsubhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) { +; CHECK-LABEL: test_vrsubhn_high_u32: +; CHECK: rsubhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %vrsubhn2.i.i = tail call <4 x i16> @llvm.arm64.neon.rsubhn.v4i16(<4 x i32> %a, <4 x i32> %b) + %0 = bitcast <4 x i16> %r to <1 x i64> + %1 = bitcast <4 x i16> %vrsubhn2.i.i to <1 x i64> + %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> + %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16> + ret <8 x i16> %2 +} + +define <4 x i32> @test_vrsubhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: test_vrsubhn_high_u64: +; CHECK: rsubhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d +entry: + %vrsubhn2.i.i = tail call <2 x i32> @llvm.arm64.neon.rsubhn.v2i32(<2 x i64> %a, <2 x i64> %b) + %0 = bitcast <2 x i32> %r to <1 x i64> + %1 = bitcast <2 x i32> %vrsubhn2.i.i to <1 x i64> + %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> + %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32> + ret <4 x i32> %2 +} + +define <8 x i16> @test_vabdl_s8(<8 x i8> %a, <8 x i8> %b) { +; CHECK-LABEL: test_vabdl_s8: +; CHECK: sabdl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b +entry: + %vabd.i.i = tail call <8 x i8> @llvm.arm64.neon.sabd.v8i8(<8 x i8> %a, <8 x i8> %b) + %vmovl.i.i = zext <8 x i8> %vabd.i.i to <8 x i16> + ret <8 x i16> %vmovl.i.i +} + +define <4 x i32> @test_vabdl_s16(<4 x i16> %a, <4 x i16> %b) { +; CHECK-LABEL: test_vabdl_s16: +; CHECK: sabdl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h +entry: + %vabd2.i.i = tail call <4 x i16> @llvm.arm64.neon.sabd.v4i16(<4 x i16> %a, <4 x i16> %b) + %vmovl.i.i = zext <4 x i16> %vabd2.i.i to <4 x i32> + ret <4 x i32> %vmovl.i.i +} + +define <2 x i64> @test_vabdl_s32(<2 x i32> %a, <2 x i32> %b) { +; CHECK-LABEL: test_vabdl_s32: +; CHECK: sabdl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s +entry: + %vabd2.i.i = tail call <2 x i32> @llvm.arm64.neon.sabd.v2i32(<2 x i32> %a, <2 x i32> %b) + %vmovl.i.i = zext <2 x i32> %vabd2.i.i to <2 x i64> + ret <2 x i64> %vmovl.i.i +} + +define <8 x i16> @test_vabdl_u8(<8 x i8> %a, <8 x i8> %b) { +; CHECK-LABEL: test_vabdl_u8: +; CHECK: uabdl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b +entry: + %vabd.i.i = tail call <8 x i8> @llvm.arm64.neon.uabd.v8i8(<8 x i8> %a, <8 x i8> %b) + %vmovl.i.i = zext <8 x i8> %vabd.i.i to <8 x i16> + ret <8 x i16> %vmovl.i.i +} + +define <4 x i32> @test_vabdl_u16(<4 x i16> %a, <4 x i16> %b) { +; CHECK-LABEL: test_vabdl_u16: +; CHECK: uabdl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h +entry: + %vabd2.i.i = tail call <4 x i16> @llvm.arm64.neon.uabd.v4i16(<4 x i16> %a, <4 x i16> %b) + %vmovl.i.i = zext <4 x i16> %vabd2.i.i to <4 x i32> + ret <4 x i32> %vmovl.i.i +} + +define <2 x i64> 
define <2 x i64> @test_vabdl_u32(<2 x i32> %a, <2 x i32> %b) { +; CHECK-LABEL: test_vabdl_u32: +; CHECK: uabdl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s +entry: + %vabd2.i.i = tail call <2 x i32> @llvm.arm64.neon.uabd.v2i32(<2 x i32> %a, <2 x i32> %b) + %vmovl.i.i = zext <2 x i32> %vabd2.i.i to <2 x i64> + ret <2 x i64> %vmovl.i.i +} + +define <8 x i16> @test_vabal_s8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) { +; CHECK-LABEL: test_vabal_s8: +; CHECK: sabal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b +entry: + %vabd.i.i.i = tail call <8 x i8> @llvm.arm64.neon.sabd.v8i8(<8 x i8> %b, <8 x i8> %c) + %vmovl.i.i.i = zext <8 x i8> %vabd.i.i.i to <8 x i16> + %add.i = add <8 x i16> %vmovl.i.i.i, %a + ret <8 x i16> %add.i +} + +define <4 x i32> @test_vabal_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) { +; CHECK-LABEL: test_vabal_s16: +; CHECK: sabal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h +entry: + %vabd2.i.i.i = tail call <4 x i16> @llvm.arm64.neon.sabd.v4i16(<4 x i16> %b, <4 x i16> %c) + %vmovl.i.i.i = zext <4 x i16> %vabd2.i.i.i to <4 x i32> + %add.i = add <4 x i32> %vmovl.i.i.i, %a + ret <4 x i32> %add.i +} + +define <2 x i64> @test_vabal_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) { +; CHECK-LABEL: test_vabal_s32: +; CHECK: sabal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s +entry: + %vabd2.i.i.i = tail call <2 x i32> @llvm.arm64.neon.sabd.v2i32(<2 x i32> %b, <2 x i32> %c) + %vmovl.i.i.i = zext <2 x i32> %vabd2.i.i.i to <2 x i64> + %add.i = add <2 x i64> %vmovl.i.i.i, %a + ret <2 x i64> %add.i +} + +define <8 x i16> @test_vabal_u8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) { +; CHECK-LABEL: test_vabal_u8: +; CHECK: uabal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b +entry: + %vabd.i.i.i = tail call <8 x i8> @llvm.arm64.neon.uabd.v8i8(<8 x i8> %b, <8 x i8> %c) + %vmovl.i.i.i = zext <8 x i8> %vabd.i.i.i to <8 x i16> + %add.i = add <8 x i16> %vmovl.i.i.i, %a + ret <8 x i16> %add.i +} + +define <4 x i32> @test_vabal_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) { +; CHECK-LABEL: test_vabal_u16: +; CHECK: uabal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h +entry: + %vabd2.i.i.i = tail call <4 x i16> @llvm.arm64.neon.uabd.v4i16(<4 x i16> %b, <4 x i16> %c) + %vmovl.i.i.i = zext <4 x i16> %vabd2.i.i.i to <4 x i32> + %add.i = add <4 x i32> %vmovl.i.i.i, %a + ret <4 x i32> %add.i +} + +define <2 x i64> @test_vabal_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) { +; CHECK-LABEL: test_vabal_u32: +; CHECK: uabal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s +entry: + %vabd2.i.i.i = tail call <2 x i32> @llvm.arm64.neon.uabd.v2i32(<2 x i32> %b, <2 x i32> %c) + %vmovl.i.i.i = zext <2 x i32> %vabd2.i.i.i to <2 x i64> + %add.i = add <2 x i64> %vmovl.i.i.i, %a + ret <2 x i64> %add.i +} + +define <8 x i16> @test_vabdl_high_s8(<16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: test_vabdl_high_s8: +; CHECK: sabdl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b +entry: + %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %vabd.i.i.i = tail call <8 x i8> @llvm.arm64.neon.sabd.v8i8(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i) + %vmovl.i.i.i = zext <8 x i8> %vabd.i.i.i to <8 x i16> + ret <8 x i16> %vmovl.i.i.i +} + +define <4 x i32> @test_vabdl_high_s16(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: test_vabdl_high_s16: +; CHECK: sabdl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vabd2.i.i.i = tail call <4 x i16> @llvm.arm64.neon.sabd.v4i16(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i) + %vmovl.i.i.i = zext <4 x i16> %vabd2.i.i.i to <4 x i32> + ret <4 x i32> %vmovl.i.i.i +} + +define <2 x i64> @test_vabdl_high_s32(<4 x i32> %a, <4 x i32> %b) { +; CHECK-LABEL: test_vabdl_high_s32: +; CHECK: sabdl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %vabd2.i.i.i = tail call <2 x i32> @llvm.arm64.neon.sabd.v2i32(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i) + %vmovl.i.i.i = zext <2 x i32> %vabd2.i.i.i to <2 x i64> + ret <2 x i64> %vmovl.i.i.i +} + +define <8 x i16> @test_vabdl_high_u8(<16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: test_vabdl_high_u8: +; CHECK: uabdl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b +entry: + %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %vabd.i.i.i = tail call <8 x i8> @llvm.arm64.neon.uabd.v8i8(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i) + %vmovl.i.i.i = zext <8 x i8> %vabd.i.i.i to <8 x i16> + ret <8 x i16> %vmovl.i.i.i +} + +define <4 x i32> @test_vabdl_high_u16(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: test_vabdl_high_u16: +; CHECK: uabdl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %vabd2.i.i.i = tail call <4 x i16> @llvm.arm64.neon.uabd.v4i16(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i) + %vmovl.i.i.i = zext <4 x i16> %vabd2.i.i.i to <4 x i32> + ret <4 x i32> %vmovl.i.i.i +} + +define <2 x i64> @test_vabdl_high_u32(<4 x i32> %a, <4 x i32> %b) { +; CHECK-LABEL: test_vabdl_high_u32: +; CHECK: uabdl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %vabd2.i.i.i = tail call <2 x i32> @llvm.arm64.neon.uabd.v2i32(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i) + %vmovl.i.i.i = zext <2 x i32> %vabd2.i.i.i to <2 x i64> + ret <2 x i64> %vmovl.i.i.i +} + +define <8 x i16> @test_vabal_high_s8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) { +; CHECK-LABEL: test_vabal_high_s8: +; CHECK: sabal2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b +entry: + %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %vabd.i.i.i.i = tail call <8 x i8> @llvm.arm64.neon.sabd.v8i8(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i) + %vmovl.i.i.i.i = zext <8 x i8> %vabd.i.i.i.i to <8 x i16> + %add.i.i = add <8 x i16> %vmovl.i.i.i.i, %a + ret <8 x i16> %add.i.i +} + +define <4 x i32> @test_vabal_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) { +; CHECK-LABEL: test_vabal_high_s16: +; CHECK: sabal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %vabd2.i.i.i.i = tail call <4 x i16> @llvm.arm64.neon.sabd.v4i16(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i) + %vmovl.i.i.i.i = zext <4 x i16> %vabd2.i.i.i.i to <4 x i32> + %add.i.i = add <4 x i32> %vmovl.i.i.i.i, %a + ret <4 x i32> %add.i.i +} + +
define <2 x i64> @test_vabal_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) { +; CHECK-LABEL: test_vabal_high_s32: +; CHECK: sabal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %vabd2.i.i.i.i = tail call <2 x i32> @llvm.arm64.neon.sabd.v2i32(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i) + %vmovl.i.i.i.i = zext <2 x i32> %vabd2.i.i.i.i to <2 x i64> + %add.i.i = add <2 x i64> %vmovl.i.i.i.i, %a + ret <2 x i64> %add.i.i +} + +define <8 x i16> @test_vabal_high_u8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) { +; CHECK-LABEL: test_vabal_high_u8: +; CHECK: uabal2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b +entry: + %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %vabd.i.i.i.i = tail call <8 x i8> @llvm.arm64.neon.uabd.v8i8(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i) + %vmovl.i.i.i.i = zext <8 x i8> %vabd.i.i.i.i to <8 x i16> + %add.i.i = add <8 x i16> %vmovl.i.i.i.i, %a + ret <8 x i16> %add.i.i +} + +define <4 x i32> @test_vabal_high_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) { +; CHECK-LABEL: test_vabal_high_u16: +; CHECK: uabal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %vabd2.i.i.i.i = tail call <4 x i16> @llvm.arm64.neon.uabd.v4i16(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i) + %vmovl.i.i.i.i = zext <4 x i16> %vabd2.i.i.i.i to <4 x i32> + %add.i.i = add <4 x i32> %vmovl.i.i.i.i, %a + ret <4 x i32> %add.i.i +} + +define <2 x i64> @test_vabal_high_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) { +; CHECK-LABEL: test_vabal_high_u32: +; CHECK: uabal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %vabd2.i.i.i.i = tail call <2 x i32> @llvm.arm64.neon.uabd.v2i32(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i) + %vmovl.i.i.i.i = zext <2 x i32> %vabd2.i.i.i.i to <2 x i64> + %add.i.i = add <2 x i64> %vmovl.i.i.i.i, %a + ret <2 x i64> %add.i.i +} + +define <8 x i16> @test_vmull_s8(<8 x i8> %a, <8 x i8> %b) { +; CHECK-LABEL: test_vmull_s8: +; CHECK: smull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b +entry: + %vmull.i = tail call <8 x i16> @llvm.arm64.neon.smull.v8i16(<8 x i8> %a, <8 x i8> %b) + ret <8 x i16> %vmull.i +} + +define <4 x i32> @test_vmull_s16(<4 x i16> %a, <4 x i16> %b) { +; CHECK-LABEL: test_vmull_s16: +; CHECK: smull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h +entry: + %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %a, <4 x i16> %b) + ret <4 x i32> %vmull2.i +} + +define <2 x i64> @test_vmull_s32(<2 x i32> %a, <2 x i32> %b) { +; CHECK-LABEL: test_vmull_s32: +; CHECK: smull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s +entry: + %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %a, <2 x i32> %b) + ret <2 x i64> %vmull2.i +} + +define <8 x i16> @test_vmull_u8(<8 x i8> %a, <8 x i8> %b) { +; CHECK-LABEL: test_vmull_u8: +; CHECK: umull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b +entry: + %vmull.i = tail call <8 x i16> @llvm.arm64.neon.umull.v8i16(<8 x i8> %a, <8 x i8> %b) + ret <8 x i16> %vmull.i +} + +define <4 x i32> @test_vmull_u16(<4 x i16> %a, <4 x i16> %b) { +; CHECK-LABEL: test_vmull_u16:
+; CHECK: umull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h +entry: + %vmull2.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %a, <4 x i16> %b) + ret <4 x i32> %vmull2.i +} + +define <2 x i64> @test_vmull_u32(<2 x i32> %a, <2 x i32> %b) { +; CHECK-LABEL: test_vmull_u32: +; CHECK: umull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s +entry: + %vmull2.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %a, <2 x i32> %b) + ret <2 x i64> %vmull2.i +} + +define <8 x i16> @test_vmull_high_s8(<16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: test_vmull_high_s8: +; CHECK: smull2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b +entry: + %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %vmull.i.i = tail call <8 x i16> @llvm.arm64.neon.smull.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i) + ret <8 x i16> %vmull.i.i +} + +define <4 x i32> @test_vmull_high_s16(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: test_vmull_high_s16: +; CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %vmull2.i.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i) + ret <4 x i32> %vmull2.i.i +} + +define <2 x i64> @test_vmull_high_s32(<4 x i32> %a, <4 x i32> %b) { +; CHECK-LABEL: test_vmull_high_s32: +; CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %vmull2.i.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i) + ret <2 x i64> %vmull2.i.i +} + +define <8 x i16> @test_vmull_high_u8(<16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: test_vmull_high_u8: +; CHECK: umull2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b +entry: + %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %vmull.i.i = tail call <8 x i16> @llvm.arm64.neon.umull.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i) + ret <8 x i16> %vmull.i.i +} + +define <4 x i32> @test_vmull_high_u16(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: test_vmull_high_u16: +; CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %vmull2.i.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i) + ret <4 x i32> %vmull2.i.i +} + +define <2 x i64> @test_vmull_high_u32(<4 x i32> %a, <4 x i32> %b) { +; CHECK-LABEL: test_vmull_high_u32: +; CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %vmull2.i.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i) + ret <2 x i64> %vmull2.i.i +} + +define <8 x i16> @test_vmlal_s8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) { +; CHECK-LABEL: test_vmlal_s8: +; CHECK: smlal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b +entry: + %vmull.i.i = tail call <8 x i16> @llvm.arm64.neon.smull.v8i16(<8 x i8> %b, <8 x i8> %c)
+ %add.i = add <8 x i16> %vmull.i.i, %a + ret <8 x i16> %add.i +} + +define <4 x i32> @test_vmlal_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) { +; CHECK-LABEL: test_vmlal_s16: +; CHECK: smlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h +entry: + %vmull2.i.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %c) + %add.i = add <4 x i32> %vmull2.i.i, %a + ret <4 x i32> %add.i +} + +define <2 x i64> @test_vmlal_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) { +; CHECK-LABEL: test_vmlal_s32: +; CHECK: smlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s +entry: + %vmull2.i.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %c) + %add.i = add <2 x i64> %vmull2.i.i, %a + ret <2 x i64> %add.i +} + +define <8 x i16> @test_vmlal_u8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) { +; CHECK-LABEL: test_vmlal_u8: +; CHECK: umlal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b +entry: + %vmull.i.i = tail call <8 x i16> @llvm.arm64.neon.umull.v8i16(<8 x i8> %b, <8 x i8> %c) + %add.i = add <8 x i16> %vmull.i.i, %a + ret <8 x i16> %add.i +} + +define <4 x i32> @test_vmlal_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) { +; CHECK-LABEL: test_vmlal_u16: +; CHECK: umlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h +entry: + %vmull2.i.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %c) + %add.i = add <4 x i32> %vmull2.i.i, %a + ret <4 x i32> %add.i +} + +define <2 x i64> @test_vmlal_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) { +; CHECK-LABEL: test_vmlal_u32: +; CHECK: umlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s +entry: + %vmull2.i.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %c) + %add.i = add <2 x i64> %vmull2.i.i, %a + ret <2 x i64> %add.i +} + +define <8 x i16> @test_vmlal_high_s8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) { +; CHECK-LABEL: test_vmlal_high_s8: +; CHECK: smlal2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b +entry: + %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %vmull.i.i.i = tail call <8 x i16> @llvm.arm64.neon.smull.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i) + %add.i.i = add <8 x i16> %vmull.i.i.i, %a + ret <8 x i16> %add.i.i +} + +define <4 x i32> @test_vmlal_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) { +; CHECK-LABEL: test_vmlal_high_s16: +; CHECK: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %vmull2.i.i.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i) + %add.i.i = add <4 x i32> %vmull2.i.i.i, %a + ret <4 x i32> %add.i.i +} + +define <2 x i64> @test_vmlal_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) { +; CHECK-LABEL: test_vmlal_high_s32: +; CHECK: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %vmull2.i.i.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i) + %add.i.i = add <2 x i64> %vmull2.i.i.i, %a + ret <2 x i64> %add.i.i +} + +define <8 x i16> @test_vmlal_high_u8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) { +; CHECK-LABEL: test_vmlal_high_u8: +; CHECK: umlal2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b +entry:
+ %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %vmull.i.i.i = tail call <8 x i16> @llvm.arm64.neon.umull.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i) + %add.i.i = add <8 x i16> %vmull.i.i.i, %a + ret <8 x i16> %add.i.i +} + +define <4 x i32> @test_vmlal_high_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) { +; CHECK-LABEL: test_vmlal_high_u16: +; CHECK: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %vmull2.i.i.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i) + %add.i.i = add <4 x i32> %vmull2.i.i.i, %a + ret <4 x i32> %add.i.i +} + +define <2 x i64> @test_vmlal_high_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) { +; CHECK-LABEL: test_vmlal_high_u32: +; CHECK: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %vmull2.i.i.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i) + %add.i.i = add <2 x i64> %vmull2.i.i.i, %a + ret <2 x i64> %add.i.i +} + +define <8 x i16> @test_vmlsl_s8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) { +; CHECK-LABEL: test_vmlsl_s8: +; CHECK: smlsl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b +entry: + %vmull.i.i = tail call <8 x i16> @llvm.arm64.neon.smull.v8i16(<8 x i8> %b, <8 x i8> %c) + %sub.i = sub <8 x i16> %a, %vmull.i.i + ret <8 x i16> %sub.i +} + +define <4 x i32> @test_vmlsl_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) { +; CHECK-LABEL: test_vmlsl_s16: +; CHECK: smlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h +entry: + %vmull2.i.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %b, <4 x i16> %c) + %sub.i = sub <4 x i32> %a, %vmull2.i.i + ret <4 x i32> %sub.i +} + +define <2 x i64> @test_vmlsl_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) { +; CHECK-LABEL: test_vmlsl_s32: +; CHECK: smlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s +entry: + %vmull2.i.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %b, <2 x i32> %c) + %sub.i = sub <2 x i64> %a, %vmull2.i.i + ret <2 x i64> %sub.i +} + +define <8 x i16> @test_vmlsl_u8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) { +; CHECK-LABEL: test_vmlsl_u8: +; CHECK: umlsl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b +entry: + %vmull.i.i = tail call <8 x i16> @llvm.arm64.neon.umull.v8i16(<8 x i8> %b, <8 x i8> %c) + %sub.i = sub <8 x i16> %a, %vmull.i.i + ret <8 x i16> %sub.i +} + +define <4 x i32> @test_vmlsl_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) { +; CHECK-LABEL: test_vmlsl_u16: +; CHECK: umlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h +entry: + %vmull2.i.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %b, <4 x i16> %c) + %sub.i = sub <4 x i32> %a, %vmull2.i.i + ret <4 x i32> %sub.i +} + +define <2 x i64> @test_vmlsl_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) { +; CHECK-LABEL: test_vmlsl_u32: +; CHECK: umlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s +entry: + %vmull2.i.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %b, <2 x i32> %c) + %sub.i = sub <2 x i64> %a, %vmull2.i.i + ret <2 x i64> %sub.i +} + +define <8 x i16> @test_vmlsl_high_s8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) { +; CHECK-LABEL: test_vmlsl_high_s8:
+; CHECK: smlsl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b +entry: + %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %vmull.i.i.i = tail call <8 x i16> @llvm.arm64.neon.smull.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i) + %sub.i.i = sub <8 x i16> %a, %vmull.i.i.i + ret <8 x i16> %sub.i.i +} + +define <4 x i32> @test_vmlsl_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) { +; CHECK-LABEL: test_vmlsl_high_s16: +; CHECK: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %vmull2.i.i.i = tail call <4 x i32> @llvm.arm64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i) + %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i + ret <4 x i32> %sub.i.i +} + +define <2 x i64> @test_vmlsl_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) { +; CHECK-LABEL: test_vmlsl_high_s32: +; CHECK: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %vmull2.i.i.i = tail call <2 x i64> @llvm.arm64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i) + %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i + ret <2 x i64> %sub.i.i +} + +define <8 x i16> @test_vmlsl_high_u8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) { +; CHECK-LABEL: test_vmlsl_high_u8: +; CHECK: umlsl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b +entry: + %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %vmull.i.i.i = tail call <8 x i16> @llvm.arm64.neon.umull.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i) + %sub.i.i = sub <8 x i16> %a, %vmull.i.i.i + ret <8 x i16> %sub.i.i +} + +define <4 x i32> @test_vmlsl_high_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) { +; CHECK-LABEL: test_vmlsl_high_u16: +; CHECK: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %vmull2.i.i.i = tail call <4 x i32> @llvm.arm64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i) + %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i + ret <4 x i32> %sub.i.i +} + +define <2 x i64> @test_vmlsl_high_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) { +; CHECK-LABEL: test_vmlsl_high_u32: +; CHECK: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %vmull2.i.i.i = tail call <2 x i64> @llvm.arm64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i) + %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i + ret <2 x i64> %sub.i.i +} + +define <4 x i32> @test_vqdmull_s16(<4 x i16> %a, <4 x i16> %b) { +; CHECK-LABEL: test_vqdmull_s16: +; CHECK: sqdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h +entry: + %vqdmull2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %a, <4 x i16> %b) + ret <4 x i32> %vqdmull2.i +} + +define <2 x i64> @test_vqdmull_s32(<2 x i32> %a, <2 x i32> %b) { +; CHECK-LABEL: test_vqdmull_s32: +; CHECK: sqdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s +entry:
+ %vqdmull2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %a, <2 x i32> %b) + ret <2 x i64> %vqdmull2.i +} + +define <4 x i32> @test_vqdmlal_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) { +; CHECK-LABEL: test_vqdmlal_s16: +; CHECK: sqdmlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h +entry: + %vqdmlal2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %b, <4 x i16> %c) + %vqdmlal4.i = tail call <4 x i32> @llvm.arm64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i) + ret <4 x i32> %vqdmlal4.i +} + +define <2 x i64> @test_vqdmlal_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) { +; CHECK-LABEL: test_vqdmlal_s32: +; CHECK: sqdmlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s +entry: + %vqdmlal2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %b, <2 x i32> %c) + %vqdmlal4.i = tail call <2 x i64> @llvm.arm64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i) + ret <2 x i64> %vqdmlal4.i +} + +define <4 x i32> @test_vqdmlsl_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) { +; CHECK-LABEL: test_vqdmlsl_s16: +; CHECK: sqdmlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h +entry: + %vqdmlsl2.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %b, <4 x i16> %c) + %vqdmlsl4.i = tail call <4 x i32> @llvm.arm64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i) + ret <4 x i32> %vqdmlsl4.i +} + +define <2 x i64> @test_vqdmlsl_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) { +; CHECK-LABEL: test_vqdmlsl_s32: +; CHECK: sqdmlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s +entry: + %vqdmlsl2.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %b, <2 x i32> %c) + %vqdmlsl4.i = tail call <2 x i64> @llvm.arm64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i) + ret <2 x i64> %vqdmlsl4.i +} + +define <4 x i32> @test_vqdmull_high_s16(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: test_vqdmull_high_s16: +; CHECK: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %vqdmull2.i.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i) + ret <4 x i32> %vqdmull2.i.i +} + +define <2 x i64> @test_vqdmull_high_s32(<4 x i32> %a, <4 x i32> %b) { +; CHECK-LABEL: test_vqdmull_high_s32: +; CHECK: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %vqdmull2.i.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i) + ret <2 x i64> %vqdmull2.i.i +} + +define <4 x i32> @test_vqdmlal_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) { +; CHECK-LABEL: test_vqdmlal_high_s16: +; CHECK: sqdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %vqdmlal2.i.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i) + %vqdmlal4.i.i = tail call <4 x i32> @llvm.arm64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i.i) + ret <4 x i32> %vqdmlal4.i.i +} + +define <2 x i64> @test_vqdmlal_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) { +; CHECK-LABEL: test_vqdmlal_high_s32: +; CHECK: sqdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %vqdmlal2.i.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i) + %vqdmlal4.i.i = tail call <2 x i64> @llvm.arm64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i.i) + ret <2 x i64> %vqdmlal4.i.i +} + +define <4 x i32> @test_vqdmlsl_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) { +; CHECK-LABEL: test_vqdmlsl_high_s16: +; CHECK: sqdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h +entry: + %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %vqdmlsl2.i.i = tail call <4 x i32> @llvm.arm64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i) + %vqdmlsl4.i.i = tail call <4 x i32> @llvm.arm64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i.i) + ret <4 x i32> %vqdmlsl4.i.i +} + +define <2 x i64> @test_vqdmlsl_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) { +; CHECK-LABEL: test_vqdmlsl_high_s32: +; CHECK: sqdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +entry: + %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3> + %vqdmlsl2.i.i = tail call <2 x i64> @llvm.arm64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i) + %vqdmlsl4.i.i = tail call <2 x i64> @llvm.arm64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i.i) + ret <2 x i64> %vqdmlsl4.i.i +} + +define <8 x i16> @test_vmull_p8(<8 x i8> %a, <8 x i8> %b) { +; CHECK-LABEL: test_vmull_p8: +; CHECK: pmull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b +entry: + %vmull.i = tail call <8 x i16> @llvm.arm64.neon.pmull.v8i16(<8 x i8> %a, <8 x i8> %b) + ret <8 x i16> %vmull.i +} + +define <8 x i16> @test_vmull_high_p8(<16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: test_vmull_high_p8: +; CHECK: pmull2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b +entry: + %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %vmull.i.i = tail call <8 x i16> @llvm.arm64.neon.pmull.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i) + ret <8 x i16> %vmull.i.i +} + +define i128 @test_vmull_p64(i64 %a, i64 %b) #4 { +; CHECK-LABEL: test_vmull_p64 +; CHECK: pmull {{v[0-9]+}}.1q, {{v[0-9]+}}.1d, {{v[0-9]+}}.1d +entry: + %vmull2.i = tail call <16 x i8> @llvm.arm64.neon.pmull64(i64 %a, i64 %b) + %vmull3.i = bitcast <16 x i8> %vmull2.i to i128 + ret i128 %vmull3.i +} + +define i128 @test_vmull_high_p64(<2 x i64> %a, <2 x i64> %b) #4 { +; CHECK-LABEL: test_vmull_high_p64 +; CHECK: pmull2 {{v[0-9]+}}.1q, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d +entry: + %0 = extractelement <2 x i64> %a, i32 1 + %1 = extractelement <2 x i64> %b, i32 1 + %vmull2.i.i = tail call <16 x i8> @llvm.arm64.neon.pmull64(i64 %0, i64 %1) #1 + %vmull3.i.i = bitcast <16 x i8> %vmull2.i.i to i128 + ret i128 %vmull3.i.i +} + +declare <16 x i8> @llvm.arm64.neon.pmull64(i64, i64) #5 + + diff --git a/test/CodeGen/ARM64/aarch64-neon-aba-abd.ll b/test/CodeGen/ARM64/aarch64-neon-aba-abd.ll new file mode 100644 index 00000000000..9d321b23976 --- /dev/null +++ b/test/CodeGen/ARM64/aarch64-neon-aba-abd.ll @@ -0,0 +1,236 @@ +; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=+neon < %s | FileCheck %s + +declare <8 x i8> @llvm.arm64.neon.uabd.v8i8(<8 x i8>, <8 x i8>) +declare <8 x i8> @llvm.arm64.neon.sabd.v8i8(<8 x i8>, <8 x i8>) + 
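+; Note: there is no separate accumulating intrinsic in this file: each *aba +; test computes the absolute difference with the [su]abd intrinsic and then +; adds the result with a plain IR add, and the CHECK lines verify that the +; backend folds the pair into a single accumulating instruction. A minimal +; sketch of the pattern, kept in comments (the function name is illustrative +; only): +;   define <8 x i8> @aba_sketch(<8 x i8> %acc, <8 x i8> %lhs, <8 x i8> %rhs) { +;     %abd = call <8 x i8> @llvm.arm64.neon.uabd.v8i8(<8 x i8> %lhs, <8 x i8> %rhs) +;     %aba = add <8 x i8> %acc, %abd   ; expected to select, with the call, as one uaba +;     ret <8 x i8> %aba +;   } + 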
+define <8 x i8> @test_uabd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) { +; CHECK: test_uabd_v8i8: + %abd = call <8 x i8> @llvm.arm64.neon.uabd.v8i8(<8 x i8> %lhs, <8 x i8> %rhs) +; CHECK: uabd v0.8b, v0.8b, v1.8b + ret <8 x i8> %abd +} + +define <8 x i8> @test_uaba_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) { +; CHECK: test_uaba_v8i8: + %abd = call <8 x i8> @llvm.arm64.neon.uabd.v8i8(<8 x i8> %lhs, <8 x i8> %rhs) + %aba = add <8 x i8> %lhs, %abd +; CHECK: uaba v0.8b, v0.8b, v1.8b + ret <8 x i8> %aba +} + +define <8 x i8> @test_sabd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) { +; CHECK: test_sabd_v8i8: + %abd = call <8 x i8> @llvm.arm64.neon.sabd.v8i8(<8 x i8> %lhs, <8 x i8> %rhs) +; CHECK: sabd v0.8b, v0.8b, v1.8b + ret <8 x i8> %abd +} + +define <8 x i8> @test_saba_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) { +; CHECK: test_saba_v8i8: + %abd = call <8 x i8> @llvm.arm64.neon.sabd.v8i8(<8 x i8> %lhs, <8 x i8> %rhs) + %aba = add <8 x i8> %lhs, %abd +; CHECK: saba v0.8b, v0.8b, v1.8b + ret <8 x i8> %aba +} + +declare <16 x i8> @llvm.arm64.neon.uabd.v16i8(<16 x i8>, <16 x i8>) +declare <16 x i8> @llvm.arm64.neon.sabd.v16i8(<16 x i8>, <16 x i8>) + +define <16 x i8> @test_uabd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) { +; CHECK: test_uabd_v16i8: + %abd = call <16 x i8> @llvm.arm64.neon.uabd.v16i8(<16 x i8> %lhs, <16 x i8> %rhs) +; CHECK: uabd v0.16b, v0.16b, v1.16b + ret <16 x i8> %abd +} + +define <16 x i8> @test_uaba_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) { +; CHECK: test_uaba_v16i8: + %abd = call <16 x i8> @llvm.arm64.neon.uabd.v16i8(<16 x i8> %lhs, <16 x i8> %rhs) + %aba = add <16 x i8> %lhs, %abd +; CHECK: uaba v0.16b, v0.16b, v1.16b + ret <16 x i8> %aba +} + +define <16 x i8> @test_sabd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) { +; CHECK: test_sabd_v16i8: + %abd = call <16 x i8> @llvm.arm64.neon.sabd.v16i8(<16 x i8> %lhs, <16 x i8> %rhs) +; CHECK: sabd v0.16b, v0.16b, v1.16b + ret <16 x i8> %abd +} + +define <16 x i8> @test_saba_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) { +; CHECK: test_saba_v16i8: + %abd = call <16 x i8> @llvm.arm64.neon.sabd.v16i8(<16 x i8> %lhs, <16 x i8> %rhs) + %aba = add <16 x i8> %lhs, %abd +; CHECK: saba v0.16b, v0.16b, v1.16b + ret <16 x i8> %aba +} + +declare <4 x i16> @llvm.arm64.neon.uabd.v4i16(<4 x i16>, <4 x i16>) +declare <4 x i16> @llvm.arm64.neon.sabd.v4i16(<4 x i16>, <4 x i16>) + +define <4 x i16> @test_uabd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) { +; CHECK: test_uabd_v4i16: + %abd = call <4 x i16> @llvm.arm64.neon.uabd.v4i16(<4 x i16> %lhs, <4 x i16> %rhs) +; CHECK: uabd v0.4h, v0.4h, v1.4h + ret <4 x i16> %abd +} + +define <4 x i16> @test_uaba_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) { +; CHECK: test_uaba_v4i16: + %abd = call <4 x i16> @llvm.arm64.neon.uabd.v4i16(<4 x i16> %lhs, <4 x i16> %rhs) + %aba = add <4 x i16> %lhs, %abd +; CHECK: uaba v0.4h, v0.4h, v1.4h + ret <4 x i16> %aba +} + +define <4 x i16> @test_sabd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) { +; CHECK: test_sabd_v4i16: + %abd = call <4 x i16> @llvm.arm64.neon.sabd.v4i16(<4 x i16> %lhs, <4 x i16> %rhs) +; CHECK: sabd v0.4h, v0.4h, v1.4h + ret <4 x i16> %abd +} + +define <4 x i16> @test_saba_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) { +; CHECK: test_saba_v4i16: + %abd = call <4 x i16> @llvm.arm64.neon.sabd.v4i16(<4 x i16> %lhs, <4 x i16> %rhs) + %aba = add <4 x i16> %lhs, %abd +; CHECK: saba v0.4h, v0.4h, v1.4h + ret <4 x i16> %aba +} + +declare <8 x i16> @llvm.arm64.neon.uabd.v8i16(<8 x i16>, <8 x i16>) +declare <8 x i16> @llvm.arm64.neon.sabd.v8i16(<8 x i16>, <8 x i16>) + +define <8 x i16> @test_uabd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) { 
+; CHECK: test_uabd_v8i16: + %abd = call <8 x i16> @llvm.arm64.neon.uabd.v8i16(<8 x i16> %lhs, <8 x i16> %rhs) +; CHECK: uabd v0.8h, v0.8h, v1.8h + ret <8 x i16> %abd +} + +define <8 x i16> @test_uaba_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) { +; CHECK: test_uaba_v8i16: + %abd = call <8 x i16> @llvm.arm64.neon.uabd.v8i16(<8 x i16> %lhs, <8 x i16> %rhs) + %aba = add <8 x i16> %lhs, %abd +; CHECK: uaba v0.8h, v0.8h, v1.8h + ret <8 x i16> %aba +} + +define <8 x i16> @test_sabd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) { +; CHECK: test_sabd_v8i16: + %abd = call <8 x i16> @llvm.arm64.neon.sabd.v8i16(<8 x i16> %lhs, <8 x i16> %rhs) +; CHECK: sabd v0.8h, v0.8h, v1.8h + ret <8 x i16> %abd +} + +define <8 x i16> @test_saba_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) { +; CHECK: test_saba_v8i16: + %abd = call <8 x i16> @llvm.arm64.neon.sabd.v8i16(<8 x i16> %lhs, <8 x i16> %rhs) + %aba = add <8 x i16> %lhs, %abd +; CHECK: saba v0.8h, v0.8h, v1.8h + ret <8 x i16> %aba +} + +declare <2 x i32> @llvm.arm64.neon.uabd.v2i32(<2 x i32>, <2 x i32>) +declare <2 x i32> @llvm.arm64.neon.sabd.v2i32(<2 x i32>, <2 x i32>) + +define <2 x i32> @test_uabd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) { +; CHECK: test_uabd_v2i32: + %abd = call <2 x i32> @llvm.arm64.neon.uabd.v2i32(<2 x i32> %lhs, <2 x i32> %rhs) +; CHECK: uabd v0.2s, v0.2s, v1.2s + ret <2 x i32> %abd +} + +define <2 x i32> @test_uaba_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) { +; CHECK: test_uaba_v2i32: + %abd = call <2 x i32> @llvm.arm64.neon.uabd.v2i32(<2 x i32> %lhs, <2 x i32> %rhs) + %aba = add <2 x i32> %lhs, %abd +; CHECK: uaba v0.2s, v0.2s, v1.2s + ret <2 x i32> %aba +} + +define <2 x i32> @test_sabd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) { +; CHECK: test_sabd_v2i32: + %abd = call <2 x i32> @llvm.arm64.neon.sabd.v2i32(<2 x i32> %lhs, <2 x i32> %rhs) +; CHECK: sabd v0.2s, v0.2s, v1.2s + ret <2 x i32> %abd +} + +define <2 x i32> @test_sabd_v2i32_const() { +; CHECK: test_sabd_v2i32_const: +; CHECK: movi d1, #0x00ffffffff0000 +; CHECK-NEXT: sabd v0.2s, v0.2s, v1.2s + %1 = tail call <2 x i32> @llvm.arm64.neon.sabd.v2i32( + <2 x i32> <i32 -2147483648, i32 2147450880>, + <2 x i32> <i32 -65536, i32 65535>) + ret <2 x i32> %1 +} + +define <2 x i32> @test_saba_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) { +; CHECK: test_saba_v2i32: + %abd = call <2 x i32> @llvm.arm64.neon.sabd.v2i32(<2 x i32> %lhs, <2 x i32> %rhs) + %aba = add <2 x i32> %lhs, %abd +; CHECK: saba v0.2s, v0.2s, v1.2s + ret <2 x i32> %aba +} + +declare <4 x i32> @llvm.arm64.neon.uabd.v4i32(<4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.arm64.neon.sabd.v4i32(<4 x i32>, <4 x i32>) + +define <4 x i32> @test_uabd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) { +; CHECK: test_uabd_v4i32: + %abd = call <4 x i32> @llvm.arm64.neon.uabd.v4i32(<4 x i32> %lhs, <4 x i32> %rhs) +; CHECK: uabd v0.4s, v0.4s, v1.4s + ret <4 x i32> %abd +} + +define <4 x i32> @test_uaba_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) { +; CHECK: test_uaba_v4i32: + %abd = call <4 x i32> @llvm.arm64.neon.uabd.v4i32(<4 x i32> %lhs, <4 x i32> %rhs) + %aba = add <4 x i32> %lhs, %abd +; CHECK: uaba v0.4s, v0.4s, v1.4s + ret <4 x i32> %aba +} + +define <4 x i32> @test_sabd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) { +; CHECK: test_sabd_v4i32: + %abd = call <4 x i32> @llvm.arm64.neon.sabd.v4i32(<4 x i32> %lhs, <4 x i32> %rhs) +; CHECK: sabd v0.4s, v0.4s, v1.4s + ret <4 x i32> %abd +} + +define <4 x i32> @test_saba_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) { +; CHECK: test_saba_v4i32: + %abd = call <4 x i32> @llvm.arm64.neon.sabd.v4i32(<4 x i32> %lhs, <4 x i32> %rhs) + %aba = add <4 x i32> %lhs, %abd +; CHECK: saba v0.4s, v0.4s, v1.4s + ret <4 x 
i32> %aba +} + +declare <2 x float> @llvm.arm64.neon.fabd.v2f32(<2 x float>, <2 x float>) + +define <2 x float> @test_fabd_v2f32(<2 x float> %lhs, <2 x float> %rhs) { +; CHECK: test_fabd_v2f32: + %abd = call <2 x float> @llvm.arm64.neon.fabd.v2f32(<2 x float> %lhs, <2 x float> %rhs) +; CHECK: fabd v0.2s, v0.2s, v1.2s + ret <2 x float> %abd +} + +declare <4 x float> @llvm.arm64.neon.fabd.v4f32(<4 x float>, <4 x float>) + +define <4 x float> @test_fabd_v4f32(<4 x float> %lhs, <4 x float> %rhs) { +; CHECK: test_fabd_v4f32: + %abd = call <4 x float> @llvm.arm64.neon.fabd.v4f32(<4 x float> %lhs, <4 x float> %rhs) +; CHECK: fabd v0.4s, v0.4s, v1.4s + ret <4 x float> %abd +} + +declare <2 x double> @llvm.arm64.neon.fabd.v2f64(<2 x double>, <2 x double>) + +define <2 x double> @test_fabd_v2f64(<2 x double> %lhs, <2 x double> %rhs) { +; CHECK: test_fabd_v2f64: + %abd = call <2 x double> @llvm.arm64.neon.fabd.v2f64(<2 x double> %lhs, <2 x double> %rhs) +; CHECK: fabd v0.2d, v0.2d, v1.2d + ret <2 x double> %abd +} diff --git a/test/CodeGen/ARM64/aarch64-neon-across.ll b/test/CodeGen/ARM64/aarch64-neon-across.ll new file mode 100644 index 00000000000..5a3538ba88e --- /dev/null +++ b/test/CodeGen/ARM64/aarch64-neon-across.ll @@ -0,0 +1,460 @@ +; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s + +declare float @llvm.arm64.neon.fminnmv.f32.v4f32(<4 x float>) + +declare float @llvm.arm64.neon.fmaxnmv.f32.v4f32(<4 x float>) + +declare float @llvm.arm64.neon.fminv.f32.v4f32(<4 x float>) + +declare float @llvm.arm64.neon.fmaxv.f32.v4f32(<4 x float>) + +declare i32 @llvm.arm64.neon.saddv.i32.v4i32(<4 x i32>) + +declare i32 @llvm.arm64.neon.saddv.i32.v8i16(<8 x i16>) + +declare i32 @llvm.arm64.neon.saddv.i32.v16i8(<16 x i8>) + +declare i32 @llvm.arm64.neon.saddv.i32.v4i16(<4 x i16>) + +declare i32 @llvm.arm64.neon.saddv.i32.v8i8(<8 x i8>) + +declare i32 @llvm.arm64.neon.uminv.i32.v4i32(<4 x i32>) + +declare i32 @llvm.arm64.neon.uminv.i32.v8i16(<8 x i16>) + +declare i32 @llvm.arm64.neon.uminv.i32.v16i8(<16 x i8>) + +declare i32 @llvm.arm64.neon.sminv.i32.v4i32(<4 x i32>) + +declare i32 @llvm.arm64.neon.sminv.i32.v8i16(<8 x i16>) + +declare i32 @llvm.arm64.neon.sminv.i32.v16i8(<16 x i8>) + +declare i32 @llvm.arm64.neon.uminv.i32.v4i16(<4 x i16>) + +declare i32 @llvm.arm64.neon.uminv.i32.v8i8(<8 x i8>) + +declare i32 @llvm.arm64.neon.sminv.i32.v4i16(<4 x i16>) + +declare i32 @llvm.arm64.neon.sminv.i32.v8i8(<8 x i8>) + +declare i32 @llvm.arm64.neon.umaxv.i32.v4i32(<4 x i32>) + +declare i32 @llvm.arm64.neon.umaxv.i32.v8i16(<8 x i16>) + +declare i32 @llvm.arm64.neon.umaxv.i32.v16i8(<16 x i8>) + +declare i32 @llvm.arm64.neon.smaxv.i32.v4i32(<4 x i32>) + +declare i32 @llvm.arm64.neon.smaxv.i32.v8i16(<8 x i16>) + +declare i32 @llvm.arm64.neon.smaxv.i32.v16i8(<16 x i8>) + +declare i32 @llvm.arm64.neon.umaxv.i32.v4i16(<4 x i16>) + +declare i32 @llvm.arm64.neon.umaxv.i32.v8i8(<8 x i8>) + +declare i32 @llvm.arm64.neon.smaxv.i32.v4i16(<4 x i16>) + +declare i32 @llvm.arm64.neon.smaxv.i32.v8i8(<8 x i8>) + +declare i64 @llvm.arm64.neon.uaddlv.i64.v4i32(<4 x i32>) + +declare i32 @llvm.arm64.neon.uaddlv.i32.v8i16(<8 x i16>) + +declare i32 @llvm.arm64.neon.uaddlv.i32.v16i8(<16 x i8>) + +declare i64 @llvm.arm64.neon.saddlv.i64.v4i32(<4 x i32>) + +declare i32 @llvm.arm64.neon.saddlv.i32.v8i16(<8 x i16>) + +declare i32 @llvm.arm64.neon.saddlv.i32.v16i8(<16 x i8>) + +declare i32 @llvm.arm64.neon.uaddlv.i32.v4i16(<4 x i16>) + +declare i32 @llvm.arm64.neon.uaddlv.i32.v8i8(<8 x i8>) + 
+declare i32 @llvm.arm64.neon.saddlv.i32.v4i16(<4 x i16>) + +declare i32 @llvm.arm64.neon.saddlv.i32.v8i8(<8 x i8>) + +define i16 @test_vaddlv_s8(<8 x i8> %a) { +; CHECK: test_vaddlv_s8: +; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.8b +entry: + %saddlvv.i = tail call i32 @llvm.arm64.neon.saddlv.i32.v8i8(<8 x i8> %a) + %0 = trunc i32 %saddlvv.i to i16 + ret i16 %0 +} + +define i32 @test_vaddlv_s16(<4 x i16> %a) { +; CHECK: test_vaddlv_s16: +; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.4h +entry: + %saddlvv.i = tail call i32 @llvm.arm64.neon.saddlv.i32.v4i16(<4 x i16> %a) + ret i32 %saddlvv.i +} + +define i16 @test_vaddlv_u8(<8 x i8> %a) { +; CHECK: test_vaddlv_u8: +; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.8b +entry: + %uaddlvv.i = tail call i32 @llvm.arm64.neon.uaddlv.i32.v8i8(<8 x i8> %a) + %0 = trunc i32 %uaddlvv.i to i16 + ret i16 %0 +} + +define i32 @test_vaddlv_u16(<4 x i16> %a) { +; CHECK: test_vaddlv_u16: +; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.4h +entry: + %uaddlvv.i = tail call i32 @llvm.arm64.neon.uaddlv.i32.v4i16(<4 x i16> %a) + ret i32 %uaddlvv.i +} + +define i16 @test_vaddlvq_s8(<16 x i8> %a) { +; CHECK: test_vaddlvq_s8: +; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.16b +entry: + %saddlvv.i = tail call i32 @llvm.arm64.neon.saddlv.i32.v16i8(<16 x i8> %a) + %0 = trunc i32 %saddlvv.i to i16 + ret i16 %0 +} + +define i32 @test_vaddlvq_s16(<8 x i16> %a) { +; CHECK: test_vaddlvq_s16: +; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.8h +entry: + %saddlvv.i = tail call i32 @llvm.arm64.neon.saddlv.i32.v8i16(<8 x i16> %a) + ret i32 %saddlvv.i +} + +define i64 @test_vaddlvq_s32(<4 x i32> %a) { +; CHECK: test_vaddlvq_s32: +; CHECK: saddlv d{{[0-9]+}}, {{v[0-9]+}}.4s +entry: + %saddlvv.i = tail call i64 @llvm.arm64.neon.saddlv.i64.v4i32(<4 x i32> %a) + ret i64 %saddlvv.i +} + +define i16 @test_vaddlvq_u8(<16 x i8> %a) { +; CHECK: test_vaddlvq_u8: +; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.16b +entry: + %uaddlvv.i = tail call i32 @llvm.arm64.neon.uaddlv.i32.v16i8(<16 x i8> %a) + %0 = trunc i32 %uaddlvv.i to i16 + ret i16 %0 +} + +define i32 @test_vaddlvq_u16(<8 x i16> %a) { +; CHECK: test_vaddlvq_u16: +; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.8h +entry: + %uaddlvv.i = tail call i32 @llvm.arm64.neon.uaddlv.i32.v8i16(<8 x i16> %a) + ret i32 %uaddlvv.i +} + +define i64 @test_vaddlvq_u32(<4 x i32> %a) { +; CHECK: test_vaddlvq_u32: +; CHECK: uaddlv d{{[0-9]+}}, {{v[0-9]+}}.4s +entry: + %uaddlvv.i = tail call i64 @llvm.arm64.neon.uaddlv.i64.v4i32(<4 x i32> %a) + ret i64 %uaddlvv.i +} + +define i8 @test_vmaxv_s8(<8 x i8> %a) { +; CHECK: test_vmaxv_s8: +; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.8b +entry: + %smaxv.i = tail call i32 @llvm.arm64.neon.smaxv.i32.v8i8(<8 x i8> %a) + %0 = trunc i32 %smaxv.i to i8 + ret i8 %0 +} + +define i16 @test_vmaxv_s16(<4 x i16> %a) { +; CHECK: test_vmaxv_s16: +; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.4h +entry: + %smaxv.i = tail call i32 @llvm.arm64.neon.smaxv.i32.v4i16(<4 x i16> %a) + %0 = trunc i32 %smaxv.i to i16 + ret i16 %0 +} + +define i8 @test_vmaxv_u8(<8 x i8> %a) { +; CHECK: test_vmaxv_u8: +; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.8b +entry: + %umaxv.i = tail call i32 @llvm.arm64.neon.umaxv.i32.v8i8(<8 x i8> %a) + %0 = trunc i32 %umaxv.i to i8 + ret i8 %0 +} + +define i16 @test_vmaxv_u16(<4 x i16> %a) { +; CHECK: test_vmaxv_u16: +; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.4h +entry: + %umaxv.i = tail call i32 @llvm.arm64.neon.umaxv.i32.v4i16(<4 x i16> %a) + %0 = trunc i32 %umaxv.i to i16 + ret i16 %0 +} + +define i8 @test_vmaxvq_s8(<16 x i8> %a) { +; CHECK: test_vmaxvq_s8: +; 
CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.16b +entry: + %smaxv.i = tail call i32 @llvm.arm64.neon.smaxv.i32.v16i8(<16 x i8> %a) + %0 = trunc i32 %smaxv.i to i8 + ret i8 %0 +} + +define i16 @test_vmaxvq_s16(<8 x i16> %a) { +; CHECK: test_vmaxvq_s16: +; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.8h +entry: + %smaxv.i = tail call i32 @llvm.arm64.neon.smaxv.i32.v8i16(<8 x i16> %a) + %0 = trunc i32 %smaxv.i to i16 + ret i16 %0 +} + +define i32 @test_vmaxvq_s32(<4 x i32> %a) { +; CHECK: test_vmaxvq_s32: +; CHECK: smaxv s{{[0-9]+}}, {{v[0-9]+}}.4s +entry: + %smaxv.i = tail call i32 @llvm.arm64.neon.smaxv.i32.v4i32(<4 x i32> %a) + ret i32 %smaxv.i +} + +define i8 @test_vmaxvq_u8(<16 x i8> %a) { +; CHECK: test_vmaxvq_u8: +; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.16b +entry: + %umaxv.i = tail call i32 @llvm.arm64.neon.umaxv.i32.v16i8(<16 x i8> %a) + %0 = trunc i32 %umaxv.i to i8 + ret i8 %0 +} + +define i16 @test_vmaxvq_u16(<8 x i16> %a) { +; CHECK: test_vmaxvq_u16: +; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.8h +entry: + %umaxv.i = tail call i32 @llvm.arm64.neon.umaxv.i32.v8i16(<8 x i16> %a) + %0 = trunc i32 %umaxv.i to i16 + ret i16 %0 +} + +define i32 @test_vmaxvq_u32(<4 x i32> %a) { +; CHECK: test_vmaxvq_u32: +; CHECK: umaxv s{{[0-9]+}}, {{v[0-9]+}}.4s +entry: + %umaxv.i = tail call i32 @llvm.arm64.neon.umaxv.i32.v4i32(<4 x i32> %a) + ret i32 %umaxv.i +} + +define i8 @test_vminv_s8(<8 x i8> %a) { +; CHECK: test_vminv_s8: +; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.8b +entry: + %sminv.i = tail call i32 @llvm.arm64.neon.sminv.i32.v8i8(<8 x i8> %a) + %0 = trunc i32 %sminv.i to i8 + ret i8 %0 +} + +define i16 @test_vminv_s16(<4 x i16> %a) { +; CHECK: test_vminv_s16: +; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.4h +entry: + %sminv.i = tail call i32 @llvm.arm64.neon.sminv.i32.v4i16(<4 x i16> %a) + %0 = trunc i32 %sminv.i to i16 + ret i16 %0 +} + +define i8 @test_vminv_u8(<8 x i8> %a) { +; CHECK: test_vminv_u8: +; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.8b +entry: + %uminv.i = tail call i32 @llvm.arm64.neon.uminv.i32.v8i8(<8 x i8> %a) + %0 = trunc i32 %uminv.i to i8 + ret i8 %0 +} + +define i16 @test_vminv_u16(<4 x i16> %a) { +; CHECK: test_vminv_u16: +; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.4h +entry: + %uminv.i = tail call i32 @llvm.arm64.neon.uminv.i32.v4i16(<4 x i16> %a) + %0 = trunc i32 %uminv.i to i16 + ret i16 %0 +} + +define i8 @test_vminvq_s8(<16 x i8> %a) { +; CHECK: test_vminvq_s8: +; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.16b +entry: + %sminv.i = tail call i32 @llvm.arm64.neon.sminv.i32.v16i8(<16 x i8> %a) + %0 = trunc i32 %sminv.i to i8 + ret i8 %0 +} + +define i16 @test_vminvq_s16(<8 x i16> %a) { +; CHECK: test_vminvq_s16: +; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.8h +entry: + %sminv.i = tail call i32 @llvm.arm64.neon.sminv.i32.v8i16(<8 x i16> %a) + %0 = trunc i32 %sminv.i to i16 + ret i16 %0 +} + +define i32 @test_vminvq_s32(<4 x i32> %a) { +; CHECK: test_vminvq_s32: +; CHECK: sminv s{{[0-9]+}}, {{v[0-9]+}}.4s +entry: + %sminv.i = tail call i32 @llvm.arm64.neon.sminv.i32.v4i32(<4 x i32> %a) + ret i32 %sminv.i +} + +define i8 @test_vminvq_u8(<16 x i8> %a) { +; CHECK: test_vminvq_u8: +; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.16b +entry: + %uminv.i = tail call i32 @llvm.arm64.neon.uminv.i32.v16i8(<16 x i8> %a) + %0 = trunc i32 %uminv.i to i8 + ret i8 %0 +} + +define i16 @test_vminvq_u16(<8 x i16> %a) { +; CHECK: test_vminvq_u16: +; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.8h +entry: + %uminv.i = tail call i32 @llvm.arm64.neon.uminv.i32.v8i16(<8 x i16> %a) + %0 = trunc i32 %uminv.i to i16 + ret i16 %0 +} + +define i32 
@test_vminvq_u32(<4 x i32> %a) { +; CHECK: test_vminvq_u32: +; CHECK: uminv s{{[0-9]+}}, {{v[0-9]+}}.4s +entry: + %uminv.i = tail call i32 @llvm.arm64.neon.uminv.i32.v4i32(<4 x i32> %a) + ret i32 %uminv.i +} + +define i8 @test_vaddv_s8(<8 x i8> %a) { +; CHECK: test_vaddv_s8: +; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b +entry: + %vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v8i8(<8 x i8> %a) + %0 = trunc i32 %vaddv.i to i8 + ret i8 %0 +} + +define i16 @test_vaddv_s16(<4 x i16> %a) { +; CHECK: test_vaddv_s16: +; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h +entry: + %vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v4i16(<4 x i16> %a) + %0 = trunc i32 %vaddv.i to i16 + ret i16 %0 +} + +define i8 @test_vaddv_u8(<8 x i8> %a) { +; CHECK: test_vaddv_u8: +; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b +entry: + %vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v8i8(<8 x i8> %a) + %0 = trunc i32 %vaddv.i to i8 + ret i8 %0 +} + +define i16 @test_vaddv_u16(<4 x i16> %a) { +; CHECK: test_vaddv_u16: +; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h +entry: + %vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v4i16(<4 x i16> %a) + %0 = trunc i32 %vaddv.i to i16 + ret i16 %0 +} + +define i8 @test_vaddvq_s8(<16 x i8> %a) { +; CHECK: test_vaddvq_s8: +; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b +entry: + %vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v16i8(<16 x i8> %a) + %0 = trunc i32 %vaddv.i to i8 + ret i8 %0 +} + +define i16 @test_vaddvq_s16(<8 x i16> %a) { +; CHECK: test_vaddvq_s16: +; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h +entry: + %vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v8i16(<8 x i16> %a) + %0 = trunc i32 %vaddv.i to i16 + ret i16 %0 +} + +define i32 @test_vaddvq_s32(<4 x i32> %a) { +; CHECK: test_vaddvq_s32: +; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s +entry: + %vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v4i32(<4 x i32> %a) + ret i32 %vaddv.i +} + +define i8 @test_vaddvq_u8(<16 x i8> %a) { +; CHECK: test_vaddvq_u8: +; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b +entry: + %vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v16i8(<16 x i8> %a) + %0 = trunc i32 %vaddv.i to i8 + ret i8 %0 +} + +define i16 @test_vaddvq_u16(<8 x i16> %a) { +; CHECK: test_vaddvq_u16: +; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h +entry: + %vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v8i16(<8 x i16> %a) + %0 = trunc i32 %vaddv.i to i16 + ret i16 %0 +} + +define i32 @test_vaddvq_u32(<4 x i32> %a) { +; CHECK: test_vaddvq_u32: +; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s +entry: + %vaddv.i = tail call i32 @llvm.arm64.neon.saddv.i32.v4i32(<4 x i32> %a) + ret i32 %vaddv.i +} + +define float @test_vmaxvq_f32(<4 x float> %a) { +; CHECK: test_vmaxvq_f32: +; CHECK: fmaxv s{{[0-9]+}}, {{v[0-9]+}}.4s +entry: + %0 = call float @llvm.arm64.neon.fmaxv.f32.v4f32(<4 x float> %a) + ret float %0 +} + +define float @test_vminvq_f32(<4 x float> %a) { +; CHECK: test_vminvq_f32: +; CHECK: fminv s{{[0-9]+}}, {{v[0-9]+}}.4s +entry: + %0 = call float @llvm.arm64.neon.fminv.f32.v4f32(<4 x float> %a) + ret float %0 +} + +define float @test_vmaxnmvq_f32(<4 x float> %a) { +; CHECK: test_vmaxnmvq_f32: +; CHECK: fmaxnmv s{{[0-9]+}}, {{v[0-9]+}}.4s +entry: + %0 = call float @llvm.arm64.neon.fmaxnmv.f32.v4f32(<4 x float> %a) + ret float %0 +} + +define float @test_vminnmvq_f32(<4 x float> %a) { +; CHECK: test_vminnmvq_f32: +; CHECK: fminnmv s{{[0-9]+}}, {{v[0-9]+}}.4s +entry: + %0 = call float @llvm.arm64.neon.fminnmv.f32.v4f32(<4 x float> %a) + ret float %0 +} + diff --git a/test/CodeGen/ARM64/aarch64-neon-add-pairwise.ll 
b/test/CodeGen/ARM64/aarch64-neon-add-pairwise.ll new file mode 100644 index 00000000000..9cd76ff1b7e --- /dev/null +++ b/test/CodeGen/ARM64/aarch64-neon-add-pairwise.ll @@ -0,0 +1,100 @@ +; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=+neon < %s | FileCheck %s + +declare <8 x i8> @llvm.arm64.neon.addp.v8i8(<8 x i8>, <8 x i8>) + +define <8 x i8> @test_addp_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) { +; Using registers other than v0, v1 is possible, but would be odd. +; CHECK: test_addp_v8i8: + %tmp1 = call <8 x i8> @llvm.arm64.neon.addp.v8i8(<8 x i8> %lhs, <8 x i8> %rhs) +; CHECK: addp v0.8b, v0.8b, v1.8b + ret <8 x i8> %tmp1 +} + +declare <16 x i8> @llvm.arm64.neon.addp.v16i8(<16 x i8>, <16 x i8>) + +define <16 x i8> @test_addp_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) { +; CHECK: test_addp_v16i8: + %tmp1 = call <16 x i8> @llvm.arm64.neon.addp.v16i8(<16 x i8> %lhs, <16 x i8> %rhs) +; CHECK: addp v0.16b, v0.16b, v1.16b + ret <16 x i8> %tmp1 +} + +declare <4 x i16> @llvm.arm64.neon.addp.v4i16(<4 x i16>, <4 x i16>) + +define <4 x i16> @test_addp_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) { +; CHECK: test_addp_v4i16: + %tmp1 = call <4 x i16> @llvm.arm64.neon.addp.v4i16(<4 x i16> %lhs, <4 x i16> %rhs) +; CHECK: addp v0.4h, v0.4h, v1.4h + ret <4 x i16> %tmp1 +} + +declare <8 x i16> @llvm.arm64.neon.addp.v8i16(<8 x i16>, <8 x i16>) + +define <8 x i16> @test_addp_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) { +; CHECK: test_addp_v8i16: + %tmp1 = call <8 x i16> @llvm.arm64.neon.addp.v8i16(<8 x i16> %lhs, <8 x i16> %rhs) +; CHECK: addp v0.8h, v0.8h, v1.8h + ret <8 x i16> %tmp1 +} + +declare <2 x i32> @llvm.arm64.neon.addp.v2i32(<2 x i32>, <2 x i32>) + +define <2 x i32> @test_addp_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) { +; CHECK: test_addp_v2i32: + %tmp1 = call <2 x i32> @llvm.arm64.neon.addp.v2i32(<2 x i32> %lhs, <2 x i32> %rhs) +; CHECK: addp v0.2s, v0.2s, v1.2s + ret <2 x i32> %tmp1 +} + +declare <4 x i32> @llvm.arm64.neon.addp.v4i32(<4 x i32>, <4 x i32>) + +define <4 x i32> @test_addp_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) { +; CHECK: test_addp_v4i32: + %tmp1 = call <4 x i32> @llvm.arm64.neon.addp.v4i32(<4 x i32> %lhs, <4 x i32> %rhs) +; CHECK: addp v0.4s, v0.4s, v1.4s + ret <4 x i32> %tmp1 +} + + +declare <2 x i64> @llvm.arm64.neon.addp.v2i64(<2 x i64>, <2 x i64>) + +define <2 x i64> @test_addp_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) { +; CHECK: test_addp_v2i64: + %val = call <2 x i64> @llvm.arm64.neon.addp.v2i64(<2 x i64> %lhs, <2 x i64> %rhs) +; CHECK: addp v0.2d, v0.2d, v1.2d + ret <2 x i64> %val +} + +declare <2 x float> @llvm.arm64.neon.addp.v2f32(<2 x float>, <2 x float>) +declare <4 x float> @llvm.arm64.neon.addp.v4f32(<4 x float>, <4 x float>) +declare <2 x double> @llvm.arm64.neon.addp.v2f64(<2 x double>, <2 x double>) + +define <2 x float> @test_faddp_v2f32(<2 x float> %lhs, <2 x float> %rhs) { +; CHECK: test_faddp_v2f32: + %val = call <2 x float> @llvm.arm64.neon.addp.v2f32(<2 x float> %lhs, <2 x float> %rhs) +; CHECK: faddp v0.2s, v0.2s, v1.2s + ret <2 x float> %val +} + +define <4 x float> @test_faddp_v4f32(<4 x float> %lhs, <4 x float> %rhs) { +; CHECK: test_faddp_v4f32: + %val = call <4 x float> @llvm.arm64.neon.addp.v4f32(<4 x float> %lhs, <4 x float> %rhs) +; CHECK: faddp v0.4s, v0.4s, v1.4s + ret <4 x float> %val +} + +define <2 x double> @test_faddp_v2f64(<2 x double> %lhs, <2 x double> %rhs) { +; CHECK: test_faddp_v2f64: + %val = call <2 x double> @llvm.arm64.neon.addp.v2f64(<2 x double> %lhs, <2 x double> %rhs) +; CHECK: faddp v0.2d, v0.2d, v1.2d + ret <2 x double> %val +} + +define i32 
@test_vaddv.v2i32(<2 x i32> %a) { +; CHECK-LABEL: test_vaddv.v2i32 +; CHECK: addp {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s + %1 = tail call i32 @llvm.arm64.neon.saddv.i32.v2i32(<2 x i32> %a) + ret i32 %1 +} + +declare i32 @llvm.arm64.neon.saddv.i32.v2i32(<2 x i32>) diff --git a/test/CodeGen/ARM64/aarch64-neon-add-sub.ll b/test/CodeGen/ARM64/aarch64-neon-add-sub.ll new file mode 100644 index 00000000000..241025eca33 --- /dev/null +++ b/test/CodeGen/ARM64/aarch64-neon-add-sub.ll @@ -0,0 +1,237 @@ +; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -arm64-simd-scalar| FileCheck %s + +define <8 x i8> @add8xi8(<8 x i8> %A, <8 x i8> %B) { +;CHECK: add {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b + %tmp3 = add <8 x i8> %A, %B; + ret <8 x i8> %tmp3 +} + +define <16 x i8> @add16xi8(<16 x i8> %A, <16 x i8> %B) { +;CHECK: add {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b + %tmp3 = add <16 x i8> %A, %B; + ret <16 x i8> %tmp3 +} + +define <4 x i16> @add4xi16(<4 x i16> %A, <4 x i16> %B) { +;CHECK: add {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h + %tmp3 = add <4 x i16> %A, %B; + ret <4 x i16> %tmp3 +} + +define <8 x i16> @add8xi16(<8 x i16> %A, <8 x i16> %B) { +;CHECK: add {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h + %tmp3 = add <8 x i16> %A, %B; + ret <8 x i16> %tmp3 +} + +define <2 x i32> @add2xi32(<2 x i32> %A, <2 x i32> %B) { +;CHECK: add {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s + %tmp3 = add <2 x i32> %A, %B; + ret <2 x i32> %tmp3 +} + +define <4 x i32> @add4x32(<4 x i32> %A, <4 x i32> %B) { +;CHECK: add {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s + %tmp3 = add <4 x i32> %A, %B; + ret <4 x i32> %tmp3 +} + +define <2 x i64> @add2xi64(<2 x i64> %A, <2 x i64> %B) { +;CHECK: add {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d + %tmp3 = add <2 x i64> %A, %B; + ret <2 x i64> %tmp3 +} + +define <2 x float> @add2xfloat(<2 x float> %A, <2 x float> %B) { +;CHECK: fadd {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s + %tmp3 = fadd <2 x float> %A, %B; + ret <2 x float> %tmp3 +} + +define <4 x float> @add4xfloat(<4 x float> %A, <4 x float> %B) { +;CHECK: fadd {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s + %tmp3 = fadd <4 x float> %A, %B; + ret <4 x float> %tmp3 +} +define <2 x double> @add2xdouble(<2 x double> %A, <2 x double> %B) { +;CHECK: add {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d + %tmp3 = fadd <2 x double> %A, %B; + ret <2 x double> %tmp3 +} + +define <8 x i8> @sub8xi8(<8 x i8> %A, <8 x i8> %B) { +;CHECK: sub {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b + %tmp3 = sub <8 x i8> %A, %B; + ret <8 x i8> %tmp3 +} + +define <16 x i8> @sub16xi8(<16 x i8> %A, <16 x i8> %B) { +;CHECK: sub {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b + %tmp3 = sub <16 x i8> %A, %B; + ret <16 x i8> %tmp3 +} + +define <4 x i16> @sub4xi16(<4 x i16> %A, <4 x i16> %B) { +;CHECK: sub {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h + %tmp3 = sub <4 x i16> %A, %B; + ret <4 x i16> %tmp3 +} + +define <8 x i16> @sub8xi16(<8 x i16> %A, <8 x i16> %B) { +;CHECK: sub {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h + %tmp3 = sub <8 x i16> %A, %B; + ret <8 x i16> %tmp3 +} + +define <2 x i32> @sub2xi32(<2 x i32> %A, <2 x i32> %B) { +;CHECK: sub {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s + %tmp3 = sub <2 x i32> %A, %B; + ret <2 x i32> %tmp3 +} + +define <4 x i32> @sub4x32(<4 x i32> %A, <4 x i32> %B) { +;CHECK: sub {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s + %tmp3 = sub <4 x i32> %A, %B; + ret <4 x i32> %tmp3 +} + +define <2 x i64> @sub2xi64(<2 x i64> %A, <2 x i64> %B) { 
+;CHECK: sub {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+  %tmp3 = sub <2 x i64> %A, %B
+  ret <2 x i64> %tmp3
+}
+
+define <2 x float> @sub2xfloat(<2 x float> %A, <2 x float> %B) {
+;CHECK: fsub {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+  %tmp3 = fsub <2 x float> %A, %B
+  ret <2 x float> %tmp3
+}
+
+define <4 x float> @sub4xfloat(<4 x float> %A, <4 x float> %B) {
+;CHECK: fsub {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+  %tmp3 = fsub <4 x float> %A, %B
+  ret <4 x float> %tmp3
+}
+define <2 x double> @sub2xdouble(<2 x double> %A, <2 x double> %B) {
+;CHECK: fsub {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+  %tmp3 = fsub <2 x double> %A, %B
+  ret <2 x double> %tmp3
+}
+
+define <1 x double> @test_vadd_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vadd_f64
+; CHECK: fadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+  %1 = fadd <1 x double> %a, %b
+  ret <1 x double> %1
+}
+
+define <1 x double> @test_vmul_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vmul_f64
+; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+  %1 = fmul <1 x double> %a, %b
+  ret <1 x double> %1
+}
+
+define <1 x double> @test_vdiv_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vdiv_f64
+; CHECK: fdiv d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+  %1 = fdiv <1 x double> %a, %b
+  ret <1 x double> %1
+}
+
+define <1 x double> @test_vmla_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
+; CHECK-LABEL: test_vmla_f64
+; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+; CHECK: fadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+  %1 = fmul <1 x double> %b, %c
+  %2 = fadd <1 x double> %1, %a
+  ret <1 x double> %2
+}
+
+define <1 x double> @test_vmls_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
+; CHECK-LABEL: test_vmls_f64
+; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+; CHECK: fsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+  %1 = fmul <1 x double> %b, %c
+  %2 = fsub <1 x double> %a, %1
+  ret <1 x double> %2
+}
+
+define <1 x double> @test_vfms_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
+; CHECK-LABEL: test_vfms_f64
+; CHECK: fmsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+  %1 = fsub <1 x double> <double -0.000000e+00>, %b
+  %2 = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %1, <1 x double> %c, <1 x double> %a)
+  ret <1 x double> %2
+}
+
+define <1 x double> @test_vfma_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
+; CHECK-LABEL: test_vfma_f64
+; CHECK: fmadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+  %1 = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %b, <1 x double> %c, <1 x double> %a)
+  ret <1 x double> %1
+}
+
+define <1 x double> @test_vsub_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vsub_f64
+; CHECK: fsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+  %1 = fsub <1 x double> %a, %b
+  ret <1 x double> %1
+}
+
+define <1 x double> @test_vabd_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vabd_f64
+; CHECK: fabd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+  %1 = tail call <1 x double> @llvm.arm64.neon.fabd.v1f64(<1 x double> %a, <1 x double> %b)
+  ret <1 x double> %1
+}
+
+define <1 x double> @test_vmax_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vmax_f64
+; CHECK: fmax d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+  %1 = tail call <1 x double> @llvm.arm64.neon.fmax.v1f64(<1 x double> %a, <1 x double> %b)
+  ret <1 x double> %1
+}
+
+define <1 x double> @test_vmin_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vmin_f64
+; CHECK: fmin d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+  %1 = tail call <1 x double> @llvm.arm64.neon.fmin.v1f64(<1 x double> %a, <1 x double> %b)
+  ret <1 x double> %1
+}
+
+define <1 x double> @test_vmaxnm_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vmaxnm_f64
+; CHECK: fmaxnm d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+  %1 = tail call <1 x double> @llvm.arm64.neon.fmaxnm.v1f64(<1 x double> %a, <1 x double> %b)
+  ret <1 x double> %1
+}
+
+define <1 x double> @test_vminnm_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vminnm_f64
+; CHECK: fminnm d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+  %1 = tail call <1 x double> @llvm.arm64.neon.fminnm.v1f64(<1 x double> %a, <1 x double> %b)
+  ret <1 x double> %1
+}
+
+define <1 x double> @test_vabs_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vabs_f64
+; CHECK: fabs d{{[0-9]+}}, d{{[0-9]+}}
+  %1 = tail call <1 x double> @llvm.fabs.v1f64(<1 x double> %a)
+  ret <1 x double> %1
+}
+
+define <1 x double> @test_vneg_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vneg_f64
+; CHECK: fneg d{{[0-9]+}}, d{{[0-9]+}}
+  %1 = fsub <1 x double> <double -0.000000e+00>, %a
+  ret <1 x double> %1
+}
+
+declare <1 x double> @llvm.fabs.v1f64(<1 x double>)
+declare <1 x double> @llvm.arm64.neon.fminnm.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.arm64.neon.fmaxnm.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.arm64.neon.fmin.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.arm64.neon.fmax.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.arm64.neon.fabd.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.fma.v1f64(<1 x double>, <1 x double>, <1 x double>)
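--
A note on the fsub-from-negative-zero pattern used by test_vfms_f64 and
test_vneg_f64 above: IR of this vintage has no dedicated fneg instruction,
so floating-point negation is conventionally spelled as a subtraction from
-0.0, which the AArch64/ARM64 backend then matches to fneg (or folds into
fmsub). A minimal sketch of the idiom, kept outside the test files; the
@negate_v1f64 helper name is hypothetical, for illustration only:

  define <1 x double> @negate_v1f64(<1 x double> %x) {
    ; Subtract from -0.0, not +0.0: 0.0 - 0.0 yields +0.0, so an fsub
    ; from +0.0 would not negate a zero input correctly.
    %neg = fsub <1 x double> <double -0.000000e+00>, %x
    ret <1 x double> %neg
  }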