Implemented AArch64 Neon scalar vmulx_lane intrinsics

Implemented AArch64 Neon scalar vfma_lane intrinsics
Implemented AArch64 Neon scalar vfms_lane intrinsics

Implemented legacy vmul_n_f64, vmul_lane_f64, vmul_laneq_f64
intrinsics (v1f64 parameter type) using Neon scalar instructions.

Implemented legacy vfma_lane_f64, vfms_lane_f64,
vfma_laneq_f64, vfms_laneq_f64 intrinsics (v1f64 parameter type)
using Neon scalar instructions.
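At the IR level a *_lane intrinsic is just the scalar operation applied to one
extracted vector element; the new patterns fold the extract into a single
by-element instruction. A minimal sketch (the ACLE name in the comment and the
register numbers are illustrative; the declaration is the one the tests in
this commit use):

; vmulxs_laneq_f32(a, v, 3) expressed in IR.
; With these patterns this selects to: fmulx s0, s0, v0.s[3]
declare float @llvm.aarch64.neon.vmulx.f32(float, float)

define float @mulx_laneq_example(float %a, <4 x float> %v) {
  %lane = extractelement <4 x float> %v, i32 3
  %r = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %lane)
  ret float %r
}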



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@194888 91177308-0d34-0410-b5e6-96231b3b80d8
Ana Pazos 2013-11-15 23:32:10 +00:00
parent 6bc810a499
commit a53bf06f7a
5 changed files with 410 additions and 16 deletions


@ -67,8 +67,9 @@ def int_aarch64_neon_vpmaxnm : Neon_2Arg_Intrinsic;
// Vector Pairwise minNum (Floating Point)
def int_aarch64_neon_vpminnm : Neon_2Arg_Intrinsic;
// Vector Multiply Extended (Floating Point)
def int_aarch64_neon_vmulx : Neon_2Arg_Intrinsic;
// Vector Multiply Extended and Scalar Multiply Extended (Floating Point)
def int_aarch64_neon_vmulx :
Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>]>;
class Neon_N2V_Intrinsic
: Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_i32_ty],
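Note the change above: int_aarch64_neon_vmulx now uses llvm_anyfloat_ty, so a
single definition serves both scalar widths and the overload is resolved by
the type suffix at each use site, as in these declarations (the same ones the
updated tests below rely on):

declare float  @llvm.aarch64.neon.vmulx.f32(float, float)
declare double @llvm.aarch64.neon.vmulx.f64(double, double)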


@ -3653,6 +3653,8 @@ defm ST1WB4V_1D : NeonI_STWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
// End of post-index vector load/store multiple N-element structure
// (class SIMD lselem-post)
// Neon Scalar instructions implementation
// Scalar Three Same
class NeonI_Scalar3Same_size<bit u, bits<2> size, bits<5> opcode, string asmop,
@ -4360,8 +4362,17 @@ defm : Neon_Scalar3Same_SD_size_patterns<int_arm_neon_vrsqrts, FRSQRTSsss,
// Patterns to match llvm.aarch64.* intrinsic for
// Scalar Floating-point Multiply Extended,
defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vmulx, FMULXsss,
FMULXddd>;
multiclass Neon_Scalar3Same_MULX_SD_size_patterns<SDPatternOperator opnode,
Instruction INSTS,
Instruction INSTD> {
def : Pat<(f32 (opnode (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
(INSTS FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(f64 (opnode (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
(INSTD FPR64:$Rn, FPR64:$Rm)>;
}
defm : Neon_Scalar3Same_MULX_SD_size_patterns<int_aarch64_neon_vmulx,
FMULXsss, FMULXddd>;
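For reference, IR like the following (mirroring the updated scalar-mul test in
this commit; the expected register numbers are illustrative) exercises the f32
pattern of this multiclass:

declare float @llvm.aarch64.neon.vmulx.f32(float, float)

define float @mulx_scalar_example(float %a, float %b) {
  ; expected: fmulx s0, s0, s1
  %r = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %b)
  ret float %r
}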
// Scalar Integer Shift Left (Signed, Unsigned)
def SSHLddd : NeonI_Scalar3Same_D_size<0b0, 0b01000, "sshl">;
@ -4794,6 +4805,51 @@ def FMULXddv_2D : NeonI_ScalarXIndexedElemArith<"fmulx",
let Inst{20-16} = MRm;
}
multiclass Neon_ScalarXIndexedElem_MUL_MULX_Patterns<
SDPatternOperator opnode,
Instruction INST,
ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
def : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
(ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)))),
(ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
def : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
(ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)))),
(ResTy (INST (ResTy FPRC:$Rn),
(ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
OpNImm:$Imm))>;
// swapped operands
def : Pat<(ResTy (opnode
(ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
(ResTy FPRC:$Rn))),
(ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
def : Pat<(ResTy (opnode
(ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
(ResTy FPRC:$Rn))),
(ResTy (INST (ResTy FPRC:$Rn),
(ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
OpNImm:$Imm))>;
}
// Patterns for Scalar Floating Point multiply (scalar, by element)
defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULssv_4S,
f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULddv_2D,
f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
// Patterns for Scalar Floating Point multiply extended (scalar, by element)
defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
FMULXssv_4S, f32, FPR32, v4f32, neon_uimm2_bare,
v2f32, v4f32, neon_uimm1_bare>;
defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
FMULXddv_2D, f64, FPR64, v2f64, neon_uimm1_bare,
v1f64, v2f64, neon_uimm0_bare>;
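These four instantiations cover plain fmul and the vmulx intrinsic against
both 64-bit and 128-bit vector operands. A sketch of IR the f64 fmul
instantiation matches (mirroring the new by-element test file; register
numbers illustrative):

define double @mul_by_elem_example(double %a, <2 x double> %v) {
  ; expected: fmul d0, d0, v0.d[1]
  %e = extractelement <2 x double> %v, i32 1
  %r = fmul double %a, %e
  ret double %r
}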
// Scalar Floating Point fused multiply-add (scalar, by element)
def FMLAssv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmla",
0b0001, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
@ -4821,6 +4877,83 @@ def FMLSddv_2D : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmls",
let Inst{21} = 0b0; // l
let Inst{20-16} = MRm;
}
// We are allowed to match the fma instruction regardless of compile options.
multiclass Neon_ScalarXIndexedElem_FMA_Patterns<
Instruction FMLAI, Instruction FMLSI,
ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
// fmla
def : Pat<(ResTy (fma (ResTy FPRC:$Rn),
(ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
(ResTy FPRC:$Ra))),
(ResTy (FMLAI (ResTy FPRC:$Ra),
(ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
def : Pat<(ResTy (fma (ResTy FPRC:$Rn),
(ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
(ResTy FPRC:$Ra))),
(ResTy (FMLAI (ResTy FPRC:$Ra),
(ResTy FPRC:$Rn),
(ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
OpNImm:$Imm))>;
// swapped fmla operands
def : Pat<(ResTy (fma
(ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
(ResTy FPRC:$Rn),
(ResTy FPRC:$Ra))),
(ResTy (FMLAI (ResTy FPRC:$Ra),
(ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
def : Pat<(ResTy (fma
(ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
(ResTy FPRC:$Rn),
(ResTy FPRC:$Ra))),
(ResTy (FMLAI (ResTy FPRC:$Ra),
(ResTy FPRC:$Rn),
(ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
OpNImm:$Imm))>;
// fmls
def : Pat<(ResTy (fma (ResTy FPRC:$Rn),
(fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
(ResTy FPRC:$Ra))),
(ResTy (FMLSI (ResTy FPRC:$Ra),
(ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
def : Pat<(ResTy (fma (ResTy FPRC:$Rn),
(fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
(ResTy FPRC:$Ra))),
(ResTy (FMLSI (ResTy FPRC:$Ra),
(ResTy FPRC:$Rn),
(ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
OpNImm:$Imm))>;
// swapped fmls operands
def : Pat<(ResTy (fma
(fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
(ResTy FPRC:$Rn),
(ResTy FPRC:$Ra))),
(ResTy (FMLSI (ResTy FPRC:$Ra),
(ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
def : Pat<(ResTy (fma
(fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
(ResTy FPRC:$Rn),
(ResTy FPRC:$Ra))),
(ResTy (FMLSI (ResTy FPRC:$Ra),
(ResTy FPRC:$Rn),
(ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
OpNImm:$Imm))>;
}
// Scalar Floating Point fused multiply-add and multiply-subtract (scalar, by element)
defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAssv_4S, FMLSssv_4S,
f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAddv_2D, FMLSddv_2D,
f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
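The fmls patterns key off an fneg (written as fsub from -0.0) of either fma
multiplicand. A sketch of IR the f64 instantiation matches via the
swapped-operand fmls pattern (mirroring the new fma test file; register
numbers illustrative):

declare double @llvm.fma.f64(double, double, double)

define double @fmls_by_elem_example(double %a, double %b, <2 x double> %v) {
  ; expected: fmls d0, d1, v0.d[1]
  %e = extractelement <2 x double> %v, i32 1
  %neg = fsub double -0.0, %e
  %r = call double @llvm.fma.f64(double %neg, double %b, double %a)
  ret double %r
}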
// Scalar Signed saturating doubling multiply-add long (scalar, by element)
def SQDMLALshv_4H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
@ -4990,6 +5123,40 @@ def DUPdv_D : NeonI_Scalar_DUP<"dup", "d", FPR64, VPR128, neon_uimm1_bare> {
let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
}
multiclass NeonI_Scalar_DUP_Elt_pattern<Instruction DUPI, ValueType ResTy,
ValueType OpTy, Operand OpImm,
ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
def : Pat<(ResTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)),
(ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
def : Pat<(ResTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)),
(ResTy (DUPI
(ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
OpNImm:$Imm))>;
}
// Patterns for vector extract of FP data using scalar DUP instructions
defm : NeonI_Scalar_DUP_Elt_pattern<DUPsv_S, f32,
v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
defm : NeonI_Scalar_DUP_Elt_pattern<DUPdv_D, f64,
v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
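These DUP patterns let a floating-point extractelement be selected directly
into an FPR without a round trip through the integer register file. A sketch
(the expected output is illustrative; the "mov" spelling defined just below
is parse-only):

define float @extract_lane_example(<4 x float> %v) {
  ; expected: dup s0, v0.s[2]
  %e = extractelement <4 x float> %v, i32 2
  ret float %e
}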
multiclass NeonI_Scalar_DUP_alias<string asmop, string asmlane,
Instruction DUPI, Operand OpImm,
RegisterClass ResRC> {
def : NeonInstAlias<!strconcat(asmop, "$Rd, $Rn." # asmlane # "[$Imm]"),
(DUPI ResRC:$Rd, VPR128:$Rn, OpImm:$Imm), 0b0>;
}
// Aliases for Scalar copy - DUP element (scalar)
// FIXME: This is actually the preferred syntax but TableGen can't deal with
// custom printing of aliases.
defm : NeonI_Scalar_DUP_alias<"mov", ".b", DUPbv_B, neon_uimm4_bare, FPR8>;
defm : NeonI_Scalar_DUP_alias<"mov", ".h", DUPhv_H, neon_uimm3_bare, FPR16>;
defm : NeonI_Scalar_DUP_alias<"mov", ".s", DUPsv_S, neon_uimm2_bare, FPR32>;
defm : NeonI_Scalar_DUP_alias<"mov", ".d", DUPdv_D, neon_uimm1_bare, FPR64>;
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns


@ -0,0 +1,108 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
declare float @llvm.fma.f32(float, float, float)
declare double @llvm.fma.f64(double, double, double)
define float @test_fmla_ss4S(float %a, float %b, <4 x float> %v) {
; CHECK: test_fmla_ss4S
; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
%tmp1 = extractelement <4 x float> %v, i32 3
%tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
ret float %tmp2
}
define float @test_fmla_ss4S_swap(float %a, float %b, <4 x float> %v) {
; CHECK: test_fmla_ss4S_swap
; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
%tmp1 = extractelement <4 x float> %v, i32 3
%tmp2 = call float @llvm.fma.f32(float %tmp1, float %a, float %a)
ret float %tmp2
}
define float @test_fmla_ss2S(float %a, float %b, <2 x float> %v) {
; CHECK: test_fmla_ss2S
; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
%tmp1 = extractelement <2 x float> %v, i32 1
%tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
ret float %tmp2
}
define double @test_fmla_ddD(double %a, double %b, <1 x double> %v) {
; CHECK: test_fmla_ddD
; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
%tmp1 = extractelement <1 x double> %v, i32 0
%tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
ret double %tmp2
}
define double @test_fmla_dd2D(double %a, double %b, <2 x double> %v) {
; CHECK: test_fmla_dd2D
; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp1 = extractelement <2 x double> %v, i32 1
%tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
ret double %tmp2
}
define double @test_fmla_dd2D_swap(double %a, double %b, <2 x double> %v) {
; CHECK: test_fmla_dd2D_swap
; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp1 = extractelement <2 x double> %v, i32 1
%tmp2 = call double @llvm.fma.f64(double %tmp1, double %b, double %a)
ret double %tmp2
}
define float @test_fmls_ss4S(float %a, float %b, <4 x float> %v) {
; CHECK: test_fmls_ss4S
; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
%tmp1 = extractelement <4 x float> %v, i32 3
%tmp2 = fsub float -0.0, %tmp1
%tmp3 = call float @llvm.fma.f32(float %tmp2, float %tmp1, float %a)
ret float %tmp3
}
define float @test_fmls_ss4S_swap(float %a, float %b, <4 x float> %v) {
; CHECK: test_fmls_ss4S_swap
; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
%tmp1 = extractelement <4 x float> %v, i32 3
%tmp2 = fsub float -0.0, %tmp1
%tmp3 = call float @llvm.fma.f32(float %tmp1, float %tmp2, float %a)
ret float %tmp3
}
define float @test_fmls_ss2S(float %a, float %b, <2 x float> %v) {
; CHECK: test_fmls_ss2S
; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
%tmp1 = extractelement <2 x float> %v, i32 1
%tmp2 = fsub float -0.0, %tmp1
%tmp3 = call float @llvm.fma.f32(float %tmp2, float %tmp1, float %a)
ret float %tmp3
}
define double @test_fmls_ddD(double %a, double %b, <1 x double> %v) {
; CHECK: test_fmls_ddD
; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
%tmp1 = extractelement <1 x double> %v, i32 0
%tmp2 = fsub double -0.0, %tmp1
%tmp3 = call double @llvm.fma.f64(double %tmp2, double %tmp1, double %a)
ret double %tmp3
}
define double @test_fmls_dd2D(double %a, double %b, <2 x double> %v) {
; CHECK: test_fmls_dd2D
; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp1 = extractelement <2 x double> %v, i32 1
%tmp2 = fsub double -0.0, %tmp1
%tmp3 = call double @llvm.fma.f64(double %tmp2, double %tmp1, double %a)
ret double %tmp3
}
define double @test_fmls_dd2D_swap(double %a, double %b, <2 x double> %v) {
; CHECK: test_fmls_dd2D_swap
; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp1 = extractelement <2 x double> %v, i32 1
%tmp2 = fsub double -0.0, %tmp1
%tmp3 = call double @llvm.fma.f64(double %tmp1, double %tmp2, double %a)
ret double %tmp3
}


@ -0,0 +1,124 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
define float @test_fmul_lane_ss2S(float %a, <2 x float> %v) {
; CHECK: test_fmul_lane_ss2S
; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
%tmp1 = extractelement <2 x float> %v, i32 1
%tmp2 = fmul float %a, %tmp1;
ret float %tmp2;
}
define float @test_fmul_lane_ss2S_swap(float %a, <2 x float> %v) {
; CHECK: test_fmul_lane_ss2S_swap
; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
%tmp1 = extractelement <2 x float> %v, i32 1
%tmp2 = fmul float %tmp1, %a;
ret float %tmp2;
}
define float @test_fmul_lane_ss4S(float %a, <4 x float> %v) {
; CHECK: test_fmul_lane_ss4S
; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
%tmp1 = extractelement <4 x float> %v, i32 3
%tmp2 = fmul float %a, %tmp1;
ret float %tmp2;
}
define float @test_fmul_lane_ss4S_swap(float %a, <4 x float> %v) {
; CHECK: test_fmul_lane_ss4S_swap
; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
%tmp1 = extractelement <4 x float> %v, i32 3
%tmp2 = fmul float %tmp1, %a;
ret float %tmp2;
}
define double @test_fmul_lane_ddD(double %a, <1 x double> %v) {
; CHECK: test_fmul_lane_ddD
; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
%tmp1 = extractelement <1 x double> %v, i32 0
%tmp2 = fmul double %a, %tmp1;
ret double %tmp2;
}
define double @test_fmul_lane_dd2D(double %a, <2 x double> %v) {
; CHECK: test_fmul_lane_dd2D
; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp1 = extractelement <2 x double> %v, i32 1
%tmp2 = fmul double %a, %tmp1;
ret double %tmp2;
}
define double @test_fmul_lane_dd2D_swap(double %a, <2 x double> %v) {
; CHECK: test_fmul_lane_dd2D_swap
; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp1 = extractelement <2 x double> %v, i32 1
%tmp2 = fmul double %tmp1, %a;
ret double %tmp2;
}
declare float @llvm.aarch64.neon.vmulx.f32(float, float)
define float @test_fmulx_lane_f32(float %a, <2 x float> %v) {
; CHECK: test_fmulx_lane_f32
; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
%tmp1 = extractelement <2 x float> %v, i32 1
%tmp2 = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %tmp1)
ret float %tmp2;
}
define float @test_fmulx_laneq_f32(float %a, <4 x float> %v) {
; CHECK: test_fmulx_laneq_f32
; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
%tmp1 = extractelement <4 x float> %v, i32 3
%tmp2 = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %tmp1)
ret float %tmp2;
}
define float @test_fmulx_laneq_f32_swap(float %a, <4 x float> %v) {
; CHECK: test_fmulx_laneq_f32_swap
; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
%tmp1 = extractelement <4 x float> %v, i32 3
%tmp2 = call float @llvm.aarch64.neon.vmulx.f32(float %tmp1, float %a)
ret float %tmp2;
}
declare double @llvm.aarch64.neon.vmulx.f64(double, double)
define double @test_fmulx_lane_f64(double %a, <1 x double> %v) {
; CHECK: test_fmulx_lane_f64
; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
%tmp1 = extractelement <1 x double> %v, i32 0
%tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %tmp1)
ret double %tmp2;
}
define double @test_fmulx_laneq_f64_0(double %a, <2 x double> %v) {
; CHECK: test_fmulx_laneq_f64_0
; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
%tmp1 = extractelement <2 x double> %v, i32 0
%tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %tmp1)
ret double %tmp2;
}
define double @test_fmulx_laneq_f64_1(double %a, <2 x double> %v) {
; CHECK: test_fmulx_laneq_f64_1
; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp1 = extractelement <2 x double> %v, i32 1
%tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %tmp1)
ret double %tmp2;
}
define double @test_fmulx_laneq_f64_1_swap(double %a, <2 x double> %v) {
; CHECK: test_fmulx_laneq_f64_1_swap
; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp1 = extractelement <2 x double> %v, i32 1
%tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %tmp1, double %a)
ret double %tmp2;
}


@ -49,25 +49,19 @@ declare <1 x i32> @llvm.arm.neon.vqrdmulh.v1i32(<1 x i32>, <1 x i32>)
define float @test_vmulxs_f32(float %a, float %b) {
; CHECK: test_vmulxs_f32
; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
%1 = insertelement <1 x float> undef, float %a, i32 0
%2 = insertelement <1 x float> undef, float %b, i32 0
%3 = call <1 x float> @llvm.aarch64.neon.vmulx.v1f32(<1 x float> %1, <1 x float> %2)
%4 = extractelement <1 x float> %3, i32 0
ret float %4
%1 = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %b)
ret float %1
}
define double @test_vmulxd_f64(double %a, double %b) {
; CHECK: test_vmulxd_f64
; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
%1 = insertelement <1 x double> undef, double %a, i32 0
%2 = insertelement <1 x double> undef, double %b, i32 0
%3 = call <1 x double> @llvm.aarch64.neon.vmulx.v1f64(<1 x double> %1, <1 x double> %2)
%4 = extractelement <1 x double> %3, i32 0
ret double %4
%1 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %b)
ret double %1
}
declare <1 x float> @llvm.aarch64.neon.vmulx.v1f32(<1 x float>, <1 x float>)
declare <1 x double> @llvm.aarch64.neon.vmulx.v1f64(<1 x double>, <1 x double>)
declare float @llvm.aarch64.neon.vmulx.f32(float, float)
declare double @llvm.aarch64.neon.vmulx.f64(double, double)
define i32 @test_vqdmlalh_s16(i32 %a, i16 %b, i16 %c) {
; CHECK: test_vqdmlalh_s16