From a53bf06f7a998f9ea9e13ba844efc2460a2185dd Mon Sep 17 00:00:00 2001
From: Ana Pazos
Date: Fri, 15 Nov 2013 23:32:10 +0000
Subject: [PATCH] Implemented AArch64 NEON scalar vmulx_lane intrinsics

Implemented AArch64 NEON scalar vfma_lane intrinsics
Implemented AArch64 NEON scalar vfms_lane intrinsics

Implemented legacy vmul_n_f64, vmul_lane_f64, vmul_laneq_f64
intrinsics (v1f64 parameter type) using NEON scalar instructions.

Implemented legacy vfma_lane_f64, vfms_lane_f64, vfma_laneq_f64,
vfms_laneq_f64 intrinsics (v1f64 parameter type) using NEON scalar
instructions.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@194888 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/IR/IntrinsicsAArch64.td        |   5 +-
 lib/Target/AArch64/AArch64InstrNEON.td      | 171 +++++++++++++++++-
 .../AArch64/neon-scalar-by-elem-fma.ll      | 108 +++++++++++
 .../AArch64/neon-scalar-by-elem-mul.ll      | 124 +++++++++++++
 test/CodeGen/AArch64/neon-scalar-mul.ll     |  18 +-
 5 files changed, 410 insertions(+), 16 deletions(-)
 create mode 100644 test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
 create mode 100644 test/CodeGen/AArch64/neon-scalar-by-elem-mul.ll

diff --git a/include/llvm/IR/IntrinsicsAArch64.td b/include/llvm/IR/IntrinsicsAArch64.td
index 29026f66ab6..27e78a5c1a0 100644
--- a/include/llvm/IR/IntrinsicsAArch64.td
+++ b/include/llvm/IR/IntrinsicsAArch64.td
@@ -67,8 +67,9 @@ def int_aarch64_neon_vpmaxnm : Neon_2Arg_Intrinsic;
 // Vector Pairwise minNum (Floating Point)
 def int_aarch64_neon_vpminnm : Neon_2Arg_Intrinsic;
 
-// Vector Multiply Extended (Floating Point)
-def int_aarch64_neon_vmulx : Neon_2Arg_Intrinsic;
+// Vector Multiply Extended and Scalar Multiply Extended (Floating Point)
+def int_aarch64_neon_vmulx :
+  Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>]>;
 
 class Neon_N2V_Intrinsic
   : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_i32_ty],
diff --git a/lib/Target/AArch64/AArch64InstrNEON.td b/lib/Target/AArch64/AArch64InstrNEON.td
index 4b8bb8e1efc..09d28768953 100644
--- a/lib/Target/AArch64/AArch64InstrNEON.td
+++ b/lib/Target/AArch64/AArch64InstrNEON.td
@@ -3653,6 +3653,8 @@ defm ST1WB4V_1D : NeonI_STWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
 // End of post-index vector load/store multiple N-element structure
 // (class SIMD lselem-post)
 
+
+// Neon Scalar instructions implementation
 // Scalar Three Same
 class NeonI_Scalar3Same_size<bit u, bits<2> size, bits<5> opcode, string asmop,
@@ -4360,8 +4362,17 @@ defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vpmaxnm,
 defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vpminnm,
                                          FPMINNMPsss, FPMINNMPddd>;
 
-defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vmulx,
-                                         FMULXsss, FMULXddd>;
+multiclass Neon_Scalar3Same_MULX_SD_size_patterns<SDPatternOperator opnode,
+                                                  Instruction INSTS,
+                                                  Instruction INSTD> {
+  def : Pat<(f32 (opnode (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
+            (INSTS FPR32:$Rn, FPR32:$Rm)>;
+  def : Pat<(f64 (opnode (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
+            (INSTD FPR64:$Rn, FPR64:$Rm)>;
+}
+
+defm : Neon_Scalar3Same_MULX_SD_size_patterns<int_aarch64_neon_vmulx,
+                                              FMULXsss, FMULXddd>;
 
 // Scalar Integer Shift Left (Signed, Unsigned)
 def SSHLddd : NeonI_Scalar3Same_D_size<0b0, 0b01000, "sshl">;
@@ -4794,6 +4805,51 @@ def FMULXddv_2D : NeonI_ScalarXIndexedElemArith<"fmulx",
   let Inst{20-16} = MRm;
 }
 
+multiclass Neon_ScalarXIndexedElem_MUL_MULX_Patterns<
+  SDPatternOperator opnode,
+  Instruction INST,
+  ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
+  ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
+
+  def : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
+              (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)))),
+            (ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
+
+  def : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
+              (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)))),
+            (ResTy (INST (ResTy FPRC:$Rn),
+              (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
+              OpNImm:$Imm))>;
+
+  // swapped operands
+  def : Pat<(ResTy (opnode
+              (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
+              (ResTy FPRC:$Rn))),
+            (ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
+
+  def : Pat<(ResTy (opnode
+              (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
+              (ResTy FPRC:$Rn))),
+            (ResTy (INST (ResTy FPRC:$Rn),
+              (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
+              OpNImm:$Imm))>;
+}
+
+// Patterns for Scalar Floating Point multiply (scalar, by element)
+defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULssv_4S,
+  f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
+defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULddv_2D,
+  f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
+
+// Patterns for Scalar Floating Point multiply extended (scalar, by element)
+defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
+  FMULXssv_4S, f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
+defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
+  FMULXddv_2D, f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
+
 
 // Scalar Floating Point fused multiply-add (scalar, by element)
 def FMLAssv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmla",
   0b0001, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
@@ -4821,6 +4877,83 @@ def FMLSddv_2D : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmls",
   let Inst{21} = 0b0;    // l
   let Inst{20-16} = MRm;
 }
+// We are allowed to match the fma instruction regardless of compile options.
+multiclass Neon_ScalarXIndexedElem_FMA_Patterns<
+  Instruction FMLAI, Instruction FMLSI,
+  ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
+  ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
+  // fmla
+  def : Pat<(ResTy (fma (ResTy FPRC:$Rn),
+              (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
+              (ResTy FPRC:$Ra))),
+            (ResTy (FMLAI (ResTy FPRC:$Ra),
+              (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
+
+  def : Pat<(ResTy (fma (ResTy FPRC:$Rn),
+              (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
+              (ResTy FPRC:$Ra))),
+            (ResTy (FMLAI (ResTy FPRC:$Ra),
+              (ResTy FPRC:$Rn),
+              (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
+              OpNImm:$Imm))>;
+
+  // swapped fmla operands
+  def : Pat<(ResTy (fma
+              (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
+              (ResTy FPRC:$Rn),
+              (ResTy FPRC:$Ra))),
+            (ResTy (FMLAI (ResTy FPRC:$Ra),
+              (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
+
+  def : Pat<(ResTy (fma
+              (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
+              (ResTy FPRC:$Rn),
+              (ResTy FPRC:$Ra))),
+            (ResTy (FMLAI (ResTy FPRC:$Ra),
+              (ResTy FPRC:$Rn),
+              (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
+              OpNImm:$Imm))>;
+
+  // fmls
+  def : Pat<(ResTy (fma (ResTy FPRC:$Rn),
+              (fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
+              (ResTy FPRC:$Ra))),
+            (ResTy (FMLSI (ResTy FPRC:$Ra),
+              (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
+
+  def : Pat<(ResTy (fma (ResTy FPRC:$Rn),
+              (fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
+              (ResTy FPRC:$Ra))),
+            (ResTy (FMLSI (ResTy FPRC:$Ra),
+              (ResTy FPRC:$Rn),
+              (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
+              OpNImm:$Imm))>;
+
+  // swapped fmls operands
+  def : Pat<(ResTy (fma
+              (fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
+              (ResTy FPRC:$Rn),
+              (ResTy FPRC:$Ra))),
+            (ResTy (FMLSI (ResTy FPRC:$Ra),
+              (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
+
+  def : Pat<(ResTy (fma
+              (fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
+              (ResTy FPRC:$Rn),
+              (ResTy FPRC:$Ra))),
+            (ResTy (FMLSI (ResTy FPRC:$Ra),
+              (ResTy FPRC:$Rn),
+              (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
+              OpNImm:$Imm))>;
+}
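For reference, the IR shape these patterns match pairs an explicit @llvm.fma call with an extractelement of the lane operand. A minimal sketch (function name illustrative, not part of the patch):

; Expected to select "fmla s0, s1, v2.s[1]": the accumulator is tied to the
; destination register, and the explicit fma intrinsic may be matched
; regardless of the -fp-contract setting, per the comment above.
declare float @llvm.fma.f32(float, float, float)

define float @fmla_lane_sketch(float %acc, float %n, <4 x float> %m) {
  %lane = extractelement <4 x float> %m, i32 1
  %res = call float @llvm.fma.f32(float %n, float %lane, float %acc)
  ret float %res
}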
+// Scalar Floating Point fused multiply-add and multiply-subtract (scalar, by element)
+defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAssv_4S, FMLSssv_4S,
+  f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
+defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAddv_2D, FMLSddv_2D,
+  f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
 
 // Scalar Signed saturating doubling multiply-add long (scalar, by element)
 def SQDMLALshv_4H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
@@ -4990,6 +5123,40 @@ def DUPdv_D : NeonI_Scalar_DUP<"dup", "d", FPR64, VPR128, neon_uimm1_bare> {
   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
 }
 
+multiclass NeonI_Scalar_DUP_Elt_pattern<Instruction DUPI, ValueType ResTy,
+  ValueType OpTy, Operand OpImm,
+  ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
+
+  def : Pat<(ResTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)),
+            (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
+
+  def : Pat<(ResTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)),
+            (ResTy (DUPI
+              (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
+              OpNImm:$Imm))>;
+}
+
+// Patterns for vector extract of FP data using scalar DUP instructions
+defm : NeonI_Scalar_DUP_Elt_pattern<DUPsv_S, f32,
+  v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
+defm : NeonI_Scalar_DUP_Elt_pattern<DUPdv_D, f64,
+  v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
+
+multiclass NeonI_Scalar_DUP_alias<string asmop, string asmlane,
+                                  Instruction DUPI, Operand OpImm,
+                                  RegisterClass ResRC> {
+  def : NeonInstAlias<!strconcat(asmop, " $Rd, $Rn" # asmlane # "[$Imm]"),
+                      (DUPI ResRC:$Rd, VPR128:$Rn, OpImm:$Imm), 0b0>;
+}
+
+// Aliases for Scalar copy - DUP element (scalar)
+// FIXME: This is actually the preferred syntax but TableGen can't deal with
+// custom printing of aliases.
+defm : NeonI_Scalar_DUP_alias<"mov", ".b", DUPbv_B, neon_uimm4_bare, FPR8>;
+defm : NeonI_Scalar_DUP_alias<"mov", ".h", DUPhv_H, neon_uimm3_bare, FPR16>;
+defm : NeonI_Scalar_DUP_alias<"mov", ".s", DUPsv_S, neon_uimm2_bare, FPR32>;
+defm : NeonI_Scalar_DUP_alias<"mov", ".d", DUPdv_D, neon_uimm1_bare, FPR64>;
+
 //===----------------------------------------------------------------------===//
 // Non-Instruction Patterns
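The NeonI_Scalar_DUP_Elt_pattern instantiations above also give scalar FP lane extracts a direct lowering, so a lane read feeding a scalar operation stays in the FP/SIMD register file. A minimal sketch (function name illustrative):

define float @extract_lane_sketch(<4 x float> %v) {
  ; expected to select "dup s0, v0.s[2]"; the preferred "mov" spelling is the
  ; alias the FIXME above says TableGen cannot yet print
  %lane = extractelement <4 x float> %v, i32 2
  ret float %lane
}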
diff --git a/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll b/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
new file mode 100644
index 00000000000..8ce42def409
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
@@ -0,0 +1,108 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+declare float @llvm.fma.f32(float, float, float)
+declare double @llvm.fma.f64(double, double, double)
+
+define float @test_fmla_ss4S(float %a, float %b, <4 x float> %v) {
+  ; CHECK: test_fmla_ss4S
+  ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+  %tmp1 = extractelement <4 x float> %v, i32 3
+  %tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
+  ret float %tmp2
+}
+
+define float @test_fmla_ss4S_swap(float %a, float %b, <4 x float> %v) {
+  ; CHECK: test_fmla_ss4S_swap
+  ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+  %tmp1 = extractelement <4 x float> %v, i32 3
+  %tmp2 = call float @llvm.fma.f32(float %tmp1, float %b, float %a)
+  ret float %tmp2
+}
+
+define float @test_fmla_ss2S(float %a, float %b, <2 x float> %v) {
+  ; CHECK: test_fmla_ss2S
+  ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+  %tmp1 = extractelement <2 x float> %v, i32 1
+  %tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
+  ret float %tmp2
+}
+
+define double @test_fmla_ddD(double %a, double %b, <1 x double> %v) {
+  ; CHECK: test_fmla_ddD
+  ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+  %tmp1 = extractelement <1 x double> %v, i32 0
+  %tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
+  ret double %tmp2
+}
+
+define double @test_fmla_dd2D(double %a, double %b, <2 x double> %v) {
+  ; CHECK: test_fmla_dd2D
+  ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+  %tmp1 = extractelement <2 x double> %v, i32 1
+  %tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
+  ret double %tmp2
+}
+
+define double @test_fmla_dd2D_swap(double %a, double %b, <2 x double> %v) {
+  ; CHECK: test_fmla_dd2D_swap
+  ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+  %tmp1 = extractelement <2 x double> %v, i32 1
+  %tmp2 = call double @llvm.fma.f64(double %tmp1, double %b, double %a)
+  ret double %tmp2
+}
+
+define float @test_fmls_ss4S(float %a, float %b, <4 x float> %v) {
+  ; CHECK: test_fmls_ss4S
+  ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+  %tmp1 = extractelement <4 x float> %v, i32 3
+  %tmp2 = fsub float -0.0, %tmp1
+  %tmp3 = call float @llvm.fma.f32(float %tmp2, float %tmp1, float %a)
+  ret float %tmp3
+}
+
+define float @test_fmls_ss4S_swap(float %a, float %b, <4 x float> %v) {
+  ; CHECK: test_fmls_ss4S_swap
+  ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+  %tmp1 = extractelement <4 x float> %v, i32 3
+  %tmp2 = fsub float -0.0, %tmp1
+  %tmp3 = call float @llvm.fma.f32(float %tmp1, float %tmp2, float %a)
+  ret float %tmp3
+}
+
+
+define float @test_fmls_ss2S(float %a, float %b, <2 x float> %v) {
+  ; CHECK: test_fmls_ss2S
+  ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+  %tmp1 = extractelement <2 x float> %v, i32 1
+  %tmp2 = fsub float -0.0, %tmp1
+  %tmp3 = call float @llvm.fma.f32(float %tmp2, float %tmp1, float %a)
+  ret float %tmp3
+}
+
+define double @test_fmls_ddD(double %a, double %b, <1 x double> %v) {
+  ; CHECK: test_fmls_ddD
+  ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+  %tmp1 = extractelement <1 x double> %v, i32 0
+  %tmp2 = fsub double -0.0, %tmp1
+  %tmp3 = call double @llvm.fma.f64(double %tmp2, double %tmp1, double %a)
+  ret double %tmp3
+}
+
+define double @test_fmls_dd2D(double %a, double %b, <2 x double> %v) {
+  ; CHECK: test_fmls_dd2D
+  ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+  %tmp1 = extractelement <2 x double> %v, i32 1
+  %tmp2 = fsub double -0.0, %tmp1
+  %tmp3 = call double @llvm.fma.f64(double %tmp2, double %tmp1, double %a)
+  ret double %tmp3
+}
+
+define double @test_fmls_dd2D_swap(double %a, double %b, <2 x double> %v) {
+  ; CHECK: test_fmls_dd2D_swap
+  ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+  %tmp1 = extractelement <2 x double> %v, i32 1
+  %tmp2 = fsub double -0.0, %tmp1
+  %tmp3 = call double @llvm.fma.f64(double %tmp1, double %tmp2, double %a)
+  ret double %tmp3
+}
+
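There is no separate fmls intrinsic at the IR level; as the tests above show, the multiply-subtract forms are recognized from an @llvm.fma call whose lane operand is negated with the fsub -0.0 idiom. A minimal sketch (function name illustrative):

declare double @llvm.fma.f64(double, double, double)

define double @fmls_lane_sketch(double %acc, double %n, <2 x double> %m) {
  ; expected to select "fmls d0, d1, v2.d[1]"
  %lane = extractelement <2 x double> %m, i32 1
  %neg = fsub double -0.0, %lane
  %res = call double @llvm.fma.f64(double %n, double %neg, double %acc)
  ret double %res
}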
diff --git a/test/CodeGen/AArch64/neon-scalar-by-elem-mul.ll b/test/CodeGen/AArch64/neon-scalar-by-elem-mul.ll
new file mode 100644
index 00000000000..968ad3e8cf7
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-by-elem-mul.ll
@@ -0,0 +1,124 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+define float @test_fmul_lane_ss2S(float %a, <2 x float> %v) {
+  ; CHECK: test_fmul_lane_ss2S
+  ; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+  %tmp1 = extractelement <2 x float> %v, i32 1
+  %tmp2 = fmul float %a, %tmp1
+  ret float %tmp2
+}
+
+define float @test_fmul_lane_ss2S_swap(float %a, <2 x float> %v) {
+  ; CHECK: test_fmul_lane_ss2S_swap
+  ; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+  %tmp1 = extractelement <2 x float> %v, i32 1
+  %tmp2 = fmul float %tmp1, %a
+  ret float %tmp2
+}
+
+
+define float @test_fmul_lane_ss4S(float %a, <4 x float> %v) {
+  ; CHECK: test_fmul_lane_ss4S
+  ; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+  %tmp1 = extractelement <4 x float> %v, i32 3
+  %tmp2 = fmul float %a, %tmp1
+  ret float %tmp2
+}
+
+define float @test_fmul_lane_ss4S_swap(float %a, <4 x float> %v) {
+  ; CHECK: test_fmul_lane_ss4S_swap
+  ; CHECK: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+  %tmp1 = extractelement <4 x float> %v, i32 3
+  %tmp2 = fmul float %tmp1, %a
+  ret float %tmp2
+}
+
+
+define double @test_fmul_lane_ddD(double %a, <1 x double> %v) {
+  ; CHECK: test_fmul_lane_ddD
+  ; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+  %tmp1 = extractelement <1 x double> %v, i32 0
+  %tmp2 = fmul double %a, %tmp1
+  ret double %tmp2
+}
+
+
+
+define double @test_fmul_lane_dd2D(double %a, <2 x double> %v) {
+  ; CHECK: test_fmul_lane_dd2D
+  ; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+  %tmp1 = extractelement <2 x double> %v, i32 1
+  %tmp2 = fmul double %a, %tmp1
+  ret double %tmp2
+}
+
+
+define double @test_fmul_lane_dd2D_swap(double %a, <2 x double> %v) {
+  ; CHECK: test_fmul_lane_dd2D_swap
+  ; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+  %tmp1 = extractelement <2 x double> %v, i32 1
+  %tmp2 = fmul double %tmp1, %a
+  ret double %tmp2
+}
+
+declare float @llvm.aarch64.neon.vmulx.f32(float, float)
+
+define float @test_fmulx_lane_f32(float %a, <2 x float> %v) {
+  ; CHECK: test_fmulx_lane_f32
+  ; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+  %tmp1 = extractelement <2 x float> %v, i32 1
+  %tmp2 = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %tmp1)
+  ret float %tmp2
+}
+
+define float @test_fmulx_laneq_f32(float %a, <4 x float> %v) {
+  ; CHECK: test_fmulx_laneq_f32
+  ; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+  %tmp1 = extractelement <4 x float> %v, i32 3
+  %tmp2 = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %tmp1)
+  ret float %tmp2
+}
+
+define float @test_fmulx_laneq_f32_swap(float %a, <4 x float> %v) {
+  ; CHECK: test_fmulx_laneq_f32_swap
+  ; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+  %tmp1 = extractelement <4 x float> %v, i32 3
+  %tmp2 = call float @llvm.aarch64.neon.vmulx.f32(float %tmp1, float %a)
+  ret float %tmp2
+}
+
+declare double @llvm.aarch64.neon.vmulx.f64(double, double)
+
+define double @test_fmulx_lane_f64(double %a, <1 x double> %v) {
+  ; CHECK: test_fmulx_lane_f64
+  ; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+  %tmp1 = extractelement <1 x double> %v, i32 0
+  %tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %tmp1)
+  ret double %tmp2
+}
+
+define double @test_fmulx_laneq_f64_0(double %a, <2 x double> %v) {
+  ; CHECK: test_fmulx_laneq_f64_0
+  ; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+  %tmp1 = extractelement <2 x double> %v, i32 0
+  %tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %tmp1)
+  ret double %tmp2
+}
+
+
+define double @test_fmulx_laneq_f64_1(double %a, <2 x double> %v) {
+  ; CHECK: test_fmulx_laneq_f64_1
+  ; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+  %tmp1 = extractelement <2 x double> %v, i32 1
+  %tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %tmp1)
+  ret double %tmp2
+}
+
+define double @test_fmulx_laneq_f64_1_swap(double %a, <2 x double> %v) {
+  ; CHECK: test_fmulx_laneq_f64_1_swap
+  ; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+  %tmp1 = extractelement <2 x double> %v, i32 1
+  %tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %tmp1, double %a)
+  ret double %tmp2
+}
+
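The test updates below follow from the IntrinsicsAArch64.td change: with llvm_anyfloat_ty, int_aarch64_neon_vmulx is overloaded directly on scalar as well as vector FP types, so callers no longer need to round-trip through <1 x float> insert/extract. Illustrative instantiations of the single polymorphic definition (the first two are the ones the tests use; the vector form is an assumed example of the same overload):

declare float @llvm.aarch64.neon.vmulx.f32(float, float)
declare double @llvm.aarch64.neon.vmulx.f64(double, double)
declare <2 x float> @llvm.aarch64.neon.vmulx.v2f32(<2 x float>, <2 x float>)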
diff --git a/test/CodeGen/AArch64/neon-scalar-mul.ll b/test/CodeGen/AArch64/neon-scalar-mul.ll
index a58294b209c..4992a51fe11 100644
--- a/test/CodeGen/AArch64/neon-scalar-mul.ll
+++ b/test/CodeGen/AArch64/neon-scalar-mul.ll
@@ -49,25 +49,19 @@ declare <1 x i32> @llvm.arm.neon.vqrdmulh.v1i32(<1 x i32>, <1 x i32>)
 
 define float @test_vmulxs_f32(float %a, float %b) {
 ; CHECK: test_vmulxs_f32
 ; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
-  %1 = insertelement <1 x float> undef, float %a, i32 0
-  %2 = insertelement <1 x float> undef, float %b, i32 0
-  %3 = call <1 x float> @llvm.aarch64.neon.vmulx.v1f32(<1 x float> %1, <1 x float> %2)
-  %4 = extractelement <1 x float> %3, i32 0
-  ret float %4
+  %1 = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %b)
+  ret float %1
 }
 
 define double @test_vmulxd_f64(double %a, double %b) {
 ; CHECK: test_vmulxd_f64
 ; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-  %1 = insertelement <1 x double> undef, double %a, i32 0
-  %2 = insertelement <1 x double> undef, double %b, i32 0
-  %3 = call <1 x double> @llvm.aarch64.neon.vmulx.v1f64(<1 x double> %1, <1 x double> %2)
-  %4 = extractelement <1 x double> %3, i32 0
-  ret double %4
+  %1 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %b)
+  ret double %1
 }
 
-declare <1 x float> @llvm.aarch64.neon.vmulx.v1f32(<1 x float>, <1 x float>)
-declare <1 x double> @llvm.aarch64.neon.vmulx.v1f64(<1 x double>, <1 x double>)
+declare float @llvm.aarch64.neon.vmulx.f32(float, float)
+declare double @llvm.aarch64.neon.vmulx.f64(double, double)
 
 define i32 @test_vqdmlalh_s16(i32 %a, i16 %b, i16 %c) {
 ; CHECK: test_vqdmlalh_s16