diff --git a/include/llvm/IR/IntrinsicsAArch64.td b/include/llvm/IR/IntrinsicsAArch64.td
index fa8fabb882d..14ae8a9b795 100644
--- a/include/llvm/IR/IntrinsicsAArch64.td
+++ b/include/llvm/IR/IntrinsicsAArch64.td
@@ -349,28 +349,20 @@ def int_aarch64_neon_vqshlu_n : Neon_N2V_Intrinsic;
 def int_aarch64_neon_vqshlus_n : Neon_N2V_Intrinsic;
 
 // Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
-def int_aarch64_neon_vcvtf32_n_s32 :
-  Intrinsic<[llvm_float_ty], [llvm_v1i32_ty, llvm_i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtf64_n_s64 :
-  Intrinsic<[llvm_double_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtfxs2fp_n :
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty, llvm_i32_ty], [IntrNoMem]>;
 
 // Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
-def int_aarch64_neon_vcvtf32_n_u32 :
-  Intrinsic<[llvm_float_ty], [llvm_v1i32_ty, llvm_i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtf64_n_u64 :
-  Intrinsic<[llvm_double_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtfxu2fp_n :
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty, llvm_i32_ty], [IntrNoMem]>;
 
 // Scalar Floating-point Convert To Signed Fixed-point (Immediate)
-def int_aarch64_neon_vcvts_n_s32_f32 :
-  Intrinsic<[llvm_v1i32_ty], [llvm_v1f32_ty, llvm_i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtd_n_s64_f64 :
-  Intrinsic<[llvm_v1i64_ty], [llvm_v1f64_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtfp2fxs_n :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_i32_ty], [IntrNoMem]>;
 
 // Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
-def int_aarch64_neon_vcvts_n_u32_f32 :
-  Intrinsic<[llvm_v1i32_ty], [llvm_v1f32_ty, llvm_i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtd_n_u64_f64 :
-  Intrinsic<[llvm_v1i64_ty], [llvm_v1f64_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtfp2fxu_n :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_i32_ty], [IntrNoMem]>;
 
 class Neon_SHA_Intrinsic
   : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v1i32_ty, llvm_v4i32_ty],
diff --git a/lib/Target/AArch64/AArch64InstrNEON.td b/lib/Target/AArch64/AArch64InstrNEON.td
index c2098a8130a..82264da6b62 100644
--- a/lib/Target/AArch64/AArch64InstrNEON.td
+++ b/lib/Target/AArch64/AArch64InstrNEON.td
@@ -4627,23 +4627,21 @@ multiclass Neon_ScalarShiftImm_narrow_HSD_size_patterns<
                 (INSTD FPR64:$Rn, imm:$Imm)>;
 }
 
-multiclass Neon_ScalarShiftImm_scvtf_SD_size_patterns<SDPatternOperator Sopnode,
-                                                      SDPatternOperator Dopnode,
+multiclass Neon_ScalarShiftImm_scvtf_SD_size_patterns<SDPatternOperator opnode,
                                                       Instruction INSTS,
                                                       Instruction INSTD> {
-  def ssi : Pat<(f32 (Sopnode (v1i32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
+  def ssi : Pat<(f32 (opnode (v1i32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
                 (INSTS FPR32:$Rn, imm:$Imm)>;
-  def ddi : Pat<(f64 (Dopnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
+  def ddi : Pat<(f64 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
                 (INSTD FPR64:$Rn, imm:$Imm)>;
 }
 
-multiclass Neon_ScalarShiftImm_fcvts_SD_size_patterns<SDPatternOperator Sopnode,
-                                                      SDPatternOperator Dopnode,
+multiclass Neon_ScalarShiftImm_fcvts_SD_size_patterns<SDPatternOperator opnode,
                                                       Instruction INSTS,
                                                       Instruction INSTD> {
-  def ssi : Pat<(v1i32 (Sopnode (v1f32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
+  def ssi : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
                 (INSTS FPR32:$Rn, imm:$Imm)>;
-  def ddi : Pat<(v1i64 (Dopnode (v1f64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
+  def ddi : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
                 (INSTD FPR64:$Rn, imm:$Imm)>;
 }
 
@@ -4763,26 +4761,22 @@ defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<
 
 // Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
 defm SCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11100, "scvtf">;
-defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtf32_n_s32,
-                                                  int_aarch64_neon_vcvtf64_n_s64,
+defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtfxs2fp_n,
                                                   SCVTF_Nssi, SCVTF_Nddi>;
 
 // Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
 defm UCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11100, "ucvtf">;
-defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtf32_n_u32,
-                                                  int_aarch64_neon_vcvtf64_n_u64,
+defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtfxu2fp_n,
                                                   UCVTF_Nssi, UCVTF_Nddi>;
 
 // Scalar Floating-point Convert To Signed Fixed-point (Immediate)
 defm FCVTZS_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11111, "fcvtzs">;
-defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvts_n_s32_f32,
-                                                  int_aarch64_neon_vcvtd_n_s64_f64,
+defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvtfp2fxs_n,
                                                   FCVTZS_Nssi, FCVTZS_Nddi>;
 
 // Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
 defm FCVTZU_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11111, "fcvtzu">;
-defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvts_n_u32_f32,
-                                                  int_aarch64_neon_vcvtd_n_u64_f64,
+defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvtfp2fxu_n,
                                                   FCVTZU_Nssi, FCVTZU_Nddi>;
 
 // Patterns For Convert Instructions Between v1f64 and v1i64
diff --git a/test/CodeGen/AArch64/neon-scalar-cvt.ll b/test/CodeGen/AArch64/neon-scalar-cvt.ll
index 392b911379c..ef91c2e4a88 100644
--- a/test/CodeGen/AArch64/neon-scalar-cvt.ll
+++ b/test/CodeGen/AArch64/neon-scalar-cvt.ll
@@ -49,89 +49,89 @@ define float @test_vcvts_n_f32_s32(i32 %a) {
 ; CHECK: scvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
 entry:
   %vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
-  %0 = call float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32> %vcvtf, i32 1)
+  %0 = call float @llvm.aarch64.neon.vcvtfxs2fp.n.f32.v1i32(<1 x i32> %vcvtf, i32 1)
   ret float %0
 }
 
-declare float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32>, i32)
+declare float @llvm.aarch64.neon.vcvtfxs2fp.n.f32.v1i32(<1 x i32>, i32)
 
 define double @test_vcvtd_n_f64_s64(i64 %a) {
 ; CHECK: test_vcvtd_n_f64_s64
 ; CHECK: scvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
 entry:
   %vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
-  %0 = call double @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64> %vcvtf, i32 1)
+  %0 = call double @llvm.aarch64.neon.vcvtfxs2fp.n.f64.v1i64(<1 x i64> %vcvtf, i32 1)
   ret double %0
 }
 
-declare double @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64>, i32)
+declare double @llvm.aarch64.neon.vcvtfxs2fp.n.f64.v1i64(<1 x i64>, i32)
 
 define float @test_vcvts_n_f32_u32(i32 %a) {
 ; CHECK: test_vcvts_n_f32_u32
 ; CHECK: ucvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
 entry:
   %vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
-  %0 = call float @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32> %vcvtf, i32 1)
+  %0 = call float @llvm.aarch64.neon.vcvtfxu2fp.n.f32.v1i32(<1 x i32> %vcvtf, i32 1)
   ret float %0
 }
 
-declare float @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32>, i32)
+declare float @llvm.aarch64.neon.vcvtfxu2fp.n.f32.v1i32(<1 x i32>, i32)
 
 define double @test_vcvtd_n_f64_u64(i64 %a) {
 ; CHECK: test_vcvtd_n_f64_u64
 ; CHECK: ucvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
 entry:
   %vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
-  %0 = call double @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64> %vcvtf, i32 1)
+  %0 = call double @llvm.aarch64.neon.vcvtfxu2fp.n.f64.v1i64(<1 x i64> %vcvtf, i32 1)
   ret double %0
 }
 
-declare double @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64>, i32)
+declare double @llvm.aarch64.neon.vcvtfxu2fp.n.f64.v1i64(<1 x i64>, i32)
 
 define i32 @test_vcvts_n_s32_f32(float %a) {
 ; CHECK: test_vcvts_n_s32_f32
 ; CHECK: fcvtzs {{s[0-9]+}}, {{s[0-9]+}}, #1
 entry:
   %fcvtzs = insertelement <1 x float> undef, float %a, i32 0
-  %fcvtzs1 = call <1 x i32> @llvm.aarch64.neon.vcvts.n.s32.f32(<1 x float> %fcvtzs, i32 1)
+  %fcvtzs1 = call <1 x i32> @llvm.aarch64.neon.vcvtfp2fxs.n.v1i32.v1f32(<1 x float> %fcvtzs, i32 1)
   %0 = extractelement <1 x i32> %fcvtzs1, i32 0
   ret i32 %0
 }
 
-declare <1 x i32> @llvm.aarch64.neon.vcvts.n.s32.f32(<1 x float>, i32)
+declare <1 x i32> @llvm.aarch64.neon.vcvtfp2fxs.n.v1i32.v1f32(<1 x float>, i32)
 
 define i64 @test_vcvtd_n_s64_f64(double %a) {
 ; CHECK: test_vcvtd_n_s64_f64
 ; CHECK: fcvtzs {{d[0-9]+}}, {{d[0-9]+}}, #1
 entry:
   %fcvtzs = insertelement <1 x double> undef, double %a, i32 0
-  %fcvtzs1 = call <1 x i64> @llvm.aarch64.neon.vcvtd.n.s64.f64(<1 x double> %fcvtzs, i32 1)
+  %fcvtzs1 = call <1 x i64> @llvm.aarch64.neon.vcvtfp2fxs.n.v1i64.v1f64(<1 x double> %fcvtzs, i32 1)
   %0 = extractelement <1 x i64> %fcvtzs1, i32 0
   ret i64 %0
 }
 
-declare <1 x i64> @llvm.aarch64.neon.vcvtd.n.s64.f64(<1 x double>, i32)
+declare <1 x i64> @llvm.aarch64.neon.vcvtfp2fxs.n.v1i64.v1f64(<1 x double>, i32)
 
 define i32 @test_vcvts_n_u32_f32(float %a) {
 ; CHECK: test_vcvts_n_u32_f32
 ; CHECK: fcvtzu {{s[0-9]+}}, {{s[0-9]+}}, #32
 entry:
   %fcvtzu = insertelement <1 x float> undef, float %a, i32 0
-  %fcvtzu1 = call <1 x i32> @llvm.aarch64.neon.vcvts.n.u32.f32(<1 x float> %fcvtzu, i32 32)
+  %fcvtzu1 = call <1 x i32> @llvm.aarch64.neon.vcvtfp2fxu.n.v1i32.v1f32(<1 x float> %fcvtzu, i32 32)
   %0 = extractelement <1 x i32> %fcvtzu1, i32 0
   ret i32 %0
 }
 
-declare <1 x i32> @llvm.aarch64.neon.vcvts.n.u32.f32(<1 x float>, i32)
+declare <1 x i32> @llvm.aarch64.neon.vcvtfp2fxu.n.v1i32.v1f32(<1 x float>, i32)
 
 define i64 @test_vcvtd_n_u64_f64(double %a) {
 ; CHECK: test_vcvtd_n_u64_f64
 ; CHECK: fcvtzu {{d[0-9]+}}, {{d[0-9]+}}, #64
 entry:
   %fcvtzu = insertelement <1 x double> undef, double %a, i32 0
-  %fcvtzu1 = tail call <1 x i64> @llvm.aarch64.neon.vcvtd.n.u64.f64(<1 x double> %fcvtzu, i32 64)
+  %fcvtzu1 = tail call <1 x i64> @llvm.aarch64.neon.vcvtfp2fxu.n.v1i64.v1f64(<1 x double> %fcvtzu, i32 64)
   %0 = extractelement <1 x i64> %fcvtzu1, i32 0
   ret i64 %0
 }
 
-declare <1 x i64> @llvm.aarch64.neon.vcvtd.n.u64.f64(<1 x double>, i32)
+declare <1 x i64> @llvm.aarch64.neon.vcvtfp2fxu.n.v1i64.v1f64(<1 x double>, i32)