[AArch64] Overload NEON signed/unsigned floating-point convert to fixed-point
and fixed-point convert to floating-point LLVM AArch64 intrinsics.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@196963 91177308-0d34-0410-b5e6-96231b3b80d8
Chad Rosier 2013-12-10 21:33:53 +00:00
parent d096a5c237
commit c000d11d5d
3 changed files with 34 additions and 48 deletions
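The substance of the change: four families of concretely-typed scalar conversion intrinsics collapse into four overloaded ones, one per conversion direction and signedness, with the concrete types mangled into the intrinsic name at each use. A minimal IR-level illustration of the renaming, taken from the f32/v1i32 case in the updated test below:

    ; before: one intrinsic per element type
    declare float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32>, i32)

    ; after: one overloaded intrinsic per operation; the return and
    ; operand types are encoded as .f32.v1i32 in the mangled name
    declare float @llvm.aarch64.neon.vcvtfxs2fp.n.f32.v1i32(<1 x i32>, i32)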

@@ -349,28 +349,20 @@ def int_aarch64_neon_vqshlu_n : Neon_N2V_Intrinsic;
 def int_aarch64_neon_vqshlus_n : Neon_N2V_Intrinsic;
 
 // Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
-def int_aarch64_neon_vcvtf32_n_s32 :
-  Intrinsic<[llvm_float_ty], [llvm_v1i32_ty, llvm_i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtf64_n_s64 :
-  Intrinsic<[llvm_double_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtfxs2fp_n :
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty, llvm_i32_ty], [IntrNoMem]>;
 
 // Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
-def int_aarch64_neon_vcvtf32_n_u32 :
-  Intrinsic<[llvm_float_ty], [llvm_v1i32_ty, llvm_i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtf64_n_u64 :
-  Intrinsic<[llvm_double_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtfxu2fp_n :
+  Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty, llvm_i32_ty], [IntrNoMem]>;
 
 // Scalar Floating-point Convert To Signed Fixed-point (Immediate)
-def int_aarch64_neon_vcvts_n_s32_f32 :
-  Intrinsic<[llvm_v1i32_ty], [llvm_v1f32_ty, llvm_i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtd_n_s64_f64 :
-  Intrinsic<[llvm_v1i64_ty], [llvm_v1f64_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtfp2fxs_n :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_i32_ty], [IntrNoMem]>;
 
 // Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
-def int_aarch64_neon_vcvts_n_u32_f32 :
-  Intrinsic<[llvm_v1i32_ty], [llvm_v1f32_ty, llvm_i32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtd_n_u64_f64 :
-  Intrinsic<[llvm_v1i64_ty], [llvm_v1f64_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vcvtfp2fxu_n :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_i32_ty], [IntrNoMem]>;
 
 class Neon_SHA_Intrinsic
   : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v1i32_ty, llvm_v4i32_ty],
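Because the overloaded definitions above (in IntrinsicsAArch64.td) use llvm_anyfloat_ty and llvm_anyvector_ty, each def now covers both the 32-bit and 64-bit scalar cases that previously needed separate defs; every concrete instantiation surfaces in IR under a type-suffixed name. For instance, the floating-point-to-signed-fixed-point intrinsic resolves to the following two instances, both of which appear in the updated test file below:

    declare <1 x i32> @llvm.aarch64.neon.vcvtfp2fxs.n.v1i32.v1f32(<1 x float>, i32)
    declare <1 x i64> @llvm.aarch64.neon.vcvtfp2fxs.n.v1i64.v1f64(<1 x double>, i32)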

@@ -4627,23 +4627,21 @@ multiclass Neon_ScalarShiftImm_narrow_HSD_size_patterns<
                 (INSTD FPR64:$Rn, imm:$Imm)>;
 }
 
-multiclass Neon_ScalarShiftImm_scvtf_SD_size_patterns<SDPatternOperator Sopnode,
-                                                      SDPatternOperator Dopnode,
+multiclass Neon_ScalarShiftImm_scvtf_SD_size_patterns<SDPatternOperator opnode,
                                                       Instruction INSTS,
                                                       Instruction INSTD> {
-  def ssi : Pat<(f32 (Sopnode (v1i32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
+  def ssi : Pat<(f32 (opnode (v1i32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
                 (INSTS FPR32:$Rn, imm:$Imm)>;
-  def ddi : Pat<(f64 (Dopnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
+  def ddi : Pat<(f64 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
                 (INSTD FPR64:$Rn, imm:$Imm)>;
 }
 
-multiclass Neon_ScalarShiftImm_fcvts_SD_size_patterns<SDPatternOperator Sopnode,
-                                                      SDPatternOperator Dopnode,
+multiclass Neon_ScalarShiftImm_fcvts_SD_size_patterns<SDPatternOperator opnode,
                                                       Instruction INSTS,
                                                       Instruction INSTD> {
-  def ssi : Pat<(v1i32 (Sopnode (v1f32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
+  def ssi : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
                 (INSTS FPR32:$Rn, imm:$Imm)>;
-  def ddi : Pat<(v1i64 (Dopnode (v1f64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
+  def ddi : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
                 (INSTD FPR64:$Rn, imm:$Imm)>;
 }

@@ -4763,26 +4761,22 @@ defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqrshrun,
 
 // Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
 defm SCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11100, "scvtf">;
-defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtf32_n_s32,
-                                                  int_aarch64_neon_vcvtf64_n_s64,
+defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtfxs2fp_n,
                                                   SCVTF_Nssi, SCVTF_Nddi>;
 
 // Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
 defm UCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11100, "ucvtf">;
-defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtf32_n_u32,
-                                                  int_aarch64_neon_vcvtf64_n_u64,
+defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtfxu2fp_n,
                                                   UCVTF_Nssi, UCVTF_Nddi>;
 
 // Scalar Floating-point Convert To Signed Fixed-point (Immediate)
 defm FCVTZS_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11111, "fcvtzs">;
-defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvts_n_s32_f32,
-                                                  int_aarch64_neon_vcvtd_n_s64_f64,
+defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvtfp2fxs_n,
                                                   FCVTZS_Nssi, FCVTZS_Nddi>;
 
 // Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
 defm FCVTZU_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11111, "fcvtzu">;
-defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvts_n_u32_f32,
-                                                  int_aarch64_neon_vcvtd_n_u64_f64,
+defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvtfp2fxu_n,
                                                   FCVTZU_Nssi, FCVTZU_Nddi>;
 
 // Patterns For Convert Instructions Between v1f64 and v1i64
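Dropping the second SDPatternOperator parameter works because the overloaded intrinsic is legal at both scalar widths; the explicit result and operand types in each Pat select the right instance. Conceptually, the SCVTF_N instantiation above expands to patterns equivalent to:

    def : Pat<(f32 (int_aarch64_neon_vcvtfxs2fp_n (v1i32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
              (SCVTF_Nssi FPR32:$Rn, imm:$Imm)>;
    def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp_n (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
              (SCVTF_Nddi FPR64:$Rn, imm:$Imm)>;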

@@ -49,89 +49,89 @@ define float @test_vcvts_n_f32_s32(i32 %a) {
 ; CHECK: scvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
 entry:
   %vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
-  %0 = call float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32> %vcvtf, i32 1)
+  %0 = call float @llvm.aarch64.neon.vcvtfxs2fp.n.f32.v1i32(<1 x i32> %vcvtf, i32 1)
   ret float %0
 }
 
-declare float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32>, i32)
+declare float @llvm.aarch64.neon.vcvtfxs2fp.n.f32.v1i32(<1 x i32>, i32)
 
 define double @test_vcvtd_n_f64_s64(i64 %a) {
 ; CHECK: test_vcvtd_n_f64_s64
 ; CHECK: scvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
 entry:
   %vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
-  %0 = call double @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64> %vcvtf, i32 1)
+  %0 = call double @llvm.aarch64.neon.vcvtfxs2fp.n.f64.v1i64(<1 x i64> %vcvtf, i32 1)
   ret double %0
 }
 
-declare double @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64>, i32)
+declare double @llvm.aarch64.neon.vcvtfxs2fp.n.f64.v1i64(<1 x i64>, i32)
 
 define float @test_vcvts_n_f32_u32(i32 %a) {
 ; CHECK: test_vcvts_n_f32_u32
 ; CHECK: ucvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
 entry:
   %vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
-  %0 = call float @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32> %vcvtf, i32 1)
+  %0 = call float @llvm.aarch64.neon.vcvtfxu2fp.n.f32.v1i32(<1 x i32> %vcvtf, i32 1)
   ret float %0
 }
 
-declare float @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32>, i32)
+declare float @llvm.aarch64.neon.vcvtfxu2fp.n.f32.v1i32(<1 x i32>, i32)
 
 define double @test_vcvtd_n_f64_u64(i64 %a) {
 ; CHECK: test_vcvtd_n_f64_u64
 ; CHECK: ucvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
 entry:
   %vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
-  %0 = call double @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64> %vcvtf, i32 1)
+  %0 = call double @llvm.aarch64.neon.vcvtfxu2fp.n.f64.v1i64(<1 x i64> %vcvtf, i32 1)
   ret double %0
 }
 
-declare double @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64>, i32)
+declare double @llvm.aarch64.neon.vcvtfxu2fp.n.f64.v1i64(<1 x i64>, i32)
 
 define i32 @test_vcvts_n_s32_f32(float %a) {
 ; CHECK: test_vcvts_n_s32_f32
 ; CHECK: fcvtzs {{s[0-9]+}}, {{s[0-9]+}}, #1
 entry:
   %fcvtzs = insertelement <1 x float> undef, float %a, i32 0
-  %fcvtzs1 = call <1 x i32> @llvm.aarch64.neon.vcvts.n.s32.f32(<1 x float> %fcvtzs, i32 1)
+  %fcvtzs1 = call <1 x i32> @llvm.aarch64.neon.vcvtfp2fxs.n.v1i32.v1f32(<1 x float> %fcvtzs, i32 1)
   %0 = extractelement <1 x i32> %fcvtzs1, i32 0
   ret i32 %0
 }
 
-declare <1 x i32> @llvm.aarch64.neon.vcvts.n.s32.f32(<1 x float>, i32)
+declare <1 x i32> @llvm.aarch64.neon.vcvtfp2fxs.n.v1i32.v1f32(<1 x float>, i32)
 
 define i64 @test_vcvtd_n_s64_f64(double %a) {
 ; CHECK: test_vcvtd_n_s64_f64
 ; CHECK: fcvtzs {{d[0-9]+}}, {{d[0-9]+}}, #1
 entry:
   %fcvtzs = insertelement <1 x double> undef, double %a, i32 0
-  %fcvtzs1 = call <1 x i64> @llvm.aarch64.neon.vcvtd.n.s64.f64(<1 x double> %fcvtzs, i32 1)
+  %fcvtzs1 = call <1 x i64> @llvm.aarch64.neon.vcvtfp2fxs.n.v1i64.v1f64(<1 x double> %fcvtzs, i32 1)
   %0 = extractelement <1 x i64> %fcvtzs1, i32 0
   ret i64 %0
 }
 
-declare <1 x i64> @llvm.aarch64.neon.vcvtd.n.s64.f64(<1 x double>, i32)
+declare <1 x i64> @llvm.aarch64.neon.vcvtfp2fxs.n.v1i64.v1f64(<1 x double>, i32)
 
 define i32 @test_vcvts_n_u32_f32(float %a) {
 ; CHECK: test_vcvts_n_u32_f32
 ; CHECK: fcvtzu {{s[0-9]+}}, {{s[0-9]+}}, #32
 entry:
   %fcvtzu = insertelement <1 x float> undef, float %a, i32 0
-  %fcvtzu1 = call <1 x i32> @llvm.aarch64.neon.vcvts.n.u32.f32(<1 x float> %fcvtzu, i32 32)
+  %fcvtzu1 = call <1 x i32> @llvm.aarch64.neon.vcvtfp2fxu.n.v1i32.v1f32(<1 x float> %fcvtzu, i32 32)
   %0 = extractelement <1 x i32> %fcvtzu1, i32 0
   ret i32 %0
 }
 
-declare <1 x i32> @llvm.aarch64.neon.vcvts.n.u32.f32(<1 x float>, i32)
+declare <1 x i32> @llvm.aarch64.neon.vcvtfp2fxu.n.v1i32.v1f32(<1 x float>, i32)
 
 define i64 @test_vcvtd_n_u64_f64(double %a) {
 ; CHECK: test_vcvtd_n_u64_f64
 ; CHECK: fcvtzu {{d[0-9]+}}, {{d[0-9]+}}, #64
 entry:
   %fcvtzu = insertelement <1 x double> undef, double %a, i32 0
-  %fcvtzu1 = tail call <1 x i64> @llvm.aarch64.neon.vcvtd.n.u64.f64(<1 x double> %fcvtzu, i32 64)
+  %fcvtzu1 = tail call <1 x i64> @llvm.aarch64.neon.vcvtfp2fxu.n.v1i64.v1f64(<1 x double> %fcvtzu, i32 64)
   %0 = extractelement <1 x i64> %fcvtzu1, i32 0
   ret i64 %0
 }
 
-declare <1 x i64> @llvm.aarch64.neon.vcvtd.n.u64.f64(<1 x double>, i32)
+declare <1 x i64> @llvm.aarch64.neon.vcvtfp2fxu.n.v1i64.v1f64(<1 x double>, i32)
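To exercise the new mangled names end to end, the test runs through llc and FileCheck in the usual way. A sketch of a typical invocation, assuming the test file is named neon-scalar-cvt.ll (the actual RUN line in the file may differ):

    llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < neon-scalar-cvt.ll | FileCheck neon-scalar-cvt.ll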