[AArch64] Add support for NEON scalar floating-point convert to fixed-point instructions.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@194394 91177308-0d34-0410-b5e6-96231b3b80d8
Chad Rosier 2013-11-11 18:04:07 +00:00
parent 028e4d27b1
commit 30b2a19f3b
6 changed files with 165 additions and 11 deletions
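These instructions convert a floating-point value to a fixed-point integer with a given number of fractional bits: fcvtzs/fcvtzu Rd, Rn, #fbits yields the input multiplied by 2^fbits, rounded toward zero and saturated. As a minimal sketch of the source-level usage the new LLVM intrinsics are meant to back, assuming the standard ACLE scalar intrinsic names vcvts_n_s32_f32 and vcvtd_n_u64_f64 from arm_neon.h (the helper names below are purely illustrative, and the Clang-side mapping is outside this commit):

#include <arm_neon.h>
#include <stdint.h>

/* Float to signed fixed-point with 16 fractional bits (Q16): should select
   fcvtzs s, s, #16. E.g. q16_from_float(1.5f) == 98304, i.e. 1.5 * 2^16. */
static inline int32_t q16_from_float(float x) {
  return vcvts_n_s32_f32(x, 16);
}

/* Double to unsigned fixed-point with 32 fractional bits: should select
   fcvtzu d, d, #32. The immediate must be a constant in [1, 64]. */
static inline uint64_t uq32_from_double(double x) {
  return vcvtd_n_u64_f64(x, 32);
}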


@@ -272,6 +272,18 @@ def int_aarch64_neon_vcvtf32_n_u32 :
def int_aarch64_neon_vcvtf64_n_u64 :
Intrinsic<[llvm_v1f64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;
// Scalar Floating-point Convert To Signed Fixed-point (Immediate)
def int_aarch64_neon_vcvts_n_s32_f32 :
Intrinsic<[llvm_v1i32_ty], [llvm_v1f32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_aarch64_neon_vcvtd_n_s64_f64 :
Intrinsic<[llvm_v1i64_ty], [llvm_v1f64_ty, llvm_i32_ty], [IntrNoMem]>;
// Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
def int_aarch64_neon_vcvts_n_u32_f32 :
Intrinsic<[llvm_v1i32_ty], [llvm_v1f32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_aarch64_neon_vcvtd_n_u64_f64 :
Intrinsic<[llvm_v1i64_ty], [llvm_v1f64_ty, llvm_i32_ty], [IntrNoMem]>;
class Neon_SHA_Intrinsic
: Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v1i32_ty, llvm_v4i32_ty],
[IntrNoMem]>;


@@ -4058,7 +4058,7 @@ multiclass NeonI_ScalarShiftImm_narrow_HSD_size<bit u, bits<5> opcode,
}
}
multiclass NeonI_ScalarShiftImm_scvtf_SD_size<bit u, bits<5> opcode, string asmop> {
multiclass NeonI_ScalarShiftImm_cvt_SD_size<bit u, bits<5> opcode, string asmop> {
def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shr_imm32> {
bits<5> Imm;
let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
@@ -4119,6 +4119,16 @@ multiclass Neon_ScalarShiftImm_scvtf_SD_size_patterns<SDPatternOperator Sopnode,
(INSTD FPR64:$Rn, imm:$Imm)>;
}
multiclass Neon_ScalarShiftImm_fcvts_SD_size_patterns<SDPatternOperator Sopnode,
SDPatternOperator Dopnode,
Instruction INSTS,
Instruction INSTD> {
def ssi : Pat<(v1i32 (Sopnode (v1f32 FPR32:$Rn), (i32 imm:$Imm))),
(INSTS FPR32:$Rn, imm:$Imm)>;
def ddi : Pat<(v1i64 (Dopnode (v1f64 FPR64:$Rn), (i32 imm:$Imm))),
(INSTD FPR64:$Rn, imm:$Imm)>;
}
// Scalar Signed Shift Right (Immediate)
defm SSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00000, "sshr">;
defm : Neon_ScalarShiftImm_D_size_patterns<int_aarch64_neon_vshrds_n, SSHRddi>;
@@ -4218,17 +4228,29 @@ defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqrshrun,
SQRSHRUNsdi>;
// Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
defm SCVTF_N : NeonI_ScalarShiftImm_scvtf_SD_size<0b0, 0b11100, "scvtf">;
defm SCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11100, "scvtf">;
defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtf32_n_s32,
int_aarch64_neon_vcvtf64_n_s64,
SCVTF_Nssi, SCVTF_Nddi>;
// Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
defm UCVTF_N : NeonI_ScalarShiftImm_scvtf_SD_size<0b1, 0b11100, "ucvtf">;
defm UCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11100, "ucvtf">;
defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtf32_n_u32,
int_aarch64_neon_vcvtf64_n_u64,
UCVTF_Nssi, UCVTF_Nddi>;
// Scalar Floating-point Convert To Signed Fixed-point (Immediate)
defm FCVTZS_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11111, "fcvtzs">;
defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvts_n_s32_f32,
int_aarch64_neon_vcvtd_n_s64_f64,
FCVTZS_Nssi, FCVTZS_Nddi>;
// Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
defm FCVTZU_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11111, "fcvtzu">;
defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvts_n_u32_f32,
int_aarch64_neon_vcvtd_n_u64_f64,
FCVTZU_Nssi, FCVTZU_Nddi>;
// Scalar Integer Add
let isCommutable = 1 in {
def ADDddd : NeonI_Scalar3Same_D_size<0b0, 0b10000, "add">;


@@ -5,7 +5,7 @@ define float @test_vcvts_f32_s32(i32 %a) {
; CHECK: scvtf {{s[0-9]+}}, {{s[0-9]+}}
entry:
%vcvtf.i = insertelement <1 x i32> undef, i32 %a, i32 0
%vcvtf1.i = tail call <1 x float> @llvm.aarch64.neon.vcvtf32.s32(<1 x i32> %vcvtf.i)
%vcvtf1.i = call <1 x float> @llvm.aarch64.neon.vcvtf32.s32(<1 x i32> %vcvtf.i)
%0 = extractelement <1 x float> %vcvtf1.i, i32 0
ret float %0
}
@@ -17,7 +17,7 @@ define double @test_vcvtd_f64_s64(i64 %a) {
; CHECK: scvtf {{d[0-9]+}}, {{d[0-9]+}}
entry:
%vcvtf.i = insertelement <1 x i64> undef, i64 %a, i32 0
%vcvtf1.i = tail call <1 x double> @llvm.aarch64.neon.vcvtf64.s64(<1 x i64> %vcvtf.i)
%vcvtf1.i = call <1 x double> @llvm.aarch64.neon.vcvtf64.s64(<1 x i64> %vcvtf.i)
%0 = extractelement <1 x double> %vcvtf1.i, i32 0
ret double %0
}
@@ -29,7 +29,7 @@ define float @test_vcvts_f32_u32(i32 %a) {
; CHECK: ucvtf {{s[0-9]+}}, {{s[0-9]+}}
entry:
%vcvtf.i = insertelement <1 x i32> undef, i32 %a, i32 0
%vcvtf1.i = tail call <1 x float> @llvm.aarch64.neon.vcvtf32.u32(<1 x i32> %vcvtf.i)
%vcvtf1.i = call <1 x float> @llvm.aarch64.neon.vcvtf32.u32(<1 x i32> %vcvtf.i)
%0 = extractelement <1 x float> %vcvtf1.i, i32 0
ret float %0
}
@@ -41,7 +41,7 @@ define double @test_vcvtd_f64_u64(i64 %a) {
; CHECK: ucvtf {{d[0-9]+}}, {{d[0-9]+}}
entry:
%vcvtf.i = insertelement <1 x i64> undef, i64 %a, i32 0
%vcvtf1.i = tail call <1 x double> @llvm.aarch64.neon.vcvtf64.u64(<1 x i64> %vcvtf.i)
%vcvtf1.i = call <1 x double> @llvm.aarch64.neon.vcvtf64.u64(<1 x i64> %vcvtf.i)
%0 = extractelement <1 x double> %vcvtf1.i, i32 0
ret double %0
}
@@ -53,7 +53,7 @@ define float @test_vcvts_n_f32_s32(i32 %a) {
; CHECK: scvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
entry:
%vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
%vcvtf1 = tail call <1 x float> @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32> %vcvtf, i32 1)
%vcvtf1 = call <1 x float> @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32> %vcvtf, i32 1)
%0 = extractelement <1 x float> %vcvtf1, i32 0
ret float %0
}
@@ -65,7 +65,7 @@ define double @test_vcvtd_n_f64_s64(i64 %a) {
; CHECK: scvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
entry:
%vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
%vcvtf1 = tail call <1 x double> @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64> %vcvtf, i32 1)
%vcvtf1 = call <1 x double> @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64> %vcvtf, i32 1)
%0 = extractelement <1 x double> %vcvtf1, i32 0
ret double %0
}
@@ -77,7 +77,7 @@ define float @test_vcvts_n_f32_u32(i32 %a) {
; CHECK: ucvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
entry:
%vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
%vcvtf1 = tail call <1 x float> @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32> %vcvtf, i32 1)
%vcvtf1 = call <1 x float> @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32> %vcvtf, i32 1)
%0 = extractelement <1 x float> %vcvtf1, i32 0
ret float %0
}
@@ -89,9 +89,57 @@ define double @test_vcvtd_n_f64_u64(i64 %a) {
; CHECK: ucvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
entry:
%vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
%vcvtf1 = tail call <1 x double> @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64> %vcvtf, i32 1)
%vcvtf1 = call <1 x double> @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64> %vcvtf, i32 1)
%0 = extractelement <1 x double> %vcvtf1, i32 0
ret double %0
}
declare <1 x double> @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64>, i32)
define i32 @test_vcvts_n_s32_f32(float %a) {
; CHECK: test_vcvts_n_s32_f32
; CHECK: fcvtzs {{s[0-9]+}}, {{s[0-9]+}}, #0
entry:
%fcvtzs = insertelement <1 x float> undef, float %a, i32 0
%fcvtzs1 = call <1 x i32> @llvm.aarch64.neon.vcvts.n.s32.f32(<1 x float> %fcvtzs, i32 0)
%0 = extractelement <1 x i32> %fcvtzs1, i32 0
ret i32 %0
}
declare <1 x i32> @llvm.aarch64.neon.vcvts.n.s32.f32(<1 x float>, i32)
define i64 @test_vcvtd_n_s64_f64(double %a) {
; CHECK: test_vcvtd_n_s64_f64
; CHECK: fcvtzs {{d[0-9]+}}, {{d[0-9]+}}, #0
entry:
%fcvtzs = insertelement <1 x double> undef, double %a, i32 0
%fcvtzs1 = call <1 x i64> @llvm.aarch64.neon.vcvtd.n.s64.f64(<1 x double> %fcvtzs, i32 0)
%0 = extractelement <1 x i64> %fcvtzs1, i32 0
ret i64 %0
}
declare <1 x i64> @llvm.aarch64.neon.vcvtd.n.s64.f64(<1 x double>, i32)
define i32 @test_vcvts_n_u32_f32(float %a) {
; CHECK: test_vcvts_n_u32_f32
; CHECK: fcvtzu {{s[0-9]+}}, {{s[0-9]+}}, #0
entry:
%fcvtzu = insertelement <1 x float> undef, float %a, i32 0
%fcvtzu1 = call <1 x i32> @llvm.aarch64.neon.vcvts.n.u32.f32(<1 x float> %fcvtzu, i32 0)
%0 = extractelement <1 x i32> %fcvtzu1, i32 0
ret i32 %0
}
declare <1 x i32> @llvm.aarch64.neon.vcvts.n.u32.f32(<1 x float>, i32)
define i64 @test_vcvtd_n_u64_f64(double %a) {
; CHECK: test_vcvtd_n_u64_f64
; CHECK: fcvtzu {{d[0-9]+}}, {{d[0-9]+}}, #0
entry:
%fcvtzu = insertelement <1 x double> undef, double %a, i32 0
%fcvtzu1 = tail call <1 x i64> @llvm.aarch64.neon.vcvtd.n.u64.f64(<1 x double> %fcvtzu, i32 0)
%0 = extractelement <1 x i64> %fcvtzu1, i32 0
ret i64 %0
}
declare <1 x i64> @llvm.aarch64.neon.vcvtd.n.u64.f64(<1 x double>, i32)


@@ -5089,6 +5089,42 @@
// CHECK-ERROR: ucvtf d21, s14, #64
// CHECK-ERROR: ^
//----------------------------------------------------------------------
// Scalar Floating-point Convert To Signed Fixed-point (Immediate)
//----------------------------------------------------------------------
fcvtzs s21, s12, #0
fcvtzs d21, d12, #65
fcvtzs s21, d12, #1
// CHECK-ERROR: error: expected integer in range [1, 32]
// CHECK-ERROR: fcvtzs s21, s12, #0
// CHECK-ERROR: ^
// CHECK-ERROR: error: expected integer in range [1, 64]
// CHECK-ERROR: fcvtzs d21, d12, #65
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fcvtzs s21, d12, #1
// CHECK-ERROR: ^
//----------------------------------------------------------------------
// Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
//----------------------------------------------------------------------
fcvtzu s21, s12, #33
fcvtzu d21, d12, #0
fcvtzu s21, d12, #1
// CHECK-ERROR: error: expected integer in range [1, 32]
// CHECK-ERROR: fcvtzu s21, s12, #33
// CHECK-ERROR: ^
// CHECK-ERROR: error: expected integer in range [1, 64]
// CHECK-ERROR: fcvtzu d21, d12, #0
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: fcvtzu s21, d12, #1
// CHECK-ERROR: ^
//----------------------------------------------------------------------
// Scalar Unsigned Saturating Extract Narrow
//----------------------------------------------------------------------


@@ -41,3 +41,23 @@
// CHECK: ucvtf s22, s13, #32 // encoding: [0xb6,0xe5,0x20,0x7f]
// CHECK: ucvtf d21, d14, #64 // encoding: [0xd5,0xe5,0x40,0x7f]
//----------------------------------------------------------------------
// Scalar Floating-point Convert To Signed Fixed-point (Immediate)
//----------------------------------------------------------------------
fcvtzs s21, s12, #1
fcvtzs d21, d12, #1
// CHECK: fcvtzs s21, s12, #1 // encoding: [0x95,0xfd,0x3f,0x5f]
// CHECK: fcvtzs d21, d12, #1 // encoding: [0x95,0xfd,0x7f,0x5f]
//----------------------------------------------------------------------
// Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
//----------------------------------------------------------------------
fcvtzu s21, s12, #1
fcvtzu d21, d12, #1
// CHECK: fcvtzu s21, s12, #1 // encoding: [0x95,0xfd,0x3f,0x7f]
// CHECK: fcvtzu d21, d12, #1 // encoding: [0x95,0xfd,0x7f,0x7f]


@@ -1972,6 +1972,22 @@ # RUN: llvm-mc -triple aarch64-none-linux-gnu -mattr=+neon -disassemble < %s |
0xb6,0xe5,0x20,0x7f
0xd5,0xe5,0x40,0x7f
#----------------------------------------------------------------------
# Scalar Floating-point Convert To Signed Fixed-point (Immediate)
#----------------------------------------------------------------------
# CHECK: fcvtzs s21, s12, #1
# CHECK: fcvtzs d21, d12, #1
0x95,0xfd,0x3f,0x5f
0x95,0xfd,0x7f,0x5f
#----------------------------------------------------------------------
# Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
#----------------------------------------------------------------------
# CHECK: fcvtzu s21, s12, #1
# CHECK: fcvtzu d21, d12, #1
0x95,0xfd,0x3f,0x7f
0x95,0xfd,0x7f,0x7f
#----------------------------------------------------------------------
# Vector load/store multiple N-element structure
#----------------------------------------------------------------------