[AArch64 Neon] Add ACLE intrinsic vceqz_f64.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@196362 91177308-0d34-0410-b5e6-96231b3b80d8
parent c7f14e3d8c · commit dd302615b1
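For context, here is a minimal C usage sketch of the ACLE intrinsic this commit adds selection support for. The wrapper function name is made up for illustration; the intrinsic signature follows ACLE, and the expected instruction mirrors the CHECK line in the new test added below.

    #include <arm_neon.h>

    /* vceqz_f64 compares a one-element double vector against +0.0 and
       yields an all-ones / all-zeros 64-bit mask per lane. */
    uint64x1_t is_zero(float64x1_t a) {
      /* Expected to select the scalar zero-compare form, roughly:
           fcmeq d0, d0, #0.0                                        */
      return vceqz_f64(a);
    }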
@@ -4328,7 +4328,7 @@ multiclass NeonI_Scalar2SameMisc_cmpz_SD_size<bit u, bits<5> opcode,
                                  [],
                                  NoItinerary>;
   def ddi : NeonI_Scalar2SameMisc<u, 0b11, opcode,
-                                  (outs FPR64:$Rd), (ins FPR64:$Rn, fpz64movi:$FPImm),
+                                  (outs FPR64:$Rd), (ins FPR64:$Rn, fpz32:$FPImm),
                                   !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
                                   [],
                                   NoItinerary>;
@@ -4350,11 +4350,11 @@ multiclass Neon_Scalar2SameMisc_cmpz_SD_size_patterns<SDPatternOperator opnode,
                                                       Instruction INSTS,
                                                       Instruction INSTD> {
   def : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn),
-                           (v1f32 (scalar_to_vector (f32 fpimm:$FPImm))))),
-            (INSTS FPR32:$Rn, fpimm:$FPImm)>;
+                           (v1f32 (scalar_to_vector (f32 fpz32:$FPImm))))),
+            (INSTS FPR32:$Rn, fpz32:$FPImm)>;
   def : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn),
-                           (v1f64 (bitconvert (v8i8 Neon_AllZero))))),
-            (INSTD FPR64:$Rn, 0)>;
+                           (v1f32 (scalar_to_vector (f32 fpz32:$FPImm))))),
+            (INSTD FPR64:$Rn, fpz32:$FPImm)>;
 }
 
 multiclass Neon_Scalar2SameMisc_D_size_patterns<SDPatternOperator opnode,
@@ -5119,6 +5119,8 @@ def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETEQ, FCMEQddd>;
 defm FCMEQZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01101, "fcmeq">;
 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vceq,
                                                   FCMEQZssi, FCMEQZddi>;
+def : Pat<(v1i64 (Neon_cmpz (v1f64 FPR64:$Rn), (f32 fpz32:$FPImm), SETEQ)),
+          (FCMEQZddi FPR64:$Rn, fpz32:$FPImm)>;
 
 // Scalar Floating-point Compare Mask Greater Than Or Equal
 defm FCMGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11100, "fcmge">;
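The remaining hunks update the scalar floating-point compare regression test: the compare-against-zero intrinsic calls now pass a <1 x float> zero as the second operand, matching the new fpz32 immediate operand, and a new function covers the vceqz_f64 lowering through a plain fcmp/zext. As a hedged sketch, the scalar-operand ACLE intrinsics these tests exercise look roughly like this in C; the wrapper names are invented, the intrinsic names follow ACLE, and the instruction comments mirror the CHECK lines.

    #include <arm_neon.h>

    /* Scalar compare-against-zero intrinsics; each returns all-ones (true)
       or all-zeros (false) in a 64-bit integer. */
    uint64_t eq0(float64_t a) { return vceqzd_f64(a); } /* fcmeq d, d, #0.0 */
    uint64_t ge0(float64_t a) { return vcgezd_f64(a); } /* fcmge d, d, #0.0 */
    uint64_t gt0(float64_t a) { return vcgtzd_f64(a); } /* fcmgt d, d, #0.0 */
    uint64_t le0(float64_t a) { return vclezd_f64(a); } /* fcmle d, d, #0.0 */
    uint64_t lt0(float64_t a) { return vcltzd_f64(a); } /* fcmlt d, d, #0.0 */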
@@ -24,6 +24,15 @@ entry:
   ret i64 %0
 }
 
+define <1 x i64> @test_vceqz_f64(<1 x double> %a) #0 {
+; CHECK: test_vceqz_f64
+; CHECK: fcmeq {{d[0-9]+}}, {{d[0-9]+}}, #0.0
+entry:
+  %0 = fcmp oeq <1 x double> %a, zeroinitializer
+  %vceqz.i = zext <1 x i1> %0 to <1 x i64>
+  ret <1 x i64> %vceqz.i
+}
+
 define i32 @test_vceqzs_f32(float %a) {
 ; CHECK: test_vceqzs_f32
 ; CHECK: fcmeq {{s[0-9]}}, {{s[0-9]}}, #0.0
@@ -39,7 +48,7 @@ define i64 @test_vceqzd_f64(double %a) {
 ; CHECK: fcmeq {{d[0-9]}}, {{d[0-9]}}, #0.0
 entry:
   %vceq.i = insertelement <1 x double> undef, double %a, i32 0
-  %vceq1.i = call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f64(<1 x double> %vceq.i, <1 x double> zeroinitializer)
+  %vceq1.i = tail call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f32(<1 x double> %vceq.i, <1 x float> zeroinitializer) #5
   %0 = extractelement <1 x i64> %vceq1.i, i32 0
   ret i64 %0
 }
@@ -81,7 +90,7 @@ define i64 @test_vcgezd_f64(double %a) {
 ; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, #0.0
 entry:
   %vcge.i = insertelement <1 x double> undef, double %a, i32 0
-  %vcge1.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double> %vcge.i, <1 x double> zeroinitializer)
+  %vcge1.i = tail call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f32(<1 x double> %vcge.i, <1 x float> zeroinitializer) #5
   %0 = extractelement <1 x i64> %vcge1.i, i32 0
   ret i64 %0
 }
@@ -123,7 +132,7 @@ define i64 @test_vcgtzd_f64(double %a) {
 ; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, #0.0
 entry:
   %vcgt.i = insertelement <1 x double> undef, double %a, i32 0
-  %vcgt1.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double> %vcgt.i, <1 x double> zeroinitializer)
+  %vcgt1.i = tail call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f32(<1 x double> %vcgt.i, <1 x float> zeroinitializer) #5
   %0 = extractelement <1 x i64> %vcgt1.i, i32 0
   ret i64 %0
 }
@@ -165,7 +174,7 @@ define i64 @test_vclezd_f64(double %a) {
 ; CHECK: fcmle {{d[0-9]}}, {{d[0-9]}}, #0.0
 entry:
   %vcle.i = insertelement <1 x double> undef, double %a, i32 0
-  %vcle1.i = call <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1f64.v1f64(<1 x double> %vcle.i, <1 x double> zeroinitializer)
+  %vcle1.i = tail call <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1f64.v1f32(<1 x double> %vcle.i, <1 x float> zeroinitializer) #5
   %0 = extractelement <1 x i64> %vcle1.i, i32 0
   ret i64 %0
 }
@@ -207,7 +216,7 @@ define i64 @test_vcltzd_f64(double %a) {
 ; CHECK: fcmlt {{d[0-9]}}, {{d[0-9]}}, #0.0
 entry:
   %vclt.i = insertelement <1 x double> undef, double %a, i32 0
-  %vclt1.i = call <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1f64.v1f64(<1 x double> %vclt.i, <1 x double> zeroinitializer)
+  %vclt1.i = tail call <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1f64.v1f32(<1 x double> %vclt.i, <1 x float> zeroinitializer) #5
   %0 = extractelement <1 x i64> %vclt1.i, i32 0
   ret i64 %0
 }
@@ -301,15 +310,18 @@ entry:
 }
 
 declare <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
+declare <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
 declare <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
 declare <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
+declare <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
 declare <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
 declare <1 x i32> @llvm.aarch64.neon.vclez.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
+declare <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
 declare <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
+declare <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
 declare <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
 declare <1 x i32> @llvm.aarch64.neon.vcltz.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
+declare <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
 declare <1 x i32> @llvm.aarch64.neon.vcage.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
 declare <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
 declare <1 x i32> @llvm.aarch64.neon.vcagt.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)