From f853a034a1fdccd194da04ca1e2e1aa8bcbd16b4 Mon Sep 17 00:00:00 2001 From: Chad Rosier Date: Wed, 30 Oct 2013 15:19:37 +0000 Subject: [PATCH] [AArch64] Add support for NEON scalar floating-point compare instructions. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@193691 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/IR/IntrinsicsAArch64.td | 33 +- lib/Target/AArch64/AArch64ISelLowering.cpp | 5 +- lib/Target/AArch64/AArch64InstrInfo.td | 7 + lib/Target/AArch64/AArch64InstrNEON.td | 95 +++++- test/CodeGen/AArch64/neon-scalar-compare.ll | 38 +-- .../CodeGen/AArch64/neon-scalar-fp-compare.ll | 316 ++++++++++++++++++ test/MC/AArch64/neon-diagnostics.s | 140 ++++++++ test/MC/AArch64/neon-scalar-fp-compare.s | 103 ++++++ .../AArch64/neon-instructions.txt | 80 +++++ 9 files changed, 780 insertions(+), 37 deletions(-) create mode 100644 test/CodeGen/AArch64/neon-scalar-fp-compare.ll create mode 100644 test/MC/AArch64/neon-scalar-fp-compare.s diff --git a/include/llvm/IR/IntrinsicsAArch64.td b/include/llvm/IR/IntrinsicsAArch64.td index 944c1449ebe..76b9215a833 100644 --- a/include/llvm/IR/IntrinsicsAArch64.td +++ b/include/llvm/IR/IntrinsicsAArch64.td @@ -168,28 +168,35 @@ def int_aarch64_neon_vcvtf64_u64 : // Scalar Floating-point Reciprocal Exponent def int_aarch64_neon_vrecpx : Neon_1Arg_Intrinsic; -class Neon_ICmp_Intrinsic - : Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; +class Neon_Cmp_Intrinsic + : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_anyvector_ty], + [IntrNoMem]>; -// Scalar Integer Compare Equal -def int_aarch64_neon_vceq : Neon_ICmp_Intrinsic; +// Scalar Compare Equal +def int_aarch64_neon_vceq : Neon_Cmp_Intrinsic; -// Scalar Integer Compare Greater-Than or Equal -def int_aarch64_neon_vcge : Neon_ICmp_Intrinsic; -def int_aarch64_neon_vchs : Neon_ICmp_Intrinsic; +// Scalar Compare Greater-Than or Equal +def int_aarch64_neon_vcge : Neon_Cmp_Intrinsic; +def int_aarch64_neon_vchs : Neon_Cmp_Intrinsic; 
-// Scalar Integer Compare Less-Than or Equal -def int_aarch64_neon_vclez : Neon_ICmp_Intrinsic; +// Scalar Compare Less-Than or Equal +def int_aarch64_neon_vclez : Neon_Cmp_Intrinsic; // Scalar Compare Less-Than -def int_aarch64_neon_vcltz : Neon_ICmp_Intrinsic; +def int_aarch64_neon_vcltz : Neon_Cmp_Intrinsic; // Scalar Compare Greater-Than -def int_aarch64_neon_vcgt : Neon_ICmp_Intrinsic; -def int_aarch64_neon_vchi : Neon_ICmp_Intrinsic; +def int_aarch64_neon_vcgt : Neon_Cmp_Intrinsic; +def int_aarch64_neon_vchi : Neon_Cmp_Intrinsic; // Scalar Compare Bitwise Test Bits -def int_aarch64_neon_vtstd : Neon_ICmp_Intrinsic; +def int_aarch64_neon_vtstd : Neon_Cmp_Intrinsic; + +// Scalar Floating-point Absolute Compare Greater Than Or Equal +def int_aarch64_neon_vcage : Neon_Cmp_Intrinsic; + +// Scalar Floating-point Absolute Compare Greater Than +def int_aarch64_neon_vcagt : Neon_Cmp_Intrinsic; // Scalar Signed Saturating Accumulated of Unsigned Value def int_aarch64_neon_vuqadd : Neon_2Arg_Intrinsic; diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp index 77aadee6caf..87bb847993e 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -3492,12 +3492,15 @@ AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, unsigned SplatBitSize; bool HasAnyUndefs; + unsigned UseNeonMov = VT.getSizeInBits() >= 64; + // Note we favor lowering MOVI over MVNI. // This has implications on the definition of patterns in TableGen to select // BIC immediate instructions but not ORR immediate instructions. // If this lowering order is changed, TableGen patterns for BIC immediate and // ORR immediate instructions have to be updated. 
- if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { + if (UseNeonMov && + BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { if (SplatBitSize <= 64) { // First attempt to use vector immediate-form MOVI EVT NeonMovVT; diff --git a/lib/Target/AArch64/AArch64InstrInfo.td b/lib/Target/AArch64/AArch64InstrInfo.td index 233279954a1..43df2b43622 100644 --- a/lib/Target/AArch64/AArch64InstrInfo.td +++ b/lib/Target/AArch64/AArch64InstrInfo.td @@ -1980,6 +1980,13 @@ def fpz64 : Operand, let DecoderMethod = "DecodeFPZeroOperand"; } +def fpz64movi : Operand, + ComplexPattern { + let ParserMatchClass = fpzero_asmoperand; + let PrintMethod = "printFPZeroOperand"; + let DecoderMethod = "DecodeFPZeroOperand"; +} + multiclass A64I_fpcmpSignal type, bit imm, dag ins, dag pattern> { def _quiet : A64I_fpcmp<0b0, 0b0, type, 0b00, {0b0, imm, 0b0, 0b0, 0b0}, (outs), ins, "fcmp\t$Rn, $Rm", [pattern], diff --git a/lib/Target/AArch64/AArch64InstrNEON.td b/lib/Target/AArch64/AArch64InstrNEON.td index 10dde19318e..b9f83f7b9f4 100644 --- a/lib/Target/AArch64/AArch64InstrNEON.td +++ b/lib/Target/AArch64/AArch64InstrNEON.td @@ -3210,8 +3210,8 @@ multiclass Neon_Scalar3Same_BHSD_size_patterns - : Pat<(v1i64 (opnode (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))), - (INSTD VPR64:$Rn, VPR64:$Rm)>; + : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))), + (INSTD FPR64:$Rn, FPR64:$Rm)>; multiclass Neon_Scalar3Same_HS_size_patterns; } +multiclass Neon_Scalar3Same_cmp_SD_size_patterns { + def : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn), (v1f32 FPR32:$Rm))), + (INSTS FPR32:$Rn, FPR32:$Rm)>; + def : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))), + (INSTD FPR64:$Rn, FPR64:$Rm)>; +} + // Scalar Three Different multiclass NeonI_Scalar3Diff_HS_size opcode, string asmop> { @@ -3381,10 +3390,36 @@ class NeonI_Scalar2SameMisc_cmpz_D_size opcode, string asmop> [], NoItinerary>; +multiclass NeonI_Scalar2SameMisc_cmpz_SD_size opcode, + string asmop> { + def 
ssi : NeonI_Scalar2SameMisc; + def ddi : NeonI_Scalar2SameMisc; +} + class Neon_Scalar2SameMisc_cmpz_D_size_patterns - : Pat<(v1i64 (opnode (v1i64 VPR64:$Rn), (v1i64 (bitconvert (v8i8 Neon_immAllZeros))))), - (INSTD VPR64:$Rn, 0)>; + : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), + (v1i64 (bitconvert (v8i8 Neon_immAllZeros))))), + (INSTD FPR64:$Rn, 0)>; + +multiclass Neon_Scalar2SameMisc_cmpz_SD_size_patterns { + def : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn), + (v1f32 (scalar_to_vector (f32 fpimm:$FPImm))))), + (INSTS FPR32:$Rn, fpimm:$FPImm)>; + def : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn), + (v1f64 (bitconvert (v8i8 Neon_immAllZeros))))), + (INSTD FPR64:$Rn, 0)>; +} multiclass Neon_Scalar2SameMisc_D_size_patterns { @@ -3669,6 +3704,58 @@ def CMLTddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01010, "cmlt">; def : Neon_Scalar2SameMisc_cmpz_D_size_patterns; +// Scalar Floating-point Compare + +// Scalar Floating-point Compare Mask Equal +defm FCMEQ: NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11100, "fcmeq">; +defm : Neon_Scalar3Same_cmp_SD_size_patterns; + +// Scalar Floating-point Compare Mask Equal To Zero +defm FCMEQZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01101, "fcmeq">; +defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns; + +// Scalar Floating-point Compare Mask Greater Than Or Equal +defm FCMGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11100, "fcmge">; +defm : Neon_Scalar3Same_cmp_SD_size_patterns; + +// Scalar Floating-point Compare Mask Greater Than Or Equal To Zero +defm FCMGEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01100, "fcmge">; +defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns; + +// Scalar Floating-point Compare Mask Greather Than +defm FCMGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11100, "fcmgt">; +defm : Neon_Scalar3Same_cmp_SD_size_patterns; + +// Scalar Floating-point Compare Mask Greather Than Zero +defm FCMGTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01100, "fcmgt">; +defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns; + +// Scalar Floating-point 
Compare Mask Less Than Or Equal To Zero +defm FCMLEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01101, "fcmle">; +defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns; + +// Scalar Floating-point Compare Mask Less Than Zero +defm FCMLTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01110, "fcmlt">; +defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns; + +// Scalar Floating-point Absolute Compare Mask Greater Than Or Equal +defm FACGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11101, "facge">; +defm : Neon_Scalar3Same_cmp_SD_size_patterns; + +// Scalar Floating-point Absolute Compare Mask Greater Than +defm FACGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11101, "facgt">; +defm : Neon_Scalar3Same_cmp_SD_size_patterns; + // Scalar Absolute Value defm ABS : NeonI_Scalar2SameMisc_D_size<0b0, 0b01011, "abs">; defm : Neon_Scalar2SameMisc_D_size_patterns; diff --git a/test/CodeGen/AArch64/neon-scalar-compare.ll b/test/CodeGen/AArch64/neon-scalar-compare.ll index 831c10bf4b8..a1cfdf0b5c7 100644 --- a/test/CodeGen/AArch64/neon-scalar-compare.ll +++ b/test/CodeGen/AArch64/neon-scalar-compare.ll @@ -8,7 +8,7 @@ define i64 @test_vceqd(i64 %a, i64 %b) { entry: %vceq.i = insertelement <1 x i64> undef, i64 %a, i32 0 %vceq1.i = insertelement <1 x i64> undef, i64 %b, i32 0 - %vceq2.i = call <1 x i64> @llvm.aarch64.neon.vceq(<1 x i64> %vceq.i, <1 x i64> %vceq1.i) + %vceq2.i = call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1i64.v1i64(<1 x i64> %vceq.i, <1 x i64> %vceq1.i) %0 = extractelement <1 x i64> %vceq2.i, i32 0 ret i64 %0 } @@ -18,7 +18,7 @@ define i64 @test_vceqzd(i64 %a) { ; CHECK: cmeq {{d[0-9]}}, {{d[0-9]}}, #0x0 entry: %vceqz.i = insertelement <1 x i64> undef, i64 %a, i32 0 - %vceqz1.i = call <1 x i64> @llvm.aarch64.neon.vceq(<1 x i64> %vceqz.i, <1 x i64> zeroinitializer) + %vceqz1.i = call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1i64.v1i64(<1 x i64> %vceqz.i, <1 x i64> zeroinitializer) %0 = extractelement <1 x i64> %vceqz1.i, i32 0 ret i64 %0 } @@ -29,7 +29,7 @@ define i64 
@test_vcged(i64 %a, i64 %b) { entry: %vcge.i = insertelement <1 x i64> undef, i64 %a, i32 0 %vcge1.i = insertelement <1 x i64> undef, i64 %b, i32 0 - %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge(<1 x i64> %vcge.i, <1 x i64> %vcge1.i) + %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1i64.v1i64(<1 x i64> %vcge.i, <1 x i64> %vcge1.i) %0 = extractelement <1 x i64> %vcge2.i, i32 0 ret i64 %0 } @@ -39,7 +39,7 @@ define i64 @test_vcgezd(i64 %a) { ; CHECK: cmge {{d[0-9]}}, {{d[0-9]}}, #0x0 entry: %vcgez.i = insertelement <1 x i64> undef, i64 %a, i32 0 - %vcgez1.i = call <1 x i64> @llvm.aarch64.neon.vcge(<1 x i64> %vcgez.i, <1 x i64> zeroinitializer) + %vcgez1.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1i64.v1i64(<1 x i64> %vcgez.i, <1 x i64> zeroinitializer) %0 = extractelement <1 x i64> %vcgez1.i, i32 0 ret i64 %0 } @@ -50,7 +50,7 @@ define i64 @test_vcgtd(i64 %a, i64 %b) { entry: %vcgt.i = insertelement <1 x i64> undef, i64 %a, i32 0 %vcgt1.i = insertelement <1 x i64> undef, i64 %b, i32 0 - %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt(<1 x i64> %vcgt.i, <1 x i64> %vcgt1.i) + %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1i64.v1i64(<1 x i64> %vcgt.i, <1 x i64> %vcgt1.i) %0 = extractelement <1 x i64> %vcgt2.i, i32 0 ret i64 %0 } @@ -60,7 +60,7 @@ define i64 @test_vcgtzd(i64 %a) { ; CHECK: cmgt {{d[0-9]}}, {{d[0-9]}}, #0x0 entry: %vcgtz.i = insertelement <1 x i64> undef, i64 %a, i32 0 - %vcgtz1.i = call <1 x i64> @llvm.aarch64.neon.vcgt(<1 x i64> %vcgtz.i, <1 x i64> zeroinitializer) + %vcgtz1.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1i64.v1i64(<1 x i64> %vcgtz.i, <1 x i64> zeroinitializer) %0 = extractelement <1 x i64> %vcgtz1.i, i32 0 ret i64 %0 } @@ -71,7 +71,7 @@ define i64 @test_vcled(i64 %a, i64 %b) { entry: %vcgt.i = insertelement <1 x i64> undef, i64 %b, i32 0 %vcgt1.i = insertelement <1 x i64> undef, i64 %a, i32 0 - %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt(<1 x i64> %vcgt.i, <1 x i64> %vcgt1.i) + %vcgt2.i = 
call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1i64.v1i64(<1 x i64> %vcgt.i, <1 x i64> %vcgt1.i) %0 = extractelement <1 x i64> %vcgt2.i, i32 0 ret i64 %0 } @@ -81,7 +81,7 @@ define i64 @test_vclezd(i64 %a) { ; CHECK: cmle {{d[0-9]}}, {{d[0-9]}}, #0x0 entry: %vclez.i = insertelement <1 x i64> undef, i64 %a, i32 0 - %vclez1.i = call <1 x i64> @llvm.aarch64.neon.vclez(<1 x i64> %vclez.i, <1 x i64> zeroinitializer) + %vclez1.i = call <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1i64.v1i64(<1 x i64> %vclez.i, <1 x i64> zeroinitializer) %0 = extractelement <1 x i64> %vclez1.i, i32 0 ret i64 %0 } @@ -92,7 +92,7 @@ define i64 @test_vcltd(i64 %a, i64 %b) { entry: %vcge.i = insertelement <1 x i64> undef, i64 %b, i32 0 %vcge1.i = insertelement <1 x i64> undef, i64 %a, i32 0 - %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge(<1 x i64> %vcge.i, <1 x i64> %vcge1.i) + %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1i64.v1i64(<1 x i64> %vcge.i, <1 x i64> %vcge1.i) %0 = extractelement <1 x i64> %vcge2.i, i32 0 ret i64 %0 } @@ -102,7 +102,7 @@ define i64 @test_vcltzd(i64 %a) { ; CHECK: cmlt {{d[0-9]}}, {{d[0-9]}}, #0x0 entry: %vcltz.i = insertelement <1 x i64> undef, i64 %a, i32 0 - %vcltz1.i = call <1 x i64> @llvm.aarch64.neon.vcltz(<1 x i64> %vcltz.i, <1 x i64> zeroinitializer) + %vcltz1.i = call <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1i64.v1i64(<1 x i64> %vcltz.i, <1 x i64> zeroinitializer) %0 = extractelement <1 x i64> %vcltz1.i, i32 0 ret i64 %0 } @@ -113,16 +113,16 @@ define i64 @test_vtstd(i64 %a, i64 %b) { entry: %vtst.i = insertelement <1 x i64> undef, i64 %a, i32 0 %vtst1.i = insertelement <1 x i64> undef, i64 %b, i32 0 - %vtst2.i = call <1 x i64> @llvm.aarch64.neon.vtstd(<1 x i64> %vtst.i, <1 x i64> %vtst1.i) + %vtst2.i = call <1 x i64> @llvm.aarch64.neon.vtstd.v1i64.v1i64.v1i64(<1 x i64> %vtst.i, <1 x i64> %vtst1.i) %0 = extractelement <1 x i64> %vtst2.i, i32 0 ret i64 %0 } -declare <1 x i64> @llvm.aarch64.neon.vtstd(<1 x i64>, <1 x i64>) -declare <1 x i64> 
@llvm.aarch64.neon.vcltz(<1 x i64>, <1 x i64>) -declare <1 x i64> @llvm.aarch64.neon.vchs(<1 x i64>, <1 x i64>) -declare <1 x i64> @llvm.aarch64.neon.vcge(<1 x i64>, <1 x i64>) -declare <1 x i64> @llvm.aarch64.neon.vclez(<1 x i64>, <1 x i64>) -declare <1 x i64> @llvm.aarch64.neon.vchi(<1 x i64>, <1 x i64>) -declare <1 x i64> @llvm.aarch64.neon.vcgt(<1 x i64>, <1 x i64>) -declare <1 x i64> @llvm.aarch64.neon.vceq(<1 x i64>, <1 x i64>) +declare <1 x i64> @llvm.aarch64.neon.vtstd.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>) +declare <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>) +declare <1 x i64> @llvm.aarch64.neon.vchs.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>) +declare <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>) +declare <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>) +declare <1 x i64> @llvm.aarch64.neon.vchi.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>) +declare <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>) +declare <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>) diff --git a/test/CodeGen/AArch64/neon-scalar-fp-compare.ll b/test/CodeGen/AArch64/neon-scalar-fp-compare.ll new file mode 100644 index 00000000000..aa6348d11b2 --- /dev/null +++ b/test/CodeGen/AArch64/neon-scalar-fp-compare.ll @@ -0,0 +1,316 @@ +; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s + +;; Scalar Floating-point Compare + +define i32 @test_vceqs_f32(float %a, float %b) { +; CHECK: test_vceqs_f32 +; CHECK: fcmeq {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}} +entry: + %vceq.i = insertelement <1 x float> undef, float %a, i32 0 + %vceq1.i = insertelement <1 x float> undef, float %b, i32 0 + %vceq2.i = call <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float> %vceq.i, <1 x float> %vceq1.i) + %0 = extractelement <1 x i32> %vceq2.i, i32 0 + ret i32 %0 +} + +define i64 @test_vceqd_f64(double %a, double %b) { +; CHECK: test_vceqd_f64 +; CHECK: fcmeq 
{{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}} +entry: + %vceq.i = insertelement <1 x double> undef, double %a, i32 0 + %vceq1.i = insertelement <1 x double> undef, double %b, i32 0 + %vceq2.i = call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f64(<1 x double> %vceq.i, <1 x double> %vceq1.i) + %0 = extractelement <1 x i64> %vceq2.i, i32 0 + ret i64 %0 +} + +define i32 @test_vceqzs_f32(float %a) { +; CHECK: test_vceqzs_f32 +; CHECK: fcmeq {{s[0-9]}}, {{s[0-9]}}, #0.0 +entry: + %vceq.i = insertelement <1 x float> undef, float %a, i32 0 + %vceq1.i = call <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float> %vceq.i, <1 x float> zeroinitializer) + %0 = extractelement <1 x i32> %vceq1.i, i32 0 + ret i32 %0 +} + +define i64 @test_vceqzd_f64(double %a) { +; CHECK: test_vceqzd_f64 +; CHECK: fcmeq {{d[0-9]}}, {{d[0-9]}}, #0.0 +entry: + %vceq.i = insertelement <1 x double> undef, double %a, i32 0 + %vceq1.i = call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f64(<1 x double> %vceq.i, <1 x double> zeroinitializer) + %0 = extractelement <1 x i64> %vceq1.i, i32 0 + ret i64 %0 +} + +define i32 @test_vcges_f32(float %a, float %b) { +; CHECK: test_vcges_f32 +; CHECK: fcmge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}} +entry: + %vcge.i = insertelement <1 x float> undef, float %a, i32 0 + %vcge1.i = insertelement <1 x float> undef, float %b, i32 0 + %vcge2.i = call <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float> %vcge.i, <1 x float> %vcge1.i) + %0 = extractelement <1 x i32> %vcge2.i, i32 0 + ret i32 %0 +} + +define i64 @test_vcged_f64(double %a, double %b) { +; CHECK: test_vcged_f64 +; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}} +entry: + %vcge.i = insertelement <1 x double> undef, double %a, i32 0 + %vcge1.i = insertelement <1 x double> undef, double %b, i32 0 + %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double> %vcge.i, <1 x double> %vcge1.i) + %0 = extractelement <1 x i64> %vcge2.i, i32 0 + ret i64 %0 +} + +define i32 
@test_vcgezs_f32(float %a) { +; CHECK: test_vcgezs_f32 +; CHECK: fcmge {{s[0-9]}}, {{s[0-9]}}, #0.0 +entry: + %vcge.i = insertelement <1 x float> undef, float %a, i32 0 + %vcge1.i = call <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float> %vcge.i, <1 x float> zeroinitializer) + %0 = extractelement <1 x i32> %vcge1.i, i32 0 + ret i32 %0 +} + +define i64 @test_vcgezd_f64(double %a) { +; CHECK: test_vcgezd_f64 +; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, #0.0 +entry: + %vcge.i = insertelement <1 x double> undef, double %a, i32 0 + %vcge1.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double> %vcge.i, <1 x double> zeroinitializer) + %0 = extractelement <1 x i64> %vcge1.i, i32 0 + ret i64 %0 +} + +define i32 @test_vcgts_f32(float %a, float %b) { +; CHECK: test_vcgts_f32 +; CHECK: fcmgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}} +entry: + %vcgt.i = insertelement <1 x float> undef, float %a, i32 0 + %vcgt1.i = insertelement <1 x float> undef, float %b, i32 0 + %vcgt2.i = call <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float> %vcgt.i, <1 x float> %vcgt1.i) + %0 = extractelement <1 x i32> %vcgt2.i, i32 0 + ret i32 %0 +} + +define i64 @test_vcgtd_f64(double %a, double %b) { +; CHECK: test_vcgtd_f64 +; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}} +entry: + %vcgt.i = insertelement <1 x double> undef, double %a, i32 0 + %vcgt1.i = insertelement <1 x double> undef, double %b, i32 0 + %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double> %vcgt.i, <1 x double> %vcgt1.i) + %0 = extractelement <1 x i64> %vcgt2.i, i32 0 + ret i64 %0 +} + +define i32 @test_vcgtzs_f32(float %a) { +; CHECK: test_vcgtzs_f32 +; CHECK: fcmgt {{s[0-9]}}, {{s[0-9]}}, #0.0 +entry: + %vcgt.i = insertelement <1 x float> undef, float %a, i32 0 + %vcgt1.i = call <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float> %vcgt.i, <1 x float> zeroinitializer) + %0 = extractelement <1 x i32> %vcgt1.i, i32 0 + ret i32 %0 +} + +define i64 
@test_vcgtzd_f64(double %a) { +; CHECK: test_vcgtzd_f64 +; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, #0.0 +entry: + %vcgt.i = insertelement <1 x double> undef, double %a, i32 0 + %vcgt1.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double> %vcgt.i, <1 x double> zeroinitializer) + %0 = extractelement <1 x i64> %vcgt1.i, i32 0 + ret i64 %0 +} + +define i32 @test_vcles_f32(float %a, float %b) { +; CHECK: test_vcles_f32 +; CHECK: fcmge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}} +entry: + %vcge.i = insertelement <1 x float> undef, float %a, i32 0 + %vcge1.i = insertelement <1 x float> undef, float %b, i32 0 + %vcge2.i = call <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float> %vcge.i, <1 x float> %vcge1.i) + %0 = extractelement <1 x i32> %vcge2.i, i32 0 + ret i32 %0 +} + +define i64 @test_vcled_f64(double %a, double %b) { +; CHECK: test_vcled_f64 +; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}} +entry: + %vcge.i = insertelement <1 x double> undef, double %a, i32 0 + %vcge1.i = insertelement <1 x double> undef, double %b, i32 0 + %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double> %vcge.i, <1 x double> %vcge1.i) + %0 = extractelement <1 x i64> %vcge2.i, i32 0 + ret i64 %0 +} + +define i32 @test_vclezs_f32(float %a) { +; CHECK: test_vclezs_f32 +; CHECK: fcmle {{s[0-9]}}, {{s[0-9]}}, #0.0 +entry: + %vcle.i = insertelement <1 x float> undef, float %a, i32 0 + %vcle1.i = call <1 x i32> @llvm.aarch64.neon.vclez.v1i32.v1f32.v1f32(<1 x float> %vcle.i, <1 x float> zeroinitializer) + %0 = extractelement <1 x i32> %vcle1.i, i32 0 + ret i32 %0 +} + +define i64 @test_vclezd_f64(double %a) { +; CHECK: test_vclezd_f64 +; CHECK: fcmle {{d[0-9]}}, {{d[0-9]}}, #0.0 +entry: + %vcle.i = insertelement <1 x double> undef, double %a, i32 0 + %vcle1.i = call <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1f64.v1f64(<1 x double> %vcle.i, <1 x double> zeroinitializer) + %0 = extractelement <1 x i64> %vcle1.i, i32 0 + ret i64 %0 +} + +define i32 
@test_vclts_f32(float %a, float %b) { +; CHECK: test_vclts_f32 +; CHECK: fcmgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}} +entry: + %vcgt.i = insertelement <1 x float> undef, float %b, i32 0 + %vcgt1.i = insertelement <1 x float> undef, float %a, i32 0 + %vcgt2.i = call <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float> %vcgt.i, <1 x float> %vcgt1.i) + %0 = extractelement <1 x i32> %vcgt2.i, i32 0 + ret i32 %0 +} + +define i64 @test_vcltd_f64(double %a, double %b) { +; CHECK: test_vcltd_f64 +; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}} +entry: + %vcgt.i = insertelement <1 x double> undef, double %b, i32 0 + %vcgt1.i = insertelement <1 x double> undef, double %a, i32 0 + %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double> %vcgt.i, <1 x double> %vcgt1.i) + %0 = extractelement <1 x i64> %vcgt2.i, i32 0 + ret i64 %0 +} + +define i32 @test_vcltzs_f32(float %a) { +; CHECK: test_vcltzs_f32 +; CHECK: fcmlt {{s[0-9]}}, {{s[0-9]}}, #0.0 +entry: + %vclt.i = insertelement <1 x float> undef, float %a, i32 0 + %vclt1.i = call <1 x i32> @llvm.aarch64.neon.vcltz.v1i32.v1f32.v1f32(<1 x float> %vclt.i, <1 x float> zeroinitializer) + %0 = extractelement <1 x i32> %vclt1.i, i32 0 + ret i32 %0 +} + +define i64 @test_vcltzd_f64(double %a) { +; CHECK: test_vcltzd_f64 +; CHECK: fcmlt {{d[0-9]}}, {{d[0-9]}}, #0.0 +entry: + %vclt.i = insertelement <1 x double> undef, double %a, i32 0 + %vclt1.i = call <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1f64.v1f64(<1 x double> %vclt.i, <1 x double> zeroinitializer) + %0 = extractelement <1 x i64> %vclt1.i, i32 0 + ret i64 %0 +} + +define i32 @test_vcages_f32(float %a, float %b) { +; CHECK: test_vcages_f32 +; CHECK: facge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}} +entry: + %vcage.i = insertelement <1 x float> undef, float %a, i32 0 + %vcage1.i = insertelement <1 x float> undef, float %b, i32 0 + %vcage2.i = call <1 x i32> @llvm.aarch64.neon.vcage.v1i32.v1f32.v1f32(<1 x float> %vcage.i, <1 x float> %vcage1.i) + %0 = 
extractelement <1 x i32> %vcage2.i, i32 0 + ret i32 %0 +} + +define i64 @test_vcaged_f64(double %a, double %b) { +; CHECK: test_vcaged_f64 +; CHECK: facge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}} +entry: + %vcage.i = insertelement <1 x double> undef, double %a, i32 0 + %vcage1.i = insertelement <1 x double> undef, double %b, i32 0 + %vcage2.i = call <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double> %vcage.i, <1 x double> %vcage1.i) + %0 = extractelement <1 x i64> %vcage2.i, i32 0 + ret i64 %0 +} + +define i32 @test_vcagts_f32(float %a, float %b) { +; CHECK: test_vcagts_f32 +; CHECK: facgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}} +entry: + %vcagt.i = insertelement <1 x float> undef, float %a, i32 0 + %vcagt1.i = insertelement <1 x float> undef, float %b, i32 0 + %vcagt2.i = call <1 x i32> @llvm.aarch64.neon.vcagt.v1i32.v1f32.v1f32(<1 x float> %vcagt.i, <1 x float> %vcagt1.i) + %0 = extractelement <1 x i32> %vcagt2.i, i32 0 + ret i32 %0 +} + +define i64 @test_vcagtd_f64(double %a, double %b) { +; CHECK: test_vcagtd_f64 +; CHECK: facgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}} +entry: + %vcagt.i = insertelement <1 x double> undef, double %a, i32 0 + %vcagt1.i = insertelement <1 x double> undef, double %b, i32 0 + %vcagt2.i = call <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double> %vcagt.i, <1 x double> %vcagt1.i) + %0 = extractelement <1 x i64> %vcagt2.i, i32 0 + ret i64 %0 +} + +define i32 @test_vcales_f32(float %a, float %b) { +; CHECK: test_vcales_f32 +; CHECK: facge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}} +entry: + %vcage.i = insertelement <1 x float> undef, float %b, i32 0 + %vcage1.i = insertelement <1 x float> undef, float %a, i32 0 + %vcage2.i = call <1 x i32> @llvm.aarch64.neon.vcage.v1i32.v1f32.v1f32(<1 x float> %vcage.i, <1 x float> %vcage1.i) + %0 = extractelement <1 x i32> %vcage2.i, i32 0 + ret i32 %0 +} + +define i64 @test_vcaled_f64(double %a, double %b) { +; CHECK: test_vcaled_f64 +; CHECK: facge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}} +entry: + 
%vcage.i = insertelement <1 x double> undef, double %b, i32 0 + %vcage1.i = insertelement <1 x double> undef, double %a, i32 0 + %vcage2.i = call <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double> %vcage.i, <1 x double> %vcage1.i) + %0 = extractelement <1 x i64> %vcage2.i, i32 0 + ret i64 %0 +} + +define i32 @test_vcalts_f32(float %a, float %b) { +; CHECK: test_vcalts_f32 +; CHECK: facgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}} +entry: + %vcalt.i = insertelement <1 x float> undef, float %b, i32 0 + %vcalt1.i = insertelement <1 x float> undef, float %a, i32 0 + %vcalt2.i = call <1 x i32> @llvm.aarch64.neon.vcagt.v1i32.v1f32.v1f32(<1 x float> %vcalt.i, <1 x float> %vcalt1.i) + %0 = extractelement <1 x i32> %vcalt2.i, i32 0 + ret i32 %0 +} + +define i64 @test_vcaltd_f64(double %a, double %b) { +; CHECK: test_vcaltd_f64 +; CHECK: facgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}} +entry: + %vcalt.i = insertelement <1 x double> undef, double %b, i32 0 + %vcalt1.i = insertelement <1 x double> undef, double %a, i32 0 + %vcalt2.i = call <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double> %vcalt.i, <1 x double> %vcalt1.i) + %0 = extractelement <1 x i64> %vcalt2.i, i32 0 + ret i64 %0 +} + +declare <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float>, <1 x float>) +declare <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f64(<1 x double>, <1 x double>) +declare <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float>, <1 x float>) +declare <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double>, <1 x double>) +declare <1 x i32> @llvm.aarch64.neon.vclez.v1i32.v1f32.v1f32(<1 x float>, <1 x float>) +declare <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1f64.v1f64(<1 x double>, <1 x double>) +declare <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float>, <1 x float>) +declare <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double>, <1 x double>) +declare <1 x i32> @llvm.aarch64.neon.vcltz.v1i32.v1f32.v1f32(<1 x float>, <1 x 
float>) +declare <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1f64.v1f64(<1 x double>, <1 x double>) +declare <1 x i32> @llvm.aarch64.neon.vcage.v1i32.v1f32.v1f32(<1 x float>, <1 x float>) +declare <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double>, <1 x double>) +declare <1 x i32> @llvm.aarch64.neon.vcagt.v1i32.v1f32.v1f32(<1 x float>, <1 x float>) +declare <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double>, <1 x double>) diff --git a/test/MC/AArch64/neon-diagnostics.s b/test/MC/AArch64/neon-diagnostics.s index e0675e2b779..28f8e7a816b 100644 --- a/test/MC/AArch64/neon-diagnostics.s +++ b/test/MC/AArch64/neon-diagnostics.s @@ -4397,6 +4397,146 @@ // CHECK-ERROR: cmtst b20, d21, d22 // CHECK-ERROR: ^ +//---------------------------------------------------------------------- +// Scalar Floating-point Compare Mask Equal +//---------------------------------------------------------------------- + + fcmeq s10, h11, s12 + fcmeq d20, s21, d22 + +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: fcmeq s10, h11, s12 +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: fcmeq d20, s21, d22 +// CHECK-ERROR: ^ + +//---------------------------------------------------------------------- +// Scalar Floating-point Compare Mask Equal To Zero +//---------------------------------------------------------------------- + + fcmeq h10, s11, #0.0 + fcmeq d20, s21, #0.0 + +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: fcmeq h10, s11, #0.0 +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: fcmeq d20, s21, #0.0 +// CHECK-ERROR: ^ + +//---------------------------------------------------------------------- +// Scalar Floating-point Compare Mask Greater Than Or Equal +//---------------------------------------------------------------------- + + fcmge s10, h11, s12 + fcmge d20, s21, d22 + +// CHECK-ERROR: error: invalid operand for instruction +// 
CHECK-ERROR: fcmge s10, h11, s12 +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: fcmge d20, s21, d22 +// CHECK-ERROR: ^ + +//---------------------------------------------------------------------- +// Scalar Floating-point Compare Mask Greater Than Or Equal To Zero +//---------------------------------------------------------------------- + + fcmge h10, s11, #0.0 + fcmge d20, s21, #0.0 + +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: fcmge h10, s11, #0.0 +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: fcmge d20, s21, #0.0 +// CHECK-ERROR: ^ + +//---------------------------------------------------------------------- +// Scalar Floating-point Compare Mask Greather Than +//---------------------------------------------------------------------- + + fcmgt s10, h11, s12 + fcmgt d20, s21, d22 + +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: fcmgt s10, h11, s12 +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: fcmgt d20, s21, d22 +// CHECK-ERROR: ^ + +//---------------------------------------------------------------------- +// Scalar Floating-point Compare Mask Greather Than Zero +//---------------------------------------------------------------------- + + fcmgt h10, s11, #0.0 + fcmgt d20, s21, #0.0 + +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: fcmgt h10, s11, #0.0 +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: fcmgt d20, s21, #0.0 +// CHECK-ERROR: ^ + +//---------------------------------------------------------------------- +// Scalar Floating-point Compare Mask Less Than Or Equal To Zero +//---------------------------------------------------------------------- + + fcmle h10, s11, #0.0 + fcmle d20, s21, #0.0 + +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: fcmle h10, s11, #0.0 +// CHECK-ERROR: ^ 
+// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: fcmle d20, s21, #0.0 +// CHECK-ERROR: ^ + +//---------------------------------------------------------------------- +// Scalar Floating-point Compare Mask Less Than +//---------------------------------------------------------------------- + + fcmlt h10, s11, #0.0 + fcmlt d20, s21, #0.0 + +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: fcmlt h10, s11, #0.0 +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: fcmlt d20, s21, #0.0 +// CHECK-ERROR: ^ + +//---------------------------------------------------------------------- +// Scalar Floating-point Absolute Compare Mask Greater Than Or Equal +//---------------------------------------------------------------------- + + facge s10, h11, s12 + facge d20, s21, d22 + +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: facge s10, h11, s12 +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: facge d20, s21, d22 +// CHECK-ERROR: ^ + +//---------------------------------------------------------------------- +// Scalar Floating-point Absolute Compare Mask Greater Than +//---------------------------------------------------------------------- + + facgt s10, h11, s12 + facgt d20, d21, s22 + +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: facgt s10, h11, s12 +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: facgt d20, d21, s22 +// CHECK-ERROR: ^ + //---------------------------------------------------------------------- // Scalar Signed Saturating Accumulated of Unsigned Value //---------------------------------------------------------------------- diff --git a/test/MC/AArch64/neon-scalar-fp-compare.s b/test/MC/AArch64/neon-scalar-fp-compare.s new file mode 100644 index 00000000000..a59ec0d1d6e --- /dev/null +++ b/test/MC/AArch64/neon-scalar-fp-compare.s @@ -0,0 +1,103 @@ 
+// RUN: llvm-mc -triple aarch64-none-linux-gnu -mattr=+neon -show-encoding < %s | FileCheck %s
+
+// Check that the assembler can handle the documented syntax for AArch64
+
+//----------------------------------------------------------------------
+// Scalar Floating-point Compare Mask Equal
+//----------------------------------------------------------------------
+
+ fcmeq s10, s11, s12
+ fcmeq d20, d21, d22
+
+// CHECK: fcmeq s10, s11, s12 // encoding: [0x6a,0xe5,0x2c,0x5e]
+// CHECK: fcmeq d20, d21, d22 // encoding: [0xb4,0xe6,0x76,0x5e]
+
+//----------------------------------------------------------------------
+// Scalar Floating-point Compare Mask Equal To Zero
+//----------------------------------------------------------------------
+
+ fcmeq s10, s11, #0.0
+ fcmeq d20, d21, #0.0
+
+// CHECK: fcmeq s10, s11, #0.0 // encoding: [0x6a,0xd9,0xa0,0x5e]
+// CHECK: fcmeq d20, d21, #0.0 // encoding: [0xb4,0xda,0xe0,0x5e]
+
+//----------------------------------------------------------------------
+// Scalar Floating-point Compare Mask Greater Than Or Equal
+//----------------------------------------------------------------------
+
+ fcmge s10, s11, s12
+ fcmge d20, d21, d22
+
+// CHECK: fcmge s10, s11, s12 // encoding: [0x6a,0xe5,0x2c,0x7e]
+// CHECK: fcmge d20, d21, d22 // encoding: [0xb4,0xe6,0x76,0x7e]
+
+//----------------------------------------------------------------------
+// Scalar Floating-point Compare Mask Greater Than Or Equal To Zero
+//----------------------------------------------------------------------
+
+ fcmge s10, s11, #0.0
+ fcmge d20, d21, #0.0
+
+// CHECK: fcmge s10, s11, #0.0 // encoding: [0x6a,0xc9,0xa0,0x7e]
+// CHECK: fcmge d20, d21, #0.0 // encoding: [0xb4,0xca,0xe0,0x7e]
+
+//----------------------------------------------------------------------
+// Scalar Floating-point Compare Mask Greater Than
+//----------------------------------------------------------------------
+
+ fcmgt s10, s11, s12
+ fcmgt d20, d21, d22
+
+// CHECK: fcmgt
s10, s11, s12 // encoding: [0x6a,0xe5,0xac,0x7e]
+// CHECK: fcmgt d20, d21, d22 // encoding: [0xb4,0xe6,0xf6,0x7e]
+
+//----------------------------------------------------------------------
+// Scalar Floating-point Compare Mask Greater Than Zero
+//----------------------------------------------------------------------
+
+ fcmgt s10, s11, #0.0
+ fcmgt d20, d21, #0.0
+
+// CHECK: fcmgt s10, s11, #0.0 // encoding: [0x6a,0xc9,0xa0,0x5e]
+// CHECK: fcmgt d20, d21, #0.0 // encoding: [0xb4,0xca,0xe0,0x5e]
+
+//----------------------------------------------------------------------
+// Scalar Floating-point Compare Mask Less Than Or Equal To Zero
+//----------------------------------------------------------------------
+
+ fcmle s10, s11, #0.0
+ fcmle d20, d21, #0.0
+
+// CHECK: fcmle s10, s11, #0.0 // encoding: [0x6a,0xd9,0xa0,0x7e]
+// CHECK: fcmle d20, d21, #0.0 // encoding: [0xb4,0xda,0xe0,0x7e]
+
+//----------------------------------------------------------------------
+// Scalar Floating-point Compare Mask Less Than
+//----------------------------------------------------------------------
+
+ fcmlt s10, s11, #0.0
+ fcmlt d20, d21, #0.0
+
+// CHECK: fcmlt s10, s11, #0.0 // encoding: [0x6a,0xe9,0xa0,0x5e]
+// CHECK: fcmlt d20, d21, #0.0 // encoding: [0xb4,0xea,0xe0,0x5e]
+
+//----------------------------------------------------------------------
+// Scalar Floating-point Absolute Compare Mask Greater Than Or Equal
+//----------------------------------------------------------------------
+
+ facge s10, s11, s12
+ facge d20, d21, d22
+
+// CHECK: facge s10, s11, s12 // encoding: [0x6a,0xed,0x2c,0x7e]
+// CHECK: facge d20, d21, d22 // encoding: [0xb4,0xee,0x76,0x7e]
+
+//----------------------------------------------------------------------
+// Scalar Floating-point Absolute Compare Mask Greater Than
+//----------------------------------------------------------------------
+
+ facgt s10, s11, s12
+ facgt d20, d21, d22
+
+// CHECK: facgt s10, s11, s12 // encoding:
[0x6a,0xed,0xac,0x7e]
+// CHECK: facgt d20, d21, d22 // encoding: [0xb4,0xee,0xf6,0x7e]
diff --git a/test/MC/Disassembler/AArch64/neon-instructions.txt b/test/MC/Disassembler/AArch64/neon-instructions.txt
index fa34c37d1da..2627b144033 100644
--- a/test/MC/Disassembler/AArch64/neon-instructions.txt
+++ b/test/MC/Disassembler/AArch64/neon-instructions.txt
@@ -1600,6 +1600,86 @@
 # CHECK: cmtst d20, d21, d22
 0xb4,0x8e,0xf6,0x5e
+#----------------------------------------------------------------------
+# Scalar Floating-point Compare Mask Equal
+#----------------------------------------------------------------------
+# CHECK: fcmeq s10, s11, s12
+# CHECK: fcmeq d20, d21, d22
+0x6a,0xe5,0x2c,0x5e
+0xb4,0xe6,0x76,0x5e
+
+#----------------------------------------------------------------------
+# Scalar Floating-point Compare Mask Equal To Zero
+#----------------------------------------------------------------------
+# CHECK: fcmeq s10, s11, #0.0
+# CHECK: fcmeq d20, d21, #0.0
+0x6a,0xd9,0xa0,0x5e
+0xb4,0xda,0xe0,0x5e
+
+#----------------------------------------------------------------------
+# Scalar Floating-point Compare Mask Greater Than Or Equal
+#----------------------------------------------------------------------
+# CHECK: fcmge s10, s11, s12
+# CHECK: fcmge d20, d21, d22
+0x6a,0xe5,0x2c,0x7e
+0xb4,0xe6,0x76,0x7e
+
+#----------------------------------------------------------------------
+# Scalar Floating-point Compare Mask Greater Than Or Equal To Zero
+#----------------------------------------------------------------------
+# CHECK: fcmge s10, s11, #0.0
+# CHECK: fcmge d20, d21, #0.0
+0x6a,0xc9,0xa0,0x7e
+0xb4,0xca,0xe0,0x7e
+
+#----------------------------------------------------------------------
+# Scalar Floating-point Compare Mask Greater Than
+#----------------------------------------------------------------------
+# CHECK: fcmgt s10, s11, s12
+# CHECK: fcmgt d20, d21, d22
+0x6a,0xe5,0xac,0x7e
+0xb4,0xe6,0xf6,0x7e
+
+#----------------------------------------------------------------------
+# Scalar Floating-point Compare Mask Greater Than Zero
+#----------------------------------------------------------------------
+# CHECK: fcmgt s10, s11, #0.0
+# CHECK: fcmgt d20, d21, #0.0
+0x6a,0xc9,0xa0,0x5e
+0xb4,0xca,0xe0,0x5e
+
+#----------------------------------------------------------------------
+# Scalar Floating-point Compare Mask Less Than Or Equal To Zero
+#----------------------------------------------------------------------
+# CHECK: fcmle s10, s11, #0.0
+# CHECK: fcmle d20, d21, #0.0
+0x6a,0xd9,0xa0,0x7e
+0xb4,0xda,0xe0,0x7e
+
+#----------------------------------------------------------------------
+# Scalar Floating-point Compare Mask Less Than
+#----------------------------------------------------------------------
+# CHECK: fcmlt s10, s11, #0.0
+# CHECK: fcmlt d20, d21, #0.0
+0x6a,0xe9,0xa0,0x5e
+0xb4,0xea,0xe0,0x5e
+
+#----------------------------------------------------------------------
+# Scalar Floating-point Absolute Compare Mask Greater Than Or Equal
+#----------------------------------------------------------------------
+# CHECK: facge s10, s11, s12
+# CHECK: facge d20, d21, d22
+0x6a,0xed,0x2c,0x7e
+0xb4,0xee,0x76,0x7e
+
+#----------------------------------------------------------------------
+# Scalar Floating-point Absolute Compare Mask Greater Than
+#----------------------------------------------------------------------
+# CHECK: facgt s10, s11, s12
+# CHECK: facgt d20, d21, d22
+0x6a,0xed,0xac,0x7e
+0xb4,0xee,0xf6,0x7e
+
 #----------------------------------------------------------------------
 # Scalar Absolute Value
 #----------------------------------------------------------------------