diff --git a/lib/Target/AArch64/AArch64InstrNEON.td b/lib/Target/AArch64/AArch64InstrNEON.td
index 8306fd5d966..361a790c0d7 100644
--- a/lib/Target/AArch64/AArch64InstrNEON.td
+++ b/lib/Target/AArch64/AArch64InstrNEON.td
@@ -5122,6 +5122,27 @@ defm : Neon_Scalar2SameMisc_accum_BHSD_size_patterns
 
+def : Pat<(v1i64 (int_aarch64_neon_suqadd (v1i64 FPR64:$Src),
+                                           (v1i64 FPR64:$Rn))),
+          (SUQADDdd FPR64:$Src, FPR64:$Rn)>;
+
+def : Pat<(v1i64 (int_aarch64_neon_usqadd (v1i64 FPR64:$Src),
+                                           (v1i64 FPR64:$Rn))),
+          (USQADDdd FPR64:$Src, FPR64:$Rn)>;
+
+def : Pat<(v1i64 (int_arm_neon_vabs (v1i64 FPR64:$Rn))),
+          (ABSdd FPR64:$Rn)>;
+
+def : Pat<(v1i64 (int_arm_neon_vqabs (v1i64 FPR64:$Rn))),
+          (SQABSdd FPR64:$Rn)>;
+
+def : Pat<(v1i64 (int_arm_neon_vqneg (v1i64 FPR64:$Rn))),
+          (SQNEGdd FPR64:$Rn)>;
+
+def : Pat<(v1i64 (sub (v1i64 (bitconvert (v8i8 Neon_AllZero))),
+                      (v1i64 FPR64:$Rn))),
+          (NEGdd FPR64:$Rn)>;
+
 // Scalar Signed Saturating Extract Unsigned Narrow
 defm SQXTUN : NeonI_Scalar2SameMisc_narrow_HSD_size<0b1, 0b10010, "sqxtun">;
 defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns

[...]

+declare <1 x i64> @llvm.arm.neon.vqneg.v1i64(<1 x i64>)
+
+declare <1 x i64> @llvm.arm.neon.vqabs.v1i64(<1 x i64>)
+
+declare <1 x i64> @llvm.arm.neon.vabs.v1i64(<1 x i64>)
+
+declare <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64>, <1 x i64>)
+
+declare <1 x i64> @llvm.aarch64.neon.suqadd.v1i64(<1 x i64>, <1 x i64>)
+
+define <1 x i64> @test_vuqadd_s64(<1 x i64> %a, <1 x i64> %b) {
+entry:
+  ; CHECK: test_vuqadd_s64
+  %vuqadd2.i = tail call <1 x i64> @llvm.aarch64.neon.suqadd.v1i64(<1 x i64> %a, <1 x i64> %b)
+  ; CHECK: suqadd d{{[0-9]+}}, d{{[0-9]+}}
+  ret <1 x i64> %vuqadd2.i
+}
+
+define <1 x i64> @test_vsqadd_u64(<1 x i64> %a, <1 x i64> %b) {
+entry:
+  ; CHECK: test_vsqadd_u64
+  %vsqadd2.i = tail call <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64> %a, <1 x i64> %b)
+  ; CHECK: usqadd d{{[0-9]+}}, d{{[0-9]+}}
+  ret <1 x i64> %vsqadd2.i
+}
+
+define <1 x i64> @test_vabs_s64(<1 x i64> %a) {
+  ; CHECK: test_vabs_s64
+entry:
+  %vabs1.i = tail call <1 x i64> @llvm.arm.neon.vabs.v1i64(<1 x i64> %a)
+  ; CHECK: abs d{{[0-9]+}}, d{{[0-9]+}}
+  ret <1 x i64> %vabs1.i
+}
+
+define <1 x i64> @test_vqabs_s64(<1 x i64> %a) {
+  ; CHECK: test_vqabs_s64
+entry:
+  %vqabs1.i = tail call <1 x i64> @llvm.arm.neon.vqabs.v1i64(<1 x i64> %a)
+  ; CHECK: sqabs d{{[0-9]+}}, d{{[0-9]+}}
+  ret <1 x i64> %vqabs1.i
+}
+
+define <1 x i64> @test_vqneg_s64(<1 x i64> %a) {
+  ; CHECK: test_vqneg_s64
+entry:
+  %vqneg1.i = tail call <1 x i64> @llvm.arm.neon.vqneg.v1i64(<1 x i64> %a)
+  ; CHECK: sqneg d{{[0-9]+}}, d{{[0-9]+}}
+  ret <1 x i64> %vqneg1.i
+}
+
+define <1 x i64> @test_vneg_s64(<1 x i64> %a) {
+  ; CHECK: test_vneg_s64
+entry:
+  %sub.i = sub <1 x i64> zeroinitializer, %a
+  ; CHECK: neg d{{[0-9]+}}, d{{[0-9]+}}
+  ret <1 x i64> %sub.i
+}
+
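For context, here is the ACLE-level view of what the patch enables. The following minimal C sketch is not part of the patch: the function names are illustrative, and it assumes an AArch64 clang with <arm_neon.h>. The int64x1_t/uint64x1_t intrinsics used here are expanded by clang to roughly the v1i64 intrinsic calls and sub-from-zero IR exercised by the test above, so each function should select to the corresponding scalar instruction once these patterns are in place.

#include <arm_neon.h>

/* Signed saturating accumulate of an unsigned value: expected to select to
   SUQADD Dd, Dn via the new SUQADDdd pattern. */
int64x1_t suqadd_d(int64x1_t acc, uint64x1_t x) { return vuqadd_s64(acc, x); }

/* Unsigned saturating accumulate of a signed value: USQADD Dd, Dn. */
uint64x1_t usqadd_d(uint64x1_t acc, int64x1_t x) { return vsqadd_u64(acc, x); }

/* Absolute value and saturating absolute value: ABS / SQABS Dd, Dn. */
int64x1_t abs_d(int64x1_t x)  { return vabs_s64(x); }
int64x1_t qabs_d(int64x1_t x) { return vqabs_s64(x); }

/* Negate and saturating negate: NEG / SQNEG Dd, Dn. The plain negate is
   typically emitted as "sub <1 x i64> zeroinitializer, %x", which is what
   the NEGdd pattern above matches. */
int64x1_t neg_d(int64x1_t x)  { return vneg_s64(x); }
int64x1_t qneg_d(int64x1_t x) { return vqneg_s64(x); }

Compiled with something like "clang --target=aarch64-none-linux-gnu -O2 -S", each of these should reduce to a single d-register instruction.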