diff --git a/lib/Target/AArch64/AArch64InstrNEON.td b/lib/Target/AArch64/AArch64InstrNEON.td
index 361a790c0d7..6b83d88d7f0 100644
--- a/lib/Target/AArch64/AArch64InstrNEON.td
+++ b/lib/Target/AArch64/AArch64InstrNEON.td
@@ -5194,6 +5194,8 @@ defm ADDPvv : NeonI_ScalarPair_D_sizes<0b0, 0b1, 0b11011, "addp", 0>;
 // Scalar Reduce Addition Pairwise (Integer)
 def : Pat<(v1i64 (int_aarch64_neon_vpadd (v2i64 VPR128:$Rn))),
           (ADDPvv_D_2D VPR128:$Rn)>;
+def : Pat<(v1i64 (int_aarch64_neon_vaddv (v2i64 VPR128:$Rn))),
+          (ADDPvv_D_2D VPR128:$Rn)>;
 
 // Scalar Reduce Addition Pairwise (Floating Point)
 defm FADDPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01101, "faddp", 0>;
@@ -5237,6 +5239,26 @@ defm : Neon_ScalarPair_SD_size_patterns;
 
+defm : Neon_ScalarPair_SD_size_patterns;
+
+def : Pat<(v1f32 (int_aarch64_neon_vaddv (v4f32 VPR128:$Rn))),
+          (FADDPvv_S_2S (v2f32
+             (EXTRACT_SUBREG
+                (v4f32 (FADDP_4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rn))),
+                sub_64)))>;
+
+defm : Neon_ScalarPair_SD_size_patterns;
+
+defm : Neon_ScalarPair_SD_size_patterns;
+
+defm : Neon_ScalarPair_SD_size_patterns;
+
+defm : Neon_ScalarPair_SD_size_patterns;
 
 // Scalar by element Arithmetic
diff --git a/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll b/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
index 1bb3b40440a..80e8dc339d6 100644
--- a/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
+++ b/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
@@ -101,3 +101,147 @@ define <1 x double> @test_fminnmp_v1f64(<2 x double> %a) {
   ret <1 x double> %val
 }
 
+define float @test_vaddv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vaddv_f32
+; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define float @test_vaddvq_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vaddvq_f32
+; CHECK: faddp {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v4f32(<4 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define double @test_vaddvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vaddvq_f64
+; CHECK: faddp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vaddv.v1f64.v2f64(<2 x double> %a)
+  %2 = extractelement <1 x double> %1, i32 0
+  ret double %2
+}
+
+define float @test_vmaxv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vmaxv_f32
+; CHECK: fmaxp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v2f32(<2 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define double @test_vmaxvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vmaxvq_f64
+; CHECK: fmaxp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxv.v1f64.v2f64(<2 x double> %a)
+  %2 = extractelement <1 x double> %1, i32 0
+  ret double %2
+}
+
+define float @test_vminv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vminv_f32
+; CHECK: fminp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vminv.v1f32.v2f32(<2 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define double @test_vminvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vminvq_f64
+; CHECK: fminp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vminv.v1f64.v2f64(<2 x double> %a)
+  %2 = extractelement <1 x double> %1, i32 0
+  ret double %2
+}
+
+define double @test_vmaxnmvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vmaxnmvq_f64
+; CHECK: fmaxnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxnmv.v1f64.v2f64(<2 x double> %a)
+  %2 = extractelement <1 x double> %1, i32 0
+  ret double %2
+}
+
+define float @test_vmaxnmv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vmaxnmv_f32
+; CHECK: fmaxnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v2f32(<2 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define double @test_vminnmvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vminnmvq_f64
+; CHECK: fminnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vminnmv.v1f64.v2f64(<2 x double> %a)
+  %2 = extractelement <1 x double> %1, i32 0
+  ret double %2
+}
+
+define float @test_vminnmv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vminnmv_f32
+; CHECK: fminnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v2f32(<2 x float> %a)
+  %2 = extractelement <1 x float> %1, i32 0
+  ret float %2
+}
+
+define <2 x i64> @test_vpaddq_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vpaddq_s64
+; CHECK: addp {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+  %1 = tail call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %1
+}
+
+define <2 x i64> @test_vpaddq_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vpaddq_u64
+; CHECK: addp {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+  %1 = tail call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %1
+}
+
+define i64 @test_vaddvq_s64(<2 x i64> %a) {
+; CHECK-LABEL: test_vaddvq_s64
+; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
+  %2 = extractelement <1 x i64> %1, i32 0
+  ret i64 %2
+}
+
+define i64 @test_vaddvq_u64(<2 x i64> %a) {
+; CHECK-LABEL: test_vaddvq_u64
+; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
+  %1 = tail call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
+  %2 = extractelement <1 x i64> %1, i32 0
+  ret i64 %2
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64>)
+
+declare <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64>, <2 x i64>)
+
+declare <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v2f32(<2 x float>)
+
+declare <1 x double> @llvm.aarch64.neon.vminnmv.v1f64.v2f64(<2 x double>)
+
+declare <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v2f32(<2 x float>)
+
+declare <1 x double> @llvm.aarch64.neon.vmaxnmv.v1f64.v2f64(<2 x double>)
+
+declare <1 x double> @llvm.aarch64.neon.vminv.v1f64.v2f64(<2 x double>)
+
+declare <1 x float> @llvm.aarch64.neon.vminv.v1f32.v2f32(<2 x float>)
+
+declare <1 x double> @llvm.aarch64.neon.vmaxv.v1f64.v2f64(<2 x double>)
+
+declare <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v2f32(<2 x float>)
+
+declare <1 x double> @llvm.aarch64.neon.vaddv.v1f64.v2f64(<2 x double>)
+
+declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v4f32(<4 x float>)
+
+declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float>)
\ No newline at end of file
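
For reference, a minimal C-level sketch of the user-facing ACLE intrinsics from arm_neon.h whose lowering these patterns and tests exercise. This is not part of the patch: it assumes a clang/arm_neon.h toolchain that maps the ACLE calls onto the llvm.aarch64.neon.* intrinsics seen in the tests above, the function names are illustrative only, and the expected instruction noted in each comment simply mirrors the CHECK lines.

    /* Illustrative sketch only; compile for an AArch64 target. */
    #include <arm_neon.h>
    #include <stdint.h>

    float   reduce_add_2s(float32x2_t v)  { return vaddv_f32(v);  } /* faddp s, v.2s             */
    float   reduce_add_4s(float32x4_t v)  { return vaddvq_f32(v); } /* faddp v.4s; faddp s, v.2s */
    double  reduce_add_2d(float64x2_t v)  { return vaddvq_f64(v); } /* faddp d, v.2d             */
    float   reduce_max_2s(float32x2_t v)  { return vmaxv_f32(v);  } /* fmaxp s, v.2s             */
    int64_t reduce_add_s64(int64x2_t v)   { return vaddvq_s64(v); } /* addp d, v.2d              */
    int64x2_t pairwise_add_s64(int64x2_t a, int64x2_t b) { return vpaddq_s64(a, b); } /* addp v.2d */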