AArch64/ARM64: enable various AArch64 tests on ARM64.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@206877 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Tim Northover 2014-04-22 10:10:26 +00:00
parent 0e277d18bb
commit ba61446a56
13 changed files with 34 additions and 18 deletions

View File

@@ -5,6 +5,11 @@
; RUN: llc < %s -mtriple=aarch64-unknown-unknown -mcpu=cortex-a57 2>&1 | FileCheck %s
; RUN: llc < %s -mtriple=aarch64-unknown-unknown -mcpu=invalidcpu 2>&1 | FileCheck %s --check-prefix=INVALID
; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=generic 2>&1 | FileCheck %s
; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=cortex-a53 2>&1 | FileCheck %s
; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=cortex-a57 2>&1 | FileCheck %s
; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=invalidcpu 2>&1 | FileCheck %s --check-prefix=INVALID
; CHECK-NOT: {{.*}} is not a recognized processor for this target
; INVALID: {{.*}} is not a recognized processor for this target

View File

@@ -1,5 +1,6 @@
; REQUIRES: asserts
; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a53 -pre-RA-sched=source -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
; arm64 now has a separate copy of this test.
;
; The Cortex-A53 machine model will cause the MADD instruction to be scheduled
; much higher than the ADD instructions in order to hide latency. When not

View File

@@ -1,4 +1,5 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
; arm64 has tests for i64 versions, uses different approach for others.
define i64 @test_vabsd_s64(i64 %a) {
; CHECK: test_vabsd_s64

View File

@@ -1,4 +1,5 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
; arm64 has a copy of the key parts in AdvSIMD-Scalar.ll
define <1 x i64> @add1xi64(<1 x i64> %A, <1 x i64> %B) {
;CHECK: add {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}

View File

@@ -1,10 +1,11 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
declare float @llvm.fma.f32(float, float, float)
declare double @llvm.fma.f64(double, double, double)
define float @test_fmla_ss4S(float %a, float %b, <4 x float> %v) {
; CHECK-LABEL: test_fmla_ss4S
; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
%tmp1 = extractelement <4 x float> %v, i32 3
%tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
@@ -12,7 +13,7 @@ define float @test_fmla_ss4S(float %a, float %b, <4 x float> %v) {
}
define float @test_fmla_ss4S_swap(float %a, float %b, <4 x float> %v) {
; CHECK-LABEL: test_fmla_ss4S_swap
; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
%tmp1 = extractelement <4 x float> %v, i32 3
%tmp2 = call float @llvm.fma.f32(float %tmp1, float %a, float %a)
@@ -20,7 +21,7 @@ define float @test_fmla_ss4S_swap(float %a, float %b, <4 x float> %v) {
}
define float @test_fmla_ss2S(float %a, float %b, <2 x float> %v) {
; CHECK-LABEL: test_fmla_ss2S
; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
%tmp1 = extractelement <2 x float> %v, i32 1
%tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
@@ -28,15 +29,15 @@ define float @test_fmla_ss2S(float %a, float %b, <2 x float> %v) {
}
define double @test_fmla_ddD(double %a, double %b, <1 x double> %v) {
; CHECK-LABEL: test_fmla_ddD
; CHECK: {{fmla d[0-9]+, d[0-9]+, v[0-9]+.d\[0]|fmadd d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+}}
%tmp1 = extractelement <1 x double> %v, i32 0
%tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
ret double %tmp2
}
define double @test_fmla_dd2D(double %a, double %b, <2 x double> %v) {
; CHECK-LABEL: test_fmla_dd2D
; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp1 = extractelement <2 x double> %v, i32 1
%tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
@@ -44,7 +45,7 @@ define double @test_fmla_dd2D(double %a, double %b, <2 x double> %v) {
}
define double @test_fmla_dd2D_swap(double %a, double %b, <2 x double> %v) {
; CHECK-LABEL: test_fmla_dd2D_swap
; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp1 = extractelement <2 x double> %v, i32 1
%tmp2 = call double @llvm.fma.f64(double %tmp1, double %b, double %a)
@@ -52,7 +53,7 @@ define double @test_fmla_dd2D_swap(double %a, double %b, <2 x double> %v) {
}
define float @test_fmls_ss4S(float %a, float %b, <4 x float> %v) {
; CHECK-LABEL: test_fmls_ss4S
; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
%tmp1 = extractelement <4 x float> %v, i32 3
%tmp2 = fsub float -0.0, %tmp1
@@ -61,7 +62,7 @@ define float @test_fmls_ss4S(float %a, float %b, <4 x float> %v) {
}
define float @test_fmls_ss4S_swap(float %a, float %b, <4 x float> %v) {
; CHECK-LABEL: test_fmls_ss4S_swap
; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
%tmp1 = extractelement <4 x float> %v, i32 3
%tmp2 = fsub float -0.0, %tmp1
@@ -71,7 +72,7 @@ define float @test_fmls_ss4S_swap(float %a, float %b, <4 x float> %v) {
define float @test_fmls_ss2S(float %a, float %b, <2 x float> %v) {
; CHECK-LABEL: test_fmls_ss2S
; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
%tmp1 = extractelement <2 x float> %v, i32 1
%tmp2 = fsub float -0.0, %tmp1
@@ -80,8 +81,8 @@ define float @test_fmls_ss2S(float %a, float %b, <2 x float> %v) {
}
define double @test_fmls_ddD(double %a, double %b, <1 x double> %v) {
; CHECK-LABEL: test_fmls_ddD
; CHECK: {{fmls d[0-9]+, d[0-9]+, v[0-9]+.d\[0]|fmsub d[0-9]+, d[0-9]+, d[0-9]+, d[0-9]+}}
%tmp1 = extractelement <1 x double> %v, i32 0
%tmp2 = fsub double -0.0, %tmp1
%tmp3 = call double @llvm.fma.f64(double %tmp2, double %tmp1, double %a)
@@ -89,7 +90,7 @@ define double @test_fmls_ddD(double %a, double %b, <1 x double> %v) {
}
define double @test_fmls_dd2D(double %a, double %b, <2 x double> %v) {
; CHECK-LABEL: test_fmls_dd2D
; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp1 = extractelement <2 x double> %v, i32 1
%tmp2 = fsub double -0.0, %tmp1
@@ -98,7 +99,7 @@ define double @test_fmls_dd2D(double %a, double %b, <2 x double> %v) {
}
define double @test_fmls_dd2D_swap(double %a, double %b, <2 x double> %v) {
; CHECK-LABEL: test_fmls_dd2D_swap
; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
%tmp1 = extractelement <2 x double> %v, i32 1
%tmp2 = fsub double -0.0, %tmp1

View File

@@ -1,4 +1,5 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
; arm64 has (the non-trivial parts of) this test covered by vcmp.ll
;; Scalar Integer Compare

View File

@@ -1,4 +1,5 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
; arm64 has a different approach to scalars. Discarding.
define float @test_vcvts_f32_s32(i32 %a) {
; CHECK: test_vcvts_f32_s32

View File

@@ -1,4 +1,5 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
; arm64 doesn't use <1 x iN> types, for N < 64.
define <1 x i64> @test_zext_v1i32_v1i64(<2 x i32> %v) nounwind readnone {
; CHECK-LABEL: test_zext_v1i32_v1i64:

View File

@@ -1,4 +1,5 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
; arm64 has separate copy of parts that aren't pure intrinsic wrangling.
define <8 x i8> @test_vshr_n_s8(<8 x i8> %a) {
; CHECK: test_vshr_n_s8

View File

@@ -1,4 +1,5 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
; This test is just intrinsic pumping. arm64 has its own tbl/tbx tests.
declare <16 x i8> @llvm.aarch64.neon.vtbx4.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>)

View File

@@ -1,4 +1,5 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
; arm64 has its own copy: aarch64-neon-simd-vget.ll
define <8 x i8> @test_vget_high_s8(<16 x i8> %a) {
; CHECK-LABEL: test_vget_high_s8:

View File

@@ -1,11 +1,12 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s
; A vector TruncStore can not be selected.
; Test a trunc IR and a vector store IR can be selected correctly.
define void @truncStore.v2i64(<2 x i64> %a, <2 x i32>* %result) {
; CHECK-LABEL: truncStore.v2i64:
; CHECK: xtn v{{[0-9]+}}.2s, v{{[0-9]+}}.2d
; CHECK: {{st1 {v[0-9]+.2s}|str d[0-9]+}}, [x{{[0-9]+|sp}}]
%b = trunc <2 x i64> %a to <2 x i32>
store <2 x i32> %b, <2 x i32>* %result
ret void
@@ -14,7 +15,7 @@ define void @truncStore.v2i64(<2 x i64> %a, <2 x i32>* %result) {
define void @truncStore.v4i32(<4 x i32> %a, <4 x i16>* %result) {
; CHECK-LABEL: truncStore.v4i32:
; CHECK: xtn v{{[0-9]+}}.4h, v{{[0-9]+}}.4s
; CHECK: {{st1 {v[0-9]+.4h}|str d[0-9]+}}, [x{{[0-9]+|sp}}]
%b = trunc <4 x i32> %a to <4 x i16>
store <4 x i16> %b, <4 x i16>* %result
ret void
@@ -23,7 +24,7 @@ define void @truncStore.v4i32(<4 x i32> %a, <4 x i16>* %result) {
define void @truncStore.v8i16(<8 x i16> %a, <8 x i8>* %result) {
; CHECK-LABEL: truncStore.v8i16:
; CHECK: xtn v{{[0-9]+}}.8b, v{{[0-9]+}}.8h
; CHECK: {{st1 {v[0-9]+.8b}|str d[0-9]+}}, [x{{[0-9]+|sp}}]
%b = trunc <8 x i16> %a to <8 x i8>
store <8 x i8> %b, <8 x i8>* %result
ret void

View File

@@ -1,5 +1,5 @@
; RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
; arm64 has its own copy of this file, ported during implementation (variadic-aapcs.ll)
%va_list = type {i8*, i8*, i8*, i32, i32}