Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-12-24 22:32:47 +00:00.
Commit 27b1252c13

This changes the tests that were targeting ARM EABI to explicitly specify the environment rather than relying on the default. Relying on the default breaks with the new Windows on ARM support when running the tests on Windows, where the default environment is no longer EABI. Take the opportunity to avoid a pointless redirect (this helps when debugging, since it provides a command line invocation that can be copied and pasted) and to remove a few greps in favour of FileCheck.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@205541 91177308-0d34-0410-b5e6-96231b3b80d8
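As an illustration of the RUN-line change described above (the previous form is not shown in this excerpt, so the "before" line below is an assumption based on the common pre-change pattern of relying on -march and a stdin redirect):

; Before (assumed): default environment, input via redirect
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
; After (as in this file): explicit EABI environment in the triple, no redirect
; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s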
204 lines, 6.0 KiB, LLVM
; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s

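; This file checks that vector >= comparisons are selected to the NEON vcge
; instruction: signed, unsigned, and floating-point element types, in both
; 64-bit (D) and 128-bit (Q) registers. It also covers the vacge absolute
; compares and comparisons against zero, which should not need extra
; vmov/vmvn instructions.
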
define <8 x i8> @vcges8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vcges8:
;CHECK: vcge.s8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
  %tmp3 = icmp sge <8 x i8> %tmp1, %tmp2
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <4 x i16> @vcges16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vcges16:
;CHECK: vcge.s16
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
  %tmp3 = icmp sge <4 x i16> %tmp1, %tmp2
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <2 x i32> @vcges32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vcges32:
;CHECK: vcge.s32
  %tmp1 = load <2 x i32>* %A
  %tmp2 = load <2 x i32>* %B
  %tmp3 = icmp sge <2 x i32> %tmp1, %tmp2
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <8 x i8> @vcgeu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vcgeu8:
;CHECK: vcge.u8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
  %tmp3 = icmp uge <8 x i8> %tmp1, %tmp2
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <4 x i16> @vcgeu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vcgeu16:
;CHECK: vcge.u16
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
  %tmp3 = icmp uge <4 x i16> %tmp1, %tmp2
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <2 x i32> @vcgeu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vcgeu32:
;CHECK: vcge.u32
  %tmp1 = load <2 x i32>* %A
  %tmp2 = load <2 x i32>* %B
  %tmp3 = icmp uge <2 x i32> %tmp1, %tmp2
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <2 x i32> @vcgef32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: vcgef32:
;CHECK: vcge.f32
  %tmp1 = load <2 x float>* %A
  %tmp2 = load <2 x float>* %B
  %tmp3 = fcmp oge <2 x float> %tmp1, %tmp2
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

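; 128-bit (Q register) versions of the same comparisons.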
define <16 x i8> @vcgeQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vcgeQs8:
;CHECK: vcge.s8
  %tmp1 = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = icmp sge <16 x i8> %tmp1, %tmp2
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <8 x i16> @vcgeQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vcgeQs16:
;CHECK: vcge.s16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = load <8 x i16>* %B
  %tmp3 = icmp sge <8 x i16> %tmp1, %tmp2
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <4 x i32> @vcgeQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vcgeQs32:
;CHECK: vcge.s32
  %tmp1 = load <4 x i32>* %A
  %tmp2 = load <4 x i32>* %B
  %tmp3 = icmp sge <4 x i32> %tmp1, %tmp2
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <16 x i8> @vcgeQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vcgeQu8:
;CHECK: vcge.u8
  %tmp1 = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = icmp uge <16 x i8> %tmp1, %tmp2
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <8 x i16> @vcgeQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vcgeQu16:
;CHECK: vcge.u16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = load <8 x i16>* %B
  %tmp3 = icmp uge <8 x i16> %tmp1, %tmp2
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <4 x i32> @vcgeQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vcgeQu32:
;CHECK: vcge.u32
  %tmp1 = load <4 x i32>* %A
  %tmp2 = load <4 x i32>* %B
  %tmp3 = icmp uge <4 x i32> %tmp1, %tmp2
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <4 x i32> @vcgeQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
;CHECK-LABEL: vcgeQf32:
;CHECK: vcge.f32
  %tmp1 = load <4 x float>* %A
  %tmp2 = load <4 x float>* %B
  %tmp3 = fcmp oge <4 x float> %tmp1, %tmp2
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

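; Absolute compares (comparisons of the absolute values of the operands) go
; through the llvm.arm.neon.vacge intrinsic and should select vacge.f32.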
define <2 x i32> @vacgef32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: vacgef32:
;CHECK: vacge.f32
  %tmp1 = load <2 x float>* %A
  %tmp2 = load <2 x float>* %B
  %tmp3 = call <2 x i32> @llvm.arm.neon.vacge.v2i32.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
  ret <2 x i32> %tmp3
}

define <4 x i32> @vacgeQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
;CHECK-LABEL: vacgeQf32:
;CHECK: vacge.f32
  %tmp1 = load <4 x float>* %A
  %tmp2 = load <4 x float>* %B
  %tmp3 = call <4 x i32> @llvm.arm.neon.vacge.v4i32.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
  ret <4 x i32> %tmp3
}

declare <2 x i32> @llvm.arm.neon.vacge.v2i32.v2f32(<2 x float>, <2 x float>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vacge.v4i32.v4f32(<4 x float>, <4 x float>) nounwind readnone

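; Comparisons against an all-zero vector should not need to materialise the
; zero in a register first (no vmov/vmvn); the compare instruction suffices.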
define <8 x i8> @vcgei8Z(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vcgei8Z:
;CHECK-NOT: vmov
;CHECK-NOT: vmvn
;CHECK: vcge.s8
  %tmp1 = load <8 x i8>* %A
  %tmp3 = icmp sge <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <8 x i8> @vclei8Z(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vclei8Z:
;CHECK-NOT: vmov
;CHECK-NOT: vmvn
;CHECK: vcle.s8
  %tmp1 = load <8 x i8>* %A
  %tmp3 = icmp sle <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

; Radar 8782191
; Floating-point comparisons against zero produce results with integer
; elements, not floating-point elements.
define void @test_vclez_fp() nounwind optsize {
;CHECK-LABEL: test_vclez_fp:
;CHECK: vcle.f32
entry:
  %0 = fcmp ole <4 x float> undef, zeroinitializer
  %1 = sext <4 x i1> %0 to <4 x i16>
  %2 = add <4 x i16> %1, zeroinitializer
  %3 = shufflevector <4 x i16> %2, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %4 = add <8 x i16> %3, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %5 = trunc <8 x i16> %4 to <8 x i8>
  tail call void @llvm.arm.neon.vst1.v8i8(i8* undef, <8 x i8> %5, i32 1)
  unreachable
}

declare void @llvm.arm.neon.vst1.v8i8(i8*, <8 x i8>, i32) nounwind