llvm-6502/test/CodeGen/ARM/2012-08-23-legalize-vmull.ll
Saleem Abdulrasool 27b1252c13 ARM: fixup more tests to specify the target more explicitly
This changes the tests that were targeting ARM EABI to explicitly specify the
environment rather than relying on the default.  The previous behaviour breaks
with the new Windows on ARM support: when the tests are run on Windows, the
default environment is no longer EABI.

Take the opportunity to avoid a pointless redirect (this helps when debugging,
since the command line invocation can be copied and pasted directly) and to
replace a few greps with FileCheck.
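
For illustration only (the previous RUN line is reconstructed here from the
common older pattern and may not match the old test verbatim), the change is
roughly:

  old: ; RUN: llc < %s -march=arm -mattr=+neon | grep vmull
  new: ; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s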

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@205541 91177308-0d34-0410-b5e6-96231b3b80d8
2014-04-03 16:01:44 +00:00


; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
; PR12281
; Test generation of code for the vmull instruction when multiplying 128-bit
; vectors that were created by sign-extending smaller vector sizes.
;
; The vmull operation requires 64-bit vector operands, so the original vectors
; must be widened to 64 bits before vmull can be used.
; This previously failed with an assertion because the <4 x i8> vector was too
; small for vmull.
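;
; For orientation, one plausible lowering of the v4i8 constant case below looks
; roughly like this (register choices and the exact load/extend sequence are
; illustrative assumptions, not something FileCheck verifies -- the test only
; requires that a vmull appears in the output):
;   vld1.32   {d16[0]}, [r0]     ; load the four i8 elements as one 32-bit lane
;   vmovl.s8  q8, d16            ; sign extend i8 -> i16
;   vmov.i16  d20, #3            ; splat the constant 3
;   vmull.s16 q9, d16, d20       ; widening multiply: <4 x i16> -> <4 x i32>
;   vst1.32   {d18, d19}, [r1]   ; store the <4 x i32> result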
; Vector x Constant
; v4i8
;
define void @sextload_v4i8_c(<4 x i8>* %v) nounwind {
;CHECK-LABEL: sextload_v4i8_c:
entry:
%0 = load <4 x i8>* %v, align 8
%v0 = sext <4 x i8> %0 to <4 x i32>
;CHECK: vmull
%v1 = mul <4 x i32> %v0, <i32 3, i32 3, i32 3, i32 3>
store <4 x i32> %v1, <4 x i32>* undef, align 8
ret void;
}
; v2i8
;
define void @sextload_v2i8_c(<2 x i8>* %v) nounwind {
;CHECK-LABEL: sextload_v2i8_c:
entry:
%0 = load <2 x i8>* %v, align 8
%v0 = sext <2 x i8> %0 to <2 x i64>
;CHECK: vmull
%v1 = mul <2 x i64> %v0, <i64 3, i64 3>
store <2 x i64> %v1, <2 x i64>* undef, align 8
ret void;
}
; v2i16
;
define void @sextload_v2i16_c(<2 x i16>* %v) nounwind {
;CHECK-LABEL: sextload_v2i16_c:
entry:
%0 = load <2 x i16>* %v, align 8
%v0 = sext <2 x i16> %0 to <2 x i64>
;CHECK: vmull
%v1 = mul <2 x i64> %v0, <i64 3, i64 3>
store <2 x i64> %v1, <2 x i64>* undef, align 8
ret void;
}
; Vector x Vector
; v4i8
;
define void @sextload_v4i8_v(<4 x i8>* %v, <4 x i8>* %p) nounwind {
;CHECK-LABEL: sextload_v4i8_v:
entry:
%0 = load <4 x i8>* %v, align 8
%v0 = sext <4 x i8> %0 to <4 x i32>
%1 = load <4 x i8>* %p, align 8
%v2 = sext <4 x i8> %1 to <4 x i32>
;CHECK: vmull
%v1 = mul <4 x i32> %v0, %v2
store <4 x i32> %v1, <4 x i32>* undef, align 8
ret void;
}
; v2i8
;
define void @sextload_v2i8_v(<2 x i8>* %v, <2 x i8>* %p) nounwind {
;CHECK-LABEL: sextload_v2i8_v:
entry:
%0 = load <2 x i8>* %v, align 8
%v0 = sext <2 x i8> %0 to <2 x i64>
%1 = load <2 x i8>* %p, align 8
%v2 = sext <2 x i8> %1 to <2 x i64>
;CHECK: vmull
%v1 = mul <2 x i64> %v0, %v2
store <2 x i64> %v1, <2 x i64>* undef, align 8
ret void;
}
; v2i16
;
define void @sextload_v2i16_v(<2 x i16>* %v, <2 x i16>* %p) nounwind {
;CHECK-LABEL: sextload_v2i16_v:
entry:
%0 = load <2 x i16>* %v, align 8
%v0 = sext <2 x i16> %0 to <2 x i64>
%1 = load <2 x i16>* %p, align 8
%v2 = sext <2 x i16> %1 to <2 x i64>
;CHECK: vmull
%v1 = mul <2 x i64> %v0, %v2
store <2 x i64> %v1, <2 x i64>* undef, align 8
ret void;
}
; Vector(small) x Vector(big)
; v4i8 x v4i16
;
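; When the two source vectors have different element widths, the narrower one is
; presumably sign-extended up to the wider element type first, so a single vmull
; can still perform the final widening multiply.  A rough, illustrative sequence
; for the v4i8 x v4i16 case (again, not what FileCheck checks for):
;   vmovl.s8  q8, d16            ; <4 x i8>  -> <4 x i16>
;   vmull.s16 q9, d16, d20       ; <4 x i16> x <4 x i16> -> <4 x i32>
;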
define void @sextload_v4i8_vs(<4 x i8>* %v, <4 x i16>* %p) nounwind {
;CHECK-LABEL: sextload_v4i8_vs:
entry:
%0 = load <4 x i8>* %v, align 8
%v0 = sext <4 x i8> %0 to <4 x i32>
%1 = load <4 x i16>* %p, align 8
%v2 = sext <4 x i16> %1 to <4 x i32>
;CHECK: vmull
%v1 = mul <4 x i32> %v0, %v2
store <4 x i32> %v1, <4 x i32>* undef, align 8
ret void;
}
; v2i8 x v2i16
;
define void @sextload_v2i8_vs(<2 x i8>* %v, <2 x i16>* %p) nounwind {
;CHECK-LABEL: sextload_v2i8_vs:
entry:
%0 = load <2 x i8>* %v, align 8
%v0 = sext <2 x i8> %0 to <2 x i64>
%1 = load <2 x i16>* %p, align 8
%v2 = sext <2 x i16> %1 to <2 x i64>
;CHECK: vmull
%v1 = mul <2 x i64> %v0, %v2
store <2 x i64> %v1, <2 x i64>* undef, align 8
ret void;
}
; v2i16 x v2i32
;
define void @sextload_v2i16_vs(<2 x i16>* %v, <2 x i32>* %p) nounwind {
;CHECK-LABEL: sextload_v2i16_vs:
entry:
%0 = load <2 x i16>* %v, align 8
%v0 = sext <2 x i16> %0 to <2 x i64>
%1 = load <2 x i32>* %p, align 8
%v2 = sext <2 x i32> %1 to <2 x i64>
;CHECK: vmull
%v1 = mul <2 x i64> %v0, %v2
store <2 x i64> %v1, <2 x i64>* undef, align 8
ret void;
}