; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s

define <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
;CHECK: test_vrev64D8:
;CHECK: vrev64.8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
  ret <8 x i8> %tmp2
}

define <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
;CHECK: test_vrev64D16:
;CHECK: vrev64.16
  %tmp1 = load <4 x i16>* %A
  %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ret <4 x i16> %tmp2
}

define <2 x i32> @test_vrev64D32(<2 x i32>* %A) nounwind {
;CHECK: test_vrev64D32:
;CHECK: vrev64.32
  %tmp1 = load <2 x i32>* %A
  %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
  ret <2 x i32> %tmp2
}

define <2 x float> @test_vrev64Df(<2 x float>* %A) nounwind {
;CHECK: test_vrev64Df:
;CHECK: vrev64.32
  %tmp1 = load <2 x float>* %A
  %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
  ret <2 x float> %tmp2
}

define <16 x i8> @test_vrev64Q8(<16 x i8>* %A) nounwind {
;CHECK: test_vrev64Q8:
;CHECK: vrev64.8
  %tmp1 = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
  ret <16 x i8> %tmp2
}

define <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
;CHECK: test_vrev64Q16:
;CHECK: vrev64.16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
  ret <8 x i16> %tmp2
}

define <4 x i32> @test_vrev64Q32(<4 x i32>* %A) nounwind {
;CHECK: test_vrev64Q32:
;CHECK: vrev64.32
  %tmp1 = load <4 x i32>* %A
  %tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
  ret <4 x i32> %tmp2
}

define <4 x float> @test_vrev64Qf(<4 x float>* %A) nounwind {
;CHECK: test_vrev64Qf:
;CHECK: vrev64.32
  %tmp1 = load <4 x float>* %A
  %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
  ret <4 x float> %tmp2
}

define <8 x i8> @test_vrev32D8(<8 x i8>* %A) nounwind {
;CHECK: test_vrev32D8:
;CHECK: vrev32.8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
  ret <8 x i8> %tmp2
}

define <4 x i16> @test_vrev32D16(<4 x i16>* %A) nounwind {
;CHECK: test_vrev32D16:
;CHECK: vrev32.16
  %tmp1 = load <4 x i16>* %A
  %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
  ret <4 x i16> %tmp2
}

define <16 x i8> @test_vrev32Q8(<16 x i8>* %A) nounwind {
;CHECK: test_vrev32Q8:
;CHECK: vrev32.8
  %tmp1 = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
  ret <16 x i8> %tmp2
}

define <8 x i16> @test_vrev32Q16(<8 x i16>* %A) nounwind {
;CHECK: test_vrev32Q16:
;CHECK: vrev32.16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
  ret <8 x i16> %tmp2
}

define <8 x i8> @test_vrev16D8(<8 x i8>* %A) nounwind {
;CHECK: test_vrev16D8:
;CHECK: vrev16.8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
  ret <8 x i8> %tmp2
}

define <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
;CHECK: test_vrev16Q8:
;CHECK: vrev16.8
  %tmp1 = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
  ret <16 x i8> %tmp2
}

; Undef shuffle indices should not prevent matching to VREV:
define <8 x i8> @test_vrev64D8_undef(<8 x i8>* %A) nounwind {
;CHECK: test_vrev64D8_undef:
;CHECK: vrev64.8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 undef, i32 undef, i32 4, i32 3, i32 2, i32 1, i32 0>
  ret <8 x i8> %tmp2
}

define <8 x i16> @test_vrev32Q16_undef(<8 x i16>* %A) nounwind {
;CHECK: test_vrev32Q16_undef:
;CHECK: vrev32.16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 5, i32 4, i32 7, i32 undef>
  ret <8 x i16> %tmp2
}

; A vcombine feeding a VREV should not obscure things. Radar 8597007.
define void @test_with_vcombine(<4 x float>* %v) nounwind {
;CHECK: test_with_vcombine:
;CHECK-NOT: vext
;CHECK: vrev64.32
  %tmp1 = load <4 x float>* %v, align 16
  %tmp2 = bitcast <4 x float> %tmp1 to <2 x double>
  %tmp3 = extractelement <2 x double> %tmp2, i32 0
  %tmp4 = bitcast double %tmp3 to <2 x float>
  %tmp5 = extractelement <2 x double> %tmp2, i32 1
  %tmp6 = bitcast double %tmp5 to <2 x float>
  %tmp7 = fadd <2 x float> %tmp6, %tmp6
  %tmp8 = shufflevector <2 x float> %tmp4, <2 x float> %tmp7, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
  store <4 x float> %tmp8, <4 x float>* %v, align 16
  ret void
}

; vrev <4 x i16> should use VREV32 and not VREV64
define void @test_vrev64(<4 x i16>* nocapture %source, <2 x i16>* nocapture %dst) nounwind ssp {
; CHECK: test_vrev64:
; CHECK: vext.16
; CHECK: vrev32.16
entry:
  %0 = bitcast <4 x i16>* %source to <8 x i16>*
  %tmp2 = load <8 x i16>* %0, align 4
  %tmp3 = extractelement <8 x i16> %tmp2, i32 6
  %tmp5 = insertelement <2 x i16> undef, i16 %tmp3, i32 0
  %tmp9 = extractelement <8 x i16> %tmp2, i32 5
  %tmp11 = insertelement <2 x i16> %tmp5, i16 %tmp9, i32 1
  store <2 x i16> %tmp11, <2 x i16>* %dst, align 4
  ret void
}

; Test vrev of float4
define void @float_vrev64(float* nocapture %source, <4 x float>* nocapture %dest) nounwind noinline ssp {
; CHECK: float_vrev64
; CHECK: vext.32
; CHECK: vrev64.32
entry:
  %0 = bitcast float* %source to <4 x float>*
  %tmp2 = load <4 x float>* %0, align 4
  %tmp5 = shufflevector <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <4 x float> %tmp2, <4 x i32> <i32 0, i32 7, i32 0, i32 0>
  %arrayidx8 = getelementptr inbounds <4 x float>* %dest, i32 11
  store <4 x float> %tmp5, <4 x float>* %arrayidx8, align 4
  ret void
}