[AArch64 NEON] Fix a bug caused by undef lane when generating VEXT.

It was originally committed as r199628 but later reverted because it caused
regression test failures; I had committed an old version of the patch by
mistake. Sorry for that.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@199704 91177308-0d34-0410-b5e6-96231b3b80d8
Kevin Qin 2014-01-21 01:48:52 +00:00
parent 665b92322a
commit 9fe8c2b527
2 changed files with 53 additions and 15 deletions
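
To illustrate the fix outside of the LLVM sources, here is a minimal standalone C++ sketch of the patched index computation. It assumes, as the new tests below rely on, that lanes reading the all-undef second shuffle operand show up as -1 in the DAG-level shuffle mask; the function and variable names are illustrative only, not LLVM APIs.

// Standalone sketch (not LLVM code): given a shuffle mask over the
// concatenation V1:V2, with -1 marking undef lanes, compute the byte
// immediate a single EXT (NEON_VEXTRACT) could use, or return -1 if the
// mask is not a sequential extract. Mirrors the patched logic.
#include <cstdio>
#include <vector>

int extByteIndex(const std::vector<int> &Mask, unsigned EltSizeInBits) {
  int Length = static_cast<int>(Mask.size());

  // Skip leading undef lanes instead of reading Mask[0] blindly -- the bug
  // this commit fixes. (The sketch guards against an all-undef mask
  // explicitly; LLVM handles that case earlier in lowering.)
  int SkipUndef = 0;
  while (SkipUndef < Length && Mask[SkipUndef] == -1)
    SkipUndef++;
  if (SkipUndef == Length)
    return -1;

  int CurMask = Mask[SkipUndef];
  if (CurMask < SkipUndef)
    return -1; // would imply a negative start offset; not an EXT

  // Remaining defined lanes must keep incrementing by one; undef lanes act
  // as wildcards.
  for (int I = SkipUndef; I < Length; ++I) {
    if (Mask[I] != -1 && Mask[I] != CurMask)
      return -1;
    CurMask++;
  }

  // Byte offset of the first extracted element within V1:V2.
  return (EltSizeInBits / 8) * (Mask[SkipUndef] - SkipUndef);
}

int main() {
  // Mask of test_undef_vext_s8 once the lanes that read the undef operand
  // are treated as undef: <10, 10, 4, 5, 6, 7, 8, 9> with %b == undef
  // behaves like <-1, -1, 4, 5, 6, 7, -1, -1>.
  std::vector<int> Mask = {-1, -1, 4, 5, 6, 7, -1, -1};
  std::printf("ext immediate = #%d\n", extByteIndex(Mask, 8)); // prints #2
}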


@@ -4654,10 +4654,15 @@ AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
   // it into NEON_VEXTRACT.
   if (V1EltNum == Length) {
     // Check if the shuffle mask is sequential.
+    int SkipUndef = 0;
+    while (ShuffleMask[SkipUndef] == -1) {
+      SkipUndef++;
+    }
+    int CurMask = ShuffleMask[SkipUndef];
+    if (CurMask >= SkipUndef) {
     bool IsSequential = true;
-    int CurMask = ShuffleMask[0];
-    for (int I = 0; I < Length; ++I) {
-      if (ShuffleMask[I] != CurMask) {
+    for (int I = SkipUndef; I < Length; ++I) {
+      if (ShuffleMask[I] != -1 && ShuffleMask[I] != CurMask) {
         IsSequential = false;
         break;
       }
@@ -4666,12 +4671,13 @@ AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
     if (IsSequential) {
       assert((EltSize % 8 == 0) && "Bitsize of vector element is incorrect");
       unsigned VecSize = EltSize * V1EltNum;
-      unsigned Index = (EltSize/8) * ShuffleMask[0];
+      unsigned Index = (EltSize / 8) * (ShuffleMask[SkipUndef] - SkipUndef);
       if (VecSize == 64 || VecSize == 128)
         return DAG.getNode(AArch64ISD::NEON_VEXTRACT, dl, VT, V1, V2,
                            DAG.getConstant(Index, MVT::i64));
     }
+    }
   }
 
   // For shuffle mask like "0, 1, 2, 3, 4, 5, 13, 7", try to generate insert
   // by element from V2 to V1 .


@@ -188,3 +188,35 @@ entry:
   %vext = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
   ret <8 x i16> %vext
 }
+
+define <8 x i8> @test_undef_vext_s8(<8 x i8> %a) {
+; CHECK: test_undef_vext_s8:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2
+entry:
+  %vext = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 10, i32 10, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
+  ret <8 x i8> %vext
+}
+
+define <16 x i8> @test_undef_vextq_s8(<16 x i8> %a) {
+; CHECK: test_undef_vextq_s8:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
+entry:
+  %vext = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 20, i32 20, i32 20, i32 20, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 20, i32 20, i32 20, i32 20, i32 20>
+  ret <16 x i8> %vext
+}
+
+define <4 x i16> @test_undef_vext_s16(<4 x i16> %a) {
+; CHECK: test_undef_vext_s16:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x4
+entry:
+  %vext = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+  ret <4 x i16> %vext
+}
+
+define <8 x i16> @test_undef_vextq_s16(<8 x i16> %a) {
+; CHECK: test_undef_vextq_s16:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
+entry:
+  %vext = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 10, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
+  ret <8 x i16> %vext
+}