From d7689b6ff6faa91f8c27b632a9e51ea55d638644 Mon Sep 17 00:00:00 2001
From: Jiangning Liu
Date: Fri, 23 May 2014 02:54:50 +0000
Subject: [PATCH] [ARM64] Fix a bug in shuffle vector lowering to generate correct vext ISD with swapped input vectors.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@209495 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/ARM64/ARM64ISelLowering.cpp |  27 ++--
 test/CodeGen/ARM64/vext_reverse.ll     | 172 +++++++++++++++++++++++++
 2 files changed, 185 insertions(+), 14 deletions(-)
 create mode 100644 test/CodeGen/ARM64/vext_reverse.ll

diff --git a/lib/Target/ARM64/ARM64ISelLowering.cpp b/lib/Target/ARM64/ARM64ISelLowering.cpp
index 385373116de..c24b7deea94 100644
--- a/lib/Target/ARM64/ARM64ISelLowering.cpp
+++ b/lib/Target/ARM64/ARM64ISelLowering.cpp
@@ -4270,23 +4270,22 @@ static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
 
   // The index of an EXT is the first element if it is not UNDEF.
   // Watch out for the beginning UNDEFs. The EXT index should be the expected
-  // value of the first element.
-  // E.g. <-1, -1, 3, ...> is treated as <1, 2, 3, ...>.
-  // <-1, -1, 0, 1, ...> is treated as <IDX, IDX+1, 0, 1, ...>. IDX is
-  // equal to the ExpectedElt.
-  Imm = (M[0] >= 0) ? static_cast<unsigned>(M[0]) : ExpectedElt.getZExtValue();
+  // value of the first element. E.g.
+  // <-1, -1, 3, ...> is treated as <1, 2, 3, ...>.
+  // <-1, -1, 0, 1, ...> is treated as <2*NumElts-2, 2*NumElts-1, 0, 1, ...>.
+  // ExpectedElt is the last mask index plus 1.
+  Imm = ExpectedElt.getZExtValue();
 
-  // If no beginning UNDEFs, do swap when M[0] >= NumElts.
-  if (M[0] >= 0 && Imm >= NumElts) {
+  // There are two different cases that require reversing the input vectors.
+  // For example, for the vector type <4 x i32> we have the following cases:
+  // Case 1: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, -1, 0>)
+  // Case 2: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, 7, 0>)
+  // In both cases the final mask is <5, 6, 7, 0>, which requires
+  // reversing the two input vectors.
+  if (Imm < NumElts)
     ReverseEXT = true;
+  else
     Imm -= NumElts;
-  } else if (M[0] < 0) {
-    // Only do swap when beginning UNDEFs more than the first real element,
-    if (*FirstRealElt < FirstRealElt - M.begin())
-      ReverseEXT = true;
-    if (Imm >= NumElts)
-      Imm -= NumElts;
-  }
 
   return true;
 }
diff --git a/test/CodeGen/ARM64/vext_reverse.ll b/test/CodeGen/ARM64/vext_reverse.ll
new file mode 100644
index 00000000000..c45e55edeca
--- /dev/null
+++ b/test/CodeGen/ARM64/vext_reverse.ll
@@ -0,0 +1,172 @@
+; RUN: llc -mtriple=arm64-linux-gnuabi < %s | FileCheck %s
+
+; The following tests check the correctness of reversing the input operands
+; of vext by enumerating all cases of using two undefs in the shuffle masks.
+
+define <4 x i16> @vext_6701_0(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_6701_0:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #4
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_6701_12(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_6701_12:
+; CHECK: ext v0.8b, v0.8b, v0.8b, #4
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_6701_13(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_6701_13:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #4
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 7, i32 undef, i32 1>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_6701_14(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_6701_14:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #4
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 7, i32 0, i32 undef>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_6701_23(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_6701_23:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #4
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 6, i32 undef, i32 undef, i32 1>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_6701_24(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_6701_24:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #4
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 6, i32 undef, i32 0, i32 undef>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_6701_34(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_6701_34:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #4
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 6, i32 7, i32 undef, i32 undef>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_5670_0(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_5670_0:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #2
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 5, i32 6, i32 7, i32 0>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_5670_12(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_5670_12:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #2
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 undef, i32 7, i32 0>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_5670_13(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_5670_13:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #2
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 6, i32 undef, i32 0>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_5670_14(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_5670_14:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #2
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 6, i32 7, i32 undef>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_5670_23(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_5670_23:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #2
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 5, i32 undef, i32 undef, i32 0>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_5670_24(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_5670_24:
+; CHECK: rev32 v0.4h, v1.4h
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 5, i32 undef, i32 7, i32 undef>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_5670_34(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_5670_34:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #2
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 5, i32 6, i32 undef, i32 undef>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_7012_0(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_7012_0:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #6
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 7, i32 0, i32 1, i32 2>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_7012_12(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_7012_12:
+; CHECK: ext v0.8b, v0.8b, v0.8b, #6
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 undef, i32 1, i32 2>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_7012_13(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_7012_13:
+; CHECK: rev32 v0.4h, v0.4h
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 0, i32 undef, i32 2>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_7012_14(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_7012_14:
+; CHECK: ext v0.8b, v0.8b, v0.8b, #6
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 undef, i32 0, i32 1, i32 undef>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_7012_23(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_7012_23:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #6
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 7, i32 undef, i32 undef, i32 2>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_7012_24(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_7012_24:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #6
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 7, i32 undef, i32 1, i32 undef>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @vext_7012_34(<4 x i16> %a1, <4 x i16> %a2) {
+entry:
+; CHECK-LABEL: vext_7012_34:
+; CHECK: ext v0.8b, v1.8b, v0.8b, #6
+  %x = shufflevector <4 x i16> %a1, <4 x i16> %a2, <4 x i32> <i32 7, i32 0, i32 undef, i32 undef>
+  ret <4 x i16> %x
+}
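
Editor's note, not part of the commit: the sketch below is a minimal standalone model of the mask analysis described by the new comments in isEXTMask, for readers who want to experiment with it outside the LLVM tree. The function name isEXTMaskModel, the main() driver, and the use of plain std::vector<int> in place of ArrayRef<int>/EVT are assumptions made for this illustration; the modulo-based wrap-around stands in for the APInt arithmetic of the real code, and only the final "if (Imm < NumElts) reverse, else Imm -= NumElts" step mirrors the change in this patch.

// Illustrative sketch only; the real isEXTMask lives in ARM64ISelLowering.cpp.
#include <cstdio>
#include <vector>

static bool isEXTMaskModel(const std::vector<int> &M, unsigned NumElts,
                           bool &ReverseEXT, unsigned &Imm) {
  ReverseEXT = false;

  // Find the first non-undef (>= 0) mask element.
  unsigned First = 0;
  while (First < M.size() && M[First] < 0)
    ++First;
  if (First == M.size())
    return false; // an all-undef mask is not treated as an EXT here

  // Every later element must be undef or the successor of the previous
  // expected value, wrapping modulo 2*NumElts (two concatenated inputs).
  unsigned Expected = (M[First] + 1) % (2 * NumElts);
  for (unsigned i = First + 1; i < M.size(); ++i) {
    if (M[i] >= 0 && static_cast<unsigned>(M[i]) != Expected)
      return false;
    Expected = (Expected + 1) % (2 * NumElts);
  }

  // After the scan, Expected is "the last mask index plus 1", so leading
  // undefs are treated as if the sequence continued backwards, e.g.
  // <-1, -1, 0, 1> behaves like <2*NumElts-2, 2*NumElts-1, 0, 1>.
  Imm = Expected;

  // The fix: an index below NumElts here means the run of elements starts in
  // the second input vector, so the two operands must be swapped; otherwise
  // the index is rebased into the first input.
  if (Imm < NumElts)
    ReverseEXT = true;
  else
    Imm -= NumElts;
  return true;
}

int main() {
  // A few of the masks exercised by vext_reverse.ll (-1 stands for undef).
  const std::vector<std::vector<int>> Masks = {
      {6, 7, 0, 1}, {-1, -1, 0, 1}, {7, 0, 1, 2}, {1, 2, 3, 4}};
  for (const std::vector<int> &M : Masks) {
    bool Rev = false;
    unsigned Imm = 0;
    if (isEXTMaskModel(M, 4, Rev, Imm))
      std::printf("EXT element index %u, reverse operands: %d\n", Imm, Rev);
    else
      std::printf("not an EXT mask\n");
  }
  return 0;
}

The Imm produced by this model is an element index; the #2/#4/#6 immediates in the CHECK lines above are the same values scaled by the 2-byte element size of <4 x i16>. Masks such as <5, undef, 7, undef> also satisfy the EXT pattern, but the backend may still prefer another lowering (rev32 in vext_5670_24) when an earlier matcher fires.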