From 67297cd9563d6385570be219098faf8d0fe4c0de Mon Sep 17 00:00:00 2001
From: Ahmed Bougacha
Date: Thu, 5 Mar 2015 19:37:53 +0000
Subject: [PATCH] [ARM] Enable vector extload combine for legal types.

This commit enables forming vector extloads for ARM. It only does so
for legal types, and when we can't fold the extension into a wide/long
form of the user instruction.

Enabling it for larger types isn't as good an idea on ARM as it is on
X86, because:
- we pretend that extloads are legal, but end up generating vld+vmov
- we have instructions like vld {dN, dM}, which can't be generated
  when we "manually expand" extloads to vld+vmov.

For legal types, the combine doesn't fire that often: in the
integration tests it fires only in a big-endian testcase, where it
removes a pointless AND.

Related to rdar://19723053
Differential Revision: http://reviews.llvm.org/D7423

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@231396 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/ARM/ARMISelLowering.cpp            | 22 +++++++++++++++++++
 lib/Target/ARM/ARMISelLowering.h              |  2 ++
 test/CodeGen/ARM/big-endian-neon-extend.ll    | 10 ---------
 .../CodeGen/ARM/dagcombine-anyexttozeroext.ll |  2 +-
 test/CodeGen/ARM/vector-extend-narrow.ll      |  2 +-
 5 files changed, 26 insertions(+), 12 deletions(-)

diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 7694378b480..3cf8d8809bb 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -10077,6 +10077,28 @@ bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
   return false;
 }
 
+bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
+  EVT VT = ExtVal.getValueType();
+
+  if (!isTypeLegal(VT))
+    return false;
+
+  // Don't create a loadext if we can fold the extension into a wide/long
+  // instruction.
+  // If there's more than one user instruction, the loadext is desirable no
+  // matter what. There can be two uses by the same instruction.
+  if (ExtVal->use_empty() ||
+      !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode()))
+    return true;
+
+  SDNode *U = *ExtVal->use_begin();
+  if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
+       U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL))
+    return false;
+
+  return true;
+}
+
 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
     return false;
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index 7fd2725b7e5..6977862fe35 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -283,6 +283,8 @@ namespace llvm {
     using TargetLowering::isZExtFree;
     bool isZExtFree(SDValue Val, EVT VT2) const override;
 
+    bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
+
     bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
 
diff --git a/test/CodeGen/ARM/big-endian-neon-extend.ll b/test/CodeGen/ARM/big-endian-neon-extend.ll
index 1e35305bdba..f8542b708b8 100644
--- a/test/CodeGen/ARM/big-endian-neon-extend.ll
+++ b/test/CodeGen/ARM/big-endian-neon-extend.ll
@@ -3,15 +3,10 @@
 define void @vector_ext_2i8_to_2i64( <2 x i8>* %loadaddr, <2 x i64>* %storeaddr ) {
 ; CHECK-LABEL: vector_ext_2i8_to_2i64:
 ; CHECK: vld1.16 {[[REG:d[0-9]+]][0]}, [r0:16]
-; CHECK-NEXT: vmov.i64 [[MASK:q[0-9]+]], #0xff
-; CHECK-NEXT: vrev64.32 [[MASK]], [[MASK]]
 ; CHECK-NEXT: vrev16.8 [[REG]], [[REG]]
 ; CHECK-NEXT: vmovl.u8 [[QREG:q[0-9]+]], [[REG]]
 ; CHECK-NEXT: vmovl.u16 [[QREG]], [[REG]]
 ; CHECK-NEXT: vmovl.u32 [[QREG]], [[REG]]
-; CHECK-NEXT: vrev64.32 [[QREG]], [[QREG]]
-; CHECK-NEXT: vand [[QREG]], [[QREG]], [[MASK]]
-; CHECK-NEXT: vrev64.32 [[QREG]], [[QREG]]
 ; CHECK-NEXT: vst1.64 {[[REG]], {{d[0-9]+}}}, [r1]
 ; CHECK-NEXT: bx lr
 %1 = load <2 x i8>, <2 x i8>* %loadaddr
@@ -23,14 +18,9 @@
 define void @vector_ext_2i16_to_2i64( <2 x i16>* %loadaddr, <2 x i64>* %storeaddr ) {
 ; CHECK-LABEL: vector_ext_2i16_to_2i64:
 ; CHECK: vld1.32 {[[REG:d[0-9]+]][0]}, [r0:32]
-; CHECK-NEXT: vmov.i64 [[MASK:q[0-9]+]], #0xffff
-; CHECK-NEXT: vrev64.32 [[MASK]], [[MASK]]
 ; CHECK-NEXT: vrev32.16 [[REG]], [[REG]]
 ; CHECK-NEXT: vmovl.u16 [[QREG:q[0-9]+]], [[REG]]
 ; CHECK-NEXT: vmovl.u32 [[QREG]], [[REG]]
-; CHECK-NEXT: vrev64.32 [[QREG]], [[QREG]]
-; CHECK-NEXT: vand [[QREG]], [[QREG]], [[MASK]]
-; CHECK-NEXT: vrev64.32 [[QREG]], [[QREG]]
 ; CHECK-NEXT: vst1.64 {[[REG]], {{d[0-9]+}}}, [r1]
 ; CHECK-NEXT: bx lr
 %1 = load <2 x i16>, <2 x i16>* %loadaddr
diff --git a/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll b/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
index 98a2ce973ea..8b7153503b1 100644
--- a/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
+++ b/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
@@ -2,7 +2,7 @@
 
 ; CHECK-LABEL: f:
 define float @f(<4 x i16>* nocapture %in) {
-  ; CHECK: vldr
+  ; CHECK: vld1
   ; CHECK: vmovl.u16
   ; CHECK-NOT: vand
   %1 = load <4 x i16>, <4 x i16>* %in
diff --git a/test/CodeGen/ARM/vector-extend-narrow.ll b/test/CodeGen/ARM/vector-extend-narrow.ll
index 7e2751b5cf5..d054bfda615 100644
--- a/test/CodeGen/ARM/vector-extend-narrow.ll
+++ b/test/CodeGen/ARM/vector-extend-narrow.ll
@@ -2,7 +2,7 @@
 
 ; CHECK-LABEL: f:
 define float @f(<4 x i16>* nocapture %in) {
-  ; CHECK: vldr
+  ; CHECK: vld1
   ; CHECK: vmovl.u16
   %1 = load <4 x i16>, <4 x i16>* %in
   ; CHECK: vcvt.f32.u32
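Note (not part of the commit): the new isVectorLoadExtDesirable hook lets
the generic DAGCombiner merge a vector load and its extension into a single
extending load when the result type is legal. A minimal sketch of the kind
of IR affected, with a hypothetical function name (the updated testcases
above exercise the same load+extend pattern):

    define <4 x i32> @zext_load_example(<4 x i16>* %p) {
      ; With the combine, the load selects as vld1 feeding vmovl.u16; on
      ; big-endian targets the vrev64/vand masking sequence that the old
      ; CHECK lines expected is no longer emitted.
      %v = load <4 x i16>, <4 x i16>* %p
      %e = zext <4 x i16> %v to <4 x i32>
      ret <4 x i32> %e
    }

Per the code comment above, the hook still answers false when the sole user
is an ADD/SUB/SHL/VSHL node, since those extensions fold into the wide/long
forms of the user instruction instead.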