From 541681c8485c18b564970c80180a798b2c1663e8 Mon Sep 17 00:00:00 2001
From: Weiming Zhao
Date: Wed, 25 Sep 2013 23:12:06 +0000
Subject: [PATCH] Fix PR 17368: disable vector mul distribution for square of add/sub for ARM

Generally, it is desirable to distribute (a + b) * c to a*c + b*c for
ARM with VMLx forwarding, where a, b and c are vectors. However, for
(a + b) * (a + b), distribution results in one extra instruction.

With distribution:
  x = a + b      (add)
  y = a * x      (mul)
  z = y + b * x  (mla)

Without distribution:
  x = a + b  (add)
  z = x * x  (mul)

This patch checks whether a mul is the square of an add/sub; if so,
distribution is skipped.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@191410 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/ARM/ARMISelLowering.cpp | 10 ++++++++++
 test/CodeGen/ARM/vmul.ll           | 11 +++++++++++
 2 files changed, 21 insertions(+)

diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index c83f7b194ae..773b710ab05 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -8342,6 +8342,13 @@ static SDValue PerformSUBCombine(SDNode *N,
 /// is faster than
 /// vadd d3, d0, d1
 /// vmul d3, d3, d2
+/// However, for (A + B) * (A + B),
+/// vadd d2, d0, d1
+/// vmul d3, d0, d2
+/// vmla d3, d1, d2
+/// is slower than
+/// vadd d2, d0, d1
+/// vmul d3, d2, d2
 static SDValue PerformVMULCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *Subtarget) {
@@ -8361,6 +8368,9 @@ static SDValue PerformVMULCombine(SDNode *N,
     std::swap(N0, N1);
   }
 
+  if (N0 == N1)
+    return SDValue();
+
   EVT VT = N->getValueType(0);
   SDLoc DL(N);
   SDValue N00 = N0->getOperand(0);
diff --git a/test/CodeGen/ARM/vmul.ll b/test/CodeGen/ARM/vmul.ll
index 5e5e99bc2f9..de329acdf3c 100644
--- a/test/CodeGen/ARM/vmul.ll
+++ b/test/CodeGen/ARM/vmul.ll
@@ -515,6 +515,17 @@ entry:
   ret void
 }
 
+define <8 x i8> @no_distribute(<8 x i8> %a, <8 x i8> %b) nounwind {
+entry:
+; CHECK: no_distribute
+; CHECK: vadd.i8
+; CHECK: vmul.i8
+; CHECK-NOT: vmla.i8
+  %0 = add <8 x i8> %a, %b
+  %1 = mul <8 x i8> %0, %0
+  ret <8 x i8> %1
+}
+
 ; If one operand has a zero-extend and the other a sign-extend, vmull
 ; cannot be used.
 define i16 @vmullWithInconsistentExtensions(<8 x i8> %vec) {
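
For contrast, a minimal IR sketch of the non-square case (a + b) * c, which
PerformVMULCombine should still distribute into vmul + vmla on subtargets
with VMLx forwarding. This is illustrative only and not part of the patch;
the function name is hypothetical:

  define <8 x i8> @still_distribute(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) nounwind {
  entry:
    ; Here N0 != N1, so the new early-exit does not trigger and the combine
    ; may still rewrite (%a + %b) * %c as %a * %c + %b * %c (vmul + vmla).
    %0 = add <8 x i8> %a, %b
    %1 = mul <8 x i8> %0, %c
    ret <8 x i8> %1
  }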