Make CanXFormVExtractWithShuffleIntoLoad reject loads with multiple uses, and make it return false when there is no load at all. This brings the code in line with the DAGCombiner code it is meant to match. These two changes prevent some cases where vector_shuffles reached instruction selection and triggered the old shuffle selection code. This also required fixing a bad pattern that the change exposed. It is the first step toward removing the old shuffle selection support. No test cases yet, because there is no way to tell whether a shuffle was handled during legalization or at instruction selection.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@147428 91177308-0d34-0410-b5e6-96231b3b80d8
Craig Topper 2012-01-02 08:46:48 +00:00
parent f3cd23c6cb
commit a51bb3aa75
2 changed files with 21 additions and 25 deletions

lib/Target/X86/X86ISelLowering.cpp

@@ -6268,31 +6268,27 @@ bool CanXFormVExtractWithShuffleIntoLoad(SDValue V, SelectionDAG &DAG,
   if (V.getOpcode() == ISD::BITCAST)
     V = V.getOperand(0);
 
-  if (ISD::isNormalLoad(V.getNode())) {
-    // Is the original load suitable?
-    LoadSDNode *LN0 = cast<LoadSDNode>(V);
+  if (!ISD::isNormalLoad(V.getNode()))
+    return false;
 
-    // FIXME: avoid the multi-use bug that is preventing lots of
-    // of foldings to be detected, this is still wrong of course, but
-    // give the temporary desired behavior, and if it happens that
-    // the load has real more uses, during isel it will not fold, and
-    // will generate poor code.
-    if (!LN0 || LN0->isVolatile()) // || !LN0->hasOneUse()
-      return false;
+  // Is the original load suitable?
+  LoadSDNode *LN0 = cast<LoadSDNode>(V);
 
-    if (!HasShuffleIntoBitcast)
-      return true;
+  if (!LN0 || !LN0->hasNUsesOfValue(1,0) || LN0->isVolatile())
+    return false;
 
-    // If there's a bitcast before the shuffle, check if the load type and
-    // alignment is valid.
-    unsigned Align = LN0->getAlignment();
-    unsigned NewAlign =
-      TLI.getTargetData()->getABITypeAlignment(
-                                    VT.getTypeForEVT(*DAG.getContext()));
+  if (!HasShuffleIntoBitcast)
+    return true;
 
-    if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT))
-      return false;
-  }
+  // If there's a bitcast before the shuffle, check if the load type and
+  // alignment is valid.
+  unsigned Align = LN0->getAlignment();
+  unsigned NewAlign =
+    TLI.getTargetData()->getABITypeAlignment(
+                                  VT.getTypeForEVT(*DAG.getContext()));
+
+  if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT))
+    return false;
 
   return true;
 }
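
A note on the new guard: SDNode::hasOneUse() counts uses of any result of the node, and a load produces both a value (result 0) and a chain (result 1), so the chain's users would make hasOneUse() fail even when the loaded value is used exactly once. hasNUsesOfValue(1, 0) asks only about result 0, which is what the fold cares about. A minimal sketch of the check, assuming the LLVM 3.x SelectionDAG headers (the helper name is hypothetical, not part of this patch):

// Sketch only; mirrors the tightened guard in the diff above.
#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

static bool loadedValueHasOneUse(SDValue V) {  // hypothetical helper
  if (!ISD::isNormalLoad(V.getNode()))
    return false;                        // not a plain load: bail out
  LoadSDNode *LN0 = cast<LoadSDNode>(V);
  // Exactly one use of result 0 (the loaded value); result 1 is the
  // chain and is deliberately not counted, unlike with hasOneUse().
  return LN0->hasNUsesOfValue(1, 0) && !LN0->isVolatile();
}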

lib/Target/X86/X86InstrSSE.td

@@ -1236,10 +1236,10 @@ let Predicates = [HasAVX] in {
   // Store patterns
   def : Pat<(store (f64 (vector_extract
-            (v2f64 (X86Unpckh VR128:$src, (undef))), (iPTR 0))), addr:$dst),
+            (v2f64 (X86Unpckh VR128:$src, VR128:$src)), (iPTR 0))), addr:$dst),
             (VMOVHPSmr addr:$dst, VR128:$src)>;
   def : Pat<(store (f64 (vector_extract
-            (v2f64 (X86Unpckh VR128:$src, (undef))), (iPTR 0))), addr:$dst),
+            (v2f64 (X86Unpckh VR128:$src, VR128:$src)), (iPTR 0))), addr:$dst),
             (VMOVHPDmr addr:$dst, VR128:$src)>;
 }
@@ -1259,7 +1259,7 @@ let Predicates = [HasSSE1] in {
   // Store patterns
   def : Pat<(store (f64 (vector_extract
-            (v2f64 (X86Unpckh VR128:$src, (undef))), (iPTR 0))), addr:$dst),
+            (v2f64 (X86Unpckh VR128:$src, VR128:$src)), (iPTR 0))), addr:$dst),
             (MOVHPSmr addr:$dst, VR128:$src)>;
 }
@@ -1279,7 +1279,7 @@ let Predicates = [HasSSE2] in {
   // Store patterns
   def : Pat<(store (f64 (vector_extract
-            (v2f64 (X86Unpckh VR128:$src, (undef))), (iPTR 0))),addr:$dst),
+            (v2f64 (X86Unpckh VR128:$src, VR128:$src)), (iPTR 0))),addr:$dst),
             (MOVHPDmr addr:$dst, VR128:$src)>;
 }
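
Why the rewritten patterns still match the same stores: unpckh on v2f64 interleaves the high lanes of its two operands, producing { a[1], b[1] }, so lane 0 of the result depends only on the first operand. An undef second operand and VR128:$src itself therefore agree on the element that the (iPTR 0) vector_extract pulls out. A standalone illustration of that lane arithmetic (plain C++ model, not LLVM code):

// Toy model of unpckhpd lane semantics; illustrative only.
#include <array>
#include <cassert>

using v2f64 = std::array<double, 2>;

// unpckhpd(a, b) interleaves the high lanes: { a[1], b[1] }.
static v2f64 unpckh(v2f64 a, v2f64 b) { return {a[1], b[1]}; }

int main() {
  v2f64 x = {1.0, 2.0};
  // Lane 0 of unpckh(x, b) is x[1] for any b, so replacing the undef
  // second operand with the source register extracts the same f64,
  // the value that MOVHPSmr/MOVHPDmr store.
  assert(unpckh(x, x)[0] == x[1]);
  return 0;
}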