* Prefer using operations of matching types, e.g. unpcklpd rather than movlhps.

* Bug fixes.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@27218 91177308-0d34-0410-b5e6-96231b3b80d8
Evan Cheng 2006-03-28 06:50:32 +00:00
parent ec1ab444a1
commit 2064a2b47e
3 changed files with 73 additions and 41 deletions
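
On the first bullet of the commit message: MOVLHPS/MOVHLPS move the same 64 bits as UNPCKLPD/UNPCKHPD, but they are single-precision operations, so for v2f64/v2i64 shuffles the double-precision interleaves keep the instruction's type consistent with its operands (and can avoid a domain-crossing penalty on some microarchitectures). A minimal sketch of the intended mask-to-mnemonic mapping for a 2-element shuffle; the helper name is made up for illustration, and mask indices 0-1 name elements of the first operand, 2-3 elements of the second:

// Illustration only (not from the patch): pick a double-precision
// interleave for a 2-element shuffle mask.
#include <cassert>
#include <string>

std::string pickV2F64Interleave(unsigned M0, unsigned M1) {
  if (M0 == 0 && M1 == 2)
    return "unpcklpd";   // result = {V1[0], V2[0]}: interleave low halves
  if (M0 == 1 && M1 == 3)
    return "unpckhpd";   // result = {V1[1], V2[1]}: interleave high halves
  assert(0 && "not a simple interleave mask");
  return "";
}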


@@ -1456,16 +1456,43 @@ bool X86::isSHUFPMask(SDNode *N) {
 bool X86::isMOVHLPSMask(SDNode *N) {
   assert(N->getOpcode() == ISD::BUILD_VECTOR);
 
-  if (N->getNumOperands() != 2)
+  if (N->getNumOperands() != 4)
     return false;
 
-  // Expect bit 0 == 1, bit1 == 1
+  // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
   SDOperand Bit0 = N->getOperand(0);
   SDOperand Bit1 = N->getOperand(1);
+  SDOperand Bit2 = N->getOperand(2);
+  SDOperand Bit3 = N->getOperand(3);
   assert(isa<ConstantSDNode>(Bit0) && isa<ConstantSDNode>(Bit1) &&
+         isa<ConstantSDNode>(Bit2) && isa<ConstantSDNode>(Bit3) &&
          "Invalid VECTOR_SHUFFLE mask!");
+  return (cast<ConstantSDNode>(Bit0)->getValue() == 6 &&
+          cast<ConstantSDNode>(Bit1)->getValue() == 7 &&
+          cast<ConstantSDNode>(Bit2)->getValue() == 2 &&
+          cast<ConstantSDNode>(Bit3)->getValue() == 3);
+}
+
+/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
+/// specifies a shuffle of elements that is suitable for input to MOVLHPS.
+bool X86::isMOVLHPSMask(SDNode *N) {
+  assert(N->getOpcode() == ISD::BUILD_VECTOR);
+
+  if (N->getNumOperands() != 4)
+    return false;
+
+  // Expect bit0 == 0, bit1 == 1, bit2 == 4, bit3 == 5
+  SDOperand Bit0 = N->getOperand(0);
+  SDOperand Bit1 = N->getOperand(1);
+  SDOperand Bit2 = N->getOperand(2);
+  SDOperand Bit3 = N->getOperand(3);
+  assert(isa<ConstantSDNode>(Bit0) && isa<ConstantSDNode>(Bit1) &&
+         isa<ConstantSDNode>(Bit2) && isa<ConstantSDNode>(Bit3) &&
+         "Invalid VECTOR_SHUFFLE mask!");
   return (cast<ConstantSDNode>(Bit0)->getValue() == 0 &&
-          cast<ConstantSDNode>(Bit1)->getValue() == 3);
+          cast<ConstantSDNode>(Bit1)->getValue() == 1 &&
+          cast<ConstantSDNode>(Bit2)->getValue() == 4 &&
+          cast<ConstantSDNode>(Bit3)->getValue() == 5);
 }
 
 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
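
For reference, the convention behind the constants checked above: in a two-input VECTOR_SHUFFLE of v4f32, a mask entry in 0-3 selects an element of the first operand and an entry in 4-7 selects from the second. A small standalone sketch of that indexing (the function name is made up, plain arrays instead of SDOperands):

#include <array>
#include <cassert>

// Evaluate a 4-element two-input shuffle mask: index < 4 reads V1,
// index >= 4 reads V2.
std::array<float, 4> applyMask(const std::array<float, 4> &V1,
                               const std::array<float, 4> &V2,
                               const std::array<unsigned, 4> &Mask) {
  std::array<float, 4> R;
  for (unsigned i = 0; i != 4; ++i) {
    assert(Mask[i] < 8 && "mask index out of range");
    R[i] = Mask[i] < 4 ? V1[Mask[i]] : V2[Mask[i] - 4];
  }
  return R;
}

// Mask <6,7,2,3> gives {V2[2], V2[3], V1[2], V1[3]}, which is what MOVHLPS
// produces with V1 as the two-address destination and V2 as the source;
// mask <0,1,4,5> gives {V1[0], V1[1], V2[0], V2[1]}, matching MOVLHPS.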
@@ -1556,6 +1583,30 @@ unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
   return Mask;
 }
 
+/// CommuteVectorShuffleIfNeeded - Swap the vector_shuffle operands (as well
+/// as the values in their permute mask) if needed. Returns an empty SDOperand
+/// if the node is already well formed.
+static SDOperand CommuteVectorShuffleIfNeeded(SDOperand V1, SDOperand V2,
+                                              SDOperand Mask, MVT::ValueType VT,
+                                              SelectionDAG &DAG) {
+  unsigned NumElems = Mask.getNumOperands();
+  SDOperand Half1 = Mask.getOperand(0);
+  SDOperand Half2 = Mask.getOperand(NumElems/2);
+  if (cast<ConstantSDNode>(Half1)->getValue() >= NumElems &&
+      cast<ConstantSDNode>(Half2)->getValue() < NumElems) {
+    // Swap the operands and change the mask.
+    std::vector<SDOperand> MaskVec;
+    for (unsigned i = NumElems / 2; i != NumElems; ++i)
+      MaskVec.push_back(Mask.getOperand(i));
+    for (unsigned i = 0; i != NumElems / 2; ++i)
+      MaskVec.push_back(Mask.getOperand(i));
+    Mask =
+      DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), MaskVec);
+    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V2, V1, Mask);
+  }
+  return SDOperand();
+}
+
 /// LowerOperation - Provide custom lowering hooks for some operations.
 ///
 SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
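
To make the new helper concrete: when the first half of the mask refers entirely to the second operand, as in the MOVHLPS mask <6,7,2,3>, the operands are swapped and the two halves of the mask are exchanged, giving <2,3,6,7>; both of those forms select {V2[2], V2[3], V1[2], V1[3]}. A plain-integer sketch of the mask half-swap (illustrative names, not the SelectionDAG API):

#include <algorithm>
#include <vector>

// Exchange the two halves of a shuffle mask, mirroring the two push_back
// loops in CommuteVectorShuffleIfNeeded but on plain unsigneds.
std::vector<unsigned> swapMaskHalves(std::vector<unsigned> Mask) {
  std::rotate(Mask.begin(), Mask.begin() + Mask.size() / 2, Mask.end());
  return Mask;
}

// swapMaskHalves({6, 7, 2, 3}) returns {2, 3, 6, 7}.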
@@ -2336,11 +2387,10 @@ SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
     MVT::ValueType VT = Op.getValueType();
     unsigned NumElems = PermMask.getNumOperands();
 
-    // All v2f64 cases are handled.
-    if (NumElems == 2) return SDOperand();
-
-    // Handle splat cases.
-    if (X86::isSplatMask(PermMask.Val)) {
+    if (NumElems == 2)
+      return CommuteVectorShuffleIfNeeded(V1, V2, PermMask, VT, DAG);
+    else if (X86::isSplatMask(PermMask.Val)) {
+      // Handle splat cases.
       if (V2.getOpcode() == ISD::UNDEF)
         // Leave the VECTOR_SHUFFLE alone. It matches SHUFP*.
         return SDOperand();
@@ -2350,7 +2400,8 @@ SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
       return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                          DAG.getNode(ISD::UNDEF, V1.getValueType()),
                          PermMask);
-    } else if (X86::isUNPCKLMask(PermMask.Val)) {
+    } else if (X86::isUNPCKLMask(PermMask.Val) ||
+               X86::isUNPCKHMask(PermMask.Val)) {
       // Leave the VECTOR_SHUFFLE alone. It matches {P}UNPCKL*.
       return SDOperand();
     } else if (X86::isPSHUFDMask(PermMask.Val)) {
@@ -2362,21 +2413,8 @@ SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
       return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                          DAG.getNode(ISD::UNDEF, V1.getValueType()),
                          PermMask);
-    } else if (X86::isSHUFPMask(PermMask.Val)) {
-      SDOperand Elt = PermMask.getOperand(0);
-      if (cast<ConstantSDNode>(Elt)->getValue() >= NumElems) {
-        // Swap the operands and change mask.
-        std::vector<SDOperand> MaskVec;
-        for (unsigned i = NumElems / 2; i != NumElems; ++i)
-          MaskVec.push_back(PermMask.getOperand(i));
-        for (unsigned i = 0; i != NumElems / 2; ++i)
-          MaskVec.push_back(PermMask.getOperand(i));
-        PermMask =
-          DAG.getNode(ISD::BUILD_VECTOR, PermMask.getValueType(), MaskVec);
-        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V2, V1, PermMask);
-      }
-      return SDOperand();
-    }
+    } else if (X86::isSHUFPMask(PermMask.Val))
+      return CommuteVectorShuffleIfNeeded(V1, V2, PermMask, VT, DAG);
 
     assert(0 && "Unexpected VECTOR_SHUFFLE to lower");
     abort();


@@ -188,6 +188,10 @@ namespace llvm {
   /// specifies a shuffle of elements that is suitable for input to SHUFP*.
   bool isSHUFPMask(SDNode *N);
 
+  /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
+  /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
+  bool isMOVLHPSMask(SDNode *N);
+
   /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
   /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
   bool isMOVHLPSMask(SDNode *N);


@@ -63,6 +63,10 @@ def MOVLHPS_splat_mask : PatLeaf<(build_vector), [{
   return X86::isSplatMask(N);
 }]>;
 
+def MOVLHPS_shuffle_mask : PatLeaf<(build_vector), [{
+  return X86::isMOVLHPSMask(N);
+}]>;
+
 def MOVHLPS_shuffle_mask : PatLeaf<(build_vector), [{
   return X86::isMOVHLPSMask(N);
 }]>;
@@ -492,13 +496,13 @@ let isTwoAddress = 1 in {
 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
                     "movlhps {$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
-                      (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
-                              UNPCKL_shuffle_mask)))]>;
+                      (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
+                              MOVLHPS_shuffle_mask)))]>;
 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
                     "movhlps {$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
-                      (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
+                      (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                               MOVHLPS_shuffle_mask)))]>;
 }
@@ -1204,17 +1208,3 @@ def : Pat<(vector_shuffle (v4f32 VR128:$src), (undef), PSHUFD_shuffle_mask:$sm),
 def : Pat<(vector_shuffle (v4i32 VR128:$src), (undef), PSHUFD_shuffle_mask:$sm),
           (v4i32 (PSHUFDrr VR128:$src, PSHUFD_shuffle_mask:$sm))>,
       Requires<[HasSSE2]>;
-
-// Shuffle v2i64
-def : Pat<(vector_shuffle (v2i64 VR128:$src1), (v2i64 VR128:$src2),
-          UNPCKL_shuffle_mask:$sm),
-          (v2i64 (MOVLHPSrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
-def : Pat<(vector_shuffle (v2i64 VR128:$src1), (v2i64 VR128:$src2),
-          MOVHLPS_shuffle_mask:$sm),
-          (v2i64 (MOVHLPSrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
-def : Pat<(vector_shuffle (v2i64 VR128:$src1), (load addr:$src2),
-          UNPCKL_shuffle_mask:$sm),
-          (v2i64 (UNPCKLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
-def : Pat<(vector_shuffle (v2i64 VR128:$src1), (load addr:$src2),
-          UNPCKH_shuffle_mask:$sm),
-          (v2i64 (UNPCKHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;