diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 5649d6461e5..e9178f217a3 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -277,7 +277,7 @@ X86TargetLowering::X86TargetLowering(TargetMachine &TM)
     setOperationAction(ISD::SUB, MVT::v4f32, Legal);
     setOperationAction(ISD::MUL, MVT::v4f32, Legal);
     setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
-    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Expand);
+    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
   }
 
@@ -300,10 +300,11 @@ X86TargetLowering::X86TargetLowering(TargetMachine &TM)
     setOperationAction(ISD::LOAD, MVT::v8i16, Legal);
     setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
     setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
-    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Expand);
-    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Expand);
-    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Expand);
-    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Expand);
+    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
+    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
+    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
+    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
+    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
@@ -1529,6 +1530,23 @@ unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
   return Mask;
 }
 
+/// isZeroVector - Return true if all elements of BUILD_VECTOR are 0 or +0.0.
+bool X86::isZeroVector(SDNode *N) {
+  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end();
+       I != E; ++I) {
+    if (ConstantFPSDNode *FPC = dyn_cast<ConstantFPSDNode>(*I)) {
+      if (!FPC->isExactlyValue(+0.0))
+        return false;
+    } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(*I)) {
+      if (!C->isNullValue())
+        return false;
+    } else
+      return false;
+  }
+
+  return true;
+}
+
 /// LowerOperation - Provide custom lowering hooks for some operations.
 ///
 SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
@@ -2348,10 +2366,28 @@ SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
       return SDOperand();
     }
 
-    // TODO.
-    assert(0 && "TODO");
+    assert(0 && "Unexpected VECTOR_SHUFFLE to lower");
     abort();
   }
+  case ISD::BUILD_VECTOR: {
+    bool isZero = true;
+    unsigned NumElems = Op.getNumOperands();
+    for (unsigned i = 0; i < NumElems; ++i) {
+      SDOperand V = Op.getOperand(i);
+      if (ConstantFPSDNode *FPC = dyn_cast<ConstantFPSDNode>(V)) {
+        if (!FPC->isExactlyValue(+0.0))
+          isZero = false;
+      } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(V)) {
+        if (!C->isNullValue())
+          isZero = false;
+      } else
+        isZero = false;
+    }
+
+    if (isZero)
+      return Op;
+    return SDOperand();
+  }
   }
 }
 
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index bc4a7461d1d..854f76da2be 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -208,6 +208,9 @@ namespace llvm {
   /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
   /// instructions.
   unsigned getShuffleSHUFImmediate(SDNode *N);
+
+  /// isZeroVector - Return true if all elements of BUILD_VECTOR are 0 or +0.0.
+  bool isZeroVector(SDNode *N);
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/lib/Target/X86/X86InstrFPStack.td b/lib/Target/X86/X86InstrFPStack.td
index 03b615cddb8..3283ed6b479 100644
--- a/lib/Target/X86/X86InstrFPStack.td
+++ b/lib/Target/X86/X86InstrFPStack.td
@@ -50,10 +50,6 @@ def X86fp_to_i64mem : SDNode<"X86ISD::FP_TO_INT64_IN_MEM", SDTX86FpToIMem,
 // FPStack pattern fragments
 //===----------------------------------------------------------------------===//
 
-def fp32imm0 : PatLeaf<(f32 fpimm), [{
-  return N->isExactlyValue(+0.0);
-}]>;
-
 def fp64imm0 : PatLeaf<(f64 fpimm), [{
   return N->isExactlyValue(+0.0);
 }]>;
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index e2ec85df82f..2011f1e5a8f 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -45,6 +45,14 @@ def loadv8i16 : PatFrag<(ops node:$ptr), (v8i16 (load node:$ptr))>;
 def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
 def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
 
+def fp32imm0 : PatLeaf<(f32 fpimm), [{
+  return N->isExactlyValue(+0.0);
+}]>;
+
+def vecimm0 : PatLeaf<(build_vector), [{
+  return X86::isZeroVector(N);
+}]>;
+
 // SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
 // SHUFP* etc. imm.
 def SHUFFLE_get_shuf_imm : SDNodeXForm<imm, [{
@@ ... @@
+def VZEROv16i8 : I<0xEF, MRMInitReg, (ops VR128:$dst), "pxor $dst, $dst", [(set VR128:$dst, (v16i8 vecimm0))]>,
+                 Requires<[HasSSE2]>, TB, OpSize;
+def VZEROv8i16 : I<0xEF, MRMInitReg, (ops VR128:$dst), "pxor $dst, $dst", [(set VR128:$dst, (v8i16 vecimm0))]>,
+                 Requires<[HasSSE2]>, TB, OpSize;
+def VZEROv4i32 : I<0xEF, MRMInitReg, (ops VR128:$dst), "pxor $dst, $dst", [(set VR128:$dst, (v4i32 vecimm0))]>,
+                 Requires<[HasSSE2]>, TB, OpSize;
+def VZEROv2i64 : I<0xEF, MRMInitReg, (ops VR128:$dst), "pxor $dst, $dst", [(set VR128:$dst, (v2i64 vecimm0))]>,
+                 Requires<[HasSSE2]>, TB, OpSize;
+def VZEROv4f32 : PSI<0x57, MRMInitReg, (ops VR128:$dst), "xorps $dst, $dst", [(set VR128:$dst, (v4f32 vecimm0))]>;
+def VZEROv2f64 : PDI<0x57, MRMInitReg, (ops VR128:$dst), "xorpd $dst, $dst", [(set VR128:$dst, (v2f64 vecimm0))]>;
+
 def FR32ToV4F32 : PSI<0x28, MRMSrcReg, (ops VR128:$dst, FR32:$src),
                       "movaps {$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
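Illustration (not part of the patch): with BUILD_VECTOR marked Custom, LowerOperation returns an all-zeros build_vector unchanged, so it survives to instruction selection where the new vecimm0 patterns match it to a self-XOR idiom; any other build_vector gets the default expansion via the empty SDOperand(). A minimal sketch of the expected effect, written in current LLVM IR syntax for readability (the function names are hypothetical):

  define <4 x float> @zero_v4f32() {
    ; expected to select "xorps %xmm0, %xmm0" via VZEROv4f32
    ret <4 x float> zeroinitializer
  }

  define <2 x i64> @zero_v2i64() {
    ; expected to select "pxor %xmm0, %xmm0" via VZEROv2i64 (requires SSE2)
    ret <2 x i64> zeroinitializer
  }

Materializing zero with xorps/xorpd/pxor avoids a constant-pool load and is the standard x86 zeroing idiom, which is why the lowering keeps a zero BUILD_VECTOR intact instead of expanding it element by element.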