diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index b276e83c9a2..57a13ac20f2 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -640,6 +640,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
     setOperationAction(ISD::SELECT, MVT::v4i16, Promote);
     setOperationAction(ISD::SELECT, MVT::v2i32, Promote);
     setOperationAction(ISD::SELECT, MVT::v1i64, Custom);
+    setOperationAction(ISD::VSETCC, MVT::v8i8, Custom);
+    setOperationAction(ISD::VSETCC, MVT::v4i16, Custom);
+    setOperationAction(ISD::VSETCC, MVT::v2i32, Custom);
   }
 
   if (!UseSoftFloat && Subtarget->hasSSE1()) {
@@ -5482,8 +5485,11 @@ SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
 
   switch (VT.getSimpleVT()) {
   default: break;
+  case MVT::v8i8:
   case MVT::v16i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break;
+  case MVT::v4i16:
   case MVT::v8i16: EQOpc = X86ISD::PCMPEQW; GTOpc = X86ISD::PCMPGTW; break;
+  case MVT::v2i32:
   case MVT::v4i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break;
   case MVT::v2i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break;
   }
diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td
index fd00b08383f..e7fea06d178 100644
--- a/lib/Target/X86/X86InstrMMX.td
+++ b/lib/Target/X86/X86InstrMMX.td
@@ -657,6 +657,33 @@
 def : Pat<(v8i8 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
                                                   (iPTR 0))))),
           (v8i8 (MMX_MOVDQ2Qrr VR128:$src))>;
+// Patterns for vector comparisons
+def : Pat<(v8i8 (X86pcmpeqb VR64:$src1, VR64:$src2)),
+          (MMX_PCMPEQBrr VR64:$src1, VR64:$src2)>;
+def : Pat<(v8i8 (X86pcmpeqb VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
+          (MMX_PCMPEQBrm VR64:$src1, addr:$src2)>;
+def : Pat<(v4i16 (X86pcmpeqw VR64:$src1, VR64:$src2)),
+          (MMX_PCMPEQWrr VR64:$src1, VR64:$src2)>;
+def : Pat<(v4i16 (X86pcmpeqw VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
+          (MMX_PCMPEQWrm VR64:$src1, addr:$src2)>;
+def : Pat<(v2i32 (X86pcmpeqd VR64:$src1, VR64:$src2)),
+          (MMX_PCMPEQDrr VR64:$src1, VR64:$src2)>;
+def : Pat<(v2i32 (X86pcmpeqd VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
+          (MMX_PCMPEQDrm VR64:$src1, addr:$src2)>;
+
+def : Pat<(v8i8 (X86pcmpgtb VR64:$src1, VR64:$src2)),
+          (MMX_PCMPGTBrr VR64:$src1, VR64:$src2)>;
+def : Pat<(v8i8 (X86pcmpgtb VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
+          (MMX_PCMPGTBrm VR64:$src1, addr:$src2)>;
+def : Pat<(v4i16 (X86pcmpgtw VR64:$src1, VR64:$src2)),
+          (MMX_PCMPGTWrr VR64:$src1, VR64:$src2)>;
+def : Pat<(v4i16 (X86pcmpgtw VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
+          (MMX_PCMPGTWrm VR64:$src1, addr:$src2)>;
+def : Pat<(v2i32 (X86pcmpgtd VR64:$src1, VR64:$src2)),
+          (MMX_PCMPGTDrr VR64:$src1, VR64:$src2)>;
+def : Pat<(v2i32 (X86pcmpgtd VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
+          (MMX_PCMPGTDrm VR64:$src1, addr:$src2)>;
+
 // CMOV* - Used to implement the SELECT DAG operation.  Expanded by the
 // scheduler into a branch sequence.
 // These are expanded by the scheduler.
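
Usage sketch (illustrative only, not part of the patch): the change teaches the X86
backend to custom-lower ISD::VSETCC for the 64-bit MMX vector types (v8i8, v4i16,
v2i32) and to select the result to the MMX pcmpeq*/pcmpgt* instructions instead of
scalarizing. With a compiler that maps 8 x i8 vectors onto MMX registers, source
like the following (hypothetical names, written with the GCC/Clang vector_size
extension) produces the kind of vector compare node the new v8i8 case handles:

    /* Hypothetical example, not taken from the patch or its tests. */
    typedef signed char v8qi __attribute__((vector_size(8)));

    v8qi eq8(v8qi a, v8qi b) {
      return a == b;   /* each lane is all-ones if equal, zero otherwise */
    }

Whether this actually ends up in an MMX pcmpeqb depends on how the frontend and
type legalizer assign the 64-bit vector type; the assumption here is only that a
v8i8 VSETCC reaches LowerVSETCC, where the new case maps it to X86ISD::PCMPEQB and
the added TableGen pattern selects MMX_PCMPEQBrr.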