diff --git a/include/llvm/IntrinsicsCellSPU.td b/include/llvm/IntrinsicsCellSPU.td index 7030278708e..dde5aff2fe0 100644 --- a/include/llvm/IntrinsicsCellSPU.td +++ b/include/llvm/IntrinsicsCellSPU.td @@ -20,6 +20,9 @@ def cell_i7_ty: LLVMType<i7>; def cell_i8_ty: LLVMType<i8>; +// Keep this here until it's actually supported: +def llvm_i128_ty : LLVMType<i128>; + class v16i8_u7imm<string builtin_suffix> : GCCBuiltin<!strconcat("__builtin_si_", builtin_suffix)>, Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty, cell_i7_ty], @@ -158,9 +161,17 @@ let TargetPrefix = "spu" in { [IntrNoMem]>; def int_spu_si_shli: v4i32_u7imm<"shli">; - def int_spu_si_shlqbi: v16i8_rr<"shlqbi">; + + def int_spu_si_shlqbi: + GCCBuiltin<"__builtin_si_shlqbi">, + Intrinsic<[llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty], + [IntrNoMem]>; + def int_spu_si_shlqbii: v16i8_u7imm<"shlqbii">; - def int_spu_si_shlqby: v16i8_rr<"shlqby">; + def int_spu_si_shlqby: + GCCBuiltin<"__builtin_si_shlqby">, + Intrinsic<[llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty], + [IntrNoMem]>; def int_spu_si_shlqbyi: v16i8_u7imm<"shlqbyi">; def int_spu_si_ceq: v4i32_rr<"ceq">; diff --git a/lib/Target/CellSPU/CellSDKIntrinsics.td b/lib/Target/CellSPU/CellSDKIntrinsics.td index 2f453b1feb1..5d759a41c2c 100644 --- a/lib/Target/CellSPU/CellSDKIntrinsics.td +++ b/lib/Target/CellSPU/CellSDKIntrinsics.td @@ -210,20 +210,20 @@ def CellSDKshli: (SHLIv4i32 VECREG:$rA, uimm7:$val)>; def CellSDKshlqbi: - Pat<(int_spu_si_shlqbi VECREG:$rA, VECREG:$rB), - (SHLQBIvec VECREG:$rA, VECREG:$rB)>; + Pat<(int_spu_si_shlqbi VECREG:$rA, R32C:$rB), + (SHLQBIv16i8 VECREG:$rA, R32C:$rB)>; def CellSDKshlqii: Pat<(int_spu_si_shlqbii VECREG:$rA, uimm7:$val), - (SHLQBIIvec VECREG:$rA, uimm7:$val)>; + (SHLQBIIv16i8 VECREG:$rA, uimm7:$val)>; def CellSDKshlqby: - Pat<(int_spu_si_shlqby VECREG:$rA, VECREG:$rB), - (SHLQBYvec VECREG:$rA, VECREG:$rB)>; + Pat<(int_spu_si_shlqby VECREG:$rA, R32C:$rB), + (SHLQBYv16i8 VECREG:$rA, R32C:$rB)>; def CellSDKshlqbyi: Pat<(int_spu_si_shlqbyi VECREG:$rA, uimm7:$val), - (SHLQBYIvec VECREG:$rA, uimm7:$val)>; + (SHLQBYIv16i8 VECREG:$rA, uimm7:$val)>; //===----------------------------------------------------------------------===// // Branch/compare intrinsics: diff --git a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp index 8bde66300a1..6fad71471dc 100644 --- a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp +++ b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp @@ -166,24 +166,25 @@ namespace { struct valtype_map_s { MVT::ValueType VT; unsigned ldresult_ins; /// LDRESULT instruction (0 = undefined) + bool ldresult_imm; /// LDRESULT instruction requires immediate? int prefslot_byte; /// Byte offset of the "preferred" slot }; const valtype_map_s valtype_map[] = { - { MVT::i1, 0, 3 }, - { MVT::i8, SPU::ORBIr8, 3 }, - { MVT::i16, SPU::ORHIr16, 2 }, - { MVT::i32, SPU::ORIr32, 0 }, - { MVT::i64, SPU::ORIr64, 0 }, - { MVT::f32, 0, 0 }, - { MVT::f64, 0, 0 }, + { MVT::i1, 0, false, 3 }, + { MVT::i8, SPU::ORBIr8, true, 3 }, + { MVT::i16, SPU::ORHIr16, true, 2 }, + { MVT::i32, SPU::ORIr32, true, 0 }, + { MVT::i64, SPU::ORr64, false, 0 }, + { MVT::f32, SPU::ORf32, false, 0 }, + { MVT::f64, SPU::ORf64, false, 0 }, // vector types... (sigh!)
- { MVT::v16i8, 0, 0 }, - { MVT::v8i16, 0, 0 }, - { MVT::v4i32, 0, 0 }, - { MVT::v2i64, 0, 0 }, - { MVT::v4f32, 0, 0 }, - { MVT::v2f64, 0, 0 } + { MVT::v16i8, 0, false, 0 }, + { MVT::v8i16, 0, false, 0 }, + { MVT::v4i32, 0, false, 0 }, + { MVT::v2i64, 0, false, 0 }, + { MVT::v4f32, 0, false, 0 }, + { MVT::v2f64, 0, false, 0 } }; const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]); @@ -603,7 +604,7 @@ SPUDAGToDAGISel::Select(SDOperand Op) { // to i8, then i8 to i16 in logical/branching operations. DEBUG(cerr << "CellSPU: Coalescing (zero_extend:i16 (and:i8 " ", ))\n"); - NewOpc = SPU::ANDHI1To2; + NewOpc = SPU::ANDHIi8i16; Ops[0] = Op1.getOperand(0); Ops[1] = Op1.getOperand(1); n_ops = 2; @@ -615,24 +616,23 @@ SPUDAGToDAGISel::Select(SDOperand Op) { SDOperand Arg = N->getOperand(0); SDOperand Chain = N->getOperand(1); SDNode *Result; + const valtype_map_s *vtm = getValueTypeMapEntry(VT); + + if (vtm->ldresult_ins == 0) { + cerr << "LDRESULT for unsupported type: " + << MVT::getValueTypeString(VT) + << "\n"; + abort(); + } AddToISelQueue(Arg); - if (!MVT::isFloatingPoint(VT)) { + Opc = vtm->ldresult_ins; + if (vtm->ldresult_imm) { SDOperand Zero = CurDAG->getTargetConstant(0, VT); - const valtype_map_s *vtm = getValueTypeMapEntry(VT); - - if (vtm->ldresult_ins == 0) { - cerr << "LDRESULT for unsupported type: " - << MVT::getValueTypeString(VT) - << "\n"; - abort(); - } else - Opc = vtm->ldresult_ins; AddToISelQueue(Zero); Result = CurDAG->getTargetNode(Opc, VT, MVT::Other, Arg, Zero, Chain); } else { - Opc = (VT == MVT::f32 ? SPU::ORf32 : SPU::ORf64); Result = CurDAG->getTargetNode(Opc, MVT::Other, Arg, Arg, Chain); } diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp index 23c860ab754..99243d3a62b 100644 --- a/lib/Target/CellSPU/SPUISelLowering.cpp +++ b/lib/Target/CellSPU/SPUISelLowering.cpp @@ -216,6 +216,10 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM) setOperationAction(ISD::SHL, MVT::i8, Custom); setOperationAction(ISD::SRL, MVT::i8, Custom); setOperationAction(ISD::SRA, MVT::i8, Custom); + // And SPU needs custom lowering for shift left/right for i64 + setOperationAction(ISD::SHL, MVT::i64, Custom); + setOperationAction(ISD::SRL, MVT::i64, Custom); + setOperationAction(ISD::SRA, MVT::i64, Custom); // Custom lower i32 multiplications setOperationAction(ISD::MUL, MVT::i32, Custom); @@ -248,13 +252,11 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM) setOperationAction(ISD::SELECT, MVT::f32, Expand); setOperationAction(ISD::SELECT, MVT::f64, Expand); - setOperationAction(ISD::SETCC, MVT::i1, Expand); - setOperationAction(ISD::SETCC, MVT::i8, Expand); - setOperationAction(ISD::SETCC, MVT::i16, Expand); - setOperationAction(ISD::SETCC, MVT::i32, Expand); - setOperationAction(ISD::SETCC, MVT::i64, Expand); - setOperationAction(ISD::SETCC, MVT::f32, Expand); - setOperationAction(ISD::SETCC, MVT::f64, Expand); + // Zero extension and sign extension for i64 have to be + // custom legalized + setOperationAction(ISD::ZERO_EXTEND, MVT::i64, Custom); + setOperationAction(ISD::SIGN_EXTEND, MVT::i64, Custom); + setOperationAction(ISD::ANY_EXTEND, MVT::i64, Custom); // SPU has a legal FP -> signed INT instruction setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal); @@ -377,6 +379,9 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM) // We have target-specific dag combine patterns for the following nodes: setTargetDAGCombine(ISD::ADD); + setTargetDAGCombine(ISD::ZERO_EXTEND); + 
setTargetDAGCombine(ISD::SIGN_EXTEND); + setTargetDAGCombine(ISD::ANY_EXTEND); computeRegisterProperties(); } @@ -407,13 +412,17 @@ SPUTargetLowering::getTargetNodeName(unsigned Opcode) const node_names[(unsigned) SPUISD::MPYU] = "SPUISD::MPYU"; node_names[(unsigned) SPUISD::MPYH] = "SPUISD::MPYH"; node_names[(unsigned) SPUISD::MPYHH] = "SPUISD::MPYHH"; + node_names[(unsigned) SPUISD::SHLQUAD_L_BITS] = "SPUISD::SHLQUAD_L_BITS"; + node_names[(unsigned) SPUISD::SHLQUAD_L_BYTES] = "SPUISD::SHLQUAD_L_BYTES"; node_names[(unsigned) SPUISD::VEC_SHL] = "SPUISD::VEC_SHL"; node_names[(unsigned) SPUISD::VEC_SRL] = "SPUISD::VEC_SRL"; node_names[(unsigned) SPUISD::VEC_SRA] = "SPUISD::VEC_SRA"; node_names[(unsigned) SPUISD::VEC_ROTL] = "SPUISD::VEC_ROTL"; node_names[(unsigned) SPUISD::VEC_ROTR] = "SPUISD::VEC_ROTR"; - node_names[(unsigned) SPUISD::ROTBYTES_RIGHT_Z] = - "SPUISD::ROTBYTES_RIGHT_Z"; + node_names[(unsigned) SPUISD::ROTQUAD_RZ_BYTES] = + "SPUISD::ROTQUAD_RZ_BYTES"; + node_names[(unsigned) SPUISD::ROTQUAD_RZ_BITS] = + "SPUISD::ROTQUAD_RZ_BITS"; node_names[(unsigned) SPUISD::ROTBYTES_RIGHT_S] = "SPUISD::ROTBYTES_RIGHT_S"; node_names[(unsigned) SPUISD::ROTBYTES_LEFT] = "SPUISD::ROTBYTES_LEFT"; @@ -708,15 +717,7 @@ LowerSTORE(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { && basePtr.getOperand(0).getOpcode() == SPUISD::IndirectAddr)) { insertEltPtr = basePtr; } else { -#if 0 - // $sp is always aligned, so use it when necessary to avoid loading - // an address - SDOperand ptrP = - basePtr.Val->hasOneUse() ? DAG.getRegister(SPU::R1, PtrVT) : basePtr; - insertEltPtr = DAG.getNode(ISD::ADD, PtrVT, ptrP, insertEltOffs); -#else insertEltPtr = DAG.getNode(ISD::ADD, PtrVT, basePtr, insertEltOffs); -#endif } insertEltOp = DAG.getNode(SPUISD::INSERT_MASK, stVecVT, insertEltPtr); @@ -762,14 +763,9 @@ LowerConstantPool(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { // Just return the SDOperand with the constant pool address in it. return DAG.getNode(SPUISD::AFormAddr, PtrVT, CPI, Zero); } else { -#if 1 SDOperand Hi = DAG.getNode(SPUISD::Hi, PtrVT, CPI, Zero); SDOperand Lo = DAG.getNode(SPUISD::Lo, PtrVT, CPI, Zero); - - return DAG.getNode(ISD::ADD, PtrVT, Lo, Hi); -#else - return DAG.getNode(SPUISD::IndirectAddr, PtrVT, CPI, Zero); -#endif + return DAG.getNode(SPUISD::IndirectAddr, PtrVT, Hi, Lo); } } @@ -787,10 +783,13 @@ LowerJumpTable(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { const TargetMachine &TM = DAG.getTarget(); if (TM.getRelocationModel() == Reloc::Static) { - SDOperand JmpAForm = DAG.getNode(SPUISD::AFormAddr, PtrVT, JTI, Zero); - return (!ST->usingLargeMem() - ? JmpAForm - : DAG.getNode(SPUISD::IndirectAddr, PtrVT, JmpAForm, Zero)); + if (!ST->usingLargeMem()) { + return DAG.getNode(SPUISD::AFormAddr, PtrVT, JTI, Zero); + } else { + SDOperand Hi = DAG.getNode(SPUISD::Hi, PtrVT, JTI, Zero); + SDOperand Lo = DAG.getNode(SPUISD::Lo, PtrVT, JTI, Zero); + return DAG.getNode(SPUISD::IndirectAddr, PtrVT, Hi, Lo); + } } assert(0 && @@ -1807,7 +1806,7 @@ static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { else SrcElt = cast(PermMask.getOperand(i))->getValue(); - for (unsigned j = 0; j != BytesPerElement; ++j) { + for (unsigned j = 0; j < BytesPerElement; ++j) { ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, MVT::i8)); } @@ -1925,17 +1924,10 @@ static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) { // is to break it all apart, sign extend, and reassemble the various // intermediate products. 
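  // Illustrative summary (comment only, not part of the original patch):
  // the byte-wise product is reassembled from halfword partial products --
  // a low-half multiply, a high-byte multiply shifted back into place, and
  // mpyhh-based cross terms -- which SELB interleaves under the byte-select
  // mask FSMBI(0x2222). The rewritten code below keeps those partials as
  // plain DAG values instead of round-tripping them through virtual
  // registers with CopyToReg/CopyFromReg.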
case MVT::v16i8: { - MachineFunction &MF = DAG.getMachineFunction(); - MachineRegisterInfo &RegInfo = MF.getRegInfo(); - SDOperand Chain = Op.getOperand(0); SDOperand rA = Op.getOperand(0); SDOperand rB = Op.getOperand(1); - SDOperand c8 = DAG.getConstant(8, MVT::i8); - SDOperand c16 = DAG.getConstant(16, MVT::i8); - - unsigned FSMBreg_2222 = RegInfo.createVirtualRegister(&SPU::VECREGRegClass); - unsigned LoProd_reg = RegInfo.createVirtualRegister(&SPU::VECREGRegClass); - unsigned HiProd_reg = RegInfo.createVirtualRegister(&SPU::VECREGRegClass); + SDOperand c8 = DAG.getConstant(8, MVT::i32); + SDOperand c16 = DAG.getConstant(16, MVT::i32); SDOperand LLProd = DAG.getNode(SPUISD::MPY, MVT::v8i16, @@ -1950,24 +1942,19 @@ static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) { DAG.getNode(SPUISD::VEC_SHL, MVT::v8i16, DAG.getNode(SPUISD::MPY, MVT::v8i16, rALH, rBLH), c8); - SDOperand FSMBdef_2222 = - DAG.getCopyToReg(Chain, FSMBreg_2222, - DAG.getNode(SPUISD::FSMBI, MVT::v8i16, - DAG.getConstant(0x2222, MVT::i32))); + SDOperand FSMBmask = DAG.getNode(SPUISD::FSMBI, MVT::v8i16, + DAG.getConstant(0x2222, MVT::i32)); - SDOperand FSMBuse_2222 = - DAG.getCopyFromReg(FSMBdef_2222, FSMBreg_2222, MVT::v4i32); - - SDOperand LoProd_1 = - DAG.getCopyToReg(Chain, LoProd_reg, - DAG.getNode(SPUISD::SELB, MVT::v8i16, LLProd, LHProd, - FSMBuse_2222)); + SDOperand LoProdParts = + DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, + DAG.getNode(SPUISD::SELB, MVT::v8i16, + LLProd, LHProd, FSMBmask)); SDOperand LoProdMask = DAG.getConstant(0xffff, MVT::i32); SDOperand LoProd = DAG.getNode(ISD::AND, MVT::v4i32, - DAG.getCopyFromReg(LoProd_1, LoProd_reg, MVT::v4i32), + LoProdParts, DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, LoProdMask, LoProdMask, LoProdMask, LoProdMask)); @@ -1993,15 +1980,13 @@ static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) { DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32, rBH, c8))); SDOperand HHProd = - DAG.getCopyToReg(Chain, HiProd_reg, - DAG.getNode(SPUISD::SELB, MVT::v8i16, - HLProd, - DAG.getNode(SPUISD::VEC_SHL, MVT::v8i16, HHProd_1, c8), - FSMBuse_2222)); + DAG.getNode(SPUISD::SELB, MVT::v8i16, + HLProd, + DAG.getNode(SPUISD::VEC_SHL, MVT::v8i16, HHProd_1, c8), + FSMBmask); SDOperand HiProd = - DAG.getNode(SPUISD::VEC_SHL, MVT::v4i32, - DAG.getCopyFromReg(HHProd, HiProd_reg, MVT::v4i32), c16); + DAG.getNode(SPUISD::VEC_SHL, MVT::v4i32, HHProd, c16); return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, DAG.getNode(ISD::OR, MVT::v4i32, @@ -2168,7 +2153,8 @@ static SDOperand LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { return result; } -static SDOperand LowerI8Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) { +static SDOperand LowerI8Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) +{ SDOperand N0 = Op.getOperand(0); // Everything has at least one operand assert(Op.getValueType() == MVT::i8); @@ -2254,6 +2240,87 @@ static SDOperand LowerI8Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) { return SDOperand(); } +static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) +{ + MVT::ValueType VT = Op.getValueType(); + unsigned VecVT = + MVT::getVectorType(VT, (128 / MVT::getSizeInBits(VT))); + + SDOperand Op0 = Op.getOperand(0); + + switch (Opc) { + case ISD::ZERO_EXTEND: + case ISD::SIGN_EXTEND: + case ISD::ANY_EXTEND: { + MVT::ValueType Op0VT = Op0.getValueType(); + unsigned Op0VecVT = + MVT::getVectorType(Op0VT, (128 / MVT::getSizeInBits(Op0VT))); + + assert(Op0VT == MVT::i32 + && "CellSPU: Zero/sign extending something other than i32"); + + 
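+      // Sketch of the trick (illustrative comment, not part of the patch;
+      // byte offsets assume the preferred-slot table in SPUISelDAGToDAG.cpp):
+      // the i32 source sits in bytes 0-3 of its quadword. Rotating the quad
+      // right by 4 bytes moves it into bytes 4-7 -- the low half of the i64
+      // preferred slot -- while the fill supplies the upper 32 bits:
+      //   ROTQUAD_RZ_BYTES  -> zero fill (zero/any extend)
+      //   ROTBYTES_RIGHT_S  -> sign fill (sign extend)
+      // hence the constant byte count of 4 below.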
unsigned NewOpc = (Opc == ISD::SIGN_EXTEND + ? SPUISD::ROTBYTES_RIGHT_S + : SPUISD::ROTQUAD_RZ_BYTES); + SDOperand PromoteScalar = + DAG.getNode(SPUISD::PROMOTE_SCALAR, Op0VecVT, Op0); + + return DAG.getNode(SPUISD::EXTRACT_ELT0, VT, + DAG.getNode(ISD::BIT_CONVERT, VecVT, + DAG.getNode(NewOpc, Op0VecVT, + PromoteScalar, + DAG.getConstant(4, MVT::i32)))); + } + + case ISD::SHL: { + SDOperand ShiftAmt = Op.getOperand(1); + unsigned ShiftAmtVT = unsigned(ShiftAmt.getValueType()); + SDOperand Op0Vec = DAG.getNode(SPUISD::PROMOTE_SCALAR, VecVT, Op0); + SDOperand MaskLower = + DAG.getNode(SPUISD::SELB, VecVT, + Op0Vec, + DAG.getConstant(0, VecVT), + DAG.getNode(SPUISD::FSMBI, VecVT, + DAG.getConstant(0xff00ULL, MVT::i16))); + SDOperand ShiftAmtBytes = + DAG.getNode(ISD::SRL, ShiftAmtVT, + ShiftAmt, + DAG.getConstant(3, ShiftAmtVT)); + SDOperand ShiftAmtBits = + DAG.getNode(ISD::AND, ShiftAmtVT, + ShiftAmt, + DAG.getConstant(7, ShiftAmtVT)); + + return DAG.getNode(SPUISD::EXTRACT_ELT0, VT, + DAG.getNode(SPUISD::SHLQUAD_L_BITS, VecVT, + DAG.getNode(SPUISD::SHLQUAD_L_BYTES, VecVT, + MaskLower, ShiftAmtBytes), + ShiftAmtBits)); + } + + case ISD::SRL: { + unsigned VT = unsigned(Op.getValueType()); + SDOperand ShiftAmt = Op.getOperand(1); + unsigned ShiftAmtVT = unsigned(ShiftAmt.getValueType()); + SDOperand ShiftAmtBytes = + DAG.getNode(ISD::SRL, ShiftAmtVT, + ShiftAmt, + DAG.getConstant(3, ShiftAmtVT)); + SDOperand ShiftAmtBits = + DAG.getNode(ISD::AND, ShiftAmtVT, + ShiftAmt, + DAG.getConstant(7, ShiftAmtVT)); + + return DAG.getNode(SPUISD::ROTQUAD_RZ_BITS, VT, + DAG.getNode(SPUISD::ROTQUAD_RZ_BYTES, VT, + Op0, ShiftAmtBytes), + ShiftAmtBits); + } + } + + return SDOperand(); +} + //! Lower byte immediate operations for v16i8 vectors: static SDOperand LowerByteImmed(SDOperand Op, SelectionDAG &DAG) { @@ -2438,10 +2505,13 @@ static SDOperand LowerCTPOP(SDOperand Op, SelectionDAG &DAG) { SDOperand SPUTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { - switch (Op.getOpcode()) { + unsigned Opc = (unsigned) Op.getOpcode(); + unsigned VT = (unsigned) Op.getValueType(); + + switch (Opc) { default: { cerr << "SPUTargetLowering::LowerOperation(): need to lower this!\n"; - cerr << "Op.getOpcode() = " << Op.getOpcode() << "\n"; + cerr << "Op.getOpcode() = " << Opc << "\n"; cerr << "*Op.Val:\n"; Op.Val->dump(); abort(); @@ -2471,14 +2541,22 @@ SPUTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) case ISD::RET: return LowerRET(Op, DAG, getTargetMachine()); - // i8 math ops: + + // i8, i64 math ops: + case ISD::ZERO_EXTEND: + case ISD::SIGN_EXTEND: + case ISD::ANY_EXTEND: case ISD::SUB: case ISD::ROTR: case ISD::ROTL: case ISD::SRL: case ISD::SHL: case ISD::SRA: - return LowerI8Math(Op, DAG, Op.getOpcode()); + if (VT == MVT::i8) + return LowerI8Math(Op, DAG, Opc); + else if (VT == MVT::i64) + return LowerI64Math(Op, DAG, Opc); + break; // Vector-related lowering. 
case ISD::BUILD_VECTOR: @@ -2500,15 +2578,15 @@ SPUTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) // Vector and i8 multiply: case ISD::MUL: - if (MVT::isVector(Op.getValueType())) + if (MVT::isVector(VT)) return LowerVectorMUL(Op, DAG); - else if (Op.getValueType() == MVT::i8) - return LowerI8Math(Op, DAG, Op.getOpcode()); + else if (VT == MVT::i8) + return LowerI8Math(Op, DAG, Opc); else - return LowerMUL(Op, DAG, Op.getValueType(), Op.getOpcode()); + return LowerMUL(Op, DAG, VT, Opc); case ISD::FDIV: - if (Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::v4f32) + if (VT == MVT::f32 || VT == MVT::v4f32) return LowerFDIVf32(Op, DAG); // else if (Op.getValueType() == MVT::f64) // return LowerFDIVf64(Op, DAG); @@ -2534,29 +2612,12 @@ SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const #endif const SPUSubtarget *ST = SPUTM.getSubtargetImpl(); SelectionDAG &DAG = DCI.DAG; - SDOperand N0 = N->getOperand(0); // everything has at least one operand + SDOperand Op0 = N->getOperand(0); // everything has at least one operand + SDOperand Result; // Initially, NULL result switch (N->getOpcode()) { default: break; - case SPUISD::IndirectAddr: { - if (!ST->usingLargeMem() && N0.getOpcode() == SPUISD::AFormAddr) { - ConstantSDNode *CN = cast(N->getOperand(1)); - if (CN->getValue() == 0) { - // (SPUindirect (SPUaform , 0), 0) -> - // (SPUaform , 0) - - DEBUG(cerr << "Replace: "); - DEBUG(N->dump(&DAG)); - DEBUG(cerr << "\nWith: "); - DEBUG(N0.Val->dump(&DAG)); - DEBUG(cerr << "\n"); - - return N0; - } - } - } case ISD::ADD: { - SDOperand Op0 = N->getOperand(0); SDOperand Op1 = N->getOperand(1); if ((Op1.getOpcode() == ISD::Constant @@ -2603,10 +2664,104 @@ SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const Op1.getOperand(0), combinedConst); } } + break; + } + case ISD::SIGN_EXTEND: + case ISD::ZERO_EXTEND: + case ISD::ANY_EXTEND: { + if (Op0.getOpcode() == SPUISD::EXTRACT_ELT0 && + N->getValueType(0) == Op0.getValueType()) { + // (any_extend (SPUextract_elt0 )) -> + // (SPUextract_elt0 ) + // Types must match, however... + DEBUG(cerr << "Replace: "); + DEBUG(N->dump(&DAG)); + DEBUG(cerr << "\nWith: "); + DEBUG(Op0.Val->dump(&DAG)); + DEBUG(cerr << "\n"); + + return Op0; + } + break; + } + case SPUISD::IndirectAddr: { + if (!ST->usingLargeMem() && Op0.getOpcode() == SPUISD::AFormAddr) { + ConstantSDNode *CN = cast(N->getOperand(1)); + if (CN->getValue() == 0) { + // (SPUindirect (SPUaform , 0), 0) -> + // (SPUaform , 0) + + DEBUG(cerr << "Replace: "); + DEBUG(N->dump(&DAG)); + DEBUG(cerr << "\nWith: "); + DEBUG(Op0.Val->dump(&DAG)); + DEBUG(cerr << "\n"); + + return Op0; + } + } + break; + } + case SPUISD::SHLQUAD_L_BITS: + case SPUISD::SHLQUAD_L_BYTES: + case SPUISD::VEC_SHL: + case SPUISD::VEC_SRL: + case SPUISD::VEC_SRA: + case SPUISD::ROTQUAD_RZ_BYTES: + case SPUISD::ROTQUAD_RZ_BITS: { + SDOperand Op1 = N->getOperand(1); + + if (isa(Op1)) { + // Kill degenerate vector shifts: + ConstantSDNode *CN = cast(Op1); + + if (CN->getValue() == 0) { + Result = Op0; + } + } + break; + } + case SPUISD::PROMOTE_SCALAR: { + switch (Op0.getOpcode()) { + default: + break; + case ISD::ANY_EXTEND: + case ISD::ZERO_EXTEND: + case ISD::SIGN_EXTEND: { + // (SPUpromote_scalar (any|sign|zero_extend (SPUextract_elt0 ))) -> + // + // but only if the SPUpromote_scalar and types match. 
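+        // Illustrative shape of the chain collapsed here (comment only):
+        //   (SPUpromote_scalar (zero_extend (SPUextract_elt0 v))) -> v
+        // which is only safe when v already has the promoted vector type.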
+ SDOperand Op00 = Op0.getOperand(0); + if (Op00.getOpcode() == SPUISD::EXTRACT_ELT0) { + SDOperand Op000 = Op00.getOperand(0); + if (Op000.getValueType() == N->getValueType(0)) { + Result = Op000; + } + } + break; + } + case SPUISD::EXTRACT_ELT0: { + // (SPUpromote_scalar (SPUextract_elt0 )) -> + // + Result = Op0.getOperand(0); + break; + } + } + break; } } // Otherwise, return unchanged. - return SDOperand(); +#if 0 + if (Result.Val) { + DEBUG(cerr << "\nReplace.SPU: "); + DEBUG(N->dump(&DAG)); + DEBUG(cerr << "\nWith: "); + DEBUG(Result.Val->dump(&DAG)); + DEBUG(cerr << "\n"); + } +#endif + + return Result; } //===----------------------------------------------------------------------===// @@ -2657,6 +2812,7 @@ SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); } +//! Compute used/known bits for a SPU operand void SPUTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, const APInt &Mask, @@ -2664,7 +2820,66 @@ SPUTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, APInt &KnownOne, const SelectionDAG &DAG, unsigned Depth ) const { - KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); + const uint64_t uint64_sizebits = sizeof(uint64_t) * 8; + + switch (Op.getOpcode()) { + default: + // KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); + break; + +#if 0 + case CALL: + case SHUFB: + case INSERT_MASK: + case CNTB: +#endif + + case SPUISD::PROMOTE_SCALAR: { + SDOperand Op0 = Op.getOperand(0); + uint64_t InMask = MVT::getIntVTBitMask(Op0.getValueType()); + KnownZero |= APInt(uint64_sizebits, ~InMask, false); + KnownOne |= APInt(uint64_sizebits, InMask, false); + break; + } + + case SPUISD::LDRESULT: + case SPUISD::EXTRACT_ELT0: + case SPUISD::EXTRACT_ELT0_CHAINED: { + uint64_t InMask = MVT::getIntVTBitMask(Op.getValueType()); + KnownZero |= APInt(uint64_sizebits, ~InMask, false); + KnownOne |= APInt(uint64_sizebits, InMask, false); + break; + } + +#if 0 + case EXTRACT_I1_ZEXT: + case EXTRACT_I1_SEXT: + case EXTRACT_I8_ZEXT: + case EXTRACT_I8_SEXT: + case MPY: + case MPYU: + case MPYH: + case MPYHH: + case SHLQUAD_L_BITS: + case SHLQUAD_L_BYTES: + case VEC_SHL: + case VEC_SRL: + case VEC_SRA: + case VEC_ROTL: + case VEC_ROTR: + case ROTQUAD_RZ_BYTES: + case ROTQUAD_RZ_BITS: + case ROTBYTES_RIGHT_S: + case ROTBYTES_LEFT: + case ROTBYTES_LEFT_CHAINED: + case FSMBI: + case SELB: + case SFPConstant: + case FPInterp: + case FPRecipEst: + case SEXT32TO64: +#endif + } } // LowerAsmOperandForConstraint diff --git a/lib/Target/CellSPU/SPUISelLowering.h b/lib/Target/CellSPU/SPUISelLowering.h index 847c668d709..a6147bff9b1 100644 --- a/lib/Target/CellSPU/SPUISelLowering.h +++ b/lib/Target/CellSPU/SPUISelLowering.h @@ -50,12 +50,15 @@ namespace llvm { MPYU, ///< Multiply Unsigned MPYH, ///< Multiply High MPYHH, ///< Multiply High-High + SHLQUAD_L_BITS, ///< Rotate quad left, by bits + SHLQUAD_L_BYTES, ///< Rotate quad left, by bytes VEC_SHL, ///< Vector shift left VEC_SRL, ///< Vector shift right (logical) VEC_SRA, ///< Vector shift right (arithmetic) VEC_ROTL, ///< Vector rotate left VEC_ROTR, ///< Vector rotate right - ROTBYTES_RIGHT_Z, ///< Vector rotate right, by bytes, zero fill + ROTQUAD_RZ_BYTES, ///< Rotate quad right, by bytes, zero fill + ROTQUAD_RZ_BITS, ///< Rotate quad right, by bits, zero fill ROTBYTES_RIGHT_S, ///< Vector rotate right, by bytes, sign fill ROTBYTES_LEFT, ///< Rotate bytes (loads -> ROTQBYI) ROTBYTES_LEFT_CHAINED, ///< Rotate bytes (loads -> 
ROTQBYI), with chain diff --git a/lib/Target/CellSPU/SPUInstrFormats.td b/lib/Target/CellSPU/SPUInstrFormats.td index eda1ab3da47..f423dfa3420 100644 --- a/lib/Target/CellSPU/SPUInstrFormats.td +++ b/lib/Target/CellSPU/SPUInstrFormats.td @@ -14,7 +14,7 @@ // This was kiped from the PPC instruction formats (seemed like a good idea...) -class I +class SPUInstr : Instruction { field bits<32> Inst; @@ -28,7 +28,7 @@ class I // RR Format class RRForm opcode, dag OOL, dag IOL, string asmstr, InstrItinClass itin, list pattern> - : I { + : SPUInstr { bits<7> RA; bits<7> RB; bits<7> RT; @@ -70,7 +70,7 @@ let RT = 0 in { // RRR Format class RRRForm opcode, dag OOL, dag IOL, string asmstr, InstrItinClass itin, list pattern> - : I + : SPUInstr { bits<7> RA; bits<7> RB; @@ -89,7 +89,7 @@ class RRRForm opcode, dag OOL, dag IOL, string asmstr, // RI7 Format class RI7Form opcode, dag OOL, dag IOL, string asmstr, InstrItinClass itin, list pattern> - : I + : SPUInstr { bits<7> i7; bits<7> RA; @@ -106,7 +106,7 @@ class RI7Form opcode, dag OOL, dag IOL, string asmstr, // CVTIntFp Format class CVTIntFPForm opcode, dag OOL, dag IOL, string asmstr, InstrItinClass itin, list pattern> - : I + : SPUInstr { bits<7> RA; bits<7> RT; @@ -149,7 +149,7 @@ let RA = 0 in { // Branch indirect external data forms: class BISLEDForm DE_flag, string asmstr, list pattern> - : I<(outs), (ins indcalltarget:$func), asmstr, BranchResolv> + : SPUInstr<(outs), (ins indcalltarget:$func), asmstr, BranchResolv> { bits<7> Rcalldest; @@ -166,7 +166,7 @@ class BISLEDForm DE_flag, string asmstr, list pattern> // RI10 Format class RI10Form opcode, dag OOL, dag IOL, string asmstr, InstrItinClass itin, list pattern> - : I + : SPUInstr { bits<10> i10; bits<7> RA; @@ -182,43 +182,27 @@ class RI10Form opcode, dag OOL, dag IOL, string asmstr, // RI10 Format, where the constant is zero (or effectively ignored by the // SPU) -class RI10Form_1 opcode, dag OOL, dag IOL, string asmstr, - InstrItinClass itin, list pattern> - : I -{ - bits<7> RA; - bits<7> RT; - - let Pattern = pattern; - - let Inst{0-7} = opcode; - let Inst{8-17} = 0; - let Inst{18-24} = RA; - let Inst{25-31} = RT; +let i10 = 0 in { + class RI10Form_1 opcode, dag OOL, dag IOL, string asmstr, + InstrItinClass itin, list pattern> + : RI10Form + { } } // RI10 Format, where RT is ignored. // This format is used primarily by the Halt If ... 
Immediate set of // instructions -class RI10Form_2 opcode, dag OOL, dag IOL, string asmstr, - InstrItinClass itin, list pattern> - : I -{ - bits<10> i10; - bits<7> RA; - - let Pattern = pattern; - - let Inst{0-7} = opcode; - let Inst{8-17} = i10; - let Inst{18-24} = RA; - let Inst{25-31} = 0; +let RT = 0 in { + class RI10Form_2 opcode, dag OOL, dag IOL, string asmstr, + InstrItinClass itin, list pattern> + : RI10Form + { } } // RI16 Format class RI16Form opcode, dag OOL, dag IOL, string asmstr, InstrItinClass itin, list pattern> - : I + : SPUInstr { bits<16> i16; bits<7> RT; @@ -254,7 +238,7 @@ let RT = 0 in { // RI18 Format class RI18Form opcode, dag OOL, dag IOL, string asmstr, InstrItinClass itin, list pattern> - : I + : SPUInstr { bits<18> i18; bits<7> RT; @@ -306,7 +290,7 @@ class RR_Int_v4i32 opcode, string opc, InstrItinClass itin, //===----------------------------------------------------------------------===// class Pseudo pattern> - : I { + : SPUInstr { let Pattern = pattern; let Inst{31-0} = 0; } diff --git a/lib/Target/CellSPU/SPUInstrInfo.cpp b/lib/Target/CellSPU/SPUInstrInfo.cpp index 64f6225f707..5eb467eaf25 100644 --- a/lib/Target/CellSPU/SPUInstrInfo.cpp +++ b/lib/Target/CellSPU/SPUInstrInfo.cpp @@ -49,14 +49,13 @@ SPUInstrInfo::isMoveInstr(const MachineInstr& MI, break; case SPU::ORIv4i32: case SPU::ORIr32: - case SPU::ORIr64: case SPU::ORHIv8i16: case SPU::ORHIr16: - case SPU::ORHI1To2: + case SPU::ORHIi8i16: case SPU::ORBIv16i8: case SPU::ORBIr8: - case SPU::ORI2To4: - case SPU::ORI1To4: + case SPU::ORIi16i32: + case SPU::ORIi8i32: case SPU::AHIvec: case SPU::AHIr16: case SPU::AIvec: @@ -103,7 +102,6 @@ SPUInstrInfo::isMoveInstr(const MachineInstr& MI, case SPU::ORr64: case SPU::ORf32: case SPU::ORf64: - case SPU::ORgprc: assert(MI.getNumOperands() == 3 && MI.getOperand(0).isRegister() && MI.getOperand(1).isRegister() && @@ -203,14 +201,15 @@ void SPUInstrInfo::copyRegToReg(MachineBasicBlock &MBB, BuildMI(MBB, MI, get(SPU::ORf32), DestReg).addReg(SrcReg) .addReg(SrcReg); } else if (DestRC == SPU::R64CRegisterClass) { - BuildMI(MBB, MI, get(SPU::ORIr64), DestReg).addReg(SrcReg).addImm(0); + BuildMI(MBB, MI, get(SPU::ORr64), DestReg).addReg(SrcReg) + .addReg(SrcReg); } else if (DestRC == SPU::R64FPRegisterClass) { BuildMI(MBB, MI, get(SPU::ORf64), DestReg).addReg(SrcReg) .addReg(SrcReg); - } else if (DestRC == SPU::GPRCRegisterClass) { + } /* else if (DestRC == SPU::GPRCRegisterClass) { BuildMI(MBB, MI, get(SPU::ORgprc), DestReg).addReg(SrcReg) .addReg(SrcReg); - } else if (DestRC == SPU::VECREGRegisterClass) { + } */ else if (DestRC == SPU::VECREGRegisterClass) { BuildMI(MBB, MI, get(SPU::ORv4i32), DestReg).addReg(SrcReg) .addReg(SrcReg); } else { diff --git a/lib/Target/CellSPU/SPUInstrInfo.td b/lib/Target/CellSPU/SPUInstrInfo.td index 7f86ae14d50..cfe47c6d32c 100644 --- a/lib/Target/CellSPU/SPUInstrInfo.td +++ b/lib/Target/CellSPU/SPUInstrInfo.td @@ -374,53 +374,45 @@ def ILHUf32: // ILHUhi: Used for loading high portion of an address. Note the symbolHi // printer used for the operand. 
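// For illustration (not part of the patch): paired with IOHL below, ILHU
// materializes a full 32-bit constant or address, e.g. 0x12345678:
//   ilhu $r, 0x1234     ($r = 0x12340000)
//   iohl $r, 0x5678     ($r |= 0x00005678)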
-def ILHUhi : RI16Form<0b010000010, (outs R32C:$rT), (ins symbolHi:$val), +def ILHUhi: + RI16Form<0b010000010, (outs R32C:$rT), (ins symbolHi:$val), "ilhu\t$rT, $val", ImmLoad, [(set R32C:$rT, hi16:$val)]>; // Immediate load address (can also be used to load 18-bit unsigned constants, // see the zext 16->32 pattern) -def ILAr64: - RI18Form<0b1000010, (outs R64C:$rT), (ins u18imm_i64:$val), - "ila\t$rT, $val", LoadNOP, - [(set R64C:$rT, imm18:$val)]>; +class ILAInst pattern>: + RI18Form<0b1000010, OOL, IOL, "ila\t$rT, $val", + LoadNOP, pattern>; -// TODO: ILAv2i64 +multiclass ImmLoadAddress +{ + def v2i64: ILAInst<(outs VECREG:$rT), (ins u18imm:$val), + [(set (v2i64 VECREG:$rT), v2i64Uns18Imm:$val)]>; -def ILAv2i64: - RI18Form<0b1000010, (outs VECREG:$rT), (ins u18imm:$val), - "ila\t$rT, $val", LoadNOP, - [(set (v2i64 VECREG:$rT), v2i64Uns18Imm:$val)]>; + def v4i32: ILAInst<(outs VECREG:$rT), (ins u18imm:$val), + [(set (v4i32 VECREG:$rT), v4i32Uns18Imm:$val)]>; -def ILAv4i32: - RI18Form<0b1000010, (outs VECREG:$rT), (ins u18imm:$val), - "ila\t$rT, $val", LoadNOP, - [(set (v4i32 VECREG:$rT), v4i32Uns18Imm:$val)]>; + def r64: ILAInst<(outs R64C:$rT), (ins u18imm_i64:$val), + [(set R64C:$rT, imm18:$val)]>; -def ILAr32: - RI18Form<0b1000010, (outs R32C:$rT), (ins u18imm:$val), - "ila\t$rT, $val", LoadNOP, - [(set R32C:$rT, imm18:$val)]>; + def r32: ILAInst<(outs R32C:$rT), (ins u18imm:$val), + [(set R32C:$rT, imm18:$val)]>; -def ILAf32: - RI18Form<0b1000010, (outs R32FP:$rT), (ins f18imm:$val), - "ila\t$rT, $val", LoadNOP, - [(set R32FP:$rT, fpimm18:$val)]>; + def f32: ILAInst<(outs R32FP:$rT), (ins f18imm:$val), + [(set R32FP:$rT, fpimm18:$val)]>; -def ILAf64: - RI18Form<0b1000010, (outs R64FP:$rT), (ins f18imm_f64:$val), - "ila\t$rT, $val", LoadNOP, - [(set R64FP:$rT, fpimm18:$val)]>; + def f64: ILAInst<(outs R64FP:$rT), (ins f18imm_f64:$val), + [(set R64FP:$rT, fpimm18:$val)]>; -def ILAlo: - RI18Form<0b1000010, (outs R32C:$rT), (ins symbolLo:$val), - "ila\t$rT, $val", ImmLoad, - [(set R32C:$rT, imm18:$val)]>; + def lo: ILAInst<(outs R32C:$rT), (ins symbolLo:$val), + [(set R32C:$rT, imm18:$val)]>; -def ILAlsa: - RI18Form<0b1000010, (outs R32C:$rT), (ins symbolLSA:$val), - "ila\t$rT, $val", ImmLoad, - [/* no pattern */]>; + def lsa: ILAInst<(outs R32C:$rT), (ins symbolLSA:$val), + [/* no pattern */]>; +} + +defm ILA : ImmLoadAddress; // Immediate OR, Halfword Lower: The "other" part of loading large constants // into 32-bit registers. See the anonymous pattern Pat<(i32 imm:$imm), ...> @@ -465,7 +457,7 @@ class FSMBIVec [(set (vectype VECREG:$rT), (SPUfsmbi immU16:$val))]> { } -multiclass FSMBIs +multiclass FormSelectMaskBytesImm { def v16i8: FSMBIVec; def v8i16: FSMBIVec; @@ -473,7 +465,27 @@ multiclass FSMBIs def v2i64: FSMBIVec; } -defm FSMBI : FSMBIs; +defm FSMBI : FormSelectMaskBytesImm; + +// fsmb: Form select mask for bytes. N.B. Input operand, $rA, is 16-bits +def FSMB: + RRForm_1<0b01101101100, (outs VECREG:$rT), (ins R16C:$rA), + "fsmb\t$rT, $rA", SelectOp, + []>; + +// fsmh: Form select mask for halfwords. N.B., Input operand, $rA, is +// only 8-bits wide (even though it's input as 16-bits here) +def FSMH: + RRForm_1<0b10101101100, (outs VECREG:$rT), (ins R16C:$rA), + "fsmh\t$rT, $rA", SelectOp, + []>; + +// fsm: Form select mask for words. Like the other fsm* instructions, +// only the lower 4 bits of $rA are significant. 
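+// Illustrative semantics (an assumption, not stated in the patch): each of
+// those four low bits fans out to one 32-bit word of the result, e.g. a low
+// nibble of 0b1010 would yield (0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
+// 0x00000000), with the most significant of the four bits mapping to word
+// slot 0.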
+def FSM: + RRForm_1<0b00101101100, (outs VECREG:$rT), (ins R16C:$rA), + "fsm\t$rT, $rA", SelectOp, + []>; //===----------------------------------------------------------------------===// // Integer and Logical Operations: @@ -487,8 +499,6 @@ def AHv8i16: def : Pat<(add (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)), (AHv8i16 VECREG:$rA, VECREG:$rB)>; -// [(set (v8i16 VECREG:$rT), (add (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>; - def AHr16: RRForm<0b00010011000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB), "ah\t$rT, $rA, $rB", IntegerOp, @@ -500,20 +510,23 @@ def AHIvec: [(set (v8i16 VECREG:$rT), (add (v8i16 VECREG:$rA), v8i16SExt10Imm:$val))]>; -def AHIr16 : RI10Form<0b10111000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val), - "ahi\t$rT, $rA, $val", IntegerOp, - [(set R16C:$rT, (add R16C:$rA, v8i16SExt10Imm:$val))]>; +def AHIr16: + RI10Form<0b10111000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val), + "ahi\t$rT, $rA, $val", IntegerOp, + [(set R16C:$rT, (add R16C:$rA, v8i16SExt10Imm:$val))]>; -def Avec : RRForm<0b00000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), - "a\t$rT, $rA, $rB", IntegerOp, - [(set (v4i32 VECREG:$rT), (add (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>; +def Avec: + RRForm<0b00000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), + "a\t$rT, $rA, $rB", IntegerOp, + [(set (v4i32 VECREG:$rT), (add (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>; def : Pat<(add (v16i8 VECREG:$rA), (v16i8 VECREG:$rB)), (Avec VECREG:$rA, VECREG:$rB)>; -def Ar32 : RRForm<0b00000011000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB), - "a\t$rT, $rA, $rB", IntegerOp, - [(set R32C:$rT, (add R32C:$rA, R32C:$rB))]>; +def Ar32: + RRForm<0b00000011000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB), + "a\t$rT, $rA, $rB", IntegerOp, + [(set R32C:$rT, (add R32C:$rA, R32C:$rB))]>; def Ar8: RRForm<0b00000011000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB), @@ -802,26 +815,6 @@ def CNTBv4i32 : "cntb\t$rT, $rA", IntegerOp, [(set (v4i32 VECREG:$rT), (SPUcntb_v4i32 (v4i32 VECREG:$rA)))]>; -// fsmb: Form select mask for bytes. N.B. Input operand, $rA, is 16-bits -def FSMB: - RRForm_1<0b01101101100, (outs VECREG:$rT), (ins R16C:$rA), - "fsmb\t$rT, $rA", SelectOp, - []>; - -// fsmh: Form select mask for halfwords. N.B., Input operand, $rA, is -// only 8-bits wide (even though it's input as 16-bits here) -def FSMH: - RRForm_1<0b10101101100, (outs VECREG:$rT), (ins R16C:$rA), - "fsmh\t$rT, $rA", SelectOp, - []>; - -// fsm: Form select mask for words. Like the other fsm* instructions, -// only the lower 4 bits of $rA are significant. 
-def FSM: - RRForm_1<0b00101101100, (outs VECREG:$rT), (ins R16C:$rA), - "fsm\t$rT, $rA", SelectOp, - []>; - // gbb: Gather all low order bits from each byte in $rA into a single 16-bit // quantity stored into $rT def GBB: @@ -923,281 +916,257 @@ def : Pat<(sext R32C:$inp), (XSWDr32 R32C:$inp)>; // AND operations -def ANDv16i8: - RRForm<0b10000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), - "and\t$rT, $rA, $rB", IntegerOp, - [(set (v16i8 VECREG:$rT), (and (v16i8 VECREG:$rA), - (v16i8 VECREG:$rB)))]>; -def ANDv8i16: - RRForm<0b10000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), - "and\t$rT, $rA, $rB", IntegerOp, - [(set (v8i16 VECREG:$rT), (and (v8i16 VECREG:$rA), - (v8i16 VECREG:$rB)))]>; +class ANDInst pattern> : + RRForm<0b10000011000, OOL, IOL, "and\t$rT, $rA, $rB", + IntegerOp, pattern>; -def ANDv4i32: - RRForm<0b10000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), - "and\t$rT, $rA, $rB", IntegerOp, - [(set (v4i32 VECREG:$rT), (and (v4i32 VECREG:$rA), - (v4i32 VECREG:$rB)))]>; +class ANDVecInst: + ANDInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), + [(set (vectype VECREG:$rT), (and (vectype VECREG:$rA), + (vectype VECREG:$rB)))]>; -def ANDr32: - RRForm<0b10000011000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB), - "and\t$rT, $rA, $rB", IntegerOp, - [(set R32C:$rT, (and R32C:$rA, R32C:$rB))]>; +multiclass BitwiseAnd +{ + def v16i8: ANDVecInst; + def v8i16: ANDVecInst; + def v4i32: ANDVecInst; + def v2i64: ANDVecInst; -//===--------------------------------------------- -// Special instructions to perform the fabs instruction -def ANDfabs32: - RRForm<0b10000011000, (outs R32FP:$rT), (ins R32FP:$rA, R32C:$rB), - "and\t$rT, $rA, $rB", IntegerOp, - [/* Intentionally does not match a pattern */]>; + def r64: ANDInst<(outs R64C:$rT), (ins R64C:$rA, R64C:$rB), + [(set R64C:$rT, (and R64C:$rA, R64C:$rB))]>; -def ANDfabs64: - RRForm<0b10000011000, (outs R64FP:$rT), (ins R64FP:$rA, VECREG:$rB), - "and\t$rT, $rA, $rB", IntegerOp, - [/* Intentionally does not match a pattern */]>; + def r32: ANDInst<(outs R32C:$rT), (ins R32C:$rA, R32C:$rB), + [(set R32C:$rT, (and R32C:$rA, R32C:$rB))]>; -// Could use ANDv4i32, but won't for clarity -def ANDfabsvec: - RRForm<0b10000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), - "and\t$rT, $rA, $rB", IntegerOp, - [/* Intentionally does not match a pattern */]>; + def r16: ANDInst<(outs R16C:$rT), (ins R16C:$rA, R16C:$rB), + [(set R16C:$rT, (and R16C:$rA, R16C:$rB))]>; -//===--------------------------------------------- + def r8: ANDInst<(outs R8C:$rT), (ins R8C:$rA, R8C:$rB), + [(set R8C:$rT, (and R8C:$rA, R8C:$rB))]>; -def ANDr16: - RRForm<0b10000011000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB), - "and\t$rT, $rA, $rB", IntegerOp, - [(set R16C:$rT, (and R16C:$rA, R16C:$rB))]>; + //===--------------------------------------------- + // Special instructions to perform the fabs instruction + def fabs32: ANDInst<(outs R32FP:$rT), (ins R32FP:$rA, R32C:$rB), + [/* Intentionally does not match a pattern */]>; -def ANDr8: - RRForm<0b10000011000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB), - "and\t$rT, $rA, $rB", IntegerOp, - [(set R8C:$rT, (and R8C:$rA, R8C:$rB))]>; + def fabs64: ANDInst<(outs R64FP:$rT), (ins R64FP:$rA, VECREG:$rB), + [/* Intentionally does not match a pattern */]>; -// Hacked form of AND to zero-extend 16-bit quantities to 32-bit -// quantities -- see 16->32 zext pattern. -// -// This pattern is somewhat artificial, since it might match some -// compiler generated pattern but it is unlikely to do so. 
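// Illustrative IR the zext-AND patterns are meant to match (assumption):
//   %z = zext i16 %a to i32
//   %r = and i32 %z, %b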
-def AND2To4: - RRForm<0b10000011000, (outs R32C:$rT), (ins R16C:$rA, R32C:$rB), - "and\t$rT, $rA, $rB", IntegerOp, - [(set R32C:$rT, (and (zext R16C:$rA), R32C:$rB))]>; + // Could use v4i32, but won't for clarity + def fabsvec: ANDInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), + [/* Intentionally does not match a pattern */]>; + + //===--------------------------------------------- + + // Hacked form of AND to zero-extend 16-bit quantities to 32-bit + // quantities -- see 16->32 zext pattern. + // + // This pattern is somewhat artificial, since it might match some + // compiler generated pattern but it is unlikely to do so. + + def i16i32: ANDInst<(outs R32C:$rT), (ins R16C:$rA, R32C:$rB), + [(set R32C:$rT, (and (zext R16C:$rA), R32C:$rB))]>; +} + +defm AND : BitwiseAnd; // N.B.: vnot_conv is one of those special target selection pattern fragments, // in which we expect there to be a bit_convert on the constant. Bear in mind // that llvm translates "not " to "xor , -1" (or in this case, a // constant -1 vector.) -def ANDCv16i8: - RRForm<0b10000011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), - "andc\t$rT, $rA, $rB", IntegerOp, - [(set (v16i8 VECREG:$rT), (and (v16i8 VECREG:$rA), - (vnot (v16i8 VECREG:$rB))))]>; -def ANDCv8i16: - RRForm<0b10000011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), - "andc\t$rT, $rA, $rB", IntegerOp, - [(set (v8i16 VECREG:$rT), (and (v8i16 VECREG:$rA), - (vnot (v8i16 VECREG:$rB))))]>; +class ANDCInst pattern>: + RRForm<0b10000011010, OOL, IOL, "andc\t$rT, $rA, $rB", + IntegerOp, pattern>; -def ANDCv4i32: - RRForm<0b10000011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), - "andc\t$rT, $rA, $rB", IntegerOp, - [(set (v4i32 VECREG:$rT), (and (v4i32 VECREG:$rA), - (vnot (v4i32 VECREG:$rB))))]>; +class ANDCVecInst: + ANDCInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), + [(set (vectype VECREG:$rT), (and (vectype VECREG:$rA), + (vnot (vectype VECREG:$rB))))]>; -def ANDCr32: - RRForm<0b10000011010, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB), - "andc\t$rT, $rA, $rB", IntegerOp, - [(set R32C:$rT, (and R32C:$rA, (not R32C:$rB)))]>; +class ANDCRegInst: + ANDCInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB), + [(set rclass:$rT, (and rclass:$rA, (not rclass:$rB)))]>; -def ANDCr16: - RRForm<0b10000011010, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB), - "andc\t$rT, $rA, $rB", IntegerOp, - [(set R16C:$rT, (and R16C:$rA, (not R16C:$rB)))]>; +multiclass AndComplement +{ + def v16i8: ANDCVecInst; + def v8i16: ANDCVecInst; + def v4i32: ANDCVecInst; + def v2i64: ANDCVecInst; -def ANDCr8: - RRForm<0b10000011010, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB), - "andc\t$rT, $rA, $rB", IntegerOp, - [(set R8C:$rT, (and R8C:$rA, (not R8C:$rB)))]>; + def r128: ANDCRegInst; + def r64: ANDCRegInst; + def r32: ANDCRegInst; + def r16: ANDCRegInst; + def r8: ANDCRegInst; +} -def ANDBIv16i8: - RI10Form<0b01101000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val), - "andbi\t$rT, $rA, $val", IntegerOp, - [(set (v16i8 VECREG:$rT), - (and (v16i8 VECREG:$rA), (v16i8 v16i8U8Imm:$val)))]>; +defm ANDC : AndComplement; -def ANDBIr8: - RI10Form<0b01101000, (outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val), - "andbi\t$rT, $rA, $val", IntegerOp, - [(set R8C:$rT, (and R8C:$rA, immU8:$val))]>; +class ANDBIInst pattern>: + RI10Form<0b01101000, OOL, IOL, "andbi\t$rT, $rA, $val", + IntegerOp, pattern>; -def ANDHIv8i16: - RI10Form<0b10101000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val), - "andhi\t$rT, $rA, $val", IntegerOp, - [(set (v8i16 VECREG:$rT), - (and (v8i16 VECREG:$rA), 
v8i16SExt10Imm:$val))]>; +multiclass AndByteImm +{ + def v16i8: ANDBIInst<(outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val), + [(set (v16i8 VECREG:$rT), + (and (v16i8 VECREG:$rA), + (v16i8 v16i8U8Imm:$val)))]>; -def ANDHIr16: - RI10Form<0b10101000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val), - "andhi\t$rT, $rA, $val", IntegerOp, - [(set R16C:$rT, (and R16C:$rA, i16ImmUns10:$val))]>; + def r8: ANDBIInst<(outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val), + [(set R8C:$rT, (and R8C:$rA, immU8:$val))]>; +} -def ANDHI1To2: - RI10Form<0b10101000, (outs R16C:$rT), (ins R8C:$rA, s10imm:$val), - "andhi\t$rT, $rA, $val", IntegerOp, - [(set R16C:$rT, (and (zext R8C:$rA), i16ImmSExt10:$val))]>; +defm ANDBI : AndByteImm; -def ANDIv4i32: - RI10Form<0b00101000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val), - "andi\t$rT, $rA, $val", IntegerOp, - [(set (v4i32 VECREG:$rT), - (and (v4i32 VECREG:$rA), v4i32SExt10Imm:$val))]>; +class ANDHIInst pattern> : + RI10Form<0b10101000, OOL, IOL, "andhi\t$rT, $rA, $val", + IntegerOp, pattern>; -def ANDIr32: - RI10Form<0b10101000, (outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val), - "andi\t$rT, $rA, $val", IntegerOp, - [(set R32C:$rT, (and R32C:$rA, i32ImmSExt10:$val))]>; +multiclass AndHalfwordImm +{ + def v8i16: ANDHIInst<(outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val), + [(set (v8i16 VECREG:$rT), + (and (v8i16 VECREG:$rA), v8i16SExt10Imm:$val))]>; -// Hacked form of ANDI to zero-extend i8 quantities to i32. See the zext 8->32 -// pattern below. -def ANDI1To4: - RI10Form<0b10101000, (outs R32C:$rT), (ins R8C:$rA, s10imm_i32:$val), - "andi\t$rT, $rA, $val", IntegerOp, - [(set R32C:$rT, (and (zext R8C:$rA), i32ImmSExt10:$val))]>; + def r16: ANDHIInst<(outs R16C:$rT), (ins R16C:$rA, u10imm:$val), + [(set R16C:$rT, (and R16C:$rA, i16ImmUns10:$val))]>; -// Hacked form of ANDI to zero-extend i16 quantities to i32. See the -// zext 16->32 pattern below. -// -// Note that this pattern is somewhat artificial, since it might match -// something the compiler generates but is unlikely to occur in practice. -def ANDI2To4: - RI10Form<0b10101000, (outs R32C:$rT), (ins R16C:$rA, s10imm_i32:$val), - "andi\t$rT, $rA, $val", IntegerOp, - [(set R32C:$rT, (and (zext R16C:$rA), i32ImmSExt10:$val))]>; + // Zero-extend i8 to i16: + def i8i16: ANDHIInst<(outs R16C:$rT), (ins R8C:$rA, u10imm:$val), + [(set R16C:$rT, (and (zext R8C:$rA), i16ImmUns10:$val))]>; +} +defm ANDHI : AndHalfwordImm; + +class ANDIInst pattern> : + RI10Form<0b00101000, OOL, IOL, "andi\t$rT, $rA, $val", + IntegerOp, pattern>; + +multiclass AndWordImm +{ + def v4i32: ANDIInst<(outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val), + [(set (v4i32 VECREG:$rT), + (and (v4i32 VECREG:$rA), v4i32SExt10Imm:$val))]>; + + def r32: ANDIInst<(outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val), + [(set R32C:$rT, (and R32C:$rA, i32ImmSExt10:$val))]>; + + // Hacked form of ANDI to zero-extend i8 quantities to i32. See the zext 8->32 + // pattern below. + def i8i32: ANDIInst<(outs R32C:$rT), (ins R8C:$rA, s10imm_i32:$val), + [(set R32C:$rT, + (and (zext R8C:$rA), i32ImmSExt10:$val))]>; + + // Hacked form of ANDI to zero-extend i16 quantities to i32. See the + // zext 16->32 pattern below. + // + // Note that this pattern is somewhat artificial, since it might match + // something the compiler generates but is unlikely to occur in practice. 
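+  // For illustration (assumption): the i16i32 form below matches
+  //   %z = zext i16 %a to i32
+  //   %r = and i32 %z, 42   ; constant must fit the signed 10-bit field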
+ def i16i32: ANDIInst<(outs R32C:$rT), (ins R16C:$rA, s10imm_i32:$val), + [(set R32C:$rT, + (and (zext R16C:$rA), i32ImmSExt10:$val))]>; +} + +defm ANDI : AndWordImm; + +//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ // Bitwise OR group: +//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ + // Bitwise "or" (N.B.: These are also register-register copy instructions...) -def ORv16i8: - RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [(set (v16i8 VECREG:$rT), (or (v16i8 VECREG:$rA), (v16i8 VECREG:$rB)))]>; +class ORInst pattern>: + RRForm<0b10000010000, OOL, IOL, "or\t$rT, $rA, $rB", + IntegerOp, pattern>; -def ORv8i16: - RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [(set (v8i16 VECREG:$rT), (or (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>; +class ORVecInst: + ORInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), + [(set (vectype VECREG:$rT), (or (vectype VECREG:$rA), + (vectype VECREG:$rB)))]>; -def ORv4i32: - RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [(set (v4i32 VECREG:$rT), (or (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>; +class ORRegInst: + ORInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB), + [(set rclass:$rT, (or rclass:$rA, rclass:$rB))]>; -def ORv4f32: - RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [(set (v4f32 VECREG:$rT), - (v4f32 (bitconvert (or (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))))]>; +class ORPromoteScalar: + ORInst<(outs VECREG:$rT), (ins rclass:$rA, rclass:$rB), + [/* no pattern */]>; -def ORv2f64: - RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [(set (v2f64 VECREG:$rT), - (v2f64 (bitconvert (or (v2i64 VECREG:$rA), (v2i64 VECREG:$rB)))))]>; +class ORExtractElt: + ORInst<(outs rclass:$rT), (ins VECREG:$rA, VECREG:$rB), + [/* no pattern */]>; -def ORgprc: - RRForm<0b10000010000, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [(set GPRC:$rT, (or GPRC:$rA, GPRC:$rB))]>; +multiclass BitwiseOr +{ + def v16i8: ORVecInst; + def v8i16: ORVecInst; + def v4i32: ORVecInst; + def v2i64: ORVecInst; -def ORr64: - RRForm<0b10000010000, (outs R64C:$rT), (ins R64C:$rA, R64C:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [(set R64C:$rT, (or R64C:$rA, R64C:$rB))]>; + def v4f32: ORInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), + [(set (v4f32 VECREG:$rT), + (v4f32 (bitconvert (or (v4i32 VECREG:$rA), + (v4i32 VECREG:$rB)))))]>; -def ORr32: - RRForm<0b10000010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [(set R32C:$rT, (or R32C:$rA, R32C:$rB))]>; + def v2f64: ORInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), + [(set (v2f64 VECREG:$rT), + (v2f64 (bitconvert (or (v2i64 VECREG:$rA), + (v2i64 VECREG:$rB)))))]>; -def ORr16: - RRForm<0b10000010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [(set R16C:$rT, (or R16C:$rA, R16C:$rB))]>; + def r64: ORRegInst; + def r32: ORRegInst; + def r16: ORRegInst; + def r8: ORRegInst; -def ORr8: - RRForm<0b10000010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [(set R8C:$rT, (or R8C:$rA, R8C:$rB))]>; + // OR instructions used to copy f32 and f64 registers. 
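+  // (These deliberately match no pattern: SPUInstrInfo::copyRegToReg emits
+  // them directly as "or $dst, $src, $src" moves -- see the SPUInstrInfo.cpp
+  // hunk earlier in this patch.)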
+ def f32: ORInst<(outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB), + [/* no pattern */]>; -// OR instruction forms that are used to copy f32 and f64 registers. -// They do not match patterns. -def ORf32: - RRForm<0b10000010000, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [/* no pattern */]>; + def f64: ORInst<(outs R64FP:$rT), (ins R64FP:$rA, R64FP:$rB), + [/* no pattern */]>; -def ORf64: - RRForm<0b10000010000, (outs R64FP:$rT), (ins R64FP:$rA, R64FP:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [/* no pattern */]>; + // scalar->vector promotion: + def v16i8_i8: ORPromoteScalar; + def v8i16_i16: ORPromoteScalar; + def v4i32_i32: ORPromoteScalar; + def v2i64_i64: ORPromoteScalar; + def v4f32_f32: ORPromoteScalar; + def v2f64_f64: ORPromoteScalar; -// ORv*_*: Used in scalar->vector promotions: -def ORv16i8_i8: - RRForm<0b10000010000, (outs VECREG:$rT), (ins R8C:$rA, R8C:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [/* no pattern */]>; + // extract element 0: + def i8_v16i8: ORExtractElt; + def i16_v8i16: ORExtractElt; + def i32_v4i32: ORExtractElt; + def i64_v2i64: ORExtractElt; + def f32_v4f32: ORExtractElt; + def f64_v2f64: ORExtractElt; +} +defm OR : BitwiseOr; + +// scalar->vector promotion patterns: def : Pat<(v16i8 (SPUpromote_scalar R8C:$rA)), (ORv16i8_i8 R8C:$rA, R8C:$rA)>; -def ORv8i16_i16: - RRForm<0b10000010000, (outs VECREG:$rT), (ins R16C:$rA, R16C:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [/* no pattern */]>; - def : Pat<(v8i16 (SPUpromote_scalar R16C:$rA)), (ORv8i16_i16 R16C:$rA, R16C:$rA)>; -def ORv4i32_i32: - RRForm<0b10000010000, (outs VECREG:$rT), (ins R32C:$rA, R32C:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [/* no pattern */]>; - def : Pat<(v4i32 (SPUpromote_scalar R32C:$rA)), (ORv4i32_i32 R32C:$rA, R32C:$rA)>; -def ORv2i64_i64: - RRForm<0b10000010000, (outs VECREG:$rT), (ins R64C:$rA, R64C:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [/* no pattern */]>; - def : Pat<(v2i64 (SPUpromote_scalar R64C:$rA)), (ORv2i64_i64 R64C:$rA, R64C:$rA)>; -def ORv4f32_f32: - RRForm<0b10000010000, (outs VECREG:$rT), (ins R32FP:$rA, R32FP:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [/* no pattern */]>; - def : Pat<(v4f32 (SPUpromote_scalar R32FP:$rA)), (ORv4f32_f32 R32FP:$rA, R32FP:$rA)>; -def ORv2f64_f64: - RRForm<0b10000010000, (outs VECREG:$rT), (ins R64FP:$rA, R64FP:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [/* no pattern */]>; - def : Pat<(v2f64 (SPUpromote_scalar R64FP:$rA)), (ORv2f64_f64 R64FP:$rA, R64FP:$rA)>; // ORi*_v*: Used to extract vector element 0 (the preferred slot) -def ORi8_v16i8: - RRForm<0b10000010000, (outs R8C:$rT), (ins VECREG:$rA, VECREG:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [/* no pattern */]>; def : Pat<(SPUextract_elt0 (v16i8 VECREG:$rA)), (ORi8_v16i8 VECREG:$rA, VECREG:$rA)>; @@ -1205,157 +1174,144 @@ def : Pat<(SPUextract_elt0 (v16i8 VECREG:$rA)), def : Pat<(SPUextract_elt0_chained (v16i8 VECREG:$rA)), (ORi8_v16i8 VECREG:$rA, VECREG:$rA)>; -def ORi16_v8i16: - RRForm<0b10000010000, (outs R16C:$rT), (ins VECREG:$rA, VECREG:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [/* no pattern */]>; - def : Pat<(SPUextract_elt0 (v8i16 VECREG:$rA)), (ORi16_v8i16 VECREG:$rA, VECREG:$rA)>; def : Pat<(SPUextract_elt0_chained (v8i16 VECREG:$rA)), (ORi16_v8i16 VECREG:$rA, VECREG:$rA)>; -def ORi32_v4i32: - RRForm<0b10000010000, (outs R32C:$rT), (ins VECREG:$rA, VECREG:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [/* no pattern */]>; - def : Pat<(SPUextract_elt0 (v4i32 VECREG:$rA)), (ORi32_v4i32 VECREG:$rA, VECREG:$rA)>; def : Pat<(SPUextract_elt0_chained 
(v4i32 VECREG:$rA)), (ORi32_v4i32 VECREG:$rA, VECREG:$rA)>; -def ORi64_v2i64: - RRForm<0b10000010000, (outs R64C:$rT), (ins VECREG:$rA, VECREG:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [/* no pattern */]>; - def : Pat<(SPUextract_elt0 (v2i64 VECREG:$rA)), (ORi64_v2i64 VECREG:$rA, VECREG:$rA)>; def : Pat<(SPUextract_elt0_chained (v2i64 VECREG:$rA)), (ORi64_v2i64 VECREG:$rA, VECREG:$rA)>; -def ORf32_v4f32: - RRForm<0b10000010000, (outs R32FP:$rT), (ins VECREG:$rA, VECREG:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [/* no pattern */]>; - def : Pat<(SPUextract_elt0 (v4f32 VECREG:$rA)), (ORf32_v4f32 VECREG:$rA, VECREG:$rA)>; def : Pat<(SPUextract_elt0_chained (v4f32 VECREG:$rA)), (ORf32_v4f32 VECREG:$rA, VECREG:$rA)>; -def ORf64_v2f64: - RRForm<0b10000010000, (outs R64FP:$rT), (ins VECREG:$rA, VECREG:$rB), - "or\t$rT, $rA, $rB", IntegerOp, - [/* no pattern */]>; - def : Pat<(SPUextract_elt0 (v2f64 VECREG:$rA)), (ORf64_v2f64 VECREG:$rA, VECREG:$rA)>; def : Pat<(SPUextract_elt0_chained (v2f64 VECREG:$rA)), (ORf64_v2f64 VECREG:$rA, VECREG:$rA)>; -// ORC: Bitwise "or" with complement (match before ORvec, ORr32) -def ORCv16i8: - RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), - "orc\t$rT, $rA, $rB", IntegerOp, - [(set (v16i8 VECREG:$rT), (or (v16i8 VECREG:$rA), - (vnot (v16i8 VECREG:$rB))))]>; +// ORC: Bitwise "or" with complement (c = a | ~b) -def ORCv8i16: - RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), - "orc\t$rT, $rA, $rB", IntegerOp, - [(set (v8i16 VECREG:$rT), (or (v8i16 VECREG:$rA), - (vnot (v8i16 VECREG:$rB))))]>; +class ORCInst pattern>: + RRForm<0b10010010000, OOL, IOL, "orc\t$rT, $rA, $rB", + IntegerOp, pattern>; -def ORCv4i32: - RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), - "orc\t$rT, $rA, $rB", IntegerOp, - [(set (v4i32 VECREG:$rT), (or (v4i32 VECREG:$rA), - (vnot (v4i32 VECREG:$rB))))]>; +class ORCVecInst: + ORCInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB), + [(set (vectype VECREG:$rT), (or (vectype VECREG:$rA), + (vnot (vectype VECREG:$rB))))]>; -def ORCr32: - RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB), - "orc\t$rT, $rA, $rB", IntegerOp, - [(set R32C:$rT, (or R32C:$rA, (not R32C:$rB)))]>; +class ORCRegInst: + ORCInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB), + [(set rclass:$rT, (or rclass:$rA, (not rclass:$rB)))]>; -def ORCr16: - RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB), - "orc\t$rT, $rA, $rB", IntegerOp, - [(set R16C:$rT, (or R16C:$rA, (not R16C:$rB)))]>; +multiclass BitwiseOrComplement +{ + def v16i8: ORCVecInst; + def v8i16: ORCVecInst; + def v4i32: ORCVecInst; + def v2i64: ORCVecInst; -def ORCr8: - RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB), - "orc\t$rT, $rA, $rB", IntegerOp, - [(set R8C:$rT, (or R8C:$rA, (not R8C:$rB)))]>; + def r64: ORCRegInst; + def r32: ORCRegInst; + def r16: ORCRegInst; + def r8: ORCRegInst; +} + +defm ORC : BitwiseOrComplement; // OR byte immediate -def ORBIv16i8: - RI10Form<0b01100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val), - "orbi\t$rT, $rA, $val", IntegerOp, - [(set (v16i8 VECREG:$rT), - (or (v16i8 VECREG:$rA), (v16i8 v16i8U8Imm:$val)))]>; +class ORBIInst pattern>: + RI10Form<0b01100000, OOL, IOL, "orbi\t$rT, $rA, $val", + IntegerOp, pattern>; -def ORBIr8: - RI10Form<0b01100000, (outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val), - "orbi\t$rT, $rA, $val", IntegerOp, - [(set R8C:$rT, (or R8C:$rA, immU8:$val))]>; +class ORBIVecInst: + ORBIInst<(outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val), + [(set 
(v16i8 VECREG:$rT), (or (vectype VECREG:$rA), + (vectype immpred:$val)))]>; + +multiclass BitwiseOrByteImm +{ + def v16i8: ORBIVecInst; + + def r8: ORBIInst<(outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val), + [(set R8C:$rT, (or R8C:$rA, immU8:$val))]>; +} + +defm ORBI : BitwiseOrByteImm; // OR halfword immediate -def ORHIv8i16: - RI10Form<0b10100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val), - "orhi\t$rT, $rA, $val", IntegerOp, - [(set (v8i16 VECREG:$rT), (or (v8i16 VECREG:$rA), - v8i16Uns10Imm:$val))]>; +class ORHIInst pattern>: + RI10Form<0b10100000, OOL, IOL, "orhi\t$rT, $rA, $val", + IntegerOp, pattern>; -def ORHIr16: - RI10Form<0b10100000, (outs R16C:$rT), (ins R16C:$rA, u10imm:$val), - "orhi\t$rT, $rA, $val", IntegerOp, - [(set R16C:$rT, (or R16C:$rA, i16ImmUns10:$val))]>; +class ORHIVecInst: + ORHIInst<(outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val), + [(set (vectype VECREG:$rT), (or (vectype VECREG:$rA), + immpred:$val))]>; -// Hacked form of ORHI used to promote 8-bit registers to 16-bit -def ORHI1To2: - RI10Form<0b10100000, (outs R16C:$rT), (ins R8C:$rA, s10imm:$val), - "orhi\t$rT, $rA, $val", IntegerOp, - [(set R16C:$rT, (or (anyext R8C:$rA), i16ImmSExt10:$val))]>; +multiclass BitwiseOrHalfwordImm +{ + def v8i16: ORHIVecInst; + + def r16: ORHIInst<(outs R16C:$rT), (ins R16C:$rA, u10imm:$val), + [(set R16C:$rT, (or R16C:$rA, i16ImmUns10:$val))]>; + + // Specialized ORHI form used to promote 8-bit registers to 16-bit + def i8i16: ORHIInst<(outs R16C:$rT), (ins R8C:$rA, s10imm:$val), + [(set R16C:$rT, (or (anyext R8C:$rA), + i16ImmSExt10:$val))]>; +} + +defm ORHI : BitwiseOrHalfwordImm; + +class ORIInst pattern>: + RI10Form<0b00100000, OOL, IOL, "ori\t$rT, $rA, $val", + IntegerOp, pattern>; + +class ORIVecInst: + ORIInst<(outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val), + [(set (vectype VECREG:$rT), (or (vectype VECREG:$rA), + immpred:$val))]>; // Bitwise "or" with immediate -def ORIv4i32: - RI10Form<0b00100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val), - "ori\t$rT, $rA, $val", IntegerOp, - [(set (v4i32 VECREG:$rT), (or (v4i32 VECREG:$rA), - v4i32Uns10Imm:$val))]>; +multiclass BitwiseOrImm +{ + def v4i32: ORIVecInst; -def ORIr32: - RI10Form<0b00100000, (outs R32C:$rT), (ins R32C:$rA, u10imm_i32:$val), - "ori\t$rT, $rA, $val", IntegerOp, - [(set R32C:$rT, (or R32C:$rA, i32ImmUns10:$val))]>; + def r32: ORIInst<(outs R32C:$rT), (ins R32C:$rA, u10imm_i32:$val), + [(set R32C:$rT, (or R32C:$rA, i32ImmUns10:$val))]>; -def ORIr64: - RI10Form_1<0b00100000, (outs R64C:$rT), (ins R64C:$rA, s10imm_i32:$val), - "ori\t$rT, $rA, $val", IntegerOp, - [/* no pattern */]>; + // i16i32: hacked version of the ori instruction to extend 16-bit quantities + // to 32-bit quantities. used exclusively to match "anyext" conversions (vide + // infra "anyext 16->32" pattern.) + def i16i32: ORIInst<(outs R32C:$rT), (ins R16C:$rA, s10imm_i32:$val), + [(set R32C:$rT, (or (anyext R16C:$rA), + i32ImmSExt10:$val))]>; -// ORI2To4: hacked version of the ori instruction to extend 16-bit quantities -// to 32-bit quantities. used exclusively to match "anyext" conversions (vide -// infra "anyext 16->32" pattern.) -def ORI2To4: - RI10Form<0b00100000, (outs R32C:$rT), (ins R16C:$rA, s10imm_i32:$val), - "ori\t$rT, $rA, $val", IntegerOp, - [(set R32C:$rT, (or (anyext R16C:$rA), i32ImmSExt10:$val))]>; + // i8i32: Hacked version of the ORI instruction to extend 16-bit quantities + // to 32-bit quantities. Used exclusively to match "anyext" conversions (vide + // infra "anyext 16->32" pattern.) 
+  def i8i32: ORIInst<(outs R32C:$rT), (ins R8C:$rA, s10imm_i32:$val),
+                     [(set R32C:$rT, (or (anyext R8C:$rA),
+                                         i32ImmSExt10:$val))]>;
+}

-// ORI1To4: Hacked version of the ORI instruction to extend 16-bit quantities
-// to 32-bit quantities. Used exclusively to match "anyext" conversions (vide
-// infra "anyext 16->32" pattern.)
-def ORI1To4:
-  RI10Form<0b00100000, (outs R32C:$rT), (ins R8C:$rA, s10imm_i32:$val),
-    "ori\t$rT, $rA, $val", IntegerOp,
-    [(set R32C:$rT, (or (anyext R8C:$rA), i32ImmSExt10:$val))]>;
+defm ORI : BitwiseOrImm;

 // ORX: "or" across the vector: or's $rA's word slots leaving the result in
 // $rT[0], slots 1-3 are zeroed.
@@ -1423,18 +1379,25 @@ def XORr8:
     "xor\t$rT, $rA, $rB", IntegerOp,
     [(set R8C:$rT, (xor R8C:$rA, R8C:$rB))]>;

-def XORBIv16i8:
-  RI10Form<0b01100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
-    "xorbi\t$rT, $rA, $val", IntegerOp,
-    [(set (v16i8 VECREG:$rT), (xor (v16i8 VECREG:$rA), v16i8U8Imm:$val))]>;
+class XORBIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RI10Form<0b01100000, OOL, IOL, "xorbi\t$rT, $rA, $val",
+             IntegerOp, pattern>;

-def XORBIr8:
-  RI10Form<0b01100000, (outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val),
-    "xorbi\t$rT, $rA, $val", IntegerOp,
-    [(set R8C:$rT, (xor R8C:$rA, immU8:$val))]>;
+multiclass XorByteImm
+{
+  def v16i8:
+    XORBIInst<(outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
+              [(set (v16i8 VECREG:$rT), (xor (v16i8 VECREG:$rA), v16i8U8Imm:$val))]>;
+
+  def r8:
+    XORBIInst<(outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val),
+              [(set R8C:$rT, (xor R8C:$rA, immU8:$val))]>;
+}
+
+defm XORBI : XorByteImm;

 def XORHIv8i16:
-  RI10Form<0b10100000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
+  RI10Form<0b10100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
     "xorhi\t$rT, $rA, $val", IntegerOp,
     [(set (v8i16 VECREG:$rT), (xor (v8i16 VECREG:$rA),
                                    v8i16SExt10Imm:$val))]>;
@@ -1445,7 +1408,7 @@ def XORHIr16:
     [(set R16C:$rT, (xor R16C:$rA, i16ImmSExt10:$val))]>;

 def XORIv4i32:
-  RI10Form<0b00100000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
+  RI10Form<0b00100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
     "xori\t$rT, $rA, $val", IntegerOp,
     [(set (v4i32 VECREG:$rT), (xor (v4i32 VECREG:$rA),
                                    v4i32SExt10Imm:$val))]>;
@@ -1630,7 +1593,7 @@ def SELBv16i8:
   RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
     "selb\t$rT, $rA, $rB, $rC", IntegerOp,
     [(set (v16i8 VECREG:$rT),
-          (SPUselb_v16i8 (v16i8 VECREG:$rA), (v16i8 VECREG:$rB),
+          (SPUselb (v16i8 VECREG:$rA), (v16i8 VECREG:$rB),
                          (v16i8 VECREG:$rC)))]>;

 def : Pat<(or (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rC)),
@@ -1701,7 +1664,7 @@ def SELBv8i16:
   RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
     "selb\t$rT, $rA, $rB, $rC", IntegerOp,
     [(set (v8i16 VECREG:$rT),
-          (SPUselb_v8i16 (v8i16 VECREG:$rA), (v8i16 VECREG:$rB),
+          (SPUselb (v8i16 VECREG:$rA), (v8i16 VECREG:$rB),
                          (v8i16 VECREG:$rC)))]>;

 def : Pat<(or (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rC)),
@@ -1772,7 +1735,7 @@ def SELBv4i32:
   RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
     "selb\t$rT, $rA, $rB, $rC", IntegerOp,
     [(set (v4i32 VECREG:$rT),
-          (SPUselb_v4i32 (v4i32 VECREG:$rA), (v4i32 VECREG:$rB),
+          (SPUselb (v4i32 VECREG:$rA), (v4i32 VECREG:$rB),
                          (v4i32 VECREG:$rC)))]>;

 def : Pat<(or (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rC)),
@@ -1954,43 +1917,60 @@ def : Pat<(or (and (not R8C:$rC), R8C:$rA),

 //===----------------------------------------------------------------------===//
 // Vector shuffle...
 //===----------------------------------------------------------------------===//
-
-def SHUFB:
-  RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
-    "shufb\t$rT, $rA, $rB, $rC", IntegerOp,
-    [/* no pattern */]>;
-
 // SPUshuffle is generated in LowerVECTOR_SHUFFLE and gets replaced with SHUFB.
 // See the SPUshuffle SDNode operand above, which sets up the DAG pattern
 // matcher to emit something when the LowerVECTOR_SHUFFLE generates a node with
 // the SPUISD::SHUFB opcode.
-def : Pat<(SPUshuffle (v16i8 VECREG:$rA), (v16i8 VECREG:$rB), VECREG:$rC),
-          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
+//===----------------------------------------------------------------------===//

-def : Pat<(SPUshuffle (v8i16 VECREG:$rA), (v8i16 VECREG:$rB), VECREG:$rC),
-          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
+class SHUFBInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRRForm<0b1000, OOL, IOL, "shufb\t$rT, $rA, $rB, $rC",
+            IntegerOp, pattern>;

-def : Pat<(SPUshuffle (v4i32 VECREG:$rA), (v4i32 VECREG:$rB), VECREG:$rC),
-          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
+class SHUFBVecInst<ValueType vectype>:
+    SHUFBInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
+              [(set (vectype VECREG:$rT), (SPUshuffle (vectype VECREG:$rA),
+                                                      (vectype VECREG:$rB),
+                                                      (vectype VECREG:$rC)))]>;

-def : Pat<(SPUshuffle (v4f32 VECREG:$rA), (v4f32 VECREG:$rB), VECREG:$rC),
-          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
+// It's this pattern that's probably the most useful, since SPUISelLowering
+// methods create a v16i8 vector for $rC:
+class SHUFBVecPat1<ValueType vectype, SPUInstr inst>:
+    Pat<(SPUshuffle (vectype VECREG:$rA), (vectype VECREG:$rB),
+                    (v16i8 VECREG:$rC)),
+        (inst VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

-def : Pat<(SPUshuffle (v2i64 VECREG:$rA), (v2i64 VECREG:$rB), VECREG:$rC),
-          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
+multiclass ShuffleBytes
+{
+  def v16i8 : SHUFBVecInst<v16i8>;
+  def v8i16 : SHUFBVecInst<v8i16>;
+  def v4i32 : SHUFBVecInst<v4i32>;
+  def v2i64 : SHUFBVecInst<v2i64>;

-def : Pat<(SPUshuffle (v2f64 VECREG:$rA), (v2f64 VECREG:$rB), VECREG:$rC),
-          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
+  def v4f32 : SHUFBVecInst<v4f32>;
+  def v2f64 : SHUFBVecInst<v2f64>;
+}
+
+defm SHUFB : ShuffleBytes;
+
+def : SHUFBVecPat1<v8i16, SHUFBv8i16>;
+def : SHUFBVecPat1<v4i32, SHUFBv4i32>;
+def : SHUFBVecPat1<v2i64, SHUFBv2i64>;
+def : SHUFBVecPat1<v4f32, SHUFBv4f32>;
+def : SHUFBVecPat1<v2f64, SHUFBv2f64>;
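
The SHUFB classes above all select the same underlying byte-shuffle instruction. As a reading aid, here is a rough C++ model of the shufb semantics these patterns assume (illustrative only, not part of the patch; the control-byte encoding follows my reading of the SPU ISA):

#include <cstdint>
#include <cstddef>

// Each control byte of rC either picks one byte out of the 32-byte
// concatenation rA||rB or produces a constant, depending on its top bits.
void shufb(const uint8_t rA[16], const uint8_t rB[16],
           const uint8_t rC[16], uint8_t rT[16]) {
  for (size_t i = 0; i < 16; ++i) {
    uint8_t c = rC[i];
    if ((c & 0xC0) == 0x80)      rT[i] = 0x00;  // 10xxxxxx -> zero
    else if ((c & 0xE0) == 0xC0) rT[i] = 0xFF;  // 110xxxxx -> all ones
    else if ((c & 0xE0) == 0xE0) rT[i] = 0x80;  // 111xxxxx -> 0x80
    else                                        // select byte (c mod 32)
      rT[i] = (c & 0x10) ? rB[c & 0x0F] : rA[c & 0x0F];
  }
}

This byte-level, type-agnostic behavior is why a single v16i8 control operand works for every element type, which is exactly what SHUFBVecPat1 exploits.
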
 //===----------------------------------------------------------------------===//
 // Shift and rotate group:
 //===----------------------------------------------------------------------===//

-def SHLHv8i16:
-  RRForm<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, R16C:$rB),
-    "shlh\t$rT, $rA, $rB", RotateShift,
-    [(set (v8i16 VECREG:$rT),
-          (SPUvec_shl_v8i16 (v8i16 VECREG:$rA), R16C:$rB))]>;
+class SHLHInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRForm<0b11111010000, OOL, IOL, "shlh\t$rT, $rA, $rB",
+           RotateShift, pattern>;
+
+class SHLHVecInst<ValueType vectype>:
+    SHLHInst<(outs VECREG:$rT), (ins VECREG:$rA, R16C:$rB),
+             [(set (vectype VECREG:$rT),
+                   (SPUvec_shl (vectype VECREG:$rA), R16C:$rB))]>;

 // $rB gets promoted to 32-bit register type when confronted with
 // this llvm assembly code:
@@ -1999,178 +1979,271 @@ def SHLHv8i16:
 //
 // %A = shl i16 %arg1, %arg2
 // ret i16 %A
 // }
-//
-// However, we will generate this code when lowering 8-bit shifts and rotates.

-def SHLHr16:
-  RRForm<0b11111010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
-    "shlh\t$rT, $rA, $rB", RotateShift,
-    [(set R16C:$rT, (shl R16C:$rA, R16C:$rB))]>;
+multiclass ShiftLeftHalfword
+{
+  def v8i16: SHLHVecInst<v8i16>;
+  def r16: SHLHInst<(outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
+                    [(set R16C:$rT, (shl R16C:$rA, R16C:$rB))]>;
+  def r16_r32: SHLHInst<(outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
+                        [(set R16C:$rT, (shl R16C:$rA, R32C:$rB))]>;
+}

-def SHLHr16_r32:
-  RRForm<0b11111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
-    "shlh\t$rT, $rA, $rB", RotateShift,
-    [(set R16C:$rT, (shl R16C:$rA, R32C:$rB))]>;
+defm SHLH : ShiftLeftHalfword;

-def SHLHIv8i16:
-  RI7Form<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i8:$val),
-    "shlhi\t$rT, $rA, $val", RotateShift,
-    [(set (v8i16 VECREG:$rT),
-          (SPUvec_shl_v8i16 (v8i16 VECREG:$rA), (i8 uimm7:$val)))]>;
+//===----------------------------------------------------------------------===//

-def : Pat<(SPUvec_shl_v8i16 (v8i16 VECREG:$rA), (i16 uimm7:$val)),
-          (SHLHIv8i16 VECREG:$rA, imm:$val)>;
+class SHLHIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RI7Form<0b11111010000, OOL, IOL, "shlhi\t$rT, $rA, $val",
+            RotateShift, pattern>;

-def : Pat<(SPUvec_shl_v8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val)),
-          (SHLHIv8i16 VECREG:$rA, imm:$val)>;
+class SHLHIVecInst<ValueType vectype>:
+    SHLHIInst<(outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
+              [(set (vectype VECREG:$rT),
+                    (SPUvec_shl (vectype VECREG:$rA), (i16 uimm7:$val)))]>;

-def SHLHIr16:
-  RI7Form<0b11111010000, (outs R16C:$rT), (ins R16C:$rA, u7imm_i32:$val),
-    "shlhi\t$rT, $rA, $val", RotateShift,
-    [(set R16C:$rT, (shl R16C:$rA, (i32 uimm7:$val)))]>;
-
-def : Pat<(shl R16C:$rA, (i8 uimm7:$val)),
+multiclass ShiftLeftHalfwordImm
+{
+  def v8i16: SHLHIVecInst<v8i16>;
+  def r16: SHLHIInst<(outs R16C:$rT), (ins R16C:$rA, u7imm:$val),
+                     [(set R16C:$rT, (shl R16C:$rA, (i16 uimm7:$val)))]>;
+}
+
+defm SHLHI : ShiftLeftHalfwordImm;
+
+def : Pat<(SPUvec_shl (v8i16 VECREG:$rA), (i32 uimm7:$val)),
+          (SHLHIv8i16 VECREG:$rA, uimm7:$val)>;
+
+def : Pat<(shl R16C:$rA, (i32 uimm7:$val)),
           (SHLHIr16 R16C:$rA, uimm7:$val)>;

-def : Pat<(shl R16C:$rA, (i16 uimm7:$val)),
-          (SHLHIr16 R16C:$rA, uimm7:$val)>;
+//===----------------------------------------------------------------------===//

-def SHLv4i32:
-  RRForm<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, R16C:$rB),
-    "shl\t$rT, $rA, $rB", RotateShift,
-    [(set (v4i32 VECREG:$rT),
-          (SPUvec_shl_v4i32 (v4i32 VECREG:$rA), R16C:$rB))]>;
+class SHLInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRForm<0b11111010000, OOL, IOL, "shl\t$rT, $rA, $rB",
+           RotateShift, pattern>;

-def SHLr32:
-  RRForm<0b11111010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
-    "shl\t$rT, $rA, $rB", RotateShift,
-    [(set R32C:$rT, (shl R32C:$rA, R32C:$rB))]>;
+multiclass ShiftLeftWord
+{
+  def v4i32:
+    SHLInst<(outs VECREG:$rT), (ins VECREG:$rA, R16C:$rB),
+            [(set (v4i32 VECREG:$rT),
+                  (SPUvec_shl (v4i32 VECREG:$rA), R16C:$rB))]>;
+  def r32:
+    SHLInst<(outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
+            [(set R32C:$rT, (shl R32C:$rA, R32C:$rB))]>;
+}

-def SHLIv4i32:
-  RI7Form<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i8:$val),
-    "shli\t$rT, $rA, $val", RotateShift,
-    [(set (v4i32 VECREG:$rT),
-          (SPUvec_shl_v4i32 (v4i32 VECREG:$rA), (i8 uimm7:$val)))]>;
+defm SHL: ShiftLeftWord;

-def: Pat<(SPUvec_shl_v4i32 (v4i32 VECREG:$rA), (i16 uimm7:$val)),
-         (SHLIv4i32 VECREG:$rA, uimm7:$val)>;
+//===----------------------------------------------------------------------===//

-def: Pat<(SPUvec_shl_v4i32 (v4i32 VECREG:$rA), (i32 uimm7:$val)),
-         (SHLIv4i32 VECREG:$rA, uimm7:$val)>;
+
+class SHLIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RI7Form<0b11111010000, OOL, IOL, "shli\t$rT, $rA, $val",
+            RotateShift, pattern>;

-def SHLIr32:
-  RI7Form<0b11111010000, (outs R32C:$rT), (ins R32C:$rA, u7imm_i32:$val),
-    "shli\t$rT, $rA, $val", RotateShift,
-    [(set R32C:$rT, (shl R32C:$rA, (i32 uimm7:$val)))]>;
+multiclass ShiftLeftWordImm
+{
+  def v4i32:
+    SHLIInst<(outs VECREG:$rT), (ins VECREG:$rA, u7imm_i32:$val),
+             [(set (v4i32 VECREG:$rT),
+                   (SPUvec_shl (v4i32 VECREG:$rA), (i32 uimm7:$val)))]>;

-def : Pat<(shl R32C:$rA, (i16 uimm7:$val)),
-          (SHLIr32 R32C:$rA, uimm7:$val)>;
+  def r32:
+    SHLIInst<(outs R32C:$rT), (ins R32C:$rA, u7imm_i32:$val),
+             [(set R32C:$rT, (shl R32C:$rA, (i32 uimm7:$val)))]>;
+}

-def : Pat<(shl R32C:$rA, (i8 uimm7:$val)),
-          (SHLIr32 R32C:$rA, uimm7:$val)>;
+defm SHLI : ShiftLeftWordImm;

+//===----------------------------------------------------------------------===//
 // SHLQBI vec form: Note that this will shift the entire vector (the 128-bit
 // register) to the left. Vector form is here to ensure type correctness.
-def SHLQBIvec:
-  RRForm<0b11011011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
-    "shlqbi\t$rT, $rA, $rB", RotateShift,
-    [/* intrinsic */]>;
+//
+// The shift count is in the lowest 3 bits (29-31) of $rB, so only a bit shift
+// of 7 bits is actually possible.
+//
+// Note also that SHLQBI/SHLQBII are used in conjunction with SHLQBY/SHLQBYI
+// to shift i64 and i128. SHLQBI is the residual left over after shifting by
+// bytes with SHLQBY.

-// See note above on SHLQBI.
-def SHLQBIIvec:
-  RI7Form<0b11011111100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
-    "shlqbii\t$rT, $rA, $val", RotateShift,
-    [/* intrinsic */]>;
+class SHLQBIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRForm<0b11011011100, OOL, IOL, "shlqbi\t$rT, $rA, $rB",
+           RotateShift, pattern>;
+
+class SHLQBIVecInst<ValueType vectype>:
+    SHLQBIInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
+               [(set (vectype VECREG:$rT),
+                     (SPUshlquad_l_bits (vectype VECREG:$rA), R32C:$rB))]>;
+
+multiclass ShiftLeftQuadByBits
+{
+  def v16i8: SHLQBIVecInst<v16i8>;
+  def v8i16: SHLQBIVecInst<v8i16>;
+  def v4i32: SHLQBIVecInst<v4i32>;
+  def v2i64: SHLQBIVecInst<v2i64>;
+}
+
+defm SHLQBI : ShiftLeftQuadByBits;
+
+// See note above on SHLQBI. In this case, the predicate actually does the
+// enforcement, whereas with SHLQBI, we have to "take it on faith."
+class SHLQBIIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RI7Form<0b11011111100, OOL, IOL, "shlqbii\t$rT, $rA, $val",
+            RotateShift, pattern>;
+
+class SHLQBIIVecInst<ValueType vectype>:
+    SHLQBIIInst<(outs VECREG:$rT), (ins VECREG:$rA, u7imm_i32:$val),
+                [(set (vectype VECREG:$rT),
+                      (SPUshlquad_l_bits (vectype VECREG:$rA),
+                                         (i32 bitshift:$val)))]>;
+
+multiclass ShiftLeftQuadByBitsImm
+{
+  def v16i8 : SHLQBIIVecInst<v16i8>;
+  def v8i16 : SHLQBIIVecInst<v8i16>;
+  def v4i32 : SHLQBIIVecInst<v4i32>;
+  def v2i64 : SHLQBIIVecInst<v2i64>;
+}
+
+defm SHLQBII : ShiftLeftQuadByBitsImm;
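
A plain C++ sketch of the i64/i128 shifting scheme the comment above describes (not part of the patch; the two-word struct stands in for a 128-bit register). A full left shift decomposes into the SHLQBY byte part and the SHLQBI residual bit part:

#include <cstdint>

struct Quad { uint64_t hi, lo; };     // stand-in for a 128-bit register

Quad shl128(Quad v, unsigned count) { // count < 128
  unsigned bytes = (count / 8) * 8;   // contributed by shlqby
  unsigned bits  = count % 8;         // residual, contributed by shlqbi (0-7)
  if (bytes >= 64) {
    v.hi = v.lo << (bytes - 64); v.lo = 0;
  } else if (bytes) {
    v.hi = (v.hi << bytes) | (v.lo >> (64 - bytes)); v.lo <<= bytes;
  }
  if (bits) {
    v.hi = (v.hi << bits) | (v.lo >> (64 - bits)); v.lo <<= bits;
  }
  return v;
}

The 0-7 range of the residual part is also why the bitshift predicate on SHLQBII suffices: only the low three bits of the count ever reach the bit-shift stage.
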

 // SHLQBY, SHLQBYI vector forms: Shift the entire vector to the left by bytes,
-// not by bits.
-def SHLQBYvec:
-  RI7Form<0b11111011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
-    "shlqbyi\t$rT, $rA, $rB", RotateShift,
-    [/* intrinsic */]>;
+// not by bits. See notes above on SHLQBI.

-def SHLQBYIvec:
-  RI7Form<0b11111111100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
-    "shlqbyi\t$rT, $rA, $val", RotateShift,
-    [/* intrinsic */]>;
+class SHLQBYInst<dag OOL, dag IOL, list<dag> pattern>:
+    RI7Form<0b11111011100, OOL, IOL, "shlqbyi\t$rT, $rA, $rB",
+            RotateShift, pattern>;

-// ROTH v8i16 form:
-def ROTHv8i16:
-  RRForm<0b00111010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
-    "roth\t$rT, $rA, $rB", RotateShift,
-    [(set (v8i16 VECREG:$rT),
-          (SPUvec_rotl_v8i16 VECREG:$rA, VECREG:$rB))]>;
+class SHLQBYVecInst<ValueType vectype>:
+    SHLQBYInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
+               [(set (vectype VECREG:$rT),
+                     (SPUshlquad_l_bytes (vectype VECREG:$rA), R32C:$rB))]>;

-def ROTHr16:
-  RRForm<0b00111010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
-    "roth\t$rT, $rA, $rB", RotateShift,
-    [(set R16C:$rT, (rotl R16C:$rA, R16C:$rB))]>;
+multiclass ShiftLeftQuadBytes
+{
+  def v16i8: SHLQBYVecInst<v16i8>;
+  def v8i16: SHLQBYVecInst<v8i16>;
+  def v4i32: SHLQBYVecInst<v4i32>;
+  def v2i64: SHLQBYVecInst<v2i64>;
+  def r128: SHLQBYInst<(outs GPRC:$rT), (ins GPRC:$rA, R32C:$rB),
+                       [(set GPRC:$rT,
+                             (SPUshlquad_l_bytes GPRC:$rA, R32C:$rB))]>;
+}

-def ROTHr16_r32:
-  RRForm<0b00111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
-    "roth\t$rT, $rA, $rB", RotateShift,
-    [(set R16C:$rT, (rotl R16C:$rA, R32C:$rB))]>;
+defm SHLQBY: ShiftLeftQuadBytes;

-// The rotate amount is in the same bits whether we've got an 8-bit, 16-bit or
-// 32-bit register
-def ROTHr16_r8:
-  RRForm<0b00111010000, (outs R16C:$rT), (ins R16C:$rA, R8C:$rB),
-    "roth\t$rT, $rA, $rB", RotateShift,
-    [(set R16C:$rT, (rotl R16C:$rA, (i32 (zext R8C:$rB))))]>;
+class SHLQBYIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RI7Form<0b11111111100, OOL, IOL, "shlqbyi\t$rT, $rA, $val",
+            RotateShift, pattern>;

-def : Pat<(rotl R16C:$rA, (i32 (sext R8C:$rB))),
-          (ROTHr16_r8 R16C:$rA, R8C:$rB)>;
+class SHLQBYIVecInst<ValueType vectype>:
+    SHLQBYIInst<(outs VECREG:$rT), (ins VECREG:$rA, u7imm_i32:$val),
+                [(set (vectype VECREG:$rT),
+                      (SPUshlquad_l_bytes (vectype VECREG:$rA),
+                                          (i32 uimm7:$val)))]>;

-def : Pat<(rotl R16C:$rA, (i32 (zext R8C:$rB))),
-          (ROTHr16_r8 R16C:$rA, R8C:$rB)>;
+multiclass ShiftLeftQuadBytesImm
+{
+  def v16i8: SHLQBYIVecInst<v16i8>;
+  def v8i16: SHLQBYIVecInst<v8i16>;
+  def v4i32: SHLQBYIVecInst<v4i32>;
+  def v2i64: SHLQBYIVecInst<v2i64>;
+  def r128: SHLQBYIInst<(outs GPRC:$rT), (ins GPRC:$rA, u7imm_i32:$val),
+                        [(set GPRC:$rT,
+                              (SPUshlquad_l_bytes GPRC:$rA, (i32 uimm7:$val)))]>;
+}

-def : Pat<(rotl R16C:$rA, (i32 (anyext R8C:$rB))),
-          (ROTHr16_r8 R16C:$rA, R8C:$rB)>;
+defm SHLQBYI : ShiftLeftQuadBytesImm;

-def ROTHIv8i16:
-  RI7Form<0b00111110000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i8:$val),
-    "rothi\t$rT, $rA, $val", RotateShift,
-    [(set (v8i16 VECREG:$rT),
-          (SPUvec_rotl_v8i16 VECREG:$rA, (i8 uimm7:$val)))]>;
+// Special form for truncating i64 to i32:
+def SHLQBYItrunc64: SHLQBYIInst<(outs R32C:$rT), (ins R64C:$rA, u7imm_i32:$val),
+                                [/* no pattern, see below */]>;

-def : Pat<(SPUvec_rotl_v8i16 VECREG:$rA, (i16 uimm7:$val)),
-          (ROTHIv8i16 VECREG:$rA, imm:$val)>;
+def : Pat<(trunc R64C:$rSrc),
+          (SHLQBYItrunc64 R64C:$rSrc, 4)>;
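
Why a left byte-shift of 4 implements an i64 to i32 truncation: SPU registers are 128 bits wide and big-endian, an i64 occupies bytes 0-7, and an i32 is read from the preferred slot, bytes 0-3. A C++ sketch (illustrative; the byte-array register model is an assumption):

#include <cstdint>

uint32_t trunc_via_shlqbyi4(uint64_t x) {
  uint8_t reg[16] = {0};
  for (int i = 0; i < 8; ++i)       // big-endian store of the i64
    reg[i] = uint8_t(x >> (8 * (7 - i)));
  uint8_t out[16] = {0};
  for (int i = 0; i < 12; ++i)      // shlqbyi $rT, $rA, 4
    out[i] = reg[i + 4];
  uint32_t r = 0;
  for (int i = 0; i < 4; ++i)       // read the preferred slot, bytes 0-3
    r = (r << 8) | out[i];
  return r;                         // == uint32_t(x)
}
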

-def : Pat<(SPUvec_rotl_v8i16 VECREG:$rA, (i32 uimm7:$val)),
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+// Rotate halfword:
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+class ROTHInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRForm<0b00111010000, OOL, IOL, "roth\t$rT, $rA, $rB",
+           RotateShift, pattern>;
+
+class ROTHVecInst<ValueType vectype>:
+    ROTHInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+             [(set (vectype VECREG:$rT),
+                   (SPUvec_rotl VECREG:$rA, VECREG:$rB))]>;
+
+class ROTHRegInst<RegisterClass rclass>:
+    ROTHInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB),
+             [(set rclass:$rT, (rotl rclass:$rA, rclass:$rB))]>;
+
+multiclass RotateLeftHalfword
+{
+  def v8i16: ROTHVecInst<v8i16>;
+  def r16: ROTHRegInst<R16C>;
+}
+
+defm ROTH: RotateLeftHalfword;
+
+def ROTHr16_r32: ROTHInst<(outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
+                          [(set R16C:$rT, (rotl R16C:$rA, R32C:$rB))]>;
+
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+// Rotate halfword, immediate:
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+class ROTHIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RI7Form<0b00111110000, OOL, IOL, "rothi\t$rT, $rA, $val",
+            RotateShift, pattern>;
+
+class ROTHIVecInst<ValueType vectype>:
+    ROTHIInst<(outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
+              [(set (vectype VECREG:$rT),
+                    (SPUvec_rotl VECREG:$rA, (i16 uimm7:$val)))]>;
+
+multiclass RotateLeftHalfwordImm
+{
+  def v8i16: ROTHIVecInst<v8i16>;
+  def r16: ROTHIInst<(outs R16C:$rT), (ins R16C:$rA, u7imm:$val),
+                     [(set R16C:$rT, (rotl R16C:$rA, (i16 uimm7:$val)))]>;
+  def r16_r32: ROTHIInst<(outs R16C:$rT), (ins R16C:$rA, u7imm_i32:$val),
+                         [(set R16C:$rT, (rotl R16C:$rA, (i32 uimm7:$val)))]>;
+}
+
+defm ROTHI: RotateLeftHalfwordImm;
+
+def : Pat<(SPUvec_rotl VECREG:$rA, (i32 uimm7:$val)),
          (ROTHIv8i16 VECREG:$rA, imm:$val)>;

-def ROTHIr16:
-  RI7Form<0b00111110000, (outs R16C:$rT), (ins R16C:$rA, u7imm:$val),
-    "rothi\t$rT, $rA, $val", RotateShift,
-    [(set R16C:$rT, (rotl R16C:$rA, (i16 uimm7:$val)))]>;
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+// Rotate word:
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~

-def ROTHIr16_i32:
-  RI7Form<0b00111110000, (outs R16C:$rT), (ins R16C:$rA, u7imm_i32:$val),
-    "rothi\t$rT, $rA, $val", RotateShift,
-    [(set R16C:$rT, (rotl R16C:$rA, (i32 uimm7:$val)))]>;
+class ROTInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRForm<0b00011010000, OOL, IOL, "rot\t$rT, $rA, $rB",
+           RotateShift, pattern>;

-def ROTHIr16_i8:
-  RI7Form<0b00111110000, (outs R16C:$rT), (ins R16C:$rA, u7imm_i8:$val),
-    "rothi\t$rT, $rA, $val", RotateShift,
-    [(set R16C:$rT, (rotl R16C:$rA, (i8 uimm7:$val)))]>;
+class ROTVecInst<ValueType vectype>:
+    ROTInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
+            [(set (vectype VECREG:$rT),
+                  (SPUvec_rotl (vectype VECREG:$rA), R32C:$rB))]>;

-def ROTv4i32:
-  RRForm<0b00011010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
-    "rot\t$rT, $rA, $rB", RotateShift,
-    [(set (v4i32 VECREG:$rT),
-          (SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), R32C:$rB))]>;
+class ROTRegInst<RegisterClass rclass>:
+    ROTInst<(outs rclass:$rT), (ins rclass:$rA, R32C:$rB),
+            [(set rclass:$rT,
+                  (rotl rclass:$rA, R32C:$rB))]>;

-def ROTr32:
-  RRForm<0b00011010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
-    "rot\t$rT, $rA, $rB", RotateShift,
-    [(set R32C:$rT, (rotl R32C:$rA, R32C:$rB))]>;
+multiclass RotateLeftWord
+{
+  def v4i32: ROTVecInst<v4i32>;
+  def r32: ROTRegInst<R32C>;
+}
+
+defm ROT: RotateLeftWord;

 // The rotate amount is in the same bits whether we've got an 8-bit, 16-bit or
 // 32-bit register
 def ROTr32_r16_anyext:
-  RRForm<0b00011010000, (outs R32C:$rT), (ins R32C:$rA, R16C:$rB),
-    "rot\t$rT, $rA, $rB", RotateShift,
-    [(set R32C:$rT, (rotl R32C:$rA, (i32 (anyext R16C:$rB))))]>;
+  ROTInst<(outs R32C:$rT), (ins R32C:$rA, R16C:$rB),
+          [(set R32C:$rT, (rotl R32C:$rA, (i32 (anyext R16C:$rB))))]>;

 def : Pat<(rotl R32C:$rA, (i32 (zext R16C:$rB))),
           (ROTr32_r16_anyext R32C:$rA, R16C:$rB)>;

@@ -2179,9 +2252,8 @@ def : Pat<(rotl R32C:$rA, (i32 (sext R16C:$rB))),
           (ROTr32_r16_anyext R32C:$rA, R16C:$rB)>;

 def ROTr32_r8_anyext:
-  RRForm<0b00011010000, (outs R32C:$rT), (ins R32C:$rA, R8C:$rB),
- "rot\t$rT, $rA, $rB", RotateShift, - [(set R32C:$rT, (rotl R32C:$rA, (i32 (anyext R8C:$rB))))]>; + ROTInst<(outs R32C:$rT), (ins R32C:$rA, R8C:$rB), + [(set R32C:$rT, (rotl R32C:$rA, (i32 (anyext R8C:$rB))))]>; def : Pat<(rotl R32C:$rA, (i32 (zext R8C:$rB))), (ROTr32_r8_anyext R32C:$rA, R8C:$rB)>; @@ -2189,53 +2261,99 @@ def : Pat<(rotl R32C:$rA, (i32 (zext R8C:$rB))), def : Pat<(rotl R32C:$rA, (i32 (sext R8C:$rB))), (ROTr32_r8_anyext R32C:$rA, R8C:$rB)>; -def ROTIv4i32: - RI7Form<0b00011110000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i32:$val), - "roti\t$rT, $rA, $val", RotateShift, - [(set (v4i32 VECREG:$rT), - (SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), (i32 uimm7:$val)))]>; +//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ +// Rotate word, immediate +//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ -def : Pat<(SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), (i16 uimm7:$val)), - (ROTIv4i32 VECREG:$rA, imm:$val)>; +class ROTIInst pattern>: + RI7Form<0b00011110000, OOL, IOL, "roti\t$rT, $rA, $val", + RotateShift, pattern>; -def : Pat<(SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), (i8 uimm7:$val)), - (ROTIv4i32 VECREG:$rA, imm:$val)>; +class ROTIVecInst: + ROTIInst<(outs VECREG:$rT), (ins VECREG:$rA, optype:$val), + [(set (vectype VECREG:$rT), + (SPUvec_rotl (vectype VECREG:$rA), (inttype pred:$val)))]>; -def ROTIr32: - RI7Form<0b00011110000, (outs R32C:$rT), (ins R32C:$rA, u7imm_i32:$val), - "roti\t$rT, $rA, $val", RotateShift, - [(set R32C:$rT, (rotl R32C:$rA, (i32 uimm7:$val)))]>; +class ROTIRegInst: + ROTIInst<(outs rclass:$rT), (ins rclass:$rA, optype:$val), + [(set rclass:$rT, (rotl rclass:$rA, (inttype pred:$val)))]>; -def ROTIr32_i16: - RI7Form<0b00111110000, (outs R32C:$rT), (ins R32C:$rA, u7imm:$val), - "roti\t$rT, $rA, $val", RotateShift, - [(set R32C:$rT, (rotl R32C:$rA, (i16 uimm7:$val)))]>; +multiclass RotateLeftWordImm +{ + def v4i32: ROTIVecInst; + def v4i32_i16: ROTIVecInst; + def v4i32_i8: ROTIVecInst; -def ROTIr32_i8: - RI7Form<0b00111110000, (outs R32C:$rT), (ins R32C:$rA, u7imm_i8:$val), - "roti\t$rT, $rA, $val", RotateShift, - [(set R32C:$rT, (rotl R32C:$rA, (i8 uimm7:$val)))]>; + def r32: ROTIRegInst; + def r32_i16: ROTIRegInst; + def r32_i8: ROTIRegInst; +} -// ROTQBY* vector forms: This rotates the entire vector, but vector registers -// are used here for type checking (instances where ROTQBI is used actually -// use vector registers) -def ROTQBYvec: - RRForm<0b00111011100, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB), - "rotqby\t$rT, $rA, $rB", RotateShift, - [(set (v16i8 VECREG:$rT), (SPUrotbytes_left (v16i8 VECREG:$rA), R32C:$rB))]>; +defm ROTI : RotateLeftWordImm; + +//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ +// Rotate quad by byte (count) +//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ + +class ROTQBYInst pattern>: + RRForm<0b00111011100, OOL, IOL, "rotqby\t$rT, $rA, $rB", + RotateShift, pattern>; + +class ROTQBYVecInst: + ROTQBYInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB), + [(set (vectype VECREG:$rT), + (SPUrotbytes_left (vectype VECREG:$rA), R32C:$rB))]>; + +multiclass RotateQuadLeftByBytes +{ + def v16i8: ROTQBYVecInst; + def v8i16: ROTQBYVecInst; + def v4i32: ROTQBYVecInst; + def v2i64: ROTQBYVecInst; +} + +defm ROTQBY: RotateQuadLeftByBytes; def : Pat<(SPUrotbytes_left_chained (v16i8 VECREG:$rA), R32C:$rB), - (ROTQBYvec VECREG:$rA, R32C:$rB)>; + (ROTQBYv16i8 VECREG:$rA, R32C:$rB)>; +def : Pat<(SPUrotbytes_left_chained (v8i16 VECREG:$rA), R32C:$rB), + (ROTQBYv8i16 
+          (ROTQBYv8i16 VECREG:$rA, R32C:$rB)>;
+def : Pat<(SPUrotbytes_left_chained (v4i32 VECREG:$rA), R32C:$rB),
+          (ROTQBYv4i32 VECREG:$rA, R32C:$rB)>;
+def : Pat<(SPUrotbytes_left_chained (v2i64 VECREG:$rA), R32C:$rB),
+          (ROTQBYv2i64 VECREG:$rA, R32C:$rB)>;

-// See ROTQBY note above.
-def ROTQBYIvec:
-  RI7Form<0b00111111100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
-    "rotqbyi\t$rT, $rA, $val", RotateShift,
-    [(set (v16i8 VECREG:$rT),
-          (SPUrotbytes_left (v16i8 VECREG:$rA), (i16 uimm7:$val)))]>;
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+// Rotate quad by byte (count), immediate
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+
+class ROTQBYIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RI7Form<0b00111111100, OOL, IOL, "rotqbyi\t$rT, $rA, $val",
+            RotateShift, pattern>;
+
+class ROTQBYIVecInst<ValueType vectype>:
+    ROTQBYIInst<(outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
+                [(set (vectype VECREG:$rT),
+                      (SPUrotbytes_left (vectype VECREG:$rA),
+                                        (i16 uimm7:$val)))]>;
+
+multiclass RotateQuadByBytesImm
+{
+  def v16i8: ROTQBYIVecInst<v16i8>;
+  def v8i16: ROTQBYIVecInst<v8i16>;
+  def v4i32: ROTQBYIVecInst<v4i32>;
+  def v2i64: ROTQBYIVecInst<v2i64>;
+}
+
+defm ROTQBYI: RotateQuadByBytesImm;

 def : Pat<(SPUrotbytes_left_chained (v16i8 VECREG:$rA), (i16 uimm7:$val)),
-          (ROTQBYIvec VECREG:$rA, uimm7:$val)>;
+          (ROTQBYIv16i8 VECREG:$rA, uimm7:$val)>;
+def : Pat<(SPUrotbytes_left_chained (v8i16 VECREG:$rA), (i16 uimm7:$val)),
+          (ROTQBYIv8i16 VECREG:$rA, uimm7:$val)>;
+def : Pat<(SPUrotbytes_left_chained (v4i32 VECREG:$rA), (i16 uimm7:$val)),
+          (ROTQBYIv4i32 VECREG:$rA, uimm7:$val)>;
+def : Pat<(SPUrotbytes_left_chained (v2i64 VECREG:$rA), (i16 uimm7:$val)),
+          (ROTQBYIv2i64 VECREG:$rA, uimm7:$val)>;
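
For contrast with the shift-left-quad forms, rotqby rotates rather than shifts: bytes that fall off the left re-enter on the right, and the count is taken modulo 16. A C++ model (illustrative only, not part of the patch):

#include <cstdint>
#include <cstddef>

void rotqby(const uint8_t rA[16], uint32_t count, uint8_t rT[16]) {
  unsigned n = count & 15;      // count mod 16
  for (size_t i = 0; i < 16; ++i)
    rT[i] = rA[(i + n) & 15];   // rotate the 16-byte register left by n bytes
}

Because the operation is byte-level and type-agnostic, one pattern per vector type can share the same machine instruction, which is all the ROTQBY multiclass does.
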

 // See ROTQBY note above.
 def ROTQBYBIvec:
@@ -2243,49 +2361,99 @@ def ROTQBYBIvec:
     "rotqbybi\t$rT, $rA, $val", RotateShift,
     [/* intrinsic */]>;

+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
 // See ROTQBY note above.
 //
 // Assume that the user of this instruction knows to shift the rotate count
 // into bit 29
-def ROTQBIvec:
-  RRForm<0b00011011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
-    "rotqbi\t$rT, $rA, $rB", RotateShift,
-    [/* insert intrinsic here */]>;
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~

-// See ROTQBY note above.
-def ROTQBIIvec:
-  RI7Form<0b00011111100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i32:$val),
-    "rotqbii\t$rT, $rA, $val", RotateShift,
-    [/* insert intrinsic here */]>;
+class ROTQBIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRForm<0b00011011100, OOL, IOL, "rotqbi\t$rT, $rA, $rB",
+           RotateShift, pattern>;

+class ROTQBIVecInst<ValueType vectype>:
+    ROTQBIInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+               [/* no pattern yet */]>;
+
+class ROTQBIRegInst<RegisterClass rclass>:
+    ROTQBIInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB),
+               [/* no pattern yet */]>;
+
+multiclass RotateQuadByBitCount
+{
+  def v16i8: ROTQBIVecInst<v16i8>;
+  def v8i16: ROTQBIVecInst<v8i16>;
+  def v4i32: ROTQBIVecInst<v4i32>;
+  def v2i64: ROTQBIVecInst<v2i64>;
+
+  def r128: ROTQBIRegInst<GPRC>;
+  def r64: ROTQBIRegInst<R64C>;
+}
+
+defm ROTQBI: RotateQuadByBitCount;
+
+class ROTQBIIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RI7Form<0b00011111100, OOL, IOL, "rotqbii\t$rT, $rA, $val",
+            RotateShift, pattern>;
+
+class ROTQBIIVecInst<ValueType vectype, Operand optype>:
+    ROTQBIIInst<(outs VECREG:$rT), (ins VECREG:$rA, optype:$val),
+                [/* no pattern yet */]>;
+
+class ROTQBIIRegInst<RegisterClass rclass, Operand optype>:
+    ROTQBIIInst<(outs rclass:$rT), (ins rclass:$rA, optype:$val),
+                [/* no pattern yet */]>;
+
+multiclass RotateQuadByBitCountImm
+{
+  def v16i8: ROTQBIIVecInst<v16i8, u7imm_i32>;
+  def v8i16: ROTQBIIVecInst<v8i16, u7imm_i32>;
+  def v4i32: ROTQBIIVecInst<v4i32, u7imm_i32>;
+  def v2i64: ROTQBIIVecInst<v2i64, u7imm_i32>;
+
+  def r128: ROTQBIIRegInst<GPRC, u7imm_i32>;
+  def r64: ROTQBIIRegInst<R64C, u7imm_i32>;
+}
+
+defm ROTQBII : RotateQuadByBitCountImm;
+
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
 // ROTHM v8i16 form:
 // NOTE(1): No vector rotate is generated by the C/C++ frontend (today),
 //          so this only matches a synthetically generated/lowered code
 //          fragment.
 // NOTE(2): $rB must be negated before the right rotate!
-def ROTHMv8i16:
-  RRForm<0b10111010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
-    "rothm\t$rT, $rA, $rB", RotateShift,
-    [/* see patterns below - $rB must be negated */]>;
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~

-def : Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), R32C:$rB),
+class ROTHMInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRForm<0b10111010000, OOL, IOL, "rothm\t$rT, $rA, $rB",
+           RotateShift, pattern>;
+
+def ROTHMv8i16:
+    ROTHMInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
+              [/* see patterns below - $rB must be negated */]>;
+
+def : Pat<(SPUvec_srl (v8i16 VECREG:$rA), R32C:$rB),
           (ROTHMv8i16 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;

-def : Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), R16C:$rB),
+def : Pat<(SPUvec_srl (v8i16 VECREG:$rA), R16C:$rB),
           (ROTHMv8i16 VECREG:$rA,
                       (SFIr32 (XSHWr16 R16C:$rB), 0))>;

-def : Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), R8C:$rB),
+def : Pat<(SPUvec_srl (v8i16 VECREG:$rA), R8C:$rB),
           (ROTHMv8i16 VECREG:$rA,
                       (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB) ), 0))>;

 // ROTHM r16 form: Rotate 16-bit quantity to right, zero fill at the left
 // Note: This instruction doesn't match a pattern because rB must be negated
 // for the instruction to work. Thus, the pattern below the instruction!

 def ROTHMr16:
-  RRForm<0b10111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
-    "rothm\t$rT, $rA, $rB", RotateShift,
-    [/* see patterns below - $rB must be negated! */]>;
+    ROTHMInst<(outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
+              [/* see patterns below - $rB must be negated! */]>;

 def : Pat<(srl R16C:$rA, R32C:$rB),
           (ROTHMr16 R16C:$rA, (SFIr32 R32C:$rB, 0))>;
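
The (SFIr32 $rB, 0) wrapper in these patterns computes 0 - $rB, because the hardware derives its right-shift amount from a negated count. A scalar C++ sketch of the halfword case (the 5-bit count masking is my reading of the ISA, so treat it as an assumption):

#include <cstdint>

uint16_t rothm(uint16_t rA, uint32_t rB) {  // rotate and mask halfword
  uint32_t shift = (0u - rB) & 0x1F;        // count is negated, then masked
  return shift < 16 ? uint16_t(rA >> shift) : 0;
}

uint16_t srl16(uint16_t x, uint32_t n) {    // srl lowers to rothm(x, 0 - n)
  return rothm(x, 0u - n);
}
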

@@ -2301,22 +2469,30 @@ def : Pat<(srl R16C:$rA, R8C:$rB),

 // ROTHMI v8i16 form: See the comment for ROTHM v8i16. The difference here is
 // that the immediate can be complemented, so that the user doesn't have to
 // worry about it.
-def ROTHMIv8i16:
-  RI7Form<0b10111110000, (outs VECREG:$rT), (ins VECREG:$rA, rothNeg7imm:$val),
-    "rothmi\t$rT, $rA, $val", RotateShift,
-    [(set (v8i16 VECREG:$rT),
-          (SPUvec_srl_v8i16 (v8i16 VECREG:$rA), (i32 imm:$val)))]>;

-def: Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), (i16 imm:$val)),
+class ROTHMIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RI7Form<0b10111110000, OOL, IOL, "rothmi\t$rT, $rA, $val",
+            RotateShift, pattern>;
+
+def ROTHMIv8i16:
+    ROTHMIInst<(outs VECREG:$rT), (ins VECREG:$rA, rothNeg7imm:$val),
+               [/* no pattern */]>;
+
+def : Pat<(SPUvec_srl (v8i16 VECREG:$rA), (i32 imm:$val)),
+          (ROTHMIv8i16 VECREG:$rA, imm:$val)>;
+
+def: Pat<(SPUvec_srl (v8i16 VECREG:$rA), (i16 imm:$val)),
         (ROTHMIv8i16 VECREG:$rA, imm:$val)>;

-def: Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), (i8 imm:$val)),
+def: Pat<(SPUvec_srl (v8i16 VECREG:$rA), (i8 imm:$val)),
         (ROTHMIv8i16 VECREG:$rA, imm:$val)>;

 def ROTHMIr16:
-  RI7Form<0b10111110000, (outs R16C:$rT), (ins R16C:$rA, rothNeg7imm:$val),
-    "rothmi\t$rT, $rA, $val", RotateShift,
-    [(set R16C:$rT, (srl R16C:$rA, (i32 uimm7:$val)))]>;
+    ROTHMIInst<(outs R16C:$rT), (ins R16C:$rA, rothNeg7imm:$val),
+               [/* no pattern */]>;
+
+def: Pat<(srl R16C:$rA, (i32 uimm7:$val)),
+         (ROTHMIr16 R16C:$rA, uimm7:$val)>;

 def: Pat<(srl R16C:$rA, (i16 uimm7:$val)),
          (ROTHMIr16 R16C:$rA, uimm7:$val)>;

@@ -2325,26 +2501,28 @@ def: Pat<(srl R16C:$rA, (i8 uimm7:$val)),
          (ROTHMIr16 R16C:$rA, uimm7:$val)>;

 // ROTM v4i32 form: See the ROTHM v8i16 comments.
-def ROTMv4i32:
-  RRForm<0b10011010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
-    "rotm\t$rT, $rA, $rB", RotateShift,
-    [/* see patterns below - $rB must be negated */]>;
+class ROTMInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRForm<0b10011010000, OOL, IOL, "rotm\t$rT, $rA, $rB",
+           RotateShift, pattern>;

-def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, R32C:$rB),
+def ROTMv4i32:
+    ROTMInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
+             [/* see patterns below - $rB must be negated */]>;
+
+def : Pat<(SPUvec_srl VECREG:$rA, R32C:$rB),
           (ROTMv4i32 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;

-def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, R16C:$rB),
+def : Pat<(SPUvec_srl VECREG:$rA, R16C:$rB),
           (ROTMv4i32 VECREG:$rA, (SFIr32 (XSHWr16 R16C:$rB), 0))>;

-def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, /* R8C */ R16C:$rB),
+def : Pat<(SPUvec_srl VECREG:$rA, R8C:$rB),
           (ROTMv4i32 VECREG:$rA,
-                     (SFIr32 (XSHWr16 /* (XSBHr8 R8C */ R16C:$rB) /*)*/, 0))>;
+                     (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;

 def ROTMr32:
-  RRForm<0b10011010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
-    "rotm\t$rT, $rA, $rB", RotateShift,
-    [/* see patterns below - $rB must be negated */]>;
+    ROTMInst<(outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
+             [/* see patterns below - $rB must be negated */]>;

 def : Pat<(srl R32C:$rA, R32C:$rB),
           (ROTMr32 R32C:$rA, (SFIr32 R32C:$rB, 0))>;

@@ -2362,12 +2540,12 @@ def ROTMIv4i32:
   RI7Form<0b10011110000, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
     "rotmi\t$rT, $rA, $val", RotateShift,
     [(set (v4i32 VECREG:$rT),
-          (SPUvec_srl_v4i32 VECREG:$rA, (i32 uimm7:$val)))]>;
+          (SPUvec_srl VECREG:$rA, (i32 uimm7:$val)))]>;

-def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, (i16 uimm7:$val)),
+def : Pat<(SPUvec_srl VECREG:$rA, (i16 uimm7:$val)),
           (ROTMIv4i32 VECREG:$rA, uimm7:$val)>;

-def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, (i8 uimm7:$val)),
+def : Pat<(SPUvec_srl VECREG:$rA, (i8 uimm7:$val)),
           (ROTMIv4i32 VECREG:$rA, uimm7:$val)>;

 // ROTMI r32 form: know how to complement the immediate value.
@@ -2382,52 +2560,194 @@ def : Pat<(srl R32C:$rA, (i16 imm:$val)),
 def : Pat<(srl R32C:$rA, (i8 imm:$val)),
           (ROTMIr32 R32C:$rA, uimm7:$val)>;

+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
 // ROTQMBYvec: This is a vector form merely so that when used in an
 // instruction pattern, type checking will succeed. This instruction assumes
-// that the user knew to complement $rB.
-def ROTQMBYvec:
-  RRForm<0b10111011100, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
-    "rotqmby\t$rT, $rA, $rB", RotateShift,
-    [(set (v16i8 VECREG:$rT),
-          (SPUrotbytes_right_zfill (v16i8 VECREG:$rA), R32C:$rB))]>;
+// that the user knew to negate $rB.
+//
+// Using the SPUrotquad_rz_bytes target-specific DAG node, the patterns
+// ensure that $rB is negated.
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~

-def ROTQMBYIvec:
-  RI7Form<0b10111111100, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
-    "rotqmbyi\t$rT, $rA, $val", RotateShift,
-    [(set (v16i8 VECREG:$rT),
-          (SPUrotbytes_right_zfill (v16i8 VECREG:$rA), (i32 uimm7:$val)))]>;
+class ROTQMBYInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRForm<0b10111011100, OOL, IOL, "rotqmby\t$rT, $rA, $rB",
+           RotateShift, pattern>;

-def : Pat<(SPUrotbytes_right_zfill VECREG:$rA, (i16 uimm7:$val)),
-          (ROTQMBYIvec VECREG:$rA, uimm7:$val)>;
+class ROTQMBYVecInst<ValueType vectype>:
+    ROTQMBYInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
+                [/* no pattern, $rB must be negated */]>;

-def ROTQMBYBIvec:
-  RRForm<0b10110011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
-    "rotqmbybi\t$rT, $rA, $rB", RotateShift,
-    [/* intrinsic */]>;
+class ROTQMBYRegInst<RegisterClass rclass>:
+    ROTQMBYInst<(outs rclass:$rT), (ins rclass:$rA, R32C:$rB),
+                [(set rclass:$rT,
+                      (SPUrotquad_rz_bytes rclass:$rA, R32C:$rB))]>;

-def ROTQMBIvec:
-  RRForm<0b10011011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
-    "rotqmbi\t$rT, $rA, $rB", RotateShift,
-    [/* intrinsic */]>;
+multiclass RotateQuadBytes
+{
+  def v16i8: ROTQMBYVecInst<v16i8>;
+  def v8i16: ROTQMBYVecInst<v8i16>;
+  def v4i32: ROTQMBYVecInst<v4i32>;
+  def v2i64: ROTQMBYVecInst<v2i64>;

-def ROTQMBIIvec:
-  RI7Form<0b10011111100, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
-    "rotqmbii\t$rT, $rA, $val", RotateShift,
-    [/* intrinsic */]>;
+  def r128: ROTQMBYRegInst<GPRC>;
+  def r64: ROTQMBYRegInst<R64C>;
+}
+
+defm ROTQMBY : RotateQuadBytes;
+
+def : Pat<(SPUrotquad_rz_bytes (v16i8 VECREG:$rA), R32C:$rB),
+          (ROTQMBYv16i8 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;
+def : Pat<(SPUrotquad_rz_bytes (v8i16 VECREG:$rA), R32C:$rB),
+          (ROTQMBYv8i16 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;
+def : Pat<(SPUrotquad_rz_bytes (v4i32 VECREG:$rA), R32C:$rB),
+          (ROTQMBYv4i32 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;
+def : Pat<(SPUrotquad_rz_bytes (v2i64 VECREG:$rA), R32C:$rB),
+          (ROTQMBYv2i64 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;
+def : Pat<(SPUrotquad_rz_bytes GPRC:$rA, R32C:$rB),
+          (ROTQMBYr128 GPRC:$rA, (SFIr32 R32C:$rB, 0))>;
+def : Pat<(SPUrotquad_rz_bytes R64C:$rA, R32C:$rB),
+          (ROTQMBYr64 R64C:$rA, (SFIr32 R32C:$rB, 0))>;
+
+class ROTQMBYIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RI7Form<0b10111111100, OOL, IOL, "rotqmbyi\t$rT, $rA, $val",
+            RotateShift, pattern>;
+
+class ROTQMBYIVecInst<ValueType vectype>:
+    ROTQMBYIInst<(outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
+                 [(set (vectype VECREG:$rT),
+                       (SPUrotquad_rz_bytes (vectype VECREG:$rA),
+                                            (i32 uimm7:$val)))]>;
+
+class ROTQMBYIRegInst<RegisterClass rclass, Operand optype,
+                      ValueType inttype, PatLeaf pred>:
+    ROTQMBYIInst<(outs rclass:$rT), (ins rclass:$rA, optype:$val),
+                 [(set rclass:$rT,
+                       (SPUrotquad_rz_bytes rclass:$rA, (inttype pred:$val)))]>;
+
+multiclass RotateQuadBytesImm
+{
+  def v16i8: ROTQMBYIVecInst<v16i8>;
+  def v8i16: ROTQMBYIVecInst<v8i16>;
+  def v4i32: ROTQMBYIVecInst<v4i32>;
+  def v2i64: ROTQMBYIVecInst<v2i64>;
+
+  def r128: ROTQMBYIRegInst<GPRC, rotNeg7imm, i32, uimm7>;
+  def r64: ROTQMBYIRegInst<R64C, rotNeg7imm, i32, uimm7>;
+}
+
+defm ROTQMBYI : RotateQuadBytesImm;
+
+
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+// Rotate right and mask by bit count
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+
+class ROTQMBYBIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRForm<0b10110011100, OOL, IOL, "rotqmbybi\t$rT, $rA, $rB",
+           RotateShift, pattern>;
+
+class ROTQMBYBIVecInst<ValueType vectype>:
+    ROTQMBYBIInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+                  [/* no pattern, intrinsic? */]>;
+
+multiclass RotateMaskQuadByBitCount
+{
+  def v16i8: ROTQMBYBIVecInst<v16i8>;
+  def v8i16: ROTQMBYBIVecInst<v8i16>;
+  def v4i32: ROTQMBYBIVecInst<v4i32>;
+  def v2i64: ROTQMBYBIVecInst<v2i64>;
+}
+
+defm ROTQMBYBI: RotateMaskQuadByBitCount;
+
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+// Rotate quad and mask by bits
+// Note that the rotate amount has to be negated
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+
+class ROTQMBIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRForm<0b10011011100, OOL, IOL, "rotqmbi\t$rT, $rA, $rB",
+           RotateShift, pattern>;
+
+class ROTQMBIVecInst<ValueType vectype>:
+    ROTQMBIInst<(outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
+                [/* no pattern */]>;
+
+class ROTQMBIRegInst<RegisterClass rclass>:
+    ROTQMBIInst<(outs rclass:$rT), (ins rclass:$rA, R32C:$rB),
+                [/* no pattern */]>;
+
+multiclass RotateMaskQuadByBits
+{
+  def v16i8: ROTQMBIVecInst<v16i8>;
+  def v8i16: ROTQMBIVecInst<v8i16>;
+  def v4i32: ROTQMBIVecInst<v4i32>;
+  def v2i64: ROTQMBIVecInst<v2i64>;
+
+  def r128: ROTQMBIRegInst<GPRC>;
+  def r64: ROTQMBIRegInst<R64C>;
+}
+
+defm ROTQMBI: RotateMaskQuadByBits;
+
+def : Pat<(SPUrotquad_rz_bits (v16i8 VECREG:$rA), R32C:$rB),
+          (ROTQMBIv16i8 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;
+def : Pat<(SPUrotquad_rz_bits (v8i16 VECREG:$rA), R32C:$rB),
+          (ROTQMBIv8i16 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;
+def : Pat<(SPUrotquad_rz_bits (v4i32 VECREG:$rA), R32C:$rB),
+          (ROTQMBIv4i32 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;
+def : Pat<(SPUrotquad_rz_bits (v2i64 VECREG:$rA), R32C:$rB),
+          (ROTQMBIv2i64 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;
+def : Pat<(SPUrotquad_rz_bits GPRC:$rA, R32C:$rB),
+          (ROTQMBIr128 GPRC:$rA, (SFIr32 R32C:$rB, 0))>;
+def : Pat<(SPUrotquad_rz_bits R64C:$rA, R32C:$rB),
+          (ROTQMBIr64 R64C:$rA, (SFIr32 R32C:$rB, 0))>;
+
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+// Rotate quad and mask by bits, immediate
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+
+class ROTQMBIIInst<dag OOL, dag IOL, list<dag> pattern>:
+    RI7Form<0b10011111100, OOL, IOL, "rotqmbii\t$rT, $rA, $val",
+            RotateShift, pattern>;
+
+class ROTQMBIIVecInst<ValueType vectype>:
+    ROTQMBIIInst<(outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
+                 [(set (vectype VECREG:$rT),
+                       (SPUrotquad_rz_bits (vectype VECREG:$rA),
+                                           (i32 uimm7:$val)))]>;
+
+class ROTQMBIIRegInst<RegisterClass rclass>:
+    ROTQMBIIInst<(outs rclass:$rT), (ins rclass:$rA, rotNeg7imm:$val),
+                 [(set rclass:$rT,
+                       (SPUrotquad_rz_bits rclass:$rA, (i32 uimm7:$val)))]>;
+
+multiclass RotateMaskQuadByBitsImm
+{
+  def v16i8: ROTQMBIIVecInst<v16i8>;
+  def v8i16: ROTQMBIIVecInst<v8i16>;
+  def v4i32: ROTQMBIIVecInst<v4i32>;
+  def v2i64: ROTQMBIIVecInst<v2i64>;
+
+  def r128: ROTQMBIIRegInst<GPRC>;
+  def r64: ROTQMBIIRegInst<R64C>;
+}
+
+defm ROTQMBII: RotateMaskQuadByBitsImm;
+
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~

 def ROTMAHv8i16:
   RRForm<0b01111010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
     "rotmah\t$rT, $rA, $rB", RotateShift,
     [/* see patterns below - $rB must be negated */]>;
-def : Pat<(SPUvec_sra_v8i16 VECREG:$rA, R32C:$rB),
+def : Pat<(SPUvec_sra VECREG:$rA, R32C:$rB),
           (ROTMAHv8i16 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;

-def : Pat<(SPUvec_sra_v8i16 VECREG:$rA, R16C:$rB),
+def : Pat<(SPUvec_sra VECREG:$rA, R16C:$rB),
           (ROTMAHv8i16 VECREG:$rA,
                        (SFIr32 (XSHWr16 R16C:$rB), 0))>;

-def : Pat<(SPUvec_sra_v8i16 VECREG:$rA, R8C:$rB),
+def : Pat<(SPUvec_sra VECREG:$rA, R8C:$rB),
           (ROTMAHv8i16 VECREG:$rA,
                        (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;

@@ -2451,12 +2771,12 @@ def ROTMAHIv8i16:
   RRForm<0b01111110000, (outs VECREG:$rT), (ins VECREG:$rA, rothNeg7imm:$val),
     "rotmahi\t$rT, $rA, $val", RotateShift,
     [(set (v8i16 VECREG:$rT),
-          (SPUvec_sra_v8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val)))]>;
+          (SPUvec_sra (v8i16 VECREG:$rA), (i32 uimm7:$val)))]>;

-def : Pat<(SPUvec_sra_v8i16 (v8i16 VECREG:$rA), (i16 uimm7:$val)),
+def : Pat<(SPUvec_sra (v8i16 VECREG:$rA), (i16 uimm7:$val)),
           (ROTMAHIv8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val))>;

-def : Pat<(SPUvec_sra_v8i16 (v8i16 VECREG:$rA), (i8 uimm7:$val)),
+def : Pat<(SPUvec_sra (v8i16 VECREG:$rA), (i8 uimm7:$val)),
           (ROTMAHIv8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val))>;

 def ROTMAHIr16:
@@ -2475,14 +2795,14 @@ def ROTMAv4i32:
     "rotma\t$rT, $rA, $rB", RotateShift,
     [/* see patterns below - $rB must be negated */]>;

-def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, R32C:$rB),
+def : Pat<(SPUvec_sra VECREG:$rA, R32C:$rB),
           (ROTMAv4i32 (v4i32 VECREG:$rA), (SFIr32 R32C:$rB, 0))>;

-def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, R16C:$rB),
+def : Pat<(SPUvec_sra VECREG:$rA, R16C:$rB),
           (ROTMAv4i32 (v4i32 VECREG:$rA),
                       (SFIr32 (XSHWr16 R16C:$rB), 0))>;

-def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, R8C:$rB),
+def : Pat<(SPUvec_sra VECREG:$rA, R8C:$rB),
           (ROTMAv4i32 (v4i32 VECREG:$rA),
                       (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;

@@ -2506,9 +2826,9 @@ def ROTMAIv4i32:
   RRForm<0b01011110000, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
     "rotmai\t$rT, $rA, $val", RotateShift,
     [(set (v4i32 VECREG:$rT),
-          (SPUvec_sra_v4i32 VECREG:$rA, (i32 uimm7:$val)))]>;
+          (SPUvec_sra VECREG:$rA, (i32 uimm7:$val)))]>;

-def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, (i16 uimm7:$val)),
+def : Pat<(SPUvec_sra VECREG:$rA, (i16 uimm7:$val)),
           (ROTMAIv4i32 VECREG:$rA, uimm7:$val)>;

 def ROTMAIr32:
@@ -2561,66 +2881,366 @@ let isTerminator = 1, isBarrier = 1 in {
     [/* no pattern to match */]>;
 }

+//------------------------------------------------------------------------
 // Comparison operators:
-def CEQBr8:
-  RRForm<0b00001011110, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
-    "ceqb\t$rT, $rA, $rB", ByteOp,
-    [/* no pattern to match */]>;
+//------------------------------------------------------------------------

-def CEQBv16i8:
-  RRForm<0b00001011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
-    "ceqb\t$rT, $rA, $rB", ByteOp,
-    [/* no pattern to match: intrinsic */]>;
+class CEQBInst<dag OOL, dag IOL, list<dag> pattern> :
+    RRForm<0b00001011110, OOL, IOL, "ceqb\t$rT, $rA, $rB",
+           ByteOp, pattern>;

-def CEQBIr8:
-  RI10Form<0b01111110, (outs R8C:$rT), (ins R8C:$rA, s7imm_i8:$val),
-    "ceqbi\t$rT, $rA, $val", ByteOp,
-    [/* no pattern to match: intrinsic */]>;
+multiclass CmpEqualByte
+{
+  def v16i8 :
+    CEQBInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+             [(set (v16i8 VECREG:$rT), (seteq (v16i8 VECREG:$rA),
+                                              (v16i8 VECREG:$rB)))]>;

-def CEQBIv16i8:
-  RI10Form<0b01111110, (outs VECREG:$rT), (ins VECREG:$rA, s7imm_i8:$val),
-    "ceqbi\t$rT, $rA, $val", ByteOp,
-    [/* no pattern to match: intrinsic */]>;
+  def r8 :
+    CEQBInst<(outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
+             [(set R8C:$rT, (seteq R8C:$rA, R8C:$rB))]>;
+}
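
SPU compares produce element-wide masks (all ones for true, all zeros for false) rather than condition flags; the scalar r8 form is simply the preferred-slot byte of the same operation. A C++ model of the byte case (illustrative only):

#include <cstdint>
#include <cstddef>

void ceqb(const uint8_t rA[16], const uint8_t rB[16], uint8_t rT[16]) {
  for (size_t i = 0; i < 16; ++i)
    rT[i] = (rA[i] == rB[i]) ? 0xFF : 0x00;  // per-element truth mask
}

The mask form is what makes the SETCC and branch synthesis further down work: masks for gt and eq can be OR'd, NOR'd, or inverted to obtain the conditions the ISA lacks.
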

-def CEQHr16:
-  RRForm<0b00010011110, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
-    "ceqh\t$rT, $rA, $rB", ByteOp,
-    [/* no pattern to match */]>;
+class CEQBIInst<dag OOL, dag IOL, list<dag> pattern> :
+    RI10Form<0b01111110, OOL, IOL, "ceqbi\t$rT, $rA, $val",
+             ByteOp, pattern>;

-def CEQHv8i16:
-  RRForm<0b00010011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
-    "ceqh\t$rT, $rA, $rB", ByteOp,
-    [/* no pattern to match: intrinsic */]>;
+multiclass CmpEqualByteImm
+{
+  def v16i8 :
+    CEQBIInst<(outs VECREG:$rT), (ins VECREG:$rA, s10imm_i8:$val),
+              [(set (v16i8 VECREG:$rT), (seteq (v16i8 VECREG:$rA),
+                                               v16i8SExt8Imm:$val))]>;
+  def r8:
+    CEQBIInst<(outs R8C:$rT), (ins R8C:$rA, s10imm_i8:$val),
+              [(set R8C:$rT, (seteq R8C:$rA, immSExt8:$val))]>;
+}

-def CEQHIr16:
-  RI10Form<0b10111110, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
-    "ceqhi\t$rT, $rA, $val", ByteOp,
-    [/* no pattern to match: intrinsic */]>;
+class CEQHInst<dag OOL, dag IOL, list<dag> pattern> :
+    RRForm<0b00010011110, OOL, IOL, "ceqh\t$rT, $rA, $rB",
+           ByteOp, pattern>;

-def CEQHIv8i16:
-  RI10Form<0b10111110, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
-    "ceqhi\t$rT, $rA, $val", ByteOp,
-    [/* no pattern to match: intrinsic */]>;
+multiclass CmpEqualHalfword
+{
+  def v8i16 : CEQHInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+                       [(set (v8i16 VECREG:$rT), (seteq (v8i16 VECREG:$rA),
+                                                        (v8i16 VECREG:$rB)))]>;

-def CEQr32:
-  RRForm<0b00000011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
-    "ceq\t$rT, $rA, $rB", ByteOp,
-    [(set R32C:$rT, (seteq R32C:$rA, R32C:$rB))]>;
+  def r16 : CEQHInst<(outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
+                     [(set R16C:$rT, (seteq R16C:$rA, R16C:$rB))]>;
+}

-def CEQv4i32:
-  RRForm<0b00000011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
-    "ceq\t$rT, $rA, $rB", ByteOp,
-    [(set (v4i32 VECREG:$rT), (seteq (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;
+class CEQHIInst<dag OOL, dag IOL, list<dag> pattern> :
+    RI10Form<0b10111110, OOL, IOL, "ceqhi\t$rT, $rA, $val",
+             ByteOp, pattern>;

-def CEQIr32:
-  RI10Form<0b00111110, (outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
-    "ceqi\t$rT, $rA, $val", ByteOp,
-    [(set R32C:$rT, (seteq R32C:$rA, i32ImmSExt10:$val))]>;
+multiclass CmpEqualHalfwordImm
+{
+  def v8i16 : CEQHIInst<(outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
+                        [(set (v8i16 VECREG:$rT),
+                              (seteq (v8i16 VECREG:$rA),
+                                     (v8i16 v8i16SExt10Imm:$val)))]>;
+  def r16 : CEQHIInst<(outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
+                      [(set R16C:$rT, (seteq R16C:$rA, i16ImmSExt10:$val))]>;
+}

-def CEQIv4i32:
-  RI10Form<0b00111110, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
-    "ceqi\t$rT, $rA, $val", ByteOp,
-    [/* no pattern to match: intrinsic */]>;
+class CEQInst<dag OOL, dag IOL, list<dag> pattern> :
+    RRForm<0b00000011110, OOL, IOL, "ceq\t$rT, $rA, $rB",
+           ByteOp, pattern>;
+
+multiclass CmpEqualWord
+{
+  def v4i32 : CEQInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+                      [(set (v4i32 VECREG:$rT),
+                            (seteq (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;
+
+  def r32 : CEQInst<(outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
+                    [(set R32C:$rT, (seteq R32C:$rA, R32C:$rB))]>;
+}
+
+class CEQIInst<dag OOL, dag IOL, list<dag> pattern> :
+    RI10Form<0b00111110, OOL, IOL, "ceqi\t$rT, $rA, $val",
+             ByteOp, pattern>;
+
+multiclass CmpEqualWordImm
+{
+  def v4i32 : CEQIInst<(outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
+                       [(set (v4i32 VECREG:$rT),
+                             (seteq (v4i32 VECREG:$rA),
+                                    (v4i32 v4i32SExt16Imm:$val)))]>;
+
+  def r32: CEQIInst<(outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
+                    [(set R32C:$rT, (seteq R32C:$rA, i32ImmSExt10:$val))]>;
+}
+
+class CGTBInst<dag OOL, dag IOL, list<dag> pattern> :
+    RRForm<0b00001010010, OOL, IOL, "cgtb\t$rT, $rA, $rB",
+           ByteOp, pattern>;
+
+multiclass CmpGtrByte
+{
+  def v16i8 :
+    CGTBInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+             [(set (v16i8 VECREG:$rT), (setgt (v16i8 VECREG:$rA),
+                                              (v16i8 VECREG:$rB)))]>;
+
+  def r8 :
+    CGTBInst<(outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
+             [(set R8C:$rT, (setgt R8C:$rA, R8C:$rB))]>;
+}
+
+class CGTBIInst<dag OOL, dag IOL, list<dag> pattern> :
+    RI10Form<0b01110010, OOL, IOL, "cgtbi\t$rT, $rA, $val",
+             ByteOp, pattern>;
+
+multiclass CmpGtrByteImm
+{
+  def v16i8 :
+    CGTBIInst<(outs VECREG:$rT), (ins VECREG:$rA, s10imm_i8:$val),
+              [(set (v16i8 VECREG:$rT), (setgt (v16i8 VECREG:$rA),
+                                               v16i8SExt8Imm:$val))]>;
+  def r8:
+    CGTBIInst<(outs R8C:$rT), (ins R8C:$rA, s10imm_i8:$val),
+              [(set R8C:$rT, (setgt R8C:$rA, immSExt8:$val))]>;
+}
+
+class CGTHInst<dag OOL, dag IOL, list<dag> pattern> :
+    RRForm<0b00010010010, OOL, IOL, "cgth\t$rT, $rA, $rB",
+           ByteOp, pattern>;
+
+multiclass CmpGtrHalfword
+{
+  def v8i16 : CGTHInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+                       [(set (v8i16 VECREG:$rT), (setgt (v8i16 VECREG:$rA),
+                                                        (v8i16 VECREG:$rB)))]>;
+
+  def r16 : CGTHInst<(outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
+                     [(set R16C:$rT, (setgt R16C:$rA, R16C:$rB))]>;
+}
+
+class CGTHIInst<dag OOL, dag IOL, list<dag> pattern> :
+    RI10Form<0b10110010, OOL, IOL, "cgthi\t$rT, $rA, $val",
+             ByteOp, pattern>;
+
+multiclass CmpGtrHalfwordImm
+{
+  def v8i16 : CGTHIInst<(outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
+                        [(set (v8i16 VECREG:$rT),
+                              (setgt (v8i16 VECREG:$rA),
+                                     (v8i16 v8i16SExt10Imm:$val)))]>;
+  def r16 : CGTHIInst<(outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
+                      [(set R16C:$rT, (setgt R16C:$rA, i16ImmSExt10:$val))]>;
+}
+
+class CGTInst<dag OOL, dag IOL, list<dag> pattern> :
+    RRForm<0b00000010010, OOL, IOL, "cgt\t$rT, $rA, $rB",
+           ByteOp, pattern>;
+
+multiclass CmpGtrWord
+{
+  def v4i32 : CGTInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+                      [(set (v4i32 VECREG:$rT),
+                            (setgt (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;
+
+  def r32 : CGTInst<(outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
+                    [(set R32C:$rT, (setgt R32C:$rA, R32C:$rB))]>;
+}
+
+class CGTIInst<dag OOL, dag IOL, list<dag> pattern> :
+    RI10Form<0b00110010, OOL, IOL, "cgti\t$rT, $rA, $val",
+             ByteOp, pattern>;
+
+multiclass CmpGtrWordImm
+{
+  def v4i32 : CGTIInst<(outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
+                       [(set (v4i32 VECREG:$rT),
+                             (setgt (v4i32 VECREG:$rA),
+                                    (v4i32 v4i32SExt16Imm:$val)))]>;
+
+  def r32: CGTIInst<(outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
+                    [(set R32C:$rT, (setgt R32C:$rA, i32ImmSExt10:$val))]>;
+}
+
+class CLGTBInst<dag OOL, dag IOL, list<dag> pattern> :
+    RRForm<0b00001011010, OOL, IOL, "clgtb\t$rT, $rA, $rB",
+           ByteOp, pattern>;
+
+multiclass CmpLGtrByte
+{
+  def v16i8 :
+    CLGTBInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+              [(set (v16i8 VECREG:$rT), (setugt (v16i8 VECREG:$rA),
+                                                (v16i8 VECREG:$rB)))]>;
+
+  def r8 :
+    CLGTBInst<(outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
+              [(set R8C:$rT, (setugt R8C:$rA, R8C:$rB))]>;
+}
+
+class CLGTBIInst<dag OOL, dag IOL, list<dag> pattern> :
+    RI10Form<0b01111010, OOL, IOL, "clgtbi\t$rT, $rA, $val",
+             ByteOp, pattern>;
+
+multiclass CmpLGtrByteImm
+{
+  def v16i8 :
+    CLGTBIInst<(outs VECREG:$rT), (ins VECREG:$rA, s10imm_i8:$val),
+               [(set (v16i8 VECREG:$rT), (setugt (v16i8 VECREG:$rA),
+                                                 v16i8SExt8Imm:$val))]>;
+  def r8:
+    CLGTBIInst<(outs R8C:$rT), (ins R8C:$rA, s10imm_i8:$val),
+               [(set R8C:$rT, (setugt R8C:$rA, immSExt8:$val))]>;
+}
+
+class CLGTHInst<dag OOL, dag IOL, list<dag> pattern> :
+    RRForm<0b00010011010, OOL, IOL, "clgth\t$rT, $rA, $rB",
+           ByteOp, pattern>;
+
+multiclass CmpLGtrHalfword
+{
+  def v8i16 : CLGTHInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+                        [(set (v8i16 VECREG:$rT), (setugt (v8i16 VECREG:$rA),
+                                                          (v8i16 VECREG:$rB)))]>;
+
+  def r16 : CLGTHInst<(outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
+                      [(set R16C:$rT, (setugt R16C:$rA, R16C:$rB))]>;
+}
+
+class CLGTHIInst<dag OOL, dag IOL, list<dag> pattern> :
+    RI10Form<0b10111010, OOL, IOL, "clgthi\t$rT, $rA, $val",
+             ByteOp, pattern>;
+
+multiclass CmpLGtrHalfwordImm
+{
+  def v8i16 : CLGTHIInst<(outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
+                         [(set (v8i16 VECREG:$rT),
+                               (setugt (v8i16 VECREG:$rA),
+                                       (v8i16 v8i16SExt10Imm:$val)))]>;
+  def r16 : CLGTHIInst<(outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
+                       [(set R16C:$rT, (setugt R16C:$rA, i16ImmSExt10:$val))]>;
+}
+
+class CLGTInst<dag OOL, dag IOL, list<dag> pattern> :
+    RRForm<0b00000011010, OOL, IOL, "clgt\t$rT, $rA, $rB",
+           ByteOp, pattern>;
+
+multiclass CmpLGtrWord
+{
+  def v4i32 : CLGTInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+                       [(set (v4i32 VECREG:$rT),
+                             (setugt (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;
+
+  def r32 : CLGTInst<(outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
+                     [(set R32C:$rT, (setugt R32C:$rA, R32C:$rB))]>;
+}
+
+class CLGTIInst<dag OOL, dag IOL, list<dag> pattern> :
+    RI10Form<0b00111010, OOL, IOL, "clgti\t$rT, $rA, $val",
+             ByteOp, pattern>;
+
+multiclass CmpLGtrWordImm
+{
+  def v4i32 : CLGTIInst<(outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
+                        [(set (v4i32 VECREG:$rT),
+                              (setugt (v4i32 VECREG:$rA),
+                                      (v4i32 v4i32SExt16Imm:$val)))]>;
+
+  def r32: CLGTIInst<(outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
+                     [(set R32C:$rT, (setugt R32C:$rA, i32ImmSExt10:$val))]>;
+}
+
+defm CEQB   : CmpEqualByte;
+defm CEQBI  : CmpEqualByteImm;
+defm CEQH   : CmpEqualHalfword;
+defm CEQHI  : CmpEqualHalfwordImm;
+defm CEQ    : CmpEqualWord;
+defm CEQI   : CmpEqualWordImm;
+defm CGTB   : CmpGtrByte;
+defm CGTBI  : CmpGtrByteImm;
+defm CGTH   : CmpGtrHalfword;
+defm CGTHI  : CmpGtrHalfwordImm;
+defm CGT    : CmpGtrWord;
+defm CGTI   : CmpGtrWordImm;
+defm CLGTB  : CmpLGtrByte;
+defm CLGTBI : CmpLGtrByteImm;
+defm CLGTH  : CmpLGtrHalfword;
+defm CLGTHI : CmpLGtrHalfwordImm;
+defm CLGT   : CmpLGtrWord;
+defm CLGTI  : CmpLGtrWordImm;
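
The SETCC patterns defined next compose the conditions the ISA lacks out of the native gt/eq masks. A scalar C++ sketch of the three compositions used below (byte case; 0xFF is the true mask):

#include <cstdint>

uint8_t cgtb(int8_t a, int8_t b) { return a > b  ? 0xFF : 0x00; }
uint8_t ceqb(int8_t a, int8_t b) { return a == b ? 0xFF : 0x00; }

// setge: OR the two masks           (SETCCBinOpReg with OR)
uint8_t setge(int8_t a, int8_t b) { return cgtb(a, b) | ceqb(a, b); }
// setlt: NOR of the two masks       (SETCCBinOpReg with NOR)
uint8_t setlt(int8_t a, int8_t b) { return uint8_t(~(cgtb(a, b) | ceqb(a, b))); }
// setle: invert gt with xor 0xff    (the XORBIr8-based patterns)
uint8_t setle(int8_t a, int8_t b) { return uint8_t(cgtb(a, b) ^ 0xFF); }
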
+
+// For SETCC primitives not supported above (setlt, setle, setge, etc.)
+// define a pattern to generate the right code, as a binary operator
+// (in a manner of speaking.)
+
+class SETCCNegCond<PatFrag cond, RegisterClass rclass, dag pattern>:
+    Pat<(cond rclass:$rA, rclass:$rB), pattern>;
+
+class SETCCBinOpReg<PatFrag cond, RegisterClass rclass,
+                    SPUInstr binop, SPUInstr cmpOp1, SPUInstr cmpOp2>:
+    Pat<(cond rclass:$rA, rclass:$rB),
+        (binop (cmpOp1 rclass:$rA, rclass:$rB),
+               (cmpOp2 rclass:$rA, rclass:$rB))>;
+
+class SETCCBinOpImm<PatFrag cond, RegisterClass rclass, PatLeaf immpred,
+                    ValueType immtype,
+                    SPUInstr binop, SPUInstr cmpOp1, SPUInstr cmpOp2>:
+    Pat<(cond rclass:$rA, (immtype immpred:$imm)),
+        (binop (cmpOp1 rclass:$rA, (immtype immpred:$imm)),
+               (cmpOp2 rclass:$rA, (immtype immpred:$imm)))>;
+
+def CGTEQBr8: SETCCBinOpReg<setge, R8C, ORr8, CGTBr8, CEQBr8>;
+def CGTEQBIr8: SETCCBinOpImm<setge, R8C, immSExt8, i8, ORr8, CGTBIr8, CEQBIr8>;
+def CLTBr8: SETCCBinOpReg<setlt, R8C, NORr8, CGTBr8, CEQBr8>;
+def CLTBIr8: SETCCBinOpImm<setlt, R8C, immSExt8, i8, NORr8, CGTBIr8, CEQBIr8>;
+def CLTEQr8: Pat<(setle R8C:$rA, R8C:$rB),
+                 (XORBIr8 (CGTBr8 R8C:$rA, R8C:$rB), 0xff)>;
+def CLTEQIr8: Pat<(setle R8C:$rA, immU8:$imm),
+                  (XORBIr8 (CGTBIr8 R8C:$rA, immU8:$imm), 0xff)>;
+
+def CGTEQHr16: SETCCBinOpReg<setge, R16C, ORr16, CGTHr16, CEQHr16>;
+def CGTEQHIr16: SETCCBinOpImm<setge, R16C, i16ImmSExt10, i16,
+                              ORr16, CGTHIr16, CEQHIr16>;
+def CLTEQr16: Pat<(setle R16C:$rA, R16C:$rB),
+                  (XORHIr16 (CGTHr16 R16C:$rA, R16C:$rB), 0xffff)>;
+def CLTEQIr16: Pat<(setle R16C:$rA, i16ImmUns10:$imm),
+                   (XORHIr16 (CGTHIr16 R16C:$rA, i16ImmSExt10:$imm), 0xffff)>;
+
+
+def CGTEQHr32: SETCCBinOpReg<setge, R32C, ORr32, CGTr32, CEQr32>;
+def CGTEQHIr32: SETCCBinOpImm<setge, R32C, i32ImmSExt10, i32,
+                              ORr32, CGTIr32, CEQIr32>;
+def CLTEQr32: Pat<(setle R32C:$rA, R32C:$rB),
+                  (XORIr32 (CGTr32 R32C:$rA, R32C:$rB), 0xffffffff)>;
+def CLTEQIr32: Pat<(setle R32C:$rA, i32ImmUns10:$imm),
+                   (XORIr32 (CGTIr32 R32C:$rA, i32ImmSExt10:$imm), 0xffffffff)>;
+
+def CLGTEQBr8: SETCCBinOpReg<setuge, R8C, ORr8, CLGTBr8, CEQBr8>;
+def CLGTEQBIr8: SETCCBinOpImm<setuge, R8C, immSExt8, i8, ORr8, CLGTBIr8, CEQBIr8>;
+def CLLTBr8: SETCCBinOpReg<setult, R8C, NORr8, CLGTBr8, CEQBr8>;
+def CLLTBIr8: SETCCBinOpImm<setult, R8C, immSExt8, i8, NORr8, CLGTBIr8, CEQBIr8>;
+def CLLTEQr8: Pat<(setule R8C:$rA, R8C:$rB),
+                  (XORBIr8 (CLGTBr8 R8C:$rA, R8C:$rB), 0xff)>;
+def CLLTEQIr8: Pat<(setule R8C:$rA, immU8:$imm),
+                   (XORBIr8 (CLGTBIr8 R8C:$rA, immU8:$imm), 0xff)>;
+
+def CLGTEQHr16: SETCCBinOpReg<setuge, R16C, ORr16, CLGTHr16, CEQHr16>;
+def CLGTEQHIr16: SETCCBinOpImm<setuge, R16C, i16ImmSExt10, i16,
+                               ORr16, CLGTHIr16, CEQHIr16>;
+def CLLTEQr16: Pat<(setule R16C:$rA, R16C:$rB),
+                   (XORHIr16 (CLGTHr16 R16C:$rA, R16C:$rB), 0xffff)>;
+def CLLTEQIr16: Pat<(setule R16C:$rA, i16ImmUns10:$imm),
+                    (XORHIr16 (CLGTHIr16 R16C:$rA, i16ImmSExt10:$imm), 0xffff)>;
+
+
+def CLGTEQHr32: SETCCBinOpReg<setuge, R32C, ORr32, CLGTr32, CEQr32>;
+def CLGTEQHIr32: SETCCBinOpImm<setuge, R32C, i32ImmSExt10, i32,
+                               ORr32, CLGTIr32, CEQIr32>;
+def CLLTEQr32: Pat<(setule R32C:$rA, R32C:$rB),
+                   (XORIr32 (CLGTr32 R32C:$rA, R32C:$rB), 0xffffffff)>;
+def CLLTEQIr32: Pat<(setule R32C:$rA, i32ImmUns10:$imm),
+                    (XORIr32 (CLGTIr32 R32C:$rA, i32ImmSExt10:$imm), 0xffffffff)>;
+
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~

 let isCall = 1,
   // All calls clobber the non-callee-saved registers:
@@ -2720,23 +3340,121 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in {

 def : Pat<(brcond (i16 (seteq R16C:$rA, 0)), bb:$dest),
           (BRHZ R16C:$rA, bb:$dest)>;

-def : Pat<(brcond (i32 (seteq R32C:$rA, 0)), bb:$dest),
-          (BRZ R32C:$rA, bb:$dest)>;
-
 def : Pat<(brcond (i16 (setne R16C:$rA, 0)), bb:$dest),
           (BRHNZ R16C:$rA, bb:$dest)>;
+
+def : Pat<(brcond (i32 (seteq R32C:$rA, 0)), bb:$dest),
+          (BRZ R32C:$rA, bb:$dest)>;

 def : Pat<(brcond (i32 (setne R32C:$rA, 0)), bb:$dest),
           (BRNZ R32C:$rA, bb:$dest)>;

-def : Pat<(brcond (i16 (setne R16C:$rA, i16ImmSExt10:$val)), bb:$dest),
-          (BRHNZ (CEQHIr16 R16C:$rA, i16ImmSExt10:$val), bb:$dest)>;
-def : Pat<(brcond (i32 (setne R32C:$rA, i32ImmSExt10:$val)), bb:$dest),
-          (BRNZ (CEQIr32 R32C:$rA, i32ImmSExt10:$val), bb:$dest)>;
+multiclass BranchCondEQ<PatFrag cond, SPUInstr brinst16, SPUInstr brinst32>
+{
+  def r16imm: Pat<(brcond (i16 (cond R16C:$rA, i16ImmSExt10:$val)), bb:$dest),
+                  (brinst16 (CEQHIr16 R16C:$rA, i16ImmSExt10:$val), bb:$dest)>;

-def : Pat<(brcond (i16 (setne R16C:$rA, R16C:$rB)), bb:$dest),
-          (BRHNZ (CEQHr16 R16C:$rA, R16:$rB), bb:$dest)>;
-def : Pat<(brcond (i32 (setne R32C:$rA, R32C:$rB)), bb:$dest),
-          (BRNZ (CEQr32 R32C:$rA, R32C:$rB), bb:$dest)>;
+  def r16 : Pat<(brcond (i16 (cond R16C:$rA, R16C:$rB)), bb:$dest),
+                (brinst16 (CEQHr16 R16C:$rA, R16C:$rB), bb:$dest)>;
+
+  def r32imm : Pat<(brcond (i32 (cond R32C:$rA, i32ImmSExt10:$val)), bb:$dest),
+                   (brinst32 (CEQIr32 R32C:$rA, i32ImmSExt10:$val), bb:$dest)>;
+
+  def r32 : Pat<(brcond (i32 (cond R32C:$rA, R32C:$rB)), bb:$dest),
+                (brinst32 (CEQr32 R32C:$rA, R32C:$rB), bb:$dest)>;
+}
+
+defm BRCONDeq : BranchCondEQ<seteq, BRHNZ, BRNZ>;
+defm BRCONDne : BranchCondEQ<setne, BRHZ, BRZ>;
+
+multiclass BranchCondLGT<PatFrag cond, SPUInstr brinst16, SPUInstr brinst32>
+{
+  def r16imm : Pat<(brcond (i16 (cond R16C:$rA, i16ImmSExt10:$val)), bb:$dest),
+                   (brinst16 (CLGTHIr16 R16C:$rA, i16ImmSExt10:$val), bb:$dest)>;
+
+  def r16 : Pat<(brcond (i16 (cond R16C:$rA, R16C:$rB)), bb:$dest),
+                (brinst16 (CLGTHr16 R16C:$rA, R16C:$rB), bb:$dest)>;
+
+  def r32imm : Pat<(brcond (i32 (cond R32C:$rA, i32ImmSExt10:$val)), bb:$dest),
+                   (brinst32 (CLGTIr32 R32C:$rA, i32ImmSExt10:$val), bb:$dest)>;
+
+  def r32 : Pat<(brcond (i32 (cond R32C:$rA, R32C:$rB)), bb:$dest),
+                (brinst32 (CLGTr32 R32C:$rA, R32C:$rB), bb:$dest)>;
+}
+
+defm BRCONDugt : BranchCondLGT<setugt, BRHNZ, BRNZ>;
+defm BRCONDule : BranchCondLGT<setule, BRHZ, BRZ>;
+
+multiclass BranchCondLGTEQ<PatFrag cond, SPUInstr orinst16, SPUInstr brinst16,
+                           SPUInstr orinst32, SPUInstr brinst32>
+{
+  def r16imm: Pat<(brcond (i16 (cond R16C:$rA, i16ImmSExt10:$val)), bb:$dest),
+                  (brinst16 (orinst16 (CLGTHIr16 R16C:$rA, i16ImmSExt10:$val),
+                                      (CEQHIr16 R16C:$rA, i16ImmSExt10:$val)),
+                            bb:$dest)>;
+
+  def r16: Pat<(brcond (i16 (cond R16C:$rA, R16C:$rB)), bb:$dest),
+               (brinst16 (orinst16 (CLGTHr16 R16C:$rA, R16C:$rB),
+                                   (CEQHr16 R16C:$rA, R16C:$rB)),
+                         bb:$dest)>;
+
+  def r32imm : Pat<(brcond (i32 (cond R32C:$rA, i32ImmSExt10:$val)), bb:$dest),
+                   (brinst32 (orinst32 (CLGTIr32 R32C:$rA, i32ImmSExt10:$val),
+                                       (CEQIr32 R32C:$rA, i32ImmSExt10:$val)),
+                             bb:$dest)>;
+
+  def r32 : Pat<(brcond (i32 (cond R32C:$rA, R32C:$rB)), bb:$dest),
+                (brinst32 (orinst32 (CLGTr32 R32C:$rA, R32C:$rB),
+                                    (CEQr32 R32C:$rA, R32C:$rB)),
+                          bb:$dest)>;
+}
+
+defm BRCONDuge : BranchCondLGTEQ<setuge, ORr16, BRHNZ, ORr32, BRNZ>;
+defm BRCONDult : BranchCondLGTEQ<setult, ORr16, BRHZ, ORr32, BRZ>;
+
+multiclass BranchCondGT<PatFrag cond, SPUInstr brinst16, SPUInstr brinst32>
+{
+  def r16imm : Pat<(brcond (i16 (cond R16C:$rA, i16ImmSExt10:$val)), bb:$dest),
+                   (brinst16 (CGTHIr16 R16C:$rA, i16ImmSExt10:$val), bb:$dest)>;
+
+  def r16 : Pat<(brcond (i16 (cond R16C:$rA, R16C:$rB)), bb:$dest),
+                (brinst16 (CGTHr16 R16C:$rA, R16C:$rB), bb:$dest)>;
+
+  def r32imm : Pat<(brcond (i32 (cond R32C:$rA, i32ImmSExt10:$val)), bb:$dest),
+                   (brinst32 (CGTIr32 R32C:$rA, i32ImmSExt10:$val), bb:$dest)>;
+
+  def r32 : Pat<(brcond (i32 (cond R32C:$rA, R32C:$rB)), bb:$dest),
+                (brinst32 (CGTr32 R32C:$rA, R32C:$rB), bb:$dest)>;
+}
+
+defm BRCONDgt : BranchCondGT<setgt, BRHNZ, BRNZ>;
+defm BRCONDle : BranchCondGT<setle, BRHZ, BRZ>;
+
+multiclass BranchCondGTEQ<PatFrag cond, SPUInstr orinst16, SPUInstr brinst16,
+                          SPUInstr orinst32, SPUInstr brinst32>
+{
+  def r16imm: Pat<(brcond (i16 (cond R16C:$rA, i16ImmSExt10:$val)), bb:$dest),
+                  (brinst16 (orinst16 (CGTHIr16 R16C:$rA, i16ImmSExt10:$val),
+                                      (CEQHIr16 R16C:$rA, i16ImmSExt10:$val)),
+                            bb:$dest)>;
+
+  def r16: Pat<(brcond (i16 (cond R16C:$rA, R16C:$rB)), bb:$dest),
+               (brinst16 (orinst16 (CGTHr16 R16C:$rA, R16C:$rB),
+                                   (CEQHr16 R16C:$rA, R16C:$rB)),
+                         bb:$dest)>;
+
+  def r32imm : Pat<(brcond (i32 (cond R32C:$rA, i32ImmSExt10:$val)), bb:$dest),
+                   (brinst32 (orinst32 (CGTIr32 R32C:$rA, i32ImmSExt10:$val),
+                                       (CEQIr32 R32C:$rA, i32ImmSExt10:$val)),
+                             bb:$dest)>;
+
+  def r32 : Pat<(brcond (i32 (cond R32C:$rA, R32C:$rB)), bb:$dest),
+                (brinst32 (orinst32 (CGTr32 R32C:$rA, R32C:$rB),
+                                    (CEQr32 R32C:$rA, R32C:$rB)),
+                          bb:$dest)>;
+}
+
+defm BRCONDge : BranchCondGTEQ<setge, ORr16, BRHNZ, ORr32, BRNZ>;
+defm BRCONDlt : BranchCondGTEQ<setlt, ORr16, BRHZ, ORr32, BRZ>;
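
The BranchCond* multiclasses above all follow one recipe: materialize the condition as an all-ones/all-zeros mask, then branch on the mask being nonzero (BRNZ/BRHNZ) or zero (BRZ/BRHZ). A scalar C++ sketch of the BRCONDge r32 case (illustrative only):

#include <cstdint>

uint32_t cgt(int32_t a, int32_t b) { return a > b  ? ~0u : 0u; }
uint32_t ceq(int32_t a, int32_t b) { return a == b ? ~0u : 0u; }

bool take_branch_ge(int32_t a, int32_t b) {
  uint32_t mask = cgt(a, b) | ceq(a, b); // (orinst32 (CGTr32 ...) (CEQr32 ...))
  return mask != 0;                      // BRNZ: branch when nonzero
}

The inverted conditions (lt, le, ult, ule) reuse the same compare sequence but branch with BRZ/BRHZ instead, so no extra inversion instruction is needed.
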
 
 let isTerminator = 1, isBarrier = 1 in {
   let isReturn = 1 in {
@@ -3165,7 +3883,7 @@ def : Pat<(fabs (v2f64 VECREG:$rA)),
 // in the odd pipeline)
 //===----------------------------------------------------------------------===//
 
-def ENOP : I<(outs), (ins), "enop", ExecNOP> {
+def ENOP : SPUInstr<(outs), (ins), "enop", ExecNOP> {
   let Pattern = [];
 
   let Inst{0-10} = 0b10000000010;
@@ -3174,7 +3892,7 @@ def ENOP : I<(outs), (ins), "enop", ExecNOP> {
   let Inst{25-31} = 0;
 }
 
-def LNOP : I<(outs), (ins), "lnop", LoadNOP> {
+def LNOP : SPUInstr<(outs), (ins), "lnop", LoadNOP> {
   let Pattern = [];
 
   let Inst{0-10} = 0b10000000000;
@@ -3276,7 +3994,7 @@ def : Pat<(SPUextract_i8_sext VECREG:$rSrc),
 
 // zext 8->16: Zero extend bytes to halfwords
 def : Pat<(i16 (zext R8C:$rSrc)),
-          (ANDHI1To2 R8C:$rSrc, 0xff)>;
+          (ANDHIi8i16 R8C:$rSrc, 0xff)>;
 
 // zext 8->32 from preferred slot in load/store
 def : Pat<(SPUextract_i8_zext VECREG:$rSrc),
@@ -3285,33 +4003,32 @@ def : Pat<(SPUextract_i8_zext VECREG:$rSrc),
 
 // zext 8->32: Zero extend bytes to words
 def : Pat<(i32 (zext R8C:$rSrc)),
-          (ANDI1To4 R8C:$rSrc, 0xff)>;
+          (ANDIi8i32 R8C:$rSrc, 0xff)>;
 
 // anyext 8->16: Extend 8->16 bits, irrespective of sign
 def : Pat<(i16 (anyext R8C:$rSrc)),
-          (ORHI1To2 R8C:$rSrc, 0)>;
+          (ORHIi8i16 R8C:$rSrc, 0)>;
 
 // anyext 8->32: Extend 8->32 bits, irrespective of sign
 def : Pat<(i32 (anyext R8C:$rSrc)),
-          (ORI1To4 R8C:$rSrc, 0)>;
+          (ORIi8i32 R8C:$rSrc, 0)>;
 
-// zext 16->32: Zero extend halfwords to words (note that we have to juggle the
-// 0xffff constant since it will not fit into an immediate.)
+// zext 16->32: Zero extend halfwords to words
 def : Pat<(i32 (zext R16C:$rSrc)),
-          (AND2To4 R16C:$rSrc, (ILAr32 0xffff))>;
+          (ANDi16i32 R16C:$rSrc, (ILAr32 0xffff))>;
 
 def : Pat<(i32 (zext (and R16C:$rSrc, 0xf))),
-          (ANDI2To4 R16C:$rSrc, 0xf)>;
+          (ANDIi16i32 R16C:$rSrc, 0xf)>;
 
 def : Pat<(i32 (zext (and R16C:$rSrc, 0xff))),
-          (ANDI2To4 R16C:$rSrc, 0xff)>;
+          (ANDIi16i32 R16C:$rSrc, 0xff)>;
 
 def : Pat<(i32 (zext (and R16C:$rSrc, 0xfff))),
-          (ANDI2To4 R16C:$rSrc, 0xfff)>;
+          (ANDIi16i32 R16C:$rSrc, 0xfff)>;
 
 // anyext 16->32: Extend 16->32 bits, irrespective of sign
 def : Pat<(i32 (anyext R16C:$rSrc)),
-          (ORI2To4 R16C:$rSrc, 0)>;
+          (ORIi16i32 R16C:$rSrc, 0)>;
 
 //===----------------------------------------------------------------------===//
 // Address generation: SPU, like PPC, has to split addresses into high and
diff --git a/lib/Target/CellSPU/SPUNodes.td b/lib/Target/CellSPU/SPUNodes.td
index a58a5526f47..c2db66783e6 100644
--- a/lib/Target/CellSPU/SPUNodes.td
+++ b/lib/Target/CellSPU/SPUNodes.td
@@ -32,7 +32,7 @@ def SPUcall : SDNode<"SPUISD::CALL", SDT_SPUCall,
 
 // Operand type constraints for vector shuffle/permute operations
 def SDT_SPUshuffle : SDTypeProfile<1, 3, [
-  SDTCisVT<3, v16i8>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>
+  SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>
 ]>;
 
 // Unary, binary v16i8 operator type constraints:
@@ -62,27 +62,12 @@ def SPUfsmbi_type: SDTypeProfile<1, 1, [
   SDTCisVT<1, i32>]>;
 
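+// One generic profile per operation now suffices: the SDTCisSameAs
+// constraints tie result and operand types together, so the former
+// per-type (v16i8/v8i16/v4i32) profiles below collapse into single defs.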
 // SELB type constraints:
-def SPUselb_type_v16i8: SDTypeProfile<1, 3, [
-  SDTCisVT<0, v16i8>, SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
-  SDTCisSameAs<0, 3> ]>;
-
-def SPUselb_type_v8i16: SDTypeProfile<1, 3, [
-  SDTCisVT<0, v8i16>, SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
-  SDTCisSameAs<0, 3> ]>;
-
-def SPUselb_type_v4i32: SDTypeProfile<1, 3, [
-  SDTCisVT<0, v4i32>, SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
-  SDTCisSameAs<0, 3> ]>;
+def SPUselb_type: SDTypeProfile<1, 3, [
+  SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, SDTCisSameAs<0, 3> ]>;
 
 // SPU Vector shift pseudo-instruction type constraints
-def SPUvecshift_type_v16i8: SDTypeProfile<1, 2, [
-  SDTCisVT<0, v16i8>, SDTCisSameAs<0, 1>, SDTCisInt<2>]>;
-
-def SPUvecshift_type_v8i16: SDTypeProfile<1, 2, [
-  SDTCisVT<0, v8i16>, SDTCisSameAs<0, 1>, SDTCisInt<2>]>;
-
-def SPUvecshift_type_v4i32: SDTypeProfile<1, 2, [
-  SDTCisVT<0, v4i32>, SDTCisSameAs<0, 1>, SDTCisInt<2>]>;
+def SPUvecshift_type: SDTypeProfile<1, 2, [
+  SDTCisSameAs<0, 1>, SDTCisInt<2>]>;
 
 //===----------------------------------------------------------------------===//
 // Synthetic/pseudo-instructions
@@ -116,41 +101,37 @@ def SPUmpyh_i32: SDNode<"SPUISD::MPYH", SDTIntBinOp, []>;
 
 // Used to compute intermediate products for 16-bit multiplies
 def SPUmpyhh_v8i16: SDNode<"SPUISD::MPYHH", SPUv8i16_binop, []>;
 
+// Shift left quadword by bits and bytes
+def SPUshlquad_l_bits: SDNode<"SPUISD::SHLQUAD_L_BITS", SPUvecshift_type, []>;
+def SPUshlquad_l_bytes: SDNode<"SPUISD::SHLQUAD_L_BYTES", SPUvecshift_type, []>;
+
 // Vector shifts (ISD::SHL,SRL,SRA are for _integers_ only):
-def SPUvec_shl_v8i16: SDNode<"SPUISD::VEC_SHL", SPUvecshift_type_v8i16, []>;
-def SPUvec_srl_v8i16: SDNode<"SPUISD::VEC_SRL", SPUvecshift_type_v8i16, []>;
-def SPUvec_sra_v8i16: SDNode<"SPUISD::VEC_SRA", SPUvecshift_type_v8i16, []>;
+def SPUvec_shl: SDNode<"SPUISD::VEC_SHL", SPUvecshift_type, []>;
+def SPUvec_srl: SDNode<"SPUISD::VEC_SRL", SPUvecshift_type, []>;
+def SPUvec_sra: SDNode<"SPUISD::VEC_SRA", SPUvecshift_type, []>;
 
-def SPUvec_shl_v4i32: SDNode<"SPUISD::VEC_SHL", SPUvecshift_type_v4i32, []>;
-def SPUvec_srl_v4i32: SDNode<"SPUISD::VEC_SRL", SPUvecshift_type_v4i32, []>;
-def SPUvec_sra_v4i32: SDNode<"SPUISD::VEC_SRA", SPUvecshift_type_v4i32, []>;
+def SPUvec_rotl: SDNode<"SPUISD::VEC_ROTL", SPUvecshift_type, []>;
+def SPUvec_rotr: SDNode<"SPUISD::VEC_ROTR", SPUvecshift_type, []>;
 
-def SPUvec_rotl_v8i16: SDNode<"SPUISD::VEC_ROTL", SPUvecshift_type_v8i16, []>;
-def SPUvec_rotl_v4i32: SDNode<"SPUISD::VEC_ROTL", SPUvecshift_type_v4i32, []>;
+def SPUrotquad_rz_bytes: SDNode<"SPUISD::ROTQUAD_RZ_BYTES",
+                                SPUvecshift_type, []>;
+def SPUrotquad_rz_bits: SDNode<"SPUISD::ROTQUAD_RZ_BITS",
+                               SPUvecshift_type, []>;
 
-def SPUvec_rotr_v8i16: SDNode<"SPUISD::VEC_ROTR", SPUvecshift_type_v8i16, []>;
-def SPUvec_rotr_v4i32: SDNode<"SPUISD::VEC_ROTR", SPUvecshift_type_v4i32, []>;
-
-def SPUrotbytes_right_zfill: SDNode<"SPUISD::ROTBYTES_RIGHT_Z",
-                                    SPUvecshift_type_v16i8, []>;
 def SPUrotbytes_right_sfill: SDNode<"SPUISD::ROTBYTES_RIGHT_S",
-                                    SPUvecshift_type_v16i8, []>;
+                                    SPUvecshift_type, []>;
+
 def SPUrotbytes_left: SDNode<"SPUISD::ROTBYTES_LEFT",
-                             SPUvecshift_type_v16i8, []>;
+                             SPUvecshift_type, []>;
 
 def SPUrotbytes_left_chained : SDNode<"SPUISD::ROTBYTES_LEFT_CHAINED",
-                                      SPUvecshift_type_v16i8, [SDNPHasChain]>;
+                                      SPUvecshift_type, [SDNPHasChain]>;
 
 // SPU form select mask for bytes, immediate
 def SPUfsmbi: SDNode<"SPUISD::FSMBI", SPUfsmbi_type, []>;
 
 // SPU select bits instruction
-def SPUselb_v16i8: SDNode<"SPUISD::SELB", SPUselb_type_v16i8, []>;
-def SPUselb_v8i16: SDNode<"SPUISD::SELB", SPUselb_type_v8i16, []>;
-def SPUselb_v4i32: SDNode<"SPUISD::SELB", SPUselb_type_v4i32, []>;
-
-// SPU single precision floating point constant load
-def SPUFPconstant: SDNode<"SPUISD::SFPConstant", SDTFPUnaryOp, []>;
+def SPUselb: SDNode<"SPUISD::SELB", SPUselb_type, []>;
 
 // SPU floating point interpolate
 def SPUinterpolate : SDNode<"SPUISD::FPInterp", SDTFPBinOp, []>;
@@ -158,8 +139,8 @@ def SPUinterpolate : SDNode<"SPUISD::FPInterp", SDTFPBinOp, []>;
 // SPU floating point reciprocal estimate (used for fdiv)
 def SPUreciprocalEst: SDNode<"SPUISD::FPRecipEst", SDTFPUnaryOp, []>;
 
-def SDT_vec_promote : SDTypeProfile<1, 1, []>;
-def SPUpromote_scalar: SDNode<"SPUISD::PROMOTE_SCALAR", SDT_vec_promote, []>;
+def SDTpromote_scalar: SDTypeProfile<1, 1, []>;
+def SPUpromote_scalar: SDNode<"SPUISD::PROMOTE_SCALAR", SDTpromote_scalar, []>;
 
 def SPU_vec_demote : SDTypeProfile<1, 1, []>;
 def SPUextract_elt0: SDNode<"SPUISD::EXTRACT_ELT0", SPU_vec_demote, []>;
diff --git a/lib/Target/CellSPU/SPUOperands.td b/lib/Target/CellSPU/SPUOperands.td
index 79d1a7a348a..94271428c85 100644
--- a/lib/Target/CellSPU/SPUOperands.td
+++ b/lib/Target/CellSPU/SPUOperands.td
@@ -63,7 +63,7 @@ def HI16_vec : SDNodeXForm<build_vector, [{
-  int sextVal = ((((int) N->getValue()) << 25) >> 25);
+  int sextVal = int(N->getSignExtended());
   return (sextVal >= -64 && sextVal <= 63);
 }]>;
 
@@ -162,6 +162,13 @@ def hi16 : PatLeaf<(imm), [{
   return false;
 }], HI16>;
 
+def bitshift : PatLeaf<(imm), [{
+  // bitshift predicate - returns true if 0 < imm <= 7 for SHLQBII
+  // (shift left quadword by bits immediate)
+  int64_t Val = N->getValue();
+  return (Val > 0 && Val <= 7);
+}]>;
+
 //===----------------------------------------------------------------------===//
 // Floating point operands:
 //===----------------------------------------------------------------------===//
@@ -447,6 +454,10 @@ def s10imm : Operand<i16> {
   let PrintMethod = "printS10ImmOperand";
 }
 
+def s10imm_i8: Operand<i8> {
+  let PrintMethod = "printS10ImmOperand";
+}
+
 def s10imm_i32: Operand<i32> {
   let PrintMethod = "printS10ImmOperand";
 }
diff --git a/test/CodeGen/CellSPU/mul_ops.ll b/test/CodeGen/CellSPU/mul_ops.ll
index e1509d27cba..a67c572a7c6 100644
--- a/test/CodeGen/CellSPU/mul_ops.ll
+++ b/test/CodeGen/CellSPU/mul_ops.ll
@@ -8,11 +8,10 @@
 ; RUN: grep and     %t1.s | count 2
 ; RUN: grep selb    %t1.s | count 6
 ; RUN: grep fsmbi   %t1.s | count 4
-; RUN: grep shli    %t1.s | count 4
+; RUN: grep shli    %t1.s | count 2
 ; RUN: grep shlhi   %t1.s | count 4
 ; RUN: grep ila     %t1.s | count 2
-; RUN: grep xsbh    %t1.s | count 8
-; RUN: grep xshw    %t1.s | count 4
+; RUN: grep xsbh    %t1.s | count 4
 
 target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
 target triple = "spu"
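
As a usage sketch (not part of the patch itself; the function and file names
are hypothetical), the retyped shlqbi intrinsic now takes an i32 bit count
instead of a second vector operand, so a caller would look roughly like:

    ; shlqbi_example.ll -- hypothetical; exercises the new signature of
    ; llvm.spu.si.shlqbi (v8i16 value shifted left by an i32 bit count).
    define <8 x i16> @shlqbi_example(<8 x i16> %a, i32 %bits) {
    entry:
      %r = call <8 x i16> @llvm.spu.si.shlqbi(<8 x i16> %a, i32 %bits)
      ret <8 x i16> %r
    }

    declare <8 x i16> @llvm.spu.si.shlqbi(<8 x i16>, i32)

With the updated selection pattern, such a call should lower to a single
shlqbi instruction with the bit count held in an R32C register rather than
a vector register.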