diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h
index bdcc82c42ea..b2095ef3551 100644
--- a/include/llvm/CodeGen/SelectionDAG.h
+++ b/include/llvm/CodeGen/SelectionDAG.h
@@ -424,8 +424,6 @@ public:
                   const SDUse *Ops, unsigned NumOps);
   SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT,
                   const SDUse *Ops, unsigned NumOps);
-  SDValue getNode(unsigned Opcode, MVT VT,
-                  const SDValue *Ops, unsigned NumOps);
   SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT,
                   const SDValue *Ops, unsigned NumOps);
   SDValue getNode(unsigned Opcode, DebugLoc DL,
@@ -665,8 +663,6 @@ public:
   SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT,
                         SDValue Op1, SDValue Op2, SDValue Op3);
-  SDNode *getTargetNode(unsigned Opcode, MVT VT,
-                        const SDValue *Ops, unsigned NumOps);
   SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT,
                         const SDValue *Ops, unsigned NumOps);
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 0b0d47b603e..7d515aec23d 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -892,7 +892,8 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, MVT VT, bool isT) {
   if (VT.isVector()) {
     SmallVector<SDValue, 8> Ops;
     Ops.assign(VT.getVectorNumElements(), Result);
-    Result = getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size());
+    Result = getNode(ISD::BUILD_VECTOR, DebugLoc::getUnknownLoc(),
+                     VT, &Ops[0], Ops.size());
   }
   return Result;
 }
@@ -935,7 +936,9 @@ SDValue SelectionDAG::getConstantFP(const ConstantFP& V, MVT VT, bool isTarget){
   if (VT.isVector()) {
     SmallVector<SDValue, 8> Ops;
     Ops.assign(VT.getVectorNumElements(), Result);
-    Result = getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size());
+    // FIXME DebugLoc info might be appropriate here
+    Result = getNode(ISD::BUILD_VECTOR, DebugLoc::getUnknownLoc(),
+                     VT, &Ops[0], Ops.size());
   }
   return Result;
 }
@@ -3730,11 +3733,6 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
   return getNode(Opcode, DL, VT, &NewOps[0], NumOps);
 }
 
-SDValue SelectionDAG::getNode(unsigned Opcode, MVT VT,
-                              const SDValue *Ops, unsigned NumOps) {
-  return getNode(Opcode, DebugLoc::getUnknownLoc(), VT, Ops, NumOps);
-}
-
 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
                               const SDValue *Ops, unsigned NumOps) {
   switch (NumOps) {
@@ -4450,10 +4448,6 @@ SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT,
   return getNode(~Opcode, dl, VT, Op1, Op2, Op3).getNode();
 }
 
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT,
-                                    const SDValue *Ops, unsigned NumOps) {
-  return getNode(~Opcode, VT, Ops, NumOps).getNode();
-}
 SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT,
                                     const SDValue *Ops, unsigned NumOps) {
   return getNode(~Opcode, dl, VT, Ops, NumOps).getNode();
diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp
index fcb100ae66d..8d1aaab520b 100644
--- a/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -532,6 +532,7 @@ static inline SDValue getAL(SelectionDAG *CurDAG) {
 
 SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
   SDNode *N = Op.getNode();
+  DebugLoc dl = N->getDebugLoc();
 
   if (N->isMachineOpcode())
     return NULL;   // Already selected.
@@ -556,7 +557,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) { SDNode *ResNode; if (Subtarget->isThumb()) - ResNode = CurDAG->getTargetNode(ARM::tLDRcp, MVT::i32, MVT::Other, + ResNode = CurDAG->getTargetNode(ARM::tLDRcp, dl, MVT::i32, MVT::Other, CPIdx, CurDAG->getEntryNode()); else { SDValue Ops[] = { @@ -567,7 +568,8 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) { CurDAG->getRegister(0, MVT::i32), CurDAG->getEntryNode() }; - ResNode=CurDAG->getTargetNode(ARM::LDRcp, MVT::i32, MVT::Other, Ops, 6); + ResNode=CurDAG->getTargetNode(ARM::LDRcp, dl, MVT::i32, MVT::Other, + Ops, 6); } ReplaceUses(Op, SDValue(ResNode, 0)); return NULL; @@ -632,20 +634,20 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) { } break; case ARMISD::FMRRD: - return CurDAG->getTargetNode(ARM::FMRRD, MVT::i32, MVT::i32, + return CurDAG->getTargetNode(ARM::FMRRD, dl, MVT::i32, MVT::i32, Op.getOperand(0), getAL(CurDAG), CurDAG->getRegister(0, MVT::i32)); case ISD::UMUL_LOHI: { SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1), getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), CurDAG->getRegister(0, MVT::i32) }; - return CurDAG->getTargetNode(ARM::UMULL, MVT::i32, MVT::i32, Ops, 5); + return CurDAG->getTargetNode(ARM::UMULL, dl, MVT::i32, MVT::i32, Ops, 5); } case ISD::SMUL_LOHI: { SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1), getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), CurDAG->getRegister(0, MVT::i32) }; - return CurDAG->getTargetNode(ARM::SMULL, MVT::i32, MVT::i32, Ops, 5); + return CurDAG->getTargetNode(ARM::SMULL, dl, MVT::i32, MVT::i32, Ops, 5); } case ISD::LOAD: { LoadSDNode *LD = cast(Op); @@ -685,7 +687,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) { SDValue Base = LD->getBasePtr(); SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), Chain }; - return CurDAG->getTargetNode(Opcode, MVT::i32, MVT::i32, + return CurDAG->getTargetNode(Opcode, dl, MVT::i32, MVT::i32, MVT::Other, Ops, 6); } } @@ -855,7 +857,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) { TLI.getPointerTy()); SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy()); SDValue Ops[] = { Tmp1, Tmp2, Chain }; - return CurDAG->getTargetNode(TargetInstrInfo::DECLARE, + return CurDAG->getTargetNode(TargetInstrInfo::DECLARE, dl, MVT::Other, Ops, 3); } break; diff --git a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp index 08cc8d0d057..af8fd72dcba 100644 --- a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp +++ b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp @@ -662,6 +662,7 @@ SPUDAGToDAGISel::Select(SDValue Op) { unsigned NewOpc; MVT OpVT = Op.getValueType(); SDValue Ops[8]; + DebugLoc dl = N->getDebugLoc(); if (N->isMachineOpcode()) { return NULL; // Already selected. 
@@ -680,7 +681,7 @@ SPUDAGToDAGISel::Select(SDValue Op) { } else { NewOpc = SPU::Ar32; Ops[0] = CurDAG->getRegister(SPU::R1, Op.getValueType()); - Ops[1] = SDValue(CurDAG->getTargetNode(SPU::ILAr32, Op.getValueType(), + Ops[1] = SDValue(CurDAG->getTargetNode(SPU::ILAr32, dl, Op.getValueType(), TFI, Imm0), 0); n_ops = 2; } @@ -704,7 +705,7 @@ SPUDAGToDAGISel::Select(SDValue Op) { /*NOTREACHED*/ break; case MVT::i32: - shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, MVT::v4i32, + shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, CurDAG->getConstant(0x80808080, MVT::i32), CurDAG->getConstant(0x00010203, MVT::i32), CurDAG->getConstant(0x80808080, MVT::i32), @@ -712,7 +713,7 @@ SPUDAGToDAGISel::Select(SDValue Op) { break; case MVT::i16: - shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, MVT::v4i32, + shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, CurDAG->getConstant(0x80808080, MVT::i32), CurDAG->getConstant(0x80800203, MVT::i32), CurDAG->getConstant(0x80808080, MVT::i32), @@ -720,7 +721,7 @@ SPUDAGToDAGISel::Select(SDValue Op) { break; case MVT::i8: - shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, MVT::v4i32, + shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, CurDAG->getConstant(0x80808080, MVT::i32), CurDAG->getConstant(0x80808003, MVT::i32), CurDAG->getConstant(0x80808080, MVT::i32), @@ -730,10 +731,10 @@ SPUDAGToDAGISel::Select(SDValue Op) { SDNode *shufMaskLoad = emitBuildVector(shufMask); SDNode *PromoteScalar = - SelectCode(CurDAG->getNode(SPUISD::PREFSLOT2VEC, Op0VecVT, Op0)); + SelectCode(CurDAG->getNode(SPUISD::PREFSLOT2VEC, dl, Op0VecVT, Op0)); SDValue zextShuffle = - CurDAG->getNode(SPUISD::SHUFB, OpVecVT, + CurDAG->getNode(SPUISD::SHUFB, dl, OpVecVT, SDValue(PromoteScalar, 0), SDValue(PromoteScalar, 0), SDValue(shufMaskLoad, 0)); @@ -742,27 +743,27 @@ SPUDAGToDAGISel::Select(SDValue Op) { // re-use it in the VEC2PREFSLOT selection without needing to explicitly // call SelectCode (it's already done for us.) 
SelectCode(CurDAG->getNode(ISD::BIT_CONVERT, OpVecVT, zextShuffle)); - return SelectCode(CurDAG->getNode(SPUISD::VEC2PREFSLOT, OpVT, + return SelectCode(CurDAG->getNode(SPUISD::VEC2PREFSLOT, dl, OpVT, zextShuffle)); } else if (Opc == ISD::ADD && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) { SDNode *CGLoad = - emitBuildVector(SPU::getCarryGenerateShufMask(*CurDAG)); + emitBuildVector(SPU::getCarryGenerateShufMask(*CurDAG, dl)); - return SelectCode(CurDAG->getNode(SPUISD::ADD64_MARKER, OpVT, + return SelectCode(CurDAG->getNode(SPUISD::ADD64_MARKER, dl, OpVT, Op.getOperand(0), Op.getOperand(1), SDValue(CGLoad, 0))); } else if (Opc == ISD::SUB && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) { SDNode *CGLoad = - emitBuildVector(SPU::getBorrowGenerateShufMask(*CurDAG)); + emitBuildVector(SPU::getBorrowGenerateShufMask(*CurDAG, dl)); - return SelectCode(CurDAG->getNode(SPUISD::SUB64_MARKER, OpVT, + return SelectCode(CurDAG->getNode(SPUISD::SUB64_MARKER, dl, OpVT, Op.getOperand(0), Op.getOperand(1), SDValue(CGLoad, 0))); } else if (Opc == ISD::MUL && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) { SDNode *CGLoad = - emitBuildVector(SPU::getCarryGenerateShufMask(*CurDAG)); + emitBuildVector(SPU::getCarryGenerateShufMask(*CurDAG, dl)); - return SelectCode(CurDAG->getNode(SPUISD::MUL64_MARKER, OpVT, + return SelectCode(CurDAG->getNode(SPUISD::MUL64_MARKER, dl, OpVT, Op.getOperand(0), Op.getOperand(1), SDValue(CGLoad, 0))); } else if (Opc == ISD::TRUNCATE) { @@ -780,7 +781,8 @@ SPUDAGToDAGISel::Select(SDValue Op) { if (shift_amt >= 32) { SDNode *hi32 = - CurDAG->getTargetNode(SPU::ORr32_r64, OpVT, Op0.getOperand(0)); + CurDAG->getTargetNode(SPU::ORr32_r64, dl, OpVT, + Op0.getOperand(0)); shift_amt -= 32; if (shift_amt > 0) { @@ -791,7 +793,8 @@ SPUDAGToDAGISel::Select(SDValue Op) { if (Op0.getOpcode() == ISD::SRL) Opc = SPU::ROTMr32; - hi32 = CurDAG->getTargetNode(Opc, OpVT, SDValue(hi32, 0), shift); + hi32 = CurDAG->getTargetNode(Opc, dl, OpVT, SDValue(hi32, 0), + shift); } return hi32; @@ -829,9 +832,9 @@ SPUDAGToDAGISel::Select(SDValue Op) { if (vtm->ldresult_imm) { SDValue Zero = CurDAG->getTargetConstant(0, VT); - Result = CurDAG->getTargetNode(Opc, VT, MVT::Other, Arg, Zero, Chain); + Result = CurDAG->getTargetNode(Opc, dl, VT, MVT::Other, Arg, Zero, Chain); } else { - Result = CurDAG->getTargetNode(Opc, VT, MVT::Other, Arg, Arg, Chain); + Result = CurDAG->getTargetNode(Opc, dl, VT, MVT::Other, Arg, Arg, Chain); } return Result; @@ -867,7 +870,7 @@ SPUDAGToDAGISel::Select(SDValue Op) { if (N->hasOneUse()) return CurDAG->SelectNodeTo(N, NewOpc, OpVT, Ops, n_ops); else - return CurDAG->getTargetNode(NewOpc, OpVT, Ops, n_ops); + return CurDAG->getTargetNode(NewOpc, dl, OpVT, Ops, n_ops); } else return SelectCode(Op); } @@ -892,13 +895,14 @@ SPUDAGToDAGISel::SelectSHLi64(SDValue &Op, MVT OpVT) { MVT ShiftAmtVT = ShiftAmt.getValueType(); SDNode *VecOp0, *SelMask, *ZeroFill, *Shift = 0; SDValue SelMaskVal; + DebugLoc dl = Op.getDebugLoc(); - VecOp0 = CurDAG->getTargetNode(SPU::ORv2i64_i64, VecVT, Op0); + VecOp0 = CurDAG->getTargetNode(SPU::ORv2i64_i64, dl, VecVT, Op0); SelMaskVal = CurDAG->getTargetConstant(0xff00ULL, MVT::i16); - SelMask = CurDAG->getTargetNode(SPU::FSMBIv2i64, VecVT, SelMaskVal); - ZeroFill = CurDAG->getTargetNode(SPU::ILv2i64, VecVT, + SelMask = CurDAG->getTargetNode(SPU::FSMBIv2i64, dl, VecVT, SelMaskVal); + ZeroFill = CurDAG->getTargetNode(SPU::ILv2i64, dl, VecVT, CurDAG->getTargetConstant(0, OpVT)); - VecOp0 = CurDAG->getTargetNode(SPU::SELBv2i64, VecVT, + VecOp0 = 
CurDAG->getTargetNode(SPU::SELBv2i64, dl, VecVT, SDValue(ZeroFill, 0), SDValue(VecOp0, 0), SDValue(SelMask, 0)); @@ -909,35 +913,35 @@ SPUDAGToDAGISel::SelectSHLi64(SDValue &Op, MVT OpVT) { if (bytes > 0) { Shift = - CurDAG->getTargetNode(SPU::SHLQBYIv2i64, VecVT, + CurDAG->getTargetNode(SPU::SHLQBYIv2i64, dl, VecVT, SDValue(VecOp0, 0), CurDAG->getTargetConstant(bytes, ShiftAmtVT)); } if (bits > 0) { Shift = - CurDAG->getTargetNode(SPU::SHLQBIIv2i64, VecVT, + CurDAG->getTargetNode(SPU::SHLQBIIv2i64, dl, VecVT, SDValue((Shift != 0 ? Shift : VecOp0), 0), CurDAG->getTargetConstant(bits, ShiftAmtVT)); } } else { SDNode *Bytes = - CurDAG->getTargetNode(SPU::ROTMIr32, ShiftAmtVT, + CurDAG->getTargetNode(SPU::ROTMIr32, dl, ShiftAmtVT, ShiftAmt, CurDAG->getTargetConstant(3, ShiftAmtVT)); SDNode *Bits = - CurDAG->getTargetNode(SPU::ANDIr32, ShiftAmtVT, + CurDAG->getTargetNode(SPU::ANDIr32, dl, ShiftAmtVT, ShiftAmt, CurDAG->getTargetConstant(7, ShiftAmtVT)); Shift = - CurDAG->getTargetNode(SPU::SHLQBYv2i64, VecVT, + CurDAG->getTargetNode(SPU::SHLQBYv2i64, dl, VecVT, SDValue(VecOp0, 0), SDValue(Bytes, 0)); Shift = - CurDAG->getTargetNode(SPU::SHLQBIv2i64, VecVT, + CurDAG->getTargetNode(SPU::SHLQBIv2i64, dl, VecVT, SDValue(Shift, 0), SDValue(Bits, 0)); } - return CurDAG->getTargetNode(SPU::ORi64_v2i64, OpVT, SDValue(Shift, 0)); + return CurDAG->getTargetNode(SPU::ORi64_v2i64, dl, OpVT, SDValue(Shift, 0)); } /*! @@ -955,8 +959,9 @@ SPUDAGToDAGISel::SelectSRLi64(SDValue &Op, MVT OpVT) { SDValue ShiftAmt = Op.getOperand(1); MVT ShiftAmtVT = ShiftAmt.getValueType(); SDNode *VecOp0, *Shift = 0; + DebugLoc dl = Op.getDebugLoc(); - VecOp0 = CurDAG->getTargetNode(SPU::ORv2i64_i64, VecVT, Op0); + VecOp0 = CurDAG->getTargetNode(SPU::ORv2i64_i64, dl, VecVT, Op0); if (ConstantSDNode *CN = dyn_cast(ShiftAmt)) { unsigned bytes = unsigned(CN->getZExtValue()) >> 3; @@ -964,45 +969,45 @@ SPUDAGToDAGISel::SelectSRLi64(SDValue &Op, MVT OpVT) { if (bytes > 0) { Shift = - CurDAG->getTargetNode(SPU::ROTQMBYIv2i64, VecVT, + CurDAG->getTargetNode(SPU::ROTQMBYIv2i64, dl, VecVT, SDValue(VecOp0, 0), CurDAG->getTargetConstant(bytes, ShiftAmtVT)); } if (bits > 0) { Shift = - CurDAG->getTargetNode(SPU::ROTQMBIIv2i64, VecVT, + CurDAG->getTargetNode(SPU::ROTQMBIIv2i64, dl, VecVT, SDValue((Shift != 0 ? Shift : VecOp0), 0), CurDAG->getTargetConstant(bits, ShiftAmtVT)); } } else { SDNode *Bytes = - CurDAG->getTargetNode(SPU::ROTMIr32, ShiftAmtVT, + CurDAG->getTargetNode(SPU::ROTMIr32, dl, ShiftAmtVT, ShiftAmt, CurDAG->getTargetConstant(3, ShiftAmtVT)); SDNode *Bits = - CurDAG->getTargetNode(SPU::ANDIr32, ShiftAmtVT, + CurDAG->getTargetNode(SPU::ANDIr32, dl, ShiftAmtVT, ShiftAmt, CurDAG->getTargetConstant(7, ShiftAmtVT)); // Ensure that the shift amounts are negated! 
- Bytes = CurDAG->getTargetNode(SPU::SFIr32, ShiftAmtVT, + Bytes = CurDAG->getTargetNode(SPU::SFIr32, dl, ShiftAmtVT, SDValue(Bytes, 0), CurDAG->getTargetConstant(0, ShiftAmtVT)); - Bits = CurDAG->getTargetNode(SPU::SFIr32, ShiftAmtVT, + Bits = CurDAG->getTargetNode(SPU::SFIr32, dl, ShiftAmtVT, SDValue(Bits, 0), CurDAG->getTargetConstant(0, ShiftAmtVT)); Shift = - CurDAG->getTargetNode(SPU::ROTQMBYv2i64, VecVT, + CurDAG->getTargetNode(SPU::ROTQMBYv2i64, dl, VecVT, SDValue(VecOp0, 0), SDValue(Bytes, 0)); Shift = - CurDAG->getTargetNode(SPU::ROTQMBIv2i64, VecVT, + CurDAG->getTargetNode(SPU::ROTQMBIv2i64, dl, VecVT, SDValue(Shift, 0), SDValue(Bits, 0)); } - return CurDAG->getTargetNode(SPU::ORi64_v2i64, OpVT, SDValue(Shift, 0)); + return CurDAG->getTargetNode(SPU::ORi64_v2i64, dl, OpVT, SDValue(Shift, 0)); } /*! @@ -1019,24 +1024,25 @@ SPUDAGToDAGISel::SelectSRAi64(SDValue &Op, MVT OpVT) { MVT VecVT = MVT::getVectorVT(OpVT, (128 / OpVT.getSizeInBits())); SDValue ShiftAmt = Op.getOperand(1); MVT ShiftAmtVT = ShiftAmt.getValueType(); + DebugLoc dl = Op.getDebugLoc(); SDNode *VecOp0 = - CurDAG->getTargetNode(SPU::ORv2i64_i64, VecVT, Op.getOperand(0)); + CurDAG->getTargetNode(SPU::ORv2i64_i64, dl, VecVT, Op.getOperand(0)); SDValue SignRotAmt = CurDAG->getTargetConstant(31, ShiftAmtVT); SDNode *SignRot = - CurDAG->getTargetNode(SPU::ROTMAIv2i64_i32, MVT::v2i64, + CurDAG->getTargetNode(SPU::ROTMAIv2i64_i32, dl, MVT::v2i64, SDValue(VecOp0, 0), SignRotAmt); SDNode *UpperHalfSign = - CurDAG->getTargetNode(SPU::ORi32_v4i32, MVT::i32, SDValue(SignRot, 0)); + CurDAG->getTargetNode(SPU::ORi32_v4i32, dl, MVT::i32, SDValue(SignRot, 0)); SDNode *UpperHalfSignMask = - CurDAG->getTargetNode(SPU::FSM64r32, VecVT, SDValue(UpperHalfSign, 0)); + CurDAG->getTargetNode(SPU::FSM64r32, dl, VecVT, SDValue(UpperHalfSign, 0)); SDNode *UpperLowerMask = - CurDAG->getTargetNode(SPU::FSMBIv2i64, VecVT, + CurDAG->getTargetNode(SPU::FSMBIv2i64, dl, VecVT, CurDAG->getTargetConstant(0xff00ULL, MVT::i16)); SDNode *UpperLowerSelect = - CurDAG->getTargetNode(SPU::SELBv2i64, VecVT, + CurDAG->getTargetNode(SPU::SELBv2i64, dl, VecVT, SDValue(UpperHalfSignMask, 0), SDValue(VecOp0, 0), SDValue(UpperLowerMask, 0)); @@ -1050,7 +1056,7 @@ SPUDAGToDAGISel::SelectSRAi64(SDValue &Op, MVT OpVT) { if (bytes > 0) { bytes = 31 - bytes; Shift = - CurDAG->getTargetNode(SPU::ROTQBYIv2i64, VecVT, + CurDAG->getTargetNode(SPU::ROTQBYIv2i64, dl, VecVT, SDValue(UpperLowerSelect, 0), CurDAG->getTargetConstant(bytes, ShiftAmtVT)); } @@ -1058,24 +1064,24 @@ SPUDAGToDAGISel::SelectSRAi64(SDValue &Op, MVT OpVT) { if (bits > 0) { bits = 8 - bits; Shift = - CurDAG->getTargetNode(SPU::ROTQBIIv2i64, VecVT, + CurDAG->getTargetNode(SPU::ROTQBIIv2i64, dl, VecVT, SDValue((Shift != 0 ? Shift : UpperLowerSelect), 0), CurDAG->getTargetConstant(bits, ShiftAmtVT)); } } else { SDNode *NegShift = - CurDAG->getTargetNode(SPU::SFIr32, ShiftAmtVT, + CurDAG->getTargetNode(SPU::SFIr32, dl, ShiftAmtVT, ShiftAmt, CurDAG->getTargetConstant(0, ShiftAmtVT)); Shift = - CurDAG->getTargetNode(SPU::ROTQBYBIv2i64_r32, VecVT, + CurDAG->getTargetNode(SPU::ROTQBYBIv2i64_r32, dl, VecVT, SDValue(UpperLowerSelect, 0), SDValue(NegShift, 0)); Shift = - CurDAG->getTargetNode(SPU::ROTQBIv2i64, VecVT, + CurDAG->getTargetNode(SPU::ROTQBIv2i64, dl, VecVT, SDValue(Shift, 0), SDValue(NegShift, 0)); } - return CurDAG->getTargetNode(SPU::ORi64_v2i64, OpVT, SDValue(Shift, 0)); + return CurDAG->getTargetNode(SPU::ORi64_v2i64, dl, OpVT, SDValue(Shift, 0)); } /*! 
@@ -1083,9 +1089,11 @@ SPUDAGToDAGISel::SelectSRAi64(SDValue &Op, MVT OpVT) { */ SDNode *SPUDAGToDAGISel::SelectI64Constant(SDValue& Op, MVT OpVT) { ConstantSDNode *CN = cast(Op.getNode()); + // Currently there's no DL on the input, but won't hurt to pretend. + DebugLoc dl = Op.getDebugLoc(); MVT OpVecVT = MVT::getVectorVT(OpVT, 2); SDValue i64vec = - SPU::LowerSplat_v2i64(OpVecVT, *CurDAG, CN->getZExtValue()); + SPU::LowerSplat_v2i64(OpVecVT, *CurDAG, CN->getZExtValue(), dl); // Here's where it gets interesting, because we have to parse out the // subtree handed back in i64vec: @@ -1096,7 +1104,7 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(SDValue& Op, MVT OpVT) { SDValue Op0 = i64vec.getOperand(0); ReplaceUses(i64vec, Op0); - return CurDAG->getTargetNode(SPU::ORi64_v2i64, OpVT, + return CurDAG->getTargetNode(SPU::ORi64_v2i64, dl, OpVT, SDValue(emitBuildVector(Op0), 0)); } else if (i64vec.getOpcode() == SPUISD::SHUFB) { SDValue lhs = i64vec.getOperand(0); @@ -1131,11 +1139,12 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(SDValue& Op, MVT OpVT) { : emitBuildVector(shufmask)); SDNode *shufNode = - Select(CurDAG->getNode(SPUISD::SHUFB, OpVecVT, + Select(CurDAG->getNode(SPUISD::SHUFB, dl, OpVecVT, SDValue(lhsNode, 0), SDValue(rhsNode, 0), SDValue(shufMaskNode, 0))); - return CurDAG->getTargetNode(SPU::ORi64_v2i64, OpVT, SDValue(shufNode, 0)); + return CurDAG->getTargetNode(SPU::ORi64_v2i64, dl, OpVT, + SDValue(shufNode, 0)); } else { cerr << "SPUDAGToDAGISel::SelectI64Constant: Unhandled i64vec condition\n"; abort(); diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp index 58a66306bba..14bb8d00c81 100644 --- a/lib/Target/CellSPU/SPUISelLowering.cpp +++ b/lib/Target/CellSPU/SPUISelLowering.cpp @@ -1583,6 +1583,7 @@ static bool isConstantSplat(const uint64_t Bits128[2], SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); + DebugLoc dl = Op.getDebugLoc(); // If this is a vector of constants or undefs, get the bits. A bit in // UndefBits is set if the corresponding element of the vector is an // ISD::UNDEF value. For undefs, the corresponding VectorBits values are @@ -1610,8 +1611,9 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { && "LowerBUILD_VECTOR: Unexpected floating point vector element."); // NOTE: pretend the constant is an integer. LLVM won't load FP constants SDValue T = DAG.getConstant(Value32, MVT::i32); - return DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, - DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, T, T, T, T)); + return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, + DAG.getNode(ISD::BUILD_VECTOR, dl, + MVT::v4i32, T, T, T, T)); break; } case MVT::v2f64: { @@ -1620,8 +1622,8 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { && "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes."); // NOTE: pretend the constant is an integer. 
LLVM won't load FP constants SDValue T = DAG.getConstant(f64val, MVT::i64); - return DAG.getNode(ISD::BIT_CONVERT, MVT::v2f64, - DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i64, T, T)); + return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T)); break; } case MVT::v16i8: { @@ -1630,8 +1632,8 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { SDValue Ops[8]; for (int i = 0; i < 8; ++i) Ops[i] = DAG.getConstant(Value16, MVT::i16); - return DAG.getNode(ISD::BIT_CONVERT, VT, - DAG.getNode(ISD::BUILD_VECTOR, MVT::v8i16, Ops, 8)); + return DAG.getNode(ISD::BIT_CONVERT, dl, VT, + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, Ops, 8)); } case MVT::v8i16: { unsigned short Value16; @@ -1642,20 +1644,20 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { SDValue T = DAG.getConstant(Value16, VT.getVectorElementType()); SDValue Ops[8]; for (int i = 0; i < 8; ++i) Ops[i] = T; - return DAG.getNode(ISD::BUILD_VECTOR, VT, Ops, 8); + return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops, 8); } case MVT::v4i32: { unsigned int Value = SplatBits; SDValue T = DAG.getConstant(Value, VT.getVectorElementType()); - return DAG.getNode(ISD::BUILD_VECTOR, VT, T, T, T, T); + return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T, T, T); } case MVT::v2i32: { unsigned int Value = SplatBits; SDValue T = DAG.getConstant(Value, VT.getVectorElementType()); - return DAG.getNode(ISD::BUILD_VECTOR, VT, T, T); + return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T); } case MVT::v2i64: { - return SPU::LowerSplat_v2i64(VT, DAG, SplatBits); + return SPU::LowerSplat_v2i64(VT, DAG, SplatBits, dl); } } @@ -1663,15 +1665,16 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { } SDValue -SPU::LowerSplat_v2i64(MVT OpVT, SelectionDAG& DAG, uint64_t SplatVal) { +SPU::LowerSplat_v2i64(MVT OpVT, SelectionDAG& DAG, uint64_t SplatVal, + DebugLoc dl) { uint32_t upper = uint32_t(SplatVal >> 32); uint32_t lower = uint32_t(SplatVal); if (upper == lower) { // Magic constant that can be matched by IL, ILA, et. al. SDValue Val = DAG.getTargetConstant(upper, MVT::i32); - return DAG.getNode(ISD::BIT_CONVERT, OpVT, - DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, + return DAG.getNode(ISD::BIT_CONVERT, dl, OpVT, + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Val, Val, Val, Val)); } else { SDValue LO32; @@ -1691,16 +1694,16 @@ SPU::LowerSplat_v2i64(MVT OpVT, SelectionDAG& DAG, uint64_t SplatVal) { // Create lower vector if not a special pattern if (!lower_special) { SDValue LO32C = DAG.getConstant(lower, MVT::i32); - LO32 = DAG.getNode(ISD::BIT_CONVERT, OpVT, - DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, + LO32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT, + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, LO32C, LO32C, LO32C, LO32C)); } // Create upper vector if not a special pattern if (!upper_special) { SDValue HI32C = DAG.getConstant(upper, MVT::i32); - HI32 = DAG.getNode(ISD::BIT_CONVERT, OpVT, - DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, + HI32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT, + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, HI32C, HI32C, HI32C, HI32C)); } @@ -1714,7 +1717,7 @@ SPU::LowerSplat_v2i64(MVT OpVT, SelectionDAG& DAG, uint64_t SplatVal) { // Unhappy situation... 
both upper and lower are special, so punt with // a target constant: SDValue Zero = DAG.getConstant(0, MVT::i32); - HI32 = LO32 = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Zero, Zero, + HI32 = LO32 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Zero, Zero, Zero, Zero); } @@ -1744,8 +1747,8 @@ SPU::LowerSplat_v2i64(MVT OpVT, SelectionDAG& DAG, uint64_t SplatVal) { ShufBytes.push_back(DAG.getConstant(val, MVT::i32)); } - return DAG.getNode(SPUISD::SHUFB, OpVT, HI32, LO32, - DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, + return DAG.getNode(SPUISD::SHUFB, dl, OpVT, HI32, LO32, + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &ShufBytes[0], ShufBytes.size())); } } @@ -1883,6 +1886,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { SDValue Op0 = Op.getOperand(0); // Op0 = the scalar + DebugLoc dl = Op.getDebugLoc(); if (Op0.getNode()->getOpcode() == ISD::Constant) { // For a constant, build the appropriate constant vector, which will @@ -1909,7 +1913,7 @@ static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { for (size_t j = 0; j < n_copies; ++j) ConstVecValues.push_back(CValue); - return DAG.getNode(ISD::BUILD_VECTOR, Op.getValueType(), + return DAG.getNode(ISD::BUILD_VECTOR, dl, Op.getValueType(), &ConstVecValues[0], ConstVecValues.size()); } else { // Otherwise, copy the value from one register to another: @@ -1921,7 +1925,7 @@ static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { case MVT::i64: case MVT::f32: case MVT::f64: - return DAG.getNode(SPUISD::PREFSLOT2VEC, Op.getValueType(), Op0, Op0); + return DAG.getNode(SPUISD::PREFSLOT2VEC, dl, Op.getValueType(), Op0, Op0); } } @@ -1932,6 +1936,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); SDValue N = Op.getOperand(0); SDValue Elt = Op.getOperand(1); + DebugLoc dl = Op.getDebugLoc(); SDValue retval; if (ConstantSDNode *C = dyn_cast(Elt)) { @@ -1950,7 +1955,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { if (EltNo == 0 && (VT == MVT::i32 || VT == MVT::i64)) { // i32 and i64: Element 0 is the preferred slot - return DAG.getNode(SPUISD::VEC2PREFSLOT, VT, N); + return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, N); } // Need to generate shuffle mask and extract: @@ -2009,12 +2014,12 @@ static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { ShufMask[i] = DAG.getConstant(bits, MVT::i32); } - SDValue ShufMaskVec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, + SDValue ShufMaskVec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &ShufMask[0], sizeof(ShufMask) / sizeof(ShufMask[0])); - retval = DAG.getNode(SPUISD::VEC2PREFSLOT, VT, - DAG.getNode(SPUISD::SHUFB, N.getValueType(), + retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, + DAG.getNode(SPUISD::SHUFB, dl, N.getValueType(), N, N, ShufMaskVec)); } else { // Variable index: Rotate the requested element into slot 0, then replicate @@ -2027,7 +2032,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { // Make life easier by making sure the index is zero-extended to i32 if (Elt.getValueType() != MVT::i32) - Elt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Elt); + Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Elt); // Scale the index to a bit/byte shift quantity APInt scaleFactor = @@ -2037,11 +2042,11 @@ static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { if (scaleShift > 0) { // Scale the shift factor: - Elt = DAG.getNode(ISD::SHL, 
MVT::i32, Elt, + Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt, DAG.getConstant(scaleShift, MVT::i32)); } - vecShift = DAG.getNode(SPUISD::SHLQUAD_L_BYTES, VecVT, N, Elt); + vecShift = DAG.getNode(SPUISD::SHLQUAD_L_BYTES, dl, VecVT, N, Elt); // Replicate the bytes starting at byte 0 across the entire vector (for // consistency with the notion of a unified register set) @@ -2054,20 +2059,20 @@ static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { /*NOTREACHED*/ case MVT::i8: { SDValue factor = DAG.getConstant(0x00000000, MVT::i32); - replicate = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, factor, factor, + replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, factor, factor, factor, factor); break; } case MVT::i16: { SDValue factor = DAG.getConstant(0x00010001, MVT::i32); - replicate = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, factor, factor, + replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, factor, factor, factor, factor); break; } case MVT::i32: case MVT::f32: { SDValue factor = DAG.getConstant(0x00010203, MVT::i32); - replicate = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, factor, factor, + replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, factor, factor, factor, factor); break; } @@ -2075,14 +2080,14 @@ static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { case MVT::f64: { SDValue loFactor = DAG.getConstant(0x00010203, MVT::i32); SDValue hiFactor = DAG.getConstant(0x04050607, MVT::i32); - replicate = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, loFactor, hiFactor, - loFactor, hiFactor); + replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, + loFactor, hiFactor, loFactor, hiFactor); break; } } - retval = DAG.getNode(SPUISD::VEC2PREFSLOT, VT, - DAG.getNode(SPUISD::SHUFB, VecVT, + retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, + DAG.getNode(SPUISD::SHUFB, dl, VecVT, vecShift, vecShift, replicate)); } @@ -2093,6 +2098,7 @@ static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { SDValue VecOp = Op.getOperand(0); SDValue ValOp = Op.getOperand(1); SDValue IdxOp = Op.getOperand(2); + DebugLoc dl = Op.getDebugLoc(); MVT VT = Op.getValueType(); ConstantSDNode *CN = cast(IdxOp); @@ -2100,16 +2106,16 @@ static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Use $sp ($1) because it's always 16-byte aligned and it's available: - SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, PtrVT, + SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, DAG.getRegister(SPU::R1, PtrVT), DAG.getConstant(CN->getSExtValue(), PtrVT)); - SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, VT, Pointer); + SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, VT, Pointer); SDValue result = - DAG.getNode(SPUISD::SHUFB, VT, - DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, ValOp), + DAG.getNode(SPUISD::SHUFB, dl, VT, + DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp), VecOp, - DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, ShufMask)); + DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, ShufMask)); return result; } @@ -2118,6 +2124,7 @@ static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc, const TargetLowering &TLI) { SDValue N0 = Op.getOperand(0); // Everything has at least one operand + DebugLoc dl = Op.getDebugLoc(); MVT ShiftVT = TLI.getShiftAmountTy(); assert(Op.getValueType() == MVT::i8); @@ -2130,10 +2137,10 @@ static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc, // 8-bit addition: Promote the arguments up to 16-bits and 
truncate // the result: SDValue N1 = Op.getOperand(1); - N0 = DAG.getNode(ISD::SIGN_EXTEND, MVT::i16, N0); - N1 = DAG.getNode(ISD::SIGN_EXTEND, MVT::i16, N1); - return DAG.getNode(ISD::TRUNCATE, MVT::i8, - DAG.getNode(Opc, MVT::i16, N0, N1)); + N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0); + N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1); + return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, + DAG.getNode(Opc, dl, MVT::i16, N0, N1)); } @@ -2141,81 +2148,81 @@ static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc, // 8-bit subtraction: Promote the arguments up to 16-bits and truncate // the result: SDValue N1 = Op.getOperand(1); - N0 = DAG.getNode(ISD::SIGN_EXTEND, MVT::i16, N0); - N1 = DAG.getNode(ISD::SIGN_EXTEND, MVT::i16, N1); - return DAG.getNode(ISD::TRUNCATE, MVT::i8, - DAG.getNode(Opc, MVT::i16, N0, N1)); + N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0); + N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1); + return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, + DAG.getNode(Opc, dl, MVT::i16, N0, N1)); } case ISD::ROTR: case ISD::ROTL: { SDValue N1 = Op.getOperand(1); unsigned N1Opc; N0 = (N0.getOpcode() != ISD::Constant - ? DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, N0) + ? DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0) : DAG.getConstant(cast(N0)->getZExtValue(), MVT::i16)); N1Opc = N1.getValueType().bitsLT(ShiftVT) ? ISD::ZERO_EXTEND : ISD::TRUNCATE; N1 = (N1.getOpcode() != ISD::Constant - ? DAG.getNode(N1Opc, ShiftVT, N1) + ? DAG.getNode(N1Opc, dl, ShiftVT, N1) : DAG.getConstant(cast(N1)->getZExtValue(), TLI.getShiftAmountTy())); SDValue ExpandArg = - DAG.getNode(ISD::OR, MVT::i16, N0, - DAG.getNode(ISD::SHL, MVT::i16, + DAG.getNode(ISD::OR, dl, MVT::i16, N0, + DAG.getNode(ISD::SHL, dl, MVT::i16, N0, DAG.getConstant(8, MVT::i32))); - return DAG.getNode(ISD::TRUNCATE, MVT::i8, - DAG.getNode(Opc, MVT::i16, ExpandArg, N1)); + return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, + DAG.getNode(Opc, dl, MVT::i16, ExpandArg, N1)); } case ISD::SRL: case ISD::SHL: { SDValue N1 = Op.getOperand(1); unsigned N1Opc; N0 = (N0.getOpcode() != ISD::Constant - ? DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, N0) + ? DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0) : DAG.getConstant(cast(N0)->getZExtValue(), MVT::i32)); N1Opc = N1.getValueType().bitsLT(ShiftVT) ? ISD::ZERO_EXTEND : ISD::TRUNCATE; N1 = (N1.getOpcode() != ISD::Constant - ? DAG.getNode(N1Opc, ShiftVT, N1) + ? DAG.getNode(N1Opc, dl, ShiftVT, N1) : DAG.getConstant(cast(N1)->getZExtValue(), ShiftVT)); - return DAG.getNode(ISD::TRUNCATE, MVT::i8, - DAG.getNode(Opc, MVT::i16, N0, N1)); + return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, + DAG.getNode(Opc, dl, MVT::i16, N0, N1)); } case ISD::SRA: { SDValue N1 = Op.getOperand(1); unsigned N1Opc; N0 = (N0.getOpcode() != ISD::Constant - ? DAG.getNode(ISD::SIGN_EXTEND, MVT::i16, N0) + ? DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0) : DAG.getConstant(cast(N0)->getSExtValue(), MVT::i16)); N1Opc = N1.getValueType().bitsLT(ShiftVT) ? ISD::SIGN_EXTEND : ISD::TRUNCATE; N1 = (N1.getOpcode() != ISD::Constant - ? DAG.getNode(N1Opc, ShiftVT, N1) + ? DAG.getNode(N1Opc, dl, ShiftVT, N1) : DAG.getConstant(cast(N1)->getZExtValue(), ShiftVT)); - return DAG.getNode(ISD::TRUNCATE, MVT::i8, - DAG.getNode(Opc, MVT::i16, N0, N1)); + return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, + DAG.getNode(Opc, dl, MVT::i16, N0, N1)); } case ISD::MUL: { SDValue N1 = Op.getOperand(1); unsigned N1Opc; N0 = (N0.getOpcode() != ISD::Constant - ? DAG.getNode(ISD::SIGN_EXTEND, MVT::i16, N0) + ? 
DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0) : DAG.getConstant(cast(N0)->getZExtValue(), MVT::i16)); N1Opc = N1.getValueType().bitsLT(MVT::i16) ? ISD::SIGN_EXTEND : ISD::TRUNCATE; N1 = (N1.getOpcode() != ISD::Constant - ? DAG.getNode(N1Opc, MVT::i16, N1) + ? DAG.getNode(N1Opc, dl, MVT::i16, N1) : DAG.getConstant(cast(N1)->getSExtValue(), MVT::i16)); - return DAG.getNode(ISD::TRUNCATE, MVT::i8, - DAG.getNode(Opc, MVT::i16, N0, N1)); + return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, + DAG.getNode(Opc, dl, MVT::i16, N0, N1)); break; } } @@ -2224,7 +2231,7 @@ static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc, } //! Generate the carry-generate shuffle mask. -SDValue SPU::getCarryGenerateShufMask(SelectionDAG &DAG) { +SDValue SPU::getCarryGenerateShufMask(SelectionDAG &DAG, DebugLoc dl) { SmallVector ShufBytes; // Create the shuffle mask for "rotating" the borrow up one register slot @@ -2234,12 +2241,12 @@ SDValue SPU::getCarryGenerateShufMask(SelectionDAG &DAG) { ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, MVT::i32)); ShufBytes.push_back(DAG.getConstant(0x80808080, MVT::i32)); - return DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, + return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &ShufBytes[0], ShufBytes.size()); } //! Generate the borrow-generate shuffle mask -SDValue SPU::getBorrowGenerateShufMask(SelectionDAG &DAG) { +SDValue SPU::getBorrowGenerateShufMask(SelectionDAG &DAG, DebugLoc dl) { SmallVector ShufBytes; // Create the shuffle mask for "rotating" the borrow up one register slot @@ -2249,7 +2256,7 @@ SDValue SPU::getBorrowGenerateShufMask(SelectionDAG &DAG) { ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, MVT::i32)); ShufBytes.push_back(DAG.getConstant(0xc0c0c0c0, MVT::i32)); - return DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, + return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &ShufBytes[0], ShufBytes.size()); } @@ -2259,6 +2266,7 @@ LowerByteImmed(SDValue Op, SelectionDAG &DAG) { SDValue ConstVec; SDValue Arg; MVT VT = Op.getValueType(); + DebugLoc dl = Op.getDebugLoc(); ConstVec = Op.getOperand(0); Arg = Op.getOperand(1); @@ -2292,8 +2300,9 @@ LowerByteImmed(SDValue Op, SelectionDAG &DAG) { for (size_t i = 0; i < tcVecSize; ++i) tcVec[i] = tc; - return DAG.getNode(Op.getNode()->getOpcode(), VT, Arg, - DAG.getNode(ISD::BUILD_VECTOR, VT, tcVec, tcVecSize)); + return DAG.getNode(Op.getNode()->getOpcode(), dl, VT, Arg, + DAG.getNode(ISD::BUILD_VECTOR, dl, VT, + tcVec, tcVecSize)); } } diff --git a/lib/Target/CellSPU/SPUISelLowering.h b/lib/Target/CellSPU/SPUISelLowering.h index 24c2803fe65..9f1d9558c17 100644 --- a/lib/Target/CellSPU/SPUISelLowering.h +++ b/lib/Target/CellSPU/SPUISelLowering.h @@ -78,10 +78,11 @@ namespace llvm { SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUTargetMachine &TM); - SDValue LowerSplat_v2i64(MVT OpVT, SelectionDAG &DAG, uint64_t splat); + SDValue LowerSplat_v2i64(MVT OpVT, SelectionDAG &DAG, uint64_t splat, + DebugLoc dl); - SDValue getBorrowGenerateShufMask(SelectionDAG &DAG); - SDValue getCarryGenerateShufMask(SelectionDAG &DAG); + SDValue getBorrowGenerateShufMask(SelectionDAG &DAG, DebugLoc dl); + SDValue getCarryGenerateShufMask(SelectionDAG &DAG, DebugLoc dl); } class SPUTargetMachine; // forward dec'l. 
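The CellSPU hunks above apply the same mechanical recipe as the ARM and common-code changes: each lowering or selection routine captures the location of the node it is rewriting once (Op.getDebugLoc() or N->getDebugLoc()) and forwards it to every getNode/getTargetNode call made on that node's behalf, and helpers that build nodes (LowerSplat_v2i64, getCarryGenerateShufMask, getBorrowGenerateShufMask) gain an explicit DebugLoc parameter. A minimal sketch of that call-site pattern, written against the same 2009-era SelectionDAG API this patch touches; LowerSplatWord is a hypothetical helper, not code from the patch:

    // Sketch only: mirrors the pattern applied throughout this patch.
    // LowerSplatWord is a hypothetical helper used purely for illustration.
    static SDValue LowerSplatWord(SDValue Op, SelectionDAG &DAG) {
      DebugLoc dl = Op.getDebugLoc();   // capture the source location once
      MVT VT = Op.getValueType();
      SDValue Elt = DAG.getConstant(0x01020304, MVT::i32);
      // Every node created while lowering Op is tagged with Op's location,
      // so the machine code it becomes still maps back to the same line.
      SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                Elt, Elt, Elt, Elt);
      return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
    }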
diff --git a/lib/Target/IA64/IA64ISelDAGToDAG.cpp b/lib/Target/IA64/IA64ISelDAGToDAG.cpp index 833a8257864..9800c506ca9 100644 --- a/lib/Target/IA64/IA64ISelDAGToDAG.cpp +++ b/lib/Target/IA64/IA64ISelDAGToDAG.cpp @@ -106,6 +106,7 @@ SDNode *IA64DAGToDAGISel::SelectDIV(SDValue Op) { SDValue Chain = N->getOperand(0); SDValue Tmp1 = N->getOperand(0); SDValue Tmp2 = N->getOperand(1); + DebugLoc dl = N->getDebugLoc(); bool isFP=false; @@ -140,26 +141,28 @@ SDNode *IA64DAGToDAGISel::SelectDIV(SDValue Op) { if(!isFP) { // first, load the inputs into FP regs. TmpF1 = - SDValue(CurDAG->getTargetNode(IA64::SETFSIG, MVT::f64, Tmp1), 0); + SDValue(CurDAG->getTargetNode(IA64::SETFSIG, dl, MVT::f64, Tmp1), 0); Chain = TmpF1.getValue(1); TmpF2 = - SDValue(CurDAG->getTargetNode(IA64::SETFSIG, MVT::f64, Tmp2), 0); + SDValue(CurDAG->getTargetNode(IA64::SETFSIG, dl, MVT::f64, Tmp2), 0); Chain = TmpF2.getValue(1); // next, convert the inputs to FP if(isSigned) { TmpF3 = - SDValue(CurDAG->getTargetNode(IA64::FCVTXF, MVT::f64, TmpF1), 0); + SDValue(CurDAG->getTargetNode(IA64::FCVTXF, dl, MVT::f64, TmpF1), 0); Chain = TmpF3.getValue(1); TmpF4 = - SDValue(CurDAG->getTargetNode(IA64::FCVTXF, MVT::f64, TmpF2), 0); + SDValue(CurDAG->getTargetNode(IA64::FCVTXF, dl, MVT::f64, TmpF2), 0); Chain = TmpF4.getValue(1); } else { // is unsigned TmpF3 = - SDValue(CurDAG->getTargetNode(IA64::FCVTXUFS1, MVT::f64, TmpF1), 0); + SDValue(CurDAG->getTargetNode(IA64::FCVTXUFS1, dl, MVT::f64, TmpF1), + 0); Chain = TmpF3.getValue(1); TmpF4 = - SDValue(CurDAG->getTargetNode(IA64::FCVTXUFS1, MVT::f64, TmpF2), 0); + SDValue(CurDAG->getTargetNode(IA64::FCVTXUFS1, dl, MVT::f64, TmpF2), + 0); Chain = TmpF4.getValue(1); } @@ -172,11 +175,11 @@ SDNode *IA64DAGToDAGISel::SelectDIV(SDValue Op) { // we start by computing an approximate reciprocal (good to 9 bits?) 
// note, this instruction writes _both_ TmpF5 (answer) and TmpPR (predicate) if(isFP) - TmpF5 = SDValue(CurDAG->getTargetNode(IA64::FRCPAS0, MVT::f64, MVT::i1, - TmpF3, TmpF4), 0); + TmpF5 = SDValue(CurDAG->getTargetNode(IA64::FRCPAS0, dl, MVT::f64, + MVT::i1, TmpF3, TmpF4), 0); else - TmpF5 = SDValue(CurDAG->getTargetNode(IA64::FRCPAS1, MVT::f64, MVT::i1, - TmpF3, TmpF4), 0); + TmpF5 = SDValue(CurDAG->getTargetNode(IA64::FRCPAS1, dl, MVT::f64, + MVT::i1, TmpF3, TmpF4), 0); TmpPR = TmpF5.getValue(1); Chain = TmpF5.getValue(2); @@ -184,7 +187,7 @@ SDNode *IA64DAGToDAGISel::SelectDIV(SDValue Op) { SDValue minusB; if(isModulus) { // for remainders, it'll be handy to have // copies of -input_b - minusB = SDValue(CurDAG->getTargetNode(IA64::SUB, MVT::i64, + minusB = SDValue(CurDAG->getTargetNode(IA64::SUB, dl, MVT::i64, CurDAG->getRegister(IA64::r0, MVT::i64), Tmp2), 0); Chain = minusB.getValue(1); } @@ -192,19 +195,19 @@ SDNode *IA64DAGToDAGISel::SelectDIV(SDValue Op) { SDValue TmpE0, TmpY1, TmpE1, TmpY2; SDValue OpsE0[] = { TmpF4, TmpF5, F1, TmpPR }; - TmpE0 = SDValue(CurDAG->getTargetNode(IA64::CFNMAS1, MVT::f64, + TmpE0 = SDValue(CurDAG->getTargetNode(IA64::CFNMAS1, dl, MVT::f64, OpsE0, 4), 0); Chain = TmpE0.getValue(1); SDValue OpsY1[] = { TmpF5, TmpE0, TmpF5, TmpPR }; - TmpY1 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, + TmpY1 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, dl, MVT::f64, OpsY1, 4), 0); Chain = TmpY1.getValue(1); SDValue OpsE1[] = { TmpE0, TmpE0, F0, TmpPR }; - TmpE1 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, + TmpE1 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, dl, MVT::f64, OpsE1, 4), 0); Chain = TmpE1.getValue(1); SDValue OpsY2[] = { TmpY1, TmpE1, TmpY1, TmpPR }; - TmpY2 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, + TmpY2 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, dl, MVT::f64, OpsY2, 4), 0); Chain = TmpY2.getValue(1); @@ -215,30 +218,30 @@ SDNode *IA64DAGToDAGISel::SelectDIV(SDValue Op) { SDValue TmpE2, TmpY3, TmpQ0, TmpR0; SDValue OpsE2[] = { TmpE1, TmpE1, F0, TmpPR }; - TmpE2 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, + TmpE2 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, dl, MVT::f64, OpsE2, 4), 0); Chain = TmpE2.getValue(1); SDValue OpsY3[] = { TmpY2, TmpE2, TmpY2, TmpPR }; - TmpY3 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, + TmpY3 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, dl, MVT::f64, OpsY3, 4), 0); Chain = TmpY3.getValue(1); SDValue OpsQ0[] = { Tmp1, TmpY3, F0, TmpPR }; TmpQ0 = - SDValue(CurDAG->getTargetNode(IA64::CFMADS1, MVT::f64, // double prec! - OpsQ0, 4), 0); + SDValue(CurDAG->getTargetNode(IA64::CFMADS1, dl, // double prec! + MVT::f64, OpsQ0, 4), 0); Chain = TmpQ0.getValue(1); SDValue OpsR0[] = { Tmp2, TmpQ0, Tmp1, TmpPR }; TmpR0 = - SDValue(CurDAG->getTargetNode(IA64::CFNMADS1, MVT::f64, // double prec! - OpsR0, 4), 0); + SDValue(CurDAG->getTargetNode(IA64::CFNMADS1, dl, // double prec! + MVT::f64, OpsR0, 4), 0); Chain = TmpR0.getValue(1); // we want Result to have the same target register as the frcpa, so // we two-address hack it. See the comment "for this to work..." on // page 48 of Intel application note #245415 SDValue Ops[] = { TmpF5, TmpY3, TmpR0, TmpQ0, TmpPR }; - Result = CurDAG->getTargetNode(IA64::TCFMADS0, MVT::f64, // d.p. s0 rndg! - Ops, 5); + Result = CurDAG->getTargetNode(IA64::TCFMADS0, dl, // d.p. s0 rndg! + MVT::f64, Ops, 5); Chain = SDValue(Result, 1); return Result; // XXX: early exit! 
} else { // this is *not* an FP divide, so there's a bit left to do: @@ -246,11 +249,11 @@ SDNode *IA64DAGToDAGISel::SelectDIV(SDValue Op) { SDValue TmpQ2, TmpR2, TmpQ3, TmpQ; SDValue OpsQ2[] = { TmpF3, TmpY2, F0, TmpPR }; - TmpQ2 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, + TmpQ2 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, dl, MVT::f64, OpsQ2, 4), 0); Chain = TmpQ2.getValue(1); SDValue OpsR2[] = { TmpF4, TmpQ2, TmpF3, TmpPR }; - TmpR2 = SDValue(CurDAG->getTargetNode(IA64::CFNMAS1, MVT::f64, + TmpR2 = SDValue(CurDAG->getTargetNode(IA64::CFNMAS1, dl, MVT::f64, OpsR2, 4), 0); Chain = TmpR2.getValue(1); @@ -258,7 +261,7 @@ SDNode *IA64DAGToDAGISel::SelectDIV(SDValue Op) { // should two-address hack it. See the comment "for this to work..." on page // 48 of Intel application note #245415 SDValue OpsQ3[] = { TmpF5, TmpR2, TmpY2, TmpQ2, TmpPR }; - TmpQ3 = SDValue(CurDAG->getTargetNode(IA64::TCFMAS1, MVT::f64, + TmpQ3 = SDValue(CurDAG->getTargetNode(IA64::TCFMAS1, dl, MVT::f64, OpsQ3, 5), 0); Chain = TmpQ3.getValue(1); @@ -267,26 +270,27 @@ SDNode *IA64DAGToDAGISel::SelectDIV(SDValue Op) { // arguments. Other fun bugs may also appear, e.g. 0/x = x, not 0. if(isSigned) - TmpQ = SDValue(CurDAG->getTargetNode(IA64::FCVTFXTRUNCS1, + TmpQ = SDValue(CurDAG->getTargetNode(IA64::FCVTFXTRUNCS1, dl, MVT::f64, TmpQ3), 0); else - TmpQ = SDValue(CurDAG->getTargetNode(IA64::FCVTFXUTRUNCS1, + TmpQ = SDValue(CurDAG->getTargetNode(IA64::FCVTFXUTRUNCS1, dl, MVT::f64, TmpQ3), 0); Chain = TmpQ.getValue(1); if(isModulus) { SDValue FPminusB = - SDValue(CurDAG->getTargetNode(IA64::SETFSIG, MVT::f64, minusB), 0); + SDValue(CurDAG->getTargetNode(IA64::SETFSIG, dl, MVT::f64, minusB), + 0); Chain = FPminusB.getValue(1); SDValue Remainder = - SDValue(CurDAG->getTargetNode(IA64::XMAL, MVT::f64, + SDValue(CurDAG->getTargetNode(IA64::XMAL, dl, MVT::f64, TmpQ, FPminusB, TmpF1), 0); Chain = Remainder.getValue(1); - Result = CurDAG->getTargetNode(IA64::GETFSIG, MVT::i64, Remainder); + Result = CurDAG->getTargetNode(IA64::GETFSIG, dl, MVT::i64, Remainder); Chain = SDValue(Result, 1); } else { // just an integer divide - Result = CurDAG->getTargetNode(IA64::GETFSIG, MVT::i64, TmpQ); + Result = CurDAG->getTargetNode(IA64::GETFSIG, dl, MVT::i64, TmpQ); Chain = SDValue(Result, 1); } diff --git a/lib/Target/PIC16/PIC16ISelLowering.cpp b/lib/Target/PIC16/PIC16ISelLowering.cpp index 81f08eb90d8..0635c354953 100644 --- a/lib/Target/PIC16/PIC16ISelLowering.cpp +++ b/lib/Target/PIC16/PIC16ISelLowering.cpp @@ -499,9 +499,9 @@ SDValue PIC16TargetLowering::getChain(SDValue &Op) { } void PIC16TargetLowering::GetExpandedParts(SDValue Op, SelectionDAG &DAG, - SDValue &Lo, SDValue &Hi) { SDNode *N = Op.getNode(); + DebugLoc dl = N->getDebugLoc(); MVT NewVT; std::vector Opers; NewVT = getTypeToTransformTo(N->getValueType(0)); @@ -509,12 +509,12 @@ void PIC16TargetLowering::GetExpandedParts(SDValue Op, SelectionDAG &DAG, // extract the lo component Opers.push_back(Op); Opers.push_back(DAG.getConstant(0,MVT::i8)); - Lo = DAG.getNode(ISD::EXTRACT_ELEMENT,NewVT,&Opers[0],Opers.size()); + Lo = DAG.getNode(ISD::EXTRACT_ELEMENT,dl,NewVT,&Opers[0],Opers.size()); // extract the hi component Opers.clear(); Opers.push_back(Op); Opers.push_back(DAG.getConstant(1,MVT::i8)); - Hi = DAG.getNode(ISD::EXTRACT_ELEMENT,NewVT,&Opers[0],Opers.size()); + Hi = DAG.getNode(ISD::EXTRACT_ELEMENT,dl,NewVT,&Opers[0],Opers.size()); } // Legalize FrameIndex into ExternalSymbol and offset. 
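The SelectionDAG.cpp hunks at the top of the patch show the other half of the convention: when a routine has no originating node to take a location from (SelectionDAG::getConstant and getConstantFP, hence the FIXME), the patch passes DebugLoc::getUnknownLoc() explicitly rather than silently dropping the argument. A small sketch of that fallback under the same assumptions as the previous sketch; BuildZeroSplat is an invented name used only for illustration:

    // Sketch only: the "no location available" fallback used by getConstant.
    // BuildZeroSplat is a hypothetical utility, not part of the patch.
    static SDValue BuildZeroSplat(SelectionDAG &DAG, MVT VT) {
      SmallVector<SDValue, 8> Ops;
      Ops.assign(VT.getVectorNumElements(),
                 DAG.getConstant(0, VT.getVectorElementType()));
      // No SDNode is being rewritten here, so there is no DebugLoc to forward;
      // pass the explicit unknown location, as getConstant/getConstantFP now do.
      return DAG.getNode(ISD::BUILD_VECTOR, DebugLoc::getUnknownLoc(),
                         VT, &Ops[0], Ops.size());
    }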
diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index fbe3fd6a35b..f7744955b81 100644 --- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -418,6 +418,7 @@ bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask, SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) { SDValue Op0 = N->getOperand(0); SDValue Op1 = N->getOperand(1); + DebugLoc dl = N->getDebugLoc(); APInt LKZ, LKO, RKZ, RKO; CurDAG->ComputeMaskedBits(Op0, APInt::getAllOnesValue(32), LKZ, LKO); @@ -479,7 +480,7 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) { SH &= 31; SDValue Ops[] = { Tmp3, Op1, getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) }; - return CurDAG->getTargetNode(PPC::RLWIMI, MVT::i32, Ops, 5); + return CurDAG->getTargetNode(PPC::RLWIMI, dl, MVT::i32, Ops, 5); } } return 0; @@ -773,6 +774,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDValue Op) { // target-specific node if it hasn't already been changed. SDNode *PPCDAGToDAGISel::Select(SDValue Op) { SDNode *N = Op.getNode(); + DebugLoc dl = Op.getDebugLoc(); if (N->isMachineOpcode()) return NULL; // Already selected. @@ -815,17 +817,17 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) { // Simple value. if (isInt16(Imm)) { // Just the Lo bits. - Result = CurDAG->getTargetNode(PPC::LI8, MVT::i64, getI32Imm(Lo)); + Result = CurDAG->getTargetNode(PPC::LI8, dl, MVT::i64, getI32Imm(Lo)); } else if (Lo) { // Handle the Hi bits. unsigned OpC = Hi ? PPC::LIS8 : PPC::LI8; - Result = CurDAG->getTargetNode(OpC, MVT::i64, getI32Imm(Hi)); + Result = CurDAG->getTargetNode(OpC, dl, MVT::i64, getI32Imm(Hi)); // And Lo bits. - Result = CurDAG->getTargetNode(PPC::ORI8, MVT::i64, + Result = CurDAG->getTargetNode(PPC::ORI8, dl, MVT::i64, SDValue(Result, 0), getI32Imm(Lo)); } else { // Just the Hi bits. - Result = CurDAG->getTargetNode(PPC::LIS8, MVT::i64, getI32Imm(Hi)); + Result = CurDAG->getTargetNode(PPC::LIS8, dl, MVT::i64, getI32Imm(Hi)); } // If no shift, we're done. @@ -833,18 +835,18 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) { // Shift for next step if the upper 32-bits were not zero. if (Imm) { - Result = CurDAG->getTargetNode(PPC::RLDICR, MVT::i64, + Result = CurDAG->getTargetNode(PPC::RLDICR, dl, MVT::i64, SDValue(Result, 0), getI32Imm(Shift), getI32Imm(63 - Shift)); } // Add in the last bits as required. if ((Hi = (Remainder >> 16) & 0xFFFF)) { - Result = CurDAG->getTargetNode(PPC::ORIS8, MVT::i64, + Result = CurDAG->getTargetNode(PPC::ORIS8, dl, MVT::i64, SDValue(Result, 0), getI32Imm(Hi)); } if ((Lo = Remainder & 0xFFFF)) { - Result = CurDAG->getTargetNode(PPC::ORI8, MVT::i64, + Result = CurDAG->getTargetNode(PPC::ORI8, dl, MVT::i64, SDValue(Result, 0), getI32Imm(Lo)); } @@ -865,7 +867,7 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) { if (N->hasOneUse()) return CurDAG->SelectNodeTo(N, Opc, Op.getValueType(), TFI, getSmallIPtrImm(0)); - return CurDAG->getTargetNode(Opc, Op.getValueType(), TFI, + return CurDAG->getTargetNode(Opc, dl, Op.getValueType(), TFI, getSmallIPtrImm(0)); } @@ -873,10 +875,10 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) { SDValue InFlag = N->getOperand(1); // Use MFOCRF if supported. 
if (PPCSubTarget.isGigaProcessor()) - return CurDAG->getTargetNode(PPC::MFOCRF, MVT::i32, + return CurDAG->getTargetNode(PPC::MFOCRF, dl, MVT::i32, N->getOperand(0), InFlag); else - return CurDAG->getTargetNode(PPC::MFCR, MVT::i32, InFlag); + return CurDAG->getTargetNode(PPC::MFCR, dl, MVT::i32, InFlag); } case ISD::SDIV: { @@ -890,16 +892,16 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) { SDValue N0 = N->getOperand(0); if ((signed)Imm > 0 && isPowerOf2_32(Imm)) { SDNode *Op = - CurDAG->getTargetNode(PPC::SRAWI, MVT::i32, MVT::Flag, + CurDAG->getTargetNode(PPC::SRAWI, dl, MVT::i32, MVT::Flag, N0, getI32Imm(Log2_32(Imm))); return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32, SDValue(Op, 0), SDValue(Op, 1)); } else if ((signed)Imm < 0 && isPowerOf2_32(-Imm)) { SDNode *Op = - CurDAG->getTargetNode(PPC::SRAWI, MVT::i32, MVT::Flag, + CurDAG->getTargetNode(PPC::SRAWI, dl, MVT::i32, MVT::Flag, N0, getI32Imm(Log2_32(-Imm))); SDValue PT = - SDValue(CurDAG->getTargetNode(PPC::ADDZE, MVT::i32, + SDValue(CurDAG->getTargetNode(PPC::ADDZE, dl, MVT::i32, SDValue(Op, 0), SDValue(Op, 1)), 0); return CurDAG->SelectNodeTo(N, PPC::NEG, MVT::i32, PT); @@ -954,7 +956,7 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) { SDValue Base = LD->getBasePtr(); SDValue Ops[] = { Offset, Base, Chain }; // FIXME: PPC64 - return CurDAG->getTargetNode(Opcode, LD->getValueType(0), + return CurDAG->getTargetNode(Opcode, dl, LD->getValueType(0), PPCLowering.getPointerTy(), MVT::Other, Ops, 3); } else { @@ -998,7 +1000,7 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) { SDValue Ops[] = { N->getOperand(0).getOperand(0), N->getOperand(0).getOperand(1), getI32Imm(0), getI32Imm(MB),getI32Imm(ME) }; - return CurDAG->getTargetNode(PPC::RLWIMI, MVT::i32, Ops, 5); + return CurDAG->getTargetNode(PPC::RLWIMI, dl, MVT::i32, Ops, 5); } } @@ -1048,7 +1050,7 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) { // FIXME: Implement this optzn for PPC64. N->getValueType(0) == MVT::i32) { SDNode *Tmp = - CurDAG->getTargetNode(PPC::ADDIC, MVT::i32, MVT::Flag, + CurDAG->getTargetNode(PPC::ADDIC, dl, MVT::i32, MVT::Flag, N->getOperand(0), getI32Imm(~0U)); return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, SDValue(Tmp, 0), N->getOperand(0), @@ -1099,7 +1101,7 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) { SDValue Chain = N->getOperand(0); SDValue Target = N->getOperand(1); unsigned Opc = Target.getValueType() == MVT::i32 ? PPC::MTCTR : PPC::MTCTR8; - Chain = SDValue(CurDAG->getTargetNode(Opc, MVT::Other, Target, + Chain = SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Other, Target, Chain), 0); return CurDAG->SelectNodeTo(N, PPC::BCTR, MVT::Other, Chain); } diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index bd0402ad5b1..9099e757e7f 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -3184,7 +3184,7 @@ static bool isConstantSplat(const uint64_t Bits128[2], /// BuildSplatI - Build a canonical splati of Val with an element size of /// SplatSize. Cast the result to VT. static SDValue BuildSplatI(int Val, unsigned SplatSize, MVT VT, - SelectionDAG &DAG) { + SelectionDAG &DAG, DebugLoc dl) { assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); static const MVT VTys[] = { // canonical VT to use for each size. 
@@ -3203,28 +3203,28 @@ static SDValue BuildSplatI(int Val, unsigned SplatSize, MVT VT, SDValue Elt = DAG.getConstant(Val, CanonicalVT.getVectorElementType()); SmallVector Ops; Ops.assign(CanonicalVT.getVectorNumElements(), Elt); - SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, + SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, &Ops[0], Ops.size()); - return DAG.getNode(ISD::BIT_CONVERT, ReqVT, Res); + return DAG.getNode(ISD::BIT_CONVERT, dl, ReqVT, Res); } /// BuildIntrinsicOp - Return a binary operator intrinsic node with the /// specified intrinsic ID. static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, - SelectionDAG &DAG, - MVT DestVT = MVT::Other) { + SelectionDAG &DAG, DebugLoc dl, + MVT DestVT = MVT::Other) { if (DestVT == MVT::Other) DestVT = LHS.getValueType(); - return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, + return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, DAG.getConstant(IID, MVT::i32), LHS, RHS); } /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the /// specified intrinsic ID. static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, - SDValue Op2, SelectionDAG &DAG, - MVT DestVT = MVT::Other) { + SDValue Op2, SelectionDAG &DAG, + DebugLoc dl, MVT DestVT = MVT::Other) { if (DestVT == MVT::Other) DestVT = Op0.getValueType(); - return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, + return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); } @@ -3232,17 +3232,17 @@ static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified /// amount. The result has the specified value type. static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, - MVT VT, SelectionDAG &DAG) { + MVT VT, SelectionDAG &DAG, DebugLoc dl) { // Force LHS/RHS to be the right type. - LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS); - RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS); + LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, LHS); + RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, RHS); SDValue Ops[16]; for (unsigned i = 0; i != 16; ++i) Ops[i] = DAG.getConstant(i+Amt, MVT::i8); - SDValue T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS, - DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops,16)); - return DAG.getNode(ISD::BIT_CONVERT, VT, T); + SDValue T = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v16i8, LHS, RHS, + DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops,16)); + return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T); } // If this is a case we can't handle, return null and let the default @@ -3258,6 +3258,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, // zero. uint64_t VectorBits[2]; uint64_t UndefBits[2]; + DebugLoc dl = Op.getDebugLoc(); if (GetConstantBuildVectorBits(Op.getNode(), VectorBits, UndefBits)) return SDValue(); // Not a constant vector. @@ -3276,8 +3277,8 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, // Canonicalize all zero vectors to be v4i32. 
if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { SDValue Z = DAG.getConstant(0, MVT::i32); - Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z); - Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z); + Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z); + Op = DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Z); } return Op; } @@ -3285,7 +3286,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. int32_t SextVal= int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize); if (SextVal >= -16 && SextVal <= 15) - return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG); + return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); // Two instruction sequences. @@ -3293,9 +3294,9 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, // If this value is in the range [-32,30] and is even, use: // tmp = VSPLTI[bhw], result = add tmp, tmp if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) { - SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG); - Res = DAG.getNode(ISD::ADD, Res.getValueType(), Res, Res); - return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); + SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG, dl); + Res = DAG.getNode(ISD::ADD, dl, Res.getValueType(), Res, Res); + return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); } // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is @@ -3303,15 +3304,15 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, // for fneg/fabs. if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { // Make -1 and vspltisw -1: - SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG); + SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl); // Make the VSLW intrinsic, computing 0x8000_0000. SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, - OnesV, DAG); + OnesV, DAG, dl); // xor by OnesV to invert it. - Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV); - return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); + Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV); + return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); } // Check to see if this is a wide variety of vsplti*, binop self cases. @@ -3332,63 +3333,63 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, // vsplti + shl self. if (SextVal == (i << (int)TypeShiftAmt)) { - SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); + SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); static const unsigned IIDs[] = { // Intrinsic to use for each size. Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, Intrinsic::ppc_altivec_vslw }; - Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG); - return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); + Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); + return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); } // vsplti + srl self. if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { - SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); + SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); static const unsigned IIDs[] = { // Intrinsic to use for each size. 
        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
-      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
-      return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
+      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
+      return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
     }

     // vsplti + sra self.
     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
-      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
+      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
       static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
        Intrinsic::ppc_altivec_vsraw
      };
-      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
-      return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
+      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
+      return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
     }

     // vsplti + rol self.
     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
-      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
+      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
       static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
        Intrinsic::ppc_altivec_vrlw
      };
-      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
-      return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
+      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
+      return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
     }

     // t = vsplti c, result = vsldoi t, t, 1
     if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) {
-      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
-      return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG);
+      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
+      return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl);
     }
     // t = vsplti c, result = vsldoi t, t, 2
     if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) {
-      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
-      return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG);
+      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
+      return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl);
     }
     // t = vsplti c, result = vsldoi t, t, 3
     if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) {
-      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
-      return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG);
+      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
+      return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl);
     }
   }

@@ -3396,17 +3397,17 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,

   // Odd, in range [17,31]:  (vsplti C)-(vsplti -16).
   if (SextVal >= 0 && SextVal <= 31) {
-    SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG);
-    SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
-    LHS = DAG.getNode(ISD::SUB, LHS.getValueType(), LHS, RHS);
-    return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
+    SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG, dl);
+    SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
+    LHS = DAG.getNode(ISD::SUB, dl, LHS.getValueType(), LHS, RHS);
+    return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS);
   }
   // Odd, in range [-31,-17]:  (vsplti C)+(vsplti -16).
   if (SextVal >= -31 && SextVal <= 0) {
-    SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG);
-    SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
-    LHS = DAG.getNode(ISD::ADD, LHS.getValueType(), LHS, RHS);
-    return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
+    SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG, dl);
+    SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
+    LHS = DAG.getNode(ISD::ADD, dl, LHS.getValueType(), LHS, RHS);
+    return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS);
   }
 }

@@ -3416,7 +3417,8 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
 /// the specified operations to build the shuffle.
 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
-                                      SDValue RHS, SelectionDAG &DAG) {
+                                      SDValue RHS, SelectionDAG &DAG,
+                                      DebugLoc dl) {
   unsigned OpNum = (PFEntry >> 26) & 0x0F;
   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
@@ -3441,8 +3443,8 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
   }

   SDValue OpLHS, OpRHS;
-  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG);
-  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG);
+  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
+  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);

   unsigned ShufIdxs[16];
   switch (OpNum) {
@@ -3476,18 +3478,19 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
       ShufIdxs[i] = (i&3)+12;
     break;
   case OP_VSLDOI4:
-    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG);
+    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
   case OP_VSLDOI8:
-    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG);
+    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
   case OP_VSLDOI12:
-    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG);
+    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
   }
   SDValue Ops[16];
   for (unsigned i = 0; i != 16; ++i)
     Ops[i] = DAG.getConstant(ShufIdxs[i], MVT::i8);
-  return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS,
-                     DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16));
+  return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, OpLHS.getValueType(),
+                     OpLHS, OpRHS,
+                     DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops, 16));
 }

 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
@@ -3496,6 +3499,7 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
 /// lowered into a vperm.
 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                                SelectionDAG &DAG) {
+  DebugLoc dl = Op.getDebugLoc();
   SDValue V1 = Op.getOperand(0);
   SDValue V2 = Op.getOperand(1);
   SDValue PermMask = Op.getOperand(2);
@@ -3584,7 +3588,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
   // available, if this block is within a loop, we should avoid using vperm
   // for 3-operation perms and use a constant pool load instead.
     if (Cost < 3)
-      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG);
+      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
   }

   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
@@ -3609,9 +3613,9 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                              MVT::i8));
   }

-  SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
+  SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
                                     &ResultMask[0], ResultMask.size());
-  return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask);
+  return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask);
 }

 /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
@@ -3745,49 +3749,51 @@ SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
 }

 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) {
+  DebugLoc dl = Op.getDebugLoc();
   if (Op.getValueType() == MVT::v4i32) {
     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

-    SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG);
-    SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt.
+    SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
+    SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt.

     SDValue RHSSwap =   // = vrlw RHS, 16
-      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG);
+      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);

     // Shrinkify inputs to v8i16.
-    LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS);
-    RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS);
-    RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap);
+    LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, LHS);
+    RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHS);
+    RHSSwap = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHSSwap);

     // Low parts multiplied together, generating 32-bit results (we ignore the
     // top parts).
     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
-                                        LHS, RHS, DAG, MVT::v4i32);
+                                        LHS, RHS, DAG, dl, MVT::v4i32);

     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
-                                        LHS, RHSSwap, Zero, DAG, MVT::v4i32);
+                                        LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
     // Shift the high parts up 16 bits.
-    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG);
-    return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd);
+    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
+                              Neg16, DAG, dl);
+    return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
   } else if (Op.getValueType() == MVT::v8i16) {
     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

-    SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG);
+    SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);

     return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
-                            LHS, RHS, Zero, DAG);
+                            LHS, RHS, Zero, DAG, dl);
   } else if (Op.getValueType() == MVT::v16i8) {
     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

     // Multiply the even 8-bit parts, producing 16-bit sums.
     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
-                                           LHS, RHS, DAG, MVT::v8i16);
-    EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts);
+                                           LHS, RHS, DAG, dl, MVT::v8i16);
+    EvenParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, EvenParts);

     // Multiply the odd 8-bit parts, producing 16-bit sums.
     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
-                                          LHS, RHS, DAG, MVT::v8i16);
-    OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts);
+                                          LHS, RHS, DAG, dl, MVT::v8i16);
+    OddParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OddParts);

     // Merge the results together.
     SDValue Ops[16];
@@ -3795,8 +3801,8 @@ SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) {
       Ops[i*2  ] = DAG.getConstant(2*i+1, MVT::i8);
       Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8);
     }
-    return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts,
-                       DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16));
+    return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v16i8, EvenParts, OddParts,
+                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops, 16));
   } else {
     assert(0 && "Unknown mul to lower!");
     abort();
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index d61e3436e94..8c5aac397a6 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -441,7 +441,8 @@ static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
       else
         Ops.push_back(Chain.getOperand(i));
     SDValue NewChain =
-      CurDAG->getNode(ISD::TokenFactor, MVT::Other, &Ops[0], Ops.size());
+      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
+                      MVT::Other, &Ops[0], Ops.size());
     Ops.clear();
     Ops.push_back(NewChain);
   }
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index b22d8a21a2f..e465cf3e846 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -3973,7 +3973,7 @@ LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2,
       Mask1[2] = PermMask.getOperand(HiIndex^1);
       Mask1[3] = DAG.getNode(ISD::UNDEF, dl, MaskEVT);
       V2 = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2,
-                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &Mask1[0], 4));
+                       DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, &Mask1[0], 4));

       if (HiIndex >= 2) {
         Mask1[0] = PermMask.getOperand(0);
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 21a11e3808b..8e3985f0998 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -2373,6 +2373,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
   std::vector AddrOps;
   std::vector BeforeOps;
   std::vector AfterOps;
+  DebugLoc dl = N->getDebugLoc();
   unsigned NumOps = N->getNumOperands();
   for (unsigned i = 0; i != NumOps-1; ++i) {
     SDValue Op = N->getOperand(i);
@@ -2393,7 +2394,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
     MVT VT = *RC->vt_begin();
     bool isAligned = (RI.getStackAlignment() >= 16) ||
       RI.needsStackRealignment(MF);
-    Load = DAG.getTargetNode(getLoadRegOpcode(RC, isAligned),
+    Load = DAG.getTargetNode(getLoadRegOpcode(RC, isAligned), dl,
                              VT, MVT::Other, &AddrOps[0], AddrOps.size());
     NewNodes.push_back(Load);

@@ -2416,7 +2417,8 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
   if (Load)
     BeforeOps.push_back(SDValue(Load, 0));
   std::copy(AfterOps.begin(), AfterOps.end(), std::back_inserter(BeforeOps));
-  SDNode *NewNode= DAG.getTargetNode(Opc, VTs, &BeforeOps[0], BeforeOps.size());
+  SDNode *NewNode= DAG.getTargetNode(Opc, dl, VTs, &BeforeOps[0],
+                                     BeforeOps.size());
   NewNodes.push_back(NewNode);

   // Emit the store instruction.
@@ -2426,7 +2428,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
     AddrOps.push_back(Chain);
     bool isAligned = (RI.getStackAlignment() >= 16) ||
       RI.needsStackRealignment(MF);
-    SDNode *Store = DAG.getTargetNode(getStoreRegOpcode(DstRC, isAligned),
+    SDNode *Store = DAG.getTargetNode(getStoreRegOpcode(DstRC, isAligned), dl,
                                       MVT::Other, &AddrOps[0], AddrOps.size());
     NewNodes.push_back(Store);
   }
diff --git a/utils/TableGen/DAGISelEmitter.cpp b/utils/TableGen/DAGISelEmitter.cpp
index 14b261a1d48..43513525579 100644
--- a/utils/TableGen/DAGISelEmitter.cpp
+++ b/utils/TableGen/DAGISelEmitter.cpp
@@ -978,7 +978,8 @@ public:
         emitCode("}");
       }
       emitCode("InChains.push_back(" + ChainName + ");");
-      emitCode(ChainName + " = CurDAG->getNode(ISD::TokenFactor, MVT::Other, "
+      emitCode(ChainName + " = CurDAG->getNode(ISD::TokenFactor, "
+               "N.getDebugLoc(), MVT::Other, "
               "&InChains[0], InChains.size());");
       if (GenDebug) {
         emitCode("CurDAG->setSubgraphColor(" + ChainName +".getNode(), \"yellow\");");