diff --git a/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp b/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
index 935a8274f73..799d22bc38b 100644
--- a/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
+++ b/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
@@ -22,7 +22,7 @@ using namespace llvm;
 
 // Return true if Expr is in the range [MinValue, MaxValue].
 static bool inRange(const MCExpr *Expr, int64_t MinValue, int64_t MaxValue) {
-  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) {
+  if (auto *CE = dyn_cast<MCConstantExpr>(Expr)) {
     int64_t Value = CE->getValue();
     return Value >= MinValue && Value <= MaxValue;
   }
@@ -112,7 +112,7 @@ private:
     // Add as immediates when possible.  Null MCExpr = 0.
     if (Expr == 0)
      Inst.addOperand(MCOperand::CreateImm(0));
-    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
+    else if (auto *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
@@ -781,7 +781,7 @@ parsePCRel(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
 
   // For consistency with the GNU assembler, treat immediates as offsets
   // from ".".
-  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) {
+  if (auto *CE = dyn_cast<MCConstantExpr>(Expr)) {
     int64_t Value = CE->getValue();
     if ((Value & 1) || Value < MinVal || Value > MaxVal) {
       Error(StartLoc, "offset out of range");
diff --git a/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/lib/Target/SystemZ/SystemZAsmPrinter.cpp
index aced19ea0cc..8b18bc16e1c 100644
--- a/lib/Target/SystemZ/SystemZAsmPrinter.cpp
+++ b/lib/Target/SystemZ/SystemZAsmPrinter.cpp
@@ -179,8 +179,7 @@ getModifierVariantKind(SystemZCP::SystemZCPModifier Modifier) {
 
 void SystemZAsmPrinter::
 EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
-  SystemZConstantPoolValue *ZCPV =
-    static_cast<SystemZConstantPoolValue*>(MCPV);
+  auto *ZCPV = static_cast<SystemZConstantPoolValue*>(MCPV);
 
   const MCExpr *Expr =
     MCSymbolRefExpr::Create(getSymbol(ZCPV->getGlobalValue()),
@@ -221,7 +220,7 @@ bool SystemZAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
 
 void SystemZAsmPrinter::EmitEndOfAsmFile(Module &M) {
   if (Subtarget->isTargetELF()) {
-    const TargetLoweringObjectFileELF &TLOFELF =
+    auto &TLOFELF =
       static_cast<const TargetLoweringObjectFileELF &>(getObjFileLowering());
 
     MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
diff --git a/lib/Target/SystemZ/SystemZConstantPoolValue.cpp b/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
index 6c7081169a9..19cec219e2d 100644
--- a/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
+++ b/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
@@ -43,7 +43,7 @@ getExistingMachineCPValue(MachineConstantPool *CP, unsigned Alignment) {
   for (unsigned I = 0, E = Constants.size(); I != E; ++I) {
     if (Constants[I].isMachineConstantPoolEntry() &&
         (Constants[I].getAlignment() & AlignMask) == 0) {
-      SystemZConstantPoolValue *ZCPV =
+      auto *ZCPV =
         static_cast<SystemZConstantPoolValue *>(Constants[I].Val.MachineCPVal);
       if (ZCPV->GV == GV && ZCPV->Modifier == Modifier)
         return I;
diff --git a/lib/Target/SystemZ/SystemZFrameLowering.cpp b/lib/Target/SystemZ/SystemZFrameLowering.cpp
index e4b9b766c4c..f084560d8c5 100644
--- a/lib/Target/SystemZ/SystemZFrameLowering.cpp
+++ b/lib/Target/SystemZ/SystemZFrameLowering.cpp
@@ -312,7 +312,7 @@ static void emitIncrement(MachineBasicBlock &MBB,
 void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
   MachineBasicBlock &MBB = MF.front();
   MachineFrameInfo *MFFrame = MF.getFrameInfo();
-  const SystemZInstrInfo *ZII =
+  auto *ZII =
     static_cast<const SystemZInstrInfo*>(MF.getTarget().getInstrInfo());
   SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
   MachineBasicBlock::iterator MBBI = MBB.begin();
@@ -412,7 +412,7 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
 void SystemZFrameLowering::emitEpilogue(MachineFunction &MF,
                                         MachineBasicBlock &MBB) const {
   MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
-  const SystemZInstrInfo *ZII =
+  auto *ZII =
     static_cast<const SystemZInstrInfo*>(MF.getTarget().getInstrInfo());
   SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
 
diff --git a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index dc92ff69c2e..d8b5c364c4b 100644
--- a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -650,8 +650,7 @@ bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,
     return false;
 
   // We need a constant mask.
-  ConstantSDNode *MaskNode =
-    dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
+  auto *MaskNode = dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
   if (!MaskNode)
     return false;
 
@@ -703,8 +702,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
     if (RxSBG.Opcode == SystemZ::RNSBG)
       return false;
 
-    ConstantSDNode *MaskNode =
-      dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+    auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
     if (!MaskNode)
       return false;
 
@@ -728,8 +726,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
     if (RxSBG.Opcode != SystemZ::RNSBG)
       return false;
 
-    ConstantSDNode *MaskNode =
-      dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+    auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
     if (!MaskNode)
       return false;
 
@@ -753,8 +750,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
     // Any 64-bit rotate left can be merged into the RxSBG.
     if (RxSBG.BitSize != 64 || N.getValueType() != MVT::i64)
       return false;
-    ConstantSDNode *CountNode
-      = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
     if (!CountNode)
       return false;
 
@@ -792,8 +788,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
   }
 
   case ISD::SHL: {
-    ConstantSDNode *CountNode =
-      dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
     if (!CountNode)
       return false;
 
@@ -820,8 +815,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
 
   case ISD::SRL:
   case ISD::SRA: {
-    ConstantSDNode *CountNode =
-      dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
     if (!CountNode)
       return false;
 
@@ -890,7 +884,7 @@ SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
       SystemZ::isImmLF(~RISBG.Mask) ||
       SystemZ::isImmHF(~RISBG.Mask)) {
     // Force the new mask into the DAG, since it may include known-one bits.
-    ConstantSDNode *MaskN = cast<ConstantSDNode>(N->getOperand(1).getNode());
+    auto *MaskN = cast<ConstantSDNode>(N->getOperand(1).getNode());
     if (MaskN->getZExtValue() != RISBG.Mask) {
       SDValue NewMask = CurDAG->getConstant(RISBG.Mask, VT);
       N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), NewMask);
@@ -942,7 +936,7 @@ SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
 
     // Prefer IC for character insertions from memory.
     if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0)
-      if (LoadSDNode *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
+      if (auto *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
         if (Load->getMemoryVT() == MVT::i8)
           return 0;
 
@@ -1010,8 +1004,8 @@ bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
 }
 
 bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
-  StoreSDNode *Store = cast<StoreSDNode>(N);
-  LoadSDNode *Load = cast<LoadSDNode>(Store->getValue());
+  auto *Store = cast<StoreSDNode>(N);
+  auto *Load = cast<LoadSDNode>(Store->getValue());
 
   // Prefer not to use MVC if either address can use ... RELATIVE LONG
   // instructions.
@@ -1030,9 +1024,9 @@ bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
 
 bool SystemZDAGToDAGISel::storeLoadCanUseBlockBinary(SDNode *N,
                                                      unsigned I) const {
-  StoreSDNode *StoreA = cast<StoreSDNode>(N);
-  LoadSDNode *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
-  LoadSDNode *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
+  auto *StoreA = cast<StoreSDNode>(N);
+  auto *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
+  auto *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
   return !LoadA->isVolatile() && canUseBlockOperation(StoreA, LoadB);
 }
 
@@ -1063,7 +1057,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
     // If this is a 64-bit operation in which both 32-bit halves are nonzero,
    // split the operation into two.
    if (!ResNode && Node->getValueType(0) == MVT::i64)
-      if (ConstantSDNode *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
+      if (auto *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
        uint64_t Val = Op1->getZExtValue();
        if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val))
          Node = splitLargeImmediate(Opcode, Node, Node->getOperand(0),
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index 9cce34fa386..76a32cdc1a9 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -444,31 +444,31 @@ getSingleConstraintMatchWeight(AsmOperandInfo &info,
     break;
 
   case 'I': // Unsigned 8-bit constant
-    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
       if (isUInt<8>(C->getZExtValue()))
         weight = CW_Constant;
     break;
 
   case 'J': // Unsigned 12-bit constant
-    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;
 
   case 'K': // Signed 16-bit constant
-    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;
 
   case 'L': // Signed 20-bit displacement (on all targets we support)
-    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;
 
   case 'M': // 0x7fffffff
-    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
@@ -561,35 +561,35 @@ LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
   if (Constraint.length() == 1) {
     switch (Constraint[0]) {
     case 'I': // Unsigned 8-bit constant
-      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+      if (auto *C = dyn_cast<ConstantSDNode>(Op))
         if (isUInt<8>(C->getZExtValue()))
           Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                               Op.getValueType()));
       return;
 
     case 'J': // Unsigned 12-bit constant
-      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;
 
     case 'K': // Signed 16-bit constant
-      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;
 
     case 'L': // Signed 20-bit displacement (on all targets we support)
-      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;
 
     case 'M': // 0x7fffffff
-      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
@@ -670,8 +670,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
   MachineRegisterInfo &MRI = MF.getRegInfo();
   SystemZMachineFunctionInfo *FuncInfo =
     MF.getInfo<SystemZMachineFunctionInfo>();
-  const SystemZFrameLowering *TFL =
-    static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());
+  auto *TFL = static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());
 
   // Assign locations to all of the incoming arguments.
   SmallVector<CCValAssign, 16> ArgLocs;
@@ -880,10 +879,10 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
   // associated Target* opcodes.  Force %r1 to be used for indirect
   // tail calls.
   SDValue Glue;
-  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
     Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
     Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
-  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
     Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
     Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
   } else if (IsTailCall) {
@@ -1104,7 +1103,7 @@ static void adjustZeroCmp(SelectionDAG &DAG, Comparison &C) {
   if (C.ICmpType == SystemZICMP::UnsignedOnly)
     return;
 
-  ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
+  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
   if (!ConstOp1)
     return;
 
@@ -1129,14 +1128,14 @@ static void adjustSubwordCmp(SelectionDAG &DAG, Comparison &C) {
     return;
 
   // We must have an 8- or 16-bit load.
-  LoadSDNode *Load = cast<LoadSDNode>(C.Op0);
+  auto *Load = cast<LoadSDNode>(C.Op0);
   unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
   if (NumBits != 8 && NumBits != 16)
     return;
 
   // The load must be an extending one and the constant must be within the
   // range of the unextended value.
-  ConstantSDNode *ConstOp1 = cast<ConstantSDNode>(C.Op1);
+  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
   uint64_t Value = ConstOp1->getZExtValue();
   uint64_t Mask = (1 << NumBits) - 1;
   if (Load->getExtensionType() == ISD::SEXTLOAD) {
@@ -1191,7 +1190,7 @@ static void adjustSubwordCmp(SelectionDAG &DAG, Comparison &C) {
 // Return true if Op is either an unextended load, or a load suitable
 // for integer register-memory comparisons of type ICmpType.
 static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
-  LoadSDNode *Load = dyn_cast<LoadSDNode>(Op.getNode());
+  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
   if (Load) {
     // There are no instructions to compare a register with a memory byte.
     if (Load->getMemoryVT() == MVT::i8)
@@ -1225,7 +1224,7 @@ static bool shouldSwapCmpOperands(const Comparison &C) {
 
   // Never swap comparisons with zero since there are many ways to optimize
   // those later.
-  ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
+  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
   if (ConstOp1 && ConstOp1->getZExtValue() == 0)
     return false;
 
@@ -1302,7 +1301,7 @@ static void adjustForSubtraction(SelectionDAG &DAG, Comparison &C) {
 // negation to set CC, so avoiding separate LOAD AND TEST and
 // LOAD (NEGATIVE/COMPLEMENT) instructions.
 static void adjustForFNeg(Comparison &C) {
-  ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
+  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
   if (C1 && C1->isZero()) {
     for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
       SDNode *N = *I;
@@ -1327,7 +1326,7 @@ static void adjustForLTGFR(Comparison &C) {
       C.Op0.getValueType() == MVT::i64 &&
       C.Op1.getOpcode() == ISD::Constant &&
       cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
-    ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
+    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
     if (C1 && C1->getZExtValue() == 32) {
       SDValue ShlOp0 = C.Op0.getOperand(0);
       // See whether X has any SIGN_EXTEND_INREG uses.
@@ -1351,7 +1350,7 @@ static void adjustICmpTruncate(SelectionDAG &DAG, Comparison &C) {
       C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
       C.Op1.getOpcode() == ISD::Constant &&
       cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
-    LoadSDNode *L = cast<LoadSDNode>(C.Op0.getOperand(0));
+    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
     if (L->getMemoryVT().getStoreSizeInBits()
         <= C.Op0.getValueType().getSizeInBits()) {
       unsigned Type = L->getExtensionType();
@@ -1367,7 +1366,7 @@ static void adjustICmpTruncate(SelectionDAG &DAG, Comparison &C) {
 // Return true if shift operation N has an in-range constant shift value.
 // Store it in ShiftVal if so.
 static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
-  ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
+  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
   if (!Shift)
     return false;
 
@@ -1479,7 +1478,7 @@ static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
 // Update the arguments with the TM version if so.
 static void adjustForTestUnderMask(SelectionDAG &DAG, Comparison &C) {
   // Check that we have a comparison with a constant.
-  ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
+  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
   if (!ConstOp1)
     return;
   uint64_t CmpVal = ConstOp1->getZExtValue();
@@ -1748,8 +1747,8 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
 
   // Special case for handling -1/0 results.  The shifts we use here
   // should get optimized with the IPM conversion sequence.
-  ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp);
-  ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp);
+  auto *TrueC = dyn_cast<ConstantSDNode>(TrueOp);
+  auto *FalseC = dyn_cast<ConstantSDNode>(FalseOp);
   if (TrueC && FalseC) {
     int64_t TrueVal = TrueC->getSExtValue();
     int64_t FalseVal = FalseC->getSExtValue();
@@ -2183,10 +2182,10 @@ SDValue SystemZTargetLowering::lowerSIGN_EXTEND(SDValue Op,
   SDValue N0 = Op.getOperand(0);
   EVT VT = Op.getValueType();
   if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
-    ConstantSDNode *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+    auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
     SDValue Inner = N0.getOperand(0);
     if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
-      ConstantSDNode *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1));
+      auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1));
       if (ShlAmt) {
         unsigned Extra = (VT.getSizeInBits() -
                           N0.getValueType().getSizeInBits());
@@ -2208,7 +2207,7 @@ SDValue SystemZTargetLowering::lowerSIGN_EXTEND(SDValue Op,
 // Op is an atomic load.  Lower it into a normal volatile load.
 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
                                                 SelectionDAG &DAG) const {
-  AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
+  auto *Node = cast<AtomicSDNode>(Op.getNode());
   return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
                         Node->getChain(), Node->getBasePtr(),
                         Node->getMemoryVT(), Node->getMemOperand());
@@ -2218,7 +2217,7 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
 // by a serialization.
 SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
                                                  SelectionDAG &DAG) const {
-  AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
+  auto *Node = cast<AtomicSDNode>(Op.getNode());
   SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
                                     Node->getBasePtr(), Node->getMemoryVT(),
                                     Node->getMemOperand());
@@ -2231,7 +2230,7 @@ SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
                                                    SelectionDAG &DAG,
                                                    unsigned Opcode) const {
-  AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
+  auto *Node = cast<AtomicSDNode>(Op.getNode());
 
   // 32-bit operations need no code outside the main loop.
   EVT NarrowVT = Node->getMemoryVT();
@@ -2249,7 +2248,7 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
 
   // Convert atomic subtracts of constants into additions.
   if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
-    if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Src2)) {
+    if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
       Opcode = SystemZISD::ATOMIC_LOADW_ADD;
       Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType());
     }
@@ -2305,7 +2304,7 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
 // operations into additions.
 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
                                                     SelectionDAG &DAG) const {
-  AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
+  auto *Node = cast<AtomicSDNode>(Op.getNode());
   EVT MemVT = Node->getMemoryVT();
   if (MemVT == MVT::i32 || MemVT == MVT::i64) {
     // A full-width operation.
@@ -2314,7 +2313,7 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
     SDValue NegSrc2;
     SDLoc DL(Src2);
 
-    if (ConstantSDNode *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
+    if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
       // Use an addition if the operand is constant and either LAA(G) is
       // available or the negative value is in the range of A(G)FHI.
       int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
@@ -2342,7 +2341,7 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
 // into a fullword ATOMIC_CMP_SWAPW operation.
 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
                                                     SelectionDAG &DAG) const {
-  AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
+  auto *Node = cast<AtomicSDNode>(Op.getNode());
 
   // We have native support for 32-bit compare and swap.
   EVT NarrowVT = Node->getMemoryVT();
@@ -2409,7 +2408,7 @@ SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
   bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
   unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
 
-  MemIntrinsicSDNode *Node = cast<MemIntrinsicSDNode>(Op.getNode());
+  auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
   SDValue Ops[] = {
     Op.getOperand(0),
     DAG.getConstant(Code, MVT::i32),
diff --git a/lib/Target/SystemZ/SystemZOperators.td b/lib/Target/SystemZ/SystemZOperators.td
index bffe1a56187..a3919618f8f 100644
--- a/lib/Target/SystemZ/SystemZOperators.td
+++ b/lib/Target/SystemZ/SystemZOperators.td
@@ -251,7 +251,7 @@ def anyextloadi32 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
 // Aligned loads.
 class AlignedLoad<SDPatternOperator load>
   : PatFrag<(ops node:$addr), (load node:$addr), [{
-  LoadSDNode *Load = cast<LoadSDNode>(N);
+  auto *Load = cast<LoadSDNode>(N);
   return Load->getAlignment() >= Load->getMemoryVT().getStoreSize();
 }]>;
 def aligned_load : AlignedLoad<load>;
@@ -263,7 +263,7 @@ def aligned_azextloadi32 : AlignedLoad<azextloadi32>;
 // Aligned stores.
 class AlignedStore<SDPatternOperator store>
   : PatFrag<(ops node:$src, node:$addr), (store node:$src, node:$addr), [{
-  StoreSDNode *Store = cast<StoreSDNode>(N);
+  auto *Store = cast<StoreSDNode>(N);
   return Store->getAlignment() >= Store->getMemoryVT().getStoreSize();
 }]>;
 def aligned_store : AlignedStore<store>;
@@ -274,7 +274,7 @@ def aligned_truncstorei32 : AlignedStore<truncstorei32>;
 // Non-volatile loads.  Used for instructions that might access the storage
 // location multiple times.
 class NonvolatileLoad<SDPatternOperator load>
   : PatFrag<(ops node:$addr), (load node:$addr), [{
-  LoadSDNode *Load = cast<LoadSDNode>(N);
+  auto *Load = cast<LoadSDNode>(N);
   return !Load->isVolatile();
 }]>;
 def nonvolatile_load : NonvolatileLoad<load>;
@@ -285,7 +285,7 @@ def nonvolatile_anyextloadi32 : NonvolatileLoad<anyextloadi32>;
 // Non-volatile stores.
 class NonvolatileStore<SDPatternOperator store>
   : PatFrag<(ops node:$src, node:$addr), (store node:$src, node:$addr), [{
-  StoreSDNode *Store = cast<StoreSDNode>(N);
+  auto *Store = cast<StoreSDNode>(N);
   return !Store->isVolatile();
 }]>;
 def nonvolatile_store : NonvolatileStore<store>;
diff --git a/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/lib/Target/SystemZ/SystemZRegisterInfo.cpp
index b61ae88f733..1ac4e320b30 100644
--- a/lib/Target/SystemZ/SystemZRegisterInfo.cpp
+++ b/lib/Target/SystemZ/SystemZRegisterInfo.cpp
@@ -63,8 +63,7 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
 
   MachineBasicBlock &MBB = *MI->getParent();
   MachineFunction &MF = *MBB.getParent();
-  const SystemZInstrInfo &TII =
-    *static_cast<const SystemZInstrInfo*>(TM.getInstrInfo());
+  auto *TII = static_cast<const SystemZInstrInfo*>(TM.getInstrInfo());
   const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
   DebugLoc DL = MI->getDebugLoc();
 
@@ -84,7 +83,7 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
   // See if the offset is in range, or if an equivalent instruction that
   // accepts the offset exists.
   unsigned Opcode = MI->getOpcode();
-  unsigned OpcodeForOffset = TII.getOpcodeForOffset(Opcode, Offset);
+  unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
   if (OpcodeForOffset)
     MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
   else {
@@ -94,7 +93,7 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
     int64_t Mask = 0xffff;
     do {
       Offset = OldOffset & Mask;
-      OpcodeForOffset = TII.getOpcodeForOffset(Opcode, Offset);
+      OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
       Mask >>= 1;
       assert(Mask && "One offset must be OK");
     } while (!OpcodeForOffset);
@@ -107,21 +106,21 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
         && MI->getOperand(FIOperandNum + 2).getReg() == 0) {
       // Load the offset into the scratch register and use it as an index.
       // The scratch register then dies here.
-      TII.loadImmediate(MBB, MI, ScratchReg, HighOffset);
+      TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
       MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
       MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
                                                         false, false, true);
     } else {
       // Load the anchor address into a scratch register.
-      unsigned LAOpcode = TII.getOpcodeForOffset(SystemZ::LA, HighOffset);
+      unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
       if (LAOpcode)
-        BuildMI(MBB, MI, DL, TII.get(LAOpcode),ScratchReg)
+        BuildMI(MBB, MI, DL, TII->get(LAOpcode),ScratchReg)
           .addReg(BasePtr).addImm(HighOffset).addReg(0);
       else {
         // Load the high offset into the scratch register and use it as
         // an index.
-        TII.loadImmediate(MBB, MI, ScratchReg, HighOffset);
-        BuildMI(MBB, MI, DL, TII.get(SystemZ::AGR),ScratchReg)
+        TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
+        BuildMI(MBB, MI, DL, TII->get(SystemZ::AGR),ScratchReg)
           .addReg(ScratchReg, RegState::Kill).addReg(BasePtr);
       }
 
@@ -130,7 +129,7 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                                     false, false, true);
     }
   }
-  MI->setDesc(TII.get(OpcodeForOffset));
+  MI->setDesc(TII->get(OpcodeForOffset));
   MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
 }
diff --git a/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp b/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
index c7ebb5d6b4e..7635bdccc3c 100644
--- a/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
+++ b/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
@@ -62,7 +62,7 @@ EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
   if (IsVolatile)
     return SDValue();
 
-  if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size))
+  if (auto *CSize = dyn_cast<ConstantSDNode>(Size))
     return emitMemMem(DAG, DL, SystemZISD::MVC, SystemZISD::MVC_LOOP,
                       Chain, Dst, Src, CSize->getZExtValue());
   return SDValue();
@@ -93,11 +93,11 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
   if (IsVolatile)
     return SDValue();
 
-  if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size)) {
+  if (auto *CSize = dyn_cast<ConstantSDNode>(Size)) {
     uint64_t Bytes = CSize->getZExtValue();
     if (Bytes == 0)
       return SDValue();
-    if (ConstantSDNode *CByte = dyn_cast<ConstantSDNode>(Byte)) {
+    if (auto *CByte = dyn_cast<ConstantSDNode>(Byte)) {
       // Handle cases that can be done using at most two of
       // MVI, MVHI, MVHHI and MVGHI.  The latter two can only be
      // used if ByteVal is all zeros or all ones; in other casees,
@@ -137,7 +137,7 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
     assert(Bytes >= 2 && "Should have dealt with 0- and 1-byte cases already");
 
     // Handle the special case of a memset of 0, which can use XC.
-    ConstantSDNode *CByte = dyn_cast<ConstantSDNode>(Byte);
+    auto *CByte = dyn_cast<ConstantSDNode>(Byte);
     if (CByte && CByte->getZExtValue() == 0)
       return emitMemMem(DAG, DL, SystemZISD::XC, SystemZISD::XC_LOOP,
                         Chain, Dst, Dst, Bytes);
@@ -194,7 +194,7 @@ EmitTargetCodeForMemcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
                         SDValue Src1, SDValue Src2, SDValue Size,
                         MachinePointerInfo Op1PtrInfo,
                         MachinePointerInfo Op2PtrInfo) const {
-  if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size)) {
+  if (auto *CSize = dyn_cast<ConstantSDNode>(Size)) {
    uint64_t Bytes = CSize->getZExtValue();
    assert(Bytes > 0 && "Caller should have handled 0-size case");
    Chain = emitCLC(DAG, DL, Chain, Src1, Src2, Bytes);