diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index f1a86275187..efbd28c412f 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -384,7 +384,6 @@ SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {
   // Implement VSELECT in terms of XOR, AND, OR
   // on platforms which do not support blend natively.
   EVT VT = Op.getOperand(0).getValueType();
-  EVT OVT = Op.getOperand(1).getValueType();
   DebugLoc DL = Op.getDebugLoc();
 
   SDValue Mask = Op.getOperand(0);
@@ -398,7 +397,8 @@ SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {
       !TLI.isOperationLegalOrCustom(ISD::OR, VT))
     return DAG.UnrollVectorOp(Op.getNode());
 
-  assert(VT.getSizeInBits() == OVT.getSizeInBits() && "Invalid mask size");
+  assert(VT.getSizeInBits() == Op.getOperand(1).getValueType().getSizeInBits()
+         && "Invalid mask size");
   // Bitcast the operands to be the same type as the mask.
   // This is needed when we select between FP types because
   // the mask is a vector of integers.
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 632b6146ace..df94e0f6862 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2800,6 +2800,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
             EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
            "Vector element counts must match in FP_ROUND_INREG");
     assert(EVT.bitsLE(VT) && "Not rounding down!");
+    (void)EVT;
     if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
     break;
   }
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 309b68b3392..24bd2cde78d 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2474,7 +2474,7 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
   size_t numCmps = Clusterify(Cases, SI);
   DEBUG(dbgs() << "Clusterify finished. Total clusters: " << Cases.size()
                << ". Total compares: " << numCmps << '\n');
-  numCmps = 0;
+  (void)numCmps;
 
   // Get the Value to be switched on and default basic blocks, which will be
   // inserted into CaseBlock records, representing basic blocks in the binary
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index c1ede78d7fb..1615f5b140d 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -4902,9 +4902,9 @@ static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
 
 static void ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results,
                                 SelectionDAG &DAG, unsigned NewOp) {
-  EVT T = Node->getValueType(0);
   DebugLoc dl = Node->getDebugLoc();
-  assert (T == MVT::i64 && "Only know how to expand i64 atomics");
+  assert (Node->getValueType(0) == MVT::i64 &&
+          "Only know how to expand i64 atomics");
 
   SmallVector<SDValue, 6> Ops;
   Ops.push_back(Node->getOperand(0)); // Chain
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index 07e0d0ed263..d54f4ae2a2c 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -1113,9 +1113,7 @@ int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF, int FI) con
       // Skip the saved EBP.
       Offset += RI->getSlotSize();
     } else {
-      unsigned Align = MFI->getObjectAlignment(FI);
-      assert((-(Offset + StackSize)) % Align == 0);
-      Align = 0;
+      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
       return Offset + StackSize;
     }
     // FIXME: Support tail calls
@@ -1267,7 +1265,7 @@ X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                           true);
     assert(FrameIdx == MFI->getObjectIndexBegin() &&
            "Slot for EBP register must be last in order to be found!");
-    FrameIdx = 0;
+    (void)FrameIdx;
   }
 }
 
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index f75d695ac59..410cc953e9d 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1753,6 +1753,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
     // places.
     assert(VA.getValNo() != LastVal &&
            "Don't support value assigned to multiple locs yet");
+    (void)LastVal;
     LastVal = VA.getValNo();
 
     if (VA.isRegLoc()) {
@@ -10476,9 +10477,9 @@ static void ReplaceATOMIC_LOAD(SDNode *Node,
 void X86TargetLowering::
 ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results,
                         SelectionDAG &DAG, unsigned NewOp) const {
-  EVT T = Node->getValueType(0);
   DebugLoc dl = Node->getDebugLoc();
-  assert (T == MVT::i64 && "Only know how to expand i64 atomics");
+  assert (Node->getValueType(0) == MVT::i64 &&
+          "Only know how to expand i64 atomics");
 
   SDValue Chain = Node->getOperand(0);
   SDValue In1 = Node->getOperand(1);