Comment out or remove unused parameter names so as to avoid a slew of compiler warnings.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@142574 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Duncan Sands
2011-10-20 08:57:54 +00:00
parent f6eede5258
commit 84dc1f0aaf

View File

@@ -193,7 +193,7 @@ public:
/// getSchedulingPreference - Some scheduler, e.g. hybrid, can switch to /// getSchedulingPreference - Some scheduler, e.g. hybrid, can switch to
/// different scheduling heuristics for different nodes. This function returns /// different scheduling heuristics for different nodes. This function returns
/// the preference (or none) for the given node. /// the preference (or none) for the given node.
virtual Sched::Preference getSchedulingPreference(SDNode *N) const { virtual Sched::Preference getSchedulingPreference(SDNode *) const {
return Sched::None; return Sched::None;
} }
@@ -328,15 +328,15 @@ public:
bool writeMem; // writes memory? bool writeMem; // writes memory?
}; };
virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info, virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
const CallInst &I, unsigned Intrinsic) const { unsigned /*Intrinsic*/) const {
return false; return false;
} }
/// isFPImmLegal - Returns true if the target can instruction select the /// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will materialize /// specified FP immediate natively. If false, the legalizer will materialize
/// the FP immediate as a load from a constant pool. /// the FP immediate as a load from a constant pool.
virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const { virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
return false; return false;
} }
@@ -344,8 +344,8 @@ public:
/// support *some* VECTOR_SHUFFLE operations, those with specific masks. /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal. /// are assumed to be legal.
virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask, virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
EVT VT) const { EVT /*VT*/) const {
return true; return true;
} }
@@ -358,8 +358,8 @@ public:
/// used by Targets can use this to indicate if there is a suitable /// used by Targets can use this to indicate if there is a suitable
/// VECTOR_SHUFFLE that can be used to replace a VAND with a constant /// VECTOR_SHUFFLE that can be used to replace a VAND with a constant
/// pool entry. /// pool entry.
virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
EVT VT) const { EVT /*VT*/) const {
return false; return false;
} }
@@ -587,7 +587,7 @@ public:
/// ShouldShrinkFPConstant - If true, then instruction selection should /// ShouldShrinkFPConstant - If true, then instruction selection should
/// seek to shrink the FP constant of the specified type to a smaller type /// seek to shrink the FP constant of the specified type to a smaller type
/// in order to save space and / or reduce runtime. /// in order to save space and / or reduce runtime.
virtual bool ShouldShrinkFPConstant(EVT VT) const { return true; } virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
/// hasTargetDAGCombine - If true, the target has custom DAG combine /// hasTargetDAGCombine - If true, the target has custom DAG combine
/// transformations that it can perform for the specified node. /// transformations that it can perform for the specified node.
@@ -629,7 +629,7 @@ public:
/// use helps to ensure that such replacements don't generate code that causes /// use helps to ensure that such replacements don't generate code that causes
/// an alignment error (trap) on the target machine. /// an alignment error (trap) on the target machine.
/// @brief Determine if the target supports unaligned memory accesses. /// @brief Determine if the target supports unaligned memory accesses.
virtual bool allowsUnalignedMemoryAccesses(EVT VT) const { virtual bool allowsUnalignedMemoryAccesses(EVT) const {
return false; return false;
} }
@@ -652,10 +652,11 @@ public:
/// constant so it does not need to be loaded. /// constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic /// It returns EVT::Other if the type should be determined using generic
/// target-independent logic. /// target-independent logic.
virtual EVT getOptimalMemOpType(uint64_t Size, virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
unsigned DstAlign, unsigned SrcAlign, unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
bool NonScalarIntSafe, bool MemcpyStrSrc, bool /*NonScalarIntSafe*/,
MachineFunction &MF) const { bool /*MemcpyStrSrc*/,
MachineFunction &/*MF*/) const {
return MVT::Other; return MVT::Other;
} }
@@ -745,20 +746,20 @@ public:
/// getPreIndexedAddressParts - returns true by value, base pointer and /// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address /// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address. /// can be legally represented as pre-indexed load / store address.
virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
SDValue &Offset, SDValue &/*Offset*/,
ISD::MemIndexedMode &AM, ISD::MemIndexedMode &/*AM*/,
SelectionDAG &DAG) const { SelectionDAG &/*DAG*/) const {
return false; return false;
} }
/// getPostIndexedAddressParts - returns true by value, base pointer and /// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be /// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store. /// combined with a load / store to form a post-indexed load / store.
virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
SDValue &Base, SDValue &Offset, SDValue &/*Base*/, SDValue &/*Offset*/,
ISD::MemIndexedMode &AM, ISD::MemIndexedMode &/*AM*/,
SelectionDAG &DAG) const { SelectionDAG &/*DAG*/) const {
return false; return false;
} }
@@ -768,9 +769,9 @@ public:
virtual unsigned getJumpTableEncoding() const; virtual unsigned getJumpTableEncoding() const;
virtual const MCExpr * virtual const MCExpr *
LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
const MachineBasicBlock *MBB, unsigned uid, const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
MCContext &Ctx) const { MCContext &/*Ctx*/) const {
assert(0 && "Need to implement this hook if target has custom JTIs"); assert(0 && "Need to implement this hook if target has custom JTIs");
return 0; return 0;
} }
@@ -796,7 +797,8 @@ public:
/// protector cookies at a fixed offset in some non-standard address /// protector cookies at a fixed offset in some non-standard address
/// space, and populates the address space and offset as /// space, and populates the address space and offset as
/// appropriate. /// appropriate.
virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const { virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
unsigned &/*Offset*/) const {
return false; return false;
} }
@@ -931,7 +933,7 @@ public:
/// the specified value type and it is 'desirable' to use the type for the /// the specified value type and it is 'desirable' to use the type for the
/// given node type. e.g. On x86 i16 is legal, but undesirable since i16 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
/// instruction encodings are longer and some i16 instructions are slow. /// instruction encodings are longer and some i16 instructions are slow.
virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const { virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
// By default, assume all legal types are desirable. // By default, assume all legal types are desirable.
return isTypeLegal(VT); return isTypeLegal(VT);
} }
@@ -939,14 +941,15 @@ public:
/// isDesirableToPromoteOp - Return true if it is profitable for dag combiner /// isDesirableToPromoteOp - Return true if it is profitable for dag combiner
/// to transform a floating point op of specified opcode to a equivalent op of /// to transform a floating point op of specified opcode to a equivalent op of
/// an integer type. e.g. f32 load -> i32 load can be profitable on ARM. /// an integer type. e.g. f32 load -> i32 load can be profitable on ARM.
virtual bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const { virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
EVT /*VT*/) const {
return false; return false;
} }
/// IsDesirableToPromoteOp - This method query the target whether it is /// IsDesirableToPromoteOp - This method query the target whether it is
/// beneficial for dag combiner to promote the specified node. If true, it /// beneficial for dag combiner to promote the specified node. If true, it
/// should return the desired promotion type by reference. /// should return the desired promotion type by reference.
virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const { virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
return false; return false;
} }
@@ -1190,11 +1193,11 @@ public:
/// chain value. /// chain value.
/// ///
virtual SDValue virtual SDValue
LowerFormalArguments(SDValue Chain, LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
CallingConv::ID CallConv, bool isVarArg, bool /*isVarArg*/,
const SmallVectorImpl<ISD::InputArg> &Ins, const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
DebugLoc dl, SelectionDAG &DAG, DebugLoc /*dl*/, SelectionDAG &/*DAG*/,
SmallVectorImpl<SDValue> &InVals) const { SmallVectorImpl<SDValue> &/*InVals*/) const {
assert(0 && "Not Implemented"); assert(0 && "Not Implemented");
return SDValue(); // this is here to silence compiler errors return SDValue(); // this is here to silence compiler errors
} }
@@ -1233,13 +1236,14 @@ public:
/// InVals array with legal-type return values from the call, and return /// InVals array with legal-type return values from the call, and return
/// the resulting token chain value. /// the resulting token chain value.
virtual SDValue virtual SDValue
LowerCall(SDValue Chain, SDValue Callee, LowerCall(SDValue /*Chain*/, SDValue /*Callee*/,
CallingConv::ID CallConv, bool isVarArg, bool &isTailCall, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
const SmallVectorImpl<ISD::OutputArg> &Outs, bool &/*isTailCall*/,
const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
const SmallVectorImpl<ISD::InputArg> &Ins, const SmallVectorImpl<SDValue> &/*OutVals*/,
DebugLoc dl, SelectionDAG &DAG, const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
SmallVectorImpl<SDValue> &InVals) const { DebugLoc /*dl*/, SelectionDAG &/*DAG*/,
SmallVectorImpl<SDValue> &/*InVals*/) const {
assert(0 && "Not Implemented"); assert(0 && "Not Implemented");
return SDValue(); // this is here to silence compiler errors return SDValue(); // this is here to silence compiler errors
} }
@@ -1251,10 +1255,10 @@ public:
/// return values described by the Outs array can fit into the return /// return values described by the Outs array can fit into the return
/// registers. If false is returned, an sret-demotion is performed. /// registers. If false is returned, an sret-demotion is performed.
/// ///
virtual bool CanLowerReturn(CallingConv::ID CallConv, virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
MachineFunction &MF, bool isVarArg, MachineFunction &/*MF*/, bool /*isVarArg*/,
const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
LLVMContext &Context) const LLVMContext &/*Context*/) const
{ {
// Return true by default to get preexisting behavior. // Return true by default to get preexisting behavior.
return true; return true;
@@ -1266,10 +1270,11 @@ public:
/// value. /// value.
/// ///
virtual SDValue virtual SDValue
LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
const SmallVectorImpl<ISD::OutputArg> &Outs, bool /*isVarArg*/,
const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
DebugLoc dl, SelectionDAG &DAG) const { const SmallVectorImpl<SDValue> &/*OutVals*/,
DebugLoc /*dl*/, SelectionDAG &/*DAG*/) const {
assert(0 && "Not Implemented"); assert(0 && "Not Implemented");
return SDValue(); // this is here to silence compiler errors return SDValue(); // this is here to silence compiler errors
} }
@@ -1277,7 +1282,7 @@ public:
/// isUsedByReturnOnly - Return true if result of the specified node is used /// isUsedByReturnOnly - Return true if result of the specified node is used
/// by a return node only. This is used to determine whether it is possible /// by a return node only. This is used to determine whether it is possible
/// to codegen a libcall as tail call at legalization time. /// to codegen a libcall as tail call at legalization time.
virtual bool isUsedByReturnOnly(SDNode *N) const { virtual bool isUsedByReturnOnly(SDNode *) const {
return false; return false;
} }
@@ -1285,7 +1290,7 @@ public:
/// call instruction as a tail call. This is used by optimization passes to /// call instruction as a tail call. This is used by optimization passes to
/// determine if it's profitable to duplicate return instructions to enable /// determine if it's profitable to duplicate return instructions to enable
/// tailcall optimization. /// tailcall optimization.
virtual bool mayBeEmittedAsTailCall(CallInst *CI) const { virtual bool mayBeEmittedAsTailCall(CallInst *) const {
return false; return false;
} }
@@ -1296,7 +1301,7 @@ public:
/// necessary for non-C calling conventions. The frontend should handle this /// necessary for non-C calling conventions. The frontend should handle this
/// and include all of the necessary information. /// and include all of the necessary information.
virtual EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT, virtual EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
ISD::NodeType ExtendKind) const { ISD::NodeType /*ExtendKind*/) const {
EVT MinVT = getRegisterType(Context, MVT::i32); EVT MinVT = getRegisterType(Context, MVT::i32);
return VT.bitsLT(MinVT) ? MinVT : VT; return VT.bitsLT(MinVT) ? MinVT : VT;
} }
@@ -1333,8 +1338,9 @@ public:
/// ///
/// If the target has no operations that require custom lowering, it need not /// If the target has no operations that require custom lowering, it need not
/// implement this. The default implementation aborts. /// implement this. The default implementation aborts.
virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results, virtual void ReplaceNodeResults(SDNode * /*N*/,
SelectionDAG &DAG) const { SmallVectorImpl<SDValue> &/*Results*/,
SelectionDAG &/*DAG*/) const {
assert(0 && "ReplaceNodeResults not implemented for this target!"); assert(0 && "ReplaceNodeResults not implemented for this target!");
} }
@@ -1344,7 +1350,7 @@ public:
/// createFastISel - This method returns a target specific FastISel object, /// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel. /// or null if the target does not support "fast" ISel.
virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const { virtual FastISel *createFastISel(FunctionLoweringInfo &) const {
return 0; return 0;
} }
@@ -1356,7 +1362,7 @@ public:
/// call to be explicit llvm code if it wants to. This is useful for /// call to be explicit llvm code if it wants to. This is useful for
/// turning simple inline asms into LLVM intrinsics, which gives the /// turning simple inline asms into LLVM intrinsics, which gives the
/// compiler more information about the behavior of the code. /// compiler more information about the behavior of the code.
virtual bool ExpandInlineAsm(CallInst *CI) const { virtual bool ExpandInlineAsm(CallInst *) const {
return false; return false;
} }
@@ -1538,7 +1544,7 @@ public:
/// icmp immediate, that is the target has icmp instructions which can compare /// icmp immediate, that is the target has icmp instructions which can compare
/// a register against the immediate without having to materialize the /// a register against the immediate without having to materialize the
/// immediate into a register. /// immediate into a register.
virtual bool isLegalICmpImmediate(int64_t Imm) const { virtual bool isLegalICmpImmediate(int64_t) const {
return true; return true;
} }
@@ -1546,18 +1552,18 @@ public:
/// add immediate, that is the target has add instructions which can add /// add immediate, that is the target has add instructions which can add
/// a register with the immediate without having to materialize the /// a register with the immediate without having to materialize the
/// immediate into a register. /// immediate into a register.
virtual bool isLegalAddImmediate(int64_t Imm) const { virtual bool isLegalAddImmediate(int64_t) const {
return true; return true;
} }
/// isTruncateFree - Return true if it's free to truncate a value of /// isTruncateFree - Return true if it's free to truncate a value of
/// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
/// register EAX to i16 by referencing its sub-register AX. /// register EAX to i16 by referencing its sub-register AX.
virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const { virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
return false; return false;
} }
virtual bool isTruncateFree(EVT VT1, EVT VT2) const { virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
return false; return false;
} }
@@ -1569,18 +1575,18 @@ public:
/// does not necessarily apply to truncate instructions. e.g. on x86-64, /// does not necessarily apply to truncate instructions. e.g. on x86-64,
/// all instructions that define 32-bit values implicit zero-extend the /// all instructions that define 32-bit values implicit zero-extend the
/// result out to 64 bits. /// result out to 64 bits.
virtual bool isZExtFree(Type *Ty1, Type *Ty2) const { virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
return false; return false;
} }
virtual bool isZExtFree(EVT VT1, EVT VT2) const { virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
return false; return false;
} }
/// isNarrowingProfitable - Return true if it's profitable to narrow /// isNarrowingProfitable - Return true if it's profitable to narrow
/// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
/// from i32 to i8 but not from i32 to i16. /// from i32 to i8 but not from i32 to i16.
virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const { virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
return false; return false;
} }