Make isInt?? and isUint?? template specializations of the generic versions. This
makes calls a little bit more consistent and allows easy removal of the
specializations in the future. Convert all callers to the templated functions.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@99838 91177308-0d34-0410-b5e6-96231b3b80d8
Benjamin Kramer 2010-03-29 21:13:41 +00:00
parent 31441b7e95
commit 34247a0f35
10 changed files with 71 additions and 61 deletions
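
For illustration only (this sketch is not part of the commit), the caller-side change amounts to moving the bit width from the function name into a template argument. A minimal, compilable example using the definitions from the MathExtras.h hunk below:

#include <cassert>
#include <cstdint>

// Generic form, as in the new MathExtras.h.
template<unsigned N>
inline bool isInt(int64_t x) {
  return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
}
// Specialization for the common 16-bit case, as in the new MathExtras.h.
template<>
inline bool isInt<16>(int64_t x) {
  return static_cast<int16_t>(x) == x;
}

int main() {
  // Old style: isInt16(32767). New style: the width is a template argument,
  // so common widths (16) and uncommon widths (7) read the same way.
  assert(isInt<16>(32767));   // largest value that fits a signed 16-bit field
  assert(!isInt<16>(32768));  // one past the top is rejected
  assert(isInt<7>(-64));      // non-specialized widths use the generic form
  return 0;
}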

View File

@@ -32,35 +32,43 @@ inline uint32_t Lo_32(uint64_t Value) {
   return static_cast<uint32_t>(Value);
 }
-/// is?Type - these functions produce optimal testing for integer data types.
-inline bool isInt8 (int64_t Value) {
-  return static_cast<int8_t>(Value) == Value;
-}
-inline bool isUInt8 (int64_t Value) {
-  return static_cast<uint8_t>(Value) == Value;
-}
-inline bool isInt16 (int64_t Value) {
-  return static_cast<int16_t>(Value) == Value;
-}
-inline bool isUInt16(int64_t Value) {
-  return static_cast<uint16_t>(Value) == Value;
-}
-inline bool isInt32 (int64_t Value) {
-  return static_cast<int32_t>(Value) == Value;
-}
-inline bool isUInt32(int64_t Value) {
-  return static_cast<uint32_t>(Value) == Value;
-}
 /// isInt - Checks if an integer fits into the given bit width.
 template<unsigned N>
 inline bool isInt(int64_t x) {
   return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
 }
+// Template specializations to get better code for common cases.
+template<>
+inline bool isInt<8>(int64_t x) {
+  return static_cast<int8_t>(x) == x;
+}
+template<>
+inline bool isInt<16>(int64_t x) {
+  return static_cast<int16_t>(x) == x;
+}
+template<>
+inline bool isInt<32>(int64_t x) {
+  return static_cast<int32_t>(x) == x;
+}
 /// isUInt - Checks if an unsigned integer fits into the given bit width.
 template<unsigned N>
-inline bool isUint(uint64_t x) {
+inline bool isUInt(uint64_t x) {
   return N >= 64 || x < (UINT64_C(1)<<N);
 }
+// Template specializations to get better code for common cases.
+template<>
+inline bool isUInt<8>(uint64_t x) {
+  return static_cast<uint8_t>(x) == x;
+}
+template<>
+inline bool isUInt<16>(uint64_t x) {
+  return static_cast<uint16_t>(x) == x;
+}
+template<>
+inline bool isUInt<32>(uint64_t x) {
+  return static_cast<uint32_t>(x) == x;
+}
 /// isMask_32 - This function returns true if the argument is a sequence of ones
 /// starting at the least significant bit with the remainder zero (32 bit
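
As a standalone sanity check (not part of the commit), the cast-and-compare specializations and the shift-based generic forms above agree; a small test assuming only the definitions shown in this hunk:

#include <cassert>
#include <cstdint>

template<unsigned N>
bool isIntGeneric(int64_t x) {
  return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
}
template<unsigned N>
bool isUIntGeneric(uint64_t x) {
  return N >= 64 || x < (UINT64_C(1)<<N);
}

int main() {
  // Cross-check the 8-bit cast-and-compare idiom against the generic range
  // test on a window around the boundaries.
  for (int64_t x = -1000; x <= 1000; ++x)
    assert(isIntGeneric<8>(x) == (static_cast<int8_t>(x) == x));
  for (uint64_t x = 0; x <= 1000; ++x)
    assert(isUIntGeneric<8>(x) == (static_cast<uint8_t>(x) == x));
  // Spot-check the 32-bit boundaries.
  assert(isIntGeneric<32>(INT64_C(0x7FFFFFFF)));
  assert(!isIntGeneric<32>(INT64_C(0x80000000)));
  assert(isUIntGeneric<32>(UINT64_C(0xFFFFFFFF)));
  assert(!isUIntGeneric<32>(UINT64_C(0x100000000)));
  return 0;
}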

View File

@@ -65,23 +65,23 @@ def HI16 : SDNodeXForm<imm, [{
 //===----------------------------------------------------------------------===//
 def imm3 : PatLeaf<(imm), [{return isInt<3>(N->getSExtValue());}]>;
-def uimm3 : PatLeaf<(imm), [{return isUint<3>(N->getZExtValue());}]>;
-def uimm4 : PatLeaf<(imm), [{return isUint<4>(N->getZExtValue());}]>;
-def uimm5 : PatLeaf<(imm), [{return isUint<5>(N->getZExtValue());}]>;
+def uimm3 : PatLeaf<(imm), [{return isUInt<3>(N->getZExtValue());}]>;
+def uimm4 : PatLeaf<(imm), [{return isUInt<4>(N->getZExtValue());}]>;
+def uimm5 : PatLeaf<(imm), [{return isUInt<5>(N->getZExtValue());}]>;
 def uimm5m2 : PatLeaf<(imm), [{
   uint64_t value = N->getZExtValue();
-  return value % 2 == 0 && isUint<5>(value);
+  return value % 2 == 0 && isUInt<5>(value);
 }]>;
 def uimm6m4 : PatLeaf<(imm), [{
   uint64_t value = N->getZExtValue();
-  return value % 4 == 0 && isUint<6>(value);
+  return value % 4 == 0 && isUInt<6>(value);
 }]>;
 def imm7 : PatLeaf<(imm), [{return isInt<7>(N->getSExtValue());}]>;
 def imm16 : PatLeaf<(imm), [{return isInt<16>(N->getSExtValue());}]>;
-def uimm16 : PatLeaf<(imm), [{return isUint<16>(N->getZExtValue());}]>;
+def uimm16 : PatLeaf<(imm), [{return isUInt<16>(N->getZExtValue());}]>;
 def ximm16 : PatLeaf<(imm), [{
   int64_t value = N->getSExtValue();
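
The uimm5m2 and uimm6m4 leaves above combine a range test with an alignment test. A hedged C++ sketch of the same predicate logic (isUImm5M2 is a hypothetical name, not from the target):

#include <cassert>
#include <cstdint>

template<unsigned N>
bool isUInt(uint64_t x) {
  return N >= 64 || x < (UINT64_C(1)<<N);
}

// Mirror of the uimm5m2 PatLeaf: unsigned, fits in 5 bits, and even.
bool isUImm5M2(uint64_t value) {
  return value % 2 == 0 && isUInt<5>(value);
}

int main() {
  assert(isUImm5M2(30));   // even and below 32
  assert(!isUImm5M2(31));  // in range but odd
  assert(!isUImm5M2(32));  // even but outside the 5-bit range
  return 0;
}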

View File

@@ -164,7 +164,7 @@ void BlackfinRegisterInfo::loadConstant(MachineBasicBlock &MBB,
     return;
   }
-  if (isUint<16>(value)) {
+  if (isUInt<16>(value)) {
     BuildMI(MBB, I, DL, TII.get(BF::LOADuimm16), Reg).addImm(value);
     return;
   }
@@ -255,13 +255,13 @@ BlackfinRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   assert(FIPos==1 && "Bad frame index operand");
   MI.getOperand(FIPos).ChangeToRegister(BaseReg, false);
   MI.getOperand(FIPos+1).setImm(Offset);
-  if (isUint<6>(Offset)) {
+  if (isUInt<6>(Offset)) {
     MI.setDesc(TII.get(isStore
                        ? BF::STORE32p_uimm6m4
                        : BF::LOAD32p_uimm6m4));
     return 0;
   }
-  if (BaseReg == BF::FP && isUint<7>(-Offset)) {
+  if (BaseReg == BF::FP && isUInt<7>(-Offset)) {
     MI.setDesc(TII.get(isStore
                        ? BF::STORE32fp_nimm7m4
                        : BF::LOAD32fp_nimm7m4));
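
One subtlety in the hunk above: the nimm7m4 forms are selected by negating the offset first, so isUInt<7>(-Offset) tests whether the magnitude of a negative offset fits in 7 unsigned bits. A minimal sketch of that check in isolation (it assumes Offset is a plain int, which the surrounding code suggests but does not show):

#include <cassert>
#include <cstdint>

template<unsigned N>
bool isUInt(uint64_t x) {
  return N >= 64 || x < (UINT64_C(1)<<N);
}

int main() {
  int Offset = -124;
  assert(isUInt<7>(-Offset));   // magnitude 124 < 128, encodable
  Offset = -128;
  assert(!isUInt<7>(-Offset));  // magnitude 128 needs 8 bits
  return 0;
}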

View File

@@ -58,7 +58,7 @@ namespace {
   bool
   isI32IntU10Immediate(ConstantSDNode *CN)
   {
-    return isUint<10>(CN->getSExtValue());
+    return isUInt<10>(CN->getSExtValue());
   }
   //! ConstantSDNode predicate for i16 sign-extended, 10-bit immediate values
@@ -80,7 +80,7 @@ namespace {
   bool
   isI16IntU10Immediate(ConstantSDNode *CN)
   {
-    return isUint<10>((short) CN->getZExtValue());
+    return isUInt<10>((short) CN->getZExtValue());
   }
   //! SDNode predicate for i16 sign-extended, 10-bit immediate values

View File

@@ -424,7 +424,7 @@ void MSILWriter::printPtrLoad(uint64_t N) {
   case Module::Pointer32:
     printSimpleInstruction("ldc.i4",utostr(N).c_str());
     // FIXME: Need overflow test?
-    if (!isUInt32(N)) {
+    if (!isUInt<32>(N)) {
       errs() << "Value = " << utostr(N) << '\n';
       llvm_unreachable("32-bit pointer overflowed");
     }

View File

@@ -130,7 +130,7 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
       }
       // If this branch is in range, ignore it.
-      if (isInt16(BranchSize)) {
+      if (isInt<16>(BranchSize)) {
         MBBStartOffset += 4;
         continue;
       }

View File

@@ -470,11 +470,11 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
     if (CC == ISD::SETEQ || CC == ISD::SETNE) {
       if (isInt32Immediate(RHS, Imm)) {
         // SETEQ/SETNE comparison with 16-bit immediate, fold it.
-        if (isUInt16(Imm))
+        if (isUInt<16>(Imm))
           return SDValue(CurDAG->getMachineNode(PPC::CMPLWI, dl, MVT::i32, LHS,
                                                 getI32Imm(Imm & 0xFFFF)), 0);
         // If this is a 16-bit signed immediate, fold it.
-        if (isInt16((int)Imm))
+        if (isInt<16>((int)Imm))
           return SDValue(CurDAG->getMachineNode(PPC::CMPWI, dl, MVT::i32, LHS,
                                                 getI32Imm(Imm & 0xFFFF)), 0);
@@ -494,7 +494,7 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
       }
       Opc = PPC::CMPLW;
     } else if (ISD::isUnsignedIntSetCC(CC)) {
-      if (isInt32Immediate(RHS, Imm) && isUInt16(Imm))
+      if (isInt32Immediate(RHS, Imm) && isUInt<16>(Imm))
         return SDValue(CurDAG->getMachineNode(PPC::CMPLWI, dl, MVT::i32, LHS,
                                               getI32Imm(Imm & 0xFFFF)), 0);
       Opc = PPC::CMPLW;
@@ -511,11 +511,11 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
     if (CC == ISD::SETEQ || CC == ISD::SETNE) {
      if (isInt64Immediate(RHS.getNode(), Imm)) {
         // SETEQ/SETNE comparison with 16-bit immediate, fold it.
-        if (isUInt16(Imm))
+        if (isUInt<16>(Imm))
           return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, LHS,
                                                 getI32Imm(Imm & 0xFFFF)), 0);
         // If this is a 16-bit signed immediate, fold it.
-        if (isInt16(Imm))
+        if (isInt<16>(Imm))
           return SDValue(CurDAG->getMachineNode(PPC::CMPDI, dl, MVT::i64, LHS,
                                                 getI32Imm(Imm & 0xFFFF)), 0);
@@ -528,7 +528,7 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
         //   xoris r0,r3,0x1234
         //   cmpldi cr0,r0,0x5678
         //   beq cr0,L6
-        if (isUInt32(Imm)) {
+        if (isUInt<32>(Imm)) {
           SDValue Xor(CurDAG->getMachineNode(PPC::XORIS8, dl, MVT::i64, LHS,
                                              getI64Imm(Imm >> 16)), 0);
           return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, Xor,
@@ -537,7 +537,7 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
       }
       Opc = PPC::CMPLD;
     } else if (ISD::isUnsignedIntSetCC(CC)) {
-      if (isInt64Immediate(RHS.getNode(), Imm) && isUInt16(Imm))
+      if (isInt64Immediate(RHS.getNode(), Imm) && isUInt<16>(Imm))
         return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, LHS,
                                               getI64Imm(Imm & 0xFFFF)), 0);
       Opc = PPC::CMPLD;
@@ -761,12 +761,12 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
       unsigned Shift = 0;
       // If it can't be represented as a 32 bit value.
-      if (!isInt32(Imm)) {
+      if (!isInt<32>(Imm)) {
         Shift = CountTrailingZeros_64(Imm);
         int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;
         // If the shifted value fits 32 bits.
-        if (isInt32(ImmSh)) {
+        if (isInt<32>(ImmSh)) {
           // Go with the shifted value.
           Imm = ImmSh;
         } else {
@@ -785,7 +785,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
       unsigned Hi = (Imm >> 16) & 0xFFFF;
       // Simple value.
-      if (isInt16(Imm)) {
+      if (isInt<16>(Imm)) {
         // Just the Lo bits.
         Result = CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64, getI32Imm(Lo));
       } else if (Lo) {
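
The comment block in the SelectCC hunk above describes folding a 64-bit equality compare against a 32-bit unsigned immediate into xoris (xor with the high 16 bits) plus cmpldi (compare with the low 16 bits). A hedged arithmetic model of why that fold is exact (eqViaXorisCmpldi is illustrative, not the actual DAG code):

#include <cassert>
#include <cstdint>

// LHS == Imm iff xor-ing away Imm's high half leaves exactly Imm's low half,
// because the two halves occupy disjoint bit ranges.
bool eqViaXorisCmpldi(uint64_t LHS, uint64_t Imm) {
  uint64_t Xor = LHS ^ ((Imm >> 16) << 16);  // xoris: clear matching high bits
  return Xor == (Imm & 0xFFFF);              // cmpldi: compare the remainder
}

int main() {
  uint64_t Imm = 0x12345678;  // any value where isUInt<32>(Imm) holds
  assert(eqViaXorisCmpldi(0x12345678, Imm));
  assert(!eqViaXorisCmpldi(0x12345679, Imm));             // low-half mismatch
  assert(!eqViaXorisCmpldi(0x02345678, Imm));             // high-half mismatch
  assert(!eqViaXorisCmpldi(UINT64_C(0x112345678), Imm));  // bits above 32 survive the xor
  return 0;
}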

View File

@@ -512,7 +512,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
       MachineInstr *MI = I;
       DebugLoc dl = MI->getDebugLoc();
-      if (isInt16(CalleeAmt)) {
+      if (isInt<16>(CalleeAmt)) {
         BuildMI(MBB, I, dl, TII.get(ADDIInstr), StackReg).addReg(StackReg).
           addImm(CalleeAmt);
       } else {
@@ -596,7 +596,7 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II,
   else
     Reg = PPC::R0;
-  if (MaxAlign < TargetAlign && isInt16(FrameSize)) {
+  if (MaxAlign < TargetAlign && isInt<16>(FrameSize)) {
     BuildMI(MBB, II, dl, TII.get(PPC::ADDI), Reg)
       .addReg(PPC::R31)
       .addImm(FrameSize);
@@ -798,7 +798,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   // clear can be encoded. This is extremely uncommon, because normally you
   // only "std" to a stack slot that is at least 4-byte aligned, but it can
   // happen in invalid code.
-  if (isInt16(Offset) && (!isIXAddr || (Offset & 3) == 0)) {
+  if (isInt<16>(Offset) && (!isIXAddr || (Offset & 3) == 0)) {
     if (isIXAddr)
       Offset >>= 2;    // The actual encoded value has the low two bits zero.
     MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
@@ -1375,8 +1375,9 @@ PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
   if (!isPPC64) {
     // PPC32.
     if (ALIGN_STACK && MaxAlign > TargetAlign) {
-      assert(isPowerOf2_32(MaxAlign)&&isInt16(MaxAlign)&&"Invalid alignment!");
-      assert(isInt16(NegFrameSize) && "Unhandled stack size and alignment!");
+      assert(isPowerOf2_32(MaxAlign) && isInt<16>(MaxAlign) &&
+             "Invalid alignment!");
+      assert(isInt<16>(NegFrameSize) && "Unhandled stack size and alignment!");
       BuildMI(MBB, MBBI, dl, TII.get(PPC::RLWINM), PPC::R0)
         .addReg(PPC::R1)
@@ -1390,7 +1391,7 @@ PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
         .addReg(PPC::R1)
         .addReg(PPC::R1)
         .addReg(PPC::R0);
-    } else if (isInt16(NegFrameSize)) {
+    } else if (isInt<16>(NegFrameSize)) {
       BuildMI(MBB, MBBI, dl, TII.get(PPC::STWU), PPC::R1)
         .addReg(PPC::R1)
         .addImm(NegFrameSize)
@@ -1408,8 +1409,9 @@ PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
     }
   } else {    // PPC64.
     if (ALIGN_STACK && MaxAlign > TargetAlign) {
-      assert(isPowerOf2_32(MaxAlign)&&isInt16(MaxAlign)&&"Invalid alignment!");
-      assert(isInt16(NegFrameSize) && "Unhandled stack size and alignment!");
+      assert(isPowerOf2_32(MaxAlign) && isInt<16>(MaxAlign) &&
+             "Invalid alignment!");
+      assert(isInt<16>(NegFrameSize) && "Unhandled stack size and alignment!");
       BuildMI(MBB, MBBI, dl, TII.get(PPC::RLDICL), PPC::X0)
         .addReg(PPC::X1)
@@ -1422,7 +1424,7 @@ PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
         .addReg(PPC::X1)
         .addReg(PPC::X1)
         .addReg(PPC::X0);
-    } else if (isInt16(NegFrameSize)) {
+    } else if (isInt<16>(NegFrameSize)) {
       BuildMI(MBB, MBBI, dl, TII.get(PPC::STDU), PPC::X1)
         .addReg(PPC::X1)
         .addImm(NegFrameSize / 4)
@@ -1591,7 +1593,7 @@ void PPCRegisterInfo::emitEpilogue(MachineFunction &MF,
     // enabled (=> hasFastCall()==true) the fastcc call might contain a tail
     // call which invalidates the stack pointer value in SP(0). So we use the
     // value of R31 in this case.
-    if (FI->hasFastCall() && isInt16(FrameSize)) {
+    if (FI->hasFastCall() && isInt<16>(FrameSize)) {
       assert(hasFP(MF) && "Expecting a valid the frame pointer.");
       BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI), PPC::R1)
         .addReg(PPC::R31).addImm(FrameSize);
@@ -1605,7 +1607,7 @@ void PPCRegisterInfo::emitEpilogue(MachineFunction &MF,
         .addReg(PPC::R1)
         .addReg(PPC::R31)
         .addReg(PPC::R0);
-    } else if (isInt16(FrameSize) &&
+    } else if (isInt<16>(FrameSize) &&
                (!ALIGN_STACK || TargetAlign >= MaxAlign) &&
                !MFI->hasVarSizedObjects()) {
       BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI), PPC::R1)
@@ -1615,7 +1617,7 @@ void PPCRegisterInfo::emitEpilogue(MachineFunction &MF,
         .addImm(0).addReg(PPC::R1);
     }
   } else {
-    if (FI->hasFastCall() && isInt16(FrameSize)) {
+    if (FI->hasFastCall() && isInt<16>(FrameSize)) {
       assert(hasFP(MF) && "Expecting a valid the frame pointer.");
       BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI8), PPC::X1)
         .addReg(PPC::X31).addImm(FrameSize);
@@ -1629,7 +1631,7 @@ void PPCRegisterInfo::emitEpilogue(MachineFunction &MF,
         .addReg(PPC::X1)
         .addReg(PPC::X31)
         .addReg(PPC::X0);
-    } else if (isInt16(FrameSize) && TargetAlign >= MaxAlign &&
+    } else if (isInt<16>(FrameSize) && TargetAlign >= MaxAlign &&
               !MFI->hasVarSizedObjects()) {
       BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI8), PPC::X1)
         .addReg(PPC::X1).addImm(FrameSize);
@@ -1678,7 +1680,7 @@ void PPCRegisterInfo::emitEpilogue(MachineFunction &MF,
     unsigned LISInstr = isPPC64 ? PPC::LIS8 : PPC::LIS;
     unsigned ORIInstr = isPPC64 ? PPC::ORI8 : PPC::ORI;
-    if (CallerAllocatedAmt && isInt16(CallerAllocatedAmt)) {
+    if (CallerAllocatedAmt && isInt<16>(CallerAllocatedAmt)) {
       BuildMI(MBB, MBBI, dl, TII.get(ADDIInstr), StackReg)
         .addReg(StackReg).addImm(CallerAllocatedAmt);
     } else {
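
The fallback path in the last hunk (taken when the adjustment fails isInt<16>) builds the constant with lis and ori. A small sketch of the hi/lo split those two instructions implement (plain C++ arithmetic, not the actual emitter):

#include <cassert>
#include <cstdint>

int main() {
  int32_t Amt = 0x12345678;              // too wide for a signed 16-bit addi
  uint32_t Hi = (uint32_t)Amt >> 16;     // lis loads this into the high half
  uint32_t Lo = (uint32_t)Amt & 0xFFFF;  // ori merges in the low half
  assert((int32_t)((Hi << 16) | Lo) == Amt);
  return 0;
}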

View File

@@ -383,7 +383,7 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
     if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
       uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
       // They have to fit in the 32-bit signed displacement field though.
-      if (isInt32(Disp)) {
+      if (isInt<32>(Disp)) {
         AM.Disp = (uint32_t)Disp;
         return X86SelectAddress(U->getOperand(0), AM);
       }
@@ -427,7 +427,7 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
       }
     }
     // Check for displacement overflow.
-    if (!isInt32(Disp))
+    if (!isInt<32>(Disp))
       break;
     // Ok, the GEP indices were covered by constant-offset and scaled-index
     // addressing. Update the address state and move on to examining the base.

View File

@@ -2424,7 +2424,7 @@ SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                        bool hasSymbolicDisplacement) {
   // Offset should fit into 32 bit immediate field.
-  if (!isInt32(Offset))
+  if (!isInt<32>(Offset))
     return false;
   // If we don't have a symbolic displacement - we don't have any extra