diff --git a/include/llvm/CodeGen/CallingConvLower.h b/include/llvm/CodeGen/CallingConvLower.h
index dd7703b1dbf..c60eba2bd2a 100644
--- a/include/llvm/CodeGen/CallingConvLower.h
+++ b/include/llvm/CodeGen/CallingConvLower.h
@@ -312,13 +312,13 @@ public:
   /// produce a single value.
   void AnalyzeCallResult(MVT VT, CCAssignFn Fn);
 
-  /// getFirstUnallocated - Return the first unallocated register in the set, or
-  /// NumRegs if they are all allocated.
-  unsigned getFirstUnallocated(const MCPhysReg *Regs, unsigned NumRegs) const {
-    for (unsigned i = 0; i != NumRegs; ++i)
+  /// getFirstUnallocated - Return the index of the first unallocated register
+  /// in the set, or Regs.size() if they are all allocated.
+  unsigned getFirstUnallocated(ArrayRef<MCPhysReg> Regs) const {
+    for (unsigned i = 0; i < Regs.size(); ++i)
       if (!isAllocated(Regs[i]))
         return i;
-    return NumRegs;
+    return Regs.size();
   }
 
   /// AllocateReg - Attempt to allocate one register. If it is not available,
@@ -341,9 +341,9 @@ public:
   /// AllocateReg - Attempt to allocate one of the specified registers. If none
   /// are available, return zero. Otherwise, return the first one available,
   /// marking it and any aliases as allocated.
-  unsigned AllocateReg(const MCPhysReg *Regs, unsigned NumRegs) {
-    unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs);
-    if (FirstUnalloc == NumRegs)
+  unsigned AllocateReg(ArrayRef<MCPhysReg> Regs) {
+    unsigned FirstUnalloc = getFirstUnallocated(Regs);
+    if (FirstUnalloc == Regs.size())
       return 0;    // Didn't find the reg.
 
     // Mark the register and any aliases as allocated.
@@ -382,10 +382,9 @@ public:
   }
 
   /// Version of AllocateReg with list of registers to be shadowed.
-  unsigned AllocateReg(const MCPhysReg *Regs, const MCPhysReg *ShadowRegs,
-                       unsigned NumRegs) {
-    unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs);
-    if (FirstUnalloc == NumRegs)
+  unsigned AllocateReg(ArrayRef<MCPhysReg> Regs, const MCPhysReg *ShadowRegs) {
+    unsigned FirstUnalloc = getFirstUnallocated(Regs);
+    if (FirstUnalloc == Regs.size())
       return 0;    // Didn't find the reg.
 
     // Mark the register and any aliases as allocated.
@@ -415,8 +414,8 @@ public:
   /// Version of AllocateStack with list of extra registers to be shadowed.
   /// Note that, unlike AllocateReg, this shadows ALL of the shadow registers.
   unsigned AllocateStack(unsigned Size, unsigned Align,
-                         const MCPhysReg *ShadowRegs, unsigned NumShadowRegs) {
-    for (unsigned i = 0; i < NumShadowRegs; ++i)
+                         ArrayRef<MCPhysReg> ShadowRegs) {
+    for (unsigned i = 0; i < ShadowRegs.size(); ++i)
       MarkAllocated(ShadowRegs[i]);
     return AllocateStack(Size, Align);
   }
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 481a039474e..332c8796c0a 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2203,8 +2203,7 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
       AArch64::X3, AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7 };
   static const unsigned NumGPRArgRegs = array_lengthof(GPRArgRegs);
-  unsigned FirstVariadicGPR =
-      CCInfo.getFirstUnallocated(GPRArgRegs, NumGPRArgRegs);
+  unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs);
 
   unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR);
   int GPRIdx = 0;
@@ -2232,8 +2231,7 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
         AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4,
        AArch64::Q5, AArch64::Q6, AArch64::Q7};
     static const unsigned NumFPRArgRegs = array_lengthof(FPRArgRegs);
-    unsigned FirstVariadicFPR =
-        CCInfo.getFirstUnallocated(FPRArgRegs, NumFPRArgRegs);
+    unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs);
 
     unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
     int FPRIdx = 0;
diff --git a/lib/Target/ARM/ARMCallingConv.h b/lib/Target/ARM/ARMCallingConv.h
index e0d0559ba98..87393fe7442 100644
--- a/lib/Target/ARM/ARMCallingConv.h
+++ b/lib/Target/ARM/ARMCallingConv.h
@@ -31,7 +31,7 @@ static bool f64AssignAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
   static const MCPhysReg RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
 
   // Try to get the first register.
-  if (unsigned Reg = State.AllocateReg(RegList, 4))
+  if (unsigned Reg = State.AllocateReg(RegList))
     State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
   else {
     // For the 2nd half of a v2f64, do not fail.
@@ -46,7 +46,7 @@ static bool f64AssignAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
   }
 
   // Try to get the second register.
-  if (unsigned Reg = State.AllocateReg(RegList, 4))
+  if (unsigned Reg = State.AllocateReg(RegList))
     State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
   else
     State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
@@ -76,11 +76,11 @@ static bool f64AssignAAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
   static const MCPhysReg ShadowRegList[] = { ARM::R0, ARM::R1 };
   static const MCPhysReg GPRArgRegs[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
 
-  unsigned Reg = State.AllocateReg(HiRegList, ShadowRegList, 2);
+  unsigned Reg = State.AllocateReg(HiRegList, ShadowRegList);
   if (Reg == 0) {
 
     // If we had R3 unallocated only, now we still must to waste it.
-    Reg = State.AllocateReg(GPRArgRegs, 4);
+    Reg = State.AllocateReg(GPRArgRegs);
     assert((!Reg || Reg == ARM::R3) && "Wrong GPRs usage for f64");
 
     // For the 2nd half of a v2f64, do not just fail.
@@ -126,7 +126,7 @@ static bool f64RetAssign(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
   static const MCPhysReg HiRegList[] = { ARM::R0, ARM::R2 };
   static const MCPhysReg LoRegList[] = { ARM::R1, ARM::R3 };
 
-  unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
+  unsigned Reg = State.AllocateReg(HiRegList, LoRegList);
   if (Reg == 0)
     return false; // we didn't handle it
 
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 4f9bdc0ab76..a66d9ab00f4 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -1865,7 +1865,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
 void ARMTargetLowering::HandleByVal(
     CCState *State, unsigned &size, unsigned Align) const {
-  unsigned reg = State->AllocateReg(GPRArgRegs, 4);
+  unsigned reg = State->AllocateReg(GPRArgRegs);
 
   assert((State->getCallOrPrologue() == Prologue ||
           State->getCallOrPrologue() == Call) &&
          "unhandled ParmContext");
@@ -1875,7 +1875,7 @@ ARMTargetLowering::HandleByVal(
     unsigned AlignInRegs = Align / 4;
     unsigned Waste = (ARM::R4 - reg) % AlignInRegs;
     for (unsigned i = 0; i < Waste; ++i)
-      reg = State->AllocateReg(GPRArgRegs, 4);
+      reg = State->AllocateReg(GPRArgRegs);
   }
   if (reg != 0) {
     unsigned excess = 4 * (ARM::R4 - reg);
@@ -1886,7 +1886,7 @@ ARMTargetLowering::HandleByVal(
     // remained registers.
     const unsigned NSAAOffset = State->getNextStackOffset();
     if (Subtarget->isAAPCS_ABI() && NSAAOffset != 0 && size > excess) {
-      while (State->AllocateReg(GPRArgRegs, 4))
+      while (State->AllocateReg(GPRArgRegs))
         ;
       return;
     }
@@ -1903,7 +1903,7 @@ ARMTargetLowering::HandleByVal(
     // Note, first register is allocated in the beginning of function already,
     // allocate remained amount of registers we need.
     for (unsigned i = reg+1; i != ByValRegEnd; ++i)
-      State->AllocateReg(GPRArgRegs, 4);
+      State->AllocateReg(GPRArgRegs);
 
     // A byval parameter that is split between registers and memory needs its
     // size truncated here.
     // In the case where the entire structure fits in registers, we set the
@@ -2838,9 +2838,7 @@ ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
     NumGPRs = REnd - RBegin;
   } else {
     unsigned int firstUnalloced;
-    firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs,
-                                                sizeof(GPRArgRegs) /
-                                                sizeof(GPRArgRegs[0]));
+    firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs);
     NumGPRs = (firstUnalloced <= 3) ?
       (4 - firstUnalloced) : 0;
   }
 
@@ -2911,8 +2909,7 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
     firstRegToSaveIndex = RBegin - ARM::R0;
     lastRegToSaveIndex = REnd - ARM::R0;
   } else {
-    firstRegToSaveIndex = CCInfo.getFirstUnallocated
-      (GPRArgRegs, array_lengthof(GPRArgRegs));
+    firstRegToSaveIndex = CCInfo.getFirstUnallocated(GPRArgRegs);
     lastRegToSaveIndex = 4;
   }
diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp
index 362c0a7391c..661be51e1b3 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -188,7 +188,7 @@ static bool CC_Hexagon32(unsigned ValNo, MVT ValVT,
      Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
      Hexagon::R5
    };
-    if (unsigned Reg = State.AllocateReg(RegList, 6)) {
+    if (unsigned Reg = State.AllocateReg(RegList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
@@ -213,7 +213,7 @@ static bool CC_Hexagon64(unsigned ValNo, MVT ValVT,
    static const MCPhysReg RegList2[] = {
      Hexagon::R1, Hexagon::R3
    };
-    if (unsigned Reg = State.AllocateReg(RegList1, RegList2, 2)) {
+    if (unsigned Reg = State.AllocateReg(RegList1, RegList2)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp
index ca83b6d3b18..6f55a38f460 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -328,7 +328,7 @@ static void AnalyzeArguments(CCState &State,
     if (!UseStack && Parts <= RegsLeft) {
       unsigned FirstVal = ValNo;
       for (unsigned j = 0; j < Parts; j++) {
-        unsigned Reg = State.AllocateReg(RegList, NbRegs);
+        unsigned Reg = State.AllocateReg(RegList);
         State.addLoc(CCValAssign::getReg(ValNo++, ArgVT, Reg, LocVT, LocInfo));
         RegsLeft--;
       }
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index 4cd20e70b13..67e380bb190 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -2272,12 +2272,10 @@ SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
 static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
                        CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                       CCState &State, const MCPhysReg *F64Regs) {
+                       CCState &State, ArrayRef<MCPhysReg> F64Regs) {
   const MipsSubtarget &Subtarget = static_cast<const MipsSubtarget &>(
       State.getMachineFunction().getSubtarget());
 
-  static const unsigned IntRegsSize = 4, FloatRegsSize = 2;
-
   static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
   static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };
 
@@ -2314,39 +2312,39 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
   // f32 and f64 are allocated in A0, A1, A2, A3 when either of the following
   // is true: function is vararg, argument is 3rd or higher, there is previous
   // argument which is not f32 or f64.
-  bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1
-      || State.getFirstUnallocated(F32Regs, FloatRegsSize) != ValNo;
+  bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
+                                State.getFirstUnallocated(F32Regs) != ValNo;
   unsigned OrigAlign = ArgFlags.getOrigAlign();
   bool isI64 = (ValVT == MVT::i32 && OrigAlign == 8);
 
   if (ValVT == MVT::i32 || (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
-    Reg = State.AllocateReg(IntRegs, IntRegsSize);
+    Reg = State.AllocateReg(IntRegs);
     // If this is the first part of an i64 arg,
     // the allocated register must be either A0 or A2.
     if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
-      Reg = State.AllocateReg(IntRegs, IntRegsSize);
+      Reg = State.AllocateReg(IntRegs);
     LocVT = MVT::i32;
   } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
     // Allocate int register and shadow next int register. If first
     // available register is Mips::A1 or Mips::A3, shadow it too.
-    Reg = State.AllocateReg(IntRegs, IntRegsSize);
+    Reg = State.AllocateReg(IntRegs);
     if (Reg == Mips::A1 || Reg == Mips::A3)
-      Reg = State.AllocateReg(IntRegs, IntRegsSize);
-    State.AllocateReg(IntRegs, IntRegsSize);
+      Reg = State.AllocateReg(IntRegs);
+    State.AllocateReg(IntRegs);
     LocVT = MVT::i32;
   } else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {
     // we are guaranteed to find an available float register
     if (ValVT == MVT::f32) {
-      Reg = State.AllocateReg(F32Regs, FloatRegsSize);
+      Reg = State.AllocateReg(F32Regs);
       // Shadow int register
-      State.AllocateReg(IntRegs, IntRegsSize);
+      State.AllocateReg(IntRegs);
     } else {
-      Reg = State.AllocateReg(F64Regs, FloatRegsSize);
+      Reg = State.AllocateReg(F64Regs);
       // Shadow int registers
-      unsigned Reg2 = State.AllocateReg(IntRegs, IntRegsSize);
+      unsigned Reg2 = State.AllocateReg(IntRegs);
       if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
-        State.AllocateReg(IntRegs, IntRegsSize);
-      State.AllocateReg(IntRegs, IntRegsSize);
+        State.AllocateReg(IntRegs);
+      State.AllocateReg(IntRegs);
     }
   } else
     llvm_unreachable("Cannot handle this ValVT.");
@@ -3648,7 +3646,7 @@ void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
                                          SelectionDAG &DAG,
                                          CCState &State) const {
   const ArrayRef<MCPhysReg> ArgRegs = ABI.GetVarArgRegs();
-  unsigned Idx = State.getFirstUnallocated(ArgRegs.data(), ArgRegs.size());
+  unsigned Idx = State.getFirstUnallocated(ArgRegs);
   unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
   MVT RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
   const TargetRegisterClass *RC = getRegClassFor(RegTy);
@@ -3715,7 +3713,7 @@ void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
          "Byval argument's alignment should be a multiple of"
          "RegSizeInBytes.");
 
-  FirstReg = State->getFirstUnallocated(IntArgRegs.data(), IntArgRegs.size());
+  FirstReg = State->getFirstUnallocated(IntArgRegs);
 
   // If Align > RegSizeInBytes, the first arg register must be even.
   // FIXME: This condition happens to do the right thing but it's not the
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 1258d96cf62..7346dff8602 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2186,7 +2186,7 @@ bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
   };
   const unsigned NumArgRegs = array_lengthof(ArgRegs);
 
-  unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
+  unsigned RegNum = State.getFirstUnallocated(ArgRegs);
 
   // Skip one register if the first unallocated register has an even register
   // number and there are still argument registers available which have not been
@@ -2214,7 +2214,7 @@ bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
   const unsigned NumArgRegs = array_lengthof(ArgRegs);
 
-  unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
+  unsigned RegNum = State.getFirstUnallocated(ArgRegs);
 
   // If there is only one Floating-point register left we need to put both f64
   // values of a split ppc_fp128 value on the stack.
@@ -2541,10 +2541,8 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
     if (DisablePPCFloatInVariadic)
       NumFPArgRegs = 0;
 
-    FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs,
-                                                          NumGPArgRegs));
-    FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs,
-                                                          NumFPArgRegs));
+    FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
+    FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
 
     // Make room for NumGPArgRegs and NumFPArgRegs.
     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
index 30b8c452908..dc89efd46a3 100644
--- a/lib/Target/R600/SIISelLowering.cpp
+++ b/lib/Target/R600/SIISelLowering.cpp
@@ -587,8 +587,8 @@ SDValue SITargetLowering::LowerFormalArguments(
   }
 
   if (Info->getShaderType() != ShaderType::COMPUTE) {
-    unsigned ScratchIdx = CCInfo.getFirstUnallocated(
-        AMDGPU::SGPR_32RegClass.begin(), AMDGPU::SGPR_32RegClass.getNumRegs());
+    unsigned ScratchIdx = CCInfo.getFirstUnallocated(ArrayRef<MCPhysReg>(
+        AMDGPU::SGPR_32RegClass.begin(), AMDGPU::SGPR_32RegClass.getNumRegs()));
     Info->ScratchOffsetReg = AMDGPU::SGPR_32RegClass.getRegister(ScratchIdx);
   }
   return Chain;
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index 891978ee541..beff028e17b 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -57,7 +57,7 @@ static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
     SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
   };
   // Try to get first reg.
-  if (unsigned Reg = State.AllocateReg(RegList, 6)) {
+  if (unsigned Reg = State.AllocateReg(RegList)) {
     State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
   } else {
     // Assign whole thing in stack.
@@ -68,7 +68,7 @@ static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
   }
 
   // Try to get second reg.
-  if (unsigned Reg = State.AllocateReg(RegList, 6))
+  if (unsigned Reg = State.AllocateReg(RegList))
     State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
   else
     State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
@@ -497,7 +497,7 @@ LowerFormalArguments_32(SDValue Chain,
     static const MCPhysReg ArgRegs[] = {
       SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
     };
-    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs, 6);
+    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
     const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
     unsigned ArgOffset = CCInfo.getNextStackOffset();
     if (NumAllocated == 6)
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index da53a795725..a17f0523256 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -2989,7 +2989,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5,
      X86::XMM6, X86::XMM7
    };
-    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
+    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
     assert((Subtarget->hasSSE1() || !NumXMMRegs) &&
            "SSE registers cannot be used when SSE is disabled");
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 7c281df31ac..b4b8499c1bc 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -2573,10 +2573,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
     // Find the first unallocated argument registers.
     ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
     ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
-    unsigned NumIntRegs =
-        CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
-    unsigned NumXMMRegs =
-        CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
+    unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
+    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
     assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
            "SSE register cannot be used when SSE is disabled!");
 
@@ -3002,7 +3000,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5,
      X86::XMM6, X86::XMM7
    };
-    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
+    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
     assert((Subtarget->hasSSE1() || !NumXMMRegs) &&
            "SSE registers cannot be used when SSE is disabled");
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 5ef25548465..d5a63547de6 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1371,8 +1371,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
       XCore::R0, XCore::R1, XCore::R2, XCore::R3
     };
     XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
-    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs,
-                                                     array_lengthof(ArgRegs));
+    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
     if (FirstVAReg < array_lengthof(ArgRegs)) {
       int offset = 0;
       // Save remaining registers, storing higher register numbers at a higher
diff --git a/utils/TableGen/CallingConvEmitter.cpp b/utils/TableGen/CallingConvEmitter.cpp
index 276a8b1813c..051a7e98141 100644
--- a/utils/TableGen/CallingConvEmitter.cpp
+++ b/utils/TableGen/CallingConvEmitter.cpp
@@ -124,7 +124,7 @@ void CallingConvEmitter::EmitAction(Record *Action,
         }
"\n" << IndentStr << "};\n"; O << IndentStr << "if (unsigned Reg = State.AllocateReg(RegList" - << Counter << ", " << RegList->getSize() << ")) {\n"; + << Counter << ")) {\n"; } O << IndentStr << " State.addLoc(CCValAssign::getReg(ValNo, ValVT, " << "Reg, LocVT, LocInfo));\n"; @@ -166,7 +166,7 @@ void CallingConvEmitter::EmitAction(Record *Action, O << IndentStr << "if (unsigned Reg = State.AllocateReg(RegList" << RegListNumber << ", " << "RegList" << ShadowRegListNumber - << ", " << RegList->getSize() << ")) {\n"; + << ")) {\n"; } O << IndentStr << " State.addLoc(CCValAssign::getReg(ValNo, ValVT, " << "Reg, LocVT, LocInfo));\n"; @@ -215,8 +215,7 @@ void CallingConvEmitter::EmitAction(Record *Action, O << IndentStr << "unsigned Offset" << ++Counter << " = State.AllocateStack(" << Size << ", " << Align << ", " - << "ShadowRegList" << ShadowRegListNumber << ", " - << ShadowRegList->getSize() << ");\n"; + << "ShadowRegList" << ShadowRegListNumber << ");\n"; O << IndentStr << "State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset" << Counter << ", LocVT, LocInfo));\n"; O << IndentStr << "return false;\n";