Change TargetLowering::getRegClassFor to take an MVT instead of an EVT.

Accordingly, add helper functions getSimpleValueType (in parallel to
getValueType) in SDValue, SDNode, and TargetLowering.
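
The new accessors mirror the existing getValueType interfaces. A rough
sketch of their likely shape (inferred from the description above and
simplified, not copied verbatim from the patch):

  // In TargetLowering (sketch, simplified):
  //   MVT getSimpleValueType(Type *Ty, bool AllowUnknown = false) const;
  //   const TargetRegisterClass *getRegClassFor(MVT VT) const;  // was EVT
  // with getSimpleValueType() presumably built on the existing accessor:
  MVT TargetLowering::getSimpleValueType(Type *Ty, bool AllowUnknown) const {
    return getValueType(Ty, AllowUnknown).getSimpleVT(); // asserts isSimple()
  }

  // In SDValue and SDNode (sketch), the same idea for DAG value types:
  MVT SDValue::getSimpleValueType() const {
    return getValueType().getSimpleVT();
  }
  MVT SDNode::getSimpleValueType(unsigned ResNo) const {
    return getValueType(ResNo).getSimpleVT();
  }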

This is the first in a series of patches.

This is the second attempt. In the first attempt (r169837), a few
getSimpleVT() calls were hoisted too far, which was caught by bootstrap
failures.
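
In callers (for example the ARMFastISel changes below), the pattern
change looks roughly like this (an illustrative fragment; TLI and Ty
stand for the TargetLowering reference and IR type already in scope):

  // Before: fetch an EVT, reject non-simple types by hand, and unwrap
  // the MVT at every use.
  EVT VT = TLI.getValueType(Ty, /*AllowUnknown=*/true);
  if (!VT.isSimple()) return 0;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT); // accepted an EVT
  switch (VT.getSimpleVT().SimpleTy) { /* ... */ }

  // After: get the MVT up front; the isSimple() check and the
  // getSimpleVT() unwrapping disappear.
  MVT SimpleVT = TLI.getSimpleValueType(Ty, /*AllowUnknown=*/true);
  const TargetRegisterClass *SimpleRC = TLI.getRegClassFor(SimpleVT);
  switch (SimpleVT.SimpleTy) { /* ... */ }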


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@170104 91177308-0d34-0410-b5e6-96231b3b80d8
Patrik Hagglund committed 2012-12-13 06:34:11 +00:00
parent e3ec87a6f7
commit a61b17c18a
13 changed files with 89 additions and 81 deletions

lib/Target/ARM/ARMFastISel.cpp

@@ -178,24 +178,24 @@ class ARMFastISel : public FastISel {
     bool isLoadTypeLegal(Type *Ty, MVT &VT);
     bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                     bool isZExt);
-    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
+    bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                      unsigned Alignment = 0, bool isZExt = true,
                      bool allocReg = true);
-    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
+    bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                       unsigned Alignment = 0);
     bool ARMComputeAddress(const Value *Obj, Address &Addr);
     void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
     bool ARMIsMemCpySmall(uint64_t Len);
     bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                                unsigned Alignment);
-    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
-    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
-    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
-    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
-    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
-    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
+    unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
+    unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
+    unsigned ARMMaterializeInt(const Constant *C, MVT VT);
+    unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
+    unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
+    unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
     unsigned ARMSelectCallOp(bool UseReg);
-    unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, EVT VT);
+    unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT);
 
     // Call handling routines.
   private:
@@ -487,7 +487,7 @@ unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
 
 // TODO: Don't worry about 64-bit now, but when this is fixed remove the
 // checks from the various callers.
-unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
+unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
   if (VT == MVT::f64) return 0;
 
   unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
@@ -497,7 +497,7 @@ unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
   return MoveReg;
 }
 
-unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
+unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
   if (VT == MVT::i64) return 0;
 
   unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
@@ -510,7 +510,7 @@ unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
 // For double width floating point we need to materialize two constants
 // (the high and the low) into integer registers then use a move to get
 // the combined constant into an FP reg.
-unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
+unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
   const APFloat Val = CFP->getValueAPF();
   bool is64bit = VT == MVT::f64;
 
@@ -554,7 +554,7 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
   return DestReg;
 }
 
-unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
+unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
   if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
     return false;
 
@@ -616,7 +616,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
   return DestReg;
 }
 
-unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
+unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
   // For now 32-bit only.
   if (VT != MVT::i32) return 0;
 
@@ -719,10 +719,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
 }
 
 unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
-  EVT VT = TLI.getValueType(C->getType(), true);
-
-  // Only handle simple types.
-  if (!VT.isSimple()) return 0;
+  MVT VT = TLI.getSimpleValueType(C->getType(), true);
 
   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
     return ARMMaterializeFP(CFP, VT);
@@ -1003,14 +1000,13 @@ void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
   AddOptionalDefs(MIB);
 }
 
-bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
+bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                               unsigned Alignment, bool isZExt, bool allocReg) {
-  assert(VT.isSimple() && "Non-simple types are invalid here!");
   unsigned Opc;
   bool useAM3 = false;
   bool needVMOV = false;
   const TargetRegisterClass *RC;
-  switch (VT.getSimpleVT().SimpleTy) {
+  switch (VT.SimpleTy) {
     // This is mostly going to be Neon/vector support.
     default: return false;
     case MVT::i1:
@@ -1127,11 +1123,11 @@ bool ARMFastISel::SelectLoad(const Instruction *I) {
   return true;
 }
 
-bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
+bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                                unsigned Alignment) {
   unsigned StrOpc;
   bool useAM3 = false;
-  switch (VT.getSimpleVT().SimpleTy) {
+  switch (VT.SimpleTy) {
     // This is mostly going to be Neon/vector support.
     default: return false;
     case MVT::i1: {
@@ -1405,8 +1401,7 @@ bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
 bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                              bool isZExt) {
   Type *Ty = Src1Value->getType();
-  EVT SrcVT = TLI.getValueType(Ty, true);
-  if (!SrcVT.isSimple()) return false;
+  MVT SrcVT = TLI.getSimpleValueType(Ty, true);
 
   bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
   if (isFloat && !Subtarget->hasVFP2())
@@ -1443,7 +1438,7 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
   unsigned CmpOpc;
   bool isICmp = true;
   bool needsExt = false;
-  switch (SrcVT.getSimpleVT().SimpleTy) {
+  switch (SrcVT.SimpleTy) {
     default: return false;
     // TODO: Verify compares.
     case MVT::f32:
@@ -1595,7 +1590,7 @@ bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
     return false;
 
   Value *Src = I->getOperand(0);
-  EVT SrcVT = TLI.getValueType(Src->getType(), true);
+  MVT SrcVT = TLI.getSimpleValueType(Src->getType(), true);
   if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
     return false;
 
@@ -1604,7 +1599,7 @@ bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
 
   // Handle sign-extension.
   if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
-    EVT DestVT = MVT::i32;
+    MVT DestVT = MVT::i32;
     SrcReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT,
                            /*isZExt*/!isSigned);
     if (SrcReg == 0) return false;
@@ -1811,7 +1806,7 @@ bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
 }
 
 bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
-  EVT VT = TLI.getValueType(I->getType(), true);
+  MVT VT = TLI.getSimpleValueType(I->getType(), true);
 
   // We can get here in the case when we want to use NEON for our fp
   // operations, but can't figure out how to. Just use the vfp instructions
@@ -2055,7 +2050,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
   if (RVLocs.size() == 2 && RetVT == MVT::f64) {
     // For this move we copy into two registers and then move into the
     // double fp reg we want.
-    EVT DestVT = RVLocs[0].getValVT();
+    MVT DestVT = RVLocs[0].getValVT();
     const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
     unsigned ResultReg = createResultReg(DstRC);
     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
@@ -2070,7 +2065,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
     UpdateValueMap(I, ResultReg);
   } else {
     assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!");
-    EVT CopyVT = RVLocs[0].getValVT();
+    MVT CopyVT = RVLocs[0].getValVT();
 
     // Special handling for extended integers.
     if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
@@ -2129,8 +2124,8 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
       return false;
 
     unsigned SrcReg = Reg + VA.getValNo();
-    EVT RVVT = TLI.getValueType(RV->getType());
-    EVT DestVT = VA.getValVT();
+    MVT RVVT = TLI.getSimpleValueType(RV->getType());
+    MVT DestVT = VA.getValVT();
     // Special handling for extended integers.
     if (RVVT != DestVT) {
       if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
@@ -2175,7 +2170,7 @@ unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
 unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
   GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
                                        GlobalValue::ExternalLinkage, 0, Name);
-  return ARMMaterializeGV(GV, TLI.getValueType(GV->getType()));
+  return ARMMaterializeGV(GV, TLI.getSimpleValueType(GV->getType()));
 }
 
 // A quick function that will emit a call for a named libcall in F with the
@@ -2587,7 +2582,7 @@ bool ARMFastISel::SelectTrunc(const Instruction *I) {
   return true;
 }
 
-unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
+unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                     bool isZExt) {
   if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
     return 0;
@@ -2595,8 +2590,7 @@ unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
   unsigned Opc;
   bool isBoolZext = false;
   const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::i32);
-  if (!SrcVT.isSimple()) return 0;
-  switch (SrcVT.getSimpleVT().SimpleTy) {
+  switch (SrcVT.SimpleTy) {
     default: return 0;
     case MVT::i16:
       if (!Subtarget->hasV6Ops()) return 0;
@@ -2643,9 +2637,9 @@ bool ARMFastISel::SelectIntExt(const Instruction *I) {
   Value *Src = I->getOperand(0);
   Type *SrcTy = Src->getType();
 
-  EVT SrcVT, DestVT;
-  SrcVT = TLI.getValueType(SrcTy, true);
-  DestVT = TLI.getValueType(DestTy, true);
+  MVT SrcVT, DestVT;
+  SrcVT = TLI.getSimpleValueType(SrcTy, true);
+  DestVT = TLI.getSimpleValueType(DestTy, true);
 
   bool isZExt = isa<ZExtInst>(I);
   unsigned SrcReg = getRegForValue(Src);
@@ -2830,7 +2824,7 @@ bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
 }
 
 unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
-                                     unsigned Align, EVT VT) {
+                                     unsigned Align, MVT VT) {
   bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
   ARMConstantPoolConstant *CPV =
     ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);