Refactor load/store handling again. Simplify and make some room for
reg+reg handling.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@120526 91177308-0d34-0410-b5e6-96231b3b80d8
Eric Christopher 2010-12-01 01:40:24 +00:00
parent a63bf704b4
commit 564857f776

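Editor's note: the commit message above describes collapsing the duplicated frame-index versus register-base operand emission in ARMEmitLoad/ARMEmitStore into a single AddLoadStoreOperands helper. Below is a minimal, self-contained C++ sketch of that pattern only, with made-up names (Builder, Addr, emitLoad, emitStore); it is not LLVM code, it just shows why a shared helper leaves a single place to add a future reg+reg addressing form.

#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-ins for MachineInstrBuilder and the Address struct.
struct Builder {
  std::vector<std::string> Ops;
  Builder &add(const std::string &Op) { Ops.push_back(Op); return *this; }
};

struct Addr {
  bool IsFrameIndex;   // stack slot vs. plain register base
  int FrameIndex;
  std::string BaseReg;
  int Offset;
};

// The shared helper: the one place that knows how to append address operands.
void addLoadStoreOperands(const Addr &A, Builder &MIB) {
  if (A.IsFrameIndex)
    MIB.add("fi#" + std::to_string(A.FrameIndex));
  else
    MIB.add(A.BaseReg);
  MIB.add("imm:" + std::to_string(A.Offset));
  // A reg+reg addressing form would become one more branch here instead of
  // being duplicated across every load and store emitter.
}

Builder emitLoad(const std::string &DstReg, const Addr &A) {
  Builder MIB;
  MIB.add("LDR").add(DstReg);
  addLoadStoreOperands(A, MIB);   // same helper as the store path
  return MIB;
}

Builder emitStore(const std::string &SrcReg, const Addr &A) {
  Builder MIB;
  MIB.add("STR").add(SrcReg);
  addLoadStoreOperands(A, MIB);
  return MIB;
}

int main() {
  const Addr Stack = {true, 2, "", 8};      // frame-index base
  const Addr RegBase = {false, 0, "r1", 4}; // register base
  for (const Builder &B : {emitLoad("r0", Stack), emitStore("r0", RegBase)}) {
    for (const std::string &Op : B.Ops)
      std::cout << Op << ' ';
    std::cout << '\n';
  }
  return 0;
}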

@@ -192,6 +192,8 @@ class ARMFastISel : public FastISel {
private:
bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
void AddLoadStoreOperands(EVT VT, Address &Addr,
const MachineInstrBuilder &MIB);
};
} // end anonymous namespace
@@ -785,16 +787,52 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {
}
}
void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
const MachineInstrBuilder &MIB) {
// addrmode5 output depends on the selection dag addressing dividing the
// offset by 4 that it then later multiplies. Do this here as well.
if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
VT.getSimpleVT().SimpleTy == MVT::f64)
Addr.Offset /= 4;
// Frame base works a bit differently. Handle it separately.
if (Addr.BaseType == Address::FrameIndexBase) {
int FI = Addr.Base.FI;
int Offset = Addr.Offset;
MachineMemOperand *MMO =
FuncInfo.MF->getMachineMemOperand(
MachinePointerInfo::getFixedStack(FI, Offset),
MachineMemOperand::MOLoad,
MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
// Now add the rest of the operands.
MIB.addFrameIndex(FI);
// ARM halfword load/stores need an additional operand.
if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);
MIB.addImm(Addr.Offset);
MIB.addMemOperand(MMO);
} else {
// Now add the rest of the operands.
MIB.addReg(Addr.Base.Reg);
// ARM halfword load/stores need an additional operand.
if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);
MIB.addImm(Addr.Offset);
}
AddOptionalDefs(MIB);
}
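Editor's note on the addrmode5 scaling at the top of this helper, as a hedged standalone illustration rather than LLVM code: the VFP load/store immediate counts 4-byte words, so the byte offset is divided by 4 here and scaled back up when the instruction is encoded, matching the comment above about the selection DAG doing the same division.

#include <cassert>

int main() {
  int ByteOffset = 8;                 // e.g. an f64 slot 8 bytes into the frame
  int Addrmode5Imm = ByteOffset / 4;  // what ends up in the MI immediate operand
  assert(Addrmode5Imm == 2);
  assert(Addrmode5Imm * 4 == ByteOffset); // the encoder multiplies it back
  return 0;
}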
bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {
assert(VT.isSimple() && "Non-simple types are invalid here!");
unsigned Opc;
TargetRegisterClass *RC;
bool isFloat = false;
switch (VT.getSimpleVT().SimpleTy) {
default:
// This is mostly going to be Neon/vector support.
return false;
// This is mostly going to be Neon/vector support.
default: return false;
case MVT::i16:
Opc = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;
RC = ARM::GPRRegisterClass;
@@ -810,55 +848,20 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {
case MVT::f32:
Opc = ARM::VLDRS;
RC = TLI.getRegClassFor(VT);
isFloat = true;
break;
case MVT::f64:
Opc = ARM::VLDRD;
RC = TLI.getRegClassFor(VT);
isFloat = true;
break;
}
ResultReg = createResultReg(RC);
// Simplify this down to something we can handle.
ARMSimplifyAddress(Addr, VT);
// addrmode5 output depends on the selection dag addressing dividing the
// offset by 4 that it then later multiplies. Do this here as well.
if (isFloat)
Addr.Offset /= 4;
if (Addr.BaseType == Address::FrameIndexBase) {
int FI = Addr.Base.FI;
int Offset = Addr.Offset;
MachineMemOperand *MMO =
FuncInfo.MF->getMachineMemOperand(
MachinePointerInfo::getFixedStack(FI, Offset),
MachineMemOperand::MOLoad,
MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
// LDRH needs an additional operand.
if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), ResultReg)
.addFrameIndex(FI).addReg(0).addImm(Offset)
.addMemOperand(MMO));
else
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), ResultReg)
.addFrameIndex(FI).addImm(Offset).addMemOperand(MMO));
return true;
}
// LDRH needs an additional operand.
if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), ResultReg)
.addReg(Addr.Base.Reg).addReg(0).addImm(Addr.Offset));
else
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), ResultReg)
.addReg(Addr.Base.Reg).addImm(Addr.Offset));
// Create the base instruction, then add the operands.
ResultReg = createResultReg(RC);
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), ResultReg);
AddLoadStoreOperands(VT, Addr, MIB);
return true;
}
@@ -868,24 +871,20 @@ bool ARMFastISel::SelectLoad(const Instruction *I) {
if (!isLoadTypeLegal(I->getType(), VT))
return false;
// Our register and offset with innocuous defaults.
// See if we can handle this address.
Address Addr;
// See if we can handle this as Reg + Offset
if (!ARMComputeAddress(I->getOperand(0), Addr))
return false;
if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;
unsigned ResultReg;
if (!ARMEmitLoad(VT, ResultReg, Addr)) return false;
UpdateValueMap(I, ResultReg);
return true;
}
bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
unsigned StrOpc;
bool isFloat = false;
switch (VT.getSimpleVT().SimpleTy) {
// This is mostly going to be Neon/vector support.
default: return false;
case MVT::i1: {
unsigned Res = createResultReg(isThumb ? ARM::tGPRRegisterClass :
@@ -908,58 +907,20 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
case MVT::f32:
if (!Subtarget->hasVFP2()) return false;
StrOpc = ARM::VSTRS;
isFloat = true;
break;
case MVT::f64:
if (!Subtarget->hasVFP2()) return false;
StrOpc = ARM::VSTRD;
isFloat = true;
break;
}
// Simplify this down to something we can handle.
ARMSimplifyAddress(Addr, VT);
// addrmode5 output depends on the selection dag addressing dividing the
// offset by 4 that it then later multiplies. Do this here as well.
if (isFloat)
Addr.Offset /= 4;
if (Addr.BaseType == Address::FrameIndexBase) {
int FI = Addr.Base.FI;
int Offset = Addr.Offset;
MachineMemOperand *MMO =
FuncInfo.MF->getMachineMemOperand(
MachinePointerInfo::getFixedStack(FI, Offset),
MachineMemOperand::MOLoad,
MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
// ARM::STRH needs an additional operand.
if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(StrOpc))
.addReg(SrcReg, getKillRegState(true))
.addFrameIndex(FI).addReg(0).addImm(Offset)
.addMemOperand(MMO));
else
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(StrOpc))
.addReg(SrcReg, getKillRegState(true))
.addFrameIndex(FI).addImm(Offset).addMemOperand(MMO));
return true;
}
// ARM::STRH needs an additional operand.
if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(StrOpc))
.addReg(SrcReg).addReg(Addr.Base.Reg)
.addReg(0).addImm(Addr.Offset));
else
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(StrOpc))
.addReg(SrcReg).addReg(Addr.Base.Reg).addImm(Addr.Offset));
// Create the base instruction, then add the operands.
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(StrOpc))
.addReg(SrcReg, getKillRegState(true));
AddLoadStoreOperands(VT, Addr, MIB);
return true;
}
@@ -967,25 +928,21 @@ bool ARMFastISel::SelectStore(const Instruction *I) {
Value *Op0 = I->getOperand(0);
unsigned SrcReg = 0;
// Yay type legalization
// Verify we have a legal type before going any further.
MVT VT;
if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
return false;
// Get the value to be stored into a register.
SrcReg = getRegForValue(Op0);
if (SrcReg == 0)
return false;
if (SrcReg == 0) return false;
// Our register and offset with innocuous defaults.
// See if we can handle this address.
Address Addr;
// See if we can handle this as Reg + Offset
if (!ARMComputeAddress(I->getOperand(1), Addr))
return false;
if (!ARMEmitStore(VT, SrcReg, Addr)) return false;
return true;
}
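Editor's note, a hedged source-level example that is not part of the patch: compiled at -O0 for ARM, where FastISel runs, a function like the one below exercises both branches of AddLoadStoreOperands, since the local lives in an alloca (frame-index base) while the parameter is accessed through a plain register base; the i16 width also triggers the extra addReg(0) operand that ARM-mode halfword loads and stores need. Identifiers are made up.

// Illustrative source only.
short roundTripHalfword(short *P) {
  short Local = *P; // load through a register base (LDRH / t2LDRHi12)
  Local += 1;       // at -O0, Local is an alloca, so these accesses use a frame index
  *P = Local;       // store through a register base
  return Local;
}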