[mips][mips64r6] [ls][dw][lr] are not available in MIPS32r6/MIPS64r6

Summary:
Instead, the system is required to provide some means of handling unaligned
loads/stores without special instructions. Options include full hardware
support, full trap-and-emulate, and hybrids such as hardware support within
a cache line and trap-and-emulate for accesses that cross a cache line.
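
For a concrete picture of what changes in generated code, consider a
source-level access whose alignment the compiler cannot prove. The sketch
below is illustrative only (the function name and register choices are not
from this patch): pre-R6 targets expand the unaligned load into an lwl/lwr
pair, while MIPS32r6/MIPS64r6 emit an ordinary lw and rely on the system to
make it work.

    #include <cstdint>
    #include <cstring>

    // Compiles to an i32 load with alignment 1.
    uint32_t load_unaligned(const uint8_t *p) {
      uint32_t v;
      std::memcpy(&v, p, sizeof v); // p may be arbitrarily aligned
      return v;
    }

    // Pre-R6 (little endian):       MIPS32r6/MIPS64r6:
    //   lwl $2, 3($4)                 lw $2, 0($4)
    //   lwr $2, 0($4)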

MipsSETargetLowering::allowsUnalignedMemoryAccesses() has been configured to
assume that unaligned accesses are 'fast', on the basis that I expect few
hardware implementations to opt for pure-software handling of unaligned
accesses. Implementations that do handle them purely in software can
override this.
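
As a hypothetical sketch of such an override (not part of this patch; the
class name is made up), a pure-software implementation could keep unaligned
accesses legal, as the architecture requires, while reporting them as slow:

    bool SoftUnalignedSETargetLowering::allowsUnalignedMemoryAccesses(
        EVT VT, bool *Fast) const {
      if (Fast)
        *Fast = false; // every unaligned access may trap to the emulator
      return true;     // but it must still work on MIPS32r6/MIPS64r6
    }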

mips64-load-store-left-right.ll has been merged into load-store-left-right.ll.

The stricter testing revealed a Bits!=Bytes bug in passByValArg(). This has
been fixed and the variables renamed to clarify the units they hold.
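
To illustrate the bug with made-up numbers (not from the patch): passing a
7-byte byval struct with 4-byte registers on a little-endian target copies
one full word, then assembles the 3 leftover bytes from a halfword load and
a byte load:

    // After the word copy, OffsetInBytes = 4; bytes 4..6 remain.
    //   halfword (bytes 4-5): Shamt = TotalBytesLoaded * 8 = 0
    //   byte     (byte 6):    Shamt = TotalBytesLoaded * 8 = 16
    // The old code computed Shamt = TotalSizeLoaded, i.e. 2 rather than 16
    // for the final byte: a byte count used where a bit count was needed.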

Reviewers: zoran.jovanovic, jkolek, vmedic

Reviewed By: vmedic

Differential Revision: http://reviews.llvm.org/D3872

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@209512 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Daniel Sanders
Date:   2014-05-23 13:18:02 +00:00
Parent: f2938bf8da
Commit: 36b0fd51de

15 changed files with 602 additions and 143 deletions


@@ -35,7 +35,6 @@ include "Mips32r6InstrFormats.td"
 // Removed: jalx
 // Removed: ldxc1
 // Removed: luxc1
-// Removed: lwl, lwr, lwle, lwre, swl, swr, swle, swre
 // Removed: lwxc1
 // Removed: madd.[ds], nmadd.[ds], nmsub.[ds], sub.[ds]
 // Removed: mfhi, mflo, mthi, mtlo, madd, maddu, msub, msubu, mul


@@ -155,13 +155,13 @@ def SWR64 : StoreLeftRight<"swr", MipsSWR, GPR64Opnd, II_SWR>, LW_FM<0x2e>;
 }
 
 def LDL : LoadLeftRight<"ldl", MipsLDL, GPR64Opnd, II_LDL>, LW_FM<0x1a>,
-          ISA_MIPS3;
+          ISA_MIPS3_NOT_32R6_64R6;
 def LDR : LoadLeftRight<"ldr", MipsLDR, GPR64Opnd, II_LDR>, LW_FM<0x1b>,
-          ISA_MIPS3;
+          ISA_MIPS3_NOT_32R6_64R6;
 def SDL : StoreLeftRight<"sdl", MipsSDL, GPR64Opnd, II_SDL>, LW_FM<0x2c>,
-          ISA_MIPS3;
+          ISA_MIPS3_NOT_32R6_64R6;
 def SDR : StoreLeftRight<"sdr", MipsSDR, GPR64Opnd, II_SDR>, LW_FM<0x2d>,
-          ISA_MIPS3;
+          ISA_MIPS3_NOT_32R6_64R6;
 
 /// Load-linked, Store-conditional
 def LLD : LLBase<"lld", GPR64Opnd>, LW_FM<0x34>, ISA_MIPS3;


@@ -17,7 +17,6 @@
 // Removed: daddi
 // Removed: ddiv, ddivu, dmult, dmultu
 // Removed: div, divu
-// Removed: ldl, ldr, ldle, ldre, sdl, sdr, sdle, sdre
 //===----------------------------------------------------------------------===//
 //


@@ -202,8 +202,9 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
 #ifndef NDEBUG
   case ISD::LOAD:
   case ISD::STORE:
-    assert(cast<MemSDNode>(Node)->getMemoryVT().getSizeInBits() / 8 <=
-           cast<MemSDNode>(Node)->getAlignment() &&
+    assert((Subtarget.systemSupportsUnalignedAccess() ||
+            cast<MemSDNode>(Node)->getMemoryVT().getSizeInBits() / 8 <=
+            cast<MemSDNode>(Node)->getAlignment()) &&
            "Unexpected unaligned loads/stores.");
     break;
 #endif


@@ -1941,6 +1941,9 @@ SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
   LoadSDNode *LD = cast<LoadSDNode>(Op);
   EVT MemVT = LD->getMemoryVT();
 
+  if (Subtarget->systemSupportsUnalignedAccess())
+    return Op;
+
   // Return if load is aligned or if MemVT is neither i32 nor i64.
   if ((LD->getAlignment() >= MemVT.getSizeInBits() / 8) ||
       ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
@@ -2064,7 +2067,8 @@ SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
   EVT MemVT = SD->getMemoryVT();
 
   // Lower unaligned integer stores.
-  if ((SD->getAlignment() < MemVT.getSizeInBits() / 8) &&
+  if (!Subtarget->systemSupportsUnalignedAccess() &&
+      (SD->getAlignment() < MemVT.getSizeInBits() / 8) &&
       ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
     return lowerUnalignedIntStore(SD, DAG, Subtarget->isLittle());
@@ -3485,21 +3489,22 @@ passByValArg(SDValue Chain, SDLoc DL,
              MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
              const MipsCC &CC, const ByValArgInfo &ByVal,
              const ISD::ArgFlagsTy &Flags, bool isLittle) const {
-  unsigned ByValSize = Flags.getByValSize();
-  unsigned Offset = 0; // Offset in # of bytes from the beginning of struct.
-  unsigned RegSize = CC.regSize();
-  unsigned Alignment = std::min(Flags.getByValAlign(), RegSize);
-  EVT PtrTy = getPointerTy(), RegTy = MVT::getIntegerVT(RegSize * 8);
+  unsigned ByValSizeInBytes = Flags.getByValSize();
+  unsigned OffsetInBytes = 0; // From beginning of struct
+  unsigned RegSizeInBytes = CC.regSize();
+  unsigned Alignment = std::min(Flags.getByValAlign(), RegSizeInBytes);
+  EVT PtrTy = getPointerTy(), RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
 
   if (ByVal.NumRegs) {
     const MCPhysReg *ArgRegs = CC.intArgRegs();
-    bool LeftoverBytes = (ByVal.NumRegs * RegSize > ByValSize);
+    bool LeftoverBytes = (ByVal.NumRegs * RegSizeInBytes > ByValSizeInBytes);
     unsigned I = 0;
 
     // Copy words to registers.
-    for (; I < ByVal.NumRegs - LeftoverBytes; ++I, Offset += RegSize) {
+    for (; I < ByVal.NumRegs - LeftoverBytes;
+         ++I, OffsetInBytes += RegSizeInBytes) {
       SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
-                                    DAG.getConstant(Offset, PtrTy));
+                                    DAG.getConstant(OffsetInBytes, PtrTy));
       SDValue LoadVal = DAG.getLoad(RegTy, DL, Chain, LoadPtr,
                                     MachinePointerInfo(), false, false, false,
                                     Alignment);
@@ -3509,38 +3514,38 @@ passByValArg(SDValue Chain, SDLoc DL,
     }
 
     // Return if the struct has been fully copied.
-    if (ByValSize == Offset)
+    if (ByValSizeInBytes == OffsetInBytes)
       return;
 
     // Copy the remainder of the byval argument with sub-word loads and shifts.
     if (LeftoverBytes) {
-      assert((ByValSize > Offset) && (ByValSize < Offset + RegSize) &&
-             "Size of the remainder should be smaller than RegSize.");
+      assert((ByValSizeInBytes > OffsetInBytes) &&
+             (ByValSizeInBytes < OffsetInBytes + RegSizeInBytes) &&
+             "Size of the remainder should be smaller than RegSizeInBytes.");
       SDValue Val;
 
-      for (unsigned LoadSize = RegSize / 2, TotalSizeLoaded = 0;
-           Offset < ByValSize; LoadSize /= 2) {
-        unsigned RemSize = ByValSize - Offset;
+      for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;
+           OffsetInBytes < ByValSizeInBytes; LoadSizeInBytes /= 2) {
+        unsigned RemainingSizeInBytes = ByValSizeInBytes - OffsetInBytes;
 
-        if (RemSize < LoadSize)
+        if (RemainingSizeInBytes < LoadSizeInBytes)
           continue;
 
         // Load subword.
         SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
-                                      DAG.getConstant(Offset, PtrTy));
-        SDValue LoadVal =
-            DAG.getExtLoad(ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr,
-                           MachinePointerInfo(), MVT::getIntegerVT(LoadSize * 8),
-                           false, false, Alignment);
+                                      DAG.getConstant(OffsetInBytes, PtrTy));
+        SDValue LoadVal = DAG.getExtLoad(
+            ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr, MachinePointerInfo(),
+            MVT::getIntegerVT(LoadSizeInBytes * 8), false, false, Alignment);
         MemOpChains.push_back(LoadVal.getValue(1));
 
         // Shift the loaded value.
         unsigned Shamt;
 
         if (isLittle)
-          Shamt = TotalSizeLoaded;
+          Shamt = TotalBytesLoaded * 8;
         else
-          Shamt = (RegSize - (TotalSizeLoaded + LoadSize)) * 8;
+          Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;
 
         SDValue Shift = DAG.getNode(ISD::SHL, DL, RegTy, LoadVal,
                                     DAG.getConstant(Shamt, MVT::i32));
@@ -3550,9 +3555,9 @@ passByValArg(SDValue Chain, SDLoc DL,
         else
           Val = Shift;
 
-        Offset += LoadSize;
-        TotalSizeLoaded += LoadSize;
-        Alignment = std::min(Alignment, LoadSize);
+        OffsetInBytes += LoadSizeInBytes;
+        TotalBytesLoaded += LoadSizeInBytes;
+        Alignment = std::min(Alignment, LoadSizeInBytes);
       }
 
       unsigned ArgReg = ArgRegs[ByVal.FirstIdx + I];
@@ -3562,9 +3567,9 @@ passByValArg(SDValue Chain, SDLoc DL,
   }
 
   // Copy remainder of byval arg to it with memcpy.
-  unsigned MemCpySize = ByValSize - Offset;
+  unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;
   SDValue Src = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
-                            DAG.getConstant(Offset, PtrTy));
+                            DAG.getConstant(OffsetInBytes, PtrTy));
   SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr,
                             DAG.getIntPtrConstant(ByVal.Address));
   Chain = DAG.getMemcpy(Chain, DL, Dst, Src, DAG.getConstant(MemCpySize, PtrTy),


@@ -225,6 +225,9 @@ class ISA_MIPS1_NOT_32R6_64R6 {
 }
 class ISA_MIPS2 { list<Predicate> InsnPredicates = [HasMips2]; }
 class ISA_MIPS3 { list<Predicate> InsnPredicates = [HasMips3]; }
+class ISA_MIPS3_NOT_32R6_64R6 {
+  list<Predicate> InsnPredicates = [HasMips3, NotMips32r6, NotMips64r6];
+}
 class ISA_MIPS32 { list<Predicate> InsnPredicates = [HasMips32]; }
 class ISA_MIPS32R2 { list<Predicate> InsnPredicates = [HasMips32r2]; }
 class ISA_MIPS64 { list<Predicate> InsnPredicates = [HasMips64]; }
@@ -1087,10 +1090,14 @@ def SW : Store<"sw", GPR32Opnd, store, II_SW>, MMRel, LW_FM<0x2b>;
 /// load/store left/right
 let EncodingPredicates = []<Predicate>, // FIXME: Lack of HasStdEnc is probably a bug
     AdditionalPredicates = [NotInMicroMips] in {
-def LWL : LoadLeftRight<"lwl", MipsLWL, GPR32Opnd, II_LWL>, LW_FM<0x22>;
-def LWR : LoadLeftRight<"lwr", MipsLWR, GPR32Opnd, II_LWR>, LW_FM<0x26>;
-def SWL : StoreLeftRight<"swl", MipsSWL, GPR32Opnd, II_SWL>, LW_FM<0x2a>;
-def SWR : StoreLeftRight<"swr", MipsSWR, GPR32Opnd, II_SWR>, LW_FM<0x2e>;
+def LWL : LoadLeftRight<"lwl", MipsLWL, GPR32Opnd, II_LWL>, LW_FM<0x22>,
+          ISA_MIPS1_NOT_32R6_64R6;
+def LWR : LoadLeftRight<"lwr", MipsLWR, GPR32Opnd, II_LWR>, LW_FM<0x26>,
+          ISA_MIPS1_NOT_32R6_64R6;
+def SWL : StoreLeftRight<"swl", MipsSWL, GPR32Opnd, II_SWL>, LW_FM<0x2a>,
+          ISA_MIPS1_NOT_32R6_64R6;
+def SWR : StoreLeftRight<"swr", MipsSWR, GPR32Opnd, II_SWR>, LW_FM<0x2e>,
+          ISA_MIPS1_NOT_32R6_64R6;
 }
 
 def SYNC : MMRel, SYNC_FT<"sync">, SYNC_FM;
def SYNC : MMRel, SYNC_FT<"sync">, SYNC_FM;


@@ -254,6 +254,16 @@ MipsSETargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                     bool *Fast) const {
   MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;
 
+  if (Subtarget->systemSupportsUnalignedAccess()) {
+    // MIPS32r6/MIPS64r6 is required to support unaligned access. It's
+    // implementation defined whether this is handled by hardware, software, or
+    // a hybrid of the two but it's expected that most implementations will
+    // handle the majority of cases in hardware.
+    if (Fast)
+      *Fast = true;
+    return true;
+  }
+
   switch (SVT) {
   case MVT::i64:
   case MVT::i32:


@@ -234,7 +234,12 @@ public:
   /// \brief Reset the subtarget for the Mips target.
   void resetSubtarget(MachineFunction *MF);
 
+  /// Does the system support unaligned memory access.
+  ///
+  /// MIPS32r6/MIPS64r6 require full unaligned access support but do not
+  /// specify which component of the system provides it. Hardware, software,
+  /// and hybrid implementations are all valid.
   bool systemSupportsUnalignedAccess() const { return hasMips32r6(); }
 };
 } // End llvm namespace