Fix handling of double precision loads and stores when Mips1 is targeted.

Mips1 does not support double precision loads or stores, so two single
precision loads or stores must be used in their place. This patch treats
double precision loads and stores as if they were legal instructions until
MCInstLowering, instead of expanding them into single precision instructions
during instruction selection or Prolog/Epilog code insertion.

Without the changes in this patch, llc either produces code that exhibits the
problem described in r137484 or bails out when
MipsInstrInfo::storeRegToStackSlot or loadRegFromStackSlot is called before
register allocation.
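
For illustration, the expansion now done at MCInstLowering looks roughly like
this on a little-endian Mips1 target (the register pair and the 0($sp) address
are made up for the example; the exact rules, including the swapped
register/offset pairing on big-endian targets and the nop filling the load
delay slot, are in the MipsMCInstLower.cpp hunk below):

  ldc1 $f0, 0($sp)   becomes   lwc1 $f0, 0($sp)
                               lwc1 $f1, 4($sp)
                               nop              # load delay slot
  sdc1 $f0, 0($sp)   becomes   swc1 $f0, 0($sp)
                               swc1 $f1, 4($sp)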



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@137711 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Akira Hatanaka
Date:   2011-08-16 03:51:51 +00:00
Parent: 8957481e6a
Commit: 614051a1c5
7 changed files with 74 additions and 168 deletions


@@ -18,6 +18,7 @@
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
#include "MipsMCInstLower.h"
#include "MipsMCSymbolRefExpr.h"
#include "InstPrinter/MipsInstPrinter.h"
#include "llvm/BasicBlock.h"
#include "llvm/Instructions.h"
@@ -36,6 +37,7 @@
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegistry.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/raw_ostream.h"
@@ -53,9 +55,26 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
MipsMCInstLower MCInstLowering(Mang, *MF, *this);
unsigned Opc = MI->getOpcode();
// If target is Mips1, expand double precision load/store to two single
// precision loads/stores (and delay slot if MI is a load).
if (Subtarget->isMips1() && (Opc == Mips::LDC1 || Opc == Mips::SDC1)) {
SmallVector<MCInst, 4> MCInsts;
const unsigned* SubReg =
TM.getRegisterInfo()->getSubRegisters(MI->getOperand(0).getReg());
MCInstLowering.LowerMips1F64LoadStore(MI, Opc, MCInsts,
Subtarget->isLittle(), SubReg);
for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin();
I != MCInsts.end(); ++I)
OutStreamer.EmitInstruction(*I);
return;
}
MCInst TmpInst0;
MCInstLowering.Lower(MI, TmpInst0);
unsigned Opc = MI->getOpcode();
// Convert aligned loads/stores to their unaligned counterparts.
// FIXME: expand other unaligned memory accesses too.


@@ -86,9 +86,6 @@ private:
// Complex Pattern.
bool SelectAddr(SDValue N, SDValue &Base, SDValue &Offset);
SDNode *SelectLoadFp64(SDNode *N);
SDNode *SelectStoreFp64(SDNode *N);
// getI32Imm - Return a target constant with the specified
// value, of type i32.
inline SDValue getI32Imm(unsigned Imm) {
@@ -184,130 +181,6 @@ SelectAddr(SDValue Addr, SDValue &Base, SDValue &Offset) {
return true;
}
SDNode *MipsDAGToDAGISel::SelectLoadFp64(SDNode *N) {
MVT::SimpleValueType NVT =
N->getValueType(0).getSimpleVT().SimpleTy;
if (!Subtarget.isMips1() || NVT != MVT::f64)
return NULL;
LoadSDNode *LN = cast<LoadSDNode>(N);
if (LN->getExtensionType() != ISD::NON_EXTLOAD ||
LN->getAddressingMode() != ISD::UNINDEXED)
return NULL;
SDValue Chain = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue Offset0, Offset1, Base;
if (!SelectAddr(N1, Base, Offset0) ||
N1.getValueType() != MVT::i32)
return NULL;
MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1);
MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
DebugLoc dl = N->getDebugLoc();
// The second load's offset is 4 bytes after the first.
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Offset0))
Offset1 = CurDAG->getTargetConstant(C->getSExtValue()+4, MVT::i32);
else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Offset0))
Offset1 = CurDAG->getTargetConstantPool(CP->getConstVal(),
MVT::i32,
CP->getAlignment(),
CP->getOffset()+4,
CP->getTargetFlags());
else
return NULL;
// Choose the offsets depending on the endianness
if (TM.getTargetData()->isBigEndian())
std::swap(Offset0, Offset1);
// Instead of:
// ldc $f0, X($3)
// Generate:
// lwc $f0, X($3)
// lwc $f1, X+4($3)
SDNode *LD0 = CurDAG->getMachineNode(Mips::LWC1, dl, MVT::f32,
MVT::Other, Base, Offset0, Chain);
SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
dl, NVT), 0);
SDValue I0 = CurDAG->getTargetInsertSubreg(Mips::sub_fpeven, dl,
MVT::f64, Undef, SDValue(LD0, 0));
SDNode *LD1 = CurDAG->getMachineNode(Mips::LWC1, dl, MVT::f32,
MVT::Other, Base, Offset1, SDValue(LD0, 1));
SDValue I1 = CurDAG->getTargetInsertSubreg(Mips::sub_fpodd, dl,
MVT::f64, I0, SDValue(LD1, 0));
ReplaceUses(SDValue(N, 0), I1);
ReplaceUses(SDValue(N, 1), Chain);
cast<MachineSDNode>(LD0)->setMemRefs(MemRefs0, MemRefs0 + 1);
cast<MachineSDNode>(LD1)->setMemRefs(MemRefs0, MemRefs0 + 1);
return I1.getNode();
}
SDNode *MipsDAGToDAGISel::SelectStoreFp64(SDNode *N) {
if (!Subtarget.isMips1() ||
N->getOperand(1).getValueType() != MVT::f64)
return NULL;
SDValue Chain = N->getOperand(0);
StoreSDNode *SN = cast<StoreSDNode>(N);
if (SN->isTruncatingStore() || SN->getAddressingMode() != ISD::UNINDEXED)
return NULL;
SDValue N1 = N->getOperand(1);
SDValue N2 = N->getOperand(2);
SDValue Offset0, Offset1, Base;
if (!SelectAddr(N2, Base, Offset0) ||
N1.getValueType() != MVT::f64 ||
N2.getValueType() != MVT::i32)
return NULL;
MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1);
MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
DebugLoc dl = N->getDebugLoc();
// Get the even and odd part from the f64 register
SDValue FPOdd = CurDAG->getTargetExtractSubreg(Mips::sub_fpodd,
dl, MVT::f32, N1);
SDValue FPEven = CurDAG->getTargetExtractSubreg(Mips::sub_fpeven,
dl, MVT::f32, N1);
// The second store's offset is 4 bytes after the first.
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Offset0))
Offset1 = CurDAG->getTargetConstant(C->getSExtValue()+4, MVT::i32);
else
return NULL;
// Choose the offsets depending on the endianness
if (TM.getTargetData()->isBigEndian())
std::swap(Offset0, Offset1);
// Instead of:
// sdc $f0, X($3)
// Generate:
// swc $f0, X($3)
// swc $f1, X+4($3)
SDValue Ops0[] = { FPEven, Base, Offset0, Chain };
Chain = SDValue(CurDAG->getMachineNode(Mips::SWC1, dl,
MVT::Other, Ops0, 4), 0);
cast<MachineSDNode>(Chain.getNode())->setMemRefs(MemRefs0, MemRefs0 + 1);
SDValue Ops1[] = { FPOdd, Base, Offset1, Chain };
Chain = SDValue(CurDAG->getMachineNode(Mips::SWC1, dl,
MVT::Other, Ops1, 4), 0);
cast<MachineSDNode>(Chain.getNode())->setMemRefs(MemRefs0, MemRefs0 + 1);
ReplaceUses(SDValue(N, 0), Chain);
return Chain.getNode();
}
/// Select instructions not customized! Used for
/// expanded, promoted and normal instructions
SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
@@ -423,18 +296,6 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
break;
}
case ISD::LOAD:
if (SDNode *ResNode = SelectLoadFp64(Node))
return ResNode;
// Other cases are autogenerated.
break;
case ISD::STORE:
if (SDNode *ResNode = SelectStoreFp64(Node))
return ResNode;
// Other cases are autogenerated.
break;
case MipsISD::ThreadPointer: {
unsigned SrcReg = Mips::HWR29;
unsigned DestReg = Mips::V1;


@@ -190,7 +190,7 @@ def FMOV_D32 : FFR<0x11, 0b000110, 0x1, (outs AFGR64:$fd), (ins AFGR64:$fs),
"mov.d\t$fd, $fs", []>;
/// Floating Point Memory Instructions
let Predicates = [IsNotSingleFloat, IsNotMipsI] in {
let Predicates = [IsNotSingleFloat] in {
def LDC1 : FFI<0b110101, (outs AFGR64:$ft), (ins mem:$addr),
"ldc1\t$ft, $addr", [(set AFGR64:$ft, (load addr:$addr))]>;


@@ -175,21 +175,9 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
BuildMI(MBB, I, DL, get(Mips::SWC1)).addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FI).addImm(0);
else if (RC == Mips::AFGR64RegisterClass) {
if (!TM.getSubtarget<MipsSubtarget>().isMips1()) {
BuildMI(MBB, I, DL, get(Mips::SDC1))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FI).addImm(0);
} else {
const TargetRegisterInfo *TRI =
MBB.getParent()->getTarget().getRegisterInfo();
const unsigned *SubSet = TRI->getSubRegisters(SrcReg);
BuildMI(MBB, I, DL, get(Mips::SWC1))
.addReg(SubSet[0], getKillRegState(isKill))
.addFrameIndex(FI).addImm(0);
BuildMI(MBB, I, DL, get(Mips::SWC1))
.addReg(SubSet[1], getKillRegState(isKill))
.addFrameIndex(FI).addImm(4);
}
BuildMI(MBB, I, DL, get(Mips::SDC1))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FI).addImm(0);
} else
llvm_unreachable("Register class not handled!");
}
@@ -208,17 +196,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
else if (RC == Mips::FGR32RegisterClass)
BuildMI(MBB, I, DL, get(Mips::LWC1), DestReg).addFrameIndex(FI).addImm(0);
else if (RC == Mips::AFGR64RegisterClass) {
if (!TM.getSubtarget<MipsSubtarget>().isMips1()) {
BuildMI(MBB, I, DL, get(Mips::LDC1), DestReg).addFrameIndex(FI).addImm(0);
} else {
const TargetRegisterInfo *TRI =
MBB.getParent()->getTarget().getRegisterInfo();
const unsigned *SubSet = TRI->getSubRegisters(DestReg);
BuildMI(MBB, I, DL, get(Mips::LWC1), SubSet[0])
.addFrameIndex(FI).addImm(0);
BuildMI(MBB, I, DL, get(Mips::LWC1), SubSet[1])
.addFrameIndex(FI).addImm(4);
}
BuildMI(MBB, I, DL, get(Mips::LDC1), DestReg).addFrameIndex(FI).addImm(0);
} else
llvm_unreachable("Register class not handled!");
}


@@ -83,6 +83,50 @@ MCOperand MipsMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
Ctx));
}
// If target is Mips1, expand double precision load/store to two single
// precision loads/stores.
//
// ldc1 $f0, lo($CPI0_0)($5) gets expanded to the following two instructions:
// (little endian)
// lwc1 $f0, lo($CPI0_0)($5) and
// lwc1 $f1, lo($CPI0_0+4)($5)
// (big endian)
// lwc1 $f1, lo($CPI0_0)($5) and
// lwc1 $f0, lo($CPI0_0+4)($5)
void MipsMCInstLower::LowerMips1F64LoadStore(const MachineInstr *MI,
unsigned Opc,
SmallVector<MCInst, 4>& MCInsts,
bool isLittle,
const unsigned *SubReg) const {
MCInst InstLo, InstHi, DelaySlot;
unsigned SingleOpc = (Opc == Mips::LDC1 ? Mips::LWC1 : Mips::SWC1);
unsigned RegLo = isLittle ? *SubReg : *(SubReg + 1);
unsigned RegHi = isLittle ? *(SubReg + 1) : *SubReg;
const MachineOperand &MO1 = MI->getOperand(1);
const MachineOperand &MO2 = MI->getOperand(2);
InstLo.setOpcode(SingleOpc);
InstLo.addOperand(MCOperand::CreateReg(RegLo));
InstLo.addOperand(LowerOperand(MO1));
InstLo.addOperand(LowerOperand(MO2));
MCInsts.push_back(InstLo);
InstHi.setOpcode(SingleOpc);
InstHi.addOperand(MCOperand::CreateReg(RegHi));
InstHi.addOperand(LowerOperand(MO1));
if (MO2.isImm())// The offset of addr operand is an immediate: e.g. 0($sp)
InstHi.addOperand(MCOperand::CreateImm(MO2.getImm() + 4));
else// Otherwise, the offset must be a symbol: e.g. lo($CPI0_0)($5)
InstHi.addOperand(LowerSymbolOperand(MO2, MO2.getType(), 4));
MCInsts.push_back(InstHi);
// Need to insert a NOP in LWC1's delay slot.
if (SingleOpc == Mips::LWC1) {
DelaySlot.setOpcode(Mips::NOP);
MCInsts.push_back(DelaySlot);
}
}
MCOperand MipsMCInstLower::LowerOperand(const MachineOperand& MO) const {
MachineOperandType MOTy = MO.getType();


@@ -9,6 +9,7 @@
#ifndef MIPSMCINSTLOWER_H
#define MIPSMCINSTLOWER_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/Support/Compiler.h"
@@ -34,6 +35,9 @@ public:
MipsMCInstLower(Mangler *mang, const MachineFunction &MF,
MipsAsmPrinter &asmprinter);
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
void LowerMips1F64LoadStore(const MachineInstr *MI, unsigned Opc,
SmallVector<MCInst, 4>& MCInsts,
bool isLittle, const unsigned *SubReg) const;
private:
MCOperand LowerSymbolOperand(const MachineOperand &MO,
MachineOperandType MOTy, unsigned Offset) const;


@@ -25,7 +25,7 @@ entry:
define double @sel2_1(i32 %s, double %f0, double %f1) nounwind readnone {
entry:
; CHECK-MIPS32R2: movn.d
; CHECK-MIPS1: beq
; CHECK-MIPS1: bne
%tobool = icmp ne i32 %s, 0
%cond = select i1 %tobool, double %f0, double %f1
ret double %cond