- Remove custom lowering for BRCOND
- Add remaining functionality for branches in SPUInstrInfo, such as branch
  condition reversal and load/store folding (see the sketches below). Update
  the BrCond test to reflect branch reversal.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@61597 91177308-0d34-0410-b5e6-96231b3b80d8
Scott Michel 2009-01-03 00:27:53 +00:00
parent 3d45f53b7a
commit 52d0001cfc
4 changed files with 123 additions and 110 deletions
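
For context, here is a minimal standalone sketch of the table-driven
branch-condition reversal this commit introduces. The opcode values and the
reverseCond helper are hypothetical stand-ins, not the real SPU:: enumerators
or SPUInstrInfo::ReverseBranchCondition; the point is the opcode-pairing table
and LLVM's return-false-on-success convention:

    #include <cstdio>

    // Hypothetical conditional-branch opcodes standing in for SPU::BRNZr32 etc.
    enum Opcode { BRNZ, BRZ, BRHNZ, BRHZ };

    // Each opcode is paired with its inverse, in both directions.
    static const struct { unsigned Opc, RevOpc; } RevConds[] = {
      { BRNZ,  BRZ   }, { BRZ,   BRNZ  },
      { BRHNZ, BRHZ  }, { BRHZ,  BRHNZ },
    };

    // Illustrative stand-in for ReverseBranchCondition: returns false on
    // success (the LLVM convention), true if the opcode has no known inverse.
    bool reverseCond(unsigned &Opc) {
      for (const auto &RC : RevConds)
        if (RC.Opc == Opc) { Opc = RC.RevOpc; return false; }
      return true;
    }

    int main() {
      unsigned Opc = BRNZ;
      if (!reverseCond(Opc))
        std::printf("BRNZ (%u) reverses to BRZ (%u)\n", (unsigned)BRNZ, Opc);
      return 0;
    }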


@@ -147,10 +147,6 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
}
}
// Custom lower BRCOND for i8 to "promote" the result to whatever the result
// operand happens to be:
setOperationAction(ISD::BRCOND, MVT::Other, Custom);
// Expand the jumptable branches
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, MVT::Other, Expand);
@@ -903,33 +899,6 @@ LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
return SDValue();
}
static SDValue
LowerBRCOND(SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI) {
SDValue Cond = Op.getOperand(1);
MVT CondVT = Cond.getValueType();
unsigned CondOpc;
if (CondVT == MVT::i8) {
SDValue CondOp0 = Cond.getOperand(0);
if (Cond.getOpcode() == ISD::TRUNCATE) {
// Use the truncate's value type and ANY_EXTEND the condition (DAGcombine
// will then remove the truncate)
CondVT = CondOp0.getValueType();
CondOpc = ISD::ANY_EXTEND;
} else {
CondVT = MVT::i32; // default to something reasonable
CondOpc = ISD::ZERO_EXTEND;
}
Cond = DAG.getNode(CondOpc, CondVT, Op.getOperand(1));
return DAG.getNode(ISD::BRCOND, Op.getValueType(),
Op.getOperand(0), Cond, Op.getOperand(2));
}
return SDValue(); // Unchanged
}
static SDValue
LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG, int &VarArgsFrameIndex)
{
@@ -2526,9 +2495,8 @@ static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
MVT Op0VT = Op0.getValueType();
MVT Op0VecVT = MVT::getVectorVT(Op0VT, (128 / Op0VT.getSizeInBits()));
// Create shuffle mask
if (Op0VT.getSimpleVT() == MVT::i128 && simpleVT == MVT::i64) {
// least significant doubleword of quadword
// Create shuffle mask, least significant doubleword of quadword
unsigned maskHigh = 0x08090a0b;
unsigned maskLow = 0x0c0d0e0f;
// Use a shuffle to perform the truncation
@@ -2587,8 +2555,6 @@ SPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
return LowerConstant(Op, DAG);
case ISD::ConstantFP:
return LowerConstantFP(Op, DAG);
case ISD::BRCOND:
return LowerBRCOND(Op, DAG, *this);
case ISD::FORMAL_ARGUMENTS:
return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex);
case ISD::CALL:


@@ -32,6 +32,7 @@ namespace {
|| opc == SPU::BI);
}
//! Predicate for a conditional branch instruction
inline bool isCondBranch(const MachineInstr *I) {
unsigned opc = I->getOpcode();
@@ -50,9 +51,7 @@ SPUInstrInfo::SPUInstrInfo(SPUTargetMachine &tm)
: TargetInstrInfoImpl(SPUInsts, sizeof(SPUInsts)/sizeof(SPUInsts[0])),
TM(tm),
RI(*TM.getSubtargetImpl(), *this)
{
/* NOP */
}
{ /* NOP */ }
/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
@@ -135,7 +134,7 @@ SPUInstrInfo::isMoveInstr(const MachineInstr& MI,
assert(MI.getNumOperands() == 2 &&
MI.getOperand(0).isReg() &&
MI.getOperand(1).isReg() &&
"invalid SPU OR<type>_<vec> instruction!");
"invalid SPU OR<type>_<vec> or LR instruction!");
if (MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
sourceReg = MI.getOperand(0).getReg();
destReg = MI.getOperand(0).getReg();
@@ -146,6 +145,9 @@ SPUInstrInfo::isMoveInstr(const MachineInstr& MI,
case SPU::ORv16i8:
case SPU::ORv8i16:
case SPU::ORv4i32:
case SPU::ORv2i64:
case SPU::ORr8:
case SPU::ORr16:
case SPU::ORr32:
case SPU::ORr64:
case SPU::ORf32:
@@ -182,29 +184,12 @@ SPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
case SPU::LQDr16: {
const MachineOperand MOp1 = MI->getOperand(1);
const MachineOperand MOp2 = MI->getOperand(2);
if (MOp1.isImm()
&& (MOp2.isFI()
|| (MOp2.isReg() && MOp2.getReg() == SPU::R1))) {
if (MOp2.isFI())
FrameIndex = MOp2.getIndex();
else
FrameIndex = MOp1.getImm() / SPUFrameInfo::stackSlotSize();
if (MOp1.isImm() && MOp2.isFI()) {
FrameIndex = MOp2.getIndex();
return MI->getOperand(0).getReg();
}
break;
}
case SPU::LQXv4i32:
case SPU::LQXr128:
case SPU::LQXr64:
case SPU::LQXr32:
case SPU::LQXr16:
if (MI->getOperand(1).isReg() && MI->getOperand(2).isReg()
&& (MI->getOperand(2).getReg() == SPU::R1
|| MI->getOperand(1).getReg() == SPU::R1)) {
FrameIndex = MI->getOperand(2).getIndex();
return MI->getOperand(0).getReg();
}
break;
}
return 0;
}
@@ -232,25 +217,6 @@ SPUInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
}
break;
}
#if 0
case SPU::STQXv16i8:
case SPU::STQXv8i16:
case SPU::STQXv4i32:
case SPU::STQXv4f32:
case SPU::STQXv2f64:
case SPU::STQXr128:
case SPU::STQXr64:
case SPU::STQXr32:
case SPU::STQXr16:
case SPU::STQXr8:
if (MI->getOperand(1).isReg() && MI->getOperand(2).isReg()
&& (MI->getOperand(2).getReg() == SPU::R1
|| MI->getOperand(1).getReg() == SPU::R1)) {
FrameIndex = MI->getOperand(2).getIndex();
return MI->getOperand(0).getReg();
}
break;
#endif
}
return 0;
}
@@ -445,6 +411,34 @@ void SPUInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
}
}
//! Return true if the specified load or store can be folded
bool
SPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops) const {
if (Ops.size() != 1) return false;
// Make sure this is a reg-reg copy.
unsigned Opc = MI->getOpcode();
switch (Opc) {
case SPU::ORv16i8:
case SPU::ORv8i16:
case SPU::ORv4i32:
case SPU::ORv2i64:
case SPU::ORr8:
case SPU::ORr16:
case SPU::ORr32:
case SPU::ORr64:
case SPU::ORf32:
case SPU::ORf64:
if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg())
return true;
break;
}
return false;
}
/// foldMemoryOperand - SPU, like PPC, can only fold spills into
/// copy instructions, turning them into load/store instructions.
MachineInstr *
@@ -453,38 +447,46 @@ SPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const
{
#if SOMEDAY_SCOTT_LOOKS_AT_ME_AGAIN
if (Ops.size() != 1) return NULL;
if (Ops.size() != 1) return 0;
unsigned OpNum = Ops[0];
unsigned Opc = MI->getOpcode();
MachineInstr *NewMI = 0;
if ((Opc == SPU::ORr32
|| Opc == SPU::ORv4i32)
&& MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
switch (Opc) {
case SPU::ORv16i8:
case SPU::ORv8i16:
case SPU::ORv4i32:
case SPU::ORv2i64:
case SPU::ORr8:
case SPU::ORr16:
case SPU::ORr32:
case SPU::ORr64:
case SPU::ORf32:
case SPU::ORf64:
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
bool isKill = MI->getOperand(1).isKill();
if (FrameIndex < SPUFrameInfo::maxFrameOffset()) {
NewMI = addFrameReference(BuildMI(MF, TII.get(SPU::STQDr32))
.addReg(InReg, false, false, isKill),
FrameIndex);
MachineInstrBuilder MIB = BuildMI(MF, get(SPU::STQDr32));
MIB.addReg(InReg, false, false, isKill);
NewMI = addFrameReference(MIB, FrameIndex);
}
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
bool isDead = MI->getOperand(0).isDead();
MachineInstrBuilder MIB = BuildMI(MF, get(Opc));
MIB.addReg(OutReg, true, false, false, isDead);
Opc = (FrameIndex < SPUFrameInfo::maxFrameOffset())
? SPU::STQDr32 : SPU::STQXr32;
NewMI = addFrameReference(BuildMI(MF, TII.get(Opc))
.addReg(OutReg, true, false, false, isDead), FrameIndex);
}
NewMI = addFrameReference(MIB, FrameIndex);
break;
}
}
return NewMI;
#else
return 0;
#endif
}
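
As an aside, a distilled sketch of what foldMemoryOperandImpl achieves, under
invented types (Instr and foldCopy are not the commit's code): folding operand
0 of a reg-reg copy turns the copy into a store of its source register to the
stack slot, while folding the source operand turns it into a load into the
destination. On SPU the stored form would be STQDr32 when the frame offset
fits the D-form immediate, as the code above selects.

    #include <cstdio>

    // Invented instruction representation for illustration only.
    enum Kind { Copy, StoreToSlot, LoadFromSlot };
    struct Instr { Kind kind; int dst, src, slot; };

    // Fold a stack-slot reference into a reg-reg copy.  opNum == 0 means the
    // copy's result is being spilled (emit a store of the source register);
    // otherwise the copy's input is being reloaded (emit a load into dst).
    Instr foldCopy(const Instr &copy, unsigned opNum, int slot) {
      if (opNum == 0)
        return { StoreToSlot, -1, copy.src, slot };   // move -> store
      return { LoadFromSlot, copy.dst, -1, slot };    // move -> load
    }

    int main() {
      Instr copy = { Copy, /*dst=*/3, /*src=*/4, /*slot=*/-1 };
      Instr st = foldCopy(copy, 0, /*slot=*/2);
      std::printf("fold: store r%d to frame slot %d\n", st.src, st.slot);
      return 0;
    }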
//! Branch analysis
@@ -625,4 +627,38 @@ SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
}
}
bool
SPUInstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
return (!MBB.empty() && isUncondBranch(&MBB.back()));
}
//! Reverses a branch's condition, returning false on success.
bool
SPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
const {
// Pretty brainless way of inverting the condition, but it works, considering
// there are only two conditions...
static struct {
unsigned Opc; //! The incoming opcode
unsigned RevCondOpc; //! The reversed condition opcode
} revconds[] = {
{ SPU::BRNZr32, SPU::BRZr32 },
{ SPU::BRNZv4i32, SPU::BRZv4i32 },
{ SPU::BRZr32, SPU::BRNZr32 },
{ SPU::BRZv4i32, SPU::BRNZv4i32 },
{ SPU::BRHNZr16, SPU::BRHZr16 },
{ SPU::BRHNZv8i16, SPU::BRHZv8i16 },
{ SPU::BRHZr16, SPU::BRHNZr16 },
{ SPU::BRHZv8i16, SPU::BRHNZv8i16 }
};
unsigned Opc = unsigned(Cond[0].getImm());
// Pretty dull mapping between the two conditions that SPU can generate:
for (int i = sizeof(revconds)/sizeof(revconds[0]) - 1; i >= 0; --i) {
if (revconds[i].Opc == Opc) {
Cond[0].setImm(revconds[i].RevCondOpc);
return false;
}
}
return true;
}


@@ -23,6 +23,19 @@ namespace llvm {
class SPUInstrInfo : public TargetInstrInfoImpl {
SPUTargetMachine &TM;
const SPURegisterInfo RI;
protected:
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
public:
explicit SPUInstrInfo(SPUTargetMachine &tm);
@@ -34,7 +47,7 @@ namespace llvm {
/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
virtual const TargetRegisterClass *getPointerRegClass() const;
// Return true if the instruction is a register to register move and
// leave the source and dest operands in the passed parameters.
@@ -47,13 +60,13 @@ namespace llvm {
int &FrameIndex) const;
unsigned isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
virtual bool copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC) const;
//! Store a register to a stack slot, based on its register class.
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -77,27 +90,25 @@ namespace llvm {
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
//! Fold spills into load/store instructions
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
//! Fold any load/store to an operand
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
//! Return true if the specified load or store can be folded
virtual
bool canFoldMemoryOperand(const MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops) const;
//! Return true if the specified block does not fall through
virtual bool BlockHasNoFallThrough(const MachineBasicBlock &MBB) const;
//! Reverses a branch's condition, returning false on success.
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond) const;
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond) const;


@@ -1,4 +1,4 @@
; RUN: llvm-as < %s | llc -march=cellspu -o - | grep brnz
; RUN: llvm-as < %s | llc -march=cellspu -o - | grep brz
; PR3274
target datalayout = "E-p:32:32:128-i1:8:128-i8:8:128-i16:16:128-i32:32:128-i64:32:128-f32:32:128-f64:64:128-v64:64:64-v128:128:128-a0:0:128-s0:128:128"