Optimize redundant sign extends and negation of predicates

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@150601 91177308-0d34-0410-b5e6-96231b3b80d8
Sirish Pande 2012-02-15 18:22:18 +00:00
parent 40d552e0be
commit 11241abca5
13 changed files with 745 additions and 80 deletions
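
The hunks below replace the old HexagonOptimizeSZExtends pass with a more general HexagonPeephole pass in the build and the instruction-selection pipeline, add HexagonInstrInfo::getInvertedPredicatedOpcode(), rename the conditional jumps and predicate-not instructions (JMP_Pred* to JMP_c*, NOT_pp/NOT_Ps to NOT_p), and add the V4 new-value compare-and-jump multiclasses. HexagonPeephole.cpp itself is not reproduced in these hunks, so what follows is only a minimal sketch of the "redundant sign extend" half of the idea, written against current LLVM-style APIs rather than the 2012 tree; producesSignExtendedValue() is a hypothetical helper and the opcodes checked are illustrative, nothing below is taken from the commit's actual implementation.

// Minimal sketch, under assumptions: drop a sign extend whose input is
// already sign extended, forwarding the earlier value to all users.
#include "HexagonInstrInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;

// Hypothetical helper: true if MI already produces a sign-extended value,
// so a following sxtb/sxth of its result adds nothing.
static bool producesSignExtendedValue(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case Hexagon::SXTB:
  case Hexagon::SXTH:
    return true; // deliberately conservative; a real pass would know more
  default:
    return false;
  }
}

static bool removeRedundantSignExtends(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool Changed = false;
  for (MachineBasicBlock &MBB : MF)
    for (MachineInstr &MI : make_early_inc_range(MBB)) {
      if (MI.getOpcode() != Hexagon::SXTB && MI.getOpcode() != Hexagon::SXTH)
        continue;
      unsigned Dst = MI.getOperand(0).getReg();
      unsigned Src = MI.getOperand(1).getReg();
      MachineInstr *Def = MRI.getVRegDef(Src);
      if (!Def || !producesSignExtendedValue(*Def))
        continue;
      // The second extend is a no-op: forward the already-extended value
      // and delete the instruction.
      MRI.replaceRegWith(Dst, Src);
      MI.eraseFromParent();
      Changed = true;
    }
  return Changed;
}

The real pass presumably also knows which Hexagon instructions (compares, sign-extending loads, and so on) already produce sign-extended results; the helper above is only a stand-in for that knowledge.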

View File

@ -20,7 +20,7 @@ add_llvm_target(HexagonCodeGen
HexagonInstrInfo.cpp
HexagonISelDAGToDAG.cpp
HexagonISelLowering.cpp
HexagonOptimizeSZExtends.cpp
HexagonPeephole.cpp
HexagonRegisterInfo.cpp
HexagonRemoveSZExtArgs.cpp
HexagonSelectionDAGInfo.cpp

View File

@ -35,7 +35,7 @@ namespace llvm {
FunctionPass* createHexagonExpandPredSpillCode(HexagonTargetMachine &TM);
FunctionPass *createHexagonHardwareLoops();
FunctionPass *createHexagonOptimizeSZExtends();
FunctionPass *createHexagonPeephole();
FunctionPass *createHexagonFixupHwLoops();
} // end namespace llvm;

View File

@ -125,6 +125,11 @@ namespace {
O << -value;
}
void printHexagonNOneImmOperand(const MachineInstr *MI, unsigned OpNo,
raw_ostream &O) const {
O << -1;
}
void printHexagonMEMriOperand(const MachineInstr *MI, unsigned OpNo,
raw_ostream &O) {
const MachineOperand &MO1 = MI->getOperand(OpNo);

View File

@ -51,8 +51,8 @@ private:
char HexagonCFGOptimizer::ID = 0;
static bool IsConditionalBranch(int Opc) {
return (Opc == Hexagon::JMP_Pred) || (Opc == Hexagon::JMP_PredNot)
|| (Opc == Hexagon::JMP_PredPt) || (Opc == Hexagon::JMP_PredNotPt);
return (Opc == Hexagon::JMP_c) || (Opc == Hexagon::JMP_cNot)
|| (Opc == Hexagon::JMP_cdnPt) || (Opc == Hexagon::JMP_cdnNotPt);
}
@ -67,20 +67,20 @@ HexagonCFGOptimizer::InvertAndChangeJumpTarget(MachineInstr* MI,
const HexagonInstrInfo *QII = QTM.getInstrInfo();
int NewOpcode = 0;
switch(MI->getOpcode()) {
case Hexagon::JMP_Pred:
NewOpcode = Hexagon::JMP_PredNot;
case Hexagon::JMP_c:
NewOpcode = Hexagon::JMP_cNot;
break;
case Hexagon::JMP_PredNot:
NewOpcode = Hexagon::JMP_Pred;
case Hexagon::JMP_cNot:
NewOpcode = Hexagon::JMP_c;
break;
case Hexagon::JMP_PredPt:
NewOpcode = Hexagon::JMP_PredNotPt;
case Hexagon::JMP_cdnPt:
NewOpcode = Hexagon::JMP_cdnNotPt;
break;
case Hexagon::JMP_PredNotPt:
NewOpcode = Hexagon::JMP_PredPt;
case Hexagon::JMP_cdnNotPt:
NewOpcode = Hexagon::JMP_cdnPt;
break;
default:
@ -155,8 +155,8 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
// The target of the unconditional branch must be JumpAroundTarget.
// TODO: If not, we should not invert the unconditional branch.
MachineBasicBlock* CondBranchTarget = NULL;
if ((MI->getOpcode() == Hexagon::JMP_Pred) ||
(MI->getOpcode() == Hexagon::JMP_PredNot)) {
if ((MI->getOpcode() == Hexagon::JMP_c) ||
(MI->getOpcode() == Hexagon::JMP_cNot)) {
CondBranchTarget = MI->getOperand(1).getMBB();
}

View File

@ -517,8 +517,8 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) {
// The loop ends with either:
// - a conditional branch followed by an unconditional branch, or
// - a conditional branch to the loop start.
if (LastI->getOpcode() == Hexagon::JMP_Pred ||
LastI->getOpcode() == Hexagon::JMP_PredNot) {
if (LastI->getOpcode() == Hexagon::JMP_c ||
LastI->getOpcode() == Hexagon::JMP_cNot) {
// delete one and change/add an uncond. branch to out of the loop
MachineBasicBlock *BranchTarget = LastI->getOperand(1).getMBB();
LastI = LastMBB->erase(LastI);

View File

@ -238,7 +238,7 @@ static unsigned doesIntrinsicContainPredicate(unsigned ID)
case Intrinsic::hexagon_C2_or:
return Hexagon::OR_pp;
case Intrinsic::hexagon_C2_not:
return Hexagon::NOT_pp;
return Hexagon::NOT_p;
case Intrinsic::hexagon_C2_any8:
return Hexagon::ANY_pp;
case Intrinsic::hexagon_C2_all8:
@ -1178,7 +1178,7 @@ SDNode *HexagonDAGToDAGISel::SelectConstant(SDNode *N) {
SDValue(IntRegTFR, 0));
// not(Pd)
SDNode* NotPd = CurDAG->getMachineNode(Hexagon::NOT_pp, dl, MVT::i1,
SDNode* NotPd = CurDAG->getMachineNode(Hexagon::NOT_p, dl, MVT::i1,
SDValue(Pd, 0));
// xor(not(Pd))

View File

@ -52,12 +52,12 @@ def s10Imm : Operand<i32> {
let PrintMethod = "printHexagonImmOperand";
}
def s8Imm : Operand<i32> {
def s9Imm : Operand<i32> {
// For now, we use a generic print function for all operands.
let PrintMethod = "printHexagonImmOperand";
}
def s9Imm : Operand<i32> {
def s8Imm : Operand<i32> {
// For now, we use a generic print function for all operands.
let PrintMethod = "printHexagonImmOperand";
}
@ -197,6 +197,11 @@ def u2Imm : Operand<i32> {
let PrintMethod = "printHexagonImmOperand";
}
def u1Imm : Operand<i32> {
// For now, we use a generic print function for all operands.
let PrintMethod = "printHexagonImmOperand";
}
def n8Imm : Operand<i32> {
// For now, we use a generic print function for all operands.
let PrintMethod = "printHexagonImmOperand";
@ -207,6 +212,11 @@ def m6Imm : Operand<i32> {
let PrintMethod = "printHexagonImmOperand";
}
def nOneImm : Operand<i32> {
// For now, we use a generic print function for all operands.
let PrintMethod = "printHexagonNOneImmOperand";
}
//
// Immediate predicates
//
@ -489,3 +499,10 @@ def n8ImmPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
return (-255 <= v && v <= 0);
}]>;
def nOneImmPred : PatLeaf<(i32 imm), [{
// nOneImmPred predicate - True if the immediate is -1.
int64_t v = (int64_t)N->getSExtValue();
return (-1 == v);
}]>;

View File

@ -124,16 +124,16 @@ HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
DebugLoc DL) const{
int BOpc = Hexagon::JMP;
int BccOpc = Hexagon::JMP_Pred;
int BccOpc = Hexagon::JMP_c;
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
int regPos = 0;
// Check if ReverseBranchCondition has asked to reverse this branch
// If we want to reverse the branch an odd number of times, we want
// JMP_PredNot.
// JMP_cNot.
if (!Cond.empty() && Cond[0].isImm() && Cond[0].getImm() == 0) {
BccOpc = Hexagon::JMP_PredNot;
BccOpc = Hexagon::JMP_cNot;
regPos = 1;
}
@ -221,13 +221,13 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
TBB = LastInst->getOperand(0).getMBB();
return false;
}
if (LastInst->getOpcode() == Hexagon::JMP_Pred) {
if (LastInst->getOpcode() == Hexagon::JMP_c) {
// Block ends with fall-through true condbranch.
TBB = LastInst->getOperand(1).getMBB();
Cond.push_back(LastInst->getOperand(0));
return false;
}
if (LastInst->getOpcode() == Hexagon::JMP_PredNot) {
if (LastInst->getOpcode() == Hexagon::JMP_cNot) {
// Block ends with fall-through false condbranch.
TBB = LastInst->getOperand(1).getMBB();
Cond.push_back(MachineOperand::CreateImm(0));
@ -248,7 +248,7 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// If the block ends with Hexagon::BRCOND and Hexagon:JMP, handle it.
if (((SecondLastInst->getOpcode() == Hexagon::BRCOND) ||
(SecondLastInst->getOpcode() == Hexagon::JMP_Pred)) &&
(SecondLastInst->getOpcode() == Hexagon::JMP_c)) &&
LastInst->getOpcode() == Hexagon::JMP) {
TBB = SecondLastInst->getOperand(1).getMBB();
Cond.push_back(SecondLastInst->getOperand(0));
@ -256,8 +256,8 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
return false;
}
// If the block ends with Hexagon::JMP_PredNot and Hexagon:JMP, handle it.
if ((SecondLastInst->getOpcode() == Hexagon::JMP_PredNot) &&
// If the block ends with Hexagon::JMP_cNot and Hexagon:JMP, handle it.
if ((SecondLastInst->getOpcode() == Hexagon::JMP_cNot) &&
LastInst->getOpcode() == Hexagon::JMP) {
TBB = SecondLastInst->getOperand(1).getMBB();
Cond.push_back(MachineOperand::CreateImm(0));
@ -284,8 +284,8 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
unsigned HexagonInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
int BOpc = Hexagon::JMP;
int BccOpc = Hexagon::JMP_Pred;
int BccOpcNot = Hexagon::JMP_PredNot;
int BccOpc = Hexagon::JMP_c;
int BccOpcNot = Hexagon::JMP_cNot;
MachineBasicBlock::iterator I = MBB.end();
if (I == MBB.begin()) return 0;
@ -346,9 +346,9 @@ void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
if (Hexagon::CRRegsRegClass.contains(DestReg, SrcReg)) {
BuildMI(MBB, I, DL, get(Hexagon::TFCR), DestReg).addReg(SrcReg);
return;
}
assert (0 && "Unimplemented");
}
llvm_unreachable("Unimplemented");
}
@ -557,6 +557,463 @@ bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const {
return true;
}
unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
switch(Opc) {
case Hexagon::TFR_cPt:
return Hexagon::TFR_cNotPt;
case Hexagon::TFR_cNotPt:
return Hexagon::TFR_cPt;
case Hexagon::TFRI_cPt:
return Hexagon::TFRI_cNotPt;
case Hexagon::TFRI_cNotPt:
return Hexagon::TFRI_cPt;
case Hexagon::JMP_c:
return Hexagon::JMP_cNot;
case Hexagon::JMP_cNot:
return Hexagon::JMP_c;
case Hexagon::ADD_ri_cPt:
return Hexagon::ADD_ri_cNotPt;
case Hexagon::ADD_ri_cNotPt:
return Hexagon::ADD_ri_cPt;
case Hexagon::ADD_rr_cPt:
return Hexagon::ADD_rr_cNotPt;
case Hexagon::ADD_rr_cNotPt:
return Hexagon::ADD_rr_cPt;
case Hexagon::XOR_rr_cPt:
return Hexagon::XOR_rr_cNotPt;
case Hexagon::XOR_rr_cNotPt:
return Hexagon::XOR_rr_cPt;
case Hexagon::AND_rr_cPt:
return Hexagon::AND_rr_cNotPt;
case Hexagon::AND_rr_cNotPt:
return Hexagon::AND_rr_cPt;
case Hexagon::OR_rr_cPt:
return Hexagon::OR_rr_cNotPt;
case Hexagon::OR_rr_cNotPt:
return Hexagon::OR_rr_cPt;
case Hexagon::SUB_rr_cPt:
return Hexagon::SUB_rr_cNotPt;
case Hexagon::SUB_rr_cNotPt:
return Hexagon::SUB_rr_cPt;
case Hexagon::COMBINE_rr_cPt:
return Hexagon::COMBINE_rr_cNotPt;
case Hexagon::COMBINE_rr_cNotPt:
return Hexagon::COMBINE_rr_cPt;
case Hexagon::ASLH_cPt_V4:
return Hexagon::ASLH_cNotPt_V4;
case Hexagon::ASLH_cNotPt_V4:
return Hexagon::ASLH_cPt_V4;
case Hexagon::ASRH_cPt_V4:
return Hexagon::ASRH_cNotPt_V4;
case Hexagon::ASRH_cNotPt_V4:
return Hexagon::ASRH_cPt_V4;
case Hexagon::SXTB_cPt_V4:
return Hexagon::SXTB_cNotPt_V4;
case Hexagon::SXTB_cNotPt_V4:
return Hexagon::SXTB_cPt_V4;
case Hexagon::SXTH_cPt_V4:
return Hexagon::SXTH_cNotPt_V4;
case Hexagon::SXTH_cNotPt_V4:
return Hexagon::SXTH_cPt_V4;
case Hexagon::ZXTB_cPt_V4:
return Hexagon::ZXTB_cNotPt_V4;
case Hexagon::ZXTB_cNotPt_V4:
return Hexagon::ZXTB_cPt_V4;
case Hexagon::ZXTH_cPt_V4:
return Hexagon::ZXTH_cNotPt_V4;
case Hexagon::ZXTH_cNotPt_V4:
return Hexagon::ZXTH_cPt_V4;
case Hexagon::JMPR_cPt:
return Hexagon::JMPR_cNotPt;
case Hexagon::JMPR_cNotPt:
return Hexagon::JMPR_cPt;
// V4 indexed+scaled load.
case Hexagon::LDrid_indexed_cPt_V4:
return Hexagon::LDrid_indexed_cNotPt_V4;
case Hexagon::LDrid_indexed_cNotPt_V4:
return Hexagon::LDrid_indexed_cPt_V4;
case Hexagon::LDrid_indexed_shl_cPt_V4:
return Hexagon::LDrid_indexed_shl_cNotPt_V4;
case Hexagon::LDrid_indexed_shl_cNotPt_V4:
return Hexagon::LDrid_indexed_shl_cPt_V4;
case Hexagon::LDrib_indexed_cPt_V4:
return Hexagon::LDrib_indexed_cNotPt_V4;
case Hexagon::LDrib_indexed_cNotPt_V4:
return Hexagon::LDrib_indexed_cPt_V4;
case Hexagon::LDriub_indexed_cPt_V4:
return Hexagon::LDriub_indexed_cNotPt_V4;
case Hexagon::LDriub_indexed_cNotPt_V4:
return Hexagon::LDriub_indexed_cPt_V4;
case Hexagon::LDrib_indexed_shl_cPt_V4:
return Hexagon::LDrib_indexed_shl_cNotPt_V4;
case Hexagon::LDrib_indexed_shl_cNotPt_V4:
return Hexagon::LDrib_indexed_shl_cPt_V4;
case Hexagon::LDriub_indexed_shl_cPt_V4:
return Hexagon::LDriub_indexed_shl_cNotPt_V4;
case Hexagon::LDriub_indexed_shl_cNotPt_V4:
return Hexagon::LDriub_indexed_shl_cPt_V4;
case Hexagon::LDrih_indexed_cPt_V4:
return Hexagon::LDrih_indexed_cNotPt_V4;
case Hexagon::LDrih_indexed_cNotPt_V4:
return Hexagon::LDrih_indexed_cPt_V4;
case Hexagon::LDriuh_indexed_cPt_V4:
return Hexagon::LDriuh_indexed_cNotPt_V4;
case Hexagon::LDriuh_indexed_cNotPt_V4:
return Hexagon::LDriuh_indexed_cPt_V4;
case Hexagon::LDrih_indexed_shl_cPt_V4:
return Hexagon::LDrih_indexed_shl_cNotPt_V4;
case Hexagon::LDrih_indexed_shl_cNotPt_V4:
return Hexagon::LDrih_indexed_shl_cPt_V4;
case Hexagon::LDriuh_indexed_shl_cPt_V4:
return Hexagon::LDriuh_indexed_shl_cNotPt_V4;
case Hexagon::LDriuh_indexed_shl_cNotPt_V4:
return Hexagon::LDriuh_indexed_shl_cPt_V4;
case Hexagon::LDriw_indexed_cPt_V4:
return Hexagon::LDriw_indexed_cNotPt_V4;
case Hexagon::LDriw_indexed_cNotPt_V4:
return Hexagon::LDriw_indexed_cPt_V4;
case Hexagon::LDriw_indexed_shl_cPt_V4:
return Hexagon::LDriw_indexed_shl_cNotPt_V4;
case Hexagon::LDriw_indexed_shl_cNotPt_V4:
return Hexagon::LDriw_indexed_shl_cPt_V4;
// Byte.
case Hexagon::POST_STbri_cPt:
return Hexagon::POST_STbri_cNotPt;
case Hexagon::POST_STbri_cNotPt:
return Hexagon::POST_STbri_cPt;
case Hexagon::STrib_cPt:
return Hexagon::STrib_cNotPt;
case Hexagon::STrib_cNotPt:
return Hexagon::STrib_cPt;
case Hexagon::STrib_indexed_cPt:
return Hexagon::STrib_indexed_cNotPt;
case Hexagon::STrib_indexed_cNotPt:
return Hexagon::STrib_indexed_cPt;
case Hexagon::STrib_imm_cPt_V4:
return Hexagon::STrib_imm_cNotPt_V4;
case Hexagon::STrib_imm_cNotPt_V4:
return Hexagon::STrib_imm_cPt_V4;
case Hexagon::STrib_indexed_shl_cPt_V4:
return Hexagon::STrib_indexed_shl_cNotPt_V4;
case Hexagon::STrib_indexed_shl_cNotPt_V4:
return Hexagon::STrib_indexed_shl_cPt_V4;
// Halfword.
case Hexagon::POST_SThri_cPt:
return Hexagon::POST_SThri_cNotPt;
case Hexagon::POST_SThri_cNotPt:
return Hexagon::POST_SThri_cPt;
case Hexagon::STrih_cPt:
return Hexagon::STrih_cNotPt;
case Hexagon::STrih_cNotPt:
return Hexagon::STrih_cPt;
case Hexagon::STrih_indexed_cPt:
return Hexagon::STrih_indexed_cNotPt;
case Hexagon::STrih_indexed_cNotPt:
return Hexagon::STrih_indexed_cPt;
case Hexagon::STrih_imm_cPt_V4:
return Hexagon::STrih_imm_cNotPt_V4;
case Hexagon::STrih_imm_cNotPt_V4:
return Hexagon::STrih_imm_cPt_V4;
case Hexagon::STrih_indexed_shl_cPt_V4:
return Hexagon::STrih_indexed_shl_cNotPt_V4;
case Hexagon::STrih_indexed_shl_cNotPt_V4:
return Hexagon::STrih_indexed_shl_cPt_V4;
// Word.
case Hexagon::POST_STwri_cPt:
return Hexagon::POST_STwri_cNotPt;
case Hexagon::POST_STwri_cNotPt:
return Hexagon::POST_STwri_cPt;
case Hexagon::STriw_cPt:
return Hexagon::STriw_cNotPt;
case Hexagon::STriw_cNotPt:
return Hexagon::STriw_cPt;
case Hexagon::STriw_indexed_cPt:
return Hexagon::STriw_indexed_cNotPt;
case Hexagon::STriw_indexed_cNotPt:
return Hexagon::STriw_indexed_cPt;
case Hexagon::STriw_indexed_shl_cPt_V4:
return Hexagon::STriw_indexed_shl_cNotPt_V4;
case Hexagon::STriw_indexed_shl_cNotPt_V4:
return Hexagon::STriw_indexed_shl_cPt_V4;
case Hexagon::STriw_imm_cPt_V4:
return Hexagon::STriw_imm_cNotPt_V4;
case Hexagon::STriw_imm_cNotPt_V4:
return Hexagon::STriw_imm_cPt_V4;
// Double word.
case Hexagon::POST_STdri_cPt:
return Hexagon::POST_STdri_cNotPt;
case Hexagon::POST_STdri_cNotPt:
return Hexagon::POST_STdri_cPt;
case Hexagon::STrid_cPt:
return Hexagon::STrid_cNotPt;
case Hexagon::STrid_cNotPt:
return Hexagon::STrid_cPt;
case Hexagon::STrid_indexed_cPt:
return Hexagon::STrid_indexed_cNotPt;
case Hexagon::STrid_indexed_cNotPt:
return Hexagon::STrid_indexed_cPt;
case Hexagon::STrid_indexed_shl_cPt_V4:
return Hexagon::STrid_indexed_shl_cNotPt_V4;
case Hexagon::STrid_indexed_shl_cNotPt_V4:
return Hexagon::STrid_indexed_shl_cPt_V4;
// Load.
case Hexagon::LDrid_cPt:
return Hexagon::LDrid_cNotPt;
case Hexagon::LDrid_cNotPt:
return Hexagon::LDrid_cPt;
case Hexagon::LDriw_cPt:
return Hexagon::LDriw_cNotPt;
case Hexagon::LDriw_cNotPt:
return Hexagon::LDriw_cPt;
case Hexagon::LDrih_cPt:
return Hexagon::LDrih_cNotPt;
case Hexagon::LDrih_cNotPt:
return Hexagon::LDrih_cPt;
case Hexagon::LDriuh_cPt:
return Hexagon::LDriuh_cNotPt;
case Hexagon::LDriuh_cNotPt:
return Hexagon::LDriuh_cPt;
case Hexagon::LDrib_cPt:
return Hexagon::LDrib_cNotPt;
case Hexagon::LDrib_cNotPt:
return Hexagon::LDrib_cPt;
case Hexagon::LDriub_cPt:
return Hexagon::LDriub_cNotPt;
case Hexagon::LDriub_cNotPt:
return Hexagon::LDriub_cPt;
// Load Indexed.
case Hexagon::LDrid_indexed_cPt:
return Hexagon::LDrid_indexed_cNotPt;
case Hexagon::LDrid_indexed_cNotPt:
return Hexagon::LDrid_indexed_cPt;
case Hexagon::LDriw_indexed_cPt:
return Hexagon::LDriw_indexed_cNotPt;
case Hexagon::LDriw_indexed_cNotPt:
return Hexagon::LDriw_indexed_cPt;
case Hexagon::LDrih_indexed_cPt:
return Hexagon::LDrih_indexed_cNotPt;
case Hexagon::LDrih_indexed_cNotPt:
return Hexagon::LDrih_indexed_cPt;
case Hexagon::LDriuh_indexed_cPt:
return Hexagon::LDriuh_indexed_cNotPt;
case Hexagon::LDriuh_indexed_cNotPt:
return Hexagon::LDriuh_indexed_cPt;
case Hexagon::LDrib_indexed_cPt:
return Hexagon::LDrib_indexed_cNotPt;
case Hexagon::LDrib_indexed_cNotPt:
return Hexagon::LDrib_indexed_cPt;
case Hexagon::LDriub_indexed_cPt:
return Hexagon::LDriub_indexed_cNotPt;
case Hexagon::LDriub_indexed_cNotPt:
return Hexagon::LDriub_indexed_cPt;
// Post Inc Load.
case Hexagon::POST_LDrid_cPt:
return Hexagon::POST_LDrid_cNotPt;
case Hexagon::POST_LDriw_cNotPt:
return Hexagon::POST_LDriw_cPt;
case Hexagon::POST_LDrih_cPt:
return Hexagon::POST_LDrih_cNotPt;
case Hexagon::POST_LDrih_cNotPt:
return Hexagon::POST_LDrih_cPt;
case Hexagon::POST_LDriuh_cPt:
return Hexagon::POST_LDriuh_cNotPt;
case Hexagon::POST_LDriuh_cNotPt:
return Hexagon::POST_LDriuh_cPt;
case Hexagon::POST_LDrib_cPt:
return Hexagon::POST_LDrib_cNotPt;
case Hexagon::POST_LDrib_cNotPt:
return Hexagon::POST_LDrib_cPt;
case Hexagon::POST_LDriub_cPt:
return Hexagon::POST_LDriub_cNotPt;
case Hexagon::POST_LDriub_cNotPt:
return Hexagon::POST_LDriub_cPt;
// Dealloc_return.
case Hexagon::DEALLOC_RET_cPt_V4:
return Hexagon::DEALLOC_RET_cNotPt_V4;
case Hexagon::DEALLOC_RET_cNotPt_V4:
return Hexagon::DEALLOC_RET_cPt_V4;
// New Value Jump.
// JMPEQ_ri - with -1.
case Hexagon::JMP_EQriPtneg_nv_V4:
return Hexagon::JMP_EQriNotPtneg_nv_V4;
case Hexagon::JMP_EQriNotPtneg_nv_V4:
return Hexagon::JMP_EQriPtneg_nv_V4;
case Hexagon::JMP_EQriPntneg_nv_V4:
return Hexagon::JMP_EQriNotPntneg_nv_V4;
case Hexagon::JMP_EQriNotPntneg_nv_V4:
return Hexagon::JMP_EQriPntneg_nv_V4;
// JMPEQ_ri.
case Hexagon::JMP_EQriPt_nv_V4:
return Hexagon::JMP_EQriNotPt_nv_V4;
case Hexagon::JMP_EQriNotPt_nv_V4:
return Hexagon::JMP_EQriPt_nv_V4;
case Hexagon::JMP_EQriPnt_nv_V4:
return Hexagon::JMP_EQriNotPnt_nv_V4;
case Hexagon::JMP_EQriNotPnt_nv_V4:
return Hexagon::JMP_EQriPnt_nv_V4;
// JMPEQ_rr.
case Hexagon::JMP_EQrrPt_nv_V4:
return Hexagon::JMP_EQrrNotPt_nv_V4;
case Hexagon::JMP_EQrrNotPt_nv_V4:
return Hexagon::JMP_EQrrPt_nv_V4;
case Hexagon::JMP_EQrrPnt_nv_V4:
return Hexagon::JMP_EQrrNotPnt_nv_V4;
case Hexagon::JMP_EQrrNotPnt_nv_V4:
return Hexagon::JMP_EQrrPnt_nv_V4;
// JMPGT_ri - with -1.
case Hexagon::JMP_GTriPtneg_nv_V4:
return Hexagon::JMP_GTriNotPtneg_nv_V4;
case Hexagon::JMP_GTriNotPtneg_nv_V4:
return Hexagon::JMP_GTriPtneg_nv_V4;
case Hexagon::JMP_GTriPntneg_nv_V4:
return Hexagon::JMP_GTriNotPntneg_nv_V4;
case Hexagon::JMP_GTriNotPntneg_nv_V4:
return Hexagon::JMP_GTriPntneg_nv_V4;
// JMPGT_ri.
case Hexagon::JMP_GTriPt_nv_V4:
return Hexagon::JMP_GTriNotPt_nv_V4;
case Hexagon::JMP_GTriNotPt_nv_V4:
return Hexagon::JMP_GTriPt_nv_V4;
case Hexagon::JMP_GTriPnt_nv_V4:
return Hexagon::JMP_GTriNotPnt_nv_V4;
case Hexagon::JMP_GTriNotPnt_nv_V4:
return Hexagon::JMP_GTriPnt_nv_V4;
// JMPGT_rr.
case Hexagon::JMP_GTrrPt_nv_V4:
return Hexagon::JMP_GTrrNotPt_nv_V4;
case Hexagon::JMP_GTrrNotPt_nv_V4:
return Hexagon::JMP_GTrrPt_nv_V4;
case Hexagon::JMP_GTrrPnt_nv_V4:
return Hexagon::JMP_GTrrNotPnt_nv_V4;
case Hexagon::JMP_GTrrNotPnt_nv_V4:
return Hexagon::JMP_GTrrPnt_nv_V4;
// JMPGT_rrdn.
case Hexagon::JMP_GTrrdnPt_nv_V4:
return Hexagon::JMP_GTrrdnNotPt_nv_V4;
case Hexagon::JMP_GTrrdnNotPt_nv_V4:
return Hexagon::JMP_GTrrdnPt_nv_V4;
case Hexagon::JMP_GTrrdnPnt_nv_V4:
return Hexagon::JMP_GTrrdnNotPnt_nv_V4;
case Hexagon::JMP_GTrrdnNotPnt_nv_V4:
return Hexagon::JMP_GTrrdnPnt_nv_V4;
// JMPGTU_ri.
case Hexagon::JMP_GTUriPt_nv_V4:
return Hexagon::JMP_GTUriNotPt_nv_V4;
case Hexagon::JMP_GTUriNotPt_nv_V4:
return Hexagon::JMP_GTUriPt_nv_V4;
case Hexagon::JMP_GTUriPnt_nv_V4:
return Hexagon::JMP_GTUriNotPnt_nv_V4;
case Hexagon::JMP_GTUriNotPnt_nv_V4:
return Hexagon::JMP_GTUriPnt_nv_V4;
// JMPGTU_rr.
case Hexagon::JMP_GTUrrPt_nv_V4:
return Hexagon::JMP_GTUrrNotPt_nv_V4;
case Hexagon::JMP_GTUrrNotPt_nv_V4:
return Hexagon::JMP_GTUrrPt_nv_V4;
case Hexagon::JMP_GTUrrPnt_nv_V4:
return Hexagon::JMP_GTUrrNotPnt_nv_V4;
case Hexagon::JMP_GTUrrNotPnt_nv_V4:
return Hexagon::JMP_GTUrrPnt_nv_V4;
// JMPGTU_rrdn.
case Hexagon::JMP_GTUrrdnPt_nv_V4:
return Hexagon::JMP_GTUrrdnNotPt_nv_V4;
case Hexagon::JMP_GTUrrdnNotPt_nv_V4:
return Hexagon::JMP_GTUrrdnPt_nv_V4;
case Hexagon::JMP_GTUrrdnPnt_nv_V4:
return Hexagon::JMP_GTUrrdnNotPnt_nv_V4;
case Hexagon::JMP_GTUrrdnNotPnt_nv_V4:
return Hexagon::JMP_GTUrrdnPnt_nv_V4;
default:
llvm_unreachable("Unexpected predicated instruction");
}
}
int HexagonInstrInfo::
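
The hunk above introduces HexagonInstrInfo::getInvertedPredicatedOpcode(), which maps every predicated opcode to its cPt/cNotPt complement. As a hedged sketch of the "negation of predicates" side of the optimization (again, not the commit's actual HexagonPeephole.cpp, and written against current LLVM-style APIs), the idea is to rewrite "p1 = not(p0); if (p1) op" into "if (!p0) op" by flipping the single user to the inverted opcode; the operand positions assumed below are illustrative only:

#include "HexagonInstrInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;

static bool foldPredicateNegations(MachineFunction &MF,
                                   const HexagonInstrInfo *TII) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool Changed = false;
  for (MachineBasicBlock &MBB : MF)
    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != Hexagon::NOT_p)
        continue;
      unsigned NegPred = MI.getOperand(0).getReg(); // p1 in p1 = not(p0)
      unsigned Pred = MI.getOperand(1).getReg();    // p0
      if (!MRI.hasOneNonDBGUse(NegPred))
        continue;
      MachineInstr &UseMI = *MRI.use_instr_nodbg_begin(NegPred);
      // A real pass would also restrict this to opcodes the switch above
      // actually handles; isPredicated() alone is used here for brevity.
      if (!TII->isPredicated(UseMI))
        continue;
      // if (p1) op  ->  if (!p0) op
      unsigned InvOpc = TII->getInvertedPredicatedOpcode(UseMI.getOpcode());
      UseMI.setDesc(TII->get(InvOpc));
      for (MachineOperand &MO : UseMI.operands())
        if (MO.isReg() && MO.getReg() == NegPred)
          MO.setReg(Pred);
      // The now dead not(p0) is left for dead code elimination to remove.
      Changed = true;
    }
  return Changed;
}

Flipping the consumer instead of materializing the inverted predicate saves both the not() instruction and the extra predicate register it would occupy, which is presumably why the complement of every predicated opcode is enumerated above.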
@ -569,8 +1026,8 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const {
return !invertPredicate ? Hexagon::TFRI_cPt :
Hexagon::TFRI_cNotPt;
case Hexagon::JMP:
return !invertPredicate ? Hexagon::JMP_Pred :
Hexagon::JMP_PredNot;
return !invertPredicate ? Hexagon::JMP_c :
Hexagon::JMP_cNot;
case Hexagon::ADD_ri:
return !invertPredicate ? Hexagon::ADD_ri_cPt :
Hexagon::ADD_ri_cNotPt;
@ -1064,7 +1521,6 @@ isValidAutoIncImm(const EVT VT, const int Offset) const {
return (Offset >= Hexagon_MEMB_AUTOINC_MIN &&
Offset <= Hexagon_MEMB_AUTOINC_MAX);
}
llvm_unreachable("Not an auto-inc opc!");
}

View File

@ -163,6 +163,7 @@ public:
bool isConditionalALU32 (const MachineInstr* MI) const;
bool isConditionalLoad (const MachineInstr* MI) const;
bool isDeallocRet(const MachineInstr *MI) const;
unsigned getInvertedPredicatedOpcode(const int Opc) const;
private:
int getMatchingCondBranchOpcode(int Opc, bool sense) const;

View File

@ -695,10 +695,6 @@ def AND_pnotp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1,
"$dst = and($src1, !$src2)",
[]>;
def NOT_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1),
"$dst = not($src1)",
[(set PredRegs:$dst, (not PredRegs:$src1))]>;
def ANY_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1),
"$dst = any8($src1)",
[]>;
@ -728,7 +724,7 @@ def MASK_p : SInst<(outs DoubleRegs:$dst), (ins PredRegs:$src1),
"$dst = mask($src1)",
[]>;
def NOT_Ps : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1),
def NOT_p : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1),
"$dst = not($src1)",
[(set PredRegs:$dst, (not PredRegs:$src1))]>;
@ -761,7 +757,7 @@ let isBranch = 1, isTerminator=1, isBarrier = 1, isPredicable = 1 in {
// if (p0) jump
let isBranch = 1, isTerminator=1, Defs = [PC],
isPredicated = 1 in {
def JMP_Pred : JInst< (outs),
def JMP_c : JInst< (outs),
(ins PredRegs:$src, brtarget:$offset),
"if ($src) jump $offset",
[(brcond PredRegs:$src, bb:$offset)]>;
@ -770,7 +766,7 @@ let isBranch = 1, isTerminator=1, Defs = [PC],
// if (!p0) jump
let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
isPredicated = 1 in {
def JMP_PredNot : JInst< (outs),
def JMP_cNot : JInst< (outs),
(ins PredRegs:$src, brtarget:$offset),
"if (!$src) jump $offset",
[]>;
@ -787,7 +783,7 @@ let isTerminator = 1, isBranch = 1, neverHasSideEffects = 1, Defs = [PC],
// if (p0) jump:t
let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
isPredicated = 1 in {
def JMP_PredPt : JInst< (outs),
def JMP_cdnPt : JInst< (outs),
(ins PredRegs:$src, brtarget:$offset),
"if ($src.new) jump:t $offset",
[]>;
@ -796,7 +792,7 @@ let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
// if (!p0) jump:t
let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
isPredicated = 1 in {
def JMP_PredNotPt : JInst< (outs),
def JMP_cdnNotPt : JInst< (outs),
(ins PredRegs:$src, brtarget:$offset),
"if (!$src.new) jump:t $offset",
[]>;
@ -805,7 +801,7 @@ let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
// Not taken.
let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
isPredicated = 1 in {
def JMP_PredPnt : JInst< (outs),
def JMP_cdnPnt : JInst< (outs),
(ins PredRegs:$src, brtarget:$offset),
"if ($src.new) jump:nt $offset",
[]>;
@ -814,7 +810,7 @@ let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
// Not taken.
let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
isPredicated = 1 in {
def JMP_PredNotPnt : JInst< (outs),
def JMP_cdnNotPnt : JInst< (outs),
(ins PredRegs:$src, brtarget:$offset),
"if (!$src.new) jump:nt $offset",
[]>;
@ -2267,6 +2263,20 @@ def TFR_condset_rr : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
[(set IntRegs:$dst, (select PredRegs:$src1, IntRegs:$src2,
IntRegs:$src3))]>;
let AddedComplexity = 100 in
def TFR_condset_ri : ALU32_rr<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, s12Imm:$src3),
"Error; should not emit",
[(set IntRegs:$dst,
(select PredRegs:$src1, IntRegs:$src2, s12ImmPred:$src3))]>;
let AddedComplexity = 100 in
def TFR_condset_ir : ALU32_rr<(outs IntRegs:$dst),
(ins PredRegs:$src1, s12Imm:$src2, IntRegs:$src3),
"Error; should not emit",
[(set IntRegs:$dst,
(select PredRegs:$src1, s12ImmPred:$src2, IntRegs:$src3))]>;
let AddedComplexity = 100 in
def TFR_condset_ii : ALU32_rr<(outs IntRegs:$dst),
(ins PredRegs:$src1, s12Imm:$src2, s12Imm:$src3),
@ -2460,7 +2470,7 @@ def : Pat <(and IntRegs:$src1, 255),
// Add(p1, false) should never be produced,
// if it does, it got to be mapped to NOOP.
def : Pat <(add PredRegs:$src1, -1),
(NOT_pp PredRegs:$src1)>;
(NOT_p PredRegs:$src1)>;
// Map from p0 = setlt(r0, r1) r2 = mux(p0, r3, r4) =>
// p0 = cmp.lt(r0, r1), r0 = mux(p0, r2, r1).
@ -2475,7 +2485,7 @@ def : Pat <(select (not PredRegs:$src1), s8ImmPred:$src2, s8ImmPred:$src3),
// Map from p0 = pnot(p0); if (p0) jump => if (!p0) jump.
def : Pat <(brcond (not PredRegs:$src1), bb:$offset),
(JMP_PredNot PredRegs:$src1, bb:$offset)>;
(JMP_cNot PredRegs:$src1, bb:$offset)>;
// Map from p2 = pnot(p2); p1 = and(p0, p2) => p1 = and(p0, !p2).
def : Pat <(and PredRegs:$src1, (not PredRegs:$src2)),
@ -2674,39 +2684,39 @@ def : Pat <(i64 (sext_inreg DoubleRegs:$src1, i8)),
(i64 (SXTW (SXTB (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg))))>;
// We want to prevent emiting pnot's as much as possible.
// Map brcond with an unsupported setcc to a JMP_PredNot.
// Map brcond with an unsupported setcc to a JMP_cNot.
def : Pat <(brcond (i1 (setne IntRegs:$src1, IntRegs:$src2)), bb:$offset),
(JMP_PredNot (CMPEQrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>;
(JMP_cNot (CMPEQrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>;
def : Pat <(brcond (i1 (setne IntRegs:$src1, s10ImmPred:$src2)), bb:$offset),
(JMP_PredNot (CMPEQri IntRegs:$src1, s10ImmPred:$src2), bb:$offset)>;
(JMP_cNot (CMPEQri IntRegs:$src1, s10ImmPred:$src2), bb:$offset)>;
def : Pat <(brcond (i1 (setne PredRegs:$src1, (i1 -1))), bb:$offset),
(JMP_PredNot PredRegs:$src1, bb:$offset)>;
(JMP_cNot PredRegs:$src1, bb:$offset)>;
def : Pat <(brcond (i1 (setne PredRegs:$src1, (i1 0))), bb:$offset),
(JMP_Pred PredRegs:$src1, bb:$offset)>;
(JMP_c PredRegs:$src1, bb:$offset)>;
def : Pat <(brcond (i1 (setlt IntRegs:$src1, s8ImmPred:$src2)), bb:$offset),
(JMP_PredNot (CMPGEri IntRegs:$src1, s8ImmPred:$src2), bb:$offset)>;
(JMP_cNot (CMPGEri IntRegs:$src1, s8ImmPred:$src2), bb:$offset)>;
def : Pat <(brcond (i1 (setlt IntRegs:$src1, IntRegs:$src2)), bb:$offset),
(JMP_Pred (CMPLTrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>;
(JMP_c (CMPLTrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>;
def : Pat <(brcond (i1 (setuge DoubleRegs:$src1, DoubleRegs:$src2)),
bb:$offset),
(JMP_PredNot (CMPGTU64rr DoubleRegs:$src2, DoubleRegs:$src1),
(JMP_cNot (CMPGTU64rr DoubleRegs:$src2, DoubleRegs:$src1),
bb:$offset)>;
def : Pat <(brcond (i1 (setule IntRegs:$src1, IntRegs:$src2)), bb:$offset),
(JMP_PredNot (CMPGTUrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>;
(JMP_cNot (CMPGTUrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>;
def : Pat <(brcond (i1 (setule DoubleRegs:$src1, DoubleRegs:$src2)),
bb:$offset),
(JMP_PredNot (CMPGTU64rr DoubleRegs:$src1, DoubleRegs:$src2),
(JMP_cNot (CMPGTU64rr DoubleRegs:$src1, DoubleRegs:$src2),
bb:$offset)>;
// Map from a 64-bit select to an emulated 64-bit mux.
// Hexagon does not support 64-bit MUXes; so emulate with combines.
def : Pat <(select PredRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3),
(COMBINE_rr
@ -2721,7 +2731,7 @@ def : Pat <(select PredRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3),
// From LegalizeDAG.cpp: (B1 ? B2 : B3) <=> (B1 & B2)|(!B1&B3).
def : Pat <(select PredRegs:$src1, PredRegs:$src2, PredRegs:$src3),
(OR_pp (AND_pp PredRegs:$src1, PredRegs:$src2),
(AND_pp (NOT_pp PredRegs:$src1), PredRegs:$src3))>;
(AND_pp (NOT_p PredRegs:$src1), PredRegs:$src3))>;
// Map Pd = load(addr) -> Rs = load(addr); Pd = Rs.
def : Pat<(i1 (load ADDRriS11_2:$addr)),
@ -2773,26 +2783,26 @@ def : Pat<(i64 (anyext IntRegs:$src1)),
// Map cmple -> cmpgt.
// rs <= rt -> !(rs > rt).
def : Pat<(i1 (setle IntRegs:$src1, s10ImmPred:$src2)),
(i1 (NOT_Ps (CMPGTri IntRegs:$src1, s10ImmPred:$src2)))>;
(i1 (NOT_p (CMPGTri IntRegs:$src1, s10ImmPred:$src2)))>;
// rs <= rt -> !(rs > rt).
def : Pat<(i1 (setle IntRegs:$src1, IntRegs:$src2)),
(i1 (NOT_Ps (CMPGTrr IntRegs:$src1, IntRegs:$src2)))>;
(i1 (NOT_p (CMPGTrr IntRegs:$src1, IntRegs:$src2)))>;
// Rss <= Rtt -> !(Rss > Rtt).
def : Pat<(i1 (setle DoubleRegs:$src1, DoubleRegs:$src2)),
(i1 (NOT_Ps (CMPGT64rr DoubleRegs:$src1, DoubleRegs:$src2)))>;
(i1 (NOT_p (CMPGT64rr DoubleRegs:$src1, DoubleRegs:$src2)))>;
// Map cmpne -> cmpeq.
// Hexagon_TODO: We should improve on this.
// rs != rt -> !(rs == rt).
def : Pat <(i1 (setne IntRegs:$src1, s10ImmPred:$src2)),
(i1 (NOT_Ps(i1 (CMPEQri IntRegs:$src1, s10ImmPred:$src2))))>;
(i1 (NOT_p(i1 (CMPEQri IntRegs:$src1, s10ImmPred:$src2))))>;
// Map cmpne(Rs) -> !cmpeqe(Rs).
// rs != rt -> !(rs == rt).
def : Pat <(i1 (setne IntRegs:$src1, IntRegs:$src2)),
(i1 (NOT_Ps(i1 (CMPEQrr IntRegs:$src1, IntRegs:$src2))))>;
(i1 (NOT_p(i1 (CMPEQrr IntRegs:$src1, IntRegs:$src2))))>;
// Convert setne back to xor for hexagon since we compute w/ pred registers.
def : Pat <(i1 (setne PredRegs:$src1, PredRegs:$src2)),
@ -2801,12 +2811,12 @@ def : Pat <(i1 (setne PredRegs:$src1, PredRegs:$src2)),
// Map cmpne(Rss) -> !cmpew(Rss).
// rs != rt -> !(rs == rt).
def : Pat <(i1 (setne DoubleRegs:$src1, DoubleRegs:$src2)),
(i1 (NOT_Ps(i1 (CMPEHexagon4rr DoubleRegs:$src1, DoubleRegs:$src2))))>;
(i1 (NOT_p(i1 (CMPEHexagon4rr DoubleRegs:$src1, DoubleRegs:$src2))))>;
// Map cmpge(Rs, Rt) -> !(cmpgt(Rs, Rt).
// rs >= rt -> !(rt > rs).
def : Pat <(i1 (setge IntRegs:$src1, IntRegs:$src2)),
(i1 (NOT_Ps(i1 (CMPGTrr IntRegs:$src2, IntRegs:$src1))))>;
(i1 (NOT_p(i1 (CMPGTrr IntRegs:$src2, IntRegs:$src1))))>;
def : Pat <(i1 (setge IntRegs:$src1, s8ImmPred:$src2)),
(i1 (CMPGEri IntRegs:$src1, s8ImmPred:$src2))>;
@ -2814,12 +2824,12 @@ def : Pat <(i1 (setge IntRegs:$src1, s8ImmPred:$src2)),
// Map cmpge(Rss, Rtt) -> !cmpgt(Rtt, Rss).
// rss >= rtt -> !(rtt > rss).
def : Pat <(i1 (setge DoubleRegs:$src1, DoubleRegs:$src2)),
(i1 (NOT_Ps(i1 (CMPGT64rr DoubleRegs:$src2, DoubleRegs:$src1))))>;
(i1 (NOT_p(i1 (CMPGT64rr DoubleRegs:$src2, DoubleRegs:$src1))))>;
// Map cmplt(Rs, Imm) -> !cmpge(Rs, Imm).
// rs < rt -> !(rs >= rt).
def : Pat <(i1 (setlt IntRegs:$src1, s8ImmPred:$src2)),
(i1 (NOT_Ps (CMPGEri IntRegs:$src1, s8ImmPred:$src2)))>;
(i1 (NOT_p (CMPGEri IntRegs:$src1, s8ImmPred:$src2)))>;
// Map cmplt(Rs, Rt) -> cmplt(Rs, Rt).
// rs < rt -> rs < rt. Let assembler map it.
@ -2844,22 +2854,22 @@ def : Pat <(i1 (setult DoubleRegs:$src1, DoubleRegs:$src2)),
// Map from Rs >= Rt -> !(Rt > Rs).
// rs >= rt -> !(rt > rs).
def : Pat <(i1 (setuge IntRegs:$src1, IntRegs:$src2)),
(i1 (NOT_Ps (CMPGTUrr IntRegs:$src2, IntRegs:$src1)))>;
(i1 (NOT_p (CMPGTUrr IntRegs:$src2, IntRegs:$src1)))>;
// Map from Rs >= Rt -> !(Rt > Rs).
// rs >= rt -> !(rt > rs).
def : Pat <(i1 (setuge DoubleRegs:$src1, DoubleRegs:$src2)),
(i1 (NOT_Ps (CMPGTU64rr DoubleRegs:$src2, DoubleRegs:$src1)))>;
(i1 (NOT_p (CMPGTU64rr DoubleRegs:$src2, DoubleRegs:$src1)))>;
// Map from cmpleu(Rs, Rs) -> !cmpgtu(Rs, Rs).
// Map from (Rs <= Rt) -> !(Rs > Rt).
def : Pat <(i1 (setule IntRegs:$src1, IntRegs:$src2)),
(i1 (NOT_Ps (CMPGTUrr IntRegs:$src1, IntRegs:$src2)))>;
(i1 (NOT_p (CMPGTUrr IntRegs:$src1, IntRegs:$src2)))>;
// Map from cmpleu(Rss, Rtt) -> !cmpgtu(Rss, Rtt-1).
// Map from (Rs <= Rt) -> !(Rs > Rt).
def : Pat <(i1 (setule DoubleRegs:$src1, DoubleRegs:$src2)),
(i1 (NOT_Ps (CMPGTU64rr DoubleRegs:$src1, DoubleRegs:$src2)))>;
(i1 (NOT_p (CMPGTU64rr DoubleRegs:$src1, DoubleRegs:$src2)))>;
// Sign extends.
// i1 -> i32

View File

@ -2231,6 +2231,181 @@ def POST_STwri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
// NV/ST -
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// NV/J +
//===----------------------------------------------------------------------===//
multiclass NVJ_type_basic_reg<string NotStr, string OpcStr, string TakenStr> {
def _ie_nv_V4 : NVInst_V4<(outs),
(ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset),
!strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
!strconcat("($src1.new, $src2)) jump:",
!strconcat(TakenStr, " $offset"))))),
[]>,
Requires<[HasV4T]>;
def _nv_V4 : NVInst_V4<(outs),
(ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset),
!strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
!strconcat("($src1.new, $src2)) jump:",
!strconcat(TakenStr, " $offset"))))),
[]>,
Requires<[HasV4T]>;
}
multiclass NVJ_type_basic_2ndDotNew<string NotStr, string OpcStr, string TakenStr> {
def _ie_nv_V4 : NVInst_V4<(outs),
(ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset),
!strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
!strconcat("($src1, $src2.new)) jump:",
!strconcat(TakenStr, " $offset"))))),
[]>,
Requires<[HasV4T]>;
def _nv_V4 : NVInst_V4<(outs),
(ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset),
!strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
!strconcat("($src1, $src2.new)) jump:",
!strconcat(TakenStr, " $offset"))))),
[]>,
Requires<[HasV4T]>;
}
multiclass NVJ_type_basic_imm<string NotStr, string OpcStr, string TakenStr> {
def _ie_nv_V4 : NVInst_V4<(outs),
(ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset),
!strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
!strconcat("($src1.new, #$src2)) jump:",
!strconcat(TakenStr, " $offset"))))),
[]>,
Requires<[HasV4T]>;
def _nv_V4 : NVInst_V4<(outs),
(ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset),
!strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
!strconcat("($src1.new, #$src2)) jump:",
!strconcat(TakenStr, " $offset"))))),
[]>,
Requires<[HasV4T]>;
}
multiclass NVJ_type_basic_neg<string NotStr, string OpcStr, string TakenStr> {
def _ie_nv_V4 : NVInst_V4<(outs),
(ins IntRegs:$src1, nOneImm:$src2, brtarget:$offset),
!strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
!strconcat("($src1.new, #$src2)) jump:",
!strconcat(TakenStr, " $offset"))))),
[]>,
Requires<[HasV4T]>;
def _nv_V4 : NVInst_V4<(outs),
(ins IntRegs:$src1, nOneImm:$src2, brtarget:$offset),
!strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
!strconcat("($src1.new, #$src2)) jump:",
!strconcat(TakenStr, " $offset"))))),
[]>,
Requires<[HasV4T]>;
}
multiclass NVJ_type_basic_tstbit<string NotStr, string OpcStr, string TakenStr> {
def _ie_nv_V4 : NVInst_V4<(outs),
(ins IntRegs:$src1, u1Imm:$src2, brtarget:$offset),
!strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
!strconcat("($src1.new, #$src2)) jump:",
!strconcat(TakenStr, " $offset"))))),
[]>,
Requires<[HasV4T]>;
def _nv_V4 : NVInst_V4<(outs),
(ins IntRegs:$src1, u1Imm:$src2, brtarget:$offset),
!strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
!strconcat("($src1.new, #$src2)) jump:",
!strconcat(TakenStr, " $offset"))))),
[]>,
Requires<[HasV4T]>;
}
// Multiclass for regular dot new of Ist operand register.
multiclass NVJ_type_br_pred_reg<string NotStr, string OpcStr> {
defm Pt : NVJ_type_basic_reg<NotStr, OpcStr, "t">;
defm Pnt : NVJ_type_basic_reg<NotStr, OpcStr, "nt">;
}
// Multiclass for dot new of 2nd operand register.
multiclass NVJ_type_br_pred_2ndDotNew<string NotStr, string OpcStr> {
defm Pt : NVJ_type_basic_2ndDotNew<NotStr, OpcStr, "t">;
defm Pnt : NVJ_type_basic_2ndDotNew<NotStr, OpcStr, "nt">;
}
// Multiclass for 2nd operand immediate, including -1.
multiclass NVJ_type_br_pred_imm<string NotStr, string OpcStr> {
defm Pt : NVJ_type_basic_imm<NotStr, OpcStr, "t">;
defm Pnt : NVJ_type_basic_imm<NotStr, OpcStr, "nt">;
defm Ptneg : NVJ_type_basic_neg<NotStr, OpcStr, "t">;
defm Pntneg : NVJ_type_basic_neg<NotStr, OpcStr, "nt">;
}
// Multiclass for 2nd operand immediate, excluding -1.
multiclass NVJ_type_br_pred_imm_only<string NotStr, string OpcStr> {
defm Pt : NVJ_type_basic_imm<NotStr, OpcStr, "t">;
defm Pnt : NVJ_type_basic_imm<NotStr, OpcStr, "nt">;
}
// Multiclass for tstbit, where 2nd operand is always #0.
multiclass NVJ_type_br_pred_tstbit<string NotStr, string OpcStr> {
defm Pt : NVJ_type_basic_tstbit<NotStr, OpcStr, "t">;
defm Pnt : NVJ_type_basic_tstbit<NotStr, OpcStr, "nt">;
}
// Multiclass for GT.
multiclass NVJ_type_rr_ri<string OpcStr> {
defm rrNot : NVJ_type_br_pred_reg<"!", OpcStr>;
defm rr : NVJ_type_br_pred_reg<"", OpcStr>;
defm rrdnNot : NVJ_type_br_pred_2ndDotNew<"!", OpcStr>;
defm rrdn : NVJ_type_br_pred_2ndDotNew<"", OpcStr>;
defm riNot : NVJ_type_br_pred_imm<"!", OpcStr>;
defm ri : NVJ_type_br_pred_imm<"", OpcStr>;
}
// Multiclass for EQ.
multiclass NVJ_type_rr_ri_no_2ndDotNew<string OpcStr> {
defm rrNot : NVJ_type_br_pred_reg<"!", OpcStr>;
defm rr : NVJ_type_br_pred_reg<"", OpcStr>;
defm riNot : NVJ_type_br_pred_imm<"!", OpcStr>;
defm ri : NVJ_type_br_pred_imm<"", OpcStr>;
}
// Multiclass for GTU.
multiclass NVJ_type_rr_ri_no_nOne<string OpcStr> {
defm rrNot : NVJ_type_br_pred_reg<"!", OpcStr>;
defm rr : NVJ_type_br_pred_reg<"", OpcStr>;
defm rrdnNot : NVJ_type_br_pred_2ndDotNew<"!", OpcStr>;
defm rrdn : NVJ_type_br_pred_2ndDotNew<"", OpcStr>;
defm riNot : NVJ_type_br_pred_imm_only<"!", OpcStr>;
defm ri : NVJ_type_br_pred_imm_only<"", OpcStr>;
}
// Multiclass for tstbit.
multiclass NVJ_type_r0<string OpcStr> {
defm r0Not : NVJ_type_br_pred_tstbit<"!", OpcStr>;
defm r0 : NVJ_type_br_pred_tstbit<"", OpcStr>;
}
// Base Multiclass for New Value Jump.
multiclass NVJ_type {
defm GT : NVJ_type_rr_ri<"cmp.gt">;
defm EQ : NVJ_type_rr_ri_no_2ndDotNew<"cmp.eq">;
defm GTU : NVJ_type_rr_ri_no_nOne<"cmp.gtu">;
defm TSTBIT : NVJ_type_r0<"tstbit">;
}
let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC] in {
defm JMP_ : NVJ_type;
}
//===----------------------------------------------------------------------===//
// NV/J -
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// XTYPE/ALU +

View File

@ -18,7 +18,7 @@ def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
IntRegs:$fval, SETNE)),
(i32 (MUX_rr (i1 (NOT_Ps (CMPEQrr IntRegs:$lhs, IntRegs:$rhs))),
(i32 (MUX_rr (i1 (NOT_p (CMPEQrr IntRegs:$lhs, IntRegs:$rhs))),
IntRegs:$tval, IntRegs:$fval))>;
def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
@ -35,24 +35,24 @@ def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
IntRegs:$fval, SETULT)),
(i32 (MUX_rr (i1 (NOT_Ps (CMPGTUrr IntRegs:$lhs,
(i32 (MUX_rr (i1 (NOT_p (CMPGTUrr IntRegs:$lhs,
(ADD_ri IntRegs:$rhs, -1)))),
IntRegs:$tval, IntRegs:$fval))>;
def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
IntRegs:$fval, SETLT)),
(i32 (MUX_rr (i1 (NOT_Ps (CMPGTrr IntRegs:$lhs,
(i32 (MUX_rr (i1 (NOT_p (CMPGTrr IntRegs:$lhs,
(ADD_ri IntRegs:$rhs, -1)))),
IntRegs:$tval, IntRegs:$fval))>;
def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
IntRegs:$fval, SETLE)),
(i32 (MUX_rr (i1 (NOT_Ps (CMPGTrr IntRegs:$lhs, IntRegs:$rhs))),
(i32 (MUX_rr (i1 (NOT_p (CMPGTrr IntRegs:$lhs, IntRegs:$rhs))),
IntRegs:$tval, IntRegs:$fval))>;
def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
IntRegs:$fval, SETULE)),
(i32 (MUX_rr (i1 (NOT_Ps (CMPGTUrr IntRegs:$lhs, IntRegs:$rhs))),
(i32 (MUX_rr (i1 (NOT_p (CMPGTUrr IntRegs:$lhs, IntRegs:$rhs))),
IntRegs:$tval, IntRegs:$fval))>;
@ -86,7 +86,7 @@ def : Pat <(i32 (selectcc PredRegs:$lhs, PredRegs:$rhs, IntRegs:$tval,
def : Pat <(i32 (selectcc PredRegs:$lhs, PredRegs:$rhs, IntRegs:$tval,
IntRegs:$fval, SETEQ)),
(i32 (MUX_rr (i1 (NOT_pp (XOR_pp PredRegs:$lhs, PredRegs:$rhs))),
(i32 (MUX_rr (i1 (NOT_p (XOR_pp PredRegs:$lhs, PredRegs:$rhs))),
IntRegs:$tval, IntRegs:$fval))>;

View File

@ -101,6 +101,7 @@ TargetPassConfig *HexagonTargetMachine::createPassConfig(PassManagerBase &PM) {
bool HexagonPassConfig::addInstSelector() {
PM.add(createHexagonRemoveExtendOps(getHexagonTargetMachine()));
PM.add(createHexagonISelDag(getHexagonTargetMachine()));
PM.add(createHexagonPeephole());
return false;
}