Hexagon constant extender support.

Patch by Jyotsna Verma.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156634 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Brendon Cahoon 2012-05-11 19:56:59 +00:00
parent e1093e5503
commit 6d532d8860
17 changed files with 3700 additions and 343 deletions

View File

@ -29,6 +29,7 @@ add_llvm_target(HexagonCodeGen
HexagonTargetMachine.cpp
HexagonTargetObjectFile.cpp
HexagonVLIWPacketizer.cpp
HexagonOptimizeConstExt.cpp
)
add_subdirectory(TargetInfo)

View File

@ -36,7 +36,7 @@ namespace llvm {
FunctionPass *createHexagonSplitTFRCondSets(HexagonTargetMachine &TM);
FunctionPass *createHexagonExpandPredSpillCode(HexagonTargetMachine &TM);
FunctionPass *createHexagonOptimizeConstExt(HexagonTargetMachine &TM);
FunctionPass *createHexagonHardwareLoops();
FunctionPass *createHexagonPeephole();
FunctionPass *createHexagonFixupHwLoops();

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,41 @@
//===--- HexagonConstExtInfo.h - Provides constant extender information ---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains helper functions that extract constant extender
// information for a specified instruction from the HexagonConstExtInfo table.
//
//===----------------------------------------------------------------------===//
#ifndef HEXAGONCONSTEXT_H
#define HEXAGONCONSTEXT_H
namespace llvm {
namespace HexagonConstExt {
// One row of the constant-extender table: per-opcode information about
// which operand may be constant extended, the immediate range that does
// NOT require extension, and a non-extended equivalent opcode if one
// exists.
typedef struct {
const char * Name;       // Instruction mnemonic.
const short CExtOpNum;   // Index of the extendable operand; -1 if none.
const int MinValue;      // Smallest immediate not needing extension.
const int MaxValue;      // Largest immediate not needing extension.
const int NonExtOpcode;  // Non-extended equivalent opcode; negative if none.
} HexagonConstExtInfo;
#include "HexagonCExtTable.h"
/// HexagonConstExt - This namespace holds the constant extension related
/// information.
// True if OperandNum is the constant-extendable operand of Opcode.
bool isOperandExtended(unsigned short Opcode, unsigned short OperandNum);
// Index of the constant-extendable operand of Opcode (table CExtOpNum).
unsigned short getCExtOpNum(unsigned short Opcode);
// Smallest immediate of Opcode that does not need a constant extender.
int getMinValue(unsigned short Opcode);
// Largest immediate of Opcode that does not need a constant extender.
int getMaxValue(unsigned short Opcode);
// True if Opcode has a non-extended equivalent instruction.
bool NonExtEquivalentExists (unsigned short Opcode);
// Opcode of the non-extended equivalent; negative if none exists.
int getNonExtOpcode (unsigned short Opcode);
}
}
#endif

View File

@ -94,6 +94,7 @@ public:
SDNode *SelectConstant(SDNode *N);
SDNode *SelectConstantFP(SDNode *N);
SDNode *SelectAdd(SDNode *N);
bool isConstExtProfitable(SDNode *N) const;
// Include the pieces autogenerated from the target description.
#include "HexagonGenDAGISel.inc"
@ -312,9 +313,13 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, DebugLoc dl) {
cast<GlobalAddressSDNode>(Base)->getGlobal();
SDValue TargAddr =
CurDAG->getTargetGlobalAddress(GV, dl, PointerTy, 0);
SDNode* NewBase = CurDAG->getMachineNode(Hexagon::CONST32_set,
dl, PointerTy,
TargAddr);
SDNode* NewBase;
if (Subtarget.hasV4TOps())
NewBase = CurDAG->getMachineNode(Hexagon::TFRI_V4,
dl, PointerTy, TargAddr);
else
NewBase = CurDAG->getMachineNode(Hexagon::CONST32_set,
dl, PointerTy, TargAddr);
// Figure out base + offset opcode
if (LoadedVT == MVT::i64) Opcode = Hexagon::LDrid_indexed;
else if (LoadedVT == MVT::i32) Opcode = Hexagon::LDriw_indexed;
@ -686,9 +691,13 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetStore(StoreSDNode *ST,
cast<GlobalAddressSDNode>(Base)->getGlobal();
SDValue TargAddr =
CurDAG->getTargetGlobalAddress(GV, dl, PointerTy, 0);
SDNode* NewBase = CurDAG->getMachineNode(Hexagon::CONST32_set,
dl, PointerTy,
TargAddr);
SDNode* NewBase;
if (Subtarget.hasV4TOps())
NewBase = CurDAG->getMachineNode(Hexagon::TFRI_V4,
dl, PointerTy, TargAddr);
else
NewBase = CurDAG->getMachineNode(Hexagon::CONST32_set,
dl, PointerTy, TargAddr);
// Figure out base + offset opcode
if (StoredVT == MVT::i64) Opcode = Hexagon::STrid_indexed;
@ -1507,3 +1516,13 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
OutOps.push_back(Op1);
return false;
}
// isConstExtProfitable - Extending a constant is only profitable when the
// value is not shared: with more than one use, materializing it once in a
// register beats emitting an extender word at every use site.
bool HexagonDAGToDAGISel::isConstExtProfitable(SDNode *N) const {
  unsigned UseCount = 0;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E;
       ++I) {
    // Stop as soon as a second use is seen; the exact count is irrelevant.
    if (++UseCount > 1)
      return false;
  }
  return true;
}

View File

@ -13,6 +13,12 @@ def s32Imm : Operand<i32> {
let PrintMethod = "printImmOperand";
}
// f32Ext type is used to identify constant extended floating point
// Immediate operands.
def f32Ext : Operand<f32> {
let PrintMethod = "printImmOperand";
}
def s16Imm : Operand<i32> {
let PrintMethod = "printImmOperand";
}
@ -506,3 +512,412 @@ def nOneImmPred : PatLeaf<(i32 imm), [{
return (-1 == v);
}]>;
// Operand types for constant extendable operands
def s16Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def s12Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def s10Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def s9Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def s8Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def s6Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def s11_0Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def s11_1Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def s11_2Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def s11_3Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def u6Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def u7Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def u8Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def u9Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def u10Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def u6_0Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def u6_1Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def u6_2Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
def u6_3Ext : Operand<i32> {
let PrintMethod = "printExtOperand";
}
// Predicates for constant extendable operands
// Signed 16-bit immediate that may be constant extended on V4.
def s16ExtPred : PatLeaf<(i32 imm), [{
  int64_t v = (int64_t)N->getSExtValue();
  // Pre-V4 only the native 16-bit sign-extended field is available.
  if (!Subtarget.hasV4TOps())
    return isInt<16>(v);
  // V4: accept if it fits natively, or if extending is profitable and the
  // value is representable in a 32-bit signed field.
  return isInt<16>(v) || (isConstExtProfitable(Node) && isInt<32>(v));
}]>;
def s10ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate can fit in a 10-bit sign extended field.
return isInt<10>(v);
else {
if (isInt<10>(v))
return true;
// Return true if extending this immediate is profitable and the value
// can fit in a 32-bit signed field.
if (isConstExtProfitable(Node) && isInt<32>(v))
return true;
else
return false;
}
}]>;
def s9ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate can fit in a 9-bit sign extended field.
return isInt<9>(v);
else {
if (isInt<9>(v))
return true;
// Return true if extending this immediate is profitable and the value
// can fit in a 32-bit signed field.
if (isConstExtProfitable(Node) && isInt<32>(v))
return true;
else
return false;
}
}]>;
def s8ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate can fit in a 8-bit sign extended field.
return isInt<8>(v);
else {
if (isInt<8>(v))
return true;
// Return true if extending this immediate is profitable and the value
// can fit in a 32-bit signed field.
if (isConstExtProfitable(Node) && isInt<32>(v))
return true;
else
return false;
}
}]>;
def s8_16ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate fits in a 8-bit sign extended field.
return isInt<8>(v);
else {
if (isInt<8>(v))
return true;
// Return true if extending this immediate is profitable and the value
// can't fit in a 16-bit signed field. This is required to avoid
// unnecessary constant extenders.
if (isConstExtProfitable(Node) && !isInt<16>(v))
return true;
else
return false;
}
}]>;
def s6ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate can fit in a 6-bit sign extended field.
return isInt<6>(v);
else {
if (isInt<6>(v))
return true;
// Return true if extending this immediate is profitable and the value
// can fit in a 32-bit signed field.
if (isConstExtProfitable(Node) && isInt<32>(v))
return true;
else
return false;
}
}]>;
def s6_16ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate fits in a 6-bit sign extended field.
return isInt<6>(v);
else {
if (isInt<6>(v))
return true;
// Return true if extending this immediate is profitable and the value
// can't fit in a 16-bit signed field. This is required to avoid
// unnecessary constant extenders.
if (isConstExtProfitable(Node) && !isInt<16>(v))
return true;
else
return false;
}
}]>;
def s6_10ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate can fit in a 6-bit sign extended field.
return isInt<6>(v);
else {
if (isInt<6>(v))
return true;
// Return true if extending this immediate is profitable and the value
// can't fit in a 10-bit signed field. This is required to avoid
// unnecessary constant extenders.
if (isConstExtProfitable(Node) && !isInt<10>(v))
return true;
else
return false;
}
}]>;
def s11_0ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate can fit in a 11-bit sign extended field.
return isShiftedInt<11,0>(v);
else {
if (isInt<11>(v))
return true;
// Return true if extending this immediate is profitable and the value
// can fit in a 32-bit signed field.
if (isConstExtProfitable(Node) && isInt<32>(v))
return true;
else
return false;
}
}]>;
def s11_1ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate can fit in a 12-bit sign extended field and
// is 2 byte aligned.
return isShiftedInt<11,1>(v);
else {
if (isInt<12>(v))
return isShiftedInt<11,1>(v);
// Return true if extending this immediate is profitable and the low 1 bit
// is zero (2-byte aligned).
if (isConstExtProfitable(Node) && isInt<32>(v) && ((v % 2) == 0))
return true;
else
return false;
}
}]>;
def s11_2ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate can fit in a 13-bit sign extended field and
// is 4-byte aligned.
return isShiftedInt<11,2>(v);
else {
if (isInt<13>(v))
return isShiftedInt<11,2>(v);
// Return true if extending this immediate is profitable and the low 2-bits
// are zero (4-byte aligned).
if (isConstExtProfitable(Node) && isInt<32>(v) && ((v % 4) == 0))
return true;
else
return false;
}
}]>;
def s11_3ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate can fit in a 14-bit sign extended field and
// is 8-byte aligned.
return isShiftedInt<11,3>(v);
else {
if (isInt<14>(v))
return isShiftedInt<11,3>(v);
// Return true if extending this immediate is profitable and the low 3-bits
// are zero (8-byte aligned).
if (isConstExtProfitable(Node) && isInt<32>(v) && ((v % 8) == 0))
return true;
else
return false;
}
}]>;
def u6ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate can fit in a 6-bit unsigned field.
return isUInt<6>(v);
else {
if (isUInt<6>(v))
return true;
// Return true if extending this immediate is profitable and the value
// can fit in a 32-bit unsigned field.
if (isConstExtProfitable(Node) && isUInt<32>(v))
return true;
else
return false;
}
}]>;
def u7ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate can fit in a 7-bit unsigned field.
return isUInt<7>(v);
else {
if (isUInt<7>(v))
return true;
// Return true if extending this immediate is profitable and the value
// can fit in a 32-bit unsigned field.
if (isConstExtProfitable(Node) && isUInt<32>(v))
return true;
else
return false;
}
}]>;
def u8ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate can fit in a 8-bit unsigned field.
return isUInt<8>(v);
else {
if (isUInt<8>(v))
return true;
// Return true if extending this immediate is profitable and the value
// can fit in a 32-bit unsigned field.
if (isConstExtProfitable(Node) && isUInt<32>(v))
return true;
else
return false;
}
}]>;
def u9ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate can fit in a 9-bit unsigned field.
return isUInt<9>(v);
else {
if (isUInt<9>(v))
return true;
// Return true if extending this immediate is profitable and the value
// can fit in a 32-bit unsigned field.
if (isConstExtProfitable(Node) && isUInt<32>(v))
return true;
else
return false;
}
}]>;
def u6_2ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate can fit in a 8-bit unsigned field and
// is 4-byte aligned.
return isShiftedUInt<6,2>(v);
else {
if (isUInt<9>(v))
return isShiftedUInt<6,2>(v);
// Return true if extending this immediate is profitable and the value
// can fit in a 32-bit unsigned field.
if (isConstExtProfitable(Node) && isUInt<32>(v) && ((v % 4) == 0))
return true;
else
return false;
}
}]>;
def u6_3ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
if (!Subtarget.hasV4TOps())
// Return true if the immediate can fit in a 9-bit unsigned field and
// is 8-byte aligned.
return isShiftedUInt<6,3>(v);
else {
if (isUInt<9>(v))
return isShiftedUInt<6,3>(v);
// Return true if extending this immediate is profitable and the value
// can fit in a 32-bit unsigned field.
if (isConstExtProfitable(Node) && isUInt<32>(v) && ((v % 8) == 0))
return true;
else
return false;
}
}]>;

View File

@ -27,6 +27,7 @@
#define GET_INSTRINFO_CTOR
#include "HexagonGenInstrInfo.inc"
#include "HexagonGenDFAPacketizer.inc"
#include "HexagonConstExtInfo.h"
using namespace llvm;
@ -95,6 +96,7 @@ unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
default: break;
case Hexagon::STriw_indexed:
case Hexagon::STriw:
case Hexagon::STrid:
case Hexagon::STrih:
@ -364,7 +366,7 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
Align);
if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(Hexagon::STriw))
BuildMI(MBB, I, DL, get(Hexagon::STriw_indexed))
.addFrameIndex(FI).addImm(0)
.addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
} else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
@ -1312,72 +1314,85 @@ bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const {
return false;
const int Opc = MI->getOpcode();
int NumOperands = MI->getNumOperands();
// Keep a flag for upto 4 operands in the instructions, to indicate if
// that operand has been constant extended.
bool OpCExtended[4];
if (NumOperands > 4)
NumOperands = 4;
for (int i=0; i<NumOperands; i++)
OpCExtended[i] = (HexagonConstExt::isOperandExtended(Opc, 1) &&
isConstExtended(MI));
switch(Opc) {
case Hexagon::TFRI:
return isInt<12>(MI->getOperand(1).getImm());
// Return true if MI is constant extended as predicated form will also be
// extended so immediate value doesn't have to fit within range.
return OpCExtended[1] || isInt<12>(MI->getOperand(1).getImm());
case Hexagon::STrid:
case Hexagon::STrid_indexed:
return isShiftedUInt<6,3>(MI->getOperand(1).getImm());
return OpCExtended[1] || isShiftedUInt<6,3>(MI->getOperand(1).getImm());
case Hexagon::STriw:
case Hexagon::STriw_indexed:
case Hexagon::STriw_nv_V4:
return isShiftedUInt<6,2>(MI->getOperand(1).getImm());
return OpCExtended[1] || isShiftedUInt<6,2>(MI->getOperand(1).getImm());
case Hexagon::STrih:
case Hexagon::STrih_indexed:
case Hexagon::STrih_nv_V4:
return isShiftedUInt<6,1>(MI->getOperand(1).getImm());
return OpCExtended[1] || isShiftedUInt<6,1>(MI->getOperand(1).getImm());
case Hexagon::STrib:
case Hexagon::STrib_indexed:
case Hexagon::STrib_nv_V4:
return isUInt<6>(MI->getOperand(1).getImm());
return OpCExtended[1] || isUInt<6>(MI->getOperand(1).getImm());
case Hexagon::LDrid:
case Hexagon::LDrid_indexed:
return isShiftedUInt<6,3>(MI->getOperand(2).getImm());
return OpCExtended[2] || isShiftedUInt<6,3>(MI->getOperand(2).getImm());
case Hexagon::LDriw:
case Hexagon::LDriw_indexed:
return isShiftedUInt<6,2>(MI->getOperand(2).getImm());
return OpCExtended[2] || isShiftedUInt<6,2>(MI->getOperand(2).getImm());
case Hexagon::LDrih:
case Hexagon::LDriuh:
case Hexagon::LDrih_indexed:
case Hexagon::LDriuh_indexed:
return isShiftedUInt<6,1>(MI->getOperand(2).getImm());
return OpCExtended[2] || isShiftedUInt<6,1>(MI->getOperand(2).getImm());
case Hexagon::LDrib:
case Hexagon::LDriub:
case Hexagon::LDrib_indexed:
case Hexagon::LDriub_indexed:
return isUInt<6>(MI->getOperand(2).getImm());
return OpCExtended[2] || isUInt<6>(MI->getOperand(2).getImm());
case Hexagon::POST_LDrid:
return isShiftedInt<4,3>(MI->getOperand(3).getImm());
return OpCExtended[3] || isShiftedInt<4,3>(MI->getOperand(3).getImm());
case Hexagon::POST_LDriw:
return isShiftedInt<4,2>(MI->getOperand(3).getImm());
return OpCExtended[3] || isShiftedInt<4,2>(MI->getOperand(3).getImm());
case Hexagon::POST_LDrih:
case Hexagon::POST_LDriuh:
return isShiftedInt<4,1>(MI->getOperand(3).getImm());
return OpCExtended[3] || isShiftedInt<4,1>(MI->getOperand(3).getImm());
case Hexagon::POST_LDrib:
case Hexagon::POST_LDriub:
return isInt<4>(MI->getOperand(3).getImm());
return OpCExtended[3] || isInt<4>(MI->getOperand(3).getImm());
case Hexagon::STrib_imm_V4:
case Hexagon::STrih_imm_V4:
case Hexagon::STriw_imm_V4:
return (isUInt<6>(MI->getOperand(1).getImm()) &&
isInt<6>(MI->getOperand(2).getImm()));
return ((OpCExtended[1] || isUInt<6>(MI->getOperand(1).getImm())) &&
(OpCExtended[2] || isInt<6>(MI->getOperand(2).getImm())));
case Hexagon::ADD_ri:
return isInt<8>(MI->getOperand(2).getImm());
return OpCExtended[2] || isInt<8>(MI->getOperand(2).getImm());
case Hexagon::ASLH:
case Hexagon::ASRH:
@ -2190,6 +2205,73 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const {
case Hexagon::DEALLOC_RET_V4:
return !invertPredicate ? Hexagon::DEALLOC_RET_cPt_V4 :
Hexagon::DEALLOC_RET_cNotPt_V4;
// Load Absolute Addressing -- global address.
case Hexagon::LDrib_abs_V4:
return !invertPredicate ? Hexagon::LDrib_abs_cPt_V4 :
Hexagon::LDrib_abs_cNotPt_V4;
case Hexagon::LDriub_abs_V4:
return !invertPredicate ? Hexagon::LDriub_abs_cPt_V4 :
Hexagon::LDriub_abs_cNotPt_V4;
case Hexagon::LDrih_abs_V4:
return !invertPredicate ? Hexagon::LDrih_abs_cPt_V4 :
Hexagon::LDrih_abs_cNotPt_V4;
case Hexagon::LDriuh_abs_V4:
return !invertPredicate ? Hexagon::LDriuh_abs_cPt_V4 :
Hexagon::LDriuh_abs_cNotPt_V4;
case Hexagon::LDriw_abs_V4:
return !invertPredicate ? Hexagon::LDriw_abs_cPt_V4 :
Hexagon::LDriw_abs_cNotPt_V4;
case Hexagon::LDrid_abs_V4:
return !invertPredicate ? Hexagon::LDrid_abs_cPt_V4 :
Hexagon::LDrid_abs_cNotPt_V4;
// Load Absolute Addressing -- immediate value.
case Hexagon::LDrib_imm_abs_V4:
return !invertPredicate ? Hexagon::LDrib_imm_abs_cPt_V4 :
Hexagon::LDrib_imm_abs_cNotPt_V4;
case Hexagon::LDriub_imm_abs_V4:
return !invertPredicate ? Hexagon::LDriub_imm_abs_cPt_V4 :
Hexagon::LDriub_imm_abs_cNotPt_V4;
case Hexagon::LDrih_imm_abs_V4:
return !invertPredicate ? Hexagon::LDrih_imm_abs_cPt_V4 :
Hexagon::LDrih_imm_abs_cNotPt_V4;
case Hexagon::LDriuh_imm_abs_V4:
return !invertPredicate ? Hexagon::LDriuh_imm_abs_cPt_V4 :
Hexagon::LDriuh_imm_abs_cNotPt_V4;
case Hexagon::LDriw_imm_abs_V4:
return !invertPredicate ? Hexagon::LDriw_imm_abs_cPt_V4 :
Hexagon::LDriw_imm_abs_cNotPt_V4;
// Store Absolute Addressing.
case Hexagon::STrib_abs_V4:
return !invertPredicate ? Hexagon::STrib_abs_cPt_V4 :
Hexagon::STrib_abs_cNotPt_V4;
case Hexagon::STrih_abs_V4:
return !invertPredicate ? Hexagon::STrih_abs_cPt_V4 :
Hexagon::STrih_abs_cNotPt_V4;
case Hexagon::STriw_abs_V4:
return !invertPredicate ? Hexagon::STriw_abs_cPt_V4 :
Hexagon::STriw_abs_cNotPt_V4;
case Hexagon::STrid_abs_V4:
return !invertPredicate ? Hexagon::STrid_abs_cPt_V4 :
Hexagon::STrid_abs_cNotPt_V4;
// Store Absolute Addressing - global address.
case Hexagon::STrib_imm_abs_V4:
return !invertPredicate ? Hexagon::STrib_imm_abs_cPt_V4 :
Hexagon::STrib_imm_abs_cNotPt_V4;
case Hexagon::STrih_imm_abs_V4:
return !invertPredicate ? Hexagon::STrih_imm_abs_cPt_V4 :
Hexagon::STrih_imm_abs_cNotPt_V4;
case Hexagon::STriw_imm_abs_V4:
return !invertPredicate ? Hexagon::STriw_imm_abs_cPt_V4 :
Hexagon::STriw_imm_abs_cNotPt_V4;
// Transfer
case Hexagon::TFRI_V4:
return !invertPredicate ? Hexagon::TFRI_cPt_V4 :
Hexagon::TFRI_cNotPt_V4;
}
llvm_unreachable("Unexpected predicable instruction");
}
@ -2340,6 +2422,7 @@ isValidOffset(const int Opcode, const int Offset) const {
case Hexagon::LDriw:
case Hexagon::LDriw_f:
case Hexagon::STriw_indexed:
case Hexagon::STriw:
case Hexagon::STriw_f:
assert((Offset % 4 == 0) && "Offset has incorrect alignment");
@ -2805,3 +2888,71 @@ bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
return false;
}
// isExpr - Symbolic operand kinds (addresses, labels, jump-table and
// constant-pool indices) count as expressions; anything else does not.
bool HexagonInstrInfo::isExpr(unsigned OpType) const {
  return OpType == MachineOperand::MO_MachineBasicBlock ||
         OpType == MachineOperand::MO_GlobalAddress ||
         OpType == MachineOperand::MO_ExternalSymbol ||
         OpType == MachineOperand::MO_JumpTableIndex ||
         OpType == MachineOperand::MO_ConstantPoolIndex ||
         OpType == MachineOperand::MO_BlockAddress;
}
// isConstExtended - Returns true if MI's extendable operand actually needs
// a constant extender: either it is symbolic (no range check possible) or
// its immediate falls outside the [MinValue, MaxValue] non-extended range.
bool HexagonInstrInfo::isConstExtended(MachineInstr *MI) const {
  unsigned short Opcode = MI->getOpcode();
  short ExtOpNum = HexagonConstExt::getCExtOpNum(Opcode);
  // Instruction has no constant extended operand.
  if (ExtOpNum == -1)
    return false;
  int MinValue = HexagonConstExt::getMinValue(Opcode);
  int MaxValue = HexagonConstExt::getMaxValue(Opcode);
  const MachineOperand &MO = MI->getOperand(ExtOpNum);
  if (!MO.isImm()) // no range check if the operand is non-immediate.
    return true;
  // Keep the full 64-bit immediate: truncating MO.getImm() to int could
  // wrap an out-of-range value back into [MinValue, MaxValue] and wrongly
  // report that no extender is needed.
  int64_t ImmValue = MO.getImm();
  return (ImmValue < MinValue || ImmValue > MaxValue);
}
// Returns true if a particular operand is extended for an instruction.
bool HexagonConstExt::isOperandExtended(unsigned short Opcode,
unsigned short OperandNum) {
// True only when the table marks exactly this operand as extendable.
return HexagonCExt[Opcode].CExtOpNum == OperandNum;
}
// Returns Operand Index for the constant extended instruction.
// NOTE(review): the table's CExtOpNum is a (signed) short that callers
// compare against -1; returning it as unsigned short relies on the
// caller converting back to short — confirm against HexagonCExtTable.h.
unsigned short HexagonConstExt::getCExtOpNum(unsigned short Opcode) {
return HexagonCExt[Opcode].CExtOpNum;
}
// Returns the min value that doesn't need to be extended.
// Immediates below this require a constant extender.
int HexagonConstExt::getMinValue(unsigned short Opcode) {
return HexagonCExt[Opcode].MinValue;
}
// Returns the max value that doesn't need to be extended.
// Immediates above this require a constant extender.
int HexagonConstExt::getMaxValue(unsigned short Opcode) {
return HexagonCExt[Opcode].MaxValue;
}
// Returns true if an instruction can be converted into a non-extended
// equivalent instruction.
bool HexagonConstExt::NonExtEquivalentExists (unsigned short Opcode) {
  // A negative opcode in the table marks "no non-extended form".
  return HexagonCExt[Opcode].NonExtOpcode >= 0;
}
// Returns opcode of the non-extended equivalent instruction.
// Negative when no such equivalent exists (see NonExtEquivalentExists).
int HexagonConstExt::getNonExtOpcode (unsigned short Opcode) {
return HexagonCExt[Opcode].NonExtOpcode;
}

View File

@ -174,6 +174,8 @@ public:
bool isNewValueJump(const MachineInstr* MI) const;
unsigned getImmExtForm(const MachineInstr* MI) const;
unsigned getNormalBranchForm(const MachineInstr* MI) const;
bool isExpr(unsigned OpType) const;
bool isConstExtended(MachineInstr *MI) const;
private:
int getMatchingCondBranchOpcode(int Opc, bool sense) const;

View File

@ -67,10 +67,14 @@ def FrameIndex : Operand<i32> {
let PrintMethod = "printGlobalOperand" in
def globaladdress : Operand<i32>;
let PrintMethod = "printGlobalOperand" in
def globaladdressExt : Operand<i32>;
let PrintMethod = "printJumpTable" in
def jumptablebase : Operand<i32>;
def brtarget : Operand<OtherVT>;
def brtargetExt : Operand<OtherVT>;
def calltarget : Operand<i32>;
def bblabel : Operand<i32>;
@ -115,10 +119,10 @@ multiclass CMP32_rr_ri_s10<string OpcStr, PatFrag OpNode> {
!strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
[(set (i1 PredRegs:$dst),
(OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>;
def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s10Imm:$c),
def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s10Ext:$c),
!strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
[(set (i1 PredRegs:$dst),
(OpNode (i32 IntRegs:$b), s10ImmPred:$c))]>;
(OpNode (i32 IntRegs:$b), s10ExtPred:$c))]>;
}
multiclass CMP32_rr_ri_u9<string OpcStr, PatFrag OpNode> {
@ -126,24 +130,24 @@ multiclass CMP32_rr_ri_u9<string OpcStr, PatFrag OpNode> {
!strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
[(set (i1 PredRegs:$dst),
(OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>;
def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u9Imm:$c),
def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u9Ext:$c),
!strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
[(set (i1 PredRegs:$dst),
(OpNode (i32 IntRegs:$b), u9ImmPred:$c))]>;
(OpNode (i32 IntRegs:$b), u9ExtPred:$c))]>;
}
multiclass CMP32_ri_u8<string OpcStr, PatFrag OpNode> {
def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u8Imm:$c),
def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u8Ext:$c),
!strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
[(set (i1 PredRegs:$dst), (OpNode (i32 IntRegs:$b),
u8ImmPred:$c))]>;
u8ExtPred:$c))]>;
}
multiclass CMP32_ri_s8<string OpcStr, PatFrag OpNode> {
def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s8Imm:$c),
def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s8Ext:$c),
!strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
[(set (i1 PredRegs:$dst), (OpNode (i32 IntRegs:$b),
s8ImmPred:$c))]>;
s8ExtPred:$c))]>;
}
}
@ -160,10 +164,10 @@ def ADD_rr : ALU32_rr<(outs IntRegs:$dst),
let isPredicable = 1 in
def ADD_ri : ALU32_ri<(outs IntRegs:$dst),
(ins IntRegs:$src1, s16Imm:$src2),
(ins IntRegs:$src1, s16Ext:$src2),
"$dst = add($src1, #$src2)",
[(set (i32 IntRegs:$dst), (add (i32 IntRegs:$src1),
s16ImmPred:$src2))]>;
s16ExtPred:$src2))]>;
// Logical operations.
let isPredicable = 1 in
@ -181,10 +185,10 @@ def AND_rr : ALU32_rr<(outs IntRegs:$dst),
(i32 IntRegs:$src2)))]>;
def OR_ri : ALU32_ri<(outs IntRegs:$dst),
(ins IntRegs:$src1, s10Imm:$src2),
(ins IntRegs:$src1, s10Ext:$src2),
"$dst = or($src1, #$src2)",
[(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1),
s10ImmPred:$src2))]>;
s10ExtPred:$src2))]>;
def NOT_rr : ALU32_rr<(outs IntRegs:$dst),
(ins IntRegs:$src1),
@ -192,10 +196,10 @@ def NOT_rr : ALU32_rr<(outs IntRegs:$dst),
[(set (i32 IntRegs:$dst), (not (i32 IntRegs:$src1)))]>;
def AND_ri : ALU32_ri<(outs IntRegs:$dst),
(ins IntRegs:$src1, s10Imm:$src2),
(ins IntRegs:$src1, s10Ext:$src2),
"$dst = and($src1, #$src2)",
[(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1),
s10ImmPred:$src2))]>;
s10ExtPred:$src2))]>;
let isCommutable = 1, isPredicable = 1 in
def OR_rr : ALU32_rr<(outs IntRegs:$dst),
@ -224,15 +228,15 @@ def SUB_rr : ALU32_rr<(outs IntRegs:$dst),
// Rd32=sub(#s10,Rs32)
def SUB_ri : ALU32_ri<(outs IntRegs:$dst),
(ins s10Imm:$src1, IntRegs:$src2),
(ins s10Ext:$src1, IntRegs:$src2),
"$dst = sub(#$src1, $src2)",
[(set IntRegs:$dst, (sub s10ImmPred:$src1, IntRegs:$src2))]>;
[(set IntRegs:$dst, (sub s10ExtPred:$src1, IntRegs:$src2))]>;
// Transfer immediate.
let isMoveImm = 1, isReMaterializable = 1, isPredicable = 1 in
def TFRI : ALU32_ri<(outs IntRegs:$dst), (ins s16Imm:$src1),
def TFRI : ALU32_ri<(outs IntRegs:$dst), (ins s16Ext:$src1),
"$dst = #$src1",
[(set (i32 IntRegs:$dst), s16ImmPred:$src1)]>;
[(set (i32 IntRegs:$dst), s16ExtPred:$src1)]>;
// Transfer register.
let neverHasSideEffects = 1, isPredicable = 1 in
@ -286,25 +290,25 @@ def MUX_rr : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
(i32 IntRegs:$src2),
(i32 IntRegs:$src3))))]>;
def MUX_ir : ALU32_ir<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Imm:$src2,
def MUX_ir : ALU32_ir<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Ext:$src2,
IntRegs:$src3),
"$dst = mux($src1, #$src2, $src3)",
[(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1),
s8ImmPred:$src2,
s8ExtPred:$src2,
(i32 IntRegs:$src3))))]>;
def MUX_ri : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2,
s8Imm:$src3),
s8Ext:$src3),
"$dst = mux($src1, $src2, #$src3)",
[(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1),
(i32 IntRegs:$src2),
s8ImmPred:$src3)))]>;
s8ExtPred:$src3)))]>;
def MUX_ii : ALU32_ii<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Imm:$src2,
def MUX_ii : ALU32_ii<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Ext:$src2,
s8Imm:$src3),
"$dst = mux($src1, #$src2, #$src3)",
[(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1),
s8ImmPred:$src2,
s8ExtPred:$src2,
s8ImmPred:$src3)))]>;
// Shift halfword.
@ -351,25 +355,25 @@ def ZXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
// Conditional add.
let neverHasSideEffects = 1, isPredicated = 1 in
def ADD_ri_cPt : ALU32_ri<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, s8Ext:$src3),
"if ($src1) $dst = add($src2, #$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def ADD_ri_cNotPt : ALU32_ri<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, s8Ext:$src3),
"if (!$src1) $dst = add($src2, #$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def ADD_ri_cdnPt : ALU32_ri<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, s8Ext:$src3),
"if ($src1.new) $dst = add($src2, #$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def ADD_ri_cdnNotPt : ALU32_ri<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, s8Ext:$src3),
"if (!$src1.new) $dst = add($src2, #$src3)",
[]>;
@ -551,13 +555,13 @@ def TFR64_cNotPt : ALU32_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1,
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def TFRI_cPt : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1, s12Imm:$src2),
def TFRI_cPt : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1, s12Ext:$src2),
"if ($src1) $dst = #$src2",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def TFRI_cNotPt : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1,
s12Imm:$src2),
s12Ext:$src2),
"if (!$src1) $dst = #$src2",
[]>;
@ -575,13 +579,13 @@ def TFR_cdnNotPt : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
let neverHasSideEffects = 1, isPredicated = 1 in
def TFRI_cdnPt : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1,
s12Imm:$src2),
s12Ext:$src2),
"if ($src1.new) $dst = #$src2",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def TFRI_cdnNotPt : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1,
s12Imm:$src2),
s12Ext:$src2),
"if (!$src1.new) $dst = #$src2",
[]>;
@ -923,6 +927,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicated = 1,
/// increment operand.
///
// Load doubleword.
// Rdd=memd(Rs)
let isPredicable = 1 in
def LDrid : LDInst<(outs DoubleRegs:$dst),
(ins MEMri:$addr),
@ -931,11 +936,11 @@ def LDrid : LDInst<(outs DoubleRegs:$dst),
let isPredicable = 1, AddedComplexity = 20 in
def LDrid_indexed : LDInst<(outs DoubleRegs:$dst),
(ins IntRegs:$src1, s11_3Imm:$offset),
(ins IntRegs:$src1, s11_3Ext:$offset),
"$dst = memd($src1+#$offset)",
[(set (i64 DoubleRegs:$dst),
(i64 (load (add (i32 IntRegs:$src1),
s11_3ImmPred:$offset))))]>;
s11_3ExtPred:$offset))))]>;
let neverHasSideEffects = 1 in
def LDrid_GP : LDInst2<(outs DoubleRegs:$dst),
@ -974,13 +979,13 @@ def LDrid_cNotPt : LDInst2<(outs DoubleRegs:$dst),
let neverHasSideEffects = 1, isPredicated = 1 in
def LDrid_indexed_cPt : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Ext:$src3),
"if ($src1) $dst = memd($src2+#$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def LDrid_indexed_cNotPt : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Ext:$src3),
"if (!$src1) $dst = memd($src2+#$src3)",
[]>;
@ -1012,13 +1017,13 @@ def LDrid_cdnNotPt : LDInst2<(outs DoubleRegs:$dst),
let neverHasSideEffects = 1, isPredicated = 1 in
def LDrid_indexed_cdnPt : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Ext:$src3),
"if ($src1.new) $dst = memd($src2+#$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def LDrid_indexed_cdnNotPt : LDInst2<(outs DoubleRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Ext:$src3),
"if (!$src1.new) $dst = memd($src2+#$src3)",
[]>;
@ -1037,11 +1042,11 @@ def : Pat < (i32 (extloadi8 ADDRriS11_0:$addr)),
// Indexed load byte.
let isPredicable = 1, AddedComplexity = 20 in
def LDrib_indexed : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, s11_0Imm:$offset),
(ins IntRegs:$src1, s11_0Ext:$offset),
"$dst = memb($src1+#$offset)",
[(set (i32 IntRegs:$dst),
(i32 (sextloadi8 (add (i32 IntRegs:$src1),
s11_0ImmPred:$offset))))]>;
s11_0ExtPred:$offset))))]>;
// Indexed load byte any-extend.
let AddedComplexity = 20 in
@ -1091,13 +1096,13 @@ def LDrib_cNotPt : LDInst2<(outs IntRegs:$dst),
let neverHasSideEffects = 1, isPredicated = 1 in
def LDrib_indexed_cPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Ext:$src3),
"if ($src1) $dst = memb($src2+#$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def LDrib_indexed_cNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Ext:$src3),
"if (!$src1) $dst = memb($src2+#$src3)",
[]>;
@ -1129,13 +1134,13 @@ def LDrib_cdnNotPt : LDInst2<(outs IntRegs:$dst),
let neverHasSideEffects = 1, isPredicated = 1 in
def LDrib_indexed_cdnPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Ext:$src3),
"if ($src1.new) $dst = memb($src2+#$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def LDrib_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Ext:$src3),
"if (!$src1.new) $dst = memb($src2+#$src3)",
[]>;
@ -1149,11 +1154,11 @@ def LDrih : LDInst<(outs IntRegs:$dst),
let isPredicable = 1, AddedComplexity = 20 in
def LDrih_indexed : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, s11_1Imm:$offset),
(ins IntRegs:$src1, s11_1Ext:$offset),
"$dst = memh($src1+#$offset)",
[(set (i32 IntRegs:$dst),
(i32 (sextloadi16 (add (i32 IntRegs:$src1),
s11_1ImmPred:$offset))))]>;
s11_1ExtPred:$offset))))]>;
def : Pat < (i32 (extloadi16 ADDRriS11_1:$addr)),
(i32 (LDrih ADDRriS11_1:$addr))>;
@ -1205,13 +1210,13 @@ def LDrih_cNotPt : LDInst2<(outs IntRegs:$dst),
let neverHasSideEffects = 1, isPredicated = 1 in
def LDrih_indexed_cPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Ext:$src3),
"if ($src1) $dst = memh($src2+#$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def LDrih_indexed_cNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Ext:$src3),
"if (!$src1) $dst = memh($src2+#$src3)",
[]>;
@ -1243,13 +1248,13 @@ def LDrih_cdnNotPt : LDInst2<(outs IntRegs:$dst),
let neverHasSideEffects = 1, isPredicated = 1 in
def LDrih_indexed_cdnPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Ext:$src3),
"if ($src1.new) $dst = memh($src2+#$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def LDrih_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Ext:$src3),
"if (!$src1.new) $dst = memh($src2+#$src3)",
[]>;
@ -1265,11 +1270,11 @@ def : Pat < (i32 (zextloadi1 ADDRriS11_0:$addr)),
let isPredicable = 1, AddedComplexity = 20 in
def LDriub_indexed : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, s11_0Imm:$offset),
(ins IntRegs:$src1, s11_0Ext:$offset),
"$dst = memub($src1+#$offset)",
[(set (i32 IntRegs:$dst),
(i32 (zextloadi8 (add (i32 IntRegs:$src1),
s11_0ImmPred:$offset))))]>;
s11_0ExtPred:$offset))))]>;
let AddedComplexity = 20 in
def : Pat < (i32 (zextloadi1 (add IntRegs:$src1, s11_0ImmPred:$offset))),
@ -1304,13 +1309,13 @@ def LDriub_cNotPt : LDInst2<(outs IntRegs:$dst),
let neverHasSideEffects = 1, isPredicated = 1 in
def LDriub_indexed_cPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Ext:$src3),
"if ($src1) $dst = memub($src2+#$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def LDriub_indexed_cNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Ext:$src3),
"if (!$src1) $dst = memub($src2+#$src3)",
[]>;
@ -1342,13 +1347,13 @@ def LDriub_cdnNotPt : LDInst2<(outs IntRegs:$dst),
let neverHasSideEffects = 1, isPredicated = 1 in
def LDriub_indexed_cdnPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Ext:$src3),
"if ($src1.new) $dst = memub($src2+#$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def LDriub_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Ext:$src3),
"if (!$src1.new) $dst = memub($src2+#$src3)",
[]>;
@ -1362,11 +1367,11 @@ def LDriuh : LDInst<(outs IntRegs:$dst),
// Indexed load unsigned halfword.
let isPredicable = 1, AddedComplexity = 20 in
def LDriuh_indexed : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, s11_1Imm:$offset),
(ins IntRegs:$src1, s11_1Ext:$offset),
"$dst = memuh($src1+#$offset)",
[(set (i32 IntRegs:$dst),
(i32 (zextloadi16 (add (i32 IntRegs:$src1),
s11_1ImmPred:$offset))))]>;
s11_1ExtPred:$offset))))]>;
let neverHasSideEffects = 1 in
def LDriuh_GP : LDInst2<(outs IntRegs:$dst),
@ -1397,13 +1402,13 @@ def LDriuh_cNotPt : LDInst2<(outs IntRegs:$dst),
let neverHasSideEffects = 1, isPredicated = 1 in
def LDriuh_indexed_cPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Ext:$src3),
"if ($src1) $dst = memuh($src2+#$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def LDriuh_indexed_cNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Ext:$src3),
"if (!$src1) $dst = memuh($src2+#$src3)",
[]>;
@ -1435,13 +1440,13 @@ def LDriuh_cdnNotPt : LDInst2<(outs IntRegs:$dst),
let neverHasSideEffects = 1, isPredicated = 1 in
def LDriuh_indexed_cdnPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Ext:$src3),
"if ($src1.new) $dst = memuh($src2+#$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def LDriuh_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Ext:$src3),
"if (!$src1.new) $dst = memuh($src2+#$src3)",
[]>;
@ -1462,10 +1467,10 @@ def LDriw_pred : LDInst<(outs PredRegs:$dst),
// Indexed load.
let isPredicable = 1, AddedComplexity = 20 in
def LDriw_indexed : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, s11_2Imm:$offset),
(ins IntRegs:$src1, s11_2Ext:$offset),
"$dst = memw($src1+#$offset)",
[(set IntRegs:$dst, (i32 (load (add IntRegs:$src1,
s11_2ImmPred:$offset))))]>;
s11_2ExtPred:$offset))))]>;
let neverHasSideEffects = 1 in
def LDriw_GP : LDInst2<(outs IntRegs:$dst),
@ -1504,13 +1509,13 @@ def LDriw_cNotPt : LDInst2<(outs IntRegs:$dst),
let neverHasSideEffects = 1, isPredicated = 1 in
def LDriw_indexed_cPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Ext:$src3),
"if ($src1) $dst = memw($src2+#$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def LDriw_indexed_cNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Ext:$src3),
"if (!$src1) $dst = memw($src2+#$src3)",
[]>;
@ -1542,13 +1547,13 @@ def LDriw_cdnNotPt : LDInst2<(outs IntRegs:$dst),
let neverHasSideEffects = 1, isPredicated = 1 in
def LDriw_indexed_cdnPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Ext:$src3),
"if ($src1.new) $dst = memw($src2+#$src3)",
[]>;
let neverHasSideEffects = 1, isPredicated = 1 in
def LDriw_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Ext:$src3),
"if (!$src1.new) $dst = memw($src2+#$src3)",
[]>;
@ -1583,10 +1588,10 @@ let Defs = [R29, R30, R31], Uses = [R29], neverHasSideEffects = 1 in {
//===----------------------------------------------------------------------===//
// Multiply and use lower result.
// Rd=+mpyi(Rs,#u8)
def MPYI_riu : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u8Imm:$src2),
def MPYI_riu : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u8Ext:$src2),
"$dst =+ mpyi($src1, #$src2)",
[(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1),
u8ImmPred:$src2))]>;
u8ExtPred:$src2))]>;
// Rd=-mpyi(Rs,#u8)
def MPYI_rin : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, n8Imm:$src2),
@ -1598,10 +1603,10 @@ def MPYI_rin : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, n8Imm:$src2),
// s9 is NOT the same as m9 - but it works.. so far.
// Assembler maps to either Rd=+mpyi(Rs,#u8 or Rd=-mpyi(Rs,#u8)
// depending on the value of m9. See Arch Spec.
def MPYI_ri : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s9Imm:$src2),
def MPYI_ri : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s9Ext:$src2),
"$dst = mpyi($src1, #$src2)",
[(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1),
s9ImmPred:$src2))]>;
s9ExtPred:$src2))]>;
// Rd=mpyi(Rs,Rt)
def MPYI : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
@ -1611,10 +1616,10 @@ def MPYI : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
// Rx+=mpyi(Rs,#u8)
def MPYI_acc_ri : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, u8Imm:$src3),
(ins IntRegs:$src1, IntRegs:$src2, u8Ext:$src3),
"$dst += mpyi($src2, #$src3)",
[(set (i32 IntRegs:$dst),
(add (mul (i32 IntRegs:$src2), u8ImmPred:$src3),
(add (mul (i32 IntRegs:$src2), u8ExtPred:$src3),
(i32 IntRegs:$src1)))],
"$src1 = $dst">;
@ -1629,11 +1634,11 @@ def MPYI_acc_rr : MInst_acc<(outs IntRegs:$dst),
// Rx-=mpyi(Rs,#u8)
def MPYI_sub_ri : MInst_acc<(outs IntRegs:$dst),
(ins IntRegs:$src1, IntRegs:$src2, u8Imm:$src3),
(ins IntRegs:$src1, IntRegs:$src2, u8Ext:$src3),
"$dst -= mpyi($src2, #$src3)",
[(set (i32 IntRegs:$dst),
(sub (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2),
u8ImmPred:$src3)))],
u8ExtPred:$src3)))],
"$src1 = $dst">;
// Multiply and use upper result.
@ -1719,10 +1724,10 @@ def ADDrr_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
"$src1 = $dst">;
def ADDri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
IntRegs:$src2, s8Imm:$src3),
IntRegs:$src2, s8Ext:$src3),
"$dst += add($src2, #$src3)",
[(set (i32 IntRegs:$dst), (add (add (i32 IntRegs:$src2),
s8ImmPred:$src3),
s8_16ExtPred:$src3),
(i32 IntRegs:$src1)))],
"$src1 = $dst">;
@ -1735,11 +1740,11 @@ def SUBrr_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
"$src1 = $dst">;
def SUBri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
IntRegs:$src2, s8Imm:$src3),
IntRegs:$src2, s8Ext:$src3),
"$dst -= add($src2, #$src3)",
[(set (i32 IntRegs:$dst), (sub (i32 IntRegs:$src1),
(add (i32 IntRegs:$src2),
s8ImmPred:$src3)))],
s8_16ExtPred:$src3)))],
"$src1 = $dst">;
//===----------------------------------------------------------------------===//
@ -1787,10 +1792,10 @@ def STrid : STInst<(outs),
// Indexed store double word.
let AddedComplexity = 10, isPredicable = 1 in
def STrid_indexed : STInst<(outs),
(ins IntRegs:$src1, s11_3Imm:$src2, DoubleRegs:$src3),
(ins IntRegs:$src1, s11_3Ext:$src2, DoubleRegs:$src3),
"memd($src1+#$src2) = $src3",
[(store (i64 DoubleRegs:$src3),
(add (i32 IntRegs:$src1), s11_3ImmPred:$src2))]>;
(add (i32 IntRegs:$src1), s11_3ExtPred:$src2))]>;
let neverHasSideEffects = 1 in
def STrid_GP : STInst2<(outs),
@ -1837,7 +1842,7 @@ def STrid_cNotPt : STInst2<(outs),
let AddedComplexity = 10, neverHasSideEffects = 1,
isPredicated = 1 in
def STrid_indexed_cPt : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
(ins PredRegs:$src1, IntRegs:$src2, u6_3Ext:$src3,
DoubleRegs:$src4),
"if ($src1) memd($src2+#$src3) = $src4",
[]>;
@ -1846,7 +1851,7 @@ def STrid_indexed_cPt : STInst2<(outs),
let AddedComplexity = 10, neverHasSideEffects = 1,
isPredicated = 1 in
def STrid_indexed_cNotPt : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
(ins PredRegs:$src1, IntRegs:$src2, u6_3Ext:$src3,
DoubleRegs:$src4),
"if (!$src1) memd($src2+#$src3) = $src4",
[]>;
@ -1883,10 +1888,10 @@ def STrib : STInst<(outs),
let AddedComplexity = 10, isPredicable = 1 in
def STrib_indexed : STInst<(outs),
(ins IntRegs:$src1, s11_0Imm:$src2, IntRegs:$src3),
(ins IntRegs:$src1, s11_0Ext:$src2, IntRegs:$src3),
"memb($src1+#$src2) = $src3",
[(truncstorei8 (i32 IntRegs:$src3), (add (i32 IntRegs:$src1),
s11_0ImmPred:$src2))]>;
s11_0ExtPred:$src2))]>;
// memb(gp+#u16:0)=Rt
let neverHasSideEffects = 1 in
@ -1934,14 +1939,14 @@ def STrib_cNotPt : STInst2<(outs),
// if (Pv) memb(Rs+#u6:0)=Rt
let neverHasSideEffects = 1, isPredicated = 1 in
def STrib_indexed_cPt : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Ext:$src3, IntRegs:$src4),
"if ($src1) memb($src2+#$src3) = $src4",
[]>;
// if (!Pv) memb(Rs+#u6:0)=Rt
let neverHasSideEffects = 1, isPredicated = 1 in
def STrib_indexed_cNotPt : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
(ins PredRegs:$src1, IntRegs:$src2, u6_0Ext:$src3, IntRegs:$src4),
"if (!$src1) memb($src2+#$src3) = $src4",
[]>;
@ -1972,10 +1977,10 @@ def STrih : STInst<(outs),
let AddedComplexity = 10, isPredicable = 1 in
def STrih_indexed : STInst<(outs),
(ins IntRegs:$src1, s11_1Imm:$src2, IntRegs:$src3),
(ins IntRegs:$src1, s11_1Ext:$src2, IntRegs:$src3),
"memh($src1+#$src2) = $src3",
[(truncstorei16 (i32 IntRegs:$src3), (add (i32 IntRegs:$src1),
s11_1ImmPred:$src2))]>;
s11_1ExtPred:$src2))]>;
let neverHasSideEffects = 1 in
def STrih_GP : STInst2<(outs),
@ -2021,14 +2026,14 @@ def STrih_cNotPt : STInst2<(outs),
// if (Pv) memh(Rs+#u6:1)=Rt
let neverHasSideEffects = 1, isPredicated = 1 in
def STrih_indexed_cPt : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Ext:$src3, IntRegs:$src4),
"if ($src1) memh($src2+#$src3) = $src4",
[]>;
// if (!Pv) memh(Rs+#u6:1)=Rt
let neverHasSideEffects = 1, isPredicated = 1 in
def STrih_indexed_cNotPt : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
(ins PredRegs:$src1, IntRegs:$src2, u6_1Ext:$src3, IntRegs:$src4),
"if (!$src1) memh($src2+#$src3) = $src4",
[]>;
@ -2065,10 +2070,10 @@ def STriw : STInst<(outs),
let AddedComplexity = 10, isPredicable = 1 in
def STriw_indexed : STInst<(outs),
(ins IntRegs:$src1, s11_2Imm:$src2, IntRegs:$src3),
(ins IntRegs:$src1, s11_2Ext:$src2, IntRegs:$src3),
"memw($src1+#$src2) = $src3",
[(store (i32 IntRegs:$src3),
(add (i32 IntRegs:$src1), s11_2ImmPred:$src2))]>;
(add (i32 IntRegs:$src1), s11_2ExtPred:$src2))]>;
let neverHasSideEffects = 1 in
def STriw_GP : STInst2<(outs),
@ -2112,14 +2117,14 @@ def STriw_cNotPt : STInst2<(outs),
// if (Pv) memw(Rs+#u6:2)=Rt
let neverHasSideEffects = 1, isPredicated = 1 in
def STriw_indexed_cPt : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Ext:$src3, IntRegs:$src4),
"if ($src1) memw($src2+#$src3) = $src4",
[]>;
// if (!Pv) memw(Rs+#u6:2)=Rt
let neverHasSideEffects = 1, isPredicated = 1 in
def STriw_indexed_cNotPt : STInst2<(outs),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
(ins PredRegs:$src1, IntRegs:$src2, u6_2Ext:$src3, IntRegs:$src4),
"if (!$src1) memw($src2+#$src3) = $src4",
[]>;
@ -2409,12 +2414,24 @@ def LOOP0_i : CRInst<(outs), (ins brtarget:$offset, u10Imm:$src2),
[]>;
}
let neverHasSideEffects = 1, Defs = [SA0, LC0] in {
def LOOP0_iext : CRInst<(outs), (ins brtargetExt:$offset, u10Imm:$src2),
"loop0(##$offset, #$src2)",
[]>;
}
let neverHasSideEffects = 1, Defs = [SA0, LC0] in {
def LOOP0_r : CRInst<(outs), (ins brtarget:$offset, IntRegs:$src2),
"loop0($offset, $src2)",
[]>;
}
let neverHasSideEffects = 1, Defs = [SA0, LC0] in {
def LOOP0_rext : CRInst<(outs), (ins brtargetExt:$offset, IntRegs:$src2),
"loop0(##$offset, $src2)",
[]>;
}
let isBranch = 1, isTerminator = 1, neverHasSideEffects = 1,
Defs = [PC, LC0], Uses = [SA0, LC0] in {
def ENDLOOP0 : Marker<(outs), (ins brtarget:$offset),
@ -3150,8 +3167,8 @@ def : Pat<(i64 (anyext (i32 IntRegs:$src1))),
// Map cmple -> cmpgt.
// rs <= rt -> !(rs > rt).
def : Pat<(i1 (setle (i32 IntRegs:$src1), s10ImmPred:$src2)),
(i1 (NOT_p (CMPGTri (i32 IntRegs:$src1), s10ImmPred:$src2)))>;
def : Pat<(i1 (setle (i32 IntRegs:$src1), s10ExtPred:$src2)),
(i1 (NOT_p (CMPGTri (i32 IntRegs:$src1), s10ExtPred:$src2)))>;
// rs <= rt -> !(rs > rt).
def : Pat<(i1 (setle (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
@ -3164,8 +3181,8 @@ def : Pat<(i1 (setle (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
// Map cmpne -> cmpeq.
// Hexagon_TODO: We should improve on this.
// rs != rt -> !(rs == rt).
def : Pat <(i1 (setne (i32 IntRegs:$src1), s10ImmPred:$src2)),
(i1 (NOT_p(i1 (CMPEQri (i32 IntRegs:$src1), s10ImmPred:$src2))))>;
def : Pat <(i1 (setne (i32 IntRegs:$src1), s10ExtPred:$src2)),
(i1 (NOT_p(i1 (CMPEQri (i32 IntRegs:$src1), s10ExtPred:$src2))))>;
// Map cmpne(Rs) -> !cmpeqe(Rs).
// rs != rt -> !(rs == rt).
@ -3187,8 +3204,8 @@ def : Pat <(i1 (setne (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
def : Pat <(i1 (setge (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
(i1 (NOT_p (i1 (CMPGTrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))))>;
def : Pat <(i1 (setge (i32 IntRegs:$src1), s8ImmPred:$src2)),
(i1 (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2))>;
def : Pat <(i1 (setge (i32 IntRegs:$src1), s8ExtPred:$src2)),
(i1 (CMPGEri (i32 IntRegs:$src1), s8ExtPred:$src2))>;
// Map cmpge(Rss, Rtt) -> !cmpgt(Rtt, Rss).
// rss >= rtt -> !(rtt > rss).
@ -3198,8 +3215,8 @@ def : Pat <(i1 (setge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
// Map cmplt(Rs, Imm) -> !cmpge(Rs, Imm).
// rs < rt -> !(rs >= rt).
def : Pat <(i1 (setlt (i32 IntRegs:$src1), s8ImmPred:$src2)),
(i1 (NOT_p (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2)))>;
def : Pat <(i1 (setlt (i32 IntRegs:$src1), s8ExtPred:$src2)),
(i1 (NOT_p (CMPGEri (i32 IntRegs:$src1), s8ExtPred:$src2)))>;
// Map cmplt(Rs, Rt) -> cmpgt(Rt, Rs).
// rs < rt -> rt > rs.
@ -3224,12 +3241,12 @@ def : Pat <(i1 (setult (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
(i1 (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)))>;
// Generate cmpgeu(Rs, #u8)
def : Pat <(i1 (setuge (i32 IntRegs:$src1), u8ImmPred:$src2)),
(i1 (CMPGEUri (i32 IntRegs:$src1), u8ImmPred:$src2))>;
def : Pat <(i1 (setuge (i32 IntRegs:$src1), u8ExtPred:$src2)),
(i1 (CMPGEUri (i32 IntRegs:$src1), u8ExtPred:$src2))>;
// Generate cmpgtu(Rs, #u9)
def : Pat <(i1 (setugt (i32 IntRegs:$src1), u9ImmPred:$src2)),
(i1 (CMPGTUri (i32 IntRegs:$src1), u9ImmPred:$src2))>;
def : Pat <(i1 (setugt (i32 IntRegs:$src1), u9ExtPred:$src2)),
(i1 (CMPGTUri (i32 IntRegs:$src1), u9ExtPred:$src2))>;
// Map from Rs >= Rt -> !(Rt > Rs).
// rs >= rt -> !(rt > rs).

File diff suppressed because it is too large Load Diff

View File

@ -27,20 +27,20 @@ def CONST32_Float_Real : LDInst<(outs IntRegs:$dst), (ins f32imm:$src1),
// For double precision, use CONST64_float_real, as 64bit transfer
// can only hold 40-bit values - 32 from const ext + 8 bit immediate.
let isMoveImm = 1, isReMaterializable = 1, isPredicable = 1 in
def TFRI_f : ALU32_ri<(outs IntRegs:$dst), (ins f32imm:$src1),
def TFRI_f : ALU32_ri<(outs IntRegs:$dst), (ins f32Ext:$src1),
"$dst = ##$src1",
[(set IntRegs:$dst, fpimm:$src1)]>,
Requires<[HasV5T]>;
def TFRI_cPt_f : ALU32_ri<(outs IntRegs:$dst),
(ins PredRegs:$src1, f32imm:$src2),
(ins PredRegs:$src1, f32Ext:$src2),
"if ($src1) $dst = ##$src2",
[]>,
Requires<[HasV5T]>;
let isPredicated = 1 in
def TFRI_cNotPt_f : ALU32_ri<(outs IntRegs:$dst),
(ins PredRegs:$src1, f32imm:$src2),
(ins PredRegs:$src1, f32Ext:$src2),
"if (!$src1) $dst = ##$src2",
[]>,
Requires<[HasV5T]>;
@ -67,10 +67,10 @@ def LDrid_f : LDInst<(outs DoubleRegs:$dst),
let AddedComplexity = 20 in
def LDrid_indexed_f : LDInst<(outs DoubleRegs:$dst),
(ins IntRegs:$src1, s11_3Imm:$offset),
(ins IntRegs:$src1, s11_3Ext:$offset),
"$dst = memd($src1+#$offset)",
[(set DoubleRegs:$dst, (f64 (load (add IntRegs:$src1,
s11_3ImmPred:$offset))))]>,
s11_3ExtPred:$offset))))]>,
Requires<[HasV5T]>;
def LDriw_f : LDInst<(outs IntRegs:$dst),
@ -81,10 +81,10 @@ def LDriw_f : LDInst<(outs IntRegs:$dst),
let AddedComplexity = 20 in
def LDriw_indexed_f : LDInst<(outs IntRegs:$dst),
(ins IntRegs:$src1, s11_2Imm:$offset),
(ins IntRegs:$src1, s11_2Ext:$offset),
"$dst = memw($src1+#$offset)",
[(set IntRegs:$dst, (f32 (load (add IntRegs:$src1,
s11_2ImmPred:$offset))))]>,
s11_2ExtPred:$offset))))]>,
Requires<[HasV5T]>;
// Store.
@ -96,10 +96,10 @@ def STriw_f : STInst<(outs),
let AddedComplexity = 10 in
def STriw_indexed_f : STInst<(outs),
(ins IntRegs:$src1, s11_2Imm:$src2, IntRegs:$src3),
(ins IntRegs:$src1, s11_2Ext:$src2, IntRegs:$src3),
"memw($src1+#$src2) = $src3",
[(store (f32 IntRegs:$src3),
(add IntRegs:$src1, s11_2ImmPred:$src2))]>,
(add IntRegs:$src1, s11_2ExtPred:$src2))]>,
Requires<[HasV5T]>;
def STrid_f : STInst<(outs),
@ -111,10 +111,10 @@ def STrid_f : STInst<(outs),
// Indexed store double word.
let AddedComplexity = 10 in
def STrid_indexed_f : STInst<(outs),
(ins IntRegs:$src1, s11_3Imm:$src2, DoubleRegs:$src3),
(ins IntRegs:$src1, s11_3Ext:$src2, DoubleRegs:$src3),
"memd($src1+#$src2) = $src3",
[(store (f64 DoubleRegs:$src3),
(add IntRegs:$src1, s11_3ImmPred:$src2))]>,
(add IntRegs:$src1, s11_3ExtPred:$src2))]>,
Requires<[HasV5T]>;

View File

@ -0,0 +1,261 @@
//===---- HexagonOptimizeConstExt.cpp - Optimize Constant Extender Use ----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass traverses all the basic blocks in a function and replaces constant
// extended instructions with their register-based equivalents when the same
// constant is used by more than two instructions.
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "xfer"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "HexagonTargetMachine.h"
#include "HexagonConstExtInfo.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/Support/CommandLine.h"
#define DEBUG_TYPE "xfer"
using namespace llvm;
namespace {
// Machine-function pass that replaces constant-extended instructions with
// their non-extended (register-form) equivalents when the same extended
// constant is used by more than two instructions. The constant is
// materialized once into a virtual register in a common dominator block
// (see runOnMachineFunction).
class HexagonOptimizeConstExt : public MachineFunctionPass {
// Target machine and subtarget this pass was created for.
HexagonTargetMachine& QTM;
const HexagonSubtarget &QST;
public:
static char ID;
HexagonOptimizeConstExt(HexagonTargetMachine& TM)
: MachineFunctionPass(ID), QTM(TM), QST(*TM.getSubtargetImpl()) {}
const char *getPassName() const {
return "Remove sub-optimal uses of constant extenders";
}
// Requires the dominator tree to place the materialized constant in a
// block that dominates all uses; the tree itself is not modified.
void getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
}
bool runOnMachineFunction(MachineFunction &Fn);
// Rewrites oldMI into its non-extended equivalent, substituting DestReg
// for the constant-extended operand. oldMI is erased.
void removeConstExtFromMI (const HexagonInstrInfo *TII, MachineInstr* oldMI,
unsigned DestReg);
};
char HexagonOptimizeConstExt::ID = 0;
// Remove constant extended instructions with the corresponding non-extended
// instruction.
// Remove constant extended instructions with the corresponding non-extended
// instruction.
//
// Builds the non-extended equivalent of oldMI in place: the operand at the
// constant-extender position (CExtOpNum) becomes DestReg (which holds the
// materialized constant), every other operand is copied verbatim, and for
// loads/stores an immediate offset operand is appended as required by the
// base+offset form. oldMI is erased at the end.
void HexagonOptimizeConstExt::removeConstExtFromMI (const HexagonInstrInfo *TII,
MachineInstr* oldMI,
unsigned DestReg) {
assert(HexagonConstExt::NonExtEquivalentExists(oldMI->getOpcode()) &&
"Non-extended equivalent instruction doesn't exist");
MachineBasicBlock *MBB = oldMI->getParent ();
int oldOpCode = oldMI->getOpcode();
// Index of the operand that carries the extended constant.
unsigned short CExtOpNum = HexagonConstExt::getCExtOpNum(oldOpCode);
unsigned numOperands = oldMI->getNumOperands();
// New instruction is inserted immediately before oldMI with its debug loc.
MachineInstrBuilder MIB = BuildMI(*MBB, oldMI, oldMI->getDebugLoc(),
TII->get(HexagonConstExt::getNonExtOpcode(oldMI->getOpcode())));
for (unsigned i = 0; i < numOperands; ++i) {
if (i == CExtOpNum) {
// The extended constant is replaced by the register holding its value.
MIB.addReg(DestReg);
if (oldMI->getDesc().mayLoad()) {
// As of now, only absolute addressing mode instructions can load from
// global addresses. Other addressing modes allow only constant
// literals. Load with absolute addressing mode gets replaced with the
// corresponding base+offset load.
if (oldMI->getOperand(i).isGlobal()) {
MIB.addImm(oldMI->getOperand(i).getOffset());
}
else
MIB.addImm(0);
}
else if (oldMI->getDesc().mayStore()){
if (oldMI->getOperand(i).isGlobal()) {
// If stored value is a global address and is extended, it is required
// to have 0 offset.
// NOTE(review): when the extended operand is the stored value (last
// operand) no offset operand is appended — confirm this matches the
// non-extended opcode's operand list.
if (CExtOpNum == (numOperands-1))
assert((oldMI->getOperand(i).getOffset()==0) && "Invalid Offset");
else
MIB.addImm(oldMI->getOperand(i).getOffset());
}
else if (CExtOpNum != (numOperands-1))
MIB.addImm(0);
}
}
else {
// Non-extended operands carry over unchanged.
const MachineOperand &op = oldMI->getOperand(i);
MIB.addOperand(op);
}
}
DEBUG(dbgs () << "Removing old instr: " << *oldMI << "\n");
DEBUG(dbgs() << "New instr: " << (*MIB) << "\n");
oldMI->eraseFromParent();
}
// Returns false for the following instructions, since it may not be profitable
// to convert these instructions into a non-extended instruction if the offset
// is non-zero.
static bool canHaveAnyOffset(MachineInstr* MI) {
  // Store-immediate-to-absolute-address forms are the only opcodes for which
  // a non-zero offset makes the non-extended replacement unprofitable; every
  // other opcode may keep any offset.
  const unsigned Opc = MI->getOpcode();
  return Opc != Hexagon::STriw_offset_ext_V4 &&
         Opc != Hexagon::STrih_offset_ext_V4;
}
// Pass entry point. Two phases:
//   1. Scan every instruction and bucket constant-extended instructions (that
//      have a non-extended equivalent) by the textual form of their extended
//      value (immediate or global-address name).
//   2. For each value used by more than two such instructions, materialize it
//      once (TFRI / TFRI_V4) in the nearest common dominator block and rewrite
//      the dominated users via removeConstExtFromMI.
// Always reports the function as modified.
bool HexagonOptimizeConstExt::runOnMachineFunction(MachineFunction &Fn) {
const HexagonInstrInfo *TII = QTM.getInstrInfo();
MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
// CExtMap maintains a list of instructions for each constant extended value.
// It also keeps a flag for the value to indicate if it's a global address
// or a constant literal.
StringMap<std::pair<SmallVector<MachineInstr*, 8>, bool > > CExtMap;
// Loop over all the basic blocks
for (MachineFunction::iterator MBBb = Fn.begin(), MBBe = Fn.end();
MBBb != MBBe; ++MBBb) {
MachineBasicBlock* MBB = MBBb;
// Traverse the basic block and update a map of (ImmValue->MI)
MachineBasicBlock::iterator MII = MBB->begin();
MachineBasicBlock::iterator MIE = MBB->end ();
while (MII != MIE) {
MachineInstr *MI = MII;
// Check if the instruction has any constant extended operand and also has
// a non-extended equivalent.
if (TII->isConstExtended(MI) &&
HexagonConstExt::NonExtEquivalentExists(MI->getOpcode())) {
short ExtOpNum = HexagonConstExt::getCExtOpNum(MI->getOpcode());
SmallString<256> TmpData;
if (MI->getOperand(ExtOpNum).isImm()) {
DEBUG(dbgs() << "Selected for replacement : " << *MI << "\n");
int ImmValue = MI->getOperand(ExtOpNum).getImm();
// Immediates are keyed by their decimal string form; flag = false
// (matches OpType::imm below).
StringRef ExtValue = Twine(ImmValue).toStringRef(TmpData);
CExtMap[ExtValue].first.push_back(MI);
CExtMap[ExtValue].second = false;
}
else if (MI->getOperand(ExtOpNum).isGlobal()) {
// Globals are keyed by name; flag = true (OpType::GlobalAddr).
StringRef ExtValue = MI->getOperand(ExtOpNum).getGlobal()->getName();
// If stored value is constant extended and has an offset, it's not
// profitable to replace these instructions with the non-extended
// version.
if (MI->getOperand(ExtOpNum).getOffset() == 0
|| canHaveAnyOffset(MI)) {
DEBUG(dbgs() << "Selected for replacement : " << *MI << "\n");
CExtMap[ExtValue].first.push_back(MI);
CExtMap[ExtValue].second = true;
}
}
}
++MII;
} // While ends
}
// NOTE(review): the bool flag stored in CExtMap is compared against these
// enumerators below (imm==0, GlobalAddr==1) — works, but an explicit enum
// field would be clearer; confirm intent.
enum OpType {imm, GlobalAddr};
// Process the constants that have been extended.
for (StringMap<std::pair<SmallVector<MachineInstr*, 8>, bool> >::iterator II=
CExtMap.begin(), IE = CExtMap.end(); II != IE; ++II) {
SmallVector<MachineInstr*, 8> &MIList = (*II).second.first;
// Replace the constant extended instructions with the non-extended
// equivalent if more than 2 instructions extend the same constant value.
if (MIList.size() <= 2)
continue;
bool ExtOpType = (*II).second.second;
// NOTE(review): getKeyData() returns a char pointer; getKey() would yield
// the length-bounded StringRef directly — confirm keys are NUL-terminated.
StringRef ExtValue = (*II).getKeyData();
const GlobalValue *GV = NULL;
unsigned char TargetFlags=0;
int ExtOpNum = HexagonConstExt::getCExtOpNum(MIList[0]->getOpcode());
SmallVector<MachineBasicBlock*, 8> MachineBlocks;
if (ExtOpType == GlobalAddr) {
// Take the global and its target flags from the first user; all entries
// in this bucket share the same global name.
GV = MIList[0]->getOperand(ExtOpNum).getGlobal();
TargetFlags = MIList[0]->getOperand(ExtOpNum).getTargetFlags();
}
// For each instruction in the list, record the block it belongs to.
for (SmallVector<MachineInstr*, 8>::iterator LB = MIList.begin(),
LE = MIList.end(); LB != LE; ++LB) {
MachineInstr *MI = (*LB);
MachineBlocks.push_back (MI->getParent());
}
MachineBasicBlock* CommDomBlock = MachineBlocks[0];
MachineBasicBlock* oldCommDomBlock = NULL;
// replaceMIs is the list of instructions to be replaced with a
// non-extended equivalent instruction.
// The idea here is that not all the instructions in the MIList will
// be replaced with a register.
SmallVector<MachineInstr*, 8> replaceMIs;
replaceMIs.push_back(MIList[0]);
// Grow the common dominator incrementally; stop at the first block with
// no common dominator and only replace the users gathered so far.
for (unsigned i= 1; i < MachineBlocks.size(); ++i) {
oldCommDomBlock = CommDomBlock;
MachineBasicBlock *BB = MachineBlocks[i];
CommDomBlock = MDT.findNearestCommonDominator(&(*CommDomBlock),
&(*BB));
if (!CommDomBlock) {
CommDomBlock = oldCommDomBlock;
break;
}
replaceMIs.push_back(MIList[i]);
}
// Insert into CommDomBlock.
if (CommDomBlock) {
// Materialize the constant once into a fresh i32 virtual register.
unsigned DestReg = TII->createVR (CommDomBlock->getParent(), MVT::i32);
MachineInstr *firstMI = CommDomBlock->getFirstNonPHI();
if (ExtOpType == imm) {
int ImmValue = 0;
// NOTE(review): getAsInteger's error return is ignored; the key was
// produced by Twine(int) above so it should always parse — confirm.
ExtValue.getAsInteger(10,ImmValue);
BuildMI (*CommDomBlock, firstMI, firstMI->getDebugLoc(),
TII->get(Hexagon::TFRI), DestReg)
.addImm(ImmValue);
}
else {
BuildMI (*CommDomBlock, firstMI, firstMI->getDebugLoc(),
TII->get(Hexagon::TFRI_V4), DestReg)
.addGlobalAddress(GV, 0, TargetFlags);
}
for (unsigned i= 0; i < replaceMIs.size(); i++) {
MachineInstr *oldMI = replaceMIs[i];
removeConstExtFromMI(TII, oldMI, DestReg);
}
replaceMIs.clear();
}
}
return true;
}
}
//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//
// Factory used by the Hexagon pass pipeline (see HexagonTargetMachine's
// addPreRegAlloc); caller owns the returned pass.
FunctionPass *
llvm::createHexagonOptimizeConstExt(HexagonTargetMachine &TM) {
return new HexagonOptimizeConstExt(TM);
}

View File

@ -28,6 +28,10 @@ static cl::
opt<bool> DisableHardwareLoops(
"disable-hexagon-hwloops", cl::Hidden,
cl::desc("Disable Hardware Loops for Hexagon target"));
static cl::
opt<bool> DisableCExtOpt(
"disable-hexagon-cextopt", cl::Hidden,
cl::desc("Disable Optimization of Constant Extenders"));
/// HexagonTargetMachineModule - Note that this is used on hosts that
/// cannot link in a library unless there are references into the
@ -110,6 +114,9 @@ bool HexagonPassConfig::addInstSelector() {
bool HexagonPassConfig::addPreRegAlloc() {
if (!DisableCExtOpt) {
PM->add(createHexagonOptimizeConstExt(getHexagonTargetMachine()));
}
if (!DisableHardwareLoops) {
PM->add(createHexagonHardwareLoops());
}

View File

@ -257,7 +257,7 @@ void HexagonPacketizerList::reserveResourcesForConstExt(MachineInstr* MI) {
bool HexagonPacketizerList::canReserveResourcesForConstExt(MachineInstr *MI) {
const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
assert(QII->isExtended(MI) &&
assert((QII->isExtended(MI) || QII->isConstExtended(MI)) &&
"Should only be called for constant extended instructions");
MachineFunction *MF = MI->getParent()->getParent();
MachineInstr *PseudoMI = MF->CreateMachineInstr(QII->get(Hexagon::IMMEXT),
@ -394,6 +394,16 @@ bool HexagonPacketizerList::IsNewifyStore (MachineInstr* MI) {
case Hexagon::POST_STbri_cdnPt_V4:
case Hexagon::POST_STbri_cNotPt:
case Hexagon::POST_STbri_cdnNotPt_V4:
case Hexagon::STrib_abs_V4:
case Hexagon::STrib_abs_cPt_V4:
case Hexagon::STrib_abs_cdnPt_V4:
case Hexagon::STrib_abs_cNotPt_V4:
case Hexagon::STrib_abs_cdnNotPt_V4:
case Hexagon::STrib_imm_abs_V4:
case Hexagon::STrib_imm_abs_cPt_V4:
case Hexagon::STrib_imm_abs_cdnPt_V4:
case Hexagon::STrib_imm_abs_cNotPt_V4:
case Hexagon::STrib_imm_abs_cdnNotPt_V4:
case Hexagon::STb_GP_cPt_V4:
case Hexagon::STb_GP_cNotPt_V4:
case Hexagon::STb_GP_cdnPt_V4:
@ -427,6 +437,16 @@ bool HexagonPacketizerList::IsNewifyStore (MachineInstr* MI) {
case Hexagon::POST_SThri_cdnPt_V4:
case Hexagon::POST_SThri_cNotPt:
case Hexagon::POST_SThri_cdnNotPt_V4:
case Hexagon::STrih_abs_V4:
case Hexagon::STrih_abs_cPt_V4:
case Hexagon::STrih_abs_cdnPt_V4:
case Hexagon::STrih_abs_cNotPt_V4:
case Hexagon::STrih_abs_cdnNotPt_V4:
case Hexagon::STrih_imm_abs_V4:
case Hexagon::STrih_imm_abs_cPt_V4:
case Hexagon::STrih_imm_abs_cdnPt_V4:
case Hexagon::STrih_imm_abs_cNotPt_V4:
case Hexagon::STrih_imm_abs_cdnNotPt_V4:
case Hexagon::STh_GP_cPt_V4:
case Hexagon::STh_GP_cNotPt_V4:
case Hexagon::STh_GP_cdnPt_V4:
@ -460,6 +480,16 @@ bool HexagonPacketizerList::IsNewifyStore (MachineInstr* MI) {
case Hexagon::POST_STwri_cdnPt_V4:
case Hexagon::POST_STwri_cNotPt:
case Hexagon::POST_STwri_cdnNotPt_V4:
case Hexagon::STriw_abs_V4:
case Hexagon::STriw_abs_cPt_V4:
case Hexagon::STriw_abs_cdnPt_V4:
case Hexagon::STriw_abs_cNotPt_V4:
case Hexagon::STriw_abs_cdnNotPt_V4:
case Hexagon::STriw_imm_abs_V4:
case Hexagon::STriw_imm_abs_cPt_V4:
case Hexagon::STriw_imm_abs_cdnPt_V4:
case Hexagon::STriw_imm_abs_cNotPt_V4:
case Hexagon::STriw_imm_abs_cdnNotPt_V4:
case Hexagon::STw_GP_cPt_V4:
case Hexagon::STw_GP_cNotPt_V4:
case Hexagon::STw_GP_cdnPt_V4:
@ -752,6 +782,98 @@ static int GetDotNewOp(const int opc) {
case Hexagon::POST_STwri_cdnNotPt_V4:
return Hexagon::POST_STwri_cdnNotPt_nv_V4;
// Absolute addressing mode -- global address
case Hexagon::STrib_abs_V4:
return Hexagon::STrib_abs_nv_V4;
case Hexagon::STrib_abs_cPt_V4:
return Hexagon::STrib_abs_cPt_nv_V4;
case Hexagon::STrib_abs_cdnPt_V4:
return Hexagon::STrib_abs_cdnPt_nv_V4;
case Hexagon::STrib_abs_cNotPt_V4:
return Hexagon::STrib_abs_cNotPt_nv_V4;
case Hexagon::STrib_abs_cdnNotPt_V4:
return Hexagon::STrib_abs_cdnNotPt_nv_V4;
case Hexagon::STrih_abs_V4:
return Hexagon::STrih_abs_nv_V4;
case Hexagon::STrih_abs_cPt_V4:
return Hexagon::STrih_abs_cPt_nv_V4;
case Hexagon::STrih_abs_cdnPt_V4:
return Hexagon::STrih_abs_cdnPt_nv_V4;
case Hexagon::STrih_abs_cNotPt_V4:
return Hexagon::STrih_abs_cNotPt_nv_V4;
case Hexagon::STrih_abs_cdnNotPt_V4:
return Hexagon::STrih_abs_cdnNotPt_nv_V4;
case Hexagon::STriw_abs_V4:
return Hexagon::STriw_abs_nv_V4;
case Hexagon::STriw_abs_cPt_V4:
return Hexagon::STriw_abs_cPt_nv_V4;
case Hexagon::STriw_abs_cdnPt_V4:
return Hexagon::STriw_abs_cdnPt_nv_V4;
case Hexagon::STriw_abs_cNotPt_V4:
return Hexagon::STriw_abs_cNotPt_nv_V4;
case Hexagon::STriw_abs_cdnNotPt_V4:
return Hexagon::STriw_abs_cdnNotPt_nv_V4;
// Absolute addressing mode -- immediate value
case Hexagon::STrib_imm_abs_V4:
return Hexagon::STrib_imm_abs_nv_V4;
case Hexagon::STrib_imm_abs_cPt_V4:
return Hexagon::STrib_imm_abs_cPt_nv_V4;
case Hexagon::STrib_imm_abs_cdnPt_V4:
return Hexagon::STrib_imm_abs_cdnPt_nv_V4;
case Hexagon::STrib_imm_abs_cNotPt_V4:
return Hexagon::STrib_imm_abs_cNotPt_nv_V4;
case Hexagon::STrib_imm_abs_cdnNotPt_V4:
return Hexagon::STrib_imm_abs_cdnNotPt_nv_V4;
case Hexagon::STrih_imm_abs_V4:
return Hexagon::STrih_imm_abs_nv_V4;
case Hexagon::STrih_imm_abs_cPt_V4:
return Hexagon::STrih_imm_abs_cPt_nv_V4;
case Hexagon::STrih_imm_abs_cdnPt_V4:
return Hexagon::STrih_imm_abs_cdnPt_nv_V4;
case Hexagon::STrih_imm_abs_cNotPt_V4:
return Hexagon::STrih_imm_abs_cNotPt_nv_V4;
case Hexagon::STrih_imm_abs_cdnNotPt_V4:
return Hexagon::STrih_imm_abs_cdnNotPt_nv_V4;
case Hexagon::STriw_imm_abs_V4:
return Hexagon::STriw_imm_abs_nv_V4;
case Hexagon::STriw_imm_abs_cPt_V4:
return Hexagon::STriw_imm_abs_cPt_nv_V4;
case Hexagon::STriw_imm_abs_cdnPt_V4:
return Hexagon::STriw_imm_abs_cdnPt_nv_V4;
case Hexagon::STriw_imm_abs_cNotPt_V4:
return Hexagon::STriw_imm_abs_cNotPt_nv_V4;
case Hexagon::STriw_imm_abs_cdnNotPt_V4:
return Hexagon::STriw_imm_abs_cdnNotPt_nv_V4;
case Hexagon::STw_GP_cPt_V4:
return Hexagon::STw_GP_cPt_nv_V4;
@ -1404,6 +1526,103 @@ static int GetDotNewPredOp(const int opc) {
return Hexagon::ZXTH_cdnPt_V4;
case Hexagon::ZXTH_cNotPt_V4 :
return Hexagon::ZXTH_cdnNotPt_V4;
// Load Absolute Addressing.
case Hexagon::LDrib_abs_cPt_V4 :
return Hexagon::LDrib_abs_cdnPt_V4;
case Hexagon::LDrib_abs_cNotPt_V4 :
return Hexagon::LDrib_abs_cdnNotPt_V4;
case Hexagon::LDriub_abs_cPt_V4 :
return Hexagon::LDriub_abs_cdnPt_V4;
case Hexagon::LDriub_abs_cNotPt_V4 :
return Hexagon::LDriub_abs_cdnNotPt_V4;
case Hexagon::LDrih_abs_cPt_V4 :
return Hexagon::LDrih_abs_cdnPt_V4;
case Hexagon::LDrih_abs_cNotPt_V4 :
return Hexagon::LDrih_abs_cdnNotPt_V4;
case Hexagon::LDriuh_abs_cPt_V4 :
return Hexagon::LDriuh_abs_cdnPt_V4;
case Hexagon::LDriuh_abs_cNotPt_V4 :
return Hexagon::LDriuh_abs_cdnNotPt_V4;
case Hexagon::LDriw_abs_cPt_V4 :
return Hexagon::LDriw_abs_cdnPt_V4;
case Hexagon::LDriw_abs_cNotPt_V4 :
return Hexagon::LDriw_abs_cdnNotPt_V4;
case Hexagon::LDrid_abs_cPt_V4 :
return Hexagon::LDrid_abs_cdnPt_V4;
case Hexagon::LDrid_abs_cNotPt_V4 :
return Hexagon::LDrid_abs_cdnNotPt_V4;
case Hexagon::LDrib_imm_abs_cPt_V4:
return Hexagon::LDrib_imm_abs_cdnPt_V4;
case Hexagon::LDrib_imm_abs_cNotPt_V4:
return Hexagon::LDrib_imm_abs_cdnNotPt_V4;
case Hexagon::LDriub_imm_abs_cPt_V4:
return Hexagon::LDriub_imm_abs_cdnPt_V4;
case Hexagon::LDriub_imm_abs_cNotPt_V4:
return Hexagon::LDriub_imm_abs_cdnNotPt_V4;
case Hexagon::LDrih_imm_abs_cPt_V4:
return Hexagon::LDrih_imm_abs_cdnPt_V4;
case Hexagon::LDrih_imm_abs_cNotPt_V4:
return Hexagon::LDrih_imm_abs_cdnNotPt_V4;
case Hexagon::LDriuh_imm_abs_cPt_V4:
return Hexagon::LDriuh_imm_abs_cdnPt_V4;
case Hexagon::LDriuh_imm_abs_cNotPt_V4:
return Hexagon::LDriuh_imm_abs_cdnNotPt_V4;
case Hexagon::LDriw_imm_abs_cPt_V4:
return Hexagon::LDriw_imm_abs_cdnPt_V4;
case Hexagon::LDriw_imm_abs_cNotPt_V4:
return Hexagon::LDriw_imm_abs_cdnNotPt_V4;
// Store Absolute Addressing.
case Hexagon::STrib_abs_cPt_V4 :
return Hexagon::STrib_abs_cdnPt_V4;
case Hexagon::STrib_abs_cNotPt_V4 :
return Hexagon::STrib_abs_cdnNotPt_V4;
case Hexagon::STrih_abs_cPt_V4 :
return Hexagon::STrih_abs_cdnPt_V4;
case Hexagon::STrih_abs_cNotPt_V4 :
return Hexagon::STrih_abs_cdnNotPt_V4;
case Hexagon::STriw_abs_cPt_V4 :
return Hexagon::STriw_abs_cdnPt_V4;
case Hexagon::STriw_abs_cNotPt_V4 :
return Hexagon::STriw_abs_cdnNotPt_V4;
case Hexagon::STrid_abs_cPt_V4 :
return Hexagon::STrid_abs_cdnPt_V4;
case Hexagon::STrid_abs_cNotPt_V4 :
return Hexagon::STrid_abs_cdnNotPt_V4;
case Hexagon::STrib_imm_abs_cPt_V4:
return Hexagon::STrib_imm_abs_cdnPt_V4;
case Hexagon::STrib_imm_abs_cNotPt_V4:
return Hexagon::STrib_imm_abs_cdnNotPt_V4;
case Hexagon::STrih_imm_abs_cPt_V4:
return Hexagon::STrih_imm_abs_cdnPt_V4;
case Hexagon::STrih_imm_abs_cNotPt_V4:
return Hexagon::STrih_imm_abs_cdnNotPt_V4;
case Hexagon::STriw_imm_abs_cPt_V4:
return Hexagon::STriw_imm_abs_cdnPt_V4;
case Hexagon::STriw_imm_abs_cNotPt_V4:
return Hexagon::STriw_imm_abs_cdnNotPt_V4;
case Hexagon::TFRI_cPt_V4:
return Hexagon::TFRI_cdnPt_V4;
case Hexagon::TFRI_cNotPt_V4:
return Hexagon::TFRI_cdnNotPt_V4;
}
}
@ -1431,7 +1650,6 @@ bool HexagonPacketizerList::isCondInst (MachineInstr* MI) {
return false;
}
// Promote an instruction to its .new form.
// At this time, we have already made a call to CanPromoteToDotNew
// and made sure that it can *indeed* be promoted.
@ -2140,6 +2358,159 @@ static int GetDotOldOp(const int opc) {
case Hexagon::POST_STdri_cdnNotPt_V4 :
return Hexagon::POST_STdri_cNotPt;
// Absolute addressing mode - global address
case Hexagon::STrib_abs_nv_V4:
return Hexagon::STrib_abs_V4;
case Hexagon::STrib_abs_cdnPt_V4:
case Hexagon::STrib_abs_cPt_nv_V4:
case Hexagon::STrib_abs_cdnPt_nv_V4:
return Hexagon::STrib_abs_cPt_V4;
case Hexagon::STrib_abs_cdnNotPt_V4:
case Hexagon::STrib_abs_cNotPt_nv_V4:
case Hexagon::STrib_abs_cdnNotPt_nv_V4:
return Hexagon::STrib_abs_cNotPt_V4;
case Hexagon::STrih_abs_nv_V4:
return Hexagon::STrih_abs_V4;
case Hexagon::STrih_abs_cdnPt_V4:
case Hexagon::STrih_abs_cPt_nv_V4:
case Hexagon::STrih_abs_cdnPt_nv_V4:
return Hexagon::STrih_abs_cPt_V4;
case Hexagon::STrih_abs_cdnNotPt_V4:
case Hexagon::STrih_abs_cNotPt_nv_V4:
case Hexagon::STrih_abs_cdnNotPt_nv_V4:
return Hexagon::STrih_abs_cNotPt_V4;
case Hexagon::STriw_abs_nv_V4:
return Hexagon::STriw_abs_V4;
case Hexagon::STriw_abs_cdnPt_V4:
case Hexagon::STriw_abs_cPt_nv_V4:
case Hexagon::STriw_abs_cdnPt_nv_V4:
return Hexagon::STriw_abs_cPt_V4;
case Hexagon::STriw_abs_cdnNotPt_V4:
case Hexagon::STriw_abs_cNotPt_nv_V4:
case Hexagon::STriw_abs_cdnNotPt_nv_V4:
return Hexagon::STriw_abs_cNotPt_V4;
case Hexagon::STrid_abs_cdnPt_V4:
return Hexagon::STrid_abs_cPt_V4;
case Hexagon::STrid_abs_cdnNotPt_V4:
return Hexagon::STrid_abs_cNotPt_V4;
// Absolute addressing mode - immediate values
case Hexagon::STrib_imm_abs_nv_V4:
return Hexagon::STrib_imm_abs_V4;
case Hexagon::STrib_imm_abs_cdnPt_V4:
case Hexagon::STrib_imm_abs_cPt_nv_V4:
case Hexagon::STrib_imm_abs_cdnPt_nv_V4:
return Hexagon::STrib_imm_abs_cPt_V4;
case Hexagon::STrib_imm_abs_cdnNotPt_V4:
case Hexagon::STrib_imm_abs_cNotPt_nv_V4:
case Hexagon::STrib_imm_abs_cdnNotPt_nv_V4:
return Hexagon::STrib_imm_abs_cNotPt_V4;
case Hexagon::STrih_imm_abs_nv_V4:
return Hexagon::STrih_imm_abs_V4;
case Hexagon::STrih_imm_abs_cdnPt_V4:
case Hexagon::STrih_imm_abs_cPt_nv_V4:
case Hexagon::STrih_imm_abs_cdnPt_nv_V4:
return Hexagon::STrih_imm_abs_cPt_V4;
case Hexagon::STrih_imm_abs_cdnNotPt_V4:
case Hexagon::STrih_imm_abs_cNotPt_nv_V4:
case Hexagon::STrih_imm_abs_cdnNotPt_nv_V4:
return Hexagon::STrih_imm_abs_cNotPt_V4;
case Hexagon::STriw_imm_abs_nv_V4:
return Hexagon::STriw_imm_abs_V4;
case Hexagon::STriw_imm_abs_cdnPt_V4:
case Hexagon::STriw_imm_abs_cPt_nv_V4:
case Hexagon::STriw_imm_abs_cdnPt_nv_V4:
return Hexagon::STriw_imm_abs_cPt_V4;
case Hexagon::STriw_imm_abs_cdnNotPt_V4:
case Hexagon::STriw_imm_abs_cNotPt_nv_V4:
case Hexagon::STriw_imm_abs_cdnNotPt_nv_V4:
return Hexagon::STriw_imm_abs_cNotPt_V4;
// Load - absolute set addressing
case Hexagon::LDrib_abs_cdnPt_V4:
return Hexagon::LDrib_abs_cPt_V4;
case Hexagon::LDrib_abs_cdnNotPt_V4:
return Hexagon::LDrib_abs_cNotPt_V4;
case Hexagon::LDriub_abs_cdnPt_V4:
return Hexagon::LDriub_abs_cPt_V4;
case Hexagon::LDriub_abs_cdnNotPt_V4:
return Hexagon::LDriub_abs_cNotPt_V4;
case Hexagon::LDrih_abs_cdnPt_V4:
return Hexagon::LDrih_abs_cPt_V4;
case Hexagon::LDrih_abs_cdnNotPt_V4:
return Hexagon::LDrih_abs_cNotPt_V4;
case Hexagon::LDriuh_abs_cdnPt_V4:
return Hexagon::LDriuh_abs_cPt_V4;
case Hexagon::LDriuh_abs_cdnNotPt_V4:
return Hexagon::LDriuh_abs_cNotPt_V4;
case Hexagon::LDriw_abs_cdnPt_V4:
return Hexagon::LDriw_abs_cPt_V4;
case Hexagon::LDriw_abs_cdnNotPt_V4:
return Hexagon::LDriw_abs_cNotPt_V4;
case Hexagon::LDrid_abs_cdnPt_V4:
return Hexagon::LDrid_abs_cPt_V4;
case Hexagon::LDrid_abs_cdnNotPt_V4:
return Hexagon::LDrid_abs_cNotPt_V4;
case Hexagon::LDrib_imm_abs_cdnPt_V4:
return Hexagon::LDrib_imm_abs_cPt_V4;
case Hexagon::LDrib_imm_abs_cdnNotPt_V4:
return Hexagon::LDrib_imm_abs_cNotPt_V4;
case Hexagon::LDriub_imm_abs_cdnPt_V4:
return Hexagon::LDriub_imm_abs_cPt_V4;
case Hexagon::LDriub_imm_abs_cdnNotPt_V4:
return Hexagon::LDriub_imm_abs_cNotPt_V4;
case Hexagon::LDrih_imm_abs_cdnPt_V4:
return Hexagon::LDrih_imm_abs_cPt_V4;
case Hexagon::LDrih_imm_abs_cdnNotPt_V4:
return Hexagon::LDrih_imm_abs_cNotPt_V4;
case Hexagon::LDriuh_imm_abs_cdnPt_V4:
return Hexagon::LDriuh_imm_abs_cPt_V4;
case Hexagon::LDriuh_imm_abs_cdnNotPt_V4:
return Hexagon::LDriuh_imm_abs_cNotPt_V4;
case Hexagon::LDriw_imm_abs_cdnPt_V4:
return Hexagon::LDriw_imm_abs_cPt_V4;
case Hexagon::LDriw_imm_abs_cdnNotPt_V4:
return Hexagon::LDriw_imm_abs_cNotPt_V4;
case Hexagon::STd_GP_cdnPt_V4 :
return Hexagon::STd_GP_cPt_V4;
@ -2298,6 +2669,46 @@ static bool GetPredicateSense(MachineInstr* MI,
case Hexagon::ZXTB_cdnPt_V4 :
case Hexagon::ZXTH_cPt_V4 :
case Hexagon::ZXTH_cdnPt_V4 :
case Hexagon::LDrib_abs_cPt_V4 :
case Hexagon::LDrib_abs_cdnPt_V4:
case Hexagon::LDriub_abs_cPt_V4 :
case Hexagon::LDriub_abs_cdnPt_V4:
case Hexagon::LDrih_abs_cPt_V4 :
case Hexagon::LDrih_abs_cdnPt_V4:
case Hexagon::LDriuh_abs_cPt_V4 :
case Hexagon::LDriuh_abs_cdnPt_V4:
case Hexagon::LDriw_abs_cPt_V4 :
case Hexagon::LDriw_abs_cdnPt_V4:
case Hexagon::LDrid_abs_cPt_V4 :
case Hexagon::LDrid_abs_cdnPt_V4:
case Hexagon::LDrib_imm_abs_cPt_V4 :
case Hexagon::LDrib_imm_abs_cdnPt_V4:
case Hexagon::LDriub_imm_abs_cPt_V4 :
case Hexagon::LDriub_imm_abs_cdnPt_V4:
case Hexagon::LDrih_imm_abs_cPt_V4 :
case Hexagon::LDrih_imm_abs_cdnPt_V4:
case Hexagon::LDriuh_imm_abs_cPt_V4 :
case Hexagon::LDriuh_imm_abs_cdnPt_V4:
case Hexagon::LDriw_imm_abs_cPt_V4 :
case Hexagon::LDriw_imm_abs_cdnPt_V4:
case Hexagon::STrib_abs_cPt_V4:
case Hexagon::STrib_abs_cdnPt_V4:
case Hexagon::STrih_abs_cPt_V4:
case Hexagon::STrih_abs_cdnPt_V4:
case Hexagon::STriw_abs_cPt_V4:
case Hexagon::STriw_abs_cdnPt_V4:
case Hexagon::STrid_abs_cPt_V4:
case Hexagon::STrid_abs_cdnPt_V4:
case Hexagon::STrib_imm_abs_cPt_V4:
case Hexagon::STrib_imm_abs_cdnPt_V4:
case Hexagon::STrih_imm_abs_cPt_V4:
case Hexagon::STrih_imm_abs_cdnPt_V4:
case Hexagon::STriw_imm_abs_cPt_V4:
case Hexagon::STriw_imm_abs_cdnPt_V4:
case Hexagon::LDrid_GP_cPt_V4 :
case Hexagon::LDrib_GP_cPt_V4 :
case Hexagon::LDriub_GP_cPt_V4 :
@ -2470,6 +2881,45 @@ static bool GetPredicateSense(MachineInstr* MI,
case Hexagon::ZXTH_cNotPt_V4 :
case Hexagon::ZXTH_cdnNotPt_V4 :
case Hexagon::LDrib_abs_cNotPt_V4:
case Hexagon::LDrib_abs_cdnNotPt_V4:
case Hexagon::LDriub_abs_cNotPt_V4 :
case Hexagon::LDriub_abs_cdnNotPt_V4:
case Hexagon::LDrih_abs_cNotPt_V4 :
case Hexagon::LDrih_abs_cdnNotPt_V4:
case Hexagon::LDriuh_abs_cNotPt_V4 :
case Hexagon::LDriuh_abs_cdnNotPt_V4:
case Hexagon::LDriw_abs_cNotPt_V4 :
case Hexagon::LDriw_abs_cdnNotPt_V4:
case Hexagon::LDrid_abs_cNotPt_V4 :
case Hexagon::LDrid_abs_cdnNotPt_V4:
case Hexagon::LDrib_imm_abs_cNotPt_V4:
case Hexagon::LDrib_imm_abs_cdnNotPt_V4:
case Hexagon::LDriub_imm_abs_cNotPt_V4 :
case Hexagon::LDriub_imm_abs_cdnNotPt_V4:
case Hexagon::LDrih_imm_abs_cNotPt_V4 :
case Hexagon::LDrih_imm_abs_cdnNotPt_V4:
case Hexagon::LDriuh_imm_abs_cNotPt_V4 :
case Hexagon::LDriuh_imm_abs_cdnNotPt_V4:
case Hexagon::LDriw_imm_abs_cNotPt_V4 :
case Hexagon::LDriw_imm_abs_cdnNotPt_V4:
case Hexagon::STrib_abs_cNotPt_V4:
case Hexagon::STrib_abs_cdnNotPt_V4:
case Hexagon::STrih_abs_cNotPt_V4:
case Hexagon::STrih_abs_cdnNotPt_V4:
case Hexagon::STriw_abs_cNotPt_V4:
case Hexagon::STriw_abs_cdnNotPt_V4:
case Hexagon::STrid_abs_cNotPt_V4:
case Hexagon::STrid_abs_cdnNotPt_V4:
case Hexagon::STrib_imm_abs_cNotPt_V4:
case Hexagon::STrib_imm_abs_cdnNotPt_V4:
case Hexagon::STrih_imm_abs_cNotPt_V4:
case Hexagon::STrih_imm_abs_cdnNotPt_V4:
case Hexagon::STriw_imm_abs_cNotPt_V4:
case Hexagon::STriw_imm_abs_cdnNotPt_V4:
case Hexagon::LDrid_GP_cNotPt_V4 :
case Hexagon::LDrib_GP_cNotPt_V4 :
case Hexagon::LDriub_GP_cNotPt_V4 :
@ -3503,6 +3953,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
&& QRI->Subtarget.hasV4TOps()
&& J->getOpcode() == Hexagon::ALLOCFRAME
&& (I->getOpcode() == Hexagon::STrid
|| I->getOpcode() == Hexagon::STriw_indexed
|| I->getOpcode() == Hexagon::STriw
|| I->getOpcode() == Hexagon::STrib)
&& I->getOperand(0).getReg() == QRI->getStackRegister()
@ -3580,7 +4031,7 @@ HexagonPacketizerList::addToPacket(MachineInstr *MI) {
MachineInstr *nvjMI = MII;
assert(ResourceTracker->canReserveResources(MI));
ResourceTracker->reserveResources(MI);
if (QII->isExtended(MI) &&
if ((QII->isExtended(MI) || QII->isConstExtended(MI)) &&
!tryAllocateResourcesForConstExt(MI)) {
endPacket(MBB, MI);
ResourceTracker->reserveResources(MI);
@ -3616,7 +4067,7 @@ HexagonPacketizerList::addToPacket(MachineInstr *MI) {
CurrentPacketMIs.push_back(MI);
CurrentPacketMIs.push_back(nvjMI);
} else {
if ( QII->isExtended(MI)
if ( (QII->isExtended(MI) || QII->isConstExtended(MI))
&& ( !tryAllocateResourcesForConstExt(MI)
|| !ResourceTracker->canReserveResources(MI)))
{

View File

@ -13,6 +13,7 @@
#define DEBUG_TYPE "asm-printer"
#include "Hexagon.h"
#include "HexagonConstExtInfo.h"
#include "HexagonAsmPrinter.h"
#include "HexagonInstPrinter.h"
#include "HexagonMCInst.h"
@ -107,7 +108,10 @@ void HexagonInstPrinter::printImmOperand(const MCInst *MI, unsigned OpNo,
void HexagonInstPrinter::printExtOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) const {
O << MI->getOperand(OpNo).getImm();
if (isConstExtended(MI))
O << "#" << MI->getOperand(OpNo).getImm();
else
O << MI->getOperand(OpNo).getImm();
}
void HexagonInstPrinter::printUnsignedImmOperand(const MCInst *MI,
@ -117,7 +121,7 @@ void HexagonInstPrinter::printUnsignedImmOperand(const MCInst *MI,
void HexagonInstPrinter::printNegImmOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) const {
O << -MI->getOperand(OpNo).getImm();
O << -MI->getOperand(OpNo).getImm();
}
void HexagonInstPrinter::printNOneImmOperand(const MCInst *MI, unsigned OpNo,
@ -131,7 +135,10 @@ void HexagonInstPrinter::printMEMriOperand(const MCInst *MI, unsigned OpNo,
const MCOperand& MO1 = MI->getOperand(OpNo + 1);
O << getRegisterName(MO0.getReg());
O << " + #" << MO1.getImm();
if (isConstExtended(MI))
O << " + ##" << MO1.getImm();
else
O << " + #" << MO1.getImm();
}
void HexagonInstPrinter::printFrameIndexOperand(const MCInst *MI, unsigned OpNo,
@ -196,3 +203,17 @@ void HexagonInstPrinter::printSymbol(const MCInst *MI, unsigned OpNo,
}
O << ')';
}
// isConstExtended - Return true when MI's extendable immediate lies outside
// the non-extended encodable range [MinValue, MaxValue] recorded in the
// HexagonConstExt table, i.e. the instruction must carry a constant extender.
bool HexagonInstPrinter::isConstExtended(const MCInst *MI) const{
  unsigned short Opcode = MI->getOpcode();
  short ExtOpNum = HexagonConstExt::getCExtOpNum(Opcode);

  // Instruction has no constant extended operand; bail out before touching
  // the min/max table, which is only meaningful for extendable opcodes.
  if (ExtOpNum == -1)
    return false;

  int MinValue = HexagonConstExt::getMinValue(Opcode);
  int MaxValue = HexagonConstExt::getMaxValue(Opcode);
  // NOTE(review): assumes the extendable operand is an immediate — a symbol
  // (e.g. global-address) operand would assert inside getImm(); confirm all
  // callers reach here with immediate operands only.
  int ImmValue = MI->getOperand(ExtOpNum).getImm();
  return (ImmValue < MinValue || ImmValue > MaxValue);
}

View File

@ -0,0 +1,70 @@
; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
; Make sure that constant extended instructions are generated.
; Check if add and add-sub instructions are extended.
; test1 - immediates 44400 and 33000 do not fit the non-extended add
; encodings, so llc must emit constant-extended add and add-sub (##imm).
define i32 @test1(i32 %b, i32* nocapture %c) nounwind {
entry:
%0 = load i32* %c, align 4
%add1 = add nsw i32 %0, 44400
; CHECK: add(r{{[0-9]+}}{{ *}},{{ *}}##44400)
%add = add i32 %b, 33000
%sub = sub i32 %add, %0
; CHECK: add(r{{[0-9]+}},{{ *}}sub(##33000,{{ *}}r{{[0-9]+}})
%add2 = add nsw i32 %add1, %0
store i32 %add1, i32* %c, align 4
%mul = mul nsw i32 %add2, %sub
ret i32 %mul
}
; Check if load and store instructions are extended.
; test2 - i32 GEP indices 7000/6000 scale to byte offsets 28000/24000,
; which exceed the base+offset ranges, so the load and store must use
; constant-extended addressing; the 8000 in the sub is also extended.
define i32 @test2(i32* nocapture %b, i32 %c) nounwind {
entry:
%arrayidx = getelementptr inbounds i32* %b, i32 7000
%0 = load i32* %arrayidx, align 4
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}##28000)
%sub = sub nsw i32 8000, %0
; CHECK: sub(##8000{{ *}},{{ *}}r{{[0-9]+}})
%cmp = icmp sgt i32 %sub, 10
br i1 %cmp, label %if.then, label %if.else
if.then:
%add = add nsw i32 %sub, %c
br label %return
if.else:
%arrayidx1 = getelementptr inbounds i32* %b, i32 6000
store i32 %sub, i32* %arrayidx1, align 4
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}##24000){{ *}}={{ *}}r{{[0-9]+}}
br label %return
return:
%retval.0 = phi i32 [ %add, %if.then ], [ 0, %if.else ]
ret i32 %retval.0
}
; Check if the transfer, compare and mpyi instructions are extended.
; test3 - large immediates in a transfer (34567), a signed compare
; (65535 after canonicalization of "slt 65536" to "gt 65535"), and a
; multiply all require the constant-extended forms.
define i32 @test3() nounwind {
entry:
%call = tail call i32 @b(i32 1235, i32 34567) nounwind
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}##34567
%sext = shl i32 %call, 16
%conv1 = ashr exact i32 %sext, 16
%cmp = icmp slt i32 %sext, 65536
br i1 %cmp, label %if.then, label %if.else
; CHECK: cmp.gt(r{{[0-9]+}}{{ *}},{{ *}}##65535)
if.then:
%mul = mul nsw i32 %conv1, 34567
br label %if.end
; CHECK: r{{[0-9]+}}{{ *}}=+{{ *}}mpyi(r{{[0-9]+}}{{ *}},{{ *}}##34567)
if.else:
%mul5 = mul nsw i32 %conv1, 1235
br label %if.end
if.end:
%a.0 = phi i32 [ %mul, %if.then ], [ %mul5, %if.else ]
ret i32 %a.0
}
declare i32 @b(i32, i32)

View File

@ -6,12 +6,12 @@
; CHECK-NEXT: }
@Reg = global i32 0, align 4
define i32 @main() nounwind {
define void @foo() nounwind {
entry:
%number= alloca i32, align 4
store i32 500000, i32* %number, align 4
store i32 500, i32* %number, align 4
%number1= alloca i32, align 4
store i32 100000, i32* %number1, align 4
ret i32 0
store i32 100, i32* %number1, align 4
ret void
}