[Hexagon] Reapply r231699. Remove assumption that second operand is an immediate when checking if A2_tfrsi is combinable.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@231710 91177308-0d34-0410-b5e6-96231b3b80d8
Colin LeMahieu 2015-03-09 21:48:13 +00:00
parent cd5029d001
commit ffc2de43d9
5 changed files with 22 additions and 145 deletions
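The core of the change is in isCombinableInstType: the A2_tfrsi source operand may be a global address rather than an immediate, so isImm() must be checked before getImm() is called (the old code asserted isImm() instead), and a non-immediate carrying relocation target flags is rejected outright. Below is a minimal standalone sketch of that guard, using hypothetical Operand/OpKind/isCombinableTfrSrc names rather than LLVM's MachineOperand API:

// Standalone sketch (hypothetical types, not LLVM's MachineOperand API) of the
// operand check this commit introduces: getImm() is only valid on an immediate
// operand, so the operand kind must be tested first.
#include <cassert>
#include <cstdint>
#include <iostream>

enum class OpKind { Reg, Imm, Global };

struct Operand {
  OpKind Kind;
  int64_t Imm = 0;           // meaningful only when Kind == OpKind::Imm
  unsigned TargetFlags = 0;  // 0 stands in for MO_NO_FLAG
  bool isImm() const { return Kind == OpKind::Imm; }
  int64_t getImm() const { assert(isImm()); return Imm; }
};

// Combinable if the source is a flag-free operand in aggressive mode, or a
// signed 8-bit immediate otherwise; flagged non-immediates never combine.
bool isCombinableTfrSrc(const Operand &Src, bool Aggressive) {
  if (!Src.isImm() && Src.TargetFlags != 0)
    return false;  // relocation flags block combining
  bool NotExt = Src.isImm() && Src.getImm() >= -128 && Src.getImm() <= 127;
  return Aggressive || NotExt;
}

int main() {
  Operand SmallImm{OpKind::Imm, 42};
  Operand WideImm{OpKind::Imm, 1000};
  Operand FlaggedGlobal{OpKind::Global, 0, /*TargetFlags=*/1};

  std::cout << isCombinableTfrSrc(SmallImm, false)       // 1: fits in 8 bits
            << isCombinableTfrSrc(WideImm, false)        // 0: needs aggressive mode
            << isCombinableTfrSrc(FlaggedGlobal, true)   // 0: flagged global never combined
            << '\n';
}

The real function additionally requires the destination to be an integer register; that part is unchanged by this commit and omitted from the sketch.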

@@ -127,12 +127,21 @@ static bool isCombinableInstType(MachineInstr *MI,
   case Hexagon::A2_tfrsi: {
     // A transfer-immediate can be combined if its argument is a signed 8bit
     // value.
-    assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm());
-    unsigned DestReg = MI->getOperand(0).getReg();
+    const MachineOperand &Op0 = MI->getOperand(0);
+    const MachineOperand &Op1 = MI->getOperand(1);
+    assert(Op0.isReg());
-    // Only combine constant extended TFRI if we are in aggressive mode.
+    unsigned DestReg = Op0.getReg();
+    // Ensure that TargetFlags are MO_NO_FLAG for a global. This is a
+    // workaround for an ABI bug that prevents GOT relocations on combine
+    // instructions
+    if (!Op1.isImm() && Op1.getTargetFlags() != HexagonII::MO_NO_FLAG)
+      return false;
+    // Only combine constant extended A2_tfrsi if we are in aggressive mode.
+    bool NotExt = Op1.isImm() && isInt<8>(Op1.getImm());
     return Hexagon::IntRegsRegClass.contains(DestReg) &&
-           (ShouldCombineAggressively || isInt<8>(MI->getOperand(1).getImm()));
+           (ShouldCombineAggressively || NotExt);
   }
 
   case Hexagon::TFRI_V4: {

@@ -332,73 +332,6 @@ static unsigned doesIntrinsicReturnPredicate(unsigned ID)
   }
 }
 
-static bool OffsetFitsS11(EVT MemType, int64_t Offset) {
-  if (MemType == MVT::i64 && isShiftedInt<11,3>(Offset)) {
-    return true;
-  }
-  if (MemType == MVT::i32 && isShiftedInt<11,2>(Offset)) {
-    return true;
-  }
-  if (MemType == MVT::i16 && isShiftedInt<11,1>(Offset)) {
-    return true;
-  }
-  if (MemType == MVT::i8 && isInt<11>(Offset)) {
-    return true;
-  }
-  return false;
-}
-
-//
-// Try to lower loads of GlobalAdresses into base+offset loads. Custom
-// lowering for GlobalAddress nodes has already turned it into a
-// CONST32.
-//
-SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, SDLoc dl) {
-  SDValue Chain = LD->getChain();
-  SDNode* Const32 = LD->getBasePtr().getNode();
-  unsigned Opcode = 0;
-
-  if (Const32->getOpcode() == HexagonISD::CONST32 &&
-      ISD::isNormalLoad(LD)) {
-    SDValue Base = Const32->getOperand(0);
-    EVT LoadedVT = LD->getMemoryVT();
-    int64_t Offset = cast<GlobalAddressSDNode>(Base)->getOffset();
-    if (Offset != 0 && OffsetFitsS11(LoadedVT, Offset)) {
-      MVT PointerTy = getTargetLowering()->getPointerTy();
-      const GlobalValue* GV =
-                        cast<GlobalAddressSDNode>(Base)->getGlobal();
-      SDValue TargAddr =
-                CurDAG->getTargetGlobalAddress(GV, dl, PointerTy, 0);
-      SDNode* NewBase = CurDAG->getMachineNode(Hexagon::CONST32_set,
-                                               dl, PointerTy,
-                                               TargAddr);
-      // Figure out base + offset opcode
-      if (LoadedVT == MVT::i64) Opcode = Hexagon::L2_loadrd_io;
-      else if (LoadedVT == MVT::i32) Opcode = Hexagon::L2_loadri_io;
-      else if (LoadedVT == MVT::i16) Opcode = Hexagon::L2_loadrh_io;
-      else if (LoadedVT == MVT::i8) Opcode = Hexagon::L2_loadrb_io;
-      else llvm_unreachable("unknown memory type");
-
-      // Build indexed load.
-      SDValue TargetConstOff = CurDAG->getTargetConstant(Offset, PointerTy);
-      SDNode* Result = CurDAG->getMachineNode(Opcode, dl,
-                                              LD->getValueType(0),
-                                              MVT::Other,
-                                              SDValue(NewBase,0),
-                                              TargetConstOff,
-                                              Chain);
-      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
-      MemOp[0] = LD->getMemOperand();
-      cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1);
-      ReplaceUses(LD, Result);
-      return Result;
-    }
-  }
-
-  return SelectCode(LD);
-}
-
 SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD,
                                                            unsigned Opcode,
@@ -649,7 +582,7 @@ SDNode *HexagonDAGToDAGISel::SelectLoad(SDNode *N) {
   if (AM != ISD::UNINDEXED) {
     result = SelectIndexedLoad(LD, dl);
   } else {
-    result = SelectBaseOffsetLoad(LD, dl);
+    result = SelectCode(LD);
   }
 
   return result;
@@ -725,60 +658,6 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, SDLoc dl) {
 }
 
-SDNode *HexagonDAGToDAGISel::SelectBaseOffsetStore(StoreSDNode *ST,
-                                                   SDLoc dl) {
-  SDValue Chain = ST->getChain();
-  SDNode* Const32 = ST->getBasePtr().getNode();
-  SDValue Value = ST->getValue();
-  unsigned Opcode = 0;
-
-  // Try to lower stores of GlobalAdresses into indexed stores. Custom
-  // lowering for GlobalAddress nodes has already turned it into a
-  // CONST32. Avoid truncating stores for the moment. Post-inc stores
-  // do the same. Don't think there's a reason for it, so will file a
-  // bug to fix.
-  if ((Const32->getOpcode() == HexagonISD::CONST32) &&
-      !(Value.getValueType() == MVT::i64 && ST->isTruncatingStore())) {
-    SDValue Base = Const32->getOperand(0);
-    if (Base.getOpcode() == ISD::TargetGlobalAddress) {
-      EVT StoredVT = ST->getMemoryVT();
-      int64_t Offset = cast<GlobalAddressSDNode>(Base)->getOffset();
-      if (Offset != 0 && OffsetFitsS11(StoredVT, Offset)) {
-        MVT PointerTy = getTargetLowering()->getPointerTy();
-        const GlobalValue* GV =
-                          cast<GlobalAddressSDNode>(Base)->getGlobal();
-        SDValue TargAddr =
-                  CurDAG->getTargetGlobalAddress(GV, dl, PointerTy, 0);
-        SDNode* NewBase = CurDAG->getMachineNode(Hexagon::CONST32_set,
-                                                 dl, PointerTy,
-                                                 TargAddr);
-        // Figure out base + offset opcode
-        if (StoredVT == MVT::i64) Opcode = Hexagon::S2_storerd_io;
-        else if (StoredVT == MVT::i32) Opcode = Hexagon::S2_storeri_io;
-        else if (StoredVT == MVT::i16) Opcode = Hexagon::S2_storerh_io;
-        else if (StoredVT == MVT::i8) Opcode = Hexagon::S2_storerb_io;
-        else llvm_unreachable("unknown memory type");
-
-        SDValue Ops[] = {SDValue(NewBase,0),
-                         CurDAG->getTargetConstant(Offset,PointerTy),
-                         Value, Chain};
-        // build indexed store
-        SDNode* Result = CurDAG->getMachineNode(Opcode, dl,
-                                                MVT::Other, Ops);
-        MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
-        MemOp[0] = ST->getMemOperand();
-        cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1);
-        ReplaceUses(ST, Result);
-        return Result;
-      }
-    }
-  }
-
-  return SelectCode(ST);
-}
-
 SDNode *HexagonDAGToDAGISel::SelectStore(SDNode *N) {
   SDLoc dl(N);
   StoreSDNode *ST = cast<StoreSDNode>(N);
@@ -789,7 +668,7 @@ SDNode *HexagonDAGToDAGISel::SelectStore(SDNode *N) {
     return SelectIndexedStore(ST, dl);
   }
 
-  return SelectBaseOffsetStore(ST, dl);
+  return SelectCode(ST);
 }
 
 SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {

@@ -640,7 +640,7 @@ bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const {
   switch(Opc) {
   case Hexagon::A2_tfrsi:
-    return isInt<12>(MI->getOperand(1).getImm());
+    return (isOperandExtended(MI, 1) && isConstExtended(MI)) || isInt<12>(MI->getOperand(1).getImm());
 
   case Hexagon::S2_storerd_io:
     return isShiftedUInt<6,3>(MI->getOperand(1).getImm());
@@ -1646,7 +1646,7 @@ bool HexagonInstrInfo::isConstExtended(MachineInstr *MI) const {
   // We currently only handle isGlobal() because it is the only kind of
   // object we are going to end up with here for now.
  // In the future we probably should add isSymbol(), etc.
-  if (MO.isGlobal() || MO.isSymbol())
+  if (MO.isGlobal() || MO.isSymbol() || MO.isBlockAddress())
    return true;
 
  // If the extendable operand is not 'Immediate' type, the instruction should
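The isPredicable and isConstExtended changes above make the same point from the predication side: when the A2_tfrsi source is constant-extended (a global, a symbol, or now a block address) there is no immediate to range-check, otherwise the value must fit in a signed 12-bit field. A short standalone sketch of that rule, with hypothetical SrcKind/needsConstExtender/isTfrPredicable names rather than the real HexagonInstrInfo interface:

// Standalone sketch (hypothetical helpers, not HexagonInstrInfo's interface)
// of the relaxed predicability rule for transfer-immediates.
#include <cstdint>
#include <iostream>

enum class SrcKind { Imm, Global, Symbol, BlockAddress };

bool needsConstExtender(SrcKind K) {
  return K == SrcKind::Global || K == SrcKind::Symbol ||
         K == SrcKind::BlockAddress;  // isBlockAddress() is the newly added case
}

bool isTfrPredicable(SrcKind K, int64_t Imm) {
  if (needsConstExtender(K))
    return true;                       // constant-extended form, no range check
  return Imm >= -2048 && Imm <= 2047;  // isInt<12>
}

int main() {
  std::cout << isTfrPredicable(SrcKind::Imm, 100)           // 1
            << isTfrPredicable(SrcKind::Imm, 100000)        // 0: does not fit in 12 bits
            << isTfrPredicable(SrcKind::BlockAddress, 0)    // 1: constant-extended
            << '\n';
}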

@@ -4805,31 +4805,19 @@ def CONST32 : CONSTLDInst<(outs IntRegs:$dst), (ins globaladdress:$global),
                     (load (HexagonCONST32 tglobaltlsaddr:$global)))]>;
 
-let isReMaterializable = 1, isMoveImm = 1 in
-def CONST32_set : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global),
-                  "$dst = CONST32(#$global)",
-                  [(set (i32 IntRegs:$dst),
-                        (HexagonCONST32 tglobaladdr:$global))]>;
-
 let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in
 def CONST32_set_jt : CONSTLDInst<(outs IntRegs:$dst), (ins jumptablebase:$jt),
                      "$dst = CONST32(#$jt)",
                      [(set (i32 IntRegs:$dst),
                            (HexagonCONST32 tjumptable:$jt))]>;
 
-let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in
-def CONST32GP_set : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global),
-                    "$dst = CONST32(#$global)",
-                    [(set (i32 IntRegs:$dst),
-                          (HexagonCONST32_GP tglobaladdr:$global))]>;
-
 let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in
 def CONST32_Int_Real : CONSTLDInst<(outs IntRegs:$dst), (ins i32imm:$global),
                        "$dst = CONST32(#$global)",
                        [(set (i32 IntRegs:$dst), imm:$global) ]>;
 
-// Map BlockAddress lowering to CONST32_Int_Real
-def : Pat<(HexagonCONST32_GP tblockaddress:$addr),
-          (CONST32_Int_Real tblockaddress:$addr)>;
-
+// Map TLS addressses to a CONST32 instruction
+def: Pat<(HexagonCONST32 tglobaltlsaddr:$addr), (A2_tfrsi s16Ext:$addr)>;
+def: Pat<(HexagonCONST32 bbl:$label), (A2_tfrsi s16Ext:$label)>;
 
 let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in
 def CONST32_Label : LDInst2<(outs IntRegs:$dst), (ins bblabel:$label),

@@ -1,4 +1,5 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
+; XFAIL:
+; RUN: llc -march=hexagon < %s | FileCheck %s
 ; CHECK: r[[T0:[0-9]+]] = CONST32(#s2)
 ; CHECK: memw(r29+#0) = r{{.}}
 ; CHECK: memw(r29+#8) = r{{.}}