//==- HexagonInstrInfo.td - Target Description for Hexagon -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the Hexagon instructions in TableGen format.
//
//===----------------------------------------------------------------------===//

include "HexagonInstrFormats.td"
include "HexagonOperands.td"

// Multi-class for logical operators.
multiclass ALU32_rr_ri<string OpcStr, SDNode OpNode> {
  def rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
                    !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
                    [(set (i32 IntRegs:$dst), (OpNode (i32 IntRegs:$b),
                                                      (i32 IntRegs:$c)))]>;
  def ri : ALU32_ri<(outs IntRegs:$dst), (ins s10Imm:$b, IntRegs:$c),
                    !strconcat("$dst = ", !strconcat(OpcStr, "(#$b, $c)")),
                    [(set (i32 IntRegs:$dst), (OpNode s10Imm:$b,
                                                      (i32 IntRegs:$c)))]>;
}
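// Illustrative sketch (not part of the build): a hypothetical
//   defm EXAMPLE : ALU32_rr_ri<"or", or>;
// would stamp out two records from the multiclass above, roughly:
//   def EXAMPLErr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
//                            "$dst = or($b, $c)", ...>;   // register-register form
//   def EXAMPLEri : ALU32_ri<(outs IntRegs:$dst), (ins s10Imm:$b, IntRegs:$c),
//                            "$dst = or(#$b, $c)", ...>;  // immediate-register form
// i.e. one defm provides both the rr and ri variants of the operator.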
|
|
|
|
// Multi-class for compare ops.
|
|
let isCompare = 1 in {
|
|
multiclass CMP64_rr<string OpcStr, PatFrag OpNode> {
|
|
def rr : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$b, DoubleRegs:$c),
|
|
!strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
|
|
[(set (i1 PredRegs:$dst),
|
|
(OpNode (i64 DoubleRegs:$b), (i64 DoubleRegs:$c)))]>;
|
|
}
|
|
multiclass CMP32_rr<string OpcStr, PatFrag OpNode> {
|
|
def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
|
|
!strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
|
|
[(set (i1 PredRegs:$dst),
|
|
(OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>;
|
|
}
|
|
|
|
multiclass CMP32_rr_ri_s10<string OpcStr, string CextOp, PatFrag OpNode> {
|
|
let CextOpcode = CextOp in {
|
|
let InputType = "reg" in
|
|
def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
|
|
!strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
|
|
[(set (i1 PredRegs:$dst),
|
|
(OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>;
|
|
|
|
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1,
|
|
opExtentBits = 10, InputType = "imm" in
|
|
def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s10Ext:$c),
|
|
!strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
|
|
[(set (i1 PredRegs:$dst),
|
|
(OpNode (i32 IntRegs:$b), s10ExtPred:$c))]>;
|
|
}
|
|
}
|
|
|
|
multiclass CMP32_rr_ri_u9<string OpcStr, string CextOp, PatFrag OpNode> {
|
|
let CextOpcode = CextOp in {
|
|
let InputType = "reg" in
|
|
def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
|
|
!strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
|
|
[(set (i1 PredRegs:$dst),
|
|
(OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>;
|
|
|
|
let isExtendable = 1, opExtendable = 2, isExtentSigned = 0,
|
|
opExtentBits = 9, InputType = "imm" in
|
|
def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u9Ext:$c),
|
|
!strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
|
|
[(set (i1 PredRegs:$dst),
|
|
(OpNode (i32 IntRegs:$b), u9ExtPred:$c))]>;
|
|
}
|
|
}
|
|
|
|
multiclass CMP32_ri_u8<string OpcStr, PatFrag OpNode> {
|
|
let isExtendable = 1, opExtendable = 2, isExtentSigned = 0, opExtentBits = 8 in
|
|
def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u8Ext:$c),
|
|
!strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
|
|
[(set (i1 PredRegs:$dst), (OpNode (i32 IntRegs:$b),
|
|
u8ExtPred:$c))]>;
|
|
}
|
|
|
|
multiclass CMP32_ri_s8<string OpcStr, PatFrag OpNode> {
|
|
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 8 in
|
|
def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s8Ext:$c),
|
|
!strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
|
|
[(set (i1 PredRegs:$dst), (OpNode (i32 IntRegs:$b),
|
|
s8ExtPred:$c))]>;
|
|
}
|
|
}
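// Illustrative sketch (not part of the build): these compare multiclasses are
// instantiated further down in this file, for example
//   defm CMPGT : CMP32_rr_ri_s10<"cmp.gt", "CMPGT", setgt>, ImmRegRel;
// which yields CMPGTrr ("$dst = cmp.gt($b, $c)") selecting (setgt Rb, Rc) and
// CMPGTri ("$dst = cmp.gt($b, #$c)") selecting (setgt Rb, s10ExtPred:$c); the
// shared CextOpcode plus ImmRegRel relates the register and constant-extendable
// immediate forms of the same compare.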
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// ALU32/ALU (Instructions with register-register form)
|
|
//===----------------------------------------------------------------------===//
|
|
multiclass ALU32_Pbase<string mnemonic, bit isNot,
|
|
bit isPredNew> {
|
|
|
|
let PNewValue = !if(isPredNew, "new", "") in
|
|
def NAME : ALU32_rr<(outs IntRegs:$dst),
|
|
(ins PredRegs:$src1, IntRegs:$src2, IntRegs: $src3),
|
|
!if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew,".new) $dst = ",
|
|
") $dst = ")#mnemonic#"($src2, $src3)",
|
|
[]>;
|
|
}
|
|
|
|
multiclass ALU32_Pred<string mnemonic, bit PredNot> {
|
|
let PredSense = !if(PredNot, "false", "true") in {
|
|
defm _c#NAME : ALU32_Pbase<mnemonic, PredNot, 0>;
|
|
// Predicate new
|
|
defm _cdn#NAME : ALU32_Pbase<mnemonic, PredNot, 1>;
|
|
}
|
|
}
|
|
|
|
let InputType = "reg" in
|
|
multiclass ALU32_base<string mnemonic, string CextOp, SDNode OpNode> {
|
|
let CextOpcode = CextOp, BaseOpcode = CextOp#_rr in {
|
|
let isPredicable = 1 in
|
|
def NAME : ALU32_rr<(outs IntRegs:$dst),
|
|
(ins IntRegs:$src1, IntRegs:$src2),
|
|
"$dst = "#mnemonic#"($src1, $src2)",
|
|
[(set (i32 IntRegs:$dst), (OpNode (i32 IntRegs:$src1),
|
|
(i32 IntRegs:$src2)))]>;
|
|
|
|
let neverHasSideEffects = 1, isPredicated = 1 in {
|
|
defm Pt : ALU32_Pred<mnemonic, 0>;
|
|
defm NotPt : ALU32_Pred<mnemonic, 1>;
|
|
}
|
|
}
|
|
}
|
|
|
|
let isCommutable = 1 in {
|
|
defm ADD_rr : ALU32_base<"add", "ADD", add>, ImmRegRel, PredNewRel;
|
|
defm AND_rr : ALU32_base<"and", "AND", and>, ImmRegRel, PredNewRel;
|
|
defm XOR_rr : ALU32_base<"xor", "XOR", xor>, ImmRegRel, PredNewRel;
|
|
defm OR_rr : ALU32_base<"or", "OR", or>, ImmRegRel, PredNewRel;
|
|
}
|
|
|
|
defm SUB_rr : ALU32_base<"sub", "SUB", sub>, ImmRegRel, PredNewRel;
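// Illustrative sketch (not part of the build): each defm above expands through
// ALU32_base/ALU32_Pred/ALU32_Pbase, so for example
//   defm ADD_rr : ALU32_base<"add", "ADD", add>, ImmRegRel, PredNewRel;
// produces, roughly:
//   ADD_rr            "$dst = add($src1, $src2)"              (predicable)
//   ADD_rr_cPt        "if ($src1) $dst = add($src2, $src3)"
//   ADD_rr_cdnPt      "if ($src1.new) $dst = add($src2, $src3)"
//   ADD_rr_cNotPt     "if (!$src1) $dst = add($src2, $src3)"
//   ADD_rr_cdnNotPt   "if (!$src1.new) $dst = add($src2, $src3)"
// with PredSense/PNewValue recording the predicate polarity and .new-ness for
// the PredNewRel relation.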
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// ALU32/ALU (ADD with register-immediate form)
|
|
//===----------------------------------------------------------------------===//
|
|
multiclass ALU32ri_Pbase<string mnemonic, bit isNot, bit isPredNew> {
|
|
let PNewValue = !if(isPredNew, "new", "") in
|
|
def NAME : ALU32_ri<(outs IntRegs:$dst),
|
|
(ins PredRegs:$src1, IntRegs:$src2, s8Ext: $src3),
|
|
!if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew,".new) $dst = ",
|
|
") $dst = ")#mnemonic#"($src2, #$src3)",
|
|
[]>;
|
|
}
|
|
|
|
multiclass ALU32ri_Pred<string mnemonic, bit PredNot> {
|
|
let PredSense = !if(PredNot, "false", "true") in {
|
|
defm _c#NAME : ALU32ri_Pbase<mnemonic, PredNot, 0>;
|
|
// Predicate new
|
|
defm _cdn#NAME : ALU32ri_Pbase<mnemonic, PredNot, 1>;
|
|
}
|
|
}
|
|
|
|
let isExtendable = 1, InputType = "imm" in
|
|
multiclass ALU32ri_base<string mnemonic, string CextOp, SDNode OpNode> {
|
|
let CextOpcode = CextOp, BaseOpcode = CextOp#_ri in {
|
|
let opExtendable = 2, isExtentSigned = 1, opExtentBits = 16,
|
|
isPredicable = 1 in
|
|
def NAME : ALU32_ri<(outs IntRegs:$dst),
|
|
(ins IntRegs:$src1, s16Ext:$src2),
|
|
"$dst = "#mnemonic#"($src1, #$src2)",
|
|
[(set (i32 IntRegs:$dst), (OpNode (i32 IntRegs:$src1),
|
|
(s16ExtPred:$src2)))]>;
|
|
|
|
let opExtendable = 3, isExtentSigned = 1, opExtentBits = 8,
|
|
neverHasSideEffects = 1, isPredicated = 1 in {
|
|
defm Pt : ALU32ri_Pred<mnemonic, 0>;
|
|
defm NotPt : ALU32ri_Pred<mnemonic, 1>;
|
|
}
|
|
}
|
|
}
|
|
|
|
defm ADD_ri : ALU32ri_base<"add", "ADD", add>, ImmRegRel, PredNewRel;
|
|
|
|
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 10,
|
|
CextOpcode = "OR", InputType = "imm" in
|
|
def OR_ri : ALU32_ri<(outs IntRegs:$dst),
|
|
(ins IntRegs:$src1, s10Ext:$src2),
|
|
"$dst = or($src1, #$src2)",
|
|
[(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1),
|
|
s10ExtPred:$src2))]>, ImmRegRel;
|
|
|
|
def NOT_rr : ALU32_rr<(outs IntRegs:$dst),
|
|
(ins IntRegs:$src1),
|
|
"$dst = not($src1)",
|
|
[(set (i32 IntRegs:$dst), (not (i32 IntRegs:$src1)))]>;
|
|
|
|
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 10,
|
|
InputType = "imm", CextOpcode = "AND" in
|
|
def AND_ri : ALU32_ri<(outs IntRegs:$dst),
|
|
(ins IntRegs:$src1, s10Ext:$src2),
|
|
"$dst = and($src1, #$src2)",
|
|
[(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1),
|
|
s10ExtPred:$src2))]>, ImmRegRel;
|
|
// Negate.
|
|
def NEG : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
|
|
"$dst = neg($src1)",
|
|
[(set (i32 IntRegs:$dst), (ineg (i32 IntRegs:$src1)))]>;
|
|
// Nop.
|
|
let neverHasSideEffects = 1 in
|
|
def NOP : ALU32_rr<(outs), (ins),
|
|
"nop",
|
|
[]>;
|
|
|
|
// Rd32=sub(#s10,Rs32)
|
|
let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 10,
|
|
CextOpcode = "SUB", InputType = "imm" in
|
|
def SUB_ri : ALU32_ri<(outs IntRegs:$dst),
|
|
(ins s10Ext:$src1, IntRegs:$src2),
|
|
"$dst = sub(#$src1, $src2)",
|
|
[(set IntRegs:$dst, (sub s10ExtPred:$src1, IntRegs:$src2))]>,
|
|
ImmRegRel;
|
|
|
|
|
|
multiclass TFR_Pred<bit PredNot> {
|
|
let PredSense = !if(PredNot, "false", "true") in {
|
|
def _c#NAME : ALU32_rr<(outs IntRegs:$dst),
|
|
(ins PredRegs:$src1, IntRegs:$src2),
|
|
!if(PredNot, "if (!$src1", "if ($src1")#") $dst = $src2",
|
|
[]>;
|
|
// Predicate new
|
|
let PNewValue = "new" in
|
|
def _cdn#NAME : ALU32_rr<(outs IntRegs:$dst),
|
|
(ins PredRegs:$src1, IntRegs:$src2),
|
|
!if(PredNot, "if (!$src1", "if ($src1")#".new) $dst = $src2",
|
|
[]>;
|
|
}
|
|
}
|
|
|
|
let InputType = "reg", neverHasSideEffects = 1 in
|
|
multiclass TFR_base<string CextOp> {
|
|
let CextOpcode = CextOp, BaseOpcode = CextOp in {
|
|
let isPredicable = 1 in
|
|
def NAME : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
|
|
"$dst = $src1",
|
|
[]>;
|
|
|
|
let isPredicated = 1 in {
|
|
defm Pt : TFR_Pred<0>;
|
|
defm NotPt : TFR_Pred<1>;
|
|
}
|
|
}
|
|
}
|
|
|
|
class T_TFR64_Pred<bit PredNot, bit isPredNew>
|
|
: ALU32_rr<(outs DoubleRegs:$dst),
|
|
(ins PredRegs:$src1, DoubleRegs:$src2),
|
|
!if(PredNot, "if (!$src1", "if ($src1")#
|
|
!if(isPredNew, ".new) ", ") ")#"$dst = $src2", []>
|
|
{
|
|
bits<5> dst;
|
|
bits<2> src1;
|
|
bits<5> src2;
|
|
|
|
let IClass = 0b1111;
|
|
let Inst{27-24} = 0b1101;
|
|
let Inst{13} = isPredNew;
|
|
let Inst{7} = PredNot;
|
|
let Inst{4-0} = dst;
|
|
let Inst{6-5} = src1;
|
|
let Inst{20-17} = src2{4-1};
|
|
let Inst{16} = 0b1;
|
|
let Inst{12-9} = src2{4-1};
|
|
let Inst{8} = 0b0;
|
|
}
|
|
|
|
multiclass TFR64_Pred<bit PredNot> {
|
|
let PredSense = !if(PredNot, "false", "true") in {
|
|
def _c#NAME : T_TFR64_Pred<PredNot, 0>;
|
|
|
|
let PNewValue = "new" in
|
|
def _cdn#NAME : T_TFR64_Pred<PredNot, 1>; // Predicate new
|
|
}
|
|
}
|
|
|
|
let neverHasSideEffects = 1 in
|
|
multiclass TFR64_base<string BaseName> {
|
|
let BaseOpcode = BaseName in {
|
|
let isPredicable = 1 in
|
|
def NAME : ALU32Inst <(outs DoubleRegs:$dst),
|
|
(ins DoubleRegs:$src1),
|
|
"$dst = $src1" > {
|
|
bits<5> dst;
|
|
bits<5> src1;
|
|
|
|
let IClass = 0b1111;
|
|
let Inst{27-23} = 0b01010;
|
|
let Inst{4-0} = dst;
|
|
let Inst{20-17} = src1{4-1};
|
|
let Inst{16} = 0b1;
|
|
let Inst{12-9} = src1{4-1};
|
|
let Inst{8} = 0b0;
|
|
}
|
|
|
|
let isPredicated = 1 in {
|
|
defm Pt : TFR64_Pred<0>;
|
|
defm NotPt : TFR64_Pred<1>;
|
|
}
|
|
}
|
|
}
|
|
|
|
multiclass TFRI_Pred<bit PredNot> {
|
|
let isMoveImm = 1, PredSense = !if(PredNot, "false", "true") in {
|
|
def _c#NAME : ALU32_ri<(outs IntRegs:$dst),
|
|
(ins PredRegs:$src1, s12Ext:$src2),
|
|
!if(PredNot, "if (!$src1", "if ($src1")#") $dst = #$src2",
|
|
[]>;
|
|
|
|
// Predicate new
|
|
let PNewValue = "new" in
|
|
def _cdn#NAME : ALU32_rr<(outs IntRegs:$dst),
|
|
(ins PredRegs:$src1, s12Ext:$src2),
|
|
!if(PredNot, "if (!$src1", "if ($src1")#".new) $dst = #$src2",
|
|
[]>;
|
|
}
|
|
}
|
|
|
|
let InputType = "imm", isExtendable = 1, isExtentSigned = 1 in
|
|
multiclass TFRI_base<string CextOp> {
|
|
let CextOpcode = CextOp, BaseOpcode = CextOp#I in {
|
|
let isAsCheapAsAMove = 1 , opExtendable = 1, opExtentBits = 16,
|
|
isMoveImm = 1, isPredicable = 1, isReMaterializable = 1 in
|
|
def NAME : ALU32_ri<(outs IntRegs:$dst), (ins s16Ext:$src1),
|
|
"$dst = #$src1",
|
|
[(set (i32 IntRegs:$dst), s16ExtPred:$src1)]>;
|
|
|
|
let opExtendable = 2, opExtentBits = 12, neverHasSideEffects = 1,
|
|
isPredicated = 1 in {
|
|
defm Pt : TFRI_Pred<0>;
|
|
defm NotPt : TFRI_Pred<1>;
|
|
}
|
|
}
|
|
}
|
|
|
|
defm TFRI : TFRI_base<"TFR">, ImmRegRel, PredNewRel;
|
|
defm TFR : TFR_base<"TFR">, ImmRegRel, PredNewRel;
|
|
defm TFR64 : TFR64_base<"TFR64">, PredNewRel;
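// Illustrative sketch (not part of the build): the three defms above give the
// basic transfers, roughly:
//   TFR      "$dst = $src1"     register-to-register copy
//   TFRI     "$dst = #$src1"    immediate transfer (isMoveImm, isReMaterializable)
//   TFR64    "$dst = $src1"     64-bit register-pair copy
// each with _cPt/_cNotPt/_cdnPt/_cdnNotPt predicated variants generated by the
// corresponding *_Pred multiclasses, and TFR/TFRI tied together via ImmRegRel.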
|
|
|
|
// Transfer control register.
|
|
let neverHasSideEffects = 1 in
|
|
def TFCR : CRInst<(outs CRRegs:$dst), (ins IntRegs:$src1),
|
|
"$dst = $src1",
|
|
[]>;
|
|
//===----------------------------------------------------------------------===//
|
|
// ALU32/ALU -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// ALU32/PERM +
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Combine.
|
|
|
|
def SDTHexagonI64I32I32 : SDTypeProfile<1, 2,
|
|
[SDTCisVT<0, i64>, SDTCisVT<1, i32>, SDTCisSameAs<1, 2>]>;
|
|
|
|
def HexagonWrapperCombineII :
|
|
SDNode<"HexagonISD::WrapperCombineII", SDTHexagonI64I32I32>;
|
|
def HexagonWrapperCombineRR :
|
|
SDNode<"HexagonISD::WrapperCombineRR", SDTHexagonI64I32I32>;
|
|
|
|
// Combines the two integer registers SRC1 and SRC2 into a double register.
|
|
let isPredicable = 1 in
|
|
def COMBINE_rr : ALU32_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1,
|
|
IntRegs:$src2),
|
|
"$dst = combine($src1, $src2)",
|
|
[(set (i64 DoubleRegs:$dst),
|
|
(i64 (HexagonWrapperCombineRR (i32 IntRegs:$src1),
|
|
(i32 IntRegs:$src2))))]>;
|
|
|
|
// Rd=combine(Rt.[HL], Rs.[HL])
|
|
class COMBINE_halves<string A, string B>: ALU32_rr<(outs IntRegs:$dst),
|
|
(ins IntRegs:$src1,
|
|
IntRegs:$src2),
|
|
"$dst = combine($src1."# A #", $src2."# B #")", []>;
|
|
|
|
let isPredicable = 1 in {
|
|
def COMBINE_hh : COMBINE_halves<"H", "H">;
|
|
def COMBINE_hl : COMBINE_halves<"H", "L">;
|
|
def COMBINE_lh : COMBINE_halves<"L", "H">;
|
|
def COMBINE_ll : COMBINE_halves<"L", "L">;
|
|
}
|
|
|
|
def : Pat<(i32 (trunc (i64 (srl (i64 DoubleRegs:$a), (i32 16))))),
|
|
(COMBINE_lh (EXTRACT_SUBREG (i64 DoubleRegs:$a), subreg_hireg),
|
|
(EXTRACT_SUBREG (i64 DoubleRegs:$a), subreg_loreg))>;
|
|
|
|
// Combines the two immediates SRC1 and SRC2 into a double register.
|
|
class COMBINE_imm<Operand imm1, Operand imm2, PatLeaf pat1, PatLeaf pat2> :
|
|
ALU32_ii<(outs DoubleRegs:$dst), (ins imm1:$src1, imm2:$src2),
|
|
"$dst = combine(#$src1, #$src2)",
|
|
[(set (i64 DoubleRegs:$dst),
|
|
(i64 (HexagonWrapperCombineII (i32 pat1:$src1), (i32 pat2:$src2))))]>;
|
|
|
|
let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 8 in
|
|
def COMBINE_Ii : COMBINE_imm<s8Ext, s8Imm, s8ExtPred, s8ImmPred>;
|
|
|
|
// Mux.
|
|
def VMUX_prr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1,
|
|
DoubleRegs:$src2,
|
|
DoubleRegs:$src3),
|
|
"$dst = vmux($src1, $src2, $src3)",
|
|
[]>;
|
|
|
|
let CextOpcode = "MUX", InputType = "reg" in
|
|
def MUX_rr : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
|
|
IntRegs:$src2, IntRegs:$src3),
|
|
"$dst = mux($src1, $src2, $src3)",
|
|
[(set (i32 IntRegs:$dst),
|
|
(i32 (select (i1 PredRegs:$src1), (i32 IntRegs:$src2),
|
|
(i32 IntRegs:$src3))))]>, ImmRegRel;
|
|
|
|
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 8,
|
|
CextOpcode = "MUX", InputType = "imm" in
|
|
def MUX_ir : ALU32_ir<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Ext:$src2,
|
|
IntRegs:$src3),
|
|
"$dst = mux($src1, #$src2, $src3)",
|
|
[(set (i32 IntRegs:$dst),
|
|
(i32 (select (i1 PredRegs:$src1), s8ExtPred:$src2,
|
|
(i32 IntRegs:$src3))))]>, ImmRegRel;
|
|
|
|
let isExtendable = 1, opExtendable = 3, isExtentSigned = 1, opExtentBits = 8,
|
|
CextOpcode = "MUX", InputType = "imm" in
|
|
def MUX_ri : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2,
|
|
s8Ext:$src3),
|
|
"$dst = mux($src1, $src2, #$src3)",
|
|
[(set (i32 IntRegs:$dst),
|
|
(i32 (select (i1 PredRegs:$src1), (i32 IntRegs:$src2),
|
|
s8ExtPred:$src3)))]>, ImmRegRel;
|
|
|
|
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 8 in
|
|
def MUX_ii : ALU32_ii<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Ext:$src2,
|
|
s8Imm:$src3),
|
|
"$dst = mux($src1, #$src2, #$src3)",
|
|
[(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1),
|
|
s8ExtPred:$src2,
|
|
s8ImmPred:$src3)))]>;
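// Illustrative sketch (not part of the build): the four mux variants cover the
// register/immediate combinations of the select operands, so instruction
// selection can turn a select of a register and a small constant, e.g.
//   %r = select %p, %a, 7
// into "$r = mux($p, $a, #7)" via MUX_ri, while a fully-register select uses
// MUX_rr and a select between two small immediates uses MUX_ii.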
|
|
|
|
// Shift halfword.
let isPredicable = 1 in
def ASLH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
           "$dst = aslh($src1)",
           [(set (i32 IntRegs:$dst), (shl (i32 IntRegs:$src1), (i32 16)))]>;

let isPredicable = 1 in
def ASRH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
           "$dst = asrh($src1)",
           [(set (i32 IntRegs:$dst), (sra (i32 IntRegs:$src1), (i32 16)))]>;
|
|
|
|
// Sign extend.
|
|
let isPredicable = 1 in
|
|
def SXTB : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
|
|
"$dst = sxtb($src1)",
|
|
[(set (i32 IntRegs:$dst), (sext_inreg (i32 IntRegs:$src1), i8))]>;
|
|
|
|
let isPredicable = 1 in
|
|
def SXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
|
|
"$dst = sxth($src1)",
|
|
[(set (i32 IntRegs:$dst), (sext_inreg (i32 IntRegs:$src1), i16))]>;
|
|
|
|
// Zero extend.
|
|
let isPredicable = 1, neverHasSideEffects = 1 in
|
|
def ZXTB : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
|
|
"$dst = zxtb($src1)",
|
|
[]>;
|
|
|
|
let isPredicable = 1, neverHasSideEffects = 1 in
|
|
def ZXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
|
|
"$dst = zxth($src1)",
|
|
[]>;
|
|
//===----------------------------------------------------------------------===//
|
|
// ALU32/PERM -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// ALU32/PRED +
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Conditional combine.
|
|
let neverHasSideEffects = 1, isPredicated = 1 in
|
|
def COMBINE_rr_cPt : ALU32_rr<(outs DoubleRegs:$dst),
|
|
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
|
|
"if ($src1) $dst = combine($src2, $src3)",
|
|
[]>;
|
|
|
|
let neverHasSideEffects = 1, isPredicated = 1 in
|
|
def COMBINE_rr_cNotPt : ALU32_rr<(outs DoubleRegs:$dst),
|
|
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
|
|
"if (!$src1) $dst = combine($src2, $src3)",
|
|
[]>;
|
|
|
|
let neverHasSideEffects = 1, isPredicated = 1 in
|
|
def COMBINE_rr_cdnPt : ALU32_rr<(outs DoubleRegs:$dst),
|
|
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
|
|
"if ($src1.new) $dst = combine($src2, $src3)",
|
|
[]>;
|
|
|
|
let neverHasSideEffects = 1, isPredicated = 1 in
|
|
def COMBINE_rr_cdnNotPt : ALU32_rr<(outs DoubleRegs:$dst),
|
|
(ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
|
|
"if (!$src1.new) $dst = combine($src2, $src3)",
|
|
[]>;
|
|
|
|
// Compare.
|
|
defm CMPGTU : CMP32_rr_ri_u9<"cmp.gtu", "CMPGTU", setugt>, ImmRegRel;
|
|
defm CMPGT : CMP32_rr_ri_s10<"cmp.gt", "CMPGT", setgt>, ImmRegRel;
|
|
defm CMPLT : CMP32_rr<"cmp.lt", setlt>;
|
|
defm CMPLTU : CMP32_rr<"cmp.ltu", setult>;
|
|
defm CMPEQ : CMP32_rr_ri_s10<"cmp.eq", "CMPEQ", seteq>, ImmRegRel;
|
|
defm CMPGE : CMP32_ri_s8<"cmp.ge", setge>;
|
|
defm CMPGEU : CMP32_ri_u8<"cmp.geu", setuge>;
|
|
|
|
def CTLZ_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1),
|
|
"$dst = cl0($src1)",
|
|
[(set (i32 IntRegs:$dst), (ctlz (i32 IntRegs:$src1)))]>;
|
|
|
|
def CTTZ_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1),
|
|
"$dst = ct0($src1)",
|
|
[(set (i32 IntRegs:$dst), (cttz (i32 IntRegs:$src1)))]>;
|
|
|
|
def CTLZ64_rr : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1),
|
|
"$dst = cl0($src1)",
|
|
[(set (i32 IntRegs:$dst), (i32 (trunc (ctlz (i64 DoubleRegs:$src1)))))]>;
|
|
|
|
def CTTZ64_rr : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1),
|
|
"$dst = ct0($src1)",
|
|
[(set (i32 IntRegs:$dst), (i32 (trunc (cttz (i64 DoubleRegs:$src1)))))]>;
|
|
|
|
def TSTBIT_rr : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
|
|
"$dst = tstbit($src1, $src2)",
|
|
[(set (i1 PredRegs:$dst),
|
|
(setne (and (shl 1, (i32 IntRegs:$src2)), (i32 IntRegs:$src1)), 0))]>;
|
|
|
|
def TSTBIT_ri : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
|
|
"$dst = tstbit($src1, $src2)",
|
|
[(set (i1 PredRegs:$dst),
|
|
(setne (and (shl 1, (u5ImmPred:$src2)), (i32 IntRegs:$src1)), 0))]>;
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// ALU32/PRED -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// ALU64/ALU +
|
|
//===----------------------------------------------------------------------===//
|
|
// Add.
|
|
def ADD64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
|
|
DoubleRegs:$src2),
|
|
"$dst = add($src1, $src2)",
|
|
[(set (i64 DoubleRegs:$dst), (add (i64 DoubleRegs:$src1),
|
|
(i64 DoubleRegs:$src2)))]>;
|
|
|
|
// Add halfword.
|
|
|
|
// Compare.
|
|
defm CMPEHexagon4 : CMP64_rr<"cmp.eq", seteq>;
|
|
defm CMPGT64 : CMP64_rr<"cmp.gt", setgt>;
|
|
defm CMPGTU64 : CMP64_rr<"cmp.gtu", setugt>;
|
|
|
|
// Logical operations.
|
|
def AND_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
|
|
DoubleRegs:$src2),
|
|
"$dst = and($src1, $src2)",
|
|
[(set (i64 DoubleRegs:$dst), (and (i64 DoubleRegs:$src1),
|
|
(i64 DoubleRegs:$src2)))]>;
|
|
|
|
def OR_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
|
|
DoubleRegs:$src2),
|
|
"$dst = or($src1, $src2)",
|
|
[(set (i64 DoubleRegs:$dst), (or (i64 DoubleRegs:$src1),
|
|
(i64 DoubleRegs:$src2)))]>;
|
|
|
|
def XOR_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
|
|
DoubleRegs:$src2),
|
|
"$dst = xor($src1, $src2)",
|
|
[(set (i64 DoubleRegs:$dst), (xor (i64 DoubleRegs:$src1),
|
|
(i64 DoubleRegs:$src2)))]>;
|
|
|
|
// Maximum.
|
|
def MAXw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
|
|
"$dst = max($src2, $src1)",
|
|
[(set (i32 IntRegs:$dst),
|
|
(i32 (select (i1 (setlt (i32 IntRegs:$src2),
|
|
(i32 IntRegs:$src1))),
|
|
(i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>;
|
|
|
|
def MAXUw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
|
|
"$dst = maxu($src2, $src1)",
|
|
[(set (i32 IntRegs:$dst),
|
|
(i32 (select (i1 (setult (i32 IntRegs:$src2),
|
|
(i32 IntRegs:$src1))),
|
|
(i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>;
|
|
|
|
def MAXd_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
|
|
DoubleRegs:$src2),
|
|
"$dst = max($src2, $src1)",
|
|
[(set (i64 DoubleRegs:$dst),
|
|
(i64 (select (i1 (setlt (i64 DoubleRegs:$src2),
|
|
(i64 DoubleRegs:$src1))),
|
|
(i64 DoubleRegs:$src1),
|
|
(i64 DoubleRegs:$src2))))]>;
|
|
|
|
def MAXUd_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
|
|
DoubleRegs:$src2),
|
|
"$dst = maxu($src2, $src1)",
|
|
[(set (i64 DoubleRegs:$dst),
|
|
(i64 (select (i1 (setult (i64 DoubleRegs:$src2),
|
|
(i64 DoubleRegs:$src1))),
|
|
(i64 DoubleRegs:$src1),
|
|
(i64 DoubleRegs:$src2))))]>;
|
|
|
|
// Minimum.
|
|
def MINw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
|
|
"$dst = min($src2, $src1)",
|
|
[(set (i32 IntRegs:$dst),
|
|
(i32 (select (i1 (setgt (i32 IntRegs:$src2),
|
|
(i32 IntRegs:$src1))),
|
|
(i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>;
|
|
|
|
def MINUw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
|
|
"$dst = minu($src2, $src1)",
|
|
[(set (i32 IntRegs:$dst),
|
|
(i32 (select (i1 (setugt (i32 IntRegs:$src2),
|
|
(i32 IntRegs:$src1))),
|
|
(i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>;
|
|
|
|
def MINd_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
|
|
DoubleRegs:$src2),
|
|
"$dst = min($src2, $src1)",
|
|
[(set (i64 DoubleRegs:$dst),
|
|
(i64 (select (i1 (setgt (i64 DoubleRegs:$src2),
|
|
(i64 DoubleRegs:$src1))),
|
|
(i64 DoubleRegs:$src1),
|
|
(i64 DoubleRegs:$src2))))]>;
|
|
|
|
def MINUd_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
|
|
DoubleRegs:$src2),
|
|
"$dst = minu($src2, $src1)",
|
|
[(set (i64 DoubleRegs:$dst),
|
|
(i64 (select (i1 (setugt (i64 DoubleRegs:$src2),
|
|
(i64 DoubleRegs:$src1))),
|
|
(i64 DoubleRegs:$src1),
|
|
(i64 DoubleRegs:$src2))))]>;
|
|
|
|
// Subtract.
|
|
def SUB64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
|
|
DoubleRegs:$src2),
|
|
"$dst = sub($src1, $src2)",
|
|
[(set (i64 DoubleRegs:$dst), (sub (i64 DoubleRegs:$src1),
|
|
(i64 DoubleRegs:$src2)))]>;
|
|
|
|
// Subtract halfword.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// ALU64/ALU -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// ALU64/BIT +
|
|
//===----------------------------------------------------------------------===//
|
|
//
|
|
//===----------------------------------------------------------------------===//
|
|
// ALU64/BIT -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// ALU64/PERM +
|
|
//===----------------------------------------------------------------------===//
|
|
//
|
|
//===----------------------------------------------------------------------===//
|
|
// ALU64/PERM -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// CR +
|
|
//===----------------------------------------------------------------------===//
|
|
// Logical reductions on predicates.
|
|
|
|
// Looping instructions.
|
|
|
|
// Pipelined looping instructions.
|
|
|
|
// Logical operations on predicates.
|
|
def AND_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2),
|
|
"$dst = and($src1, $src2)",
|
|
[(set (i1 PredRegs:$dst), (and (i1 PredRegs:$src1),
|
|
(i1 PredRegs:$src2)))]>;
|
|
|
|
let neverHasSideEffects = 1 in
|
|
def AND_pnotp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1,
|
|
PredRegs:$src2),
|
|
"$dst = and($src1, !$src2)",
|
|
[]>;
|
|
|
|
def ANY_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1),
|
|
"$dst = any8($src1)",
|
|
[]>;
|
|
|
|
def ALL_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1),
|
|
"$dst = all8($src1)",
|
|
[]>;
|
|
|
|
def VITPACK_pp : SInst<(outs IntRegs:$dst), (ins PredRegs:$src1,
|
|
PredRegs:$src2),
|
|
"$dst = vitpack($src1, $src2)",
|
|
[]>;
|
|
|
|
def VALIGN_rrp : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
|
|
DoubleRegs:$src2,
|
|
PredRegs:$src3),
|
|
"$dst = valignb($src1, $src2, $src3)",
|
|
[]>;
|
|
|
|
def VSPLICE_rrp : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
|
|
DoubleRegs:$src2,
|
|
PredRegs:$src3),
|
|
"$dst = vspliceb($src1, $src2, $src3)",
|
|
[]>;
|
|
|
|
def MASK_p : SInst<(outs DoubleRegs:$dst), (ins PredRegs:$src1),
|
|
"$dst = mask($src1)",
|
|
[]>;
|
|
|
|
def NOT_p : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1),
|
|
"$dst = not($src1)",
|
|
[(set (i1 PredRegs:$dst), (not (i1 PredRegs:$src1)))]>;
|
|
|
|
def OR_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2),
|
|
"$dst = or($src1, $src2)",
|
|
[(set (i1 PredRegs:$dst), (or (i1 PredRegs:$src1),
|
|
(i1 PredRegs:$src2)))]>;
|
|
|
|
def XOR_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2),
|
|
"$dst = xor($src1, $src2)",
|
|
[(set (i1 PredRegs:$dst), (xor (i1 PredRegs:$src1),
|
|
(i1 PredRegs:$src2)))]>;
|
|
|
|
|
|
// User control register transfer.
|
|
//===----------------------------------------------------------------------===//
|
|
// CR -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// J +
|
|
//===----------------------------------------------------------------------===//
|
|
// Jump to address.
|
|
let isBranch = 1, isTerminator=1, isBarrier = 1, isPredicable = 1 in {
|
|
def JMP : JInst< (outs),
|
|
(ins brtarget:$offset),
|
|
"jump $offset",
|
|
[(br bb:$offset)]>;
|
|
}
|
|
|
|
// if (p0) jump
|
|
let isBranch = 1, isTerminator=1, Defs = [PC],
|
|
isPredicated = 1 in {
|
|
def JMP_c : JInst< (outs),
|
|
(ins PredRegs:$src, brtarget:$offset),
|
|
"if ($src) jump $offset",
|
|
[(brcond (i1 PredRegs:$src), bb:$offset)]>;
|
|
}
|
|
|
|
// if (!p0) jump
|
|
let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
|
|
isPredicated = 1 in {
|
|
def JMP_cNot : JInst< (outs),
|
|
(ins PredRegs:$src, brtarget:$offset),
|
|
"if (!$src) jump $offset",
|
|
[]>;
|
|
}
|
|
|
|
let isTerminator = 1, isBranch = 1, neverHasSideEffects = 1, Defs = [PC],
|
|
isPredicated = 1 in {
|
|
def BRCOND : JInst < (outs), (ins PredRegs:$pred, brtarget:$dst),
|
|
"if ($pred) jump $dst",
|
|
[]>;
|
|
}
|
|
|
|
// Jump to address conditioned on new predicate.
|
|
// if (p0) jump:t
|
|
let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
|
|
isPredicated = 1 in {
|
|
def JMP_cdnPt : JInst< (outs),
|
|
(ins PredRegs:$src, brtarget:$offset),
|
|
"if ($src.new) jump:t $offset",
|
|
[]>;
|
|
}
|
|
|
|
// if (!p0) jump:t
|
|
let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
|
|
isPredicated = 1 in {
|
|
def JMP_cdnNotPt : JInst< (outs),
|
|
(ins PredRegs:$src, brtarget:$offset),
|
|
"if (!$src.new) jump:t $offset",
|
|
[]>;
|
|
}
|
|
|
|
// Not taken.
|
|
let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
|
|
isPredicated = 1 in {
|
|
def JMP_cdnPnt : JInst< (outs),
|
|
(ins PredRegs:$src, brtarget:$offset),
|
|
"if ($src.new) jump:nt $offset",
|
|
[]>;
|
|
}
|
|
|
|
// Not taken.
|
|
let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
|
|
isPredicated = 1 in {
|
|
def JMP_cdnNotPnt : JInst< (outs),
|
|
(ins PredRegs:$src, brtarget:$offset),
|
|
"if (!$src.new) jump:nt $offset",
|
|
[]>;
|
|
}
|
|
//===----------------------------------------------------------------------===//
|
|
// J -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// JR +
|
|
//===----------------------------------------------------------------------===//
|
|
def retflag : SDNode<"HexagonISD::RET_FLAG", SDTNone,
|
|
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
|
|
|
|
// Jump to address from register.
|
|
let isPredicable =1, isReturn = 1, isTerminator = 1, isBarrier = 1,
|
|
Defs = [PC], Uses = [R31] in {
|
|
def JMPR: JRInst<(outs), (ins),
|
|
"jumpr r31",
|
|
[(retflag)]>;
|
|
}
|
|
|
|
// Jump to address from register.
|
|
let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicated = 1,
|
|
Defs = [PC], Uses = [R31] in {
|
|
def JMPR_cPt: JRInst<(outs), (ins PredRegs:$src1),
|
|
"if ($src1) jumpr r31",
|
|
[]>;
|
|
}
|
|
|
|
// Jump to address from register.
|
|
let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicated = 1,
|
|
Defs = [PC], Uses = [R31] in {
|
|
def JMPR_cNotPt: JRInst<(outs), (ins PredRegs:$src1),
|
|
"if (!$src1) jumpr r31",
|
|
[]>;
|
|
}
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// JR -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// LD +
|
|
//===----------------------------------------------------------------------===//
|
|
///
|
|
// Load -- MEMri operand
|
|
multiclass LD_MEMri_Pbase<string mnemonic, RegisterClass RC,
|
|
bit isNot, bit isPredNew> {
|
|
let PNewValue = !if(isPredNew, "new", "") in
|
|
def NAME : LDInst2<(outs RC:$dst),
|
|
(ins PredRegs:$src1, MEMri:$addr),
|
|
!if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
|
|
") ")#"$dst = "#mnemonic#"($addr)",
|
|
[]>;
|
|
}
|
|
|
|
multiclass LD_MEMri_Pred<string mnemonic, RegisterClass RC, bit PredNot> {
|
|
let PredSense = !if(PredNot, "false", "true") in {
|
|
defm _c#NAME : LD_MEMri_Pbase<mnemonic, RC, PredNot, 0>;
|
|
// Predicate new
|
|
defm _cdn#NAME : LD_MEMri_Pbase<mnemonic, RC, PredNot, 1>;
|
|
}
|
|
}
|
|
|
|
let isExtendable = 1, neverHasSideEffects = 1 in
|
|
multiclass LD_MEMri<string mnemonic, string CextOp, RegisterClass RC,
|
|
bits<5> ImmBits, bits<5> PredImmBits> {
|
|
|
|
let CextOpcode = CextOp, BaseOpcode = CextOp in {
|
|
let opExtendable = 2, isExtentSigned = 1, opExtentBits = ImmBits,
|
|
isPredicable = 1 in
|
|
def NAME : LDInst2<(outs RC:$dst), (ins MEMri:$addr),
|
|
"$dst = "#mnemonic#"($addr)",
|
|
[]>;
|
|
|
|
let opExtendable = 3, isExtentSigned = 0, opExtentBits = PredImmBits,
|
|
isPredicated = 1 in {
|
|
defm Pt : LD_MEMri_Pred<mnemonic, RC, 0 >;
|
|
defm NotPt : LD_MEMri_Pred<mnemonic, RC, 1 >;
|
|
}
|
|
}
|
|
}
|
|
|
|
let addrMode = BaseImmOffset, isMEMri = "true" in {
|
|
defm LDrib: LD_MEMri < "memb", "LDrib", IntRegs, 11, 6>, AddrModeRel;
|
|
defm LDriub: LD_MEMri < "memub" , "LDriub", IntRegs, 11, 6>, AddrModeRel;
|
|
defm LDrih: LD_MEMri < "memh", "LDrih", IntRegs, 12, 7>, AddrModeRel;
|
|
defm LDriuh: LD_MEMri < "memuh", "LDriuh", IntRegs, 12, 7>, AddrModeRel;
|
|
defm LDriw: LD_MEMri < "memw", "LDriw", IntRegs, 13, 8>, AddrModeRel;
|
|
defm LDrid: LD_MEMri < "memd", "LDrid", DoubleRegs, 14, 9>, AddrModeRel;
|
|
}
|
|
|
|
def : Pat < (i32 (sextloadi8 ADDRriS11_0:$addr)),
|
|
(LDrib ADDRriS11_0:$addr) >;
|
|
|
|
def : Pat < (i32 (zextloadi8 ADDRriS11_0:$addr)),
|
|
(LDriub ADDRriS11_0:$addr) >;
|
|
|
|
def : Pat < (i32 (sextloadi16 ADDRriS11_1:$addr)),
|
|
(LDrih ADDRriS11_1:$addr) >;
|
|
|
|
def : Pat < (i32 (zextloadi16 ADDRriS11_1:$addr)),
|
|
(LDriuh ADDRriS11_1:$addr) >;
|
|
|
|
def : Pat < (i32 (load ADDRriS11_2:$addr)),
|
|
(LDriw ADDRriS11_2:$addr) >;
|
|
|
|
def : Pat < (i64 (load ADDRriS11_3:$addr)),
|
|
(LDrid ADDRriS11_3:$addr) >;
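// Illustrative sketch (not part of the build): with the patterns above, a
// 32-bit IR load whose address fits the ADDRriS11_2 form selects to
// "$dst = memw($addr)" (LDriw), and the sign/zero-extending byte and halfword
// loads map to LDrib/LDriub and LDrih/LDriuh in the same way, with the 64-bit
// case going to LDrid.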
|
|
|
|
|
|
// Load - Base with Immediate offset addressing mode
|
|
multiclass LD_Idxd_Pbase<string mnemonic, RegisterClass RC, Operand predImmOp,
|
|
bit isNot, bit isPredNew> {
|
|
let PNewValue = !if(isPredNew, "new", "") in
|
|
def NAME : LDInst2<(outs RC:$dst),
|
|
(ins PredRegs:$src1, IntRegs:$src2, predImmOp:$src3),
|
|
!if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
|
|
") ")#"$dst = "#mnemonic#"($src2+#$src3)",
|
|
[]>;
|
|
}
|
|
|
|
multiclass LD_Idxd_Pred<string mnemonic, RegisterClass RC, Operand predImmOp,
|
|
bit PredNot> {
|
|
let PredSense = !if(PredNot, "false", "true") in {
|
|
defm _c#NAME : LD_Idxd_Pbase<mnemonic, RC, predImmOp, PredNot, 0>;
|
|
// Predicate new
|
|
defm _cdn#NAME : LD_Idxd_Pbase<mnemonic, RC, predImmOp, PredNot, 1>;
|
|
}
|
|
}
|
|
|
|
let isExtendable = 1, neverHasSideEffects = 1 in
|
|
multiclass LD_Idxd<string mnemonic, string CextOp, RegisterClass RC,
|
|
Operand ImmOp, Operand predImmOp, bits<5> ImmBits,
|
|
bits<5> PredImmBits> {
|
|
|
|
let CextOpcode = CextOp, BaseOpcode = CextOp#_indexed in {
|
|
let opExtendable = 2, isExtentSigned = 1, opExtentBits = ImmBits,
|
|
isPredicable = 1, AddedComplexity = 20 in
|
|
def NAME : LDInst2<(outs RC:$dst), (ins IntRegs:$src1, ImmOp:$offset),
|
|
"$dst = "#mnemonic#"($src1+#$offset)",
|
|
[]>;
|
|
|
|
let opExtendable = 3, isExtentSigned = 0, opExtentBits = PredImmBits,
|
|
isPredicated = 1 in {
|
|
defm Pt : LD_Idxd_Pred<mnemonic, RC, predImmOp, 0 >;
|
|
defm NotPt : LD_Idxd_Pred<mnemonic, RC, predImmOp, 1 >;
|
|
}
|
|
}
|
|
}
|
|
|
|
let addrMode = BaseImmOffset in {
|
|
defm LDrib_indexed: LD_Idxd <"memb", "LDrib", IntRegs, s11_0Ext, u6_0Ext,
|
|
11, 6>, AddrModeRel;
|
|
defm LDriub_indexed: LD_Idxd <"memub" , "LDriub", IntRegs, s11_0Ext, u6_0Ext,
|
|
11, 6>, AddrModeRel;
|
|
defm LDrih_indexed: LD_Idxd <"memh", "LDrih", IntRegs, s11_1Ext, u6_1Ext,
|
|
12, 7>, AddrModeRel;
|
|
defm LDriuh_indexed: LD_Idxd <"memuh", "LDriuh", IntRegs, s11_1Ext, u6_1Ext,
|
|
12, 7>, AddrModeRel;
|
|
defm LDriw_indexed: LD_Idxd <"memw", "LDriw", IntRegs, s11_2Ext, u6_2Ext,
|
|
13, 8>, AddrModeRel;
|
|
defm LDrid_indexed: LD_Idxd <"memd", "LDrid", DoubleRegs, s11_3Ext, u6_3Ext,
|
|
14, 9>, AddrModeRel;
|
|
}
|
|
|
|
let AddedComplexity = 20 in {
|
|
def : Pat < (i32 (sextloadi8 (add IntRegs:$src1, s11_0ExtPred:$offset))),
|
|
(LDrib_indexed IntRegs:$src1, s11_0ExtPred:$offset) >;
|
|
|
|
def : Pat < (i32 (zextloadi8 (add IntRegs:$src1, s11_0ExtPred:$offset))),
|
|
(LDriub_indexed IntRegs:$src1, s11_0ExtPred:$offset) >;
|
|
|
|
def : Pat < (i32 (sextloadi16 (add IntRegs:$src1, s11_1ExtPred:$offset))),
|
|
(LDrih_indexed IntRegs:$src1, s11_1ExtPred:$offset) >;
|
|
|
|
def : Pat < (i32 (zextloadi16 (add IntRegs:$src1, s11_1ExtPred:$offset))),
|
|
(LDriuh_indexed IntRegs:$src1, s11_1ExtPred:$offset) >;
|
|
|
|
def : Pat < (i32 (load (add IntRegs:$src1, s11_2ExtPred:$offset))),
|
|
(LDriw_indexed IntRegs:$src1, s11_2ExtPred:$offset) >;
|
|
|
|
def : Pat < (i64 (load (add IntRegs:$src1, s11_3ExtPred:$offset))),
|
|
(LDrid_indexed IntRegs:$src1, s11_3ExtPred:$offset) >;
|
|
}
|
|
|
|
let neverHasSideEffects = 1 in
|
|
def LDrid_GP : LDInst2<(outs DoubleRegs:$dst),
|
|
(ins globaladdress:$global, u16Imm:$offset),
|
|
"$dst = memd(#$global+$offset)",
|
|
[]>,
|
|
Requires<[NoV4T]>;
|
|
|
|
let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
|
|
def LDd_GP : LDInst2<(outs DoubleRegs:$dst),
|
|
(ins globaladdress:$global),
|
|
"$dst = memd(#$global)",
|
|
[]>,
|
|
Requires<[NoV4T]>;
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// Post increment load
|
|
// Make sure that in post increment load, the first operand is always the post
|
|
// increment operand.
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
multiclass LD_PostInc_Pbase<string mnemonic, RegisterClass RC, Operand ImmOp,
|
|
bit isNot, bit isPredNew> {
|
|
let PNewValue = !if(isPredNew, "new", "") in
|
|
def NAME : LDInst2PI<(outs RC:$dst, IntRegs:$dst2),
|
|
(ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset),
|
|
!if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
|
|
") ")#"$dst = "#mnemonic#"($src2++#$offset)",
|
|
[],
|
|
"$src2 = $dst2">;
|
|
}
|
|
|
|
multiclass LD_PostInc_Pred<string mnemonic, RegisterClass RC,
|
|
Operand ImmOp, bit PredNot> {
|
|
let PredSense = !if(PredNot, "false", "true") in {
|
|
defm _c#NAME : LD_PostInc_Pbase<mnemonic, RC, ImmOp, PredNot, 0>;
|
|
// Predicate new
|
|
let Predicates = [HasV4T], validSubTargets = HasV4SubT in
|
|
defm _cdn#NAME#_V4 : LD_PostInc_Pbase<mnemonic, RC, ImmOp, PredNot, 1>;
|
|
}
|
|
}
|
|
|
|
multiclass LD_PostInc<string mnemonic, string BaseOp, RegisterClass RC,
|
|
Operand ImmOp> {
|
|
|
|
let BaseOpcode = "POST_"#BaseOp in {
|
|
let isPredicable = 1 in
|
|
def NAME : LDInst2PI<(outs RC:$dst, IntRegs:$dst2),
|
|
(ins IntRegs:$src1, ImmOp:$offset),
|
|
"$dst = "#mnemonic#"($src1++#$offset)",
|
|
[],
|
|
"$src1 = $dst2">;
|
|
|
|
let isPredicated = 1 in {
|
|
defm Pt : LD_PostInc_Pred<mnemonic, RC, ImmOp, 0 >;
|
|
defm NotPt : LD_PostInc_Pred<mnemonic, RC, ImmOp, 1 >;
|
|
}
|
|
}
|
|
}
|
|
|
|
let hasCtrlDep = 1, neverHasSideEffects = 1 in {
|
|
defm POST_LDrib : LD_PostInc<"memb", "LDrib", IntRegs, s4_0Imm>,
|
|
PredNewRel;
|
|
defm POST_LDriub : LD_PostInc<"memub", "LDriub", IntRegs, s4_0Imm>,
|
|
PredNewRel;
|
|
defm POST_LDrih : LD_PostInc<"memh", "LDrih", IntRegs, s4_1Imm>,
|
|
PredNewRel;
|
|
defm POST_LDriuh : LD_PostInc<"memuh", "LDriuh", IntRegs, s4_1Imm>,
|
|
PredNewRel;
|
|
defm POST_LDriw : LD_PostInc<"memw", "LDriw", IntRegs, s4_2Imm>,
|
|
PredNewRel;
|
|
defm POST_LDrid : LD_PostInc<"memd", "LDrid", DoubleRegs, s4_3Imm>,
|
|
PredNewRel;
|
|
}
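// Illustrative sketch (not part of the build): each POST_LD* defm above yields
// a base form plus predicated variants; POST_LDriw, for example, is roughly
//   (outs IntRegs:$dst, IntRegs:$dst2), (ins IntRegs:$src1, s4_2Imm:$offset),
//   "$dst = memw($src1++#$offset)", with the constraint "$src1 = $dst2"
// so the updated base register is modeled as a second def tied back to the
// input base, keeping the post-increment operand first as the comment above
// requires.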
|
|
|
|
def : Pat< (i32 (extloadi1 ADDRriS11_0:$addr)),
|
|
(i32 (LDrib ADDRriS11_0:$addr)) >;
|
|
|
|
// Load byte any-extend.
|
|
def : Pat < (i32 (extloadi8 ADDRriS11_0:$addr)),
|
|
(i32 (LDrib ADDRriS11_0:$addr)) >;
|
|
|
|
// Indexed load byte any-extend.
|
|
let AddedComplexity = 20 in
|
|
def : Pat < (i32 (extloadi8 (add IntRegs:$src1, s11_0ImmPred:$offset))),
|
|
(i32 (LDrib_indexed IntRegs:$src1, s11_0ImmPred:$offset)) >;
|
|
|
|
let neverHasSideEffects = 1 in
|
|
def LDrib_GP : LDInst2<(outs IntRegs:$dst),
|
|
(ins globaladdress:$global, u16Imm:$offset),
|
|
"$dst = memb(#$global+$offset)",
|
|
[]>,
|
|
Requires<[NoV4T]>;
|
|
|
|
let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
|
|
def LDb_GP : LDInst2<(outs IntRegs:$dst),
|
|
(ins globaladdress:$global),
|
|
"$dst = memb(#$global)",
|
|
[]>,
|
|
Requires<[NoV4T]>;
|
|
|
|
let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
|
|
def LDub_GP : LDInst2<(outs IntRegs:$dst),
|
|
(ins globaladdress:$global),
|
|
"$dst = memub(#$global)",
|
|
[]>,
|
|
Requires<[NoV4T]>;
|
|
|
|
def : Pat < (i32 (extloadi16 ADDRriS11_1:$addr)),
|
|
(i32 (LDrih ADDRriS11_1:$addr))>;
|
|
|
|
let AddedComplexity = 20 in
|
|
def : Pat < (i32 (extloadi16 (add IntRegs:$src1, s11_1ImmPred:$offset))),
|
|
(i32 (LDrih_indexed IntRegs:$src1, s11_1ImmPred:$offset)) >;
|
|
|
|
let neverHasSideEffects = 1 in
|
|
def LDrih_GP : LDInst2<(outs IntRegs:$dst),
|
|
(ins globaladdress:$global, u16Imm:$offset),
|
|
"$dst = memh(#$global+$offset)",
|
|
[]>,
|
|
Requires<[NoV4T]>;
|
|
|
|
let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
|
|
def LDh_GP : LDInst2<(outs IntRegs:$dst),
|
|
(ins globaladdress:$global),
|
|
"$dst = memh(#$global)",
|
|
[]>,
|
|
Requires<[NoV4T]>;
|
|
|
|
let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
|
|
def LDuh_GP : LDInst2<(outs IntRegs:$dst),
|
|
(ins globaladdress:$global),
|
|
"$dst = memuh(#$global)",
|
|
[]>,
|
|
Requires<[NoV4T]>;
|
|
|
|
let AddedComplexity = 10 in
|
|
def : Pat < (i32 (zextloadi1 ADDRriS11_0:$addr)),
|
|
(i32 (LDriub ADDRriS11_0:$addr))>;
|
|
|
|
let AddedComplexity = 20 in
|
|
def : Pat < (i32 (zextloadi1 (add IntRegs:$src1, s11_0ImmPred:$offset))),
|
|
(i32 (LDriub_indexed IntRegs:$src1, s11_0ImmPred:$offset))>;
|
|
|
|
let neverHasSideEffects = 1 in
|
|
def LDriub_GP : LDInst2<(outs IntRegs:$dst),
|
|
(ins globaladdress:$global, u16Imm:$offset),
|
|
"$dst = memub(#$global+$offset)",
|
|
[]>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Load unsigned halfword.
|
|
let neverHasSideEffects = 1 in
|
|
def LDriuh_GP : LDInst2<(outs IntRegs:$dst),
|
|
(ins globaladdress:$global, u16Imm:$offset),
|
|
"$dst = memuh(#$global+$offset)",
|
|
[]>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Load predicate.
|
|
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 13,
|
|
isPseudo = 1, Defs = [R10,R11,D5], neverHasSideEffects = 1 in
|
|
def LDriw_pred : LDInst2<(outs PredRegs:$dst),
|
|
(ins MEMri:$addr),
|
|
"Error; should not emit",
|
|
[]>;
|
|
|
|
// Indexed load.
|
|
let neverHasSideEffects = 1 in
|
|
def LDriw_GP : LDInst2<(outs IntRegs:$dst),
|
|
(ins globaladdress:$global, u16Imm:$offset),
|
|
"$dst = memw(#$global+$offset)",
|
|
[]>,
|
|
Requires<[NoV4T]>;
|
|
|
|
let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
|
|
def LDw_GP : LDInst2<(outs IntRegs:$dst),
|
|
(ins globaladdress:$global),
|
|
"$dst = memw(#$global)",
|
|
[]>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Deallocate stack frame.
|
|
let Defs = [R29, R30, R31], Uses = [R29], neverHasSideEffects = 1 in {
|
|
def DEALLOCFRAME : LDInst2<(outs), (ins),
|
|
"deallocframe",
|
|
[]>;
|
|
}
|
|
|
|
// Load and unpack bytes to halfwords.
|
|
//===----------------------------------------------------------------------===//
|
|
// LD -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// MTYPE/ALU +
|
|
//===----------------------------------------------------------------------===//
|
|
//===----------------------------------------------------------------------===//
|
|
// MTYPE/ALU -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// MTYPE/COMPLEX +
|
|
//===----------------------------------------------------------------------===//
|
|
//===----------------------------------------------------------------------===//
|
|
// MTYPE/COMPLEX -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// MTYPE/MPYH +
|
|
//===----------------------------------------------------------------------===//
|
|
// Multiply and use lower result.
|
|
// Rd=+mpyi(Rs,#u8)
|
|
let isExtendable = 1, opExtendable = 2, isExtentSigned = 0, opExtentBits = 8 in
|
|
def MPYI_riu : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u8Ext:$src2),
|
|
"$dst =+ mpyi($src1, #$src2)",
|
|
[(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1),
|
|
u8ExtPred:$src2))]>;
|
|
|
|
// Rd=-mpyi(Rs,#u8)
|
|
def MPYI_rin : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u8Imm:$src2),
|
|
"$dst =- mpyi($src1, #$src2)",
|
|
[(set (i32 IntRegs:$dst), (ineg (mul (i32 IntRegs:$src1),
|
|
u8ImmPred:$src2)))]>;
|
|
|
|
// Rd=mpyi(Rs,#m9)
// s9 is NOT the same as m9, but it works... so far.
// The assembler maps this to either Rd=+mpyi(Rs,#u8) or Rd=-mpyi(Rs,#u8),
// depending on the value of m9. See the Arch Spec.
|
|
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 9,
|
|
CextOpcode = "MPYI", InputType = "imm" in
|
|
def MPYI_ri : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s9Ext:$src2),
|
|
"$dst = mpyi($src1, #$src2)",
|
|
[(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1),
|
|
s9ExtPred:$src2))]>, ImmRegRel;
|
|
|
|
// Rd=mpyi(Rs,Rt)
|
|
let CextOpcode = "MPYI", InputType = "reg" in
|
|
def MPYI : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
|
|
"$dst = mpyi($src1, $src2)",
|
|
[(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1),
|
|
(i32 IntRegs:$src2)))]>, ImmRegRel;
|
|
|
|
// Rx+=mpyi(Rs,#u8)
|
|
let isExtendable = 1, opExtendable = 3, isExtentSigned = 0, opExtentBits = 8,
|
|
CextOpcode = "MPYI_acc", InputType = "imm" in
|
|
def MPYI_acc_ri : MInst_acc<(outs IntRegs:$dst),
|
|
(ins IntRegs:$src1, IntRegs:$src2, u8Ext:$src3),
|
|
"$dst += mpyi($src2, #$src3)",
|
|
[(set (i32 IntRegs:$dst),
|
|
(add (mul (i32 IntRegs:$src2), u8ExtPred:$src3),
|
|
(i32 IntRegs:$src1)))],
|
|
"$src1 = $dst">, ImmRegRel;
|
|
|
|
// Rx+=mpyi(Rs,Rt)
|
|
let CextOpcode = "MPYI_acc", InputType = "reg" in
|
|
def MPYI_acc_rr : MInst_acc<(outs IntRegs:$dst),
|
|
(ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
|
|
"$dst += mpyi($src2, $src3)",
|
|
[(set (i32 IntRegs:$dst),
|
|
(add (mul (i32 IntRegs:$src2), (i32 IntRegs:$src3)),
|
|
(i32 IntRegs:$src1)))],
|
|
"$src1 = $dst">, ImmRegRel;
|
|
|
|
// Rx-=mpyi(Rs,#u8)
|
|
let isExtendable = 1, opExtendable = 3, isExtentSigned = 0, opExtentBits = 8 in
|
|
def MPYI_sub_ri : MInst_acc<(outs IntRegs:$dst),
|
|
(ins IntRegs:$src1, IntRegs:$src2, u8Ext:$src3),
|
|
"$dst -= mpyi($src2, #$src3)",
|
|
[(set (i32 IntRegs:$dst),
|
|
(sub (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2),
|
|
u8ExtPred:$src3)))],
|
|
"$src1 = $dst">;
|
|
|
|
// Multiply and use upper result.
|
|
// Rd=mpy(Rs,Rt.H):<<1:rnd:sat
|
|
// Rd=mpy(Rs,Rt.L):<<1:rnd:sat
|
|
// Rd=mpy(Rs,Rt)
|
|
def MPY : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
|
|
"$dst = mpy($src1, $src2)",
|
|
[(set (i32 IntRegs:$dst), (mulhs (i32 IntRegs:$src1),
|
|
(i32 IntRegs:$src2)))]>;
|
|
|
|
// Rd=mpy(Rs,Rt):rnd
|
|
// Rd=mpyu(Rs,Rt)
|
|
def MPYU : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
|
|
"$dst = mpyu($src1, $src2)",
|
|
[(set (i32 IntRegs:$dst), (mulhu (i32 IntRegs:$src1),
|
|
(i32 IntRegs:$src2)))]>;
|
|
|
|
// Multiply and use full result.
|
|
// Rdd=mpyu(Rs,Rt)
|
|
def MPYU64 : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
|
|
"$dst = mpyu($src1, $src2)",
|
|
[(set (i64 DoubleRegs:$dst),
|
|
(mul (i64 (anyext (i32 IntRegs:$src1))),
|
|
(i64 (anyext (i32 IntRegs:$src2)))))]>;
|
|
|
|
// Rdd=mpy(Rs,Rt)
|
|
def MPY64 : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
|
|
"$dst = mpy($src1, $src2)",
|
|
[(set (i64 DoubleRegs:$dst),
|
|
(mul (i64 (sext (i32 IntRegs:$src1))),
|
|
(i64 (sext (i32 IntRegs:$src2)))))]>;
|
|
|
|
// Multiply and accumulate, use full result.
|
|
// Rxx[+-]=mpy(Rs,Rt)
|
|
// Rxx+=mpy(Rs,Rt)
|
|
def MPY64_acc : MInst_acc<(outs DoubleRegs:$dst),
|
|
(ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
|
|
"$dst += mpy($src2, $src3)",
|
|
[(set (i64 DoubleRegs:$dst),
|
|
(add (mul (i64 (sext (i32 IntRegs:$src2))),
|
|
(i64 (sext (i32 IntRegs:$src3)))),
|
|
(i64 DoubleRegs:$src1)))],
|
|
"$src1 = $dst">;
|
|
|
|
// Rxx-=mpy(Rs,Rt)
|
|
def MPY64_sub : MInst_acc<(outs DoubleRegs:$dst),
|
|
(ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
|
|
"$dst -= mpy($src2, $src3)",
|
|
[(set (i64 DoubleRegs:$dst),
|
|
(sub (i64 DoubleRegs:$src1),
|
|
(mul (i64 (sext (i32 IntRegs:$src2))),
|
|
(i64 (sext (i32 IntRegs:$src3))))))],
|
|
"$src1 = $dst">;
|
|
|
|
// Rxx[+-]=mpyu(Rs,Rt)
|
|
// Rxx+=mpyu(Rs,Rt)
|
|
def MPYU64_acc : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
|
|
IntRegs:$src2, IntRegs:$src3),
|
|
"$dst += mpyu($src2, $src3)",
|
|
[(set (i64 DoubleRegs:$dst),
|
|
(add (mul (i64 (anyext (i32 IntRegs:$src2))),
|
|
(i64 (anyext (i32 IntRegs:$src3)))),
|
|
(i64 DoubleRegs:$src1)))], "$src1 = $dst">;
|
|
|
|
// Rxx-=mpyu(Rs,Rt)
|
|
def MPYU64_sub : MInst_acc<(outs DoubleRegs:$dst),
|
|
(ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
|
|
"$dst -= mpyu($src2, $src3)",
|
|
[(set (i64 DoubleRegs:$dst),
|
|
(sub (i64 DoubleRegs:$src1),
|
|
(mul (i64 (anyext (i32 IntRegs:$src2))),
|
|
(i64 (anyext (i32 IntRegs:$src3))))))],
|
|
"$src1 = $dst">;
|
|
|
|
|
|
let InputType = "reg", CextOpcode = "ADD_acc" in
|
|
def ADDrr_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
|
|
IntRegs:$src2, IntRegs:$src3),
|
|
"$dst += add($src2, $src3)",
|
|
[(set (i32 IntRegs:$dst), (add (add (i32 IntRegs:$src2),
|
|
(i32 IntRegs:$src3)),
|
|
(i32 IntRegs:$src1)))],
|
|
"$src1 = $dst">, ImmRegRel;
|
|
|
|
let isExtendable = 1, opExtendable = 3, isExtentSigned = 1, opExtentBits = 8,
|
|
InputType = "imm", CextOpcode = "ADD_acc" in
|
|
def ADDri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
|
|
IntRegs:$src2, s8Ext:$src3),
|
|
"$dst += add($src2, #$src3)",
|
|
[(set (i32 IntRegs:$dst), (add (add (i32 IntRegs:$src2),
|
|
s8_16ExtPred:$src3),
|
|
(i32 IntRegs:$src1)))],
|
|
"$src1 = $dst">, ImmRegRel;
|
|
|
|
let CextOpcode = "SUB_acc", InputType = "reg" in
|
|
def SUBrr_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
|
|
IntRegs:$src2, IntRegs:$src3),
|
|
"$dst -= add($src2, $src3)",
|
|
[(set (i32 IntRegs:$dst),
|
|
(sub (i32 IntRegs:$src1), (add (i32 IntRegs:$src2),
|
|
(i32 IntRegs:$src3))))],
|
|
"$src1 = $dst">, ImmRegRel;
|
|
|
|
let isExtendable = 1, opExtendable = 3, isExtentSigned = 1, opExtentBits = 8,
|
|
CextOpcode = "SUB_acc", InputType = "imm" in
|
|
def SUBri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
|
|
IntRegs:$src2, s8Ext:$src3),
|
|
"$dst -= add($src2, #$src3)",
|
|
[(set (i32 IntRegs:$dst), (sub (i32 IntRegs:$src1),
|
|
(add (i32 IntRegs:$src2),
|
|
s8_16ExtPred:$src3)))],
|
|
"$src1 = $dst">, ImmRegRel;
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// MTYPE/MPYH -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// MTYPE/MPYS +
|
|
//===----------------------------------------------------------------------===//
|
|
//===----------------------------------------------------------------------===//
|
|
// MTYPE/MPYS -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// MTYPE/VB +
|
|
//===----------------------------------------------------------------------===//
|
|
//===----------------------------------------------------------------------===//
|
|
// MTYPE/VB -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// MTYPE/VH +
|
|
//===----------------------------------------------------------------------===//
|
|
//===----------------------------------------------------------------------===//
|
|
// MTYPE/VH -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// ST +
|
|
//===----------------------------------------------------------------------===//
|
|
///
|
|
/// Assumptions::: ****** DO NOT IGNORE ********
/// 1. Make sure that in a post-increment store, the zeroth operand is always
///    the post-increment operand.
/// 2. Make sure that the store value operand (Rt/Rtt) in a store is always
///    the last operand.
///
|
|
// Store doubleword.
|
|
|
|
let neverHasSideEffects = 1 in
|
|
def STrid_GP : STInst2<(outs),
|
|
(ins globaladdress:$global, u16Imm:$offset, DoubleRegs:$src),
|
|
"memd(#$global+$offset) = $src",
|
|
[]>,
|
|
Requires<[NoV4T]>;
|
|
|
|
let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
|
|
def STd_GP : STInst2<(outs),
|
|
(ins globaladdress:$global, DoubleRegs:$src),
|
|
"memd(#$global) = $src",
|
|
[]>,
|
|
Requires<[NoV4T]>;
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// Post increment store
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
multiclass ST_PostInc_Pbase<string mnemonic, RegisterClass RC, Operand ImmOp,
|
|
bit isNot, bit isPredNew> {
|
|
let PNewValue = !if(isPredNew, "new", "") in
|
|
def NAME : STInst2PI<(outs IntRegs:$dst),
|
|
(ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset, RC:$src3),
|
|
!if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
|
|
") ")#mnemonic#"($src2++#$offset) = $src3",
|
|
[],
|
|
"$src2 = $dst">;
|
|
}
|
|
|
|
multiclass ST_PostInc_Pred<string mnemonic, RegisterClass RC,
|
|
Operand ImmOp, bit PredNot> {
|
|
let PredSense = !if(PredNot, "false", "true") in {
|
|
defm _c#NAME : ST_PostInc_Pbase<mnemonic, RC, ImmOp, PredNot, 0>;
|
|
// Predicate new
|
|
let Predicates = [HasV4T], validSubTargets = HasV4SubT in
|
|
defm _cdn#NAME#_V4 : ST_PostInc_Pbase<mnemonic, RC, ImmOp, PredNot, 1>;
|
|
}
|
|
}
|
|
|
|
let hasCtrlDep = 1, isNVStorable = 1, neverHasSideEffects = 1 in
|
|
multiclass ST_PostInc<string mnemonic, string BaseOp, RegisterClass RC,
|
|
Operand ImmOp> {
|
|
|
|
let hasCtrlDep = 1, BaseOpcode = "POST_"#BaseOp in {
|
|
let isPredicable = 1 in
|
|
def NAME : STInst2PI<(outs IntRegs:$dst),
|
|
(ins IntRegs:$src1, ImmOp:$offset, RC:$src2),
|
|
mnemonic#"($src1++#$offset) = $src2",
|
|
[],
|
|
"$src1 = $dst">;
|
|
|
|
let isPredicated = 1 in {
|
|
defm Pt : ST_PostInc_Pred<mnemonic, RC, ImmOp, 0 >;
|
|
defm NotPt : ST_PostInc_Pred<mnemonic, RC, ImmOp, 1 >;
|
|
}
|
|
}
|
|
}
|
|
|
|
defm POST_STbri: ST_PostInc <"memb", "STrib", IntRegs, s4_0Imm>, AddrModeRel;
|
|
defm POST_SThri: ST_PostInc <"memh", "STrih", IntRegs, s4_1Imm>, AddrModeRel;
|
|
defm POST_STwri: ST_PostInc <"memw", "STriw", IntRegs, s4_2Imm>, AddrModeRel;
|
|
|
|
let isNVStorable = 0 in
|
|
defm POST_STdri: ST_PostInc <"memd", "STrid", DoubleRegs, s4_3Imm>, AddrModeRel;
|
|
|
|
def : Pat<(post_truncsti8 (i32 IntRegs:$src1), IntRegs:$src2,
|
|
s4_3ImmPred:$offset),
|
|
(POST_STbri IntRegs:$src2, s4_0ImmPred:$offset, IntRegs:$src1)>;
|
|
|
|
def : Pat<(post_truncsti16 (i32 IntRegs:$src1), IntRegs:$src2,
|
|
s4_3ImmPred:$offset),
|
|
(POST_SThri IntRegs:$src2, s4_1ImmPred:$offset, IntRegs:$src1)>;
|
|
|
|
def : Pat<(post_store (i32 IntRegs:$src1), IntRegs:$src2, s4_2ImmPred:$offset),
|
|
(POST_STwri IntRegs:$src2, s4_1ImmPred:$offset, IntRegs:$src1)>;
|
|
|
|
def : Pat<(post_store (i64 DoubleRegs:$src1), IntRegs:$src2,
|
|
s4_3ImmPred:$offset),
|
|
(POST_STdri IntRegs:$src2, s4_3ImmPred:$offset, DoubleRegs:$src1)>;
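// Illustrative sketch (not part of the build): note how the selection patterns
// above respect the assumptions stated earlier in this section: the base
// register being post-incremented comes first in the instruction's operand
// list and the stored value (Rt/Rtt) comes last, e.g.
//   (POST_STwri IntRegs:$src2, <offset>, IntRegs:$src1)
// even though the post_store DAG node lists the stored value first.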
|
|
|
|
//===----------------------------------------------------------------------===//
// multiclass for the store instructions with MEMri operand.
//===----------------------------------------------------------------------===//
multiclass ST_MEMri_Pbase<string mnemonic, RegisterClass RC, bit isNot,
                          bit isPredNew> {
  let PNewValue = !if(isPredNew, "new", "") in
  def NAME : STInst2<(outs),
             (ins PredRegs:$src1, MEMri:$addr, RC: $src2),
             !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
             ") ")#mnemonic#"($addr) = $src2",
             []>;
}

multiclass ST_MEMri_Pred<string mnemonic, RegisterClass RC, bit PredNot> {
  let PredSense = !if(PredNot, "false", "true") in {
    defm _c#NAME : ST_MEMri_Pbase<mnemonic, RC, PredNot, 0>;

    // Predicate new
    let validSubTargets = HasV4SubT, Predicates = [HasV4T] in
    defm _cdn#NAME#_V4 : ST_MEMri_Pbase<mnemonic, RC, PredNot, 1>;
  }
}

let isExtendable = 1, isNVStorable = 1, neverHasSideEffects = 1 in
multiclass ST_MEMri<string mnemonic, string CextOp, RegisterClass RC,
                    bits<5> ImmBits, bits<5> PredImmBits> {

  let CextOpcode = CextOp, BaseOpcode = CextOp in {
    let opExtendable = 1, isExtentSigned = 1, opExtentBits = ImmBits,
        isPredicable = 1 in
    def NAME : STInst2<(outs),
               (ins MEMri:$addr, RC:$src),
               mnemonic#"($addr) = $src",
               []>;

    let opExtendable = 2, isExtentSigned = 0, opExtentBits = PredImmBits,
        isPredicated = 1 in {
      defm Pt : ST_MEMri_Pred<mnemonic, RC, 0>;
      defm NotPt : ST_MEMri_Pred<mnemonic, RC, 1>;
    }
  }
}

let addrMode = BaseImmOffset, isMEMri = "true" in {
  defm STrib: ST_MEMri < "memb", "STrib", IntRegs, 11, 6>, AddrModeRel;
  defm STrih: ST_MEMri < "memh", "STrih", IntRegs, 12, 7>, AddrModeRel;
  defm STriw: ST_MEMri < "memw", "STriw", IntRegs, 13, 8>, AddrModeRel;

  let isNVStorable = 0 in
  defm STrid: ST_MEMri < "memd", "STrid", DoubleRegs, 14, 9>, AddrModeRel;
}

def : Pat<(truncstorei8 (i32 IntRegs:$src1), ADDRriS11_0:$addr),
          (STrib ADDRriS11_0:$addr, (i32 IntRegs:$src1))>;

def : Pat<(truncstorei16 (i32 IntRegs:$src1), ADDRriS11_1:$addr),
          (STrih ADDRriS11_1:$addr, (i32 IntRegs:$src1))>;

def : Pat<(store (i32 IntRegs:$src1), ADDRriS11_2:$addr),
          (STriw ADDRriS11_2:$addr, (i32 IntRegs:$src1))>;

def : Pat<(store (i64 DoubleRegs:$src1), ADDRriS11_3:$addr),
          (STrid ADDRriS11_3:$addr, (i64 DoubleRegs:$src1))>;

//===----------------------------------------------------------------------===//
// multiclass for the store instructions with base+immediate offset
// addressing mode
//===----------------------------------------------------------------------===//
multiclass ST_Idxd_Pbase<string mnemonic, RegisterClass RC, Operand predImmOp,
                         bit isNot, bit isPredNew> {
  let PNewValue = !if(isPredNew, "new", "") in
  def NAME : STInst2<(outs),
             (ins PredRegs:$src1, IntRegs:$src2, predImmOp:$src3, RC: $src4),
             !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ",
             ") ")#mnemonic#"($src2+#$src3) = $src4",
             []>;
}

multiclass ST_Idxd_Pred<string mnemonic, RegisterClass RC, Operand predImmOp,
                        bit PredNot> {
  let PredSense = !if(PredNot, "false", "true"), isPredicated = 1 in {
    defm _c#NAME : ST_Idxd_Pbase<mnemonic, RC, predImmOp, PredNot, 0>;

    // Predicate new
    let validSubTargets = HasV4SubT, Predicates = [HasV4T] in
    defm _cdn#NAME#_V4 : ST_Idxd_Pbase<mnemonic, RC, predImmOp, PredNot, 1>;
  }
}

let isExtendable = 1, isNVStorable = 1, neverHasSideEffects = 1 in
multiclass ST_Idxd<string mnemonic, string CextOp, RegisterClass RC,
                   Operand ImmOp, Operand predImmOp, bits<5> ImmBits,
                   bits<5> PredImmBits> {

  let CextOpcode = CextOp, BaseOpcode = CextOp#_indexed in {
    let opExtendable = 1, isExtentSigned = 1, opExtentBits = ImmBits,
        isPredicable = 1 in
    def NAME : STInst2<(outs),
               (ins IntRegs:$src1, ImmOp:$src2, RC:$src3),
               mnemonic#"($src1+#$src2) = $src3",
               []>;

    let opExtendable = 2, isExtentSigned = 0, opExtentBits = PredImmBits in {
      defm Pt : ST_Idxd_Pred<mnemonic, RC, predImmOp, 0>;
      defm NotPt : ST_Idxd_Pred<mnemonic, RC, predImmOp, 1>;
    }
  }
}

let addrMode = BaseImmOffset, InputType = "reg" in {
  defm STrib_indexed: ST_Idxd < "memb", "STrib", IntRegs, s11_0Ext,
                                u6_0Ext, 11, 6>, AddrModeRel, ImmRegRel;
  defm STrih_indexed: ST_Idxd < "memh", "STrih", IntRegs, s11_1Ext,
                                u6_1Ext, 12, 7>, AddrModeRel, ImmRegRel;
  defm STriw_indexed: ST_Idxd < "memw", "STriw", IntRegs, s11_2Ext,
                                u6_2Ext, 13, 8>, AddrModeRel, ImmRegRel;
  let isNVStorable = 0 in
  defm STrid_indexed: ST_Idxd < "memd", "STrid", DoubleRegs, s11_3Ext,
                                u6_3Ext, 14, 9>, AddrModeRel;
}

let AddedComplexity = 10 in {
  def : Pat<(truncstorei8 (i32 IntRegs:$src1), (add IntRegs:$src2,
                                                    s11_0ExtPred:$offset)),
            (STrib_indexed IntRegs:$src2, s11_0ImmPred:$offset,
                           (i32 IntRegs:$src1))>;

  def : Pat<(truncstorei16 (i32 IntRegs:$src1), (add IntRegs:$src2,
                                                     s11_1ExtPred:$offset)),
            (STrih_indexed IntRegs:$src2, s11_1ImmPred:$offset,
                           (i32 IntRegs:$src1))>;

  def : Pat<(store (i32 IntRegs:$src1), (add IntRegs:$src2,
                                             s11_2ExtPred:$offset)),
            (STriw_indexed IntRegs:$src2, s11_2ImmPred:$offset,
                           (i32 IntRegs:$src1))>;

  def : Pat<(store (i64 DoubleRegs:$src1), (add IntRegs:$src2,
                                                s11_3ExtPred:$offset)),
            (STrid_indexed IntRegs:$src2, s11_3ImmPred:$offset,
                           (i64 DoubleRegs:$src1))>;
}

// memb(gp+#u16:0)=Rt
let neverHasSideEffects = 1 in
def STrib_GP : STInst2<(outs),
            (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
            "memb(#$global+$offset) = $src",
            []>,
            Requires<[NoV4T]>;

// memb(#global)=Rt
let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
def STb_GP : STInst2<(outs),
            (ins globaladdress:$global, IntRegs:$src),
            "memb(#$global) = $src",
            []>,
            Requires<[NoV4T]>;

let neverHasSideEffects = 1 in
def STrih_GP : STInst2<(outs),
            (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
            "memh(#$global+$offset) = $src",
            []>,
            Requires<[NoV4T]>;

let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
def STh_GP : STInst2<(outs),
            (ins globaladdress:$global, IntRegs:$src),
            "memh(#$global) = $src",
            []>,
            Requires<[NoV4T]>;

// memh(Rx++#s4:1)=Rt.H

// Store word.
// Store predicate.
let Defs = [R10,R11,D5], neverHasSideEffects = 1 in
def STriw_pred : STInst2<(outs),
            (ins MEMri:$addr, PredRegs:$src1),
            "Error; should not emit",
            []>;

let neverHasSideEffects = 1 in
def STriw_GP : STInst2<(outs),
            (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
            "memw(#$global+$offset) = $src",
            []>,
            Requires<[NoV4T]>;

let neverHasSideEffects = 1, validSubTargets = NoV4SubT in
def STw_GP : STInst2<(outs),
            (ins globaladdress:$global, IntRegs:$src),
            "memw(#$global) = $src",
            []>,
            Requires<[NoV4T]>;

// Allocate stack frame.
let Defs = [R29, R30], Uses = [R31, R30], neverHasSideEffects = 1 in {
  def ALLOCFRAME : STInst2<(outs),
             (ins i32imm:$amt),
             "allocframe(#$amt)",
             []>;
}
//===----------------------------------------------------------------------===//
// ST -
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// STYPE/ALU +
//===----------------------------------------------------------------------===//
// Logical NOT.
def NOT_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
               "$dst = not($src1)",
               [(set (i64 DoubleRegs:$dst), (not (i64 DoubleRegs:$src1)))]>;


// Sign extend word to doubleword.
def SXTW : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1),
           "$dst = sxtw($src1)",
           [(set (i64 DoubleRegs:$dst), (sext (i32 IntRegs:$src1)))]>;
//===----------------------------------------------------------------------===//
// STYPE/ALU -
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// STYPE/BIT +
//===----------------------------------------------------------------------===//
// clrbit.
def CLRBIT : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
             "$dst = clrbit($src1, #$src2)",
             [(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1),
                                            (not
                                               (shl 1, u5ImmPred:$src2))))]>;

def CLRBIT_31 : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
                "$dst = clrbit($src1, #$src2)",
                []>;

// Map from r0 = and(r1, 2147483647) to r0 = clrbit(r1, #31).
def : Pat <(and (i32 IntRegs:$src1), 2147483647),
           (CLRBIT_31 (i32 IntRegs:$src1), 31)>;

// setbit.
def SETBIT : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
             "$dst = setbit($src1, #$src2)",
             [(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1),
                                           (shl 1, u5ImmPred:$src2)))]>;

// Map from r0 = or(r1, -2147483648) to r0 = setbit(r1, #31).
def SETBIT_31 : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
                "$dst = setbit($src1, #$src2)",
                []>;

def : Pat <(or (i32 IntRegs:$src1), -2147483648),
           (SETBIT_31 (i32 IntRegs:$src1), 31)>;

// togglebit.
def TOGBIT : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
             "$dst = togglebit($src1, #$src2)",
             [(set (i32 IntRegs:$dst), (xor (i32 IntRegs:$src1),
                                            (shl 1, u5ImmPred:$src2)))]>;

// Map from r0 = xor(r1, -2147483648) to r0 = togglebit(r1, #31).
def TOGBIT_31 : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
                "$dst = togglebit($src1, #$src2)",
                []>;

def : Pat <(xor (i32 IntRegs:$src1), -2147483648),
           (TOGBIT_31 (i32 IntRegs:$src1), 31)>;

// Predicate transfer.
let neverHasSideEffects = 1 in
def TFR_RsPd : SInst<(outs IntRegs:$dst), (ins PredRegs:$src1),
               "$dst = $src1 /* Should almost never emit this. */",
               []>;

def TFR_PdRs : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1),
               "$dst = $src1 /* Should almost never emit this. */",
               [(set (i1 PredRegs:$dst), (trunc (i32 IntRegs:$src1)))]>;
//===----------------------------------------------------------------------===//
// STYPE/PRED -
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
|
|
// STYPE/SHIFT +
|
|
//===----------------------------------------------------------------------===//
|
|
// Shift by immediate.
|
|
def ASR_ri : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
|
|
"$dst = asr($src1, #$src2)",
|
|
[(set (i32 IntRegs:$dst), (sra (i32 IntRegs:$src1),
|
|
u5ImmPred:$src2))]>;
|
|
|
|
def ASRd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
|
|
"$dst = asr($src1, #$src2)",
|
|
[(set (i64 DoubleRegs:$dst), (sra (i64 DoubleRegs:$src1),
|
|
u6ImmPred:$src2))]>;
|
|
|
|
def ASL : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
|
|
"$dst = asl($src1, #$src2)",
|
|
[(set (i32 IntRegs:$dst), (shl (i32 IntRegs:$src1),
|
|
u5ImmPred:$src2))]>;
|
|
|
|
def ASLd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
|
|
"$dst = asl($src1, #$src2)",
|
|
[(set (i64 DoubleRegs:$dst), (shl (i64 DoubleRegs:$src1),
|
|
u6ImmPred:$src2))]>;
|
|
|
|
def LSR_ri : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
|
|
"$dst = lsr($src1, #$src2)",
|
|
[(set (i32 IntRegs:$dst), (srl (i32 IntRegs:$src1),
|
|
u5ImmPred:$src2))]>;
|
|
|
|
def LSRd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
|
|
"$dst = lsr($src1, #$src2)",
|
|
[(set (i64 DoubleRegs:$dst), (srl (i64 DoubleRegs:$src1),
|
|
u6ImmPred:$src2))]>;
|
|
|
|
// Shift by immediate and add.
|
|
let AddedComplexity = 100 in
|
|
def ADDASL : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
|
|
u3Imm:$src3),
|
|
"$dst = addasl($src1, $src2, #$src3)",
|
|
[(set (i32 IntRegs:$dst), (add (i32 IntRegs:$src1),
|
|
(shl (i32 IntRegs:$src2),
|
|
u3ImmPred:$src3)))]>;
|
|
|
|
// Shift by register.
|
|
def ASL_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
|
|
"$dst = asl($src1, $src2)",
|
|
[(set (i32 IntRegs:$dst), (shl (i32 IntRegs:$src1),
|
|
(i32 IntRegs:$src2)))]>;
|
|
|
|
def ASR_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
|
|
"$dst = asr($src1, $src2)",
|
|
[(set (i32 IntRegs:$dst), (sra (i32 IntRegs:$src1),
|
|
(i32 IntRegs:$src2)))]>;
|
|
|
|
def LSL_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
|
|
"$dst = lsl($src1, $src2)",
|
|
[(set (i32 IntRegs:$dst), (shl (i32 IntRegs:$src1),
|
|
(i32 IntRegs:$src2)))]>;
|
|
|
|
def LSR_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
|
|
"$dst = lsr($src1, $src2)",
|
|
[(set (i32 IntRegs:$dst), (srl (i32 IntRegs:$src1),
|
|
(i32 IntRegs:$src2)))]>;
|
|
|
|
def ASLd : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
|
|
"$dst = asl($src1, $src2)",
|
|
[(set (i64 DoubleRegs:$dst), (shl (i64 DoubleRegs:$src1),
|
|
(i32 IntRegs:$src2)))]>;
|
|
|
|
def LSLd : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
|
|
"$dst = lsl($src1, $src2)",
|
|
[(set (i64 DoubleRegs:$dst), (shl (i64 DoubleRegs:$src1),
|
|
(i32 IntRegs:$src2)))]>;
|
|
|
|
def ASRd_rr : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
|
|
IntRegs:$src2),
|
|
"$dst = asr($src1, $src2)",
|
|
[(set (i64 DoubleRegs:$dst), (sra (i64 DoubleRegs:$src1),
|
|
(i32 IntRegs:$src2)))]>;
|
|
|
|
def LSRd_rr : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
|
|
IntRegs:$src2),
|
|
"$dst = lsr($src1, $src2)",
|
|
[(set (i64 DoubleRegs:$dst), (srl (i64 DoubleRegs:$src1),
|
|
(i32 IntRegs:$src2)))]>;
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// STYPE/SHIFT -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// STYPE/VH +
|
|
//===----------------------------------------------------------------------===//
|
|
//===----------------------------------------------------------------------===//
|
|
// STYPE/VH -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// STYPE/VW +
|
|
//===----------------------------------------------------------------------===//
|
|
//===----------------------------------------------------------------------===//
|
|
// STYPE/VW -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// SYSTEM/SUPER +
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// SYSTEM/USER +
|
|
//===----------------------------------------------------------------------===//
|
|
def SDHexagonBARRIER: SDTypeProfile<0, 0, []>;
|
|
def HexagonBARRIER: SDNode<"HexagonISD::BARRIER", SDHexagonBARRIER,
|
|
[SDNPHasChain]>;
|
|
|
|
let hasSideEffects = 1, isSolo = 1 in
|
|
def BARRIER : SYSInst<(outs), (ins),
|
|
"barrier",
|
|
[(HexagonBARRIER)]>;
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// SYSTEM/SUPER -
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// TFRI64 - assembly mapped.
let isReMaterializable = 1 in
def TFRI64 : ALU64_rr<(outs DoubleRegs:$dst), (ins s8Imm64:$src1),
             "$dst = #$src1",
             [(set (i64 DoubleRegs:$dst), s8Imm64Pred:$src1)]>;

// Pseudo instruction to encode a set of conditional transfers.
// This instruction is used instead of a mux and trades off code size
// for performance. We conduct this transformation optimistically in
// the hope that these instructions get promoted to dot-new transfers.
let AddedComplexity = 100, isPredicated = 1 in
def TFR_condset_rr : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
                                                        IntRegs:$src2,
                                                        IntRegs:$src3),
                     "Error; should not emit",
                     [(set (i32 IntRegs:$dst),
                           (i32 (select (i1 PredRegs:$src1),
                                        (i32 IntRegs:$src2),
                                        (i32 IntRegs:$src3))))]>;
let AddedComplexity = 100, isPredicated = 1 in
|
|
def TFR_condset_ri : ALU32_rr<(outs IntRegs:$dst),
|
|
(ins PredRegs:$src1, IntRegs:$src2, s12Imm:$src3),
|
|
"Error; should not emit",
|
|
[(set (i32 IntRegs:$dst),
|
|
(i32 (select (i1 PredRegs:$src1), (i32 IntRegs:$src2),
|
|
s12ImmPred:$src3)))]>;
|
|
|
|
let AddedComplexity = 100, isPredicated = 1 in
|
|
def TFR_condset_ir : ALU32_rr<(outs IntRegs:$dst),
|
|
(ins PredRegs:$src1, s12Imm:$src2, IntRegs:$src3),
|
|
"Error; should not emit",
|
|
[(set (i32 IntRegs:$dst),
|
|
(i32 (select (i1 PredRegs:$src1), s12ImmPred:$src2,
|
|
(i32 IntRegs:$src3))))]>;
|
|
|
|
let AddedComplexity = 100, isPredicated = 1 in
|
|
def TFR_condset_ii : ALU32_rr<(outs IntRegs:$dst),
|
|
(ins PredRegs:$src1, s12Imm:$src2, s12Imm:$src3),
|
|
"Error; should not emit",
|
|
[(set (i32 IntRegs:$dst),
|
|
(i32 (select (i1 PredRegs:$src1), s12ImmPred:$src2,
|
|
s12ImmPred:$src3)))]>;
|
|
|
|
// Generate frameindex addresses.
|
|
let isReMaterializable = 1 in
|
|
def TFR_FI : ALU32_ri<(outs IntRegs:$dst), (ins FrameIndex:$src1),
|
|
"$dst = add($src1)",
|
|
[(set (i32 IntRegs:$dst), ADDRri:$src1)]>;
|
|
|
|
//
|
|
// CR - Type.
|
|
//
|
|
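// Hardware loop setup: loop0 sets the loop start address (SA0) and the trip count (LC0).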
let neverHasSideEffects = 1, Defs = [SA0, LC0] in {
|
|
def LOOP0_i : CRInst<(outs), (ins brtarget:$offset, u10Imm:$src2),
|
|
"loop0($offset, #$src2)",
|
|
[]>;
|
|
}
|
|
|
|
let neverHasSideEffects = 1, Defs = [SA0, LC0] in {
|
|
def LOOP0_r : CRInst<(outs), (ins brtarget:$offset, IntRegs:$src2),
|
|
"loop0($offset, $src2)",
|
|
[]>;
|
|
}
|
|
|
|
let isBranch = 1, isTerminator = 1, neverHasSideEffects = 1,
|
|
Defs = [PC, LC0], Uses = [SA0, LC0] in {
|
|
def ENDLOOP0 : Endloop<(outs), (ins brtarget:$offset),
|
|
":endloop0",
|
|
[]>;
|
|
}
|
|
|
|
// Support for generating global address.
|
|
// Taken from X86InstrInfo.td.
|
|
def SDTHexagonCONST32 : SDTypeProfile<1, 1, [
|
|
SDTCisVT<0, i32>,
|
|
SDTCisVT<1, i32>,
|
|
SDTCisPtrTy<0>]>;
|
|
def HexagonCONST32 : SDNode<"HexagonISD::CONST32", SDTHexagonCONST32>;
|
|
def HexagonCONST32_GP : SDNode<"HexagonISD::CONST32_GP", SDTHexagonCONST32>;
|
|
|
|
// HI/LO Instructions
|
|
let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
|
|
def LO : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$global),
|
|
"$dst.l = #LO($global)",
|
|
[]>;
|
|
|
|
let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
|
|
def HI : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$global),
|
|
"$dst.h = #HI($global)",
|
|
[]>;
|
|
|
|
let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
|
|
def LOi : ALU32_ri<(outs IntRegs:$dst), (ins i32imm:$imm_value),
|
|
"$dst.l = #LO($imm_value)",
|
|
[]>;
|
|
|
|
|
|
let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
|
|
def HIi : ALU32_ri<(outs IntRegs:$dst), (ins i32imm:$imm_value),
|
|
"$dst.h = #HI($imm_value)",
|
|
[]>;
|
|
|
|
let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
|
|
def LO_jt : ALU32_ri<(outs IntRegs:$dst), (ins jumptablebase:$jt),
|
|
"$dst.l = #LO($jt)",
|
|
[]>;
|
|
|
|
let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
|
|
def HI_jt : ALU32_ri<(outs IntRegs:$dst), (ins jumptablebase:$jt),
|
|
"$dst.h = #HI($jt)",
|
|
[]>;
|
|
|
|
|
|
let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in
|
|
def LO_label : ALU32_ri<(outs IntRegs:$dst), (ins bblabel:$label),
|
|
"$dst.l = #LO($label)",
|
|
[]>;
|
|
|
|
let isReMaterializable = 1, isMoveImm = 1 , neverHasSideEffects = 1 in
|
|
def HI_label : ALU32_ri<(outs IntRegs:$dst), (ins bblabel:$label),
|
|
"$dst.h = #HI($label)",
|
|
[]>;
|
|
|
|
// This pattern is incorrect. When we add small data, we should change
// this pattern to use memw(#foo).
// This is for sdata.
let isMoveImm = 1 in
def CONST32 : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global),
              "$dst = CONST32(#$global)",
              [(set (i32 IntRegs:$dst),
                    (load (HexagonCONST32 tglobaltlsaddr:$global)))]>;

// This is for non-sdata.
|
|
let isReMaterializable = 1, isMoveImm = 1 in
|
|
def CONST32_set : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global),
|
|
"$dst = CONST32(#$global)",
|
|
[(set (i32 IntRegs:$dst),
|
|
(HexagonCONST32 tglobaladdr:$global))]>;
|
|
|
|
let isReMaterializable = 1, isMoveImm = 1 in
|
|
def CONST32_set_jt : LDInst2<(outs IntRegs:$dst), (ins jumptablebase:$jt),
|
|
"$dst = CONST32(#$jt)",
|
|
[(set (i32 IntRegs:$dst),
|
|
(HexagonCONST32 tjumptable:$jt))]>;
|
|
|
|
let isReMaterializable = 1, isMoveImm = 1 in
|
|
def CONST32GP_set : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global),
|
|
"$dst = CONST32(#$global)",
|
|
[(set (i32 IntRegs:$dst),
|
|
(HexagonCONST32_GP tglobaladdr:$global))]>;
|
|
|
|
let isReMaterializable = 1, isMoveImm = 1 in
|
|
def CONST32_Int_Real : LDInst2<(outs IntRegs:$dst), (ins i32imm:$global),
|
|
"$dst = CONST32(#$global)",
|
|
[(set (i32 IntRegs:$dst), imm:$global) ]>;
|
|
|
|
// Map BlockAddress lowering to CONST32_Int_Real
|
|
def : Pat<(HexagonCONST32_GP tblockaddress:$addr),
|
|
(CONST32_Int_Real tblockaddress:$addr)>;
|
|
|
|
let isReMaterializable = 1, isMoveImm = 1 in
|
|
def CONST32_Label : LDInst2<(outs IntRegs:$dst), (ins bblabel:$label),
|
|
"$dst = CONST32($label)",
|
|
[(set (i32 IntRegs:$dst), (HexagonCONST32 bbl:$label))]>;
|
|
|
|
let isReMaterializable = 1, isMoveImm = 1 in
|
|
def CONST64_Int_Real : LDInst2<(outs DoubleRegs:$dst), (ins i64imm:$global),
|
|
"$dst = CONST64(#$global)",
|
|
[(set (i64 DoubleRegs:$dst), imm:$global) ]>;
|
|
|
|
// Materialize a false predicate by xoring a predicate register with itself.
def TFR_PdFalse : SInst<(outs PredRegs:$dst), (ins),
                  "$dst = xor($dst, $dst)",
                  [(set (i1 PredRegs:$dst), 0)]>;

// 32x32 signed multiply that returns the upper 32 bits of the 64-bit product.
def MPY_trsext : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
                 "$dst = mpy($src1, $src2)",
                 [(set (i32 IntRegs:$dst),
                       (trunc (i64 (srl (i64 (mul (i64 (sext (i32 IntRegs:$src1))),
                                                  (i64 (sext (i32 IntRegs:$src2))))),
                                        (i32 32)))))]>;

// Pseudo instructions.
|
|
def SDT_SPCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
|
|
|
|
def SDT_SPCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>,
|
|
SDTCisVT<1, i32> ]>;
|
|
|
|
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_SPCallSeqEnd,
|
|
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
|
|
|
|
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_SPCallSeqStart,
|
|
[SDNPHasChain, SDNPOutGlue]>;
|
|
|
|
def SDT_SPCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
|
|
|
|
def call : SDNode<"HexagonISD::CALL", SDT_SPCall,
|
|
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, SDNPVariadic]>;
|
|
|
|
// For tail calls, a HexagonTCRet SDNode has three SDNode properties: a chain,
// an optional glue operand, and variadic arguments.
// Its single operand has pointer type.
def HexagonTCRet : SDNode<"HexagonISD::TC_RETURN", SDT_SPCall,
                          [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;

let Defs = [R29, R30], Uses = [R31, R30, R29] in {
|
|
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt),
|
|
"Should never be emitted",
|
|
[(callseq_start timm:$amt)]>;
|
|
}
|
|
|
|
let Defs = [R29, R30, R31], Uses = [R29] in {
|
|
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
|
|
"Should never be emitted",
|
|
[(callseq_end timm:$amt1, timm:$amt2)]>;
|
|
}
|
|
// Call subroutine.
|
|
let isCall = 1, neverHasSideEffects = 1,
|
|
Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10,
|
|
R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
|
|
def CALL : JInst<(outs), (ins calltarget:$dst),
|
|
"call $dst", []>;
|
|
}
|
|
|
|
// Call subroutine from register.
|
|
let isCall = 1, neverHasSideEffects = 1,
|
|
Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10,
|
|
R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
|
|
def CALLR : JRInst<(outs), (ins IntRegs:$dst),
|
|
"callr $dst",
|
|
[]>;
|
|
}
|
|
|
|
// Tail Calls.
|
|
let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1 in {
|
|
def TCRETURNtg : JInst<(outs), (ins calltarget:$dst),
|
|
"jump $dst // TAILCALL", []>;
|
|
}
|
|
let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1 in {
|
|
def TCRETURNtext : JInst<(outs), (ins calltarget:$dst),
|
|
"jump $dst // TAILCALL", []>;
|
|
}
|
|
|
|
let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1 in {
|
|
def TCRETURNR : JInst<(outs), (ins IntRegs:$dst),
|
|
"jumpr $dst // TAILCALL", []>;
|
|
}
|
|
// Map call instruction.
|
|
def : Pat<(call (i32 IntRegs:$dst)),
|
|
(CALLR (i32 IntRegs:$dst))>, Requires<[HasV2TOnly]>;
|
|
def : Pat<(call tglobaladdr:$dst),
|
|
(CALL tglobaladdr:$dst)>, Requires<[HasV2TOnly]>;
|
|
def : Pat<(call texternalsym:$dst),
|
|
(CALL texternalsym:$dst)>, Requires<[HasV2TOnly]>;
|
|
//Tail calls.
|
|
def : Pat<(HexagonTCRet tglobaladdr:$dst),
|
|
(TCRETURNtg tglobaladdr:$dst)>;
|
|
def : Pat<(HexagonTCRet texternalsym:$dst),
|
|
(TCRETURNtext texternalsym:$dst)>;
|
|
def : Pat<(HexagonTCRet (i32 IntRegs:$dst)),
|
|
(TCRETURNR (i32 IntRegs:$dst))>;
|
|
|
|
// Atomic load and store support
|
|
// 8 bit atomic load
|
|
def : Pat<(atomic_load_8 (HexagonCONST32_GP tglobaladdr:$global)),
|
|
(i32 (LDub_GP tglobaladdr:$global))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
def : Pat<(atomic_load_8 (add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset)),
|
|
(i32 (LDriub_GP tglobaladdr:$global, u16ImmPred:$offset))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
def : Pat<(atomic_load_8 ADDRriS11_0:$src1),
|
|
(i32 (LDriub ADDRriS11_0:$src1))>;
|
|
|
|
def : Pat<(atomic_load_8 (add (i32 IntRegs:$src1), s11_0ImmPred:$offset)),
|
|
(i32 (LDriub_indexed (i32 IntRegs:$src1), s11_0ImmPred:$offset))>;
|
|
|
|
|
|
|
|
// 16 bit atomic load
|
|
def : Pat<(atomic_load_16 (HexagonCONST32_GP tglobaladdr:$global)),
|
|
(i32 (LDuh_GP tglobaladdr:$global))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
def : Pat<(atomic_load_16 (add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset)),
|
|
(i32 (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
def : Pat<(atomic_load_16 ADDRriS11_1:$src1),
|
|
(i32 (LDriuh ADDRriS11_1:$src1))>;
|
|
|
|
def : Pat<(atomic_load_16 (add (i32 IntRegs:$src1), s11_1ImmPred:$offset)),
|
|
(i32 (LDriuh_indexed (i32 IntRegs:$src1), s11_1ImmPred:$offset))>;
|
|
|
|
|
|
|
|
// 32 bit atomic load
|
|
def : Pat<(atomic_load_32 (HexagonCONST32_GP tglobaladdr:$global)),
|
|
(i32 (LDw_GP tglobaladdr:$global))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
def : Pat<(atomic_load_32 (add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset)),
|
|
(i32 (LDriw_GP tglobaladdr:$global, u16ImmPred:$offset))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
def : Pat<(atomic_load_32 ADDRriS11_2:$src1),
|
|
(i32 (LDriw ADDRriS11_2:$src1))>;
|
|
|
|
def : Pat<(atomic_load_32 (add (i32 IntRegs:$src1), s11_2ImmPred:$offset)),
|
|
(i32 (LDriw_indexed (i32 IntRegs:$src1), s11_2ImmPred:$offset))>;
|
|
|
|
|
|
// 64 bit atomic load
|
|
def : Pat<(atomic_load_64 (HexagonCONST32_GP tglobaladdr:$global)),
|
|
(i64 (LDd_GP tglobaladdr:$global))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
def : Pat<(atomic_load_64 (add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset)),
|
|
(i64 (LDrid_GP tglobaladdr:$global, u16ImmPred:$offset))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
def : Pat<(atomic_load_64 ADDRriS11_3:$src1),
|
|
(i64 (LDrid ADDRriS11_3:$src1))>;
|
|
|
|
def : Pat<(atomic_load_64 (add (i32 IntRegs:$src1), s11_3ImmPred:$offset)),
|
|
(i64 (LDrid_indexed (i32 IntRegs:$src1), s11_3ImmPred:$offset))>;
|
|
|
|
|
|
// 64 bit atomic store
|
|
def : Pat<(atomic_store_64 (HexagonCONST32_GP tglobaladdr:$global),
|
|
(i64 DoubleRegs:$src1)),
|
|
(STd_GP tglobaladdr:$global, (i64 DoubleRegs:$src1))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
def : Pat<(atomic_store_64 (add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset),
|
|
(i64 DoubleRegs:$src1)),
|
|
(STrid_GP tglobaladdr:$global, u16ImmPred:$offset,
|
|
(i64 DoubleRegs:$src1))>, Requires<[NoV4T]>;
|
|
|
|
// 8 bit atomic store
|
|
def : Pat<(atomic_store_8 (HexagonCONST32_GP tglobaladdr:$global),
|
|
(i32 IntRegs:$src1)),
|
|
(STb_GP tglobaladdr:$global, (i32 IntRegs:$src1))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
def : Pat<(atomic_store_8 (add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset),
|
|
(i32 IntRegs:$src1)),
|
|
(STrib_GP tglobaladdr:$global, u16ImmPred:$offset,
|
|
(i32 IntRegs:$src1))>, Requires<[NoV4T]>;
|
|
|
|
def : Pat<(atomic_store_8 ADDRriS11_0:$src2, (i32 IntRegs:$src1)),
|
|
(STrib ADDRriS11_0:$src2, (i32 IntRegs:$src1))>;
|
|
|
|
def : Pat<(atomic_store_8 (add (i32 IntRegs:$src2), s11_0ImmPred:$offset),
|
|
(i32 IntRegs:$src1)),
|
|
(STrib_indexed (i32 IntRegs:$src2), s11_0ImmPred:$offset,
|
|
(i32 IntRegs:$src1))>;
|
|
|
|
|
|
// 16 bit atomic store
|
|
def : Pat<(atomic_store_16 (HexagonCONST32_GP tglobaladdr:$global),
|
|
(i32 IntRegs:$src1)),
|
|
(STh_GP tglobaladdr:$global, (i32 IntRegs:$src1))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
def : Pat<(atomic_store_16 (add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset),
|
|
(i32 IntRegs:$src1)),
|
|
(STrih_GP tglobaladdr:$global, u16ImmPred:$offset,
|
|
(i32 IntRegs:$src1))>, Requires<[NoV4T]>;
|
|
|
|
def : Pat<(atomic_store_16 ADDRriS11_1:$src2, (i32 IntRegs:$src1)),
|
|
(STrih ADDRriS11_1:$src2, (i32 IntRegs:$src1))>;
|
|
|
|
def : Pat<(atomic_store_16 (i32 IntRegs:$src1),
|
|
(add (i32 IntRegs:$src2), s11_1ImmPred:$offset)),
|
|
(STrih_indexed (i32 IntRegs:$src2), s11_1ImmPred:$offset,
|
|
(i32 IntRegs:$src1))>;
|
|
|
|
|
|
// 32 bit atomic store
|
|
def : Pat<(atomic_store_32 (HexagonCONST32_GP tglobaladdr:$global),
|
|
(i32 IntRegs:$src1)),
|
|
(STw_GP tglobaladdr:$global, (i32 IntRegs:$src1))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
def : Pat<(atomic_store_32 (add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset),
|
|
(i32 IntRegs:$src1)),
|
|
(STriw_GP tglobaladdr:$global, u16ImmPred:$offset,
|
|
(i32 IntRegs:$src1))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
def : Pat<(atomic_store_32 ADDRriS11_2:$src2, (i32 IntRegs:$src1)),
|
|
(STriw ADDRriS11_2:$src2, (i32 IntRegs:$src1))>;
|
|
|
|
def : Pat<(atomic_store_32 (add (i32 IntRegs:$src2), s11_2ImmPred:$offset),
|
|
(i32 IntRegs:$src1)),
|
|
(STriw_indexed (i32 IntRegs:$src2), s11_2ImmPred:$offset,
|
|
(i32 IntRegs:$src1))>;
|
|
|
|
|
|
|
|
|
|
def : Pat<(atomic_store_64 ADDRriS11_3:$src2, (i64 DoubleRegs:$src1)),
|
|
(STrid ADDRriS11_3:$src2, (i64 DoubleRegs:$src1))>;
|
|
|
|
def : Pat<(atomic_store_64 (add (i32 IntRegs:$src2), s11_3ImmPred:$offset),
|
|
(i64 DoubleRegs:$src1)),
|
|
(STrid_indexed (i32 IntRegs:$src2), s11_3ImmPred:$offset,
|
|
(i64 DoubleRegs:$src1))>;
|
|
|
|
// Map from r0 = and(r1, 65535) to r0 = zxth(r1)
|
|
def : Pat <(and (i32 IntRegs:$src1), 65535),
|
|
(ZXTH (i32 IntRegs:$src1))>;
|
|
|
|
// Map from r0 = and(r1, 255) to r0 = zxtb(r1).
|
|
def : Pat <(and (i32 IntRegs:$src1), 255),
|
|
(ZXTB (i32 IntRegs:$src1))>;
|
|
|
|
// Map Add(p1, true) to p1 = not(p1).
// Add(p1, false) should never be produced; if it is, it must be mapped to a NOOP.
def : Pat <(add (i1 PredRegs:$src1), -1),
           (NOT_p (i1 PredRegs:$src1))>;

// Map from p0 = setlt(r0, r1) r2 = mux(p0, r3, r4) =>
|
|
// p0 = cmp.lt(r0, r1), r0 = mux(p0, r2, r1).
|
|
def : Pat <(select (i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
|
|
(i32 IntRegs:$src3),
|
|
(i32 IntRegs:$src4)),
|
|
(i32 (TFR_condset_rr (CMPLTrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)),
|
|
(i32 IntRegs:$src4), (i32 IntRegs:$src3)))>,
|
|
Requires<[HasV2TOnly]>;
|
|
|
|
// Map from p0 = pnot(p0); r0 = mux(p0, #i, #j) => r0 = mux(p0, #j, #i).
|
|
def : Pat <(select (not (i1 PredRegs:$src1)), s8ImmPred:$src2, s8ImmPred:$src3),
|
|
(i32 (TFR_condset_ii (i1 PredRegs:$src1), s8ImmPred:$src3,
|
|
s8ImmPred:$src2))>;
|
|
|
|
// Map from p0 = pnot(p0); r0 = select(p0, #i, r1)
|
|
// => r0 = TFR_condset_ri(p0, r1, #i)
|
|
def : Pat <(select (not (i1 PredRegs:$src1)), s12ImmPred:$src2,
|
|
(i32 IntRegs:$src3)),
|
|
(i32 (TFR_condset_ri (i1 PredRegs:$src1), (i32 IntRegs:$src3),
|
|
s12ImmPred:$src2))>;
|
|
|
|
// Map from p0 = pnot(p0); r0 = mux(p0, r1, #i)
|
|
// => r0 = TFR_condset_ir(p0, #i, r1)
|
|
def : Pat <(select (not PredRegs:$src1), IntRegs:$src2, s12ImmPred:$src3),
|
|
(i32 (TFR_condset_ir (i1 PredRegs:$src1), s12ImmPred:$src3,
|
|
(i32 IntRegs:$src2)))>;
|
|
|
|
// Map from p0 = pnot(p0); if (p0) jump => if (!p0) jump.
|
|
def : Pat <(brcond (not PredRegs:$src1), bb:$offset),
|
|
(JMP_cNot (i1 PredRegs:$src1), bb:$offset)>;
|
|
|
|
// Map from p2 = pnot(p2); p1 = and(p0, p2) => p1 = and(p0, !p2).
|
|
def : Pat <(and PredRegs:$src1, (not PredRegs:$src2)),
|
|
(i1 (AND_pnotp (i1 PredRegs:$src1), (i1 PredRegs:$src2)))>;
|
|
|
|
// Map from store(globaladdress + x) -> memd(#foo + x).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(store (i64 DoubleRegs:$src1),
|
|
(add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset)),
|
|
(STrid_GP tglobaladdr:$global, u16ImmPred:$offset,
|
|
(i64 DoubleRegs:$src1))>, Requires<[NoV4T]>;
|
|
|
|
// Map from store(globaladdress) -> memd(#foo).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(store (i64 DoubleRegs:$src1),
|
|
(HexagonCONST32_GP tglobaladdr:$global)),
|
|
(STd_GP tglobaladdr:$global, (i64 DoubleRegs:$src1))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from store(globaladdress + x) -> memw(#foo + x).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(store (i32 IntRegs:$src1),
|
|
(add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset)),
|
|
(STriw_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from store(globaladdress) -> memw(#foo + 0).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)),
|
|
(STriw_GP tglobaladdr:$global, 0, (i32 IntRegs:$src1))>;
|
|
|
|
// Map from store(globaladdress) -> memw(#foo).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)),
|
|
(STriw_GP tglobaladdr:$global, 0, (i32 IntRegs:$src1))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from store(globaladdress + x) -> memh(#foo + x).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(truncstorei16 (i32 IntRegs:$src1),
|
|
(add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset)),
|
|
(STrih_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from store(globaladdress) -> memh(#foo).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(truncstorei16 (i32 IntRegs:$src1),
|
|
(HexagonCONST32_GP tglobaladdr:$global)),
|
|
(STh_GP tglobaladdr:$global, (i32 IntRegs:$src1))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from store(globaladdress + x) -> memb(#foo + x).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(truncstorei8 (i32 IntRegs:$src1),
|
|
(add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset)),
|
|
(STrib_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from store(globaladdress) -> memb(#foo).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(truncstorei8 (i32 IntRegs:$src1),
|
|
(HexagonCONST32_GP tglobaladdr:$global)),
|
|
(STb_GP tglobaladdr:$global, (i32 IntRegs:$src1))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from load(globaladdress + x) -> memw(#foo + x).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (load (add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset))),
|
|
(i32 (LDriw_GP tglobaladdr:$global, u16ImmPred:$offset))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from load(globaladdress) -> memw(#foo).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (load (HexagonCONST32_GP tglobaladdr:$global))),
|
|
(i32 (LDw_GP tglobaladdr:$global))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from load(globaladdress + x) -> memd(#foo + x).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i64 (load (add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset))),
|
|
(i64 (LDrid_GP tglobaladdr:$global, u16ImmPred:$offset))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from load(globaladdress) -> memw(#foo + 0).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i64 (load (HexagonCONST32_GP tglobaladdr:$global))),
|
|
(i64 (LDd_GP tglobaladdr:$global))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress), Pd = Rd.
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i1 (load (HexagonCONST32_GP tglobaladdr:$global))),
|
|
(i1 (TFR_PdRs (i32 (LDb_GP tglobaladdr:$global))))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from load(globaladdress + x) -> memh(#foo + x).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (extloadi16 (add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset))),
|
|
(i32 (LDrih_GP tglobaladdr:$global, u16ImmPred:$offset))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from load(globaladdress + x) -> memh(#foo + x).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
|
|
(i32 (LDrih_GP tglobaladdr:$global, 0))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from load(globaladdress + x) -> memuh(#foo + x).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (zextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset))),
|
|
(i32 (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from load(globaladdress) -> memuh(#foo).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
|
|
(i32 (LDriuh_GP tglobaladdr:$global, 0))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from load(globaladdress) -> memh(#foo).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
|
|
(i32 (LDh_GP tglobaladdr:$global))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from load(globaladdress) -> memuh(#foo).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
|
|
(i32 (LDuh_GP tglobaladdr:$global))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from load(globaladdress + x) -> memb(#foo + x).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (extloadi8 (add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset))),
|
|
(i32 (LDrib_GP tglobaladdr:$global, u16ImmPred:$offset))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from load(globaladdress + x) -> memb(#foo + x).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (sextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset))),
|
|
(i32 (LDrib_GP tglobaladdr:$global, u16ImmPred:$offset))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from load(globaladdress + x) -> memub(#foo + x).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (zextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global),
|
|
u16ImmPred:$offset))),
|
|
(i32 (LDriub_GP tglobaladdr:$global, u16ImmPred:$offset))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from load(globaladdress) -> memb(#foo).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (extloadi8 (HexagonCONST32_GP tglobaladdr:$global))),
|
|
(i32 (LDb_GP tglobaladdr:$global))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from load(globaladdress) -> memb(#foo).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (sextloadi8 (HexagonCONST32_GP tglobaladdr:$global))),
|
|
(i32 (LDb_GP tglobaladdr:$global))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from load(globaladdress) -> memub(#foo).
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (zextloadi8 (HexagonCONST32_GP tglobaladdr:$global))),
|
|
(i32 (LDub_GP tglobaladdr:$global))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// When the Interprocedural Global Variable optimizer realizes that a
|
|
// certain global variable takes only two constant values, it shrinks the
|
|
// global to a boolean. Catch those loads here in the following 3 patterns.
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (extloadi1 (HexagonCONST32_GP tglobaladdr:$global))),
|
|
(i32 (LDb_GP tglobaladdr:$global))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (sextloadi1 (HexagonCONST32_GP tglobaladdr:$global))),
|
|
(i32 (LDb_GP tglobaladdr:$global))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
let AddedComplexity = 100 in
|
|
def : Pat <(i32 (zextloadi1 (HexagonCONST32_GP tglobaladdr:$global))),
|
|
(i32 (LDub_GP tglobaladdr:$global))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from i1 loads to 32 bits. This assumes that the i1* is byte aligned.
|
|
def : Pat <(i32 (zextloadi1 ADDRriS11_0:$addr)),
|
|
(i32 (AND_rr (i32 (LDrib ADDRriS11_0:$addr)), (TFRI 0x1)))>;
|
|
|
|
// Map from Rdd = sign_extend_inreg(Rss, i32) -> Rdd = SXTW(Rss.lo).
|
|
def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i32)),
|
|
(i64 (SXTW (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg))))>;
|
|
|
|
// Map from Rdd = sign_extend_inreg(Rss, i16) -> Rdd = SXTW(SXTH(Rss.lo)).
|
|
def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i16)),
|
|
(i64 (SXTW (i32 (SXTH (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
|
|
subreg_loreg))))))>;
|
|
|
|
// Map from Rdd = sign_extend_inreg(Rss, i8) -> Rdd = SXTW(SXTB(Rss.lo)).
|
|
def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i8)),
|
|
(i64 (SXTW (i32 (SXTB (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
|
|
subreg_loreg))))))>;
|
|
|
|
// We want to prevent emitting pnot's as much as possible.
|
|
// Map brcond with an unsupported setcc to a JMP_cNot.
|
|
def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
|
|
bb:$offset),
|
|
(JMP_cNot (CMPEQrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)),
|
|
bb:$offset)>;
|
|
|
|
def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), s10ImmPred:$src2)),
|
|
bb:$offset),
|
|
(JMP_cNot (CMPEQri (i32 IntRegs:$src1), s10ImmPred:$src2), bb:$offset)>;
|
|
|
|
def : Pat <(brcond (i1 (setne (i1 PredRegs:$src1), (i1 -1))), bb:$offset),
|
|
(JMP_cNot (i1 PredRegs:$src1), bb:$offset)>;
|
|
|
|
def : Pat <(brcond (i1 (setne (i1 PredRegs:$src1), (i1 0))), bb:$offset),
|
|
(JMP_c (i1 PredRegs:$src1), bb:$offset)>;
|
|
|
|
def : Pat <(brcond (i1 (setlt (i32 IntRegs:$src1), s8ImmPred:$src2)),
|
|
bb:$offset),
|
|
(JMP_cNot (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2), bb:$offset)>;
|
|
|
|
def : Pat <(brcond (i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
|
|
bb:$offset),
|
|
(JMP_c (CMPLTrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)), bb:$offset)>;
|
|
|
|
def : Pat <(brcond (i1 (setuge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
|
|
bb:$offset),
|
|
(JMP_cNot (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)),
|
|
bb:$offset)>;
|
|
|
|
def : Pat <(brcond (i1 (setule (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
|
|
bb:$offset),
|
|
(JMP_cNot (CMPGTUrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)),
|
|
bb:$offset)>;
|
|
|
|
def : Pat <(brcond (i1 (setule (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
|
|
bb:$offset),
|
|
(JMP_cNot (CMPGTU64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)),
|
|
bb:$offset)>;
|
|
|
|
// Map from a 64-bit select to an emulated 64-bit mux.
|
|
// Hexagon does not support 64-bit MUXes; so emulate with combines.
|
|
def : Pat <(select (i1 PredRegs:$src1), (i64 DoubleRegs:$src2),
|
|
(i64 DoubleRegs:$src3)),
|
|
(i64 (COMBINE_rr (i32 (MUX_rr (i1 PredRegs:$src1),
|
|
(i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
|
|
subreg_hireg)),
|
|
(i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src3),
|
|
subreg_hireg)))),
|
|
(i32 (MUX_rr (i1 PredRegs:$src1),
|
|
(i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
|
|
subreg_loreg)),
|
|
(i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src3),
|
|
subreg_loreg))))))>;
|
|
|
|
// Map from a 1-bit select to logical ops.
|
|
// From LegalizeDAG.cpp: (B1 ? B2 : B3) <=> (B1 & B2)|(!B1&B3).
|
|
def : Pat <(select (i1 PredRegs:$src1), (i1 PredRegs:$src2),
|
|
(i1 PredRegs:$src3)),
|
|
(OR_pp (AND_pp (i1 PredRegs:$src1), (i1 PredRegs:$src2)),
|
|
(AND_pp (NOT_p (i1 PredRegs:$src1)), (i1 PredRegs:$src3)))>;
|
|
|
|
// Map Pd = load(addr) -> Rs = load(addr); Pd = Rs.
|
|
def : Pat<(i1 (load ADDRriS11_2:$addr)),
|
|
(i1 (TFR_PdRs (i32 (LDrib ADDRriS11_2:$addr))))>;
|
|
|
|
// Map for truncating from 64 immediates to 32 bit immediates.
|
|
def : Pat<(i32 (trunc (i64 DoubleRegs:$src))),
|
|
(i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), subreg_loreg))>;
|
|
|
|
// Map for truncating from i64 immediates to i1 bit immediates.
|
|
def : Pat<(i1 (trunc (i64 DoubleRegs:$src))),
|
|
(i1 (TFR_PdRs (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
|
|
subreg_loreg))))>;
|
|
|
|
// Map memb(Rs) = Rdd -> memb(Rs) = Rt.
|
|
def : Pat<(truncstorei8 (i64 DoubleRegs:$src), ADDRriS11_0:$addr),
|
|
(STrib ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
|
|
subreg_loreg)))>;
|
|
|
|
// Map memh(Rs) = Rdd -> memh(Rs) = Rt.
|
|
def : Pat<(truncstorei16 (i64 DoubleRegs:$src), ADDRriS11_0:$addr),
|
|
(STrih ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
|
|
subreg_loreg)))>;
|
|
// Map memw(Rs) = Rdd -> memw(Rs) = Rt
|
|
def : Pat<(truncstorei32 (i64 DoubleRegs:$src), ADDRriS11_0:$addr),
|
|
(STriw ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
|
|
subreg_loreg)))>;
|
|
|
|
// Map memw(Rs) = Rdd -> memw(Rs) = Rt.
|
|
def : Pat<(truncstorei32 (i64 DoubleRegs:$src), ADDRriS11_0:$addr),
|
|
(STriw ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src),
|
|
subreg_loreg)))>;
|
|
|
|
// Map from i1 = constant<-1>; memw(addr) = i1 -> r0 = 1; memw(addr) = r0.
|
|
def : Pat<(store (i1 -1), ADDRriS11_2:$addr),
|
|
(STrib ADDRriS11_2:$addr, (TFRI 1))>;
|
|
|
|
let AddedComplexity = 100 in
|
|
// Map from i1 = constant<-1>; memw(CONST32(#foo)) = i1 -> r0 = 1;
|
|
// memw(#foo) = r0
|
|
def : Pat<(store (i1 -1), (HexagonCONST32_GP tglobaladdr:$global)),
|
|
(STb_GP tglobaladdr:$global, (TFRI 1))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from i1 = constant<-1>; store i1 -> r0 = 1; store r0.
|
|
def : Pat<(store (i1 -1), ADDRriS11_2:$addr),
|
|
(STrib ADDRriS11_2:$addr, (TFRI 1))>;
|
|
|
|
// Map from memb(Rs) = Pd -> Rt = mux(Pd, #0, #1); store Rt.
|
|
def : Pat<(store (i1 PredRegs:$src1), ADDRriS11_2:$addr),
|
|
(STrib ADDRriS11_2:$addr, (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0)) )>;
|
|
|
|
// Map Rdd = anyext(Rs) -> Rdd = sxtw(Rs).
|
|
// Hexagon_TODO: We can probably use combine but that will cost 2 instructions.
|
|
// Better way to do this?
|
|
def : Pat<(i64 (anyext (i32 IntRegs:$src1))),
|
|
(i64 (SXTW (i32 IntRegs:$src1)))>;
|
|
|
|
// Map cmple -> cmpgt.
|
|
// rs <= rt -> !(rs > rt).
|
|
def : Pat<(i1 (setle (i32 IntRegs:$src1), s10ImmPred:$src2)),
|
|
(i1 (NOT_p (CMPGTri (i32 IntRegs:$src1), s10ImmPred:$src2)))>;
|
|
|
|
// rs <= rt -> !(rs > rt).
|
|
def : Pat<(i1 (setle (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
|
|
(i1 (NOT_p (CMPGTrr (i32 IntRegs:$src1), (i32 IntRegs:$src2))))>;
|
|
|
|
// Rss <= Rtt -> !(Rss > Rtt).
|
|
def : Pat<(i1 (setle (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
|
|
(i1 (NOT_p (CMPGT64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))))>;
|
|
|
|
// Map cmpne -> cmpeq.
|
|
// Hexagon_TODO: We should improve on this.
|
|
// rs != rt -> !(rs == rt).
|
|
def : Pat <(i1 (setne (i32 IntRegs:$src1), s10ImmPred:$src2)),
|
|
(i1 (NOT_p(i1 (CMPEQri (i32 IntRegs:$src1), s10ImmPred:$src2))))>;
|
|
|
|
// Map cmpne(Rs) -> !cmpeqe(Rs).
|
|
// rs != rt -> !(rs == rt).
|
|
def : Pat <(i1 (setne (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
|
|
(i1 (NOT_p (i1 (CMPEQrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)))))>;
|
|
|
|
// Convert setne back to xor for hexagon since we compute w/ pred registers.
|
|
def : Pat <(i1 (setne (i1 PredRegs:$src1), (i1 PredRegs:$src2))),
|
|
(i1 (XOR_pp (i1 PredRegs:$src1), (i1 PredRegs:$src2)))>;
|
|
|
|
// Map cmpne(Rss) -> !cmpew(Rss).
|
|
// rs != rt -> !(rs == rt).
|
|
def : Pat <(i1 (setne (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
|
|
(i1 (NOT_p (i1 (CMPEHexagon4rr (i64 DoubleRegs:$src1),
|
|
(i64 DoubleRegs:$src2)))))>;
|
|
|
|
// Map cmpge(Rs, Rt) -> !(cmpgt(Rs, Rt).
|
|
// rs >= rt -> !(rt > rs).
|
|
def : Pat <(i1 (setge (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
|
|
(i1 (NOT_p (i1 (CMPGTrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))))>;
|
|
|
|
def : Pat <(i1 (setge (i32 IntRegs:$src1), s8ImmPred:$src2)),
|
|
(i1 (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2))>;
|
|
|
|
// Map cmpge(Rss, Rtt) -> !cmpgt(Rtt, Rss).
|
|
// rss >= rtt -> !(rtt > rss).
|
|
def : Pat <(i1 (setge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
|
|
(i1 (NOT_p (i1 (CMPGT64rr (i64 DoubleRegs:$src2),
|
|
(i64 DoubleRegs:$src1)))))>;
|
|
|
|
// Map cmplt(Rs, Imm) -> !cmpge(Rs, Imm).
|
|
// rs < rt -> !(rs >= rt).
|
|
def : Pat <(i1 (setlt (i32 IntRegs:$src1), s8ImmPred:$src2)),
|
|
(i1 (NOT_p (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2)))>;
|
|
|
|
// Map cmplt(Rs, Rt) -> cmpgt(Rt, Rs).
// rs < rt -> rt > rs.
// We can let the assembler map it, or we can do it in the compiler itself.
def : Pat <(i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
           (i1 (CMPGTrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))>;

// Map cmplt(Rss, Rtt) -> cmpgt(Rtt, Rss).
|
|
// rss < rtt -> (rtt > rss).
|
|
def : Pat <(i1 (setlt (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
|
|
(i1 (CMPGT64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)))>;
|
|
|
|
// Map from cmpltu(Rs, Rd) -> cmpgtu(Rd, Rs).
// rs < rt -> rt > rs.
// We can let the assembler map it, or we can do it in the compiler itself.
def : Pat <(i1 (setult (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
           (i1 (CMPGTUrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))>;

// Map from cmpltu(Rss, Rdd) -> cmpgtu(Rdd, Rss).
|
|
// rs < rt -> rt > rs.
|
|
def : Pat <(i1 (setult (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
|
|
(i1 (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)))>;
|
|
|
|
// Generate cmpgeu(Rs, #u8)
|
|
def : Pat <(i1 (setuge (i32 IntRegs:$src1), u8ImmPred:$src2)),
|
|
(i1 (CMPGEUri (i32 IntRegs:$src1), u8ImmPred:$src2))>;
|
|
|
|
// Generate cmpgtu(Rs, #u9)
|
|
def : Pat <(i1 (setugt (i32 IntRegs:$src1), u9ImmPred:$src2)),
|
|
(i1 (CMPGTUri (i32 IntRegs:$src1), u9ImmPred:$src2))>;
|
|
|
|
// Map from Rs >= Rt -> !(Rt > Rs).
|
|
// rs >= rt -> !(rt > rs).
|
|
def : Pat <(i1 (setuge (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
|
|
(i1 (NOT_p (CMPGTUrr (i32 IntRegs:$src2), (i32 IntRegs:$src1))))>;
|
|
|
|
// Map from Rs >= Rt -> !(Rt > Rs).
|
|
// rs >= rt -> !(rt > rs).
|
|
def : Pat <(i1 (setuge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
|
|
(i1 (NOT_p (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1))))>;
|
|
|
|
// Map from cmpleu(Rs, Rs) -> !cmpgtu(Rs, Rs).
|
|
// Map from (Rs <= Rt) -> !(Rs > Rt).
|
|
def : Pat <(i1 (setule (i32 IntRegs:$src1), (i32 IntRegs:$src2))),
|
|
(i1 (NOT_p (CMPGTUrr (i32 IntRegs:$src1), (i32 IntRegs:$src2))))>;
|
|
|
|
// Map from cmpleu(Rss, Rtt) -> !cmpgtu(Rss, Rtt-1).
|
|
// Map from (Rs <= Rt) -> !(Rs > Rt).
|
|
def : Pat <(i1 (setule (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))),
|
|
(i1 (NOT_p (CMPGTU64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))))>;
|
|
|
|
// Sign extends.
|
|
// i1 -> i32
|
|
def : Pat <(i32 (sext (i1 PredRegs:$src1))),
|
|
(i32 (MUX_ii (i1 PredRegs:$src1), -1, 0))>;
|
|
|
|
// i1 -> i64
|
|
def : Pat <(i64 (sext (i1 PredRegs:$src1))),
|
|
(i64 (COMBINE_rr (TFRI -1), (MUX_ii (i1 PredRegs:$src1), -1, 0)))>;
|
|
|
|
// Convert sign-extended load back to load and sign extend.
|
|
// i8 -> i64
|
|
def: Pat <(i64 (sextloadi8 ADDRriS11_0:$src1)),
|
|
(i64 (SXTW (LDrib ADDRriS11_0:$src1)))>;
|
|
|
|
// Convert any-extended load back to load and sign extend.
|
|
// i8 -> i64
|
|
def: Pat <(i64 (extloadi8 ADDRriS11_0:$src1)),
|
|
(i64 (SXTW (LDrib ADDRriS11_0:$src1)))>;
|
|
|
|
// Convert sign-extended load back to load and sign extend.
|
|
// i16 -> i64
|
|
def: Pat <(i64 (sextloadi16 ADDRriS11_1:$src1)),
|
|
(i64 (SXTW (LDrih ADDRriS11_1:$src1)))>;
|
|
|
|
// Convert sign-extended load back to load and sign extend.
|
|
// i32 -> i64
|
|
def: Pat <(i64 (sextloadi32 ADDRriS11_2:$src1)),
|
|
(i64 (SXTW (LDriw ADDRriS11_2:$src1)))>;
|
|
|
|
|
|
// Zero extends.
|
|
// i1 -> i32
|
|
def : Pat <(i32 (zext (i1 PredRegs:$src1))),
|
|
(i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))>;
|
|
|
|
// i1 -> i64
|
|
def : Pat <(i64 (zext (i1 PredRegs:$src1))),
|
|
(i64 (COMBINE_rr (TFRI 0), (MUX_ii (i1 PredRegs:$src1), 1, 0)))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// i32 -> i64
|
|
def : Pat <(i64 (zext (i32 IntRegs:$src1))),
|
|
(i64 (COMBINE_rr (TFRI 0), (i32 IntRegs:$src1)))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// i8 -> i64
|
|
def: Pat <(i64 (zextloadi8 ADDRriS11_0:$src1)),
|
|
(i64 (COMBINE_rr (TFRI 0), (LDriub ADDRriS11_0:$src1)))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
let AddedComplexity = 20 in
|
|
def: Pat <(i64 (zextloadi8 (add (i32 IntRegs:$src1),
|
|
s11_0ExtPred:$offset))),
|
|
(i64 (COMBINE_rr (TFRI 0), (LDriub_indexed IntRegs:$src1,
|
|
s11_0ExtPred:$offset)))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// i1 -> i64
|
|
def: Pat <(i64 (zextloadi1 ADDRriS11_0:$src1)),
|
|
(i64 (COMBINE_rr (TFRI 0), (LDriub ADDRriS11_0:$src1)))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
let AddedComplexity = 20 in
|
|
def: Pat <(i64 (zextloadi1 (add (i32 IntRegs:$src1),
|
|
s11_0ExtPred:$offset))),
|
|
(i64 (COMBINE_rr (TFRI 0), (LDriub_indexed IntRegs:$src1,
|
|
s11_0ExtPred:$offset)))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// i16 -> i64
|
|
def: Pat <(i64 (zextloadi16 ADDRriS11_1:$src1)),
|
|
(i64 (COMBINE_rr (TFRI 0), (LDriuh ADDRriS11_1:$src1)))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
let AddedComplexity = 20 in
|
|
def: Pat <(i64 (zextloadi16 (add (i32 IntRegs:$src1),
|
|
s11_1ExtPred:$offset))),
|
|
(i64 (COMBINE_rr (TFRI 0), (LDriuh_indexed IntRegs:$src1,
|
|
s11_1ExtPred:$offset)))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// i32 -> i64
|
|
def: Pat <(i64 (zextloadi32 ADDRriS11_2:$src1)),
|
|
(i64 (COMBINE_rr (TFRI 0), (LDriw ADDRriS11_2:$src1)))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
def: Pat <(i32 (zextloadi1 ADDRriS11_0:$src1)),
|
|
(i32 (LDriw ADDRriS11_0:$src1))>;
|
|
|
|
// Map from Rs = Pd to Pd = mux(Pd, #1, #0)
|
|
def : Pat <(i32 (zext (i1 PredRegs:$src1))),
|
|
(i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))>;
|
|
|
|
// Map from Rs = Pd to Pd = mux(Pd, #1, #0)
|
|
def : Pat <(i32 (anyext (i1 PredRegs:$src1))),
|
|
(i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))>;
|
|
|
|
// Map from Rss = Pd to Rdd = sxtw (mux(Pd, #1, #0))
|
|
def : Pat <(i64 (anyext (i1 PredRegs:$src1))),
|
|
(i64 (SXTW (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))))>;
|
|
|
|
|
|
// Any extended 64-bit load.
|
|
// anyext i32 -> i64
|
|
def: Pat <(i64 (extloadi32 ADDRriS11_2:$src1)),
|
|
(i64 (COMBINE_rr (TFRI 0), (LDriw ADDRriS11_2:$src1)))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// When there is an offset, we should prefer the pattern below over the one above.
// The complexity of the pattern above is 13 (gleaned from HexagonGenDAGISel.inc),
// so the complexity below is set comfortably higher to make it win.
// If this is not done, we generate addresses such as
// ********************************************
//    r1 = add (r0, #4)
//    r1 = memw(r1 + #0)
// instead of
//    r1 = memw(r0 + #4)
// ********************************************
let AddedComplexity = 100 in
|
|
def: Pat <(i64 (extloadi32 (i32 (add IntRegs:$src1, s11_2ExtPred:$offset)))),
|
|
(i64 (COMBINE_rr (TFRI 0), (LDriw_indexed IntRegs:$src1,
|
|
s11_2ExtPred:$offset)))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// anyext i16 -> i64.
|
|
def: Pat <(i64 (extloadi16 ADDRriS11_2:$src1)),
|
|
(i64 (COMBINE_rr (TFRI 0), (LDrih ADDRriS11_2:$src1)))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
let AddedComplexity = 20 in
|
|
def: Pat <(i64 (extloadi16 (add (i32 IntRegs:$src1),
|
|
s11_1ExtPred:$offset))),
|
|
(i64 (COMBINE_rr (TFRI 0), (LDrih_indexed IntRegs:$src1,
|
|
s11_1ExtPred:$offset)))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Map from Rdd = zxtw(Rs) -> Rdd = combine(0, Rs).
|
|
def : Pat<(i64 (zext (i32 IntRegs:$src1))),
|
|
(i64 (COMBINE_rr (TFRI 0), (i32 IntRegs:$src1)))>,
|
|
Requires<[NoV4T]>;
|
|
|
|
// Multiply 64-bit unsigned and use upper result.
def : Pat <(mulhu (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)),
  (i64
   (MPYU64_acc
    (i64
     (COMBINE_rr
      (TFRI 0),
      (i32
       (EXTRACT_SUBREG
        (i64
         (LSRd_ri
          (i64
           (MPYU64_acc
            (i64
             (MPYU64_acc
              (i64
               (COMBINE_rr (TFRI 0),
                (i32
                 (EXTRACT_SUBREG
                  (i64
                   (LSRd_ri
                    (i64
                     (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
                                                  subreg_loreg)),
                             (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
                                                  subreg_loreg)))), 32)),
                  subreg_loreg)))),
              (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)),
              (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)))),
            (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)),
            (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg)))),
          32)), subreg_loreg)))),
    (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)),
    (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))))>;

// Multiply 64-bit signed and use upper result.
def : Pat <(mulhs (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)),
  (i64
   (MPY64_acc
    (i64
     (COMBINE_rr (TFRI 0),
      (i32
       (EXTRACT_SUBREG
        (i64
         (LSRd_ri
          (i64
           (MPY64_acc
            (i64
             (MPY64_acc
              (i64
               (COMBINE_rr (TFRI 0),
                (i32
                 (EXTRACT_SUBREG
                  (i64
                   (LSRd_ri
                    (i64
                     (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1),
                                                  subreg_loreg)),
                             (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2),
                                                  subreg_loreg)))), 32)),
                  subreg_loreg)))),
              (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)),
              (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)))),
            (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)),
            (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg)))),
          32)), subreg_loreg)))),
    (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)),
    (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))))>;

// Hexagon specific ISD nodes.
//def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>]>;
def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2,
                                          [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
def Hexagon_ADJDYNALLOC : SDNode<"HexagonISD::ADJDYNALLOC",
                                 SDTHexagonADJDYNALLOC>;
// Needed to tag these instructions for stack layout.
let usesCustomInserter = 1 in
def ADJDYNALLOC : ALU32_ri<(outs IntRegs:$dst), (ins IntRegs:$src1,
                                                     s16Imm:$src2),
                  "$dst = add($src1, #$src2)",
                  [(set (i32 IntRegs:$dst),
                        (Hexagon_ADJDYNALLOC (i32 IntRegs:$src1),
                                             s16ImmPred:$src2))]>;
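
// ADJDYNALLOC is emitted as a plain "Rd = add(Rs, #s16)"; the separate node
// and custom inserter presumably exist so that adjustments made for
// dynamically sized stack allocations can still be identified during frame
// layout (see the comment above).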

def SDTHexagonARGEXTEND : SDTypeProfile<1, 1, [SDTCisVT<0, i32>]>;
def Hexagon_ARGEXTEND : SDNode<"HexagonISD::ARGEXTEND", SDTHexagonARGEXTEND>;
def ARGEXTEND : ALU32_rr <(outs IntRegs:$dst), (ins IntRegs:$src1),
                "$dst = $src1",
                [(set (i32 IntRegs:$dst),
                      (Hexagon_ARGEXTEND (i32 IntRegs:$src1)))]>;

let AddedComplexity = 100 in
def : Pat<(i32 (sext_inreg (Hexagon_ARGEXTEND (i32 IntRegs:$src1)), i16)),
      (COPY (i32 IntRegs:$src1))>;
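
// ARGEXTEND itself is just a register move; the high-complexity pattern above
// folds a subsequent sext_inreg to i16 into a plain COPY, on the assumption
// that values wrapped in ARGEXTEND (incoming arguments) are already properly
// sign-extended by the caller.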

def SDHexagonBR_JT: SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
def HexagonBR_JT: SDNode<"HexagonISD::BR_JT", SDHexagonBR_JT, [SDNPHasChain]>;

let isBranch=1, isIndirectBranch=1, isTerminator=1, isBarrier = 1 in
def BR_JT : JRInst<(outs), (ins IntRegs:$src),
            "jumpr $src",
            [(HexagonBR_JT (i32 IntRegs:$src))]>;

let isBranch=1, isIndirectBranch=1, isTerminator=1 in
def BRIND : JRInst<(outs), (ins IntRegs:$src),
            "jumpr $src",
            [(brind (i32 IntRegs:$src))]>;

def HexagonWrapperJT: SDNode<"HexagonISD::WrapperJT", SDTIntUnaryOp>;

def : Pat<(HexagonWrapperJT tjumptable:$dst),
          (i32 (CONST32_set_jt tjumptable:$dst))>;
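
// Taken together, a jump-table dispatch materializes the table address with
// CONST32_set_jt and branches through it with the "jumpr" emitted for BR_JT;
// BRIND covers ordinary indirect branches (brind).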

// XTYPE/SHIFT

// Multi-class for logical operators :
// Shift by immediate/register and accumulate/logical
multiclass xtype_imm<string OpcStr, SDNode OpNode1, SDNode OpNode2> {
  def _ri : SInst_acc<(outs IntRegs:$dst),
                      (ins IntRegs:$src1, IntRegs:$src2, u5Imm:$src3),
              !strconcat("$dst ", !strconcat(OpcStr, "($src2, #$src3)")),
              [(set (i32 IntRegs:$dst),
                    (OpNode2 (i32 IntRegs:$src1),
                             (OpNode1 (i32 IntRegs:$src2),
                                      u5ImmPred:$src3)))],
              "$src1 = $dst">;

  def d_ri : SInst_acc<(outs DoubleRegs:$dst),
                       (ins DoubleRegs:$src1, DoubleRegs:$src2, u6Imm:$src3),
              !strconcat("$dst ", !strconcat(OpcStr, "($src2, #$src3)")),
              [(set (i64 DoubleRegs:$dst), (OpNode2 (i64 DoubleRegs:$src1),
                      (OpNode1 (i64 DoubleRegs:$src2), u6ImmPred:$src3)))],
              "$src1 = $dst">;
}

// Multi-class for logical operators :
// Shift by register and accumulate/logical (32/64 bits)
multiclass xtype_reg<string OpcStr, SDNode OpNode1, SDNode OpNode2> {
  def _rr : SInst_acc<(outs IntRegs:$dst),
                      (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
              !strconcat("$dst ", !strconcat(OpcStr, "($src2, $src3)")),
              [(set (i32 IntRegs:$dst),
                    (OpNode2 (i32 IntRegs:$src1),
                             (OpNode1 (i32 IntRegs:$src2),
                                      (i32 IntRegs:$src3))))],
              "$src1 = $dst">;

  def d_rr : SInst_acc<(outs DoubleRegs:$dst),
                       (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
              !strconcat("$dst ", !strconcat(OpcStr, "($src2, $src3)")),
              [(set (i64 DoubleRegs:$dst),
                    (OpNode2 (i64 DoubleRegs:$src1),
                             (OpNode1 (i64 DoubleRegs:$src2),
                                      (i32 IntRegs:$src3))))],
              "$src1 = $dst">;

}

multiclass basic_xtype_imm<string OpcStr, SDNode OpNode> {
let AddedComplexity = 100 in
  defm _ADD : xtype_imm< !strconcat("+= ", OpcStr), OpNode, add>;
  defm _SUB : xtype_imm< !strconcat("-= ", OpcStr), OpNode, sub>;
  defm _AND : xtype_imm< !strconcat("&= ", OpcStr), OpNode, and>;
  defm _OR : xtype_imm< !strconcat("|= ", OpcStr), OpNode, or>;
}

multiclass basic_xtype_reg<string OpcStr, SDNode OpNode> {
let AddedComplexity = 100 in
  defm _ADD : xtype_reg< !strconcat("+= ", OpcStr), OpNode, add>;
  defm _SUB : xtype_reg< !strconcat("-= ", OpcStr), OpNode, sub>;
  defm _AND : xtype_reg< !strconcat("&= ", OpcStr), OpNode, and>;
  defm _OR : xtype_reg< !strconcat("|= ", OpcStr), OpNode, or>;
}

multiclass xtype_xor_imm<string OpcStr, SDNode OpNode> {
let AddedComplexity = 100 in
  defm _XOR : xtype_imm< !strconcat("^= ", OpcStr), OpNode, xor>;
}

defm ASL : basic_xtype_imm<"asl", shl>, basic_xtype_reg<"asl", shl>,
           xtype_xor_imm<"asl", shl>;

defm LSR : basic_xtype_imm<"lsr", srl>, basic_xtype_reg<"lsr", srl>,
           xtype_xor_imm<"lsr", srl>;

defm ASR : basic_xtype_imm<"asr", sra>, basic_xtype_reg<"asr", sra>;
defm LSL : basic_xtype_reg<"lsl", shl>;
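
// As an example of what these multi-classes expand to: ASL_ADD_ri matches
//   Rx = add(Rx, asl(Rs, #u5))
// and is printed as "$dst += asl($src2, #$src3)", i.e. Rx += asl(Rs, #u5);
// the _rr and d_ri/d_rr variants cover the register-shift and 64-bit forms.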

// Change the sign of the immediate for Rd=-mpyi(Rs,#u8)
def : Pat <(mul (i32 IntRegs:$src1), (ineg n8ImmPred:$src2)),
      (i32 (MPYI_rin (i32 IntRegs:$src1), u8ImmPred:$src2))>;
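
// For example, a multiply by a small negative constant can be selected as
//   Rd = -mpyi(Rs, #u8)
// so that only the magnitude needs to be encoded in the immediate field.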

//===----------------------------------------------------------------------===//
// V3 Instructions +
//===----------------------------------------------------------------------===//

include "HexagonInstrInfoV3.td"

//===----------------------------------------------------------------------===//
// V3 Instructions -
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// V4 Instructions +
//===----------------------------------------------------------------------===//

include "HexagonInstrInfoV4.td"

//===----------------------------------------------------------------------===//
// V4 Instructions -
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// V5 Instructions +
//===----------------------------------------------------------------------===//

include "HexagonInstrInfoV5.td"

//===----------------------------------------------------------------------===//
// V5 Instructions -
//===----------------------------------------------------------------------===//