llvm-6502/lib/Target/ARM/ARMInstrInfo.td
Jim Grosbach cf121c35c4 ARM assembly parsing and encoding for BLX (immediate).
Add parsing support for BLX (immediate). Since the register operand version is
predicated and the label operand version is not, we have to use some special
handling to get the operand list right for matching.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@136406 91177308-0d34-0410-b5e6-96231b3b80d8
2011-07-28 21:57:55 +00:00

//===- ARMInstrInfo.td - Target Description for ARM Target -*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the ARM instructions in TableGen format.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// ARM specific DAG Nodes.
//
// Type profiles.
def SDT_ARMCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
def SDT_ARMCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>, SDTCisVT<1, i32> ]>;
def SDT_ARMSaveCallPC : SDTypeProfile<0, 1, []>;
def SDT_ARMcall : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
def SDT_ARMCMov : SDTypeProfile<1, 3,
[SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
SDTCisVT<3, i32>]>;
def SDT_ARMBrcond : SDTypeProfile<0, 2,
[SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>]>;
def SDT_ARMBrJT : SDTypeProfile<0, 3,
[SDTCisPtrTy<0>, SDTCisVT<1, i32>,
SDTCisVT<2, i32>]>;
def SDT_ARMBr2JT : SDTypeProfile<0, 4,
[SDTCisPtrTy<0>, SDTCisVT<1, i32>,
SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
def SDT_ARMBCC_i64 : SDTypeProfile<0, 6,
[SDTCisVT<0, i32>,
SDTCisVT<1, i32>, SDTCisVT<2, i32>,
SDTCisVT<3, i32>, SDTCisVT<4, i32>,
SDTCisVT<5, OtherVT>]>;
def SDT_ARMAnd : SDTypeProfile<1, 2,
[SDTCisVT<0, i32>, SDTCisVT<1, i32>,
SDTCisVT<2, i32>]>;
def SDT_ARMCmp : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
def SDT_ARMPICAdd : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>,
SDTCisPtrTy<1>, SDTCisVT<2, i32>]>;
def SDT_ARMThreadPointer : SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>;
def SDT_ARMEH_SJLJ_Setjmp : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisPtrTy<1>,
SDTCisInt<2>]>;
def SDT_ARMEH_SJLJ_Longjmp: SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisInt<1>]>;
def SDT_ARMEH_SJLJ_DispatchSetup: SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_ARMMEMBARRIER : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_ARMPREFETCH : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisSameAs<1, 2>,
SDTCisInt<1>]>;
def SDT_ARMTCRET : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def SDT_ARMBFI : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
// Node definitions.
def ARMWrapper : SDNode<"ARMISD::Wrapper", SDTIntUnaryOp>;
def ARMWrapperDYN : SDNode<"ARMISD::WrapperDYN", SDTIntUnaryOp>;
def ARMWrapperPIC : SDNode<"ARMISD::WrapperPIC", SDTIntUnaryOp>;
def ARMWrapperJT : SDNode<"ARMISD::WrapperJT", SDTIntBinOp>;
def ARMcallseq_start : SDNode<"ISD::CALLSEQ_START", SDT_ARMCallSeqStart,
[SDNPHasChain, SDNPOutGlue]>;
def ARMcallseq_end : SDNode<"ISD::CALLSEQ_END", SDT_ARMCallSeqEnd,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def ARMcall : SDNode<"ARMISD::CALL", SDT_ARMcall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def ARMcall_pred : SDNode<"ARMISD::CALL_PRED", SDT_ARMcall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def ARMcall_nolink : SDNode<"ARMISD::CALL_NOLINK", SDT_ARMcall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def ARMretflag : SDNode<"ARMISD::RET_FLAG", SDTNone,
[SDNPHasChain, SDNPOptInGlue]>;
def ARMcmov : SDNode<"ARMISD::CMOV", SDT_ARMCMov,
[SDNPInGlue]>;
def ARMbrcond : SDNode<"ARMISD::BRCOND", SDT_ARMBrcond,
[SDNPHasChain, SDNPInGlue, SDNPOutGlue]>;
def ARMbrjt : SDNode<"ARMISD::BR_JT", SDT_ARMBrJT,
[SDNPHasChain]>;
def ARMbr2jt : SDNode<"ARMISD::BR2_JT", SDT_ARMBr2JT,
[SDNPHasChain]>;
def ARMBcci64 : SDNode<"ARMISD::BCC_i64", SDT_ARMBCC_i64,
[SDNPHasChain]>;
def ARMcmp : SDNode<"ARMISD::CMP", SDT_ARMCmp,
[SDNPOutGlue]>;
def ARMcmpZ : SDNode<"ARMISD::CMPZ", SDT_ARMCmp,
[SDNPOutGlue, SDNPCommutative]>;
def ARMpic_add : SDNode<"ARMISD::PIC_ADD", SDT_ARMPICAdd>;
def ARMsrl_flag : SDNode<"ARMISD::SRL_FLAG", SDTIntUnaryOp, [SDNPOutGlue]>;
def ARMsra_flag : SDNode<"ARMISD::SRA_FLAG", SDTIntUnaryOp, [SDNPOutGlue]>;
def ARMrrx : SDNode<"ARMISD::RRX" , SDTIntUnaryOp, [SDNPInGlue ]>;
def ARMthread_pointer: SDNode<"ARMISD::THREAD_POINTER", SDT_ARMThreadPointer>;
def ARMeh_sjlj_setjmp: SDNode<"ARMISD::EH_SJLJ_SETJMP",
SDT_ARMEH_SJLJ_Setjmp, [SDNPHasChain]>;
def ARMeh_sjlj_longjmp: SDNode<"ARMISD::EH_SJLJ_LONGJMP",
SDT_ARMEH_SJLJ_Longjmp, [SDNPHasChain]>;
def ARMeh_sjlj_dispatchsetup: SDNode<"ARMISD::EH_SJLJ_DISPATCHSETUP",
SDT_ARMEH_SJLJ_DispatchSetup, [SDNPHasChain]>;
def ARMMemBarrier : SDNode<"ARMISD::MEMBARRIER", SDT_ARMMEMBARRIER,
[SDNPHasChain]>;
def ARMMemBarrierMCR : SDNode<"ARMISD::MEMBARRIER_MCR", SDT_ARMMEMBARRIER,
[SDNPHasChain]>;
def ARMPreload : SDNode<"ARMISD::PRELOAD", SDT_ARMPREFETCH,
[SDNPHasChain, SDNPMayLoad, SDNPMayStore]>;
def ARMrbit : SDNode<"ARMISD::RBIT", SDTIntUnaryOp>;
def ARMtcret : SDNode<"ARMISD::TC_RETURN", SDT_ARMTCRET,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def ARMbfi : SDNode<"ARMISD::BFI", SDT_ARMBFI>;
//===----------------------------------------------------------------------===//
// ARM Instruction Predicate Definitions.
//
def HasV4T : Predicate<"Subtarget->hasV4TOps()">,
AssemblerPredicate<"HasV4TOps">;
def NoV4T : Predicate<"!Subtarget->hasV4TOps()">;
def HasV5T : Predicate<"Subtarget->hasV5TOps()">;
def HasV5TE : Predicate<"Subtarget->hasV5TEOps()">,
AssemblerPredicate<"HasV5TEOps">;
def HasV6 : Predicate<"Subtarget->hasV6Ops()">,
AssemblerPredicate<"HasV6Ops">;
def NoV6 : Predicate<"!Subtarget->hasV6Ops()">;
def HasV6T2 : Predicate<"Subtarget->hasV6T2Ops()">,
AssemblerPredicate<"HasV6T2Ops">;
def NoV6T2 : Predicate<"!Subtarget->hasV6T2Ops()">;
def HasV7 : Predicate<"Subtarget->hasV7Ops()">,
AssemblerPredicate<"HasV7Ops">;
def NoVFP : Predicate<"!Subtarget->hasVFP2()">;
def HasVFP2 : Predicate<"Subtarget->hasVFP2()">,
AssemblerPredicate<"FeatureVFP2">;
def HasVFP3 : Predicate<"Subtarget->hasVFP3()">,
AssemblerPredicate<"FeatureVFP3">;
def HasNEON : Predicate<"Subtarget->hasNEON()">,
AssemblerPredicate<"FeatureNEON">;
def HasFP16 : Predicate<"Subtarget->hasFP16()">,
AssemblerPredicate<"FeatureFP16">;
def HasDivide : Predicate<"Subtarget->hasDivide()">,
AssemblerPredicate<"FeatureHWDiv">;
def HasT2ExtractPack : Predicate<"Subtarget->hasT2ExtractPack()">,
AssemblerPredicate<"FeatureT2XtPk">;
def HasThumb2DSP : Predicate<"Subtarget->hasThumb2DSP()">,
AssemblerPredicate<"FeatureDSPThumb2">;
def HasDB : Predicate<"Subtarget->hasDataBarrier()">,
AssemblerPredicate<"FeatureDB">;
def HasMP : Predicate<"Subtarget->hasMPExtension()">,
AssemblerPredicate<"FeatureMP">;
def UseNEONForFP : Predicate<"Subtarget->useNEONForSinglePrecisionFP()">;
def DontUseNEONForFP : Predicate<"!Subtarget->useNEONForSinglePrecisionFP()">;
def IsThumb : Predicate<"Subtarget->isThumb()">,
AssemblerPredicate<"ModeThumb">;
def IsThumb1Only : Predicate<"Subtarget->isThumb1Only()">;
def IsThumb2 : Predicate<"Subtarget->isThumb2()">,
AssemblerPredicate<"ModeThumb,FeatureThumb2">;
def IsARM : Predicate<"!Subtarget->isThumb()">,
AssemblerPredicate<"!ModeThumb">;
def IsDarwin : Predicate<"Subtarget->isTargetDarwin()">;
def IsNotDarwin : Predicate<"!Subtarget->isTargetDarwin()">;
// FIXME: Eventually this will be just "hasV6T2Ops".
def UseMovt : Predicate<"Subtarget->useMovt()">;
def DontUseMovt : Predicate<"!Subtarget->useMovt()">;
def UseFPVMLx : Predicate<"Subtarget->useFPVMLx()">;
//===----------------------------------------------------------------------===//
// ARM Flag Definitions.
class RegConstraint<string C> {
string Constraints = C;
}
//===----------------------------------------------------------------------===//
// ARM specific transformation functions and pattern fragments.
//
// so_imm_neg_XFORM - Return a so_imm value packed into the format described for
// so_imm_neg def below.
def so_imm_neg_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(-(int)N->getZExtValue(), MVT::i32);
}]>;
// so_imm_not_XFORM - Return a so_imm value packed into the format described for
// so_imm_not def below.
def so_imm_not_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(~(int)N->getZExtValue(), MVT::i32);
}]>;
/// imm1_15 predicate - True if the 32-bit immediate is in the range [1,15].
def imm1_15 : ImmLeaf<i32, [{
return (int32_t)Imm >= 1 && (int32_t)Imm < 16;
}]>;
/// imm16_31 predicate - True if the 32-bit immediate is in the range [16,31].
def imm16_31 : ImmLeaf<i32, [{
return (int32_t)Imm >= 16 && (int32_t)Imm < 32;
}]>;
def so_imm_neg :
PatLeaf<(imm), [{
return ARM_AM::getSOImmVal(-(uint32_t)N->getZExtValue()) != -1;
}], so_imm_neg_XFORM>;
def so_imm_not :
PatLeaf<(imm), [{
return ARM_AM::getSOImmVal(~(uint32_t)N->getZExtValue()) != -1;
}], so_imm_not_XFORM>;
// sext_16_node predicate - True if the SDNode is sign-extended 16 or more bits.
def sext_16_node : PatLeaf<(i32 GPR:$a), [{
return CurDAG->ComputeNumSignBits(SDValue(N,0)) >= 17;
}]>;
/// Split a 32-bit immediate into two 16 bit parts.
def hi16 : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant((uint32_t)N->getZExtValue() >> 16, MVT::i32);
}]>;
def lo16AllZero : PatLeaf<(i32 imm), [{
// Returns true if all low 16-bits are 0.
return (((uint32_t)N->getZExtValue()) & 0xFFFFUL) == 0;
}], hi16>;
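// For example, 0x12340000 satisfies lo16AllZero, and hi16 transforms it to
// 0x1234, the value a movt would place in the upper halfword.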
/// imm0_65535 - An immediate in the range [0,65535].
def Imm0_65535AsmOperand: AsmOperandClass { let Name = "Imm0_65535"; }
def imm0_65535 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 65536;
}]> {
let ParserMatchClass = Imm0_65535AsmOperand;
}
class BinOpFrag<dag res> : PatFrag<(ops node:$LHS, node:$RHS), res>;
class UnOpFrag <dag res> : PatFrag<(ops node:$Src), res>;
/// adde and sube predicates - True based on whether the carry flag output
/// will be needed or not.
def adde_dead_carry :
PatFrag<(ops node:$LHS, node:$RHS), (adde node:$LHS, node:$RHS),
[{return !N->hasAnyUseOfValue(1);}]>;
def sube_dead_carry :
PatFrag<(ops node:$LHS, node:$RHS), (sube node:$LHS, node:$RHS),
[{return !N->hasAnyUseOfValue(1);}]>;
def adde_live_carry :
PatFrag<(ops node:$LHS, node:$RHS), (adde node:$LHS, node:$RHS),
[{return N->hasAnyUseOfValue(1);}]>;
def sube_live_carry :
PatFrag<(ops node:$LHS, node:$RHS), (sube node:$LHS, node:$RHS),
[{return N->hasAnyUseOfValue(1);}]>;
// An 'and' node with a single use.
def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
return N->hasOneUse();
}]>;
// An 'xor' node with a single use.
def xor_su : PatFrag<(ops node:$lhs, node:$rhs), (xor node:$lhs, node:$rhs), [{
return N->hasOneUse();
}]>;
// An 'fmul' node with a single use.
def fmul_su : PatFrag<(ops node:$lhs, node:$rhs), (fmul node:$lhs, node:$rhs),[{
return N->hasOneUse();
}]>;
// An 'fadd' node which checks for single non-hazardous use.
def fadd_mlx : PatFrag<(ops node:$lhs, node:$rhs),(fadd node:$lhs, node:$rhs),[{
return hasNoVMLxHazardUse(N);
}]>;
// An 'fsub' node which checks for single non-hazardous use.
def fsub_mlx : PatFrag<(ops node:$lhs, node:$rhs),(fsub node:$lhs, node:$rhs),[{
return hasNoVMLxHazardUse(N);
}]>;
//===----------------------------------------------------------------------===//
// Operand Definitions.
//
// Branch target.
// FIXME: rename brtarget to t2_brtarget
def brtarget : Operand<OtherVT> {
let EncoderMethod = "getBranchTargetOpValue";
let OperandType = "OPERAND_PCREL";
}
// FIXME: get rid of this one?
def uncondbrtarget : Operand<OtherVT> {
let EncoderMethod = "getUnconditionalBranchTargetOpValue";
let OperandType = "OPERAND_PCREL";
}
// Branch target for ARM. Handles conditional/unconditional
def br_target : Operand<OtherVT> {
let EncoderMethod = "getARMBranchTargetOpValue";
let OperandType = "OPERAND_PCREL";
}
// Call target.
// FIXME: rename bltarget to t2_bl_target?
def bltarget : Operand<i32> {
// Encoded the same as branch targets.
let EncoderMethod = "getBranchTargetOpValue";
let OperandType = "OPERAND_PCREL";
}
// Call target for ARM. Handles conditional/unconditional
// FIXME: rename bl_target to t2_bltarget?
def bl_target : Operand<i32> {
// Encoded the same as branch targets.
let EncoderMethod = "getARMBranchTargetOpValue";
let OperandType = "OPERAND_PCREL";
}
// A list of registers separated by commas. Used by load/store multiple.
def RegListAsmOperand : AsmOperandClass { let Name = "RegList"; }
def reglist : Operand<i32> {
let EncoderMethod = "getRegisterListOpValue";
let ParserMatchClass = RegListAsmOperand;
let PrintMethod = "printRegisterList";
}
def DPRRegListAsmOperand : AsmOperandClass { let Name = "DPRRegList"; }
def dpr_reglist : Operand<i32> {
let EncoderMethod = "getRegisterListOpValue";
let ParserMatchClass = DPRRegListAsmOperand;
let PrintMethod = "printRegisterList";
}
def SPRRegListAsmOperand : AsmOperandClass { let Name = "SPRRegList"; }
def spr_reglist : Operand<i32> {
let EncoderMethod = "getRegisterListOpValue";
let ParserMatchClass = SPRRegListAsmOperand;
let PrintMethod = "printRegisterList";
}
// An operand for the CONSTPOOL_ENTRY pseudo-instruction.
def cpinst_operand : Operand<i32> {
let PrintMethod = "printCPInstOperand";
}
// Local PC labels.
def pclabel : Operand<i32> {
let PrintMethod = "printPCLabel";
}
// ADR instruction labels.
def adrlabel : Operand<i32> {
let EncoderMethod = "getAdrLabelOpValue";
}
def neon_vcvt_imm32 : Operand<i32> {
let EncoderMethod = "getNEONVcvtImm32OpValue";
}
// rot_imm: An integer that encodes a rotate amount. Must be 8, 16, or 24.
def rot_imm_XFORM: SDNodeXForm<imm, [{
switch (N->getZExtValue()){
default: assert(0);
case 0: return CurDAG->getTargetConstant(0, MVT::i32);
case 8: return CurDAG->getTargetConstant(1, MVT::i32);
case 16: return CurDAG->getTargetConstant(2, MVT::i32);
case 24: return CurDAG->getTargetConstant(3, MVT::i32);
}
}]>;
def RotImmAsmOperand : AsmOperandClass {
let Name = "RotImm";
let ParserMethod = "parseRotImm";
}
def rot_imm : Operand<i32>, PatLeaf<(i32 imm), [{
int32_t v = N->getZExtValue();
return v == 8 || v == 16 || v == 24; }],
rot_imm_XFORM> {
let PrintMethod = "printRotImmOperand";
let ParserMatchClass = RotImmAsmOperand;
}
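// The rotation prints as ", ror #8/16/24" and, per rot_imm_XFORM above, is
// encoded as 0b01/0b10/0b11 in the two rot bits of the extend instructions
// below (Inst{11-10}).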
// shift_imm: An integer that encodes a shift amount and the type of shift
// (asr or lsl). The 6-bit immediate encodes as:
//   {5}    0 ==> lsl
//          1     asr
//   {4-0}  imm5  shift amount.
// asr #32 encoded as imm5 == 0.
def ShifterImmAsmOperand : AsmOperandClass {
let Name = "ShifterImm";
let ParserMethod = "parseShifterImm";
}
def shift_imm : Operand<i32> {
let PrintMethod = "printShiftImmOperand";
let ParserMatchClass = ShifterImmAsmOperand;
}
// shifter_operand operands: so_reg_reg, so_reg_imm, and so_imm.
def ShiftedRegAsmOperand : AsmOperandClass { let Name = "RegShiftedReg"; }
def so_reg_reg : Operand<i32>, // reg reg imm
ComplexPattern<i32, 3, "SelectRegShifterOperand",
[shl, srl, sra, rotr]> {
let EncoderMethod = "getSORegRegOpValue";
let PrintMethod = "printSORegRegOperand";
let ParserMatchClass = ShiftedRegAsmOperand;
let MIOperandInfo = (ops GPR, GPR, i32imm);
}
def ShiftedImmAsmOperand : AsmOperandClass { let Name = "RegShiftedImm"; }
def so_reg_imm : Operand<i32>, // reg imm
ComplexPattern<i32, 2, "SelectImmShifterOperand",
[shl, srl, sra, rotr]> {
let EncoderMethod = "getSORegImmOpValue";
let PrintMethod = "printSORegImmOperand";
let ParserMatchClass = ShiftedImmAsmOperand;
let MIOperandInfo = (ops GPR, i32imm);
}
// FIXME: Does this need to be distinct from so_reg?
def shift_so_reg_reg : Operand<i32>, // reg reg imm
ComplexPattern<i32, 3, "SelectShiftRegShifterOperand",
[shl,srl,sra,rotr]> {
let EncoderMethod = "getSORegRegOpValue";
let PrintMethod = "printSORegRegOperand";
let MIOperandInfo = (ops GPR, GPR, i32imm);
}
// FIXME: Does this need to be distinct from so_reg?
def shift_so_reg_imm : Operand<i32>, // reg reg imm
ComplexPattern<i32, 2, "SelectShiftImmShifterOperand",
[shl,srl,sra,rotr]> {
let EncoderMethod = "getSORegImmOpValue";
let PrintMethod = "printSORegImmOperand";
let MIOperandInfo = (ops GPR, i32imm);
}
// so_imm - Match a 32-bit shifter_operand immediate operand, which is an
// 8-bit immediate rotated right by an even number of bits.
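// For example, 0x000000FF, 0x0000FF00, and 0xF000000F (0xFF rotated right by
// 0, 24, and 4 bits) are encodable, while 0x000001FE is not, since the
// rotation amount must be even.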
def SOImmAsmOperand: AsmOperandClass { let Name = "ARMSOImm"; }
def so_imm : Operand<i32>, ImmLeaf<i32, [{
return ARM_AM::getSOImmVal(Imm) != -1;
}]> {
let EncoderMethod = "getSOImmOpValue";
let ParserMatchClass = SOImmAsmOperand;
}
// Break so_imm's up into two pieces. This handles immediates with up to 16
// bits set in them. This uses so_imm2part to match and so_imm2part_[12] to
// get the first/second pieces.
def so_imm2part : PatLeaf<(imm), [{
return ARM_AM::isSOImmTwoPartVal((unsigned)N->getZExtValue());
}]>;
/// arm_i32imm - True if the subtarget has V6T2 (movw/movt); otherwise true
/// only if so_imm2part is true.
///
def arm_i32imm : PatLeaf<(imm), [{
if (Subtarget->hasV6T2Ops())
return true;
return ARM_AM::isSOImmTwoPartVal((unsigned)N->getZExtValue());
}]>;
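// For example, 0x00F000F0 is not a single so_imm value but splits into
// 0x000000F0 and 0x00F00000, so it is accepted even without V6T2 (it can be
// materialized with a mov/orr pair).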
/// imm0_7 predicate - Immediate in the range [0,7].
def Imm0_7AsmOperand: AsmOperandClass { let Name = "Imm0_7"; }
def imm0_7 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 8;
}]> {
let ParserMatchClass = Imm0_7AsmOperand;
}
/// imm0_15 predicate - Immediate in the range [0,15].
def Imm0_15AsmOperand: AsmOperandClass { let Name = "Imm0_15"; }
def imm0_15 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 16;
}]> {
let ParserMatchClass = Imm0_15AsmOperand;
}
/// imm0_31 predicate - True if the 32-bit immediate is in the range [0,31].
def Imm0_31AsmOperand: AsmOperandClass { let Name = "Imm0_31"; }
def imm0_31 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 32;
}]> {
let ParserMatchClass = Imm0_31AsmOperand;
}
// imm0_65535_expr - For movt/movw - 16-bit immediate that can also reference
// a relocatable expression.
//
// FIXME: This really needs a Thumb version separate from the ARM version.
// While the range is the same, and can thus use the same match class,
// the encoding is different so it should have a different encoder method.
def Imm0_65535ExprAsmOperand: AsmOperandClass { let Name = "Imm0_65535Expr"; }
def imm0_65535_expr : Operand<i32> {
let EncoderMethod = "getHiLo16ImmOpValue";
let ParserMatchClass = Imm0_65535ExprAsmOperand;
}
/// imm24b - True if the 32-bit immediate is encodable in 24 bits.
def Imm24bitAsmOperand: AsmOperandClass { let Name = "Imm24bit"; }
def imm24b : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm <= 0xffffff;
}]> {
let ParserMatchClass = Imm24bitAsmOperand;
}
/// bf_inv_mask_imm predicate - An AND mask to clear an arbitrary width bitfield
/// e.g., 0xf000ffff
def BitfieldAsmOperand : AsmOperandClass {
let Name = "Bitfield";
let ParserMethod = "parseBitfield";
}
def bf_inv_mask_imm : Operand<i32>,
PatLeaf<(imm), [{
return ARM::isBitFieldInvertedMask(N->getZExtValue());
}] > {
let EncoderMethod = "getBitfieldInvertedMaskOpValue";
let PrintMethod = "printBitfieldInvMaskImmOperand";
let ParserMatchClass = BitfieldAsmOperand;
}
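// For example, 0xf000ffff inverts to 0x0fff0000, a contiguous 12-bit field at
// bit 16, i.e., the mask cleared by "bfc Rd, #16, #12".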
/// lsb_pos_imm - position of the lsb bit, used by BFI4p and t2BFI4p
def lsb_pos_imm : Operand<i32>, ImmLeaf<i32, [{
return isInt<5>(Imm);
}]>;
/// width_imm - number of bits to be copied, used by BFI4p and t2BFI4p
def width_imm : Operand<i32>, ImmLeaf<i32, [{
return Imm > 0 && Imm <= 32;
}] > {
let EncoderMethod = "getMsbOpValue";
}
def imm1_32_XFORM: SDNodeXForm<imm, [{
return CurDAG->getTargetConstant((int)N->getZExtValue() - 1, MVT::i32);
}]>;
def Imm1_32AsmOperand: AsmOperandClass { let Name = "Imm1_32"; }
def imm1_32 : Operand<i32>, PatLeaf<(imm), [{ return Imm > 0 && Imm <= 32; }],
imm1_32_XFORM> {
let PrintMethod = "printImmPlusOneOperand";
let ParserMatchClass = Imm1_32AsmOperand;
}
def imm1_16_XFORM: SDNodeXForm<imm, [{
return CurDAG->getTargetConstant((int)N->getZExtValue() - 1, MVT::i32);
}]>;
def Imm1_16AsmOperand: AsmOperandClass { let Name = "Imm1_16"; }
def imm1_16 : Operand<i32>, PatLeaf<(imm), [{ return Imm > 0 && Imm <= 16; }],
imm1_16_XFORM> {
let PrintMethod = "printImmPlusOneOperand";
let ParserMatchClass = Imm1_16AsmOperand;
}
// Define ARM specific addressing modes.
// addrmode_imm12 := reg +/- imm12
//
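// Example assembly forms: [r0], [r0, #132], [r0, #-4095]. The immediate
// offset is in the range [-4095,4095].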
def addrmode_imm12 : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrModeImm12", []> {
// 12-bit immediate operand. Note that instructions using this encode
// #0 and #-0 differently. We flag #-0 as the magic value INT32_MIN. All other
// immediate values are as normal.
let EncoderMethod = "getAddrModeImm12OpValue";
let PrintMethod = "printAddrModeImm12Operand";
let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm);
}
// ldst_so_reg := reg +/- reg shop imm
//
def ldst_so_reg : Operand<i32>,
ComplexPattern<i32, 3, "SelectLdStSOReg", []> {
let EncoderMethod = "getLdStSORegOpValue";
// FIXME: Simplify the printer
let PrintMethod = "printAddrMode2Operand";
let MIOperandInfo = (ops GPR:$base, GPR:$offsreg, i32imm:$offsimm);
}
// addrmode2 := reg +/- imm12
// := reg +/- reg shop imm
//
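// Example assembly forms: [r0, #-4095], [r0, r1], [r0, -r1, lsl #2].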
def MemMode2AsmOperand : AsmOperandClass {
let Name = "MemMode2";
let ParserMethod = "parseMemMode2Operand";
}
def addrmode2 : Operand<i32>,
ComplexPattern<i32, 3, "SelectAddrMode2", []> {
let EncoderMethod = "getAddrMode2OpValue";
let PrintMethod = "printAddrMode2Operand";
let ParserMatchClass = MemMode2AsmOperand;
let MIOperandInfo = (ops GPR:$base, GPR:$offsreg, i32imm:$offsimm);
}
def am2offset_reg : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode2OffsetReg",
[], [SDNPWantRoot]> {
let EncoderMethod = "getAddrMode2OffsetOpValue";
let PrintMethod = "printAddrMode2OffsetOperand";
let MIOperandInfo = (ops GPR, i32imm);
}
def am2offset_imm : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode2OffsetImm",
[], [SDNPWantRoot]> {
let EncoderMethod = "getAddrMode2OffsetOpValue";
let PrintMethod = "printAddrMode2OffsetOperand";
let MIOperandInfo = (ops GPR, i32imm);
}
// addrmode3 := reg +/- reg
// addrmode3 := reg +/- imm8
//
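// Used by the halfword, signed-byte, and doubleword loads/stores below.
// Example assembly forms: [r0], [r0, #-255], [r0, -r1]. The immediate offset
// is limited to 8 bits, i.e., [-255,255].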
def MemMode3AsmOperand : AsmOperandClass {
let Name = "MemMode3";
let ParserMethod = "parseMemMode3Operand";
}
def addrmode3 : Operand<i32>,
ComplexPattern<i32, 3, "SelectAddrMode3", []> {
let EncoderMethod = "getAddrMode3OpValue";
let PrintMethod = "printAddrMode3Operand";
let ParserMatchClass = MemMode3AsmOperand;
let MIOperandInfo = (ops GPR:$base, GPR:$offsreg, i32imm:$offsimm);
}
def am3offset : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode3Offset",
[], [SDNPWantRoot]> {
let EncoderMethod = "getAddrMode3OffsetOpValue";
let PrintMethod = "printAddrMode3OffsetOperand";
let MIOperandInfo = (ops GPR, i32imm);
}
// ldstm_mode := {ia, ib, da, db}
//
def ldstm_mode : OptionalDefOperand<OtherVT, (ops i32), (ops (i32 1))> {
let EncoderMethod = "getLdStmModeOpValue";
let PrintMethod = "printLdStmModeOperand";
}
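// The default operand value of 1 corresponds to the 'ia' (increment after)
// submode.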
// addrmode5 := reg +/- imm8*4
//
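// Used by the VFP load/store instructions. Example assembly forms: [r0],
// [r0, #-1020], [r0, #256]; the offset must be a multiple of 4 in the range
// [-1020,1020].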
def MemMode5AsmOperand : AsmOperandClass { let Name = "MemMode5"; }
def addrmode5 : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode5", []> {
let PrintMethod = "printAddrMode5Operand";
let MIOperandInfo = (ops GPR:$base, i32imm);
let ParserMatchClass = MemMode5AsmOperand;
let EncoderMethod = "getAddrMode5OpValue";
}
// addrmode6 := reg with optional alignment
//
def addrmode6 : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode6", [], [SDNPWantParent]>{
let PrintMethod = "printAddrMode6Operand";
let MIOperandInfo = (ops GPR:$addr, i32imm);
let EncoderMethod = "getAddrMode6AddressOpValue";
}
def am6offset : Operand<i32>,
ComplexPattern<i32, 1, "SelectAddrMode6Offset",
[], [SDNPWantRoot]> {
let PrintMethod = "printAddrMode6OffsetOperand";
let MIOperandInfo = (ops GPR);
let EncoderMethod = "getAddrMode6OffsetOpValue";
}
// Special version of addrmode6 to handle alignment encoding for VST1/VLD1
// (single element from one lane) for size 32.
def addrmode6oneL32 : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode6", [], [SDNPWantParent]>{
let PrintMethod = "printAddrMode6Operand";
let MIOperandInfo = (ops GPR:$addr, i32imm);
let EncoderMethod = "getAddrMode6OneLane32AddressOpValue";
}
// Special version of addrmode6 to handle alignment encoding for VLD-dup
// instructions, specifically VLD4-dup.
def addrmode6dup : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode6", [], [SDNPWantParent]>{
let PrintMethod = "printAddrMode6Operand";
let MIOperandInfo = (ops GPR:$addr, i32imm);
let EncoderMethod = "getAddrMode6DupAddressOpValue";
}
// addrmodepc := pc + reg
//
def addrmodepc : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrModePC", []> {
let PrintMethod = "printAddrModePCOperand";
let MIOperandInfo = (ops GPR, i32imm);
}
// addrmode7 := reg
// Used by load/store exclusive instructions. It exists to enable correct
// assembly parsing and printing; it is not used for any codegen matching.
//
def MemMode7AsmOperand : AsmOperandClass { let Name = "MemMode7"; }
def addrmode7 : Operand<i32> {
let PrintMethod = "printAddrMode7Operand";
let MIOperandInfo = (ops GPR);
let ParserMatchClass = MemMode7AsmOperand;
}
def nohash_imm : Operand<i32> {
let PrintMethod = "printNoHashImmediate";
}
def CoprocNumAsmOperand : AsmOperandClass {
let Name = "CoprocNum";
let ParserMethod = "parseCoprocNumOperand";
}
def p_imm : Operand<i32> {
let PrintMethod = "printPImmediate";
let ParserMatchClass = CoprocNumAsmOperand;
}
def CoprocRegAsmOperand : AsmOperandClass {
let Name = "CoprocReg";
let ParserMethod = "parseCoprocRegOperand";
}
def c_imm : Operand<i32> {
let PrintMethod = "printCImmediate";
let ParserMatchClass = CoprocRegAsmOperand;
}
//===----------------------------------------------------------------------===//
include "ARMInstrFormats.td"
//===----------------------------------------------------------------------===//
// Multiclass helpers...
//
/// AsI1_bin_irs - Defines a set of (op r, {so_imm|r|so_reg}) patterns for a
/// binop that produces a value.
multiclass AsI1_bin_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
PatFrag opnode, string baseOpc, bit Commutable = 0> {
// The register-immediate version is re-materializable. This is useful
// in particular for taking the address of a local.
let isReMaterializable = 1 in {
def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm,
iii, opc, "\t$Rd, $Rn, $imm",
[(set GPR:$Rd, (opnode GPR:$Rn, so_imm:$imm))]> {
bits<4> Rd;
bits<4> Rn;
bits<12> imm;
let Inst{25} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-0} = imm;
}
}
def rr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm,
iir, opc, "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (opnode GPR:$Rn, GPR:$Rm))]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
let Inst{25} = 0;
let isCommutable = Commutable;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-4} = 0b00000000;
let Inst{3-0} = Rm;
}
def rsi : AsI1<opcod, (outs GPR:$Rd),
(ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm,
iis, opc, "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, (opnode GPR:$Rn, so_reg_imm:$shift))]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-5} = shift{11-5};
let Inst{4} = 0;
let Inst{3-0} = shift{3-0};
}
def rsr : AsI1<opcod, (outs GPR:$Rd),
(ins GPR:$Rn, so_reg_reg:$shift), DPSoRegRegFrm,
iis, opc, "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, (opnode GPR:$Rn, so_reg_reg:$shift))]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-8} = shift{11-8};
let Inst{7} = 0;
let Inst{6-5} = shift{6-5};
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
}
// Assembly aliases for optional destination operand when it's the same
// as the source operand.
def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $imm"),
(!cast<Instruction>(!strconcat(baseOpc, "ri")) GPR:$Rdn, GPR:$Rdn,
so_imm:$imm, pred:$p,
cc_out:$s)>,
Requires<[IsARM]>;
def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $Rm"),
(!cast<Instruction>(!strconcat(baseOpc, "rr")) GPR:$Rdn, GPR:$Rdn,
GPR:$Rm, pred:$p,
cc_out:$s)>,
Requires<[IsARM]>;
def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $shift"),
(!cast<Instruction>(!strconcat(baseOpc, "rsi")) GPR:$Rdn, GPR:$Rdn,
so_reg_imm:$shift, pred:$p,
cc_out:$s)>,
Requires<[IsARM]>;
def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $shift"),
(!cast<Instruction>(!strconcat(baseOpc, "rsr")) GPR:$Rdn, GPR:$Rdn,
so_reg_reg:$shift, pred:$p,
cc_out:$s)>,
Requires<[IsARM]>;
}
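// For example, "defm AND : AsI1_bin_irs<...>" later in this file expands to
// ANDri, ANDrr, ANDrsi, and ANDrsr, along with the destination-omitted
// assembly aliases defined above.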
/// AI1_bin_s_irs - Similar to AsI1_bin_irs except it sets the 's' bit so the
/// instruction modifies the CPSR register.
let isCodeGenOnly = 1, Defs = [CPSR] in {
multiclass AI1_bin_s_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
PatFrag opnode, bit Commutable = 0> {
def ri : AI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm,
iii, opc, "\t$Rd, $Rn, $imm",
[(set GPR:$Rd, (opnode GPR:$Rn, so_imm:$imm))]> {
bits<4> Rd;
bits<4> Rn;
bits<12> imm;
let Inst{25} = 1;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-0} = imm;
}
def rr : AI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm,
iir, opc, "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (opnode GPR:$Rn, GPR:$Rm))]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
let isCommutable = Commutable;
let Inst{25} = 0;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-4} = 0b00000000;
let Inst{3-0} = Rm;
}
def rsi : AI1<opcod, (outs GPR:$Rd),
(ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm,
iis, opc, "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, (opnode GPR:$Rn, so_reg_imm:$shift))]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-5} = shift{11-5};
let Inst{4} = 0;
let Inst{3-0} = shift{3-0};
}
def rsr : AI1<opcod, (outs GPR:$Rd),
(ins GPR:$Rn, so_reg_reg:$shift), DPSoRegRegFrm,
iis, opc, "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, (opnode GPR:$Rn, so_reg_reg:$shift))]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-8} = shift{11-8};
let Inst{7} = 0;
let Inst{6-5} = shift{6-5};
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
}
}
}
/// AI1_cmp_irs - Defines a set of (op r, {so_imm|r|so_reg}) cmp / test
/// patterns. Similar to AsI1_bin_irs except the instruction does not produce
/// an explicit result; it only implicitly sets CPSR.
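/// These are instantiated below for the compare/test instructions (e.g. tst,
/// teq, cmp), which have no destination register.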
let isCompare = 1, Defs = [CPSR] in {
multiclass AI1_cmp_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
PatFrag opnode, bit Commutable = 0> {
def ri : AI1<opcod, (outs), (ins GPR:$Rn, so_imm:$imm), DPFrm, iii,
opc, "\t$Rn, $imm",
[(opnode GPR:$Rn, so_imm:$imm)]> {
bits<4> Rn;
bits<12> imm;
let Inst{25} = 1;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = 0b0000;
let Inst{11-0} = imm;
}
def rr : AI1<opcod, (outs), (ins GPR:$Rn, GPR:$Rm), DPFrm, iir,
opc, "\t$Rn, $Rm",
[(opnode GPR:$Rn, GPR:$Rm)]> {
bits<4> Rn;
bits<4> Rm;
let isCommutable = Commutable;
let Inst{25} = 0;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = 0b0000;
let Inst{11-4} = 0b00000000;
let Inst{3-0} = Rm;
}
def rsi : AI1<opcod, (outs),
(ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm, iis,
opc, "\t$Rn, $shift",
[(opnode GPR:$Rn, so_reg_imm:$shift)]> {
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = 0b0000;
let Inst{11-5} = shift{11-5};
let Inst{4} = 0;
let Inst{3-0} = shift{3-0};
}
def rsr : AI1<opcod, (outs),
(ins GPR:$Rn, so_reg_reg:$shift), DPSoRegRegFrm, iis,
opc, "\t$Rn, $shift",
[(opnode GPR:$Rn, so_reg_reg:$shift)]> {
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = 0b0000;
let Inst{11-8} = shift{11-8};
let Inst{7} = 0;
let Inst{6-5} = shift{6-5};
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
}
}
}
/// AI_ext_rrot - A unary operation with two forms: one whose operand is a
/// register and one whose operand is a register rotated by 8/16/24.
/// FIXME: Remove the 'r' variant. Its rot_imm is zero.
class AI_ext_rrot<bits<8> opcod, string opc, PatFrag opnode>
: AExtI<opcod, (outs GPR:$Rd), (ins GPR:$Rm, rot_imm:$rot),
IIC_iEXTr, opc, "\t$Rd, $Rm$rot",
[(set GPR:$Rd, (opnode (rotr GPR:$Rm, rot_imm:$rot)))]>,
Requires<[IsARM, HasV6]> {
bits<4> Rd;
bits<4> Rm;
bits<2> rot;
let Inst{19-16} = 0b1111;
let Inst{15-12} = Rd;
let Inst{11-10} = rot;
let Inst{3-0} = Rm;
}
class AI_ext_rrot_np<bits<8> opcod, string opc>
: AExtI<opcod, (outs GPR:$Rd), (ins GPR:$Rm, rot_imm:$rot),
IIC_iEXTr, opc, "\t$Rd, $Rm$rot", []>,
Requires<[IsARM, HasV6]> {
bits<2> rot;
let Inst{19-16} = 0b1111;
let Inst{11-10} = rot;
}
/// AI_exta_rrot - A binary operation with two forms: one whose operand is a
/// register and one whose operand is a register rotated by 8/16/24.
class AI_exta_rrot<bits<8> opcod, string opc, PatFrag opnode>
: AExtI<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, rot_imm:$rot),
IIC_iEXTAr, opc, "\t$Rd, $Rn, $Rm$rot",
[(set GPR:$Rd, (opnode GPR:$Rn, (rotr GPR:$Rm, rot_imm:$rot)))]>,
Requires<[IsARM, HasV6]> {
bits<4> Rd;
bits<4> Rm;
bits<4> Rn;
bits<2> rot;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-10} = rot;
let Inst{9-4} = 0b000111;
let Inst{3-0} = Rm;
}
class AI_exta_rrot_np<bits<8> opcod, string opc>
: AExtI<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, rot_imm:$rot),
IIC_iEXTAr, opc, "\t$Rd, $Rn, $Rm$rot", []>,
Requires<[IsARM, HasV6]> {
bits<4> Rn;
bits<2> rot;
let Inst{19-16} = Rn;
let Inst{11-10} = rot;
}
/// AI1_adde_sube_irs - Define instructions and patterns for adde and sube.
multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
string baseOpc, bit Commutable = 0> {
let Uses = [CPSR] in {
def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm),
DPFrm, IIC_iALUi, opc, "\t$Rd, $Rn, $imm",
[(set GPR:$Rd, (opnode GPR:$Rn, so_imm:$imm))]>,
Requires<[IsARM]> {
bits<4> Rd;
bits<4> Rn;
bits<12> imm;
let Inst{25} = 1;
let Inst{15-12} = Rd;
let Inst{19-16} = Rn;
let Inst{11-0} = imm;
}
def rr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
DPFrm, IIC_iALUr, opc, "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (opnode GPR:$Rn, GPR:$Rm))]>,
Requires<[IsARM]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
let Inst{11-4} = 0b00000000;
let Inst{25} = 0;
let isCommutable = Commutable;
let Inst{3-0} = Rm;
let Inst{15-12} = Rd;
let Inst{19-16} = Rn;
}
def rsi : AsI1<opcod, (outs GPR:$Rd),
(ins GPR:$Rn, so_reg_imm:$shift),
DPSoRegImmFrm, IIC_iALUsr, opc, "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, (opnode GPR:$Rn, so_reg_imm:$shift))]>,
Requires<[IsARM]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-5} = shift{11-5};
let Inst{4} = 0;
let Inst{3-0} = shift{3-0};
}
def rsr : AsI1<opcod, (outs GPR:$Rd),
(ins GPR:$Rn, so_reg_reg:$shift),
DPSoRegRegFrm, IIC_iALUsr, opc, "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, (opnode GPR:$Rn, so_reg_reg:$shift))]>,
Requires<[IsARM]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-8} = shift{11-8};
let Inst{7} = 0;
let Inst{6-5} = shift{6-5};
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
}
}
// Assembly aliases for optional destination operand when it's the same
// as the source operand.
def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $imm"),
(!cast<Instruction>(!strconcat(baseOpc, "ri")) GPR:$Rdn, GPR:$Rdn,
so_imm:$imm, pred:$p,
cc_out:$s)>,
Requires<[IsARM]>;
def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $Rm"),
(!cast<Instruction>(!strconcat(baseOpc, "rr")) GPR:$Rdn, GPR:$Rdn,
GPR:$Rm, pred:$p,
cc_out:$s)>,
Requires<[IsARM]>;
def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $shift"),
(!cast<Instruction>(!strconcat(baseOpc, "rsi")) GPR:$Rdn, GPR:$Rdn,
so_reg_imm:$shift, pred:$p,
cc_out:$s)>,
Requires<[IsARM]>;
def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $shift"),
(!cast<Instruction>(!strconcat(baseOpc, "rsr")) GPR:$Rdn, GPR:$Rdn,
so_reg_reg:$shift, pred:$p,
cc_out:$s)>,
Requires<[IsARM]>;
}
// Carry setting variants
// NOTE: CPSR def omitted because it will be handled by the custom inserter.
let usesCustomInserter = 1 in {
multiclass AI1_adde_sube_s_irs<PatFrag opnode, bit Commutable = 0> {
def ri : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm),
4, IIC_iALUi,
[(set GPR:$Rd, (opnode GPR:$Rn, so_imm:$imm))]>;
def rr : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
4, IIC_iALUr,
[(set GPR:$Rd, (opnode GPR:$Rn, GPR:$Rm))]> {
let isCommutable = Commutable;
}
def rsi : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_reg_imm:$shift),
4, IIC_iALUsr,
[(set GPR:$Rd, (opnode GPR:$Rn, so_reg_imm:$shift))]>;
def rsr : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_reg_reg:$shift),
4, IIC_iALUsr,
[(set GPR:$Rd, (opnode GPR:$Rn, so_reg_reg:$shift))]>;
}
}
let canFoldAsLoad = 1, isReMaterializable = 1 in {
multiclass AI_ldr1<bit isByte, string opc, InstrItinClass iii,
InstrItinClass iir, PatFrag opnode> {
// Note: We use the complex addrmode_imm12 rather than just an input
// GPR and a constrained immediate so that we can use this to match
// frame index references and avoid matching constant pool references.
def i12: AI2ldst<0b010, 1, isByte, (outs GPR:$Rt), (ins addrmode_imm12:$addr),
AddrMode_i12, LdFrm, iii, opc, "\t$Rt, $addr",
[(set GPR:$Rt, (opnode addrmode_imm12:$addr))]> {
bits<4> Rt;
bits<17> addr;
let Inst{23} = addr{12}; // U (add = ('U' == 1))
let Inst{19-16} = addr{16-13}; // Rn
let Inst{15-12} = Rt;
let Inst{11-0} = addr{11-0}; // imm12
}
def rs : AI2ldst<0b011, 1, isByte, (outs GPR:$Rt), (ins ldst_so_reg:$shift),
AddrModeNone, LdFrm, iir, opc, "\t$Rt, $shift",
[(set GPR:$Rt, (opnode ldst_so_reg:$shift))]> {
bits<4> Rt;
bits<17> shift;
let shift{4} = 0; // Inst{4} = 0
let Inst{23} = shift{12}; // U (add = ('U' == 1))
let Inst{19-16} = shift{16-13}; // Rn
let Inst{15-12} = Rt;
let Inst{11-0} = shift{11-0};
}
}
}
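// Instantiated below as "defm LDR : AI_ldr1<...>" and "defm LDRB : ...",
// yielding the LDRi12/LDRrs and LDRBi12/LDRBrs instructions.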
multiclass AI_str1<bit isByte, string opc, InstrItinClass iii,
InstrItinClass iir, PatFrag opnode> {
// Note: We use the complex addrmode_imm12 rather than just an input
// GPR and a constrained immediate so that we can use this to match
// frame index references and avoid matching constant pool references.
def i12 : AI2ldst<0b010, 0, isByte, (outs),
(ins GPR:$Rt, addrmode_imm12:$addr),
AddrMode_i12, StFrm, iii, opc, "\t$Rt, $addr",
[(opnode GPR:$Rt, addrmode_imm12:$addr)]> {
bits<4> Rt;
bits<17> addr;
let Inst{23} = addr{12}; // U (add = ('U' == 1))
let Inst{19-16} = addr{16-13}; // Rn
let Inst{15-12} = Rt;
let Inst{11-0} = addr{11-0}; // imm12
}
def rs : AI2ldst<0b011, 0, isByte, (outs), (ins GPR:$Rt, ldst_so_reg:$shift),
AddrModeNone, StFrm, iir, opc, "\t$Rt, $shift",
[(opnode GPR:$Rt, ldst_so_reg:$shift)]> {
bits<4> Rt;
bits<17> shift;
let shift{4} = 0; // Inst{4} = 0
let Inst{23} = shift{12}; // U (add = ('U' == 1))
let Inst{19-16} = shift{16-13}; // Rn
let Inst{15-12} = Rt;
let Inst{11-0} = shift{11-0};
}
}
//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Miscellaneous Instructions.
//
/// CONSTPOOL_ENTRY - This instruction represents a floating constant pool in
/// the function. The first operand is the ID# for this instruction, the second
/// is the index of the corresponding MachineConstantPool entry, and the third
/// is the size in bytes of this constant pool entry.
let neverHasSideEffects = 1, isNotDuplicable = 1 in
def CONSTPOOL_ENTRY :
PseudoInst<(outs), (ins cpinst_operand:$instid, cpinst_operand:$cpidx,
i32imm:$size), NoItinerary, []>;
// FIXME: Marking these as hasSideEffects is necessary to prevent machine DCE
// from removing one half of the matched pairs. That breaks PEI, which assumes
// these will always be in pairs, and asserts if it finds otherwise. Better way?
let Defs = [SP], Uses = [SP], hasSideEffects = 1 in {
def ADJCALLSTACKUP :
PseudoInst<(outs), (ins i32imm:$amt1, i32imm:$amt2, pred:$p), NoItinerary,
[(ARMcallseq_end timm:$amt1, timm:$amt2)]>;
def ADJCALLSTACKDOWN :
PseudoInst<(outs), (ins i32imm:$amt, pred:$p), NoItinerary,
[(ARMcallseq_start timm:$amt)]>;
}
def NOP : AI<(outs), (ins), MiscFrm, NoItinerary, "nop", "",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6T2]> {
let Inst{27-16} = 0b001100100000;
let Inst{15-8} = 0b11110000;
let Inst{7-0} = 0b00000000;
}
def YIELD : AI<(outs), (ins), MiscFrm, NoItinerary, "yield", "",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6T2]> {
let Inst{27-16} = 0b001100100000;
let Inst{15-8} = 0b11110000;
let Inst{7-0} = 0b00000001;
}
def WFE : AI<(outs), (ins), MiscFrm, NoItinerary, "wfe", "",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6T2]> {
let Inst{27-16} = 0b001100100000;
let Inst{15-8} = 0b11110000;
let Inst{7-0} = 0b00000010;
}
def WFI : AI<(outs), (ins), MiscFrm, NoItinerary, "wfi", "",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6T2]> {
let Inst{27-16} = 0b001100100000;
let Inst{15-8} = 0b11110000;
let Inst{7-0} = 0b00000011;
}
def SEL : AI<(outs GPR:$dst), (ins GPR:$a, GPR:$b), DPFrm, NoItinerary, "sel",
"\t$dst, $a, $b", []>, Requires<[IsARM, HasV6]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
let Inst{3-0} = Rm;
let Inst{15-12} = Rd;
let Inst{19-16} = Rn;
let Inst{27-20} = 0b01101000;
let Inst{7-4} = 0b1011;
let Inst{11-8} = 0b1111;
}
def SEV : AI<(outs), (ins), MiscFrm, NoItinerary, "sev", "",
[]>, Requires<[IsARM, HasV6T2]> {
let Inst{27-16} = 0b001100100000;
let Inst{15-8} = 0b11110000;
let Inst{7-0} = 0b00000100;
}
// The i32imm operand $val can be used by a debugger to store more information
// about the breakpoint.
def BKPT : AI<(outs), (ins imm0_65535:$val), MiscFrm, NoItinerary,
"bkpt", "\t$val", []>, Requires<[IsARM]> {
bits<16> val;
let Inst{3-0} = val{3-0};
let Inst{19-8} = val{15-4};
let Inst{27-20} = 0b00010010;
let Inst{7-4} = 0b0111;
}
// Change Processor State is a system instruction -- for disassembly and
// parsing only.
// FIXME: Since the asm parser has currently no clean way to handle optional
// operands, create 3 versions of the same instruction. Once there's a clean
// framework to represent optional operands, change this behavior.
class CPS<dag iops, string asm_ops>
: AXI<(outs), iops, MiscFrm, NoItinerary, !strconcat("cps", asm_ops),
[/* For disassembly only; pattern left blank */]>, Requires<[IsARM]> {
bits<2> imod;
bits<3> iflags;
bits<5> mode;
bit M;
let Inst{31-28} = 0b1111;
let Inst{27-20} = 0b00010000;
let Inst{19-18} = imod;
let Inst{17} = M; // Enabled if mode is set;
let Inst{16} = 0;
let Inst{8-6} = iflags;
let Inst{5} = 0;
let Inst{4-0} = mode;
}
let M = 1 in
def CPS3p : CPS<(ins imod_op:$imod, iflags_op:$iflags, i32imm:$mode),
"$imod\t$iflags, $mode">;
let mode = 0, M = 0 in
def CPS2p : CPS<(ins imod_op:$imod, iflags_op:$iflags), "$imod\t$iflags">;
let imod = 0, iflags = 0, M = 1 in
def CPS1p : CPS<(ins i32imm:$mode), "\t$mode">;
// Preload hints to the memory system about possible future data/instruction
// accesses.
// These are for disassembly only.
multiclass APreLoad<bits<1> read, bits<1> data, string opc> {
def i12 : AXI<(outs), (ins addrmode_imm12:$addr), MiscFrm, IIC_Preload,
!strconcat(opc, "\t$addr"),
[(ARMPreload addrmode_imm12:$addr, (i32 read), (i32 data))]> {
bits<4> Rt;
bits<17> addr;
let Inst{31-26} = 0b111101;
let Inst{25} = 0; // 0 for immediate form
let Inst{24} = data;
let Inst{23} = addr{12}; // U (add = ('U' == 1))
let Inst{22} = read;
let Inst{21-20} = 0b01;
let Inst{19-16} = addr{16-13}; // Rn
let Inst{15-12} = 0b1111;
let Inst{11-0} = addr{11-0}; // imm12
}
def rs : AXI<(outs), (ins ldst_so_reg:$shift), MiscFrm, IIC_Preload,
!strconcat(opc, "\t$shift"),
[(ARMPreload ldst_so_reg:$shift, (i32 read), (i32 data))]> {
bits<17> shift;
let Inst{31-26} = 0b111101;
let Inst{25} = 1; // 1 for register form
let Inst{24} = data;
let Inst{23} = shift{12}; // U (add = ('U' == 1))
let Inst{22} = read;
let Inst{21-20} = 0b01;
let Inst{19-16} = shift{16-13}; // Rn
let Inst{15-12} = 0b1111;
let Inst{11-0} = shift{11-0};
}
}
defm PLD : APreLoad<1, 1, "pld">, Requires<[IsARM]>;
defm PLDW : APreLoad<0, 1, "pldw">, Requires<[IsARM,HasV7,HasMP]>;
defm PLI : APreLoad<1, 0, "pli">, Requires<[IsARM,HasV7]>;
def SETEND : AXI<(outs), (ins setend_op:$end), MiscFrm, NoItinerary,
"setend\t$end", []>, Requires<[IsARM]> {
bits<1> end;
let Inst{31-10} = 0b1111000100000001000000;
let Inst{9} = end;
let Inst{8-0} = 0;
}
def DBG : AI<(outs), (ins imm0_15:$opt), MiscFrm, NoItinerary, "dbg", "\t$opt",
[]>, Requires<[IsARM, HasV7]> {
bits<4> opt;
let Inst{27-4} = 0b001100100000111100001111;
let Inst{3-0} = opt;
}
// A5.4 Permanently UNDEFINED instructions.
let isBarrier = 1, isTerminator = 1 in
def TRAP : AXI<(outs), (ins), MiscFrm, NoItinerary,
"trap", [(trap)]>,
Requires<[IsARM]> {
let Inst = 0xe7ffdefe;
}
// Address computation and loads and stores in PIC mode.
let isNotDuplicable = 1 in {
def PICADD : ARMPseudoInst<(outs GPR:$dst), (ins GPR:$a, pclabel:$cp, pred:$p),
4, IIC_iALUr,
[(set GPR:$dst, (ARMpic_add GPR:$a, imm:$cp))]>;
let AddedComplexity = 10 in {
def PICLDR : ARMPseudoInst<(outs GPR:$dst), (ins addrmodepc:$addr, pred:$p),
4, IIC_iLoad_r,
[(set GPR:$dst, (load addrmodepc:$addr))]>;
def PICLDRH : ARMPseudoInst<(outs GPR:$Rt), (ins addrmodepc:$addr, pred:$p),
4, IIC_iLoad_bh_r,
[(set GPR:$Rt, (zextloadi16 addrmodepc:$addr))]>;
def PICLDRB : ARMPseudoInst<(outs GPR:$Rt), (ins addrmodepc:$addr, pred:$p),
4, IIC_iLoad_bh_r,
[(set GPR:$Rt, (zextloadi8 addrmodepc:$addr))]>;
def PICLDRSH : ARMPseudoInst<(outs GPR:$Rt), (ins addrmodepc:$addr, pred:$p),
4, IIC_iLoad_bh_r,
[(set GPR:$Rt, (sextloadi16 addrmodepc:$addr))]>;
def PICLDRSB : ARMPseudoInst<(outs GPR:$Rt), (ins addrmodepc:$addr, pred:$p),
4, IIC_iLoad_bh_r,
[(set GPR:$Rt, (sextloadi8 addrmodepc:$addr))]>;
}
let AddedComplexity = 10 in {
def PICSTR : ARMPseudoInst<(outs), (ins GPR:$src, addrmodepc:$addr, pred:$p),
4, IIC_iStore_r, [(store GPR:$src, addrmodepc:$addr)]>;
def PICSTRH : ARMPseudoInst<(outs), (ins GPR:$src, addrmodepc:$addr, pred:$p),
4, IIC_iStore_bh_r, [(truncstorei16 GPR:$src,
addrmodepc:$addr)]>;
def PICSTRB : ARMPseudoInst<(outs), (ins GPR:$src, addrmodepc:$addr, pred:$p),
4, IIC_iStore_bh_r, [(truncstorei8 GPR:$src, addrmodepc:$addr)]>;
}
} // isNotDuplicable = 1
// LEApcrel - Load a pc-relative address into a register without offending the
// assembler.
let neverHasSideEffects = 1, isReMaterializable = 1 in
// The 'adr' mnemonic encodes differently if the label is before or after
// the instruction. The {24-21} opcode bits are set by the fixup, as we don't
// know until then which form of the instruction will be used.
def ADR : AI1<{0,?,?,0}, (outs GPR:$Rd), (ins adrlabel:$label),
MiscFrm, IIC_iALUi, "adr", "\t$Rd, $label", []> {
bits<4> Rd;
bits<12> label;
let Inst{27-25} = 0b001;
let Inst{20} = 0;
let Inst{19-16} = 0b1111;
let Inst{15-12} = Rd;
let Inst{11-0} = label;
}
def LEApcrel : ARMPseudoInst<(outs GPR:$Rd), (ins i32imm:$label, pred:$p),
4, IIC_iALUi, []>;
def LEApcrelJT : ARMPseudoInst<(outs GPR:$Rd),
(ins i32imm:$label, nohash_imm:$id, pred:$p),
4, IIC_iALUi, []>;
//===----------------------------------------------------------------------===//
// Control Flow Instructions.
//
let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
// ARMV4T and above
def BX_RET : AI<(outs), (ins), BrMiscFrm, IIC_Br,
"bx", "\tlr", [(ARMretflag)]>,
Requires<[IsARM, HasV4T]> {
let Inst{27-0} = 0b0001001011111111111100011110;
}
// ARMV4 only
def MOVPCLR : AI<(outs), (ins), BrMiscFrm, IIC_Br,
"mov", "\tpc, lr", [(ARMretflag)]>,
Requires<[IsARM, NoV4T]> {
let Inst{27-0} = 0b0001101000001111000000001110;
}
}
// Indirect branches
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
// ARMV4T and above
def BX : AXI<(outs), (ins GPR:$dst), BrMiscFrm, IIC_Br, "bx\t$dst",
[(brind GPR:$dst)]>,
Requires<[IsARM, HasV4T]> {
bits<4> dst;
let Inst{31-4} = 0b1110000100101111111111110001;
let Inst{3-0} = dst;
}
def BX_pred : AI<(outs), (ins GPR:$dst), BrMiscFrm, IIC_Br,
"bx", "\t$dst", [/* pattern left blank */]>,
Requires<[IsARM, HasV4T]> {
bits<4> dst;
let Inst{27-4} = 0b000100101111111111110001;
let Inst{3-0} = dst;
}
}
// All calls clobber the non-callee saved registers. SP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let isCall = 1,
// On non-Darwin platforms R9 is callee-saved.
// FIXME: Do we really need a non-predicated version? If so, it should
// at least be a pseudo instruction expanding to the predicated version
// at MC lowering time.
Defs = [R0, R1, R2, R3, R12, LR, QQQQ0, QQQQ2, QQQQ3, CPSR, FPSCR],
Uses = [SP] in {
def BL : ABXI<0b1011, (outs), (ins bl_target:$func, variable_ops),
IIC_Br, "bl\t$func",
[(ARMcall tglobaladdr:$func)]>,
Requires<[IsARM, IsNotDarwin]> {
let Inst{31-28} = 0b1110;
bits<24> func;
let Inst{23-0} = func;
}
def BL_pred : ABI<0b1011, (outs), (ins bl_target:$func, variable_ops),
IIC_Br, "bl", "\t$func",
[(ARMcall_pred tglobaladdr:$func)]>,
Requires<[IsARM, IsNotDarwin]> {
bits<24> func;
let Inst{23-0} = func;
}
// ARMv5T and above
def BLX : AXI<(outs), (ins GPR:$func, variable_ops), BrMiscFrm,
IIC_Br, "blx\t$func",
[(ARMcall GPR:$func)]>,
Requires<[IsARM, HasV5T, IsNotDarwin]> {
bits<4> func;
let Inst{31-4} = 0b1110000100101111111111110011;
let Inst{3-0} = func;
}
def BLX_pred : AI<(outs), (ins GPR:$func, variable_ops), BrMiscFrm,
IIC_Br, "blx", "\t$func",
[(ARMcall_pred GPR:$func)]>,
Requires<[IsARM, HasV5T, IsNotDarwin]> {
bits<4> func;
let Inst{27-4} = 0b000100101111111111110011;
let Inst{3-0} = func;
}
// ARMv4T
// Note: Restrict $func to the tGPR regclass to prevent it being in LR.
def BX_CALL : ARMPseudoInst<(outs), (ins tGPR:$func, variable_ops),
8, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
Requires<[IsARM, HasV4T, IsNotDarwin]>;
// ARMv4
def BMOVPCRX_CALL : ARMPseudoInst<(outs), (ins tGPR:$func, variable_ops),
8, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
Requires<[IsARM, NoV4T, IsNotDarwin]>;
}
let isCall = 1,
// On Darwin R9 is call-clobbered.
// R7 is marked as a use to prevent frame-pointer assignments from being
// moved above / below calls.
Defs = [R0, R1, R2, R3, R9, R12, LR, QQQQ0, QQQQ2, QQQQ3, CPSR, FPSCR],
Uses = [R7, SP] in {
def BLr9 : ARMPseudoExpand<(outs), (ins bl_target:$func, variable_ops),
4, IIC_Br,
[(ARMcall tglobaladdr:$func)], (BL bl_target:$func)>,
Requires<[IsARM, IsDarwin]>;
def BLr9_pred : ARMPseudoExpand<(outs),
(ins bl_target:$func, pred:$p, variable_ops),
4, IIC_Br,
[(ARMcall_pred tglobaladdr:$func)],
(BL_pred bl_target:$func, pred:$p)>,
Requires<[IsARM, IsDarwin]>;
// ARMv5T and above
def BLXr9 : ARMPseudoExpand<(outs), (ins GPR:$func, variable_ops),
4, IIC_Br,
[(ARMcall GPR:$func)],
(BLX GPR:$func)>,
Requires<[IsARM, HasV5T, IsDarwin]>;
def BLXr9_pred: ARMPseudoExpand<(outs), (ins GPR:$func, pred:$p,variable_ops),
4, IIC_Br,
[(ARMcall_pred GPR:$func)],
(BLX_pred GPR:$func, pred:$p)>,
Requires<[IsARM, HasV5T, IsDarwin]>;
// ARMv4T
// Note: Restrict $func to the tGPR regclass to prevent it being in LR.
def BXr9_CALL : ARMPseudoInst<(outs), (ins tGPR:$func, variable_ops),
8, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
Requires<[IsARM, HasV4T, IsDarwin]>;
// ARMv4
def BMOVPCRXr9_CALL : ARMPseudoInst<(outs), (ins tGPR:$func, variable_ops),
8, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
Requires<[IsARM, NoV4T, IsDarwin]>;
}
let isBranch = 1, isTerminator = 1 in {
// FIXME: should be able to write a pattern for ARMBrcond, but can't use
// a two-value operand where a dag node expects two operands. :(
def Bcc : ABI<0b1010, (outs), (ins br_target:$target),
IIC_Br, "b", "\t$target",
[/*(ARMbrcond bb:$target, imm:$cc, CCR:$ccr)*/]> {
bits<24> target;
let Inst{23-0} = target;
}
let isBarrier = 1 in {
// B is "predicable" since it's just a Bcc with an 'always' condition.
let isPredicable = 1 in
// FIXME: We shouldn't need this pseudo at all. Just using Bcc directly
// should be sufficient.
// FIXME: Is B really a Barrier? That doesn't seem right.
def B : ARMPseudoExpand<(outs), (ins br_target:$target), 4, IIC_Br,
[(br bb:$target)], (Bcc br_target:$target, (ops 14, zero_reg))>;
let isNotDuplicable = 1, isIndirectBranch = 1 in {
def BR_JTr : ARMPseudoInst<(outs),
(ins GPR:$target, i32imm:$jt, i32imm:$id),
0, IIC_Br,
[(ARMbrjt GPR:$target, tjumptable:$jt, imm:$id)]>;
// FIXME: This shouldn't use the generic "addrmode2," but rather be split
// into i12 and rs suffixed versions.
def BR_JTm : ARMPseudoInst<(outs),
(ins addrmode2:$target, i32imm:$jt, i32imm:$id),
0, IIC_Br,
[(ARMbrjt (i32 (load addrmode2:$target)), tjumptable:$jt,
imm:$id)]>;
def BR_JTadd : ARMPseudoInst<(outs),
(ins GPR:$target, GPR:$idx, i32imm:$jt, i32imm:$id),
0, IIC_Br,
[(ARMbrjt (add GPR:$target, GPR:$idx), tjumptable:$jt,
imm:$id)]>;
} // isNotDuplicable = 1, isIndirectBranch = 1
} // isBarrier = 1
}
// BLX (immediate)
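// The immediate form always changes to Thumb state at the target; the H bit
// (Inst{24}) supplies the extra half-word of offset resolution that Thumb
// entry points require.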
def BLXi : AXI<(outs), (ins br_target:$target), BrMiscFrm, NoItinerary,
"blx\t$target", []>,
Requires<[IsARM, HasV5T]> {
let Inst{31-25} = 0b1111101;
bits<25> target;
let Inst{23-0} = target{24-1};
let Inst{24} = target{0};
}
// Branch and Exchange Jazelle
def BXJ : ABI<0b0001, (outs), (ins GPR:$func), NoItinerary, "bxj", "\t$func",
[/* pattern left blank */]> {
bits<4> func;
let Inst{23-20} = 0b0010;
let Inst{19-8} = 0xfff;
let Inst{7-4} = 0b0010;
let Inst{3-0} = func;
}
// Tail calls.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
// Darwin versions.
let Defs = [R0, R1, R2, R3, R9, R12, QQQQ0, QQQQ2, QQQQ3, PC],
Uses = [SP] in {
def TCRETURNdi : PseudoInst<(outs), (ins i32imm:$dst, variable_ops),
IIC_Br, []>, Requires<[IsDarwin]>;
def TCRETURNri : PseudoInst<(outs), (ins tcGPR:$dst, variable_ops),
IIC_Br, []>, Requires<[IsDarwin]>;
def TAILJMPd : ARMPseudoExpand<(outs), (ins br_target:$dst, variable_ops),
4, IIC_Br, [],
(Bcc br_target:$dst, (ops 14, zero_reg))>,
Requires<[IsARM, IsDarwin]>;
def TAILJMPr : ARMPseudoExpand<(outs), (ins tcGPR:$dst, variable_ops),
4, IIC_Br, [],
(BX GPR:$dst)>,
Requires<[IsARM, IsDarwin]>;
}
// Non-Darwin versions (the difference is R9).
let Defs = [R0, R1, R2, R3, R12, QQQQ0, QQQQ2, QQQQ3, PC],
Uses = [SP] in {
def TCRETURNdiND : PseudoInst<(outs), (ins i32imm:$dst, variable_ops),
IIC_Br, []>, Requires<[IsNotDarwin]>;
def TCRETURNriND : PseudoInst<(outs), (ins tcGPR:$dst, variable_ops),
IIC_Br, []>, Requires<[IsNotDarwin]>;
def TAILJMPdND : ARMPseudoExpand<(outs), (ins brtarget:$dst, variable_ops),
4, IIC_Br, [],
(Bcc br_target:$dst, (ops 14, zero_reg))>,
Requires<[IsARM, IsNotDarwin]>;
def TAILJMPrND : ARMPseudoExpand<(outs), (ins tcGPR:$dst, variable_ops),
4, IIC_Br, [],
(BX GPR:$dst)>,
Requires<[IsARM, IsNotDarwin]>;
}
}
// Secure Monitor Call is a system instruction -- for disassembly only
def SMC : ABI<0b0001, (outs), (ins imm0_15:$opt), NoItinerary, "smc", "\t$opt",
[]> {
bits<4> opt;
let Inst{23-4} = 0b01100000000000000111;
let Inst{3-0} = opt;
}
// Supervisor Call (Software Interrupt)
let isCall = 1, Uses = [SP] in {
def SVC : ABI<0b1111, (outs), (ins imm24b:$svc), IIC_Br, "svc", "\t$svc", []> {
bits<24> svc;
let Inst{23-0} = svc;
}
}
// Store Return State is a system instruction -- for disassembly only
let isCodeGenOnly = 1 in { // FIXME: This should not use submode!
def SRSW : ABXI<{1,0,0,?}, (outs), (ins ldstm_mode:$amode, i32imm:$mode),
NoItinerary, "srs${amode}\tsp!, $mode",
[/* For disassembly only; pattern left blank */]> {
let Inst{31-28} = 0b1111;
let Inst{22-20} = 0b110; // W = 1
let Inst{19-8} = 0xd05;
let Inst{7-5} = 0b000;
}
def SRS : ABXI<{1,0,0,?}, (outs), (ins ldstm_mode:$amode, i32imm:$mode),
NoItinerary, "srs${amode}\tsp, $mode",
[/* For disassembly only; pattern left blank */]> {
let Inst{31-28} = 0b1111;
let Inst{22-20} = 0b100; // W = 0
let Inst{19-8} = 0xd05;
let Inst{7-5} = 0b000;
}
// Return From Exception is a system instruction -- for disassembly only
def RFEW : ABXI<{1,0,0,?}, (outs), (ins ldstm_mode:$amode, GPR:$base),
NoItinerary, "rfe${amode}\t$base!",
[/* For disassembly only; pattern left blank */]> {
let Inst{31-28} = 0b1111;
let Inst{22-20} = 0b011; // W = 1
let Inst{15-0} = 0x0a00;
}
def RFE : ABXI<{1,0,0,?}, (outs), (ins ldstm_mode:$amode, GPR:$base),
NoItinerary, "rfe${amode}\t$base",
[/* For disassembly only; pattern left blank */]> {
let Inst{31-28} = 0b1111;
let Inst{22-20} = 0b001; // W = 0
let Inst{15-0} = 0x0a00;
}
} // isCodeGenOnly = 1
//===----------------------------------------------------------------------===//
// Load / store Instructions.
//
// Load
defm LDR : AI_ldr1<0, "ldr", IIC_iLoad_r, IIC_iLoad_si,
UnOpFrag<(load node:$Src)>>;
defm LDRB : AI_ldr1<1, "ldrb", IIC_iLoad_bh_r, IIC_iLoad_bh_si,
UnOpFrag<(zextloadi8 node:$Src)>>;
defm STR : AI_str1<0, "str", IIC_iStore_r, IIC_iStore_si,
BinOpFrag<(store node:$LHS, node:$RHS)>>;
defm STRB : AI_str1<1, "strb", IIC_iStore_bh_r, IIC_iStore_bh_si,
BinOpFrag<(truncstorei8 node:$LHS, node:$RHS)>>;
// Special LDR for loads from non-pc-relative constpools.
let canFoldAsLoad = 1, mayLoad = 1, neverHasSideEffects = 1,
isReMaterializable = 1 in
def LDRcp : AI2ldst<0b010, 1, 0, (outs GPR:$Rt), (ins addrmode_imm12:$addr),
AddrMode_i12, LdFrm, IIC_iLoad_r, "ldr", "\t$Rt, $addr",
[]> {
bits<4> Rt;
bits<17> addr;
let Inst{23} = addr{12}; // U (add = ('U' == 1))
let Inst{19-16} = 0b1111;
let Inst{15-12} = Rt;
let Inst{11-0} = addr{11-0}; // imm12
}
// Loads with zero extension
def LDRH : AI3ld<0b1011, 1, (outs GPR:$Rt), (ins addrmode3:$addr), LdMiscFrm,
IIC_iLoad_bh_r, "ldrh", "\t$Rt, $addr",
[(set GPR:$Rt, (zextloadi16 addrmode3:$addr))]>;
// Loads with sign extension
def LDRSH : AI3ld<0b1111, 1, (outs GPR:$Rt), (ins addrmode3:$addr), LdMiscFrm,
IIC_iLoad_bh_r, "ldrsh", "\t$Rt, $addr",
[(set GPR:$Rt, (sextloadi16 addrmode3:$addr))]>;
def LDRSB : AI3ld<0b1101, 1, (outs GPR:$Rt), (ins addrmode3:$addr), LdMiscFrm,
IIC_iLoad_bh_r, "ldrsb", "\t$Rt, $addr",
[(set GPR:$Rt, (sextloadi8 addrmode3:$addr))]>;
let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
// Load doubleword
def LDRD : AI3ld<0b1101, 0, (outs GPR:$Rd, GPR:$dst2),
(ins addrmode3:$addr), LdMiscFrm,
IIC_iLoad_d_r, "ldrd", "\t$Rd, $dst2, $addr",
[]>, Requires<[IsARM, HasV5TE]>;
}
// Indexed loads
multiclass AI2_ldridx<bit isByte, string opc, InstrItinClass itin> {
def _PRE : AI2ldstidx<1, isByte, 1, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addrmode2:$addr), IndexModePre, LdFrm, itin,
opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> {
// {17-14} Rn
// {13} reg vs. imm
// {12} isAdd
// {11-0} imm12/Rm
bits<18> addr;
let Inst{25} = addr{13};
let Inst{23} = addr{12};
let Inst{19-16} = addr{17-14};
let Inst{11-0} = addr{11-0};
let AsmMatchConverter = "cvtLdWriteBackRegAddrMode2";
}
def _POST_REG : AI2ldstidx<1, isByte, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins GPR:$Rn, am2offset_reg:$offset),
IndexModePost, LdFrm, itin,
opc, "\t$Rt, [$Rn], $offset", "$Rn = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
bits<4> Rn;
let Inst{25} = 1;
let Inst{23} = offset{12};
let Inst{19-16} = Rn;
let Inst{11-0} = offset{11-0};
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
def _POST_IMM : AI2ldstidx<1, isByte, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins GPR:$Rn, am2offset_imm:$offset),
IndexModePost, LdFrm, itin,
opc, "\t$Rt, [$Rn], $offset", "$Rn = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
bits<4> Rn;
let Inst{25} = 0;
let Inst{23} = offset{12};
let Inst{19-16} = Rn;
let Inst{11-0} = offset{11-0};
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
}
let mayLoad = 1, neverHasSideEffects = 1 in {
defm LDR : AI2_ldridx<0, "ldr", IIC_iLoad_ru>;
defm LDRB : AI2_ldridx<1, "ldrb", IIC_iLoad_bh_ru>;
}
multiclass AI3_ldridx<bits<4> op, bit op20, string opc, InstrItinClass itin> {
def _PRE : AI3ldstidx<op, op20, 1, 1, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addrmode3:$addr), IndexModePre,
LdMiscFrm, itin,
opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> {
bits<14> addr;
let Inst{23} = addr{8}; // U bit
let Inst{22} = addr{13}; // 1 == imm8, 0 == Rm
let Inst{19-16} = addr{12-9}; // Rn
let Inst{11-8} = addr{7-4}; // imm7_4/zero
let Inst{3-0} = addr{3-0}; // imm3_0/Rm
}
def _POST : AI3ldstidx<op, op20, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins GPR:$Rn, am3offset:$offset), IndexModePost,
LdMiscFrm, itin,
opc, "\t$Rt, [$Rn], $offset", "$Rn = $Rn_wb", []> {
bits<10> offset;
bits<4> Rn;
let Inst{23} = offset{8}; // U bit
let Inst{22} = offset{9}; // 1 == imm8, 0 == Rm
let Inst{19-16} = Rn;
let Inst{11-8} = offset{7-4}; // imm7_4/zero
let Inst{3-0} = offset{3-0}; // imm3_0/Rm
}
}
let mayLoad = 1, neverHasSideEffects = 1 in {
defm LDRH : AI3_ldridx<0b1011, 1, "ldrh", IIC_iLoad_bh_ru>;
defm LDRSH : AI3_ldridx<0b1111, 1, "ldrsh", IIC_iLoad_bh_ru>;
defm LDRSB : AI3_ldridx<0b1101, 1, "ldrsb", IIC_iLoad_bh_ru>;
let hasExtraDefRegAllocReq = 1 in {
def LDRD_PRE : AI3ldstidx<0b1101, 0, 1, 1, (outs GPR:$Rt, GPR:$Rt2, GPR:$Rn_wb),
(ins addrmode3:$addr), IndexModePre,
LdMiscFrm, IIC_iLoad_d_ru,
"ldrd", "\t$Rt, $Rt2, $addr!",
"$addr.base = $Rn_wb", []> {
bits<14> addr;
let Inst{23} = addr{8}; // U bit
let Inst{22} = addr{13}; // 1 == imm8, 0 == Rm
let Inst{19-16} = addr{12-9}; // Rn
let Inst{11-8} = addr{7-4}; // imm7_4/zero
let Inst{3-0} = addr{3-0}; // imm3_0/Rm
let DecoderMethod = "DecodeAddrMode3Instruction";
}
def LDRD_POST: AI3ldstidx<0b1101, 0, 1, 0, (outs GPR:$Rt, GPR:$Rt2, GPR:$Rn_wb),
(ins GPR:$Rn, am3offset:$offset), IndexModePost,
LdMiscFrm, IIC_iLoad_d_ru,
"ldrd", "\t$Rt, $Rt2, [$Rn], $offset",
"$Rn = $Rn_wb", []> {
bits<10> offset;
bits<4> Rn;
let Inst{23} = offset{8}; // U bit
let Inst{22} = offset{9}; // 1 == imm8, 0 == Rm
let Inst{19-16} = Rn;
let Inst{11-8} = offset{7-4}; // imm7_4/zero
let Inst{3-0} = offset{3-0}; // imm3_0/Rm
let DecoderMethod = "DecodeAddrMode3Instruction";
}
} // hasExtraDefRegAllocReq = 1
} // mayLoad = 1, neverHasSideEffects = 1
// LDRT, LDRBT, LDRSBT, LDRHT, LDRSHT are for disassembly only.
let mayLoad = 1, neverHasSideEffects = 1 in {
def LDRT : AI2ldstidx<1, 0, 0, (outs GPR:$Rt, GPR:$base_wb),
(ins addrmode2:$addr), IndexModePost, LdFrm, IIC_iLoad_ru,
"ldrt", "\t$Rt, $addr", "$addr.base = $base_wb", []> {
// {17-14} Rn
// {13} 1 == Rm, 0 == imm12
// {12} isAdd
// {11-0} imm12/Rm
bits<18> addr;
let Inst{25} = addr{13};
let Inst{23} = addr{12};
let Inst{21} = 1; // overwrite
let Inst{19-16} = addr{17-14};
let Inst{11-0} = addr{11-0};
let AsmMatchConverter = "cvtLdWriteBackRegAddrMode2";
}
def LDRBT : AI2ldstidx<1, 1, 0, (outs GPR:$Rt, GPR:$base_wb),
(ins addrmode2:$addr), IndexModePost, LdFrm, IIC_iLoad_bh_ru,
"ldrbt", "\t$Rt, $addr", "$addr.base = $base_wb", []> {
// {17-14} Rn
// {13} 1 == Rm, 0 == imm12
// {12} isAdd
// {11-0} imm12/Rm
bits<18> addr;
let Inst{25} = addr{13};
let Inst{23} = addr{12};
let Inst{21} = 1; // overwrite
let Inst{19-16} = addr{17-14};
let Inst{11-0} = addr{11-0};
let AsmMatchConverter = "cvtLdWriteBackRegAddrMode2";
}
def LDRSBT : AI3ldstidxT<0b1101, 1, 1, 0, (outs GPR:$Rt, GPR:$base_wb),
(ins addrmode3:$addr), IndexModePost, LdMiscFrm, IIC_iLoad_bh_ru,
"ldrsbt", "\t$Rt, $addr", "$addr.base = $base_wb", []> {
let Inst{21} = 1; // overwrite
}
def LDRHT : AI3ldstidxT<0b1011, 1, 1, 0, (outs GPR:$Rt, GPR:$base_wb),
(ins addrmode3:$addr), IndexModePost, LdMiscFrm, IIC_iLoad_bh_ru,
"ldrht", "\t$Rt, $addr", "$addr.base = $base_wb", []> {
let Inst{21} = 1; // overwrite
}
def LDRSHT : AI3ldstidxT<0b1111, 1, 1, 0, (outs GPR:$Rt, GPR:$base_wb),
(ins addrmode3:$addr), IndexModePost, LdMiscFrm, IIC_iLoad_bh_ru,
"ldrsht", "\t$Rt, $addr", "$addr.base = $base_wb", []> {
let Inst{21} = 1; // overwrite
}
}
// Store
// Stores with truncate
def STRH : AI3str<0b1011, (outs), (ins GPR:$Rt, addrmode3:$addr), StMiscFrm,
IIC_iStore_bh_r, "strh", "\t$Rt, $addr",
[(truncstorei16 GPR:$Rt, addrmode3:$addr)]>;
// Store doubleword
let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in
def STRD : AI3str<0b1111, (outs), (ins GPR:$Rt, GPR:$src2, addrmode3:$addr),
StMiscFrm, IIC_iStore_d_r,
"strd", "\t$Rt, $src2, $addr", []>,
Requires<[IsARM, HasV5TE]> {
let Inst{21} = 0;
}
// Indexed stores
def STR_PRE_REG : AI2stridx_reg<0, 1, (outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rn, am2offset_reg:$offset),
IndexModePre, StFrm, IIC_iStore_ru,
"str", "\t$Rt, [$Rn, $offset]!",
"$Rn = $Rn_wb,@earlyclobber $Rn_wb",
[(set GPR:$Rn_wb,
(pre_store GPR:$Rt, GPR:$Rn, am2offset_reg:$offset))]>;
def STR_PRE_IMM : AI2stridx_imm<0, 1, (outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rn, am2offset_imm:$offset),
IndexModePre, StFrm, IIC_iStore_ru,
"str", "\t$Rt, [$Rn, $offset]!",
"$Rn = $Rn_wb,@earlyclobber $Rn_wb",
[(set GPR:$Rn_wb,
(pre_store GPR:$Rt, GPR:$Rn, am2offset_imm:$offset))]>;
def STR_POST_REG : AI2stridx_reg<0, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rn, am2offset_reg:$offset),
IndexModePost, StFrm, IIC_iStore_ru,
"str", "\t$Rt, [$Rn], $offset",
"$Rn = $Rn_wb,@earlyclobber $Rn_wb",
[(set GPR:$Rn_wb,
(post_store GPR:$Rt, GPR:$Rn, am2offset_reg:$offset))]>;
def STR_POST_IMM : AI2stridx_imm<0, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rn, am2offset_imm:$offset),
IndexModePost, StFrm, IIC_iStore_ru,
"str", "\t$Rt, [$Rn], $offset",
"$Rn = $Rn_wb,@earlyclobber $Rn_wb",
[(set GPR:$Rn_wb,
(post_store GPR:$Rt, GPR:$Rn, am2offset_imm:$offset))]>;
def STRB_PRE_REG : AI2stridx_reg<1, 1, (outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rn, am2offset_reg:$offset),
IndexModePre, StFrm, IIC_iStore_bh_ru,
"strb", "\t$Rt, [$Rn, $offset]!",
"$Rn = $Rn_wb,@earlyclobber $Rn_wb",
[(set GPR:$Rn_wb, (pre_truncsti8 GPR:$Rt,
GPR:$Rn, am2offset_reg:$offset))]>;
def STRB_PRE_IMM : AI2stridx_imm<1, 1, (outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rn, am2offset_imm:$offset),
IndexModePre, StFrm, IIC_iStore_bh_ru,
"strb", "\t$Rt, [$Rn, $offset]!",
"$Rn = $Rn_wb,@earlyclobber $Rn_wb",
[(set GPR:$Rn_wb, (pre_truncsti8 GPR:$Rt,
GPR:$Rn, am2offset_imm:$offset))]>;
def STRB_POST_REG: AI2stridx_reg<1, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rn, am2offset_reg:$offset),
IndexModePost, StFrm, IIC_iStore_bh_ru,
"strb", "\t$Rt, [$Rn], $offset",
"$Rn = $Rn_wb,@earlyclobber $Rn_wb",
[(set GPR:$Rn_wb, (post_truncsti8 GPR:$Rt,
GPR:$Rn, am2offset_reg:$offset))]>;
def STRB_POST_IMM: AI2stridx_imm<1, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rn, am2offset_imm:$offset),
IndexModePost, StFrm, IIC_iStore_bh_ru,
"strb", "\t$Rt, [$Rn], $offset",
"$Rn = $Rn_wb,@earlyclobber $Rn_wb",
[(set GPR:$Rn_wb, (post_truncsti8 GPR:$Rt,
GPR:$Rn, am2offset_imm:$offset))]>;
def STRH_PRE : AI3stridx<0b1011, 0, 1, (outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rn, am3offset:$offset),
IndexModePre, StMiscFrm, IIC_iStore_ru,
"strh", "\t$Rt, [$Rn, $offset]!",
"$Rn = $Rn_wb,@earlyclobber $Rn_wb",
[(set GPR:$Rn_wb,
(pre_truncsti16 GPR:$Rt, GPR:$Rn, am3offset:$offset))]>;
def STRH_POST: AI3stridx<0b1011, 0, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rn, am3offset:$offset),
IndexModePost, StMiscFrm, IIC_iStore_bh_ru,
"strh", "\t$Rt, [$Rn], $offset",
"$Rn = $Rn_wb,@earlyclobber $Rn_wb",
[(set GPR:$Rn_wb, (post_truncsti16 GPR:$Rt,
GPR:$Rn, am3offset:$offset))]>;
// For disassembly only
let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
def STRD_PRE : AI3stdpr<(outs GPR:$base_wb),
(ins GPR:$src1, GPR:$src2, GPR:$base, am3offset:$offset),
StMiscFrm, IIC_iStore_d_ru,
"strd", "\t$src1, $src2, [$base, $offset]!",
"$base = $base_wb", []> {
bits<4> src1;
bits<4> base;
bits<10> offset;
let Inst{23} = offset{8}; // U bit
let Inst{22} = offset{9}; // 1 == imm8, 0 == Rm
let Inst{19-16} = base;
let Inst{15-12} = src1;
let Inst{11-8} = offset{7-4};
let Inst{3-0} = offset{3-0};
let DecoderMethod = "DecodeAddrMode3Instruction";
}
// For disassembly only
def STRD_POST: AI3stdpo<(outs GPR:$base_wb),
(ins GPR:$src1, GPR:$src2, GPR:$base, am3offset:$offset),
StMiscFrm, IIC_iStore_d_ru,
"strd", "\t$src1, $src2, [$base], $offset",
"$base = $base_wb", []> {
bits<4> src1;
bits<4> base;
bits<10> offset;
let Inst{23} = offset{8}; // U bit
let Inst{22} = offset{9}; // 1 == imm8, 0 == Rm
let Inst{19-16} = base;
let Inst{15-12} = src1;
let Inst{11-8} = offset{7-4};
let Inst{3-0} = offset{3-0};
let DecoderMethod = "DecodeAddrMode3Instruction";
}
} // mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1
// STRT, STRBT, and STRHT are for disassembly only.
def STRTr : AI2stridxT<0, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, ldst_so_reg:$addr),
IndexModePost, StFrm, IIC_iStore_ru,
"strt", "\t$Rt, $addr", "$addr.base = $Rn_wb",
[/* For disassembly only; pattern left blank */]> {
let Inst{25} = 1;
let Inst{21} = 1; // overwrite
let Inst{4} = 0;
let AsmMatchConverter = "cvtStWriteBackRegAddrMode2";
}
def STRTi : AI2stridxT<0, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addrmode_imm12:$addr),
IndexModePost, StFrm, IIC_iStore_ru,
"strt", "\t$Rt, $addr", "$addr.base = $Rn_wb",
[/* For disassembly only; pattern left blank */]> {
let Inst{25} = 0;
let Inst{21} = 1; // overwrite
let AsmMatchConverter = "cvtStWriteBackRegAddrMode2";
}
def STRBTr : AI2stridxT<1, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, ldst_so_reg:$addr),
IndexModePost, StFrm, IIC_iStore_bh_ru,
"strbt", "\t$Rt, $addr", "$addr.base = $Rn_wb",
[/* For disassembly only; pattern left blank */]> {
let Inst{25} = 1;
let Inst{21} = 1; // overwrite
let Inst{4} = 0;
let AsmMatchConverter = "cvtStWriteBackRegAddrMode2";
}
def STRBTi : AI2stridxT<1, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addrmode_imm12:$addr),
IndexModePost, StFrm, IIC_iStore_bh_ru,
"strbt", "\t$Rt, $addr", "$addr.base = $Rn_wb",
[/* For disassembly only; pattern left blank */]> {
let Inst{25} = 0;
let Inst{21} = 1; // overwrite
let AsmMatchConverter = "cvtStWriteBackRegAddrMode2";
}
def STRHT: AI3sthpo<(outs GPR:$base_wb), (ins GPR:$Rt, addrmode3:$addr),
StMiscFrm, IIC_iStore_bh_ru,
"strht", "\t$Rt, $addr", "$addr.base = $base_wb",
[/* For disassembly only; pattern left blank */]> {
let Inst{21} = 1; // overwrite
let AsmMatchConverter = "cvtStWriteBackRegAddrMode3";
}
//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//
multiclass arm_ldst_mult<string asm, bit L_bit, Format f,
InstrItinClass itin, InstrItinClass itin_upd> {
  // IA is the default, so no need for an explicit suffix on the
  // mnemonic here. The form without the suffix is the canonical spelling.
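  // For example, "ldm r0, {r1, r2, r3}" and "ldmia r0, {r1, r2, r3}" name the
  // same instruction; the suffix-less form is what these definitions print.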
def IA :
AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeNone, f, itin,
!strconcat(asm, "${p}\t$Rn, $regs"), "", []> {
let Inst{24-23} = 0b01; // Increment After
let Inst{21} = 0; // No writeback
let Inst{20} = L_bit;
}
def IA_UPD :
AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeUpd, f, itin_upd,
!strconcat(asm, "${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
let Inst{24-23} = 0b01; // Increment After
let Inst{21} = 1; // Writeback
let Inst{20} = L_bit;
}
def DA :
AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeNone, f, itin,
!strconcat(asm, "da${p}\t$Rn, $regs"), "", []> {
let Inst{24-23} = 0b00; // Decrement After
let Inst{21} = 0; // No writeback
let Inst{20} = L_bit;
}
def DA_UPD :
AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeUpd, f, itin_upd,
!strconcat(asm, "da${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
let Inst{24-23} = 0b00; // Decrement After
let Inst{21} = 1; // Writeback
let Inst{20} = L_bit;
}
def DB :
AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeNone, f, itin,
!strconcat(asm, "db${p}\t$Rn, $regs"), "", []> {
let Inst{24-23} = 0b10; // Decrement Before
let Inst{21} = 0; // No writeback
let Inst{20} = L_bit;
}
def DB_UPD :
AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeUpd, f, itin_upd,
!strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
let Inst{24-23} = 0b10; // Decrement Before
let Inst{21} = 1; // Writeback
let Inst{20} = L_bit;
}
def IB :
AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeNone, f, itin,
!strconcat(asm, "ib${p}\t$Rn, $regs"), "", []> {
let Inst{24-23} = 0b11; // Increment Before
let Inst{21} = 0; // No writeback
let Inst{20} = L_bit;
}
def IB_UPD :
AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeUpd, f, itin_upd,
!strconcat(asm, "ib${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
let Inst{24-23} = 0b11; // Increment Before
let Inst{21} = 1; // Writeback
let Inst{20} = L_bit;
}
}
let neverHasSideEffects = 1 in {
let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm LDM : arm_ldst_mult<"ldm", 1, LdStMulFrm, IIC_iLoad_m, IIC_iLoad_mu>;
let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
defm STM : arm_ldst_mult<"stm", 0, LdStMulFrm, IIC_iStore_m, IIC_iStore_mu>;
} // neverHasSideEffects
// FIXME: remove when we have a way of marking an MI with these properties.
// FIXME: Should pc be an implicit operand like PICADD, etc?
let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1,
hasExtraDefRegAllocReq = 1, isCodeGenOnly = 1 in
def LDMIA_RET : ARMPseudoExpand<(outs GPR:$wb), (ins GPR:$Rn, pred:$p,
reglist:$regs, variable_ops),
4, IIC_iLoad_mBr, [],
(LDMIA_UPD GPR:$wb, GPR:$Rn, pred:$p, reglist:$regs)>,
RegConstraint<"$Rn = $wb">;
//===----------------------------------------------------------------------===//
// Move Instructions.
//
let neverHasSideEffects = 1 in
def MOVr : AsI1<0b1101, (outs GPR:$Rd), (ins GPR:$Rm), DPFrm, IIC_iMOVr,
"mov", "\t$Rd, $Rm", []>, UnaryDP {
bits<4> Rd;
bits<4> Rm;
let Inst{19-16} = 0b0000;
let Inst{11-4} = 0b00000000;
let Inst{25} = 0;
let Inst{3-0} = Rm;
let Inst{15-12} = Rd;
}
// A version for the smaller set of tail call registers.
let neverHasSideEffects = 1 in
def MOVr_TC : AsI1<0b1101, (outs tcGPR:$Rd), (ins tcGPR:$Rm), DPFrm,
IIC_iMOVr, "mov", "\t$Rd, $Rm", []>, UnaryDP {
bits<4> Rd;
bits<4> Rm;
let Inst{11-4} = 0b00000000;
let Inst{25} = 0;
let Inst{3-0} = Rm;
let Inst{15-12} = Rd;
}
def MOVsr : AsI1<0b1101, (outs GPR:$Rd), (ins shift_so_reg_reg:$src),
DPSoRegRegFrm, IIC_iMOVsr,
"mov", "\t$Rd, $src", [(set GPR:$Rd, shift_so_reg_reg:$src)]>,
UnaryDP {
bits<4> Rd;
bits<12> src;
let Inst{15-12} = Rd;
let Inst{19-16} = 0b0000;
let Inst{11-8} = src{11-8};
let Inst{7} = 0;
let Inst{6-5} = src{6-5};
let Inst{4} = 1;
let Inst{3-0} = src{3-0};
let Inst{25} = 0;
}
def MOVsi : AsI1<0b1101, (outs GPR:$Rd), (ins shift_so_reg_imm:$src),
DPSoRegImmFrm, IIC_iMOVsr,
"mov", "\t$Rd, $src", [(set GPR:$Rd, shift_so_reg_imm:$src)]>,
UnaryDP {
bits<4> Rd;
bits<12> src;
let Inst{15-12} = Rd;
let Inst{19-16} = 0b0000;
let Inst{11-5} = src{11-5};
let Inst{4} = 0;
let Inst{3-0} = src{3-0};
let Inst{25} = 0;
}
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
def MOVi : AsI1<0b1101, (outs GPR:$Rd), (ins so_imm:$imm), DPFrm, IIC_iMOVi,
"mov", "\t$Rd, $imm", [(set GPR:$Rd, so_imm:$imm)]>, UnaryDP {
bits<4> Rd;
bits<12> imm;
let Inst{25} = 1;
let Inst{15-12} = Rd;
let Inst{19-16} = 0b0000;
let Inst{11-0} = imm;
}
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
def MOVi16 : AI1<0b1000, (outs GPR:$Rd), (ins imm0_65535_expr:$imm),
DPFrm, IIC_iMOVi,
"movw", "\t$Rd, $imm",
[(set GPR:$Rd, imm0_65535:$imm)]>,
Requires<[IsARM, HasV6T2]>, UnaryDP {
bits<4> Rd;
bits<16> imm;
let Inst{15-12} = Rd;
let Inst{11-0} = imm{11-0};
let Inst{19-16} = imm{15-12};
let Inst{20} = 0;
let Inst{25} = 1;
}
def : InstAlias<"mov${p} $Rd, $imm",
(MOVi16 GPR:$Rd, imm0_65535_expr:$imm, pred:$p)>,
Requires<[IsARM]>;
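// For example, on a target with MOVW (v6t2 and later), "mov r0, #0x1234" --
// an immediate that no rotated 8-bit encoding can represent -- can be
// accepted through this alias and encoded as "movw r0, #0x1234".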
def MOVi16_ga_pcrel : PseudoInst<(outs GPR:$Rd),
(ins i32imm:$addr, pclabel:$id), IIC_iMOVi, []>;
let Constraints = "$src = $Rd" in {
def MOVTi16 : AI1<0b1010, (outs GPR:$Rd), (ins GPR:$src, imm0_65535_expr:$imm),
DPFrm, IIC_iMOVi,
"movt", "\t$Rd, $imm",
[(set GPR:$Rd,
(or (and GPR:$src, 0xffff),
lo16AllZero:$imm))]>, UnaryDP,
Requires<[IsARM, HasV6T2]> {
bits<4> Rd;
bits<16> imm;
let Inst{15-12} = Rd;
let Inst{11-0} = imm{11-0};
let Inst{19-16} = imm{15-12};
let Inst{20} = 0;
let Inst{25} = 1;
}
def MOVTi16_ga_pcrel : PseudoInst<(outs GPR:$Rd),
(ins GPR:$src, i32imm:$addr, pclabel:$id), IIC_iMOVi, []>;
} // Constraints
def : ARMPat<(or GPR:$src, 0xffff0000), (MOVTi16 GPR:$src, 0xffff)>,
Requires<[IsARM, HasV6T2]>;
let Uses = [CPSR] in
def RRX: PseudoInst<(outs GPR:$Rd), (ins GPR:$Rm), IIC_iMOVsi,
[(set GPR:$Rd, (ARMrrx GPR:$Rm))]>, UnaryDP,
Requires<[IsARM]>;
// These aren't really mov instructions, but we have to define them this way
// due to flag operands.
let Defs = [CPSR] in {
def MOVsrl_flag : PseudoInst<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVsi,
[(set GPR:$dst, (ARMsrl_flag GPR:$src))]>, UnaryDP,
Requires<[IsARM]>;
def MOVsra_flag : PseudoInst<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVsi,
[(set GPR:$dst, (ARMsra_flag GPR:$src))]>, UnaryDP,
Requires<[IsARM]>;
}
//===----------------------------------------------------------------------===//
// Extend Instructions.
//
// Sign extenders
def SXTB : AI_ext_rrot<0b01101010,
"sxtb", UnOpFrag<(sext_inreg node:$Src, i8)>>;
def SXTH : AI_ext_rrot<0b01101011,
"sxth", UnOpFrag<(sext_inreg node:$Src, i16)>>;
def SXTAB : AI_exta_rrot<0b01101010,
"sxtab", BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS, i8))>>;
def SXTAH : AI_exta_rrot<0b01101011,
"sxtah", BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS,i16))>>;
def SXTB16 : AI_ext_rrot_np<0b01101000, "sxtb16">;
def SXTAB16 : AI_exta_rrot_np<0b01101000, "sxtab16">;
// Zero extenders
let AddedComplexity = 16 in {
def UXTB : AI_ext_rrot<0b01101110,
"uxtb" , UnOpFrag<(and node:$Src, 0x000000FF)>>;
def UXTH : AI_ext_rrot<0b01101111,
"uxth" , UnOpFrag<(and node:$Src, 0x0000FFFF)>>;
def UXTB16 : AI_ext_rrot<0b01101100,
"uxtb16", UnOpFrag<(and node:$Src, 0x00FF00FF)>>;
// FIXME: This pattern incorrectly assumes the shl operator is a rotate.
// The transformation should probably be done as a combiner action
// instead so we can include a check for masking back in the upper
// eight bits of the source into the lower eight bits of the result.
//def : ARMV6Pat<(and (shl GPR:$Src, (i32 8)), 0xFF00FF),
// (UXTB16r_rot GPR:$Src, 3)>;
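// For example, with $Src = 0xAA000000 the shl form above yields 0, while
// uxtb16 with a rotation of 24 yields 0x000000AA, so the two are not
// equivalent in general.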
def : ARMV6Pat<(and (srl GPR:$Src, (i32 8)), 0xFF00FF),
(UXTB16 GPR:$Src, 1)>;
def UXTAB : AI_exta_rrot<0b01101110, "uxtab",
BinOpFrag<(add node:$LHS, (and node:$RHS, 0x00FF))>>;
def UXTAH : AI_exta_rrot<0b01101111, "uxtah",
BinOpFrag<(add node:$LHS, (and node:$RHS, 0xFFFF))>>;
}
// This isn't safe in general; the add operates on two 16-bit units, not as a
// single 32-bit add.
def UXTAB16 : AI_exta_rrot_np<0b01101100, "uxtab16">;
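// For example, with Rn = 0x0000FFFF and Rm = 0x00000001, uxtab16 produces
// 0x00000000 (the low halfword wraps with no carry into the high halfword),
// whereas a 32-bit add would produce 0x00010000.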
def SBFX : I<(outs GPR:$Rd),
(ins GPR:$Rn, imm0_31:$lsb, imm1_32:$width),
AddrMode1, 4, IndexModeNone, DPFrm, IIC_iUNAsi,
"sbfx", "\t$Rd, $Rn, $lsb, $width", "", []>,
Requires<[IsARM, HasV6T2]> {
bits<4> Rd;
bits<4> Rn;
bits<5> lsb;
bits<5> width;
let Inst{27-21} = 0b0111101;
let Inst{6-4} = 0b101;
let Inst{20-16} = width;
let Inst{15-12} = Rd;
let Inst{11-7} = lsb;
let Inst{3-0} = Rn;
}
def UBFX : I<(outs GPR:$Rd),
(ins GPR:$Rn, imm0_31:$lsb, imm1_32:$width),
AddrMode1, 4, IndexModeNone, DPFrm, IIC_iUNAsi,
"ubfx", "\t$Rd, $Rn, $lsb, $width", "", []>,
Requires<[IsARM, HasV6T2]> {
bits<4> Rd;
bits<4> Rn;
bits<5> lsb;
bits<5> width;
let Inst{27-21} = 0b0111111;
let Inst{6-4} = 0b101;
let Inst{20-16} = width;
let Inst{15-12} = Rd;
let Inst{11-7} = lsb;
let Inst{3-0} = Rn;
}
//===----------------------------------------------------------------------===//
// Arithmetic Instructions.
//
defm ADD : AsI1_bin_irs<0b0100, "add",
IIC_iALUi, IIC_iALUr, IIC_iALUsr,
BinOpFrag<(add node:$LHS, node:$RHS)>, "ADD", 1>;
defm SUB : AsI1_bin_irs<0b0010, "sub",
IIC_iALUi, IIC_iALUr, IIC_iALUsr,
BinOpFrag<(sub node:$LHS, node:$RHS)>, "SUB">;
// ADD and SUB with 's' bit set.
defm ADDS : AI1_bin_s_irs<0b0100, "adds",
IIC_iALUi, IIC_iALUr, IIC_iALUsr,
BinOpFrag<(addc node:$LHS, node:$RHS)>, 1>;
defm SUBS : AI1_bin_s_irs<0b0010, "subs",
IIC_iALUi, IIC_iALUr, IIC_iALUsr,
BinOpFrag<(subc node:$LHS, node:$RHS)>>;
defm ADC : AI1_adde_sube_irs<0b0101, "adc",
BinOpFrag<(adde_dead_carry node:$LHS, node:$RHS)>,
"ADC", 1>;
defm SBC : AI1_adde_sube_irs<0b0110, "sbc",
BinOpFrag<(sube_dead_carry node:$LHS, node:$RHS)>,
"SBC">;
// ADC and SBC with 's' bit set.
let usesCustomInserter = 1 in {
defm ADCS : AI1_adde_sube_s_irs<
BinOpFrag<(adde_live_carry node:$LHS, node:$RHS)>, 1>;
defm SBCS : AI1_adde_sube_s_irs<
BinOpFrag<(sube_live_carry node:$LHS, node:$RHS) >>;
}
def RSBri : AsI1<0b0011, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm,
IIC_iALUi, "rsb", "\t$Rd, $Rn, $imm",
[(set GPR:$Rd, (sub so_imm:$imm, GPR:$Rn))]> {
bits<4> Rd;
bits<4> Rn;
bits<12> imm;
let Inst{25} = 1;
let Inst{15-12} = Rd;
let Inst{19-16} = Rn;
let Inst{11-0} = imm;
}
// The reg/reg form is only defined for the disassembler; for codegen it is
// equivalent to SUBrr with the operands swapped.
def RSBrr : AsI1<0b0011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm,
IIC_iALUr, "rsb", "\t$Rd, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
let Inst{11-4} = 0b00000000;
let Inst{25} = 0;
let Inst{3-0} = Rm;
let Inst{15-12} = Rd;
let Inst{19-16} = Rn;
}
def RSBrsi : AsI1<0b0011, (outs GPR:$Rd), (ins GPR:$Rn, so_reg_imm:$shift),
DPSoRegImmFrm, IIC_iALUsr, "rsb", "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, (sub so_reg_imm:$shift, GPR:$Rn))]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-5} = shift{11-5};
let Inst{4} = 0;
let Inst{3-0} = shift{3-0};
}
def RSBrsr : AsI1<0b0011, (outs GPR:$Rd), (ins GPR:$Rn, so_reg_reg:$shift),
DPSoRegRegFrm, IIC_iALUsr, "rsb", "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, (sub so_reg_reg:$shift, GPR:$Rn))]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-8} = shift{11-8};
let Inst{7} = 0;
let Inst{6-5} = shift{6-5};
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
}
// RSB with 's' bit set.
// NOTE: CPSR def omitted because it will be handled by the custom inserter.
let usesCustomInserter = 1 in {
def RSBSri : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm),
4, IIC_iALUi,
[(set GPR:$Rd, (subc so_imm:$imm, GPR:$Rn))]>;
def RSBSrr : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
4, IIC_iALUr,
[/* For disassembly only; pattern left blank */]>;
def RSBSrsi : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_reg_imm:$shift),
4, IIC_iALUsr,
[(set GPR:$Rd, (subc so_reg_imm:$shift, GPR:$Rn))]>;
def RSBSrsr : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_reg_reg:$shift),
4, IIC_iALUsr,
[(set GPR:$Rd, (subc so_reg_reg:$shift, GPR:$Rn))]>;
}
let Uses = [CPSR] in {
def RSCri : AsI1<0b0111, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm),
DPFrm, IIC_iALUi, "rsc", "\t$Rd, $Rn, $imm",
[(set GPR:$Rd, (sube_dead_carry so_imm:$imm, GPR:$Rn))]>,
Requires<[IsARM]> {
bits<4> Rd;
bits<4> Rn;
bits<12> imm;
let Inst{25} = 1;
let Inst{15-12} = Rd;
let Inst{19-16} = Rn;
let Inst{11-0} = imm;
}
// The reg/reg form is only defined for the disassembler; for codegen it is
// equivalent to SBCrr with the operands swapped.
def RSCrr : AsI1<0b0111, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
DPFrm, IIC_iALUr, "rsc", "\t$Rd, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
let Inst{11-4} = 0b00000000;
let Inst{25} = 0;
let Inst{3-0} = Rm;
let Inst{15-12} = Rd;
let Inst{19-16} = Rn;
}
def RSCrsi : AsI1<0b0111, (outs GPR:$Rd), (ins GPR:$Rn, so_reg_imm:$shift),
DPSoRegImmFrm, IIC_iALUsr, "rsc", "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, (sube_dead_carry so_reg_imm:$shift, GPR:$Rn))]>,
Requires<[IsARM]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-5} = shift{11-5};
let Inst{4} = 0;
let Inst{3-0} = shift{3-0};
}
def RSCrsr : AsI1<0b0111, (outs GPR:$Rd), (ins GPR:$Rn, so_reg_reg:$shift),
DPSoRegRegFrm, IIC_iALUsr, "rsc", "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, (sube_dead_carry so_reg_reg:$shift, GPR:$Rn))]>,
Requires<[IsARM]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-8} = shift{11-8};
let Inst{7} = 0;
let Inst{6-5} = shift{6-5};
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
}
}
// NOTE: CPSR def omitted because it will be handled by the custom inserter.
let usesCustomInserter = 1, Uses = [CPSR] in {
def RSCSri : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm),
4, IIC_iALUi,
[(set GPR:$Rd, (sube_dead_carry so_imm:$imm, GPR:$Rn))]>;
def RSCSrsi : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_reg_imm:$shift),
4, IIC_iALUsr,
[(set GPR:$Rd, (sube_dead_carry so_reg_imm:$shift, GPR:$Rn))]>;
def RSCSrsr : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_reg_reg:$shift),
4, IIC_iALUsr,
[(set GPR:$Rd, (sube_dead_carry so_reg_reg:$shift, GPR:$Rn))]>;
}
// (sub X, imm) gets canonicalized to (add X, -imm). Match this form.
// The assume-no-carry-in form uses the negation of the input since add/sub
// assume opposite meanings of the carry flag (i.e., carry == !borrow).
// See the definition of AddWithCarry() in the ARM ARM A2.2.1 for the gory
// details.
def : ARMPat<(add GPR:$src, so_imm_neg:$imm),
(SUBri GPR:$src, so_imm_neg:$imm)>;
def : ARMPat<(addc GPR:$src, so_imm_neg:$imm),
(SUBSri GPR:$src, so_imm_neg:$imm)>;
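// For example, (add r0, #-42) is selected as "sub r0, r0, #42": so_imm_neg
// matches because 42 is a valid rotated immediate, and its transform supplies
// the negated value to SUBri.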
// The with-carry-in form matches bitwise not instead of the negation.
// Effectively, the inverse interpretation of the carry flag already accounts
// for part of the negation.
def : ARMPat<(adde_dead_carry GPR:$src, so_imm_not:$imm),
(SBCri GPR:$src, so_imm_not:$imm)>;
def : ARMPat<(adde_live_carry GPR:$src, so_imm_not:$imm),
(SBCSri GPR:$src, so_imm_not:$imm)>;
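// For example, (adde r0, 0xfffffff5), i.e. r0 + ~10 + carry, is selected as
// "sbc r0, r0, #10", since r0 + ~10 + carry == r0 - 10 - !carry, which is
// exactly what SBC computes.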
// Note: These are implemented in C++ code, because they have to generate
// ADD/SUBrs instructions, which use a complex pattern that a xform function
// cannot produce.
// (mul X, 2^n+1) -> (add (X << n), X)
// (mul X, 2^n-1) -> (rsb X, (X << n))
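// For example, (mul r1, 9) becomes "add r0, r1, r1, lsl #3" (9 == 2^3+1),
// and (mul r1, 7) becomes "rsb r0, r1, r1, lsl #3" (7 == 2^3-1).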
// ARM Arithmetic Instruction
// GPR:$Rd = GPR:$Rn op GPR:$Rm
class AAI<bits<8> op27_20, bits<8> op11_4, string opc,
list<dag> pattern = [],
dag iops = (ins GPR:$Rn, GPR:$Rm), string asm = "\t$Rd, $Rn, $Rm">
: AI<(outs GPR:$Rd), iops, DPFrm, IIC_iALUr, opc, asm, pattern> {
bits<4> Rn;
bits<4> Rd;
bits<4> Rm;
let Inst{27-20} = op27_20;
let Inst{11-4} = op11_4;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{3-0} = Rm;
}
// Saturating add/subtract
def QADD : AAI<0b00010000, 0b00000101, "qadd",
[(set GPR:$Rd, (int_arm_qadd GPR:$Rm, GPR:$Rn))],
(ins GPR:$Rm, GPR:$Rn), "\t$Rd, $Rm, $Rn">;
def QSUB : AAI<0b00010010, 0b00000101, "qsub",
[(set GPR:$Rd, (int_arm_qsub GPR:$Rm, GPR:$Rn))],
(ins GPR:$Rm, GPR:$Rn), "\t$Rd, $Rm, $Rn">;
def QDADD : AAI<0b00010100, 0b00000101, "qdadd", [], (ins GPR:$Rm, GPR:$Rn),
"\t$Rd, $Rm, $Rn">;
def QDSUB : AAI<0b00010110, 0b00000101, "qdsub", [], (ins GPR:$Rm, GPR:$Rn),
"\t$Rd, $Rm, $Rn">;
def QADD16 : AAI<0b01100010, 0b11110001, "qadd16">;
def QADD8 : AAI<0b01100010, 0b11111001, "qadd8">;
def QASX : AAI<0b01100010, 0b11110011, "qasx">;
def QSAX : AAI<0b01100010, 0b11110101, "qsax">;
def QSUB16 : AAI<0b01100010, 0b11110111, "qsub16">;
def QSUB8 : AAI<0b01100010, 0b11111111, "qsub8">;
def UQADD16 : AAI<0b01100110, 0b11110001, "uqadd16">;
def UQADD8 : AAI<0b01100110, 0b11111001, "uqadd8">;
def UQASX : AAI<0b01100110, 0b11110011, "uqasx">;
def UQSAX : AAI<0b01100110, 0b11110101, "uqsax">;
def UQSUB16 : AAI<0b01100110, 0b11110111, "uqsub16">;
def UQSUB8 : AAI<0b01100110, 0b11111111, "uqsub8">;
// Signed/Unsigned add/subtract
def SASX : AAI<0b01100001, 0b11110011, "sasx">;
def SADD16 : AAI<0b01100001, 0b11110001, "sadd16">;
def SADD8 : AAI<0b01100001, 0b11111001, "sadd8">;
def SSAX : AAI<0b01100001, 0b11110101, "ssax">;
def SSUB16 : AAI<0b01100001, 0b11110111, "ssub16">;
def SSUB8 : AAI<0b01100001, 0b11111111, "ssub8">;
def UASX : AAI<0b01100101, 0b11110011, "uasx">;
def UADD16 : AAI<0b01100101, 0b11110001, "uadd16">;
def UADD8 : AAI<0b01100101, 0b11111001, "uadd8">;
def USAX : AAI<0b01100101, 0b11110101, "usax">;
def USUB16 : AAI<0b01100101, 0b11110111, "usub16">;
def USUB8 : AAI<0b01100101, 0b11111111, "usub8">;
// Signed/Unsigned halving add/subtract
def SHASX : AAI<0b01100011, 0b11110011, "shasx">;
def SHADD16 : AAI<0b01100011, 0b11110001, "shadd16">;
def SHADD8 : AAI<0b01100011, 0b11111001, "shadd8">;
def SHSAX : AAI<0b01100011, 0b11110101, "shsax">;
def SHSUB16 : AAI<0b01100011, 0b11110111, "shsub16">;
def SHSUB8 : AAI<0b01100011, 0b11111111, "shsub8">;
def UHASX : AAI<0b01100111, 0b11110011, "uhasx">;
def UHADD16 : AAI<0b01100111, 0b11110001, "uhadd16">;
def UHADD8 : AAI<0b01100111, 0b11111001, "uhadd8">;
def UHSAX : AAI<0b01100111, 0b11110101, "uhsax">;
def UHSUB16 : AAI<0b01100111, 0b11110111, "uhsub16">;
def UHSUB8 : AAI<0b01100111, 0b11111111, "uhsub8">;
// Unsigned Sum of Absolute Differences [and Accumulate] -- for disassembly only
def USAD8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
MulFrm /* for convenience */, NoItinerary, "usad8",
"\t$Rd, $Rn, $Rm", []>,
Requires<[IsARM, HasV6]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
let Inst{27-20} = 0b01111000;
let Inst{15-12} = 0b1111;
let Inst{7-4} = 0b0001;
let Inst{19-16} = Rd;
let Inst{11-8} = Rm;
let Inst{3-0} = Rn;
}
def USADA8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
MulFrm /* for convenience */, NoItinerary, "usada8",
"\t$Rd, $Rn, $Rm, $Ra", []>,
Requires<[IsARM, HasV6]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
bits<4> Ra;
let Inst{27-20} = 0b01111000;
let Inst{7-4} = 0b0001;
let Inst{19-16} = Rd;
let Inst{15-12} = Ra;
let Inst{11-8} = Rm;
let Inst{3-0} = Rn;
}
// Signed/Unsigned saturate -- for disassembly only
def SSAT : AI<(outs GPR:$Rd), (ins imm1_32:$sat_imm, GPR:$Rn, shift_imm:$sh),
SatFrm, NoItinerary, "ssat", "\t$Rd, $sat_imm, $Rn$sh", []> {
bits<4> Rd;
bits<5> sat_imm;
bits<4> Rn;
bits<8> sh;
let Inst{27-21} = 0b0110101;
let Inst{5-4} = 0b01;
let Inst{20-16} = sat_imm;
let Inst{15-12} = Rd;
let Inst{11-7} = sh{4-0};
let Inst{6} = sh{5};
let Inst{3-0} = Rn;
}
def SSAT16 : AI<(outs GPR:$Rd), (ins imm1_16:$sat_imm, GPR:$Rn), SatFrm,
NoItinerary, "ssat16", "\t$Rd, $sat_imm, $Rn", []> {
bits<4> Rd;
bits<4> sat_imm;
bits<4> Rn;
let Inst{27-20} = 0b01101010;
let Inst{11-4} = 0b11110011;
let Inst{15-12} = Rd;
let Inst{19-16} = sat_imm;
let Inst{3-0} = Rn;
}
def USAT : AI<(outs GPR:$Rd), (ins imm0_31:$sat_imm, GPR:$Rn, shift_imm:$sh),
SatFrm, NoItinerary, "usat", "\t$Rd, $sat_imm, $Rn$sh", []> {
bits<4> Rd;
bits<5> sat_imm;
bits<4> Rn;
bits<8> sh;
let Inst{27-21} = 0b0110111;
let Inst{5-4} = 0b01;
let Inst{15-12} = Rd;
let Inst{11-7} = sh{4-0};
let Inst{6} = sh{5};
let Inst{20-16} = sat_imm;
let Inst{3-0} = Rn;
}
def USAT16 : AI<(outs GPR:$Rd), (ins imm0_15:$sat_imm, GPR:$Rn), SatFrm,
                NoItinerary, "usat16", "\t$Rd, $sat_imm, $Rn",
[/* For disassembly only; pattern left blank */]> {
bits<4> Rd;
bits<4> sat_imm;
bits<4> Rn;
let Inst{27-20} = 0b01101110;
let Inst{11-4} = 0b11110011;
let Inst{15-12} = Rd;
let Inst{19-16} = sat_imm;
let Inst{3-0} = Rn;
}
def : ARMV6Pat<(int_arm_ssat GPR:$a, imm:$pos), (SSAT imm:$pos, GPR:$a, 0)>;
def : ARMV6Pat<(int_arm_usat GPR:$a, imm:$pos), (USAT imm:$pos, GPR:$a, 0)>;
//===----------------------------------------------------------------------===//
// Bitwise Instructions.
//
defm AND : AsI1_bin_irs<0b0000, "and",
IIC_iBITi, IIC_iBITr, IIC_iBITsr,
BinOpFrag<(and node:$LHS, node:$RHS)>, "AND", 1>;
defm ORR : AsI1_bin_irs<0b1100, "orr",
IIC_iBITi, IIC_iBITr, IIC_iBITsr,
BinOpFrag<(or node:$LHS, node:$RHS)>, "ORR", 1>;
defm EOR : AsI1_bin_irs<0b0001, "eor",
IIC_iBITi, IIC_iBITr, IIC_iBITsr,
BinOpFrag<(xor node:$LHS, node:$RHS)>, "EOR", 1>;
defm BIC : AsI1_bin_irs<0b1110, "bic",
IIC_iBITi, IIC_iBITr, IIC_iBITsr,
BinOpFrag<(and node:$LHS, (not node:$RHS))>, "BIC">;
// FIXME: bf_inv_mask_imm should be two operands, the lsb and the msb, just
// like in the actual instruction encoding. The complexity of mapping the mask
// to the lsb/msb pair should be handled by ISel, not encapsulated in the
// instruction description.
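// For example, an AND with the inverted mask 0xffff00ff clears bits 8-15 and
// is selected as "bfc Rd, #8, #8" (lsb 8, width 8, i.e. msb 15 in the
// encoding).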
def BFC : I<(outs GPR:$Rd), (ins GPR:$src, bf_inv_mask_imm:$imm),
AddrMode1, 4, IndexModeNone, DPFrm, IIC_iUNAsi,
"bfc", "\t$Rd, $imm", "$src = $Rd",
[(set GPR:$Rd, (and GPR:$src, bf_inv_mask_imm:$imm))]>,
Requires<[IsARM, HasV6T2]> {
bits<4> Rd;
bits<10> imm;
let Inst{27-21} = 0b0111110;
let Inst{6-0} = 0b0011111;
let Inst{15-12} = Rd;
let Inst{11-7} = imm{4-0}; // lsb
let Inst{20-16} = imm{9-5}; // msb
}
// A8.6.18 BFI - Bitfield insert (Encoding A1)
def BFI : I<(outs GPR:$Rd), (ins GPR:$src, GPR:$Rn, bf_inv_mask_imm:$imm),
AddrMode1, 4, IndexModeNone, DPFrm, IIC_iUNAsi,
"bfi", "\t$Rd, $Rn, $imm", "$src = $Rd",
[(set GPR:$Rd, (ARMbfi GPR:$src, GPR:$Rn,
bf_inv_mask_imm:$imm))]>,
Requires<[IsARM, HasV6T2]> {
bits<4> Rd;
bits<4> Rn;
bits<10> imm;
let Inst{27-21} = 0b0111110;
let Inst{6-4} = 0b001; // Rn: Inst{3-0} != 15
let Inst{15-12} = Rd;
let Inst{11-7} = imm{4-0}; // lsb
let Inst{20-16} = imm{9-5}; // width
let Inst{3-0} = Rn;
}
// GNU as only supports this form of bfi (w/ 4 arguments)
let isAsmParserOnly = 1 in
def BFI4p : I<(outs GPR:$Rd), (ins GPR:$src, GPR:$Rn,
lsb_pos_imm:$lsb, width_imm:$width),
AddrMode1, 4, IndexModeNone, DPFrm, IIC_iUNAsi,
"bfi", "\t$Rd, $Rn, $lsb, $width", "$src = $Rd",
[]>, Requires<[IsARM, HasV6T2]> {
bits<4> Rd;
bits<4> Rn;
bits<5> lsb;
bits<5> width;
let Inst{27-21} = 0b0111110;
let Inst{6-4} = 0b001; // Rn: Inst{3-0} != 15
let Inst{15-12} = Rd;
let Inst{11-7} = lsb;
let Inst{20-16} = width; // Custom encoder => lsb+width-1
let Inst{3-0} = Rn;
}
def MVNr : AsI1<0b1111, (outs GPR:$Rd), (ins GPR:$Rm), DPFrm, IIC_iMVNr,
"mvn", "\t$Rd, $Rm",
[(set GPR:$Rd, (not GPR:$Rm))]>, UnaryDP {
bits<4> Rd;
bits<4> Rm;
let Inst{25} = 0;
let Inst{19-16} = 0b0000;
let Inst{11-4} = 0b00000000;
let Inst{15-12} = Rd;
let Inst{3-0} = Rm;
}
def MVNsi : AsI1<0b1111, (outs GPR:$Rd), (ins so_reg_imm:$shift), DPSoRegImmFrm,
IIC_iMVNsr, "mvn", "\t$Rd, $shift",
[(set GPR:$Rd, (not so_reg_imm:$shift))]>, UnaryDP {
bits<4> Rd;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = 0b0000;
let Inst{15-12} = Rd;
let Inst{11-5} = shift{11-5};
let Inst{4} = 0;
let Inst{3-0} = shift{3-0};
}
def MVNsr : AsI1<0b1111, (outs GPR:$Rd), (ins so_reg_reg:$shift), DPSoRegRegFrm,
IIC_iMVNsr, "mvn", "\t$Rd, $shift",
[(set GPR:$Rd, (not so_reg_reg:$shift))]>, UnaryDP {
bits<4> Rd;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = 0b0000;
let Inst{15-12} = Rd;
let Inst{11-8} = shift{11-8};
let Inst{7} = 0;
let Inst{6-5} = shift{6-5};
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
}
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
def MVNi : AsI1<0b1111, (outs GPR:$Rd), (ins so_imm:$imm), DPFrm,
IIC_iMVNi, "mvn", "\t$Rd, $imm",
[(set GPR:$Rd, so_imm_not:$imm)]>,UnaryDP {
bits<4> Rd;
bits<12> imm;
let Inst{25} = 1;
let Inst{19-16} = 0b0000;
let Inst{15-12} = Rd;
let Inst{11-0} = imm;
}
def : ARMPat<(and GPR:$src, so_imm_not:$imm),
(BICri GPR:$src, so_imm_not:$imm)>;
//===----------------------------------------------------------------------===//
// Multiply Instructions.
//
class AsMul1I32<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: AsMul1I<opcod, oops, iops, itin, opc, asm, pattern> {
bits<4> Rd;
bits<4> Rm;
bits<4> Rn;
let Inst{19-16} = Rd;
let Inst{11-8} = Rm;
let Inst{3-0} = Rn;
}
class AsMul1I64<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: AsMul1I<opcod, oops, iops, itin, opc, asm, pattern> {
bits<4> RdLo;
bits<4> RdHi;
bits<4> Rm;
bits<4> Rn;
let Inst{19-16} = RdHi;
let Inst{15-12} = RdLo;
let Inst{11-8} = Rm;
let Inst{3-0} = Rn;
}
// FIXME: The v5 pseudos are only necessary for the additional Constraint
// property. Remove them when it's possible to add those properties
// to an individual MachineInstr, not just an instruction description.
let isCommutable = 1 in {
def MUL : AsMul1I32<0b0000000, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL32, "mul", "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (mul GPR:$Rn, GPR:$Rm))]>,
Requires<[IsARM, HasV6]> {
let Inst{15-12} = 0b0000;
}
let Constraints = "@earlyclobber $Rd" in
def MULv5: ARMPseudoExpand<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm,
pred:$p, cc_out:$s),
4, IIC_iMUL32,
[(set GPR:$Rd, (mul GPR:$Rn, GPR:$Rm))],
(MUL GPR:$Rd, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
}
def MLA : AsMul1I32<0b0000001, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC32, "mla", "\t$Rd, $Rn, $Rm, $Ra",
[(set GPR:$Rd, (add (mul GPR:$Rn, GPR:$Rm), GPR:$Ra))]>,
Requires<[IsARM, HasV6]> {
bits<4> Ra;
let Inst{15-12} = Ra;
}
let Constraints = "@earlyclobber $Rd" in
def MLAv5: ARMPseudoExpand<(outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra, pred:$p, cc_out:$s),
4, IIC_iMAC32,
[(set GPR:$Rd, (add (mul GPR:$Rn, GPR:$Rm), GPR:$Ra))],
(MLA GPR:$Rd, GPR:$Rn, GPR:$Rm, GPR:$Ra, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
def MLS : AMul1I<0b0000011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC32, "mls", "\t$Rd, $Rn, $Rm, $Ra",
[(set GPR:$Rd, (sub GPR:$Ra, (mul GPR:$Rn, GPR:$Rm)))]>,
Requires<[IsARM, HasV6T2]> {
bits<4> Rd;
bits<4> Rm;
bits<4> Rn;
bits<4> Ra;
let Inst{19-16} = Rd;
let Inst{15-12} = Ra;
let Inst{11-8} = Rm;
let Inst{3-0} = Rn;
}
// Extra precision multiplies with low / high results
let neverHasSideEffects = 1 in {
let isCommutable = 1 in {
def SMULL : AsMul1I64<0b0000110, (outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm), IIC_iMUL64,
"smull", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
Requires<[IsARM, HasV6]>;
def UMULL : AsMul1I64<0b0000100, (outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm), IIC_iMUL64,
"umull", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
Requires<[IsARM, HasV6]>;
let Constraints = "@earlyclobber $RdLo,@earlyclobber $RdHi" in {
def SMULLv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s),
4, IIC_iMUL64, [],
(SMULL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
def UMULLv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s),
4, IIC_iMUL64, [],
(UMULL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
}
}
// Multiply + accumulate
def SMLAL : AsMul1I64<0b0000111, (outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm), IIC_iMAC64,
"smlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
Requires<[IsARM, HasV6]>;
def UMLAL : AsMul1I64<0b0000101, (outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm), IIC_iMAC64,
"umlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
Requires<[IsARM, HasV6]>;
def UMAAL : AMul1I <0b0000010, (outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm), IIC_iMAC64,
"umaal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
Requires<[IsARM, HasV6]> {
bits<4> RdLo;
bits<4> RdHi;
bits<4> Rm;
bits<4> Rn;
let Inst{19-16} = RdLo;
let Inst{15-12} = RdHi;
let Inst{11-8} = Rm;
let Inst{3-0} = Rn;
}
let Constraints = "@earlyclobber $RdLo,@earlyclobber $RdHi" in {
def SMLALv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s),
4, IIC_iMAC64, [],
(SMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
def UMLALv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s),
4, IIC_iMAC64, [],
(UMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
def UMAALv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm, pred:$p),
4, IIC_iMAC64, [],
(UMAAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p)>,
Requires<[IsARM, NoV6]>;
}
} // neverHasSideEffects
// Most significant word multiply
def SMMUL : AMul2I <0b0111010, 0b0001, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL32, "smmul", "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (mulhs GPR:$Rn, GPR:$Rm))]>,
Requires<[IsARM, HasV6]> {
let Inst{15-12} = 0b1111;
}
def SMMULR : AMul2I <0b0111010, 0b0011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL32, "smmulr", "\t$Rd, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6]> {
let Inst{15-12} = 0b1111;
}
def SMMLA : AMul2Ia <0b0111010, 0b0001, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC32, "smmla", "\t$Rd, $Rn, $Rm, $Ra",
[(set GPR:$Rd, (add (mulhs GPR:$Rn, GPR:$Rm), GPR:$Ra))]>,
Requires<[IsARM, HasV6]>;
def SMMLAR : AMul2Ia <0b0111010, 0b0011, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC32, "smmlar", "\t$Rd, $Rn, $Rm, $Ra",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6]>;
def SMMLS : AMul2Ia <0b0111010, 0b1101, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC32, "smmls", "\t$Rd, $Rn, $Rm, $Ra",
[(set GPR:$Rd, (sub GPR:$Ra, (mulhs GPR:$Rn, GPR:$Rm)))]>,
Requires<[IsARM, HasV6]>;
def SMMLSR : AMul2Ia <0b0111010, 0b1111, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC32, "smmlsr", "\t$Rd, $Rn, $Rm, $Ra",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV6]>;
multiclass AI_smul<string opc, PatFrag opnode> {
def BB : AMulxyI<0b0001011, 0b00, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL16, !strconcat(opc, "bb"), "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (opnode (sext_inreg GPR:$Rn, i16),
(sext_inreg GPR:$Rm, i16)))]>,
Requires<[IsARM, HasV5TE]>;
def BT : AMulxyI<0b0001011, 0b10, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL16, !strconcat(opc, "bt"), "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (opnode (sext_inreg GPR:$Rn, i16),
(sra GPR:$Rm, (i32 16))))]>,
Requires<[IsARM, HasV5TE]>;
def TB : AMulxyI<0b0001011, 0b01, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL16, !strconcat(opc, "tb"), "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (opnode (sra GPR:$Rn, (i32 16)),
(sext_inreg GPR:$Rm, i16)))]>,
Requires<[IsARM, HasV5TE]>;
def TT : AMulxyI<0b0001011, 0b11, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL16, !strconcat(opc, "tt"), "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (opnode (sra GPR:$Rn, (i32 16)),
(sra GPR:$Rm, (i32 16))))]>,
Requires<[IsARM, HasV5TE]>;
def WB : AMulxyI<0b0001001, 0b01, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL16, !strconcat(opc, "wb"), "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (sra (opnode GPR:$Rn,
(sext_inreg GPR:$Rm, i16)), (i32 16)))]>,
Requires<[IsARM, HasV5TE]>;
def WT : AMulxyI<0b0001001, 0b11, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL16, !strconcat(opc, "wt"), "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (sra (opnode GPR:$Rn,
(sra GPR:$Rm, (i32 16))), (i32 16)))]>,
Requires<[IsARM, HasV5TE]>;
}
multiclass AI_smla<string opc, PatFrag opnode> {
def BB : AMulxyIa<0b0001000, 0b00, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC16, !strconcat(opc, "bb"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPR:$Rd, (add GPR:$Ra,
(opnode (sext_inreg GPR:$Rn, i16),
(sext_inreg GPR:$Rm, i16))))]>,
Requires<[IsARM, HasV5TE]>;
def BT : AMulxyIa<0b0001000, 0b10, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC16, !strconcat(opc, "bt"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPR:$Rd, (add GPR:$Ra, (opnode (sext_inreg GPR:$Rn, i16),
(sra GPR:$Rm, (i32 16)))))]>,
Requires<[IsARM, HasV5TE]>;
def TB : AMulxyIa<0b0001000, 0b01, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC16, !strconcat(opc, "tb"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPR:$Rd, (add GPR:$Ra, (opnode (sra GPR:$Rn, (i32 16)),
(sext_inreg GPR:$Rm, i16))))]>,
Requires<[IsARM, HasV5TE]>;
def TT : AMulxyIa<0b0001000, 0b11, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC16, !strconcat(opc, "tt"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPR:$Rd, (add GPR:$Ra, (opnode (sra GPR:$Rn, (i32 16)),
(sra GPR:$Rm, (i32 16)))))]>,
Requires<[IsARM, HasV5TE]>;
def WB : AMulxyIa<0b0001001, 0b00, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC16, !strconcat(opc, "wb"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPR:$Rd, (add GPR:$Ra, (sra (opnode GPR:$Rn,
(sext_inreg GPR:$Rm, i16)), (i32 16))))]>,
Requires<[IsARM, HasV5TE]>;
def WT : AMulxyIa<0b0001001, 0b10, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC16, !strconcat(opc, "wt"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPR:$Rd, (add GPR:$Ra, (sra (opnode GPR:$Rn,
(sra GPR:$Rm, (i32 16))), (i32 16))))]>,
Requires<[IsARM, HasV5TE]>;
}
defm SMUL : AI_smul<"smul", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
defm SMLA : AI_smla<"smla", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
// Halfword multiply accumulate long: SMLAL<x><y> -- for disassembly only
def SMLALBB : AMulxyI64<0b0001010, 0b00, (outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm),
IIC_iMAC64, "smlalbb", "\t$RdLo, $RdHi, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV5TE]>;
def SMLALBT : AMulxyI64<0b0001010, 0b10, (outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm),
IIC_iMAC64, "smlalbt", "\t$RdLo, $RdHi, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV5TE]>;
def SMLALTB : AMulxyI64<0b0001010, 0b01, (outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm),
IIC_iMAC64, "smlaltb", "\t$RdLo, $RdHi, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV5TE]>;
def SMLALTT : AMulxyI64<0b0001010, 0b11, (outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm),
IIC_iMAC64, "smlaltt", "\t$RdLo, $RdHi, $Rn, $Rm",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV5TE]>;
// Helper class for AI_smld -- for disassembly only
class AMulDualIbase<bit long, bit sub, bit swap, dag oops, dag iops,
InstrItinClass itin, string opc, string asm>
: AI<oops, iops, MulFrm, itin, opc, asm, []>, Requires<[IsARM, HasV6]> {
bits<4> Rn;
bits<4> Rm;
let Inst{27-23} = 0b01110;
let Inst{22} = long;
let Inst{21-20} = 0b00;
let Inst{11-8} = Rm;
let Inst{7} = 0;
let Inst{6} = sub;
let Inst{5} = swap;
let Inst{4} = 1;
let Inst{3-0} = Rn;
}
class AMulDualI<bit long, bit sub, bit swap, dag oops, dag iops,
InstrItinClass itin, string opc, string asm>
: AMulDualIbase<long, sub, swap, oops, iops, itin, opc, asm> {
bits<4> Rd;
let Inst{15-12} = 0b1111;
let Inst{19-16} = Rd;
}
class AMulDualIa<bit long, bit sub, bit swap, dag oops, dag iops,
InstrItinClass itin, string opc, string asm>
: AMulDualIbase<long, sub, swap, oops, iops, itin, opc, asm> {
bits<4> Ra;
bits<4> Rd;
let Inst{19-16} = Rd;
let Inst{15-12} = Ra;
}
class AMulDualI64<bit long, bit sub, bit swap, dag oops, dag iops,
InstrItinClass itin, string opc, string asm>
: AMulDualIbase<long, sub, swap, oops, iops, itin, opc, asm> {
bits<4> RdLo;
bits<4> RdHi;
let Inst{19-16} = RdHi;
let Inst{15-12} = RdLo;
}
multiclass AI_smld<bit sub, string opc> {
def D : AMulDualIa<0, sub, 0, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
NoItinerary, !strconcat(opc, "d"), "\t$Rd, $Rn, $Rm, $Ra">;
def DX: AMulDualIa<0, sub, 1, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
NoItinerary, !strconcat(opc, "dx"), "\t$Rd, $Rn, $Rm, $Ra">;
def LD: AMulDualI64<1, sub, 0, (outs GPR:$RdLo,GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm), NoItinerary,
!strconcat(opc, "ld"), "\t$RdLo, $RdHi, $Rn, $Rm">;
def LDX : AMulDualI64<1, sub, 1, (outs GPR:$RdLo,GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm), NoItinerary,
!strconcat(opc, "ldx"),"\t$RdLo, $RdHi, $Rn, $Rm">;
}
defm SMLA : AI_smld<0, "smla">;
defm SMLS : AI_smld<1, "smls">;
multiclass AI_sdml<bit sub, string opc> {
def D : AMulDualI<0, sub, 0, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
NoItinerary, !strconcat(opc, "d"), "\t$Rd, $Rn, $Rm">;
def DX : AMulDualI<0, sub, 1, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
NoItinerary, !strconcat(opc, "dx"), "\t$Rd, $Rn, $Rm">;
}
defm SMUA : AI_sdml<0, "smua">;
defm SMUS : AI_sdml<1, "smus">;
//===----------------------------------------------------------------------===//
// Misc. Arithmetic Instructions.
//
def CLZ : AMiscA1I<0b000010110, 0b0001, (outs GPR:$Rd), (ins GPR:$Rm),
IIC_iUNAr, "clz", "\t$Rd, $Rm",
[(set GPR:$Rd, (ctlz GPR:$Rm))]>, Requires<[IsARM, HasV5T]>;
def RBIT : AMiscA1I<0b01101111, 0b0011, (outs GPR:$Rd), (ins GPR:$Rm),
IIC_iUNAr, "rbit", "\t$Rd, $Rm",
[(set GPR:$Rd, (ARMrbit GPR:$Rm))]>,
Requires<[IsARM, HasV6T2]>;
def REV : AMiscA1I<0b01101011, 0b0011, (outs GPR:$Rd), (ins GPR:$Rm),
IIC_iUNAr, "rev", "\t$Rd, $Rm",
[(set GPR:$Rd, (bswap GPR:$Rm))]>, Requires<[IsARM, HasV6]>;
let AddedComplexity = 5 in
def REV16 : AMiscA1I<0b01101011, 0b1011, (outs GPR:$Rd), (ins GPR:$Rm),
IIC_iUNAr, "rev16", "\t$Rd, $Rm",
[(set GPR:$Rd, (rotr (bswap GPR:$Rm), (i32 16)))]>,
Requires<[IsARM, HasV6]>;
let AddedComplexity = 5 in
def REVSH : AMiscA1I<0b01101111, 0b1011, (outs GPR:$Rd), (ins GPR:$Rm),
IIC_iUNAr, "revsh", "\t$Rd, $Rm",
[(set GPR:$Rd, (sra (bswap GPR:$Rm), (i32 16)))]>,
Requires<[IsARM, HasV6]>;
def : ARMV6Pat<(or (sra (shl GPR:$Rm, (i32 24)), (i32 16)),
(and (srl GPR:$Rm, (i32 8)), 0xFF)),
(REVSH GPR:$Rm)>;
def PKHBT : APKHI<0b01101000, 0, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, pkh_lsl_amt:$sh),
IIC_iALUsi, "pkhbt", "\t$Rd, $Rn, $Rm$sh",
[(set GPR:$Rd, (or (and GPR:$Rn, 0xFFFF),
(and (shl GPR:$Rm, pkh_lsl_amt:$sh),
0xFFFF0000)))]>,
Requires<[IsARM, HasV6]>;
// Alternate cases for PKHBT where identities eliminate some nodes.
def : ARMV6Pat<(or (and GPR:$Rn, 0xFFFF), (and GPR:$Rm, 0xFFFF0000)),
(PKHBT GPR:$Rn, GPR:$Rm, 0)>;
def : ARMV6Pat<(or (and GPR:$Rn, 0xFFFF), (shl GPR:$Rm, imm16_31:$sh)),
(PKHBT GPR:$Rn, GPR:$Rm, imm16_31:$sh)>;
// Note: Shifts of 1-15 bits will be transformed to srl instead of sra and
// will match the pattern below.
def PKHTB : APKHI<0b01101000, 1, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, pkh_asr_amt:$sh),
IIC_iBITsi, "pkhtb", "\t$Rd, $Rn, $Rm$sh",
[(set GPR:$Rd, (or (and GPR:$Rn, 0xFFFF0000),
(and (sra GPR:$Rm, pkh_asr_amt:$sh),
0xFFFF)))]>,
Requires<[IsARM, HasV6]>;
// Alternate cases for PKHTB where identities eliminate some nodes. Note that
// a shift amount of 0 is *not legal* here, it is PKHBT instead.
def : ARMV6Pat<(or (and GPR:$src1, 0xFFFF0000), (srl GPR:$src2, imm16_31:$sh)),
(PKHTB GPR:$src1, GPR:$src2, imm16_31:$sh)>;
def : ARMV6Pat<(or (and GPR:$src1, 0xFFFF0000),
(and (srl GPR:$src2, imm1_15:$sh), 0xFFFF)),
(PKHTB GPR:$src1, GPR:$src2, imm1_15:$sh)>;
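// For example, (or (and r0, 0xFFFF), (shl r1, (i32 16))) is selected as
// "pkhbt r0, r0, r1, lsl #16", and (or (and r0, 0xFFFF0000),
// (and (srl r1, (i32 8)), 0xFFFF)) is selected as "pkhtb r0, r0, r1, asr #8"
// (the srl having come from an sra whose masked-off high bits made the two
// interchangeable).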
//===----------------------------------------------------------------------===//
// Comparison Instructions...
//
defm CMP : AI1_cmp_irs<0b1010, "cmp",
IIC_iCMPi, IIC_iCMPr, IIC_iCMPsr,
BinOpFrag<(ARMcmp node:$LHS, node:$RHS)>>;
// ARMcmpZ can re-use the above instruction definitions.
def : ARMPat<(ARMcmpZ GPR:$src, so_imm:$imm),
(CMPri GPR:$src, so_imm:$imm)>;
def : ARMPat<(ARMcmpZ GPR:$src, GPR:$rhs),
(CMPrr GPR:$src, GPR:$rhs)>;
def : ARMPat<(ARMcmpZ GPR:$src, so_reg_imm:$rhs),
(CMPrsi GPR:$src, so_reg_imm:$rhs)>;
def : ARMPat<(ARMcmpZ GPR:$src, so_reg_reg:$rhs),
(CMPrsr GPR:$src, so_reg_reg:$rhs)>;
// FIXME: We have to be careful when using the CMN instruction and comparison
// with 0. One would expect these two pieces of code should give identical
// results:
//
// rsbs r1, r1, 0
// cmp r0, r1
// mov r0, #0
// it ls
// mov r0, #1
//
// and:
//
// cmn r0, r1
// mov r0, #0
// it ls
// mov r0, #1
//
// However, the CMN gives the *opposite* result when r1 is 0. This is because
// the carry flag is set in the CMP case but not in the CMN case. In short, the
// CMP instruction doesn't perform a truncate of the (logical) NOT of 0 plus the
// value of r0 and the carry bit (because the "carry bit" parameter to
// AddWithCarry is defined as 1 in this case, the carry flag will always be set
// when r0 >= 0). The CMN instruction doesn't perform a NOT of 0 so there is
// never a "carry" when this AddWithCarry is performed (because the "carry bit"
// parameter to AddWithCarry is defined as 0).
//
// When x is 0 and unsigned:
//
// x = 0
// ~x = 0xFFFF FFFF
// ~x + 1 = 0x1 0000 0000
// (-x = 0) != (0x1 0000 0000 = ~x + 1)
//
// Therefore, we should disable CMN when comparing against zero, until we can
// limit when the CMN instruction is used (when we know that the RHS is not 0 or
// when it's a comparison which doesn't look at the 'carry' flag).
//
// (See the ARM docs for the "AddWithCarry" pseudo-code.)
//
// This is related to <rdar://problem/7569620>.
//
//defm CMN : AI1_cmp_irs<0b1011, "cmn",
// BinOpFrag<(ARMcmp node:$LHS,(ineg node:$RHS))>>;
// Note that TST/TEQ don't set all the same flags that CMP does!
defm TST : AI1_cmp_irs<0b1000, "tst",
IIC_iTSTi, IIC_iTSTr, IIC_iTSTsr,
BinOpFrag<(ARMcmpZ (and_su node:$LHS, node:$RHS), 0)>, 1>;
defm TEQ : AI1_cmp_irs<0b1001, "teq",
IIC_iTSTi, IIC_iTSTr, IIC_iTSTsr,
BinOpFrag<(ARMcmpZ (xor_su node:$LHS, node:$RHS), 0)>, 1>;
defm CMNz : AI1_cmp_irs<0b1011, "cmn",
IIC_iCMPi, IIC_iCMPr, IIC_iCMPsr,
BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>>;
//def : ARMPat<(ARMcmp GPR:$src, so_imm_neg:$imm),
// (CMNri GPR:$src, so_imm_neg:$imm)>;
def : ARMPat<(ARMcmpZ GPR:$src, so_imm_neg:$imm),
(CMNzri GPR:$src, so_imm_neg:$imm)>;
// Pseudo i64 compares for some floating point compares.
let usesCustomInserter = 1, isBranch = 1, isTerminator = 1,
Defs = [CPSR] in {
def BCCi64 : PseudoInst<(outs),
(ins i32imm:$cc, GPR:$lhs1, GPR:$lhs2, GPR:$rhs1, GPR:$rhs2, brtarget:$dst),
IIC_Br,
[(ARMBcci64 imm:$cc, GPR:$lhs1, GPR:$lhs2, GPR:$rhs1, GPR:$rhs2, bb:$dst)]>;
def BCCZi64 : PseudoInst<(outs),
(ins i32imm:$cc, GPR:$lhs1, GPR:$lhs2, brtarget:$dst), IIC_Br,
[(ARMBcci64 imm:$cc, GPR:$lhs1, GPR:$lhs2, 0, 0, bb:$dst)]>;
} // usesCustomInserter
// Conditional moves
// FIXME: should be able to write a pattern for ARMcmov, but can't use
// a two-value operand where a dag node expects two operands. :(
let neverHasSideEffects = 1 in {
def MOVCCr : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$false, GPR:$Rm, pred:$p),
4, IIC_iCMOVr,
[/*(set GPR:$Rd, (ARMcmov GPR:$false, GPR:$Rm, imm:$cc, CCR:$ccr))*/]>,
RegConstraint<"$false = $Rd">;
def MOVCCsi : ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$false, so_reg_imm:$shift, pred:$p),
4, IIC_iCMOVsr,
[/*(set GPR:$Rd, (ARMcmov GPR:$false, so_reg_imm:$shift, imm:$cc, CCR:$ccr))*/]>,
RegConstraint<"$false = $Rd">;
def MOVCCsr : ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$false, so_reg_reg:$shift, pred:$p),
4, IIC_iCMOVsr,
[/*(set GPR:$Rd, (ARMcmov GPR:$false, so_reg_reg:$shift, imm:$cc, CCR:$ccr))*/]>,
RegConstraint<"$false = $Rd">;
let isMoveImm = 1 in
def MOVCCi16 : ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$false, imm0_65535_expr:$imm, pred:$p),
4, IIC_iMOVi,
[]>,
RegConstraint<"$false = $Rd">, Requires<[IsARM, HasV6T2]>;
let isMoveImm = 1 in
def MOVCCi : ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$false, so_imm:$imm, pred:$p),
4, IIC_iCMOVi,
[/*(set GPR:$Rd, (ARMcmov GPR:$false, so_imm:$imm, imm:$cc, CCR:$ccr))*/]>,
RegConstraint<"$false = $Rd">;
// Two-instruction predicated mov immediate.
let isMoveImm = 1 in
def MOVCCi32imm : ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$false, i32imm:$src, pred:$p),
8, IIC_iCMOVix2, []>, RegConstraint<"$false = $Rd">;
let isMoveImm = 1 in
def MVNCCi : ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$false, so_imm:$imm, pred:$p),
4, IIC_iCMOVi,
[/*(set GPR:$Rd, (ARMcmov GPR:$false, so_imm_not:$imm, imm:$cc, CCR:$ccr))*/]>,
RegConstraint<"$false = $Rd">;
} // neverHasSideEffects
//===----------------------------------------------------------------------===//
// Atomic operations intrinsics
//
def MemBarrierOptOperand : AsmOperandClass {
let Name = "MemBarrierOpt";
let ParserMethod = "parseMemBarrierOptOperand";
}
def memb_opt : Operand<i32> {
let PrintMethod = "printMemBOption";
let ParserMatchClass = MemBarrierOptOperand;
}
// memory barriers protect the atomic sequences
let hasSideEffects = 1 in {
def DMB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
"dmb", "\t$opt", [(ARMMemBarrier (i32 imm:$opt))]>,
Requires<[IsARM, HasDB]> {
bits<4> opt;
let Inst{31-4} = 0xf57ff05;
let Inst{3-0} = opt;
}
}
def DSB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
"dsb", "\t$opt", []>,
Requires<[IsARM, HasDB]> {
bits<4> opt;
let Inst{31-4} = 0xf57ff04;
let Inst{3-0} = opt;
}
// ISB has only the full-system option.
def ISB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
"isb", "\t$opt", []>,
Requires<[IsARM, HasDB]> {
bits<4> opt;
let Inst{31-4} = 0xf57ff06;
let Inst{3-0} = opt;
}
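// The ATOMIC_* pseudos below are expanded by the custom inserter into
// ldrex/strex retry loops; roughly, for ATOMIC_LOAD_ADD_I32 the expansion
// looks like (register names are illustrative only):
//   loop:
//     ldrex   r2, [r0]
//     add     r3, r2, r1
//     strex   r12, r3, [r0]
//     cmp     r12, #0
//     bne     loop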
let usesCustomInserter = 1 in {
let Uses = [CPSR] in {
def ATOMIC_LOAD_ADD_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_add_8 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_SUB_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_sub_8 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_AND_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_and_8 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_OR_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_or_8 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_XOR_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_xor_8 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_NAND_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_nand_8 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_MIN_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
[(set GPR:$dst, (atomic_load_min_8 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_MAX_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
[(set GPR:$dst, (atomic_load_max_8 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_UMIN_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
      [(set GPR:$dst, (atomic_load_umin_8 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_UMAX_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
      [(set GPR:$dst, (atomic_load_umax_8 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_ADD_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_add_16 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_SUB_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_sub_16 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_AND_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_and_16 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_OR_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_or_16 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_XOR_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_xor_16 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_NAND_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_nand_16 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_MIN_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
[(set GPR:$dst, (atomic_load_min_16 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_MAX_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
[(set GPR:$dst, (atomic_load_max_16 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_UMIN_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
      [(set GPR:$dst, (atomic_load_umin_16 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_UMAX_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
      [(set GPR:$dst, (atomic_load_umax_16 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_ADD_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_add_32 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_SUB_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_sub_32 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_AND_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_and_32 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_OR_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_or_32 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_XOR_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_xor_32 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_NAND_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_nand_32 GPR:$ptr, GPR:$incr))]>;
def ATOMIC_LOAD_MIN_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
[(set GPR:$dst, (atomic_load_min_32 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_MAX_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
[(set GPR:$dst, (atomic_load_max_32 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_UMIN_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
      [(set GPR:$dst, (atomic_load_umin_32 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_UMAX_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
      [(set GPR:$dst, (atomic_load_umax_32 GPR:$ptr, GPR:$val))]>;
def ATOMIC_SWAP_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$new), NoItinerary,
[(set GPR:$dst, (atomic_swap_8 GPR:$ptr, GPR:$new))]>;
def ATOMIC_SWAP_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$new), NoItinerary,
[(set GPR:$dst, (atomic_swap_16 GPR:$ptr, GPR:$new))]>;
def ATOMIC_SWAP_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$new), NoItinerary,
[(set GPR:$dst, (atomic_swap_32 GPR:$ptr, GPR:$new))]>;
def ATOMIC_CMP_SWAP_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$old, GPR:$new), NoItinerary,
[(set GPR:$dst, (atomic_cmp_swap_8 GPR:$ptr, GPR:$old, GPR:$new))]>;
def ATOMIC_CMP_SWAP_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$old, GPR:$new), NoItinerary,
[(set GPR:$dst, (atomic_cmp_swap_16 GPR:$ptr, GPR:$old, GPR:$new))]>;
def ATOMIC_CMP_SWAP_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$old, GPR:$new), NoItinerary,
[(set GPR:$dst, (atomic_cmp_swap_32 GPR:$ptr, GPR:$old, GPR:$new))]>;
}
}
let mayLoad = 1 in {
def LDREXB : AIldrex<0b10, (outs GPR:$Rt), (ins addrmode7:$addr), NoItinerary,
"ldrexb", "\t$Rt, $addr", []>;
def LDREXH : AIldrex<0b11, (outs GPR:$Rt), (ins addrmode7:$addr), NoItinerary,
"ldrexh", "\t$Rt, $addr", []>;
def LDREX : AIldrex<0b00, (outs GPR:$Rt), (ins addrmode7:$addr), NoItinerary,
"ldrex", "\t$Rt, $addr", []>;
let hasExtraDefRegAllocReq = 1 in
def LDREXD : AIldrex<0b01, (outs GPR:$Rt, GPR:$Rt2), (ins addrmode7:$addr),
NoItinerary, "ldrexd", "\t$Rt, $Rt2, $addr", []>;
}
let mayStore = 1, Constraints = "@earlyclobber $Rd" in {
def STREXB : AIstrex<0b10, (outs GPR:$Rd), (ins GPR:$Rt, addrmode7:$addr),
NoItinerary, "strexb", "\t$Rd, $Rt, $addr", []>;
def STREXH : AIstrex<0b11, (outs GPR:$Rd), (ins GPR:$Rt, addrmode7:$addr),
NoItinerary, "strexh", "\t$Rd, $Rt, $addr", []>;
def STREX : AIstrex<0b00, (outs GPR:$Rd), (ins GPR:$Rt, addrmode7:$addr),
NoItinerary, "strex", "\t$Rd, $Rt, $addr", []>;
}
let hasExtraSrcRegAllocReq = 1, Constraints = "@earlyclobber $Rd" in
def STREXD : AIstrex<0b01, (outs GPR:$Rd),
(ins GPR:$Rt, GPR:$Rt2, addrmode7:$addr),
NoItinerary, "strexd", "\t$Rd, $Rt, $Rt2, $addr", []>;
// Clear-Exclusive is for disassembly only.
def CLREX : AXI<(outs), (ins), MiscFrm, NoItinerary, "clrex",
[/* For disassembly only; pattern left blank */]>,
Requires<[IsARM, HasV7]> {
let Inst{31-0} = 0b11110101011111111111000000011111;
}
// SWP/SWPB are deprecated in V6/V7.
let mayLoad = 1, mayStore = 1 in {
def SWP : AIswp<0, (outs GPR:$Rt), (ins GPR:$Rt2, addrmode7:$addr), "swp", []>;
def SWPB: AIswp<1, (outs GPR:$Rt), (ins GPR:$Rt2, addrmode7:$addr), "swpb", []>;
}
//===----------------------------------------------------------------------===//
// Coprocessor Instructions.
//
def CDP : ABI<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1,
c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, imm0_7:$opc2),
NoItinerary, "cdp", "\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2",
[(int_arm_cdp imm:$cop, imm:$opc1, imm:$CRd, imm:$CRn,
imm:$CRm, imm:$opc2)]> {
bits<4> opc1;
bits<4> CRn;
bits<4> CRd;
bits<4> cop;
bits<3> opc2;
bits<4> CRm;
let Inst{3-0} = CRm;
let Inst{4} = 0;
let Inst{7-5} = opc2;
let Inst{11-8} = cop;
let Inst{15-12} = CRd;
let Inst{19-16} = CRn;
let Inst{23-20} = opc1;
}
def CDP2 : ABXI<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1,
c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, imm0_7:$opc2),
NoItinerary, "cdp2\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2",
[(int_arm_cdp2 imm:$cop, imm:$opc1, imm:$CRd, imm:$CRn,
imm:$CRm, imm:$opc2)]> {
let Inst{31-28} = 0b1111;
bits<4> opc1;
bits<4> CRn;
bits<4> CRd;
bits<4> cop;
bits<3> opc2;
bits<4> CRm;
let Inst{3-0} = CRm;
let Inst{4} = 0;
let Inst{7-5} = opc2;
let Inst{11-8} = cop;
let Inst{15-12} = CRd;
let Inst{19-16} = CRn;
let Inst{23-20} = opc1;
}
class ACI<dag oops, dag iops, string opc, string asm,
IndexMode im = IndexModeNone>
: InoP<oops, iops, AddrModeNone, 4, im, BrFrm, NoItinerary,
opc, asm, "", [/* For disassembly only; pattern left blank */]> {
let Inst{27-25} = 0b110;
}
multiclass LdStCop<bits<4> op31_28, bit load, dag ops, string opc, string cond>{
def _OFFSET : ACI<(outs),
!con((ins nohash_imm:$cop, nohash_imm:$CRd, addrmode2:$addr), ops),
!strconcat(opc, cond), "\tp$cop, cr$CRd, $addr"> {
let Inst{31-28} = op31_28;
let Inst{24} = 1; // P = 1
let Inst{21} = 0; // W = 0
let Inst{22} = 0; // D = 0
let Inst{20} = load;
}
def _PRE : ACI<(outs),
!con((ins nohash_imm:$cop, nohash_imm:$CRd, addrmode2:$addr), ops),
!strconcat(opc, cond), "\tp$cop, cr$CRd, $addr!", IndexModePre> {
let Inst{31-28} = op31_28;
let Inst{24} = 1; // P = 1
let Inst{21} = 1; // W = 1
let Inst{22} = 0; // D = 0
let Inst{20} = load;
}
def _POST : ACI<(outs),
!con((ins nohash_imm:$cop, nohash_imm:$CRd, addrmode2:$addr), ops),
!strconcat(opc, cond), "\tp$cop, cr$CRd, $addr", IndexModePost> {
let Inst{31-28} = op31_28;
let Inst{24} = 0; // P = 0
let Inst{21} = 1; // W = 1
let Inst{22} = 0; // D = 0
let Inst{20} = load;
}
def _OPTION : ACI<(outs),
!con((ins nohash_imm:$cop,nohash_imm:$CRd,GPR:$base, nohash_imm:$option),
ops),
!strconcat(opc, cond), "\tp$cop, cr$CRd, [$base], \\{$option\\}"> {
let Inst{31-28} = op31_28;
let Inst{24} = 0; // P = 0
let Inst{23} = 1; // U = 1
let Inst{21} = 0; // W = 0
let Inst{22} = 0; // D = 0
let Inst{20} = load;
}
def L_OFFSET : ACI<(outs),
!con((ins nohash_imm:$cop, nohash_imm:$CRd, addrmode2:$addr), ops),
!strconcat(!strconcat(opc, "l"), cond), "\tp$cop, cr$CRd, $addr"> {
let Inst{31-28} = op31_28;
let Inst{24} = 1; // P = 1
let Inst{21} = 0; // W = 0
let Inst{22} = 1; // D = 1
let Inst{20} = load;
}
def L_PRE : ACI<(outs),
!con((ins nohash_imm:$cop, nohash_imm:$CRd, addrmode2:$addr), ops),
!strconcat(!strconcat(opc, "l"), cond), "\tp$cop, cr$CRd, $addr!",
IndexModePre> {
let Inst{31-28} = op31_28;
let Inst{24} = 1; // P = 1
let Inst{21} = 1; // W = 1
let Inst{22} = 1; // D = 1
let Inst{20} = load;
}
def L_POST : ACI<(outs),
!con((ins nohash_imm:$cop, nohash_imm:$CRd, addrmode2:$addr), ops),
!strconcat(!strconcat(opc, "l"), cond), "\tp$cop, cr$CRd, $addr",
IndexModePost> {
let Inst{31-28} = op31_28;
let Inst{24} = 0; // P = 0
let Inst{21} = 1; // W = 1
let Inst{22} = 1; // D = 1
let Inst{20} = load;
}
def L_OPTION : ACI<(outs),
!con((ins nohash_imm:$cop, nohash_imm:$CRd,GPR:$base,nohash_imm:$option),
ops),
!strconcat(!strconcat(opc, "l"), cond),
"\tp$cop, cr$CRd, [$base], \\{$option\\}"> {
let Inst{31-28} = op31_28;
let Inst{24} = 0; // P = 0
let Inst{23} = 1; // U = 1
let Inst{21} = 0; // W = 0
let Inst{22} = 1; // D = 1
let Inst{20} = load;
}
}
defm LDC : LdStCop<{?,?,?,?}, 1, (ins pred:$p), "ldc", "${p}">;
defm LDC2 : LdStCop<0b1111, 1, (ins), "ldc2", "">;
defm STC : LdStCop<{?,?,?,?}, 0, (ins pred:$p), "stc", "${p}">;
defm STC2 : LdStCop<0b1111, 0, (ins), "stc2", "">;
//===----------------------------------------------------------------------===//
// Move between coprocessor and ARM core register -- for disassembly only
//
class MovRCopro<string opc, bit direction, dag oops, dag iops,
list<dag> pattern>
: ABI<0b1110, oops, iops, NoItinerary, opc,
"\t$cop, $opc1, $Rt, $CRn, $CRm, $opc2", pattern> {
let Inst{20} = direction;
let Inst{4} = 1;
bits<4> Rt;
bits<4> cop;
bits<3> opc1;
bits<3> opc2;
bits<4> CRm;
bits<4> CRn;
let Inst{15-12} = Rt;
let Inst{11-8} = cop;
let Inst{23-21} = opc1;
let Inst{7-5} = opc2;
let Inst{3-0} = CRm;
let Inst{19-16} = CRn;
}
def MCR : MovRCopro<"mcr", 0 /* from ARM core register to coprocessor */,
(outs),
(ins p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
c_imm:$CRm, imm0_7:$opc2),
[(int_arm_mcr imm:$cop, imm:$opc1, GPR:$Rt, imm:$CRn,
imm:$CRm, imm:$opc2)]>;
def MRC : MovRCopro<"mrc", 1 /* from coprocessor to ARM core register */,
(outs GPR:$Rt),
(ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, c_imm:$CRm,
imm0_7:$opc2), []>;
def : ARMPat<(int_arm_mrc imm:$cop, imm:$opc1, imm:$CRn, imm:$CRm, imm:$opc2),
(MRC imm:$cop, imm:$opc1, imm:$CRn, imm:$CRm, imm:$opc2)>;
class MovRCopro2<string opc, bit direction, dag oops, dag iops,
list<dag> pattern>
: ABXI<0b1110, oops, iops, NoItinerary,
!strconcat(opc, "\t$cop, $opc1, $Rt, $CRn, $CRm, $opc2"), pattern> {
let Inst{31-28} = 0b1111;
let Inst{20} = direction;
let Inst{4} = 1;
bits<4> Rt;
bits<4> cop;
bits<3> opc1;
bits<3> opc2;
bits<4> CRm;
bits<4> CRn;
let Inst{15-12} = Rt;
let Inst{11-8} = cop;
let Inst{23-21} = opc1;
let Inst{7-5} = opc2;
let Inst{3-0} = CRm;
let Inst{19-16} = CRn;
}
def MCR2 : MovRCopro2<"mcr2", 0 /* from ARM core register to coprocessor */,
(outs),
(ins p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
c_imm:$CRm, imm0_7:$opc2),
[(int_arm_mcr2 imm:$cop, imm:$opc1, GPR:$Rt, imm:$CRn,
imm:$CRm, imm:$opc2)]>;
def MRC2 : MovRCopro2<"mrc2", 1 /* from coprocessor to ARM core register */,
(outs GPR:$Rt),
(ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, c_imm:$CRm,
imm0_7:$opc2), []>;
def : ARMV5TPat<(int_arm_mrc2 imm:$cop, imm:$opc1, imm:$CRn,
imm:$CRm, imm:$opc2),
(MRC2 imm:$cop, imm:$opc1, imm:$CRn, imm:$CRm, imm:$opc2)>;
class MovRRCopro<string opc, bit direction,
list<dag> pattern = [/* For disassembly only */]>
: ABI<0b1100, (outs), (ins p_imm:$cop, imm0_15:$opc1,
GPR:$Rt, GPR:$Rt2, c_imm:$CRm),
NoItinerary, opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm", pattern> {
let Inst{23-21} = 0b010;
let Inst{20} = direction;
bits<4> Rt;
bits<4> Rt2;
bits<4> cop;
bits<4> opc1;
bits<4> CRm;
let Inst{15-12} = Rt;
let Inst{19-16} = Rt2;
let Inst{11-8} = cop;
let Inst{7-4} = opc1;
let Inst{3-0} = CRm;
}
def MCRR : MovRRCopro<"mcrr", 0 /* from ARM core register to coprocessor */,
[(int_arm_mcrr imm:$cop, imm:$opc1, GPR:$Rt, GPR:$Rt2,
imm:$CRm)]>;
def MRRC : MovRRCopro<"mrrc", 1 /* from coprocessor to ARM core register */>;
class MovRRCopro2<string opc, bit direction,
list<dag> pattern = [/* For disassembly only */]>
: ABXI<0b1100, (outs), (ins p_imm:$cop, imm0_15:$opc1,
GPR:$Rt, GPR:$Rt2, c_imm:$CRm), NoItinerary,
!strconcat(opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm"), pattern> {
let Inst{31-28} = 0b1111;
let Inst{23-21} = 0b010;
let Inst{20} = direction;
bits<4> Rt;
bits<4> Rt2;
bits<4> cop;
bits<4> opc1;
bits<4> CRm;
let Inst{15-12} = Rt;
let Inst{19-16} = Rt2;
let Inst{11-8} = cop;
let Inst{7-4} = opc1;
let Inst{3-0} = CRm;
}
def MCRR2 : MovRRCopro2<"mcrr2", 0 /* from ARM core register to coprocessor */,
[(int_arm_mcrr2 imm:$cop, imm:$opc1, GPR:$Rt, GPR:$Rt2,
imm:$CRm)]>;
def MRRC2 : MovRRCopro2<"mrrc2", 1 /* from coprocessor to ARM core register */>;
//===----------------------------------------------------------------------===//
// Move between special register and ARM core register
//
// Move to ARM core register from Special Register
def MRS : ABI<0b0001, (outs GPR:$Rd), (ins), NoItinerary,
"mrs", "\t$Rd, apsr", []> {
bits<4> Rd;
let Inst{23-16} = 0b00001111;
let Inst{15-12} = Rd;
let Inst{7-4} = 0b0000;
}
def : InstAlias<"mrs${p} $Rd, cpsr", (MRS GPR:$Rd, pred:$p)>, Requires<[IsARM]>;
def MRSsys : ABI<0b0001, (outs GPR:$Rd), (ins), NoItinerary,
"mrs", "\t$Rd, spsr", []> {
bits<4> Rd;
let Inst{23-16} = 0b01001111;
let Inst{15-12} = Rd;
let Inst{7-4} = 0b0000;
}
// Move from ARM core register to Special Register
//
// No need to have both system and application versions: the encodings are the
// same, and the assembly parser has no way to distinguish between them. The
// mask operand contains the special register (R bit) in bit 4, and bits 3-0
// contain the mask of fields to be accessed in the special register.
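// For example (illustrative, using the usual c/x/s/f field encoding where
// mask bit 0 selects the control field and bit 3 the flags field):
//   msr CPSR_fc, r0    ->  mask = 0b01001  (R = 0; f and c fields)
//   msr SPSR_fsxc, r0  ->  mask = 0b11111  (R = 1; all four fields)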
def MSR : ABI<0b0001, (outs), (ins msr_mask:$mask, GPR:$Rn), NoItinerary,
"msr", "\t$mask, $Rn", []> {
bits<5> mask;
bits<4> Rn;
let Inst{23} = 0;
let Inst{22} = mask{4}; // R bit
let Inst{21-20} = 0b10;
let Inst{19-16} = mask{3-0};
let Inst{15-12} = 0b1111;
let Inst{11-4} = 0b00000000;
let Inst{3-0} = Rn;
}
def MSRi : ABI<0b0011, (outs), (ins msr_mask:$mask, so_imm:$a), NoItinerary,
"msr", "\t$mask, $a", []> {
bits<5> mask;
bits<12> a;
let Inst{23} = 0;
let Inst{22} = mask{4}; // R bit
let Inst{21-20} = 0b10;
let Inst{19-16} = mask{3-0};
let Inst{15-12} = 0b1111;
let Inst{11-0} = a;
}
//===----------------------------------------------------------------------===//
// TLS Instructions
//
// __aeabi_read_tp preserves the registers r1-r3.
// This is a pseudo inst so that we can get the encoding right,
// complete with fixup for the aeabi_read_tp function.
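// (Roughly, the pseudo is emitted as a "bl __aeabi_read_tp" call, which
// returns the thread pointer in r0; hence the Defs/Uses below.)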
let isCall = 1,
Defs = [R0, R12, LR, CPSR], Uses = [SP] in {
def TPsoft : PseudoInst<(outs), (ins), IIC_Br,
[(set R0, ARMthread_pointer)]>;
}
//===----------------------------------------------------------------------===//
// SJLJ Exception handling intrinsics
// eh_sjlj_setjmp() is an instruction sequence to store the return
// address and save #0 in R0 for the non-longjmp case.
// Since by its nature we may be coming from some other function to get
// here, and we're using the stack frame for the containing function to
// save/restore registers, we can't keep anything live in regs across
// the eh_sjlj_setjmp(), else it will almost certainly have been tromped upon
// when we get here from a longjmp(). We force everything out of registers
// except for our own input by listing the relevant registers in Defs. By
// doing so, we also cause the prologue/epilogue code to actively preserve
// all of the callee-saved registers, which is exactly what we want.
// A constant value is passed in $val, and we use the location as a scratch.
//
// These are pseudo-instructions and are lowered to individual MC-insts, so
// no encoding information is necessary.
let Defs =
[ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, CPSR,
QQQQ0, QQQQ1, QQQQ2, QQQQ3 ], hasSideEffects = 1, isBarrier = 1 in {
def Int_eh_sjlj_setjmp : PseudoInst<(outs), (ins GPR:$src, GPR:$val),
NoItinerary,
[(set R0, (ARMeh_sjlj_setjmp GPR:$src, GPR:$val))]>,
Requires<[IsARM, HasVFP2]>;
}
let Defs =
[ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, CPSR ],
hasSideEffects = 1, isBarrier = 1 in {
def Int_eh_sjlj_setjmp_nofp : PseudoInst<(outs), (ins GPR:$src, GPR:$val),
NoItinerary,
[(set R0, (ARMeh_sjlj_setjmp GPR:$src, GPR:$val))]>,
Requires<[IsARM, NoVFP]>;
}
// FIXME: Non-Darwin version(s)
let isBarrier = 1, hasSideEffects = 1, isTerminator = 1,
Defs = [ R7, LR, SP ] in {
def Int_eh_sjlj_longjmp : PseudoInst<(outs), (ins GPR:$src, GPR:$scratch),
NoItinerary,
[(ARMeh_sjlj_longjmp GPR:$src, GPR:$scratch)]>,
Requires<[IsARM, IsDarwin]>;
}
// eh.sjlj.dispatchsetup pseudo-instruction.
// This pseudo is used for ARM, Thumb1 and Thumb2. Any differences are
// handled when the pseudo is expanded (which happens before any passes
// that need the instruction size).
let isBarrier = 1, hasSideEffects = 1 in
def Int_eh_sjlj_dispatchsetup :
PseudoInst<(outs), (ins GPR:$src), NoItinerary,
[(ARMeh_sjlj_dispatchsetup GPR:$src)]>,
Requires<[IsDarwin]>;
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//
// ARMv4 indirect branch using (MOVr PC, dst)
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in
def MOVPCRX : ARMPseudoExpand<(outs), (ins GPR:$dst),
4, IIC_Br, [(brind GPR:$dst)],
(MOVr PC, GPR:$dst, (ops 14, zero_reg), zero_reg)>,
Requires<[IsARM, NoV4T]>;
// Large immediate handling.
// 32-bit immediate using two piece so_imms or movw + movt.
// This is a single pseudo instruction; the benefit is that it can be remat'd
// as a single unit instead of having to handle reg inputs.
// FIXME: Remove this when we can do generalized remat.
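// For example, materializing 0xAB00CD00 would expand to either
//   movw r0, #0xcd00          @ with MOVW/MOVT (v6T2 and later)
//   movt r0, #0xab00
// or, on older subtargets, a two-piece so_imm sequence such as
//   mov  r0, #0xab000000
//   orr  r0, r0, #0xcd00
// (register choice and exact pieces are illustrative only).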
let isReMaterializable = 1, isMoveImm = 1 in
def MOVi32imm : PseudoInst<(outs GPR:$dst), (ins i32imm:$src), IIC_iMOVix2,
[(set GPR:$dst, (arm_i32imm:$src))]>,
Requires<[IsARM]>;
// Pseudo instruction that combines movw + movt + add pc (if PIC).
// It also makes it possible to rematerialize the instructions.
// FIXME: Remove this when we can do generalized remat and when machine licm
// can properly hoist the instructions.
let isReMaterializable = 1 in {
def MOV_ga_pcrel : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
IIC_iMOVix2addpc,
[(set GPR:$dst, (ARMWrapperPIC tglobaladdr:$addr))]>,
Requires<[IsARM, UseMovt]>;
def MOV_ga_dyn : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
IIC_iMOVix2,
[(set GPR:$dst, (ARMWrapperDYN tglobaladdr:$addr))]>,
Requires<[IsARM, UseMovt]>;
let AddedComplexity = 10 in
def MOV_ga_pcrel_ldr : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
IIC_iMOVix2ld,
[(set GPR:$dst, (load (ARMWrapperPIC tglobaladdr:$addr)))]>,
Requires<[IsARM, UseMovt]>;
} // isReMaterializable
// ConstantPool, GlobalAddress, and JumpTable
def : ARMPat<(ARMWrapper tglobaladdr :$dst), (LEApcrel tglobaladdr :$dst)>,
Requires<[IsARM, DontUseMovt]>;
def : ARMPat<(ARMWrapper tconstpool :$dst), (LEApcrel tconstpool :$dst)>;
def : ARMPat<(ARMWrapper tglobaladdr :$dst), (MOVi32imm tglobaladdr :$dst)>,
Requires<[IsARM, UseMovt]>;
def : ARMPat<(ARMWrapperJT tjumptable:$dst, imm:$id),
(LEApcrelJT tjumptable:$dst, imm:$id)>;
// TODO: add,sub,and, 3-instr forms?
// Tail calls
def : ARMPat<(ARMtcret tcGPR:$dst),
(TCRETURNri tcGPR:$dst)>, Requires<[IsDarwin]>;
def : ARMPat<(ARMtcret (i32 tglobaladdr:$dst)),
(TCRETURNdi texternalsym:$dst)>, Requires<[IsDarwin]>;
def : ARMPat<(ARMtcret (i32 texternalsym:$dst)),
(TCRETURNdi texternalsym:$dst)>, Requires<[IsDarwin]>;
def : ARMPat<(ARMtcret tcGPR:$dst),
(TCRETURNriND tcGPR:$dst)>, Requires<[IsNotDarwin]>;
def : ARMPat<(ARMtcret (i32 tglobaladdr:$dst)),
(TCRETURNdiND texternalsym:$dst)>, Requires<[IsNotDarwin]>;
def : ARMPat<(ARMtcret (i32 texternalsym:$dst)),
(TCRETURNdiND texternalsym:$dst)>, Requires<[IsNotDarwin]>;
// Direct calls
def : ARMPat<(ARMcall texternalsym:$func), (BL texternalsym:$func)>,
Requires<[IsARM, IsNotDarwin]>;
def : ARMPat<(ARMcall texternalsym:$func), (BLr9 texternalsym:$func)>,
Requires<[IsARM, IsDarwin]>;
// zextload i1 -> zextload i8
def : ARMPat<(zextloadi1 addrmode_imm12:$addr), (LDRBi12 addrmode_imm12:$addr)>;
def : ARMPat<(zextloadi1 ldst_so_reg:$addr), (LDRBrs ldst_so_reg:$addr)>;
// extload -> zextload
def : ARMPat<(extloadi1 addrmode_imm12:$addr), (LDRBi12 addrmode_imm12:$addr)>;
def : ARMPat<(extloadi1 ldst_so_reg:$addr), (LDRBrs ldst_so_reg:$addr)>;
def : ARMPat<(extloadi8 addrmode_imm12:$addr), (LDRBi12 addrmode_imm12:$addr)>;
def : ARMPat<(extloadi8 ldst_so_reg:$addr), (LDRBrs ldst_so_reg:$addr)>;
def : ARMPat<(extloadi16 addrmode3:$addr), (LDRH addrmode3:$addr)>;
def : ARMPat<(extloadi8 addrmodepc:$addr), (PICLDRB addrmodepc:$addr)>;
def : ARMPat<(extloadi16 addrmodepc:$addr), (PICLDRH addrmodepc:$addr)>;
// smul* and smla*
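// In these patterns, (sra (shl GPR:$x, 16), 16) is the sign-extended low
// halfword of $x and (sra GPR:$x, 16) is the sign-extended high halfword, so
// e.g. SMULBB multiplies the two low halves while SMULTB multiplies the high
// half of $a by the low half of $b.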
def : ARMV5TEPat<(mul (sra (shl GPR:$a, (i32 16)), (i32 16)),
(sra (shl GPR:$b, (i32 16)), (i32 16))),
(SMULBB GPR:$a, GPR:$b)>;
def : ARMV5TEPat<(mul sext_16_node:$a, sext_16_node:$b),
(SMULBB GPR:$a, GPR:$b)>;
def : ARMV5TEPat<(mul (sra (shl GPR:$a, (i32 16)), (i32 16)),
(sra GPR:$b, (i32 16))),
(SMULBT GPR:$a, GPR:$b)>;
def : ARMV5TEPat<(mul sext_16_node:$a, (sra GPR:$b, (i32 16))),
(SMULBT GPR:$a, GPR:$b)>;
def : ARMV5TEPat<(mul (sra GPR:$a, (i32 16)),
(sra (shl GPR:$b, (i32 16)), (i32 16))),
(SMULTB GPR:$a, GPR:$b)>;
def : ARMV5TEPat<(mul (sra GPR:$a, (i32 16)), sext_16_node:$b),
(SMULTB GPR:$a, GPR:$b)>;
def : ARMV5TEPat<(sra (mul GPR:$a, (sra (shl GPR:$b, (i32 16)), (i32 16))),
(i32 16)),
(SMULWB GPR:$a, GPR:$b)>;
def : ARMV5TEPat<(sra (mul GPR:$a, sext_16_node:$b), (i32 16)),
(SMULWB GPR:$a, GPR:$b)>;
def : ARMV5TEPat<(add GPR:$acc,
(mul (sra (shl GPR:$a, (i32 16)), (i32 16)),
(sra (shl GPR:$b, (i32 16)), (i32 16)))),
(SMLABB GPR:$a, GPR:$b, GPR:$acc)>;
def : ARMV5TEPat<(add GPR:$acc,
(mul sext_16_node:$a, sext_16_node:$b)),
(SMLABB GPR:$a, GPR:$b, GPR:$acc)>;
def : ARMV5TEPat<(add GPR:$acc,
(mul (sra (shl GPR:$a, (i32 16)), (i32 16)),
(sra GPR:$b, (i32 16)))),
(SMLABT GPR:$a, GPR:$b, GPR:$acc)>;
def : ARMV5TEPat<(add GPR:$acc,
(mul sext_16_node:$a, (sra GPR:$b, (i32 16)))),
(SMLABT GPR:$a, GPR:$b, GPR:$acc)>;
def : ARMV5TEPat<(add GPR:$acc,
(mul (sra GPR:$a, (i32 16)),
(sra (shl GPR:$b, (i32 16)), (i32 16)))),
(SMLATB GPR:$a, GPR:$b, GPR:$acc)>;
def : ARMV5TEPat<(add GPR:$acc,
(mul (sra GPR:$a, (i32 16)), sext_16_node:$b)),
(SMLATB GPR:$a, GPR:$b, GPR:$acc)>;
def : ARMV5TEPat<(add GPR:$acc,
(sra (mul GPR:$a, (sra (shl GPR:$b, (i32 16)), (i32 16))),
(i32 16))),
(SMLAWB GPR:$a, GPR:$b, GPR:$acc)>;
def : ARMV5TEPat<(add GPR:$acc,
(sra (mul GPR:$a, sext_16_node:$b), (i32 16))),
(SMLAWB GPR:$a, GPR:$b, GPR:$acc)>;
// Pre-v7 uses MCR for synchronization barriers.
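// i.e. the ARMv6 CP15 barrier operation "mcr p15, 0, <Rt>, c7, c10, 5"
// (Data Memory Barrier), with <Rt> expected to be zero.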
def : ARMPat<(ARMMemBarrierMCR GPR:$zero), (MCR 15, 0, GPR:$zero, 7, 10, 5)>,
Requires<[IsARM, HasV6]>;
// SXT/UXT with no rotate
let AddedComplexity = 16 in {
def : ARMV6Pat<(and GPR:$Src, 0x000000FF), (UXTB GPR:$Src, 0)>;
def : ARMV6Pat<(and GPR:$Src, 0x0000FFFF), (UXTH GPR:$Src, 0)>;
def : ARMV6Pat<(and GPR:$Src, 0x00FF00FF), (UXTB16 GPR:$Src, 0)>;
def : ARMV6Pat<(add GPR:$Rn, (and GPR:$Rm, 0x00FF)),
(UXTAB GPR:$Rn, GPR:$Rm, 0)>;
def : ARMV6Pat<(add GPR:$Rn, (and GPR:$Rm, 0xFFFF)),
(UXTAH GPR:$Rn, GPR:$Rm, 0)>;
}
def : ARMV6Pat<(sext_inreg GPR:$Src, i8), (SXTB GPR:$Src, 0)>;
def : ARMV6Pat<(sext_inreg GPR:$Src, i16), (SXTH GPR:$Src, 0)>;
def : ARMV6Pat<(add GPR:$Rn, (sext_inreg GPR:$Rm, i8)),
(SXTAB GPR:$Rn, GPR:$Rm, 0)>;
def : ARMV6Pat<(add GPR:$Rn, (sext_inreg GPR:$Rm, i16)),
(SXTAH GPR:$Rn, GPR:$Rm, 0)>;
//===----------------------------------------------------------------------===//
// Thumb Support
//
include "ARMInstrThumb.td"
//===----------------------------------------------------------------------===//
// Thumb2 Support
//
include "ARMInstrThumb2.td"
//===----------------------------------------------------------------------===//
// Floating Point Support
//
include "ARMInstrVFP.td"
//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON) Support
//
include "ARMInstrNEON.td"
//===----------------------------------------------------------------------===//
// Assembler aliases
//
// Memory barriers
def : InstAlias<"dmb", (DMB 0xf)>, Requires<[IsARM, HasDB]>;
def : InstAlias<"dsb", (DSB 0xf)>, Requires<[IsARM, HasDB]>;
def : InstAlias<"isb", (ISB 0xf)>, Requires<[IsARM, HasDB]>;
// System instructions
def : MnemonicAlias<"swi", "svc">;
// Load / Store Multiple
def : MnemonicAlias<"ldmfd", "ldm">;
def : MnemonicAlias<"ldmia", "ldm">;
def : MnemonicAlias<"stmfd", "stmdb">;
def : MnemonicAlias<"stmia", "stm">;
def : MnemonicAlias<"stmea", "stm">;
// PKHBT/PKHTB with default shift amount. PKHTB is equivalent to PKHBT when the
// shift amount is zero (i.e., unspecified).
def : InstAlias<"pkhbt${p} $Rd, $Rn, $Rm",
(PKHBT GPR:$Rd, GPR:$Rn, GPR:$Rm, 0, pred:$p)>;
def : InstAlias<"pkhtb${p} $Rd, $Rn, $Rm",
(PKHBT GPR:$Rd, GPR:$Rn, GPR:$Rm, 0, pred:$p)>;
// PUSH/POP aliases for STM/LDM
def : InstAlias<"push${p} $regs",
(STMDB_UPD SP, pred:$p, reglist:$regs)>;
def : InstAlias<"pop${p} $regs",
(LDMIA_UPD SP, pred:$p, reglist:$regs)>;
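// e.g. "push {r4, lr}" assembles as "stmdb sp!, {r4, lr}" and "pop {r4, pc}"
// as "ldmia sp!, {r4, pc}".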
// RSB two-operand forms (optional explicit destination operand)
def : InstAlias<"rsb${s}${p} $Rdn, $imm",
(RSBri GPR:$Rdn, GPR:$Rdn, so_imm:$imm, pred:$p, cc_out:$s)>,
Requires<[IsARM]>;
def : InstAlias<"rsb${s}${p} $Rdn, $Rm",
(RSBrr GPR:$Rdn, GPR:$Rdn, GPR:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM]>;
def : InstAlias<"rsb${s}${p} $Rdn, $shift",
(RSBrsi GPR:$Rdn, GPR:$Rdn, so_reg_imm:$shift, pred:$p,
cc_out:$s)>, Requires<[IsARM]>;
def : InstAlias<"rsb${s}${p} $Rdn, $shift",
(RSBrsr GPR:$Rdn, GPR:$Rdn, so_reg_reg:$shift, pred:$p,
cc_out:$s)>, Requires<[IsARM]>;
// RSC two-operand forms (optional explicit destination operand)
def : InstAlias<"rsc${s}${p} $Rdn, $imm",
(RSCri GPR:$Rdn, GPR:$Rdn, so_imm:$imm, pred:$p, cc_out:$s)>,
Requires<[IsARM]>;
def : InstAlias<"rsc${s}${p} $Rdn, $Rm",
(RSCrr GPR:$Rdn, GPR:$Rdn, GPR:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM]>;
def : InstAlias<"rsc${s}${p} $Rdn, $shift",
(RSCrsi GPR:$Rdn, GPR:$Rdn, so_reg_imm:$shift, pred:$p,
cc_out:$s)>, Requires<[IsARM]>;
def : InstAlias<"rsc${s}${p} $Rdn, $shift",
(RSCrsr GPR:$Rdn, GPR:$Rdn, so_reg_reg:$shift, pred:$p,
cc_out:$s)>, Requires<[IsARM]>;
// SSAT/USAT optional shift operand.
def : InstAlias<"ssat${p} $Rd, $sat_imm, $Rn",
(SSAT GPR:$Rd, imm1_32:$sat_imm, GPR:$Rn, 0, pred:$p)>;
def : InstAlias<"usat${p} $Rd, $sat_imm, $Rn",
(USAT GPR:$Rd, imm0_31:$sat_imm, GPR:$Rn, 0, pred:$p)>;
// Extend instruction optional rotate operand.
def : InstAlias<"sxtab${p} $Rd, $Rn, $Rm",
(SXTAB GPR:$Rd, GPR:$Rn, GPR:$Rm, 0, pred:$p)>;
def : InstAlias<"sxtah${p} $Rd, $Rn, $Rm",
(SXTAH GPR:$Rd, GPR:$Rn, GPR:$Rm, 0, pred:$p)>;
def : InstAlias<"sxtab16${p} $Rd, $Rn, $Rm",
(SXTAB16 GPR:$Rd, GPR:$Rn, GPR:$Rm, 0, pred:$p)>;
def : InstAlias<"sxtb${p} $Rd, $Rm", (SXTB GPR:$Rd, GPR:$Rm, 0, pred:$p)>;
def : InstAlias<"sxtb16${p} $Rd, $Rm", (SXTB16 GPR:$Rd, GPR:$Rm, 0, pred:$p)>;
def : InstAlias<"sxth${p} $Rd, $Rm", (SXTH GPR:$Rd, GPR:$Rm, 0, pred:$p)>;
def : InstAlias<"uxtab${p} $Rd, $Rn, $Rm",
(UXTAB GPR:$Rd, GPR:$Rn, GPR:$Rm, 0, pred:$p)>;
def : InstAlias<"uxtah${p} $Rd, $Rn, $Rm",
(UXTAH GPR:$Rd, GPR:$Rn, GPR:$Rm, 0, pred:$p)>;
def : InstAlias<"uxtab16${p} $Rd, $Rn, $Rm",
(UXTAB16 GPR:$Rd, GPR:$Rn, GPR:$Rm, 0, pred:$p)>;
def : InstAlias<"uxtb${p} $Rd, $Rm", (UXTB GPR:$Rd, GPR:$Rm, 0, pred:$p)>;
def : InstAlias<"uxtb16${p} $Rd, $Rm", (UXTB16 GPR:$Rd, GPR:$Rm, 0, pred:$p)>;
def : InstAlias<"uxth${p} $Rd, $Rm", (UXTH GPR:$Rd, GPR:$Rm, 0, pred:$p)>;