llvm-6502/lib/Target/ARM/ARMInstrInfo.td
Saleem Abdulrasool c90ddb1869 ARM: add VLA extension for WoA Itanium ABI
The armv7-windows-itanium environment is nearly identical to the MSVC ABI. It
has a few divergences, mostly revolving around the use of the Itanium ABI for
C++. VLA support is one of those extensions.

This adds support for proper VLA emission for this environment. This is
somewhat similar to the handling for __chkstk emission on X86 and the large
stack frame emission for ARM. The invocation style for chkstk is still
controlled via the -mcmodel flag to clang.

Make an explicit note that this is an extension.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@210489 91177308-0d34-0410-b5e6-96231b3b80d8
2014-06-09 20:18:42 +00:00


//===- ARMInstrInfo.td - Target Description for ARM Target -*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the ARM instructions in TableGen format.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// ARM specific DAG Nodes.
//
// Type profiles.
def SDT_ARMCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
def SDT_ARMCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>, SDTCisVT<1, i32> ]>;
def SDT_ARMStructByVal : SDTypeProfile<0, 4,
[SDTCisVT<0, i32>, SDTCisVT<1, i32>,
SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
def SDT_ARMSaveCallPC : SDTypeProfile<0, 1, []>;
def SDT_ARMcall : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
def SDT_ARMCMov : SDTypeProfile<1, 3,
[SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
SDTCisVT<3, i32>]>;
def SDT_ARMBrcond : SDTypeProfile<0, 2,
[SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>]>;
def SDT_ARMBrJT : SDTypeProfile<0, 3,
[SDTCisPtrTy<0>, SDTCisVT<1, i32>,
SDTCisVT<2, i32>]>;
def SDT_ARMBr2JT : SDTypeProfile<0, 4,
[SDTCisPtrTy<0>, SDTCisVT<1, i32>,
SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
def SDT_ARMBCC_i64 : SDTypeProfile<0, 6,
[SDTCisVT<0, i32>,
SDTCisVT<1, i32>, SDTCisVT<2, i32>,
SDTCisVT<3, i32>, SDTCisVT<4, i32>,
SDTCisVT<5, OtherVT>]>;
def SDT_ARMAnd : SDTypeProfile<1, 2,
[SDTCisVT<0, i32>, SDTCisVT<1, i32>,
SDTCisVT<2, i32>]>;
def SDT_ARMCmp : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
def SDT_ARMPICAdd : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>,
SDTCisPtrTy<1>, SDTCisVT<2, i32>]>;
def SDT_ARMThreadPointer : SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>;
def SDT_ARMEH_SJLJ_Setjmp : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisPtrTy<1>,
SDTCisInt<2>]>;
def SDT_ARMEH_SJLJ_Longjmp: SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisInt<1>]>;
def SDT_ARMMEMBARRIER : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_ARMPREFETCH : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisSameAs<1, 2>,
SDTCisInt<1>]>;
def SDT_ARMTCRET : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def SDT_ARMBFI : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
def SDT_ARMVMAXNM : SDTypeProfile<1, 2, [SDTCisFP<0>, SDTCisFP<1>, SDTCisFP<2>]>;
def SDT_ARMVMINNM : SDTypeProfile<1, 2, [SDTCisFP<0>, SDTCisFP<1>, SDTCisFP<2>]>;
def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
[SDTCisSameAs<0, 2>,
SDTCisSameAs<0, 3>,
SDTCisInt<0>, SDTCisVT<1, i32>]>;
// SDTBinaryArithWithFlagsInOut - RES1, CPSR = op LHS, RHS, CPSR
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
[SDTCisSameAs<0, 2>,
SDTCisSameAs<0, 3>,
SDTCisInt<0>,
SDTCisVT<1, i32>,
SDTCisVT<4, i32>]>;
def SDT_ARM64bitmlal : SDTypeProfile<2,4, [ SDTCisVT<0, i32>, SDTCisVT<1, i32>,
SDTCisVT<2, i32>, SDTCisVT<3, i32>,
SDTCisVT<4, i32>, SDTCisVT<5, i32> ] >;
def ARMUmlal : SDNode<"ARMISD::UMLAL", SDT_ARM64bitmlal>;
def ARMSmlal : SDNode<"ARMISD::SMLAL", SDT_ARM64bitmlal>;
// Node definitions.
def ARMWrapper : SDNode<"ARMISD::Wrapper", SDTIntUnaryOp>;
def ARMWrapperPIC : SDNode<"ARMISD::WrapperPIC", SDTIntUnaryOp>;
def ARMWrapperJT : SDNode<"ARMISD::WrapperJT", SDTIntBinOp>;
def ARMcallseq_start : SDNode<"ISD::CALLSEQ_START", SDT_ARMCallSeqStart,
[SDNPHasChain, SDNPSideEffect, SDNPOutGlue]>;
def ARMcallseq_end : SDNode<"ISD::CALLSEQ_END", SDT_ARMCallSeqEnd,
[SDNPHasChain, SDNPSideEffect,
SDNPOptInGlue, SDNPOutGlue]>;
def ARMcopystructbyval : SDNode<"ARMISD::COPY_STRUCT_BYVAL" ,
SDT_ARMStructByVal,
[SDNPHasChain, SDNPInGlue, SDNPOutGlue,
SDNPMayStore, SDNPMayLoad]>;
def ARMcall : SDNode<"ARMISD::CALL", SDT_ARMcall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def ARMcall_pred : SDNode<"ARMISD::CALL_PRED", SDT_ARMcall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def ARMcall_nolink : SDNode<"ARMISD::CALL_NOLINK", SDT_ARMcall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def ARMretflag : SDNode<"ARMISD::RET_FLAG", SDTNone,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def ARMintretflag : SDNode<"ARMISD::INTRET_FLAG", SDT_ARMcall,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def ARMcmov : SDNode<"ARMISD::CMOV", SDT_ARMCMov,
[SDNPInGlue]>;
def ARMbrcond : SDNode<"ARMISD::BRCOND", SDT_ARMBrcond,
[SDNPHasChain, SDNPInGlue, SDNPOutGlue]>;
def ARMbrjt : SDNode<"ARMISD::BR_JT", SDT_ARMBrJT,
[SDNPHasChain]>;
def ARMbr2jt : SDNode<"ARMISD::BR2_JT", SDT_ARMBr2JT,
[SDNPHasChain]>;
def ARMBcci64 : SDNode<"ARMISD::BCC_i64", SDT_ARMBCC_i64,
[SDNPHasChain]>;
def ARMcmp : SDNode<"ARMISD::CMP", SDT_ARMCmp,
[SDNPOutGlue]>;
def ARMcmn : SDNode<"ARMISD::CMN", SDT_ARMCmp,
[SDNPOutGlue]>;
def ARMcmpZ : SDNode<"ARMISD::CMPZ", SDT_ARMCmp,
[SDNPOutGlue, SDNPCommutative]>;
def ARMpic_add : SDNode<"ARMISD::PIC_ADD", SDT_ARMPICAdd>;
def ARMsrl_flag : SDNode<"ARMISD::SRL_FLAG", SDTIntUnaryOp, [SDNPOutGlue]>;
def ARMsra_flag : SDNode<"ARMISD::SRA_FLAG", SDTIntUnaryOp, [SDNPOutGlue]>;
def ARMrrx : SDNode<"ARMISD::RRX" , SDTIntUnaryOp, [SDNPInGlue ]>;
def ARMaddc : SDNode<"ARMISD::ADDC", SDTBinaryArithWithFlags,
[SDNPCommutative]>;
def ARMsubc : SDNode<"ARMISD::SUBC", SDTBinaryArithWithFlags>;
def ARMadde : SDNode<"ARMISD::ADDE", SDTBinaryArithWithFlagsInOut>;
def ARMsube : SDNode<"ARMISD::SUBE", SDTBinaryArithWithFlagsInOut>;
def ARMthread_pointer: SDNode<"ARMISD::THREAD_POINTER", SDT_ARMThreadPointer>;
def ARMeh_sjlj_setjmp: SDNode<"ARMISD::EH_SJLJ_SETJMP",
SDT_ARMEH_SJLJ_Setjmp,
[SDNPHasChain, SDNPSideEffect]>;
def ARMeh_sjlj_longjmp: SDNode<"ARMISD::EH_SJLJ_LONGJMP",
SDT_ARMEH_SJLJ_Longjmp,
[SDNPHasChain, SDNPSideEffect]>;
def ARMMemBarrierMCR : SDNode<"ARMISD::MEMBARRIER_MCR", SDT_ARMMEMBARRIER,
[SDNPHasChain, SDNPSideEffect]>;
def ARMPreload : SDNode<"ARMISD::PRELOAD", SDT_ARMPREFETCH,
[SDNPHasChain, SDNPMayLoad, SDNPMayStore]>;
def ARMrbit : SDNode<"ARMISD::RBIT", SDTIntUnaryOp>;
def ARMtcret : SDNode<"ARMISD::TC_RETURN", SDT_ARMTCRET,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def ARMbfi : SDNode<"ARMISD::BFI", SDT_ARMBFI>;
def ARMvmaxnm : SDNode<"ARMISD::VMAXNM", SDT_ARMVMAXNM, []>;
def ARMvminnm : SDNode<"ARMISD::VMINNM", SDT_ARMVMINNM, []>;
//===----------------------------------------------------------------------===//
// ARM Instruction Predicate Definitions.
//
def HasV4T : Predicate<"Subtarget->hasV4TOps()">,
AssemblerPredicate<"HasV4TOps", "armv4t">;
def NoV4T : Predicate<"!Subtarget->hasV4TOps()">;
def HasV5T : Predicate<"Subtarget->hasV5TOps()">,
AssemblerPredicate<"HasV5TOps", "armv5t">;
def HasV5TE : Predicate<"Subtarget->hasV5TEOps()">,
AssemblerPredicate<"HasV5TEOps", "armv5te">;
def HasV6 : Predicate<"Subtarget->hasV6Ops()">,
AssemblerPredicate<"HasV6Ops", "armv6">;
def NoV6 : Predicate<"!Subtarget->hasV6Ops()">;
def HasV6M : Predicate<"Subtarget->hasV6MOps()">,
AssemblerPredicate<"HasV6MOps",
"armv6m or armv6t2">;
def HasV6T2 : Predicate<"Subtarget->hasV6T2Ops()">,
AssemblerPredicate<"HasV6T2Ops", "armv6t2">;
def NoV6T2 : Predicate<"!Subtarget->hasV6T2Ops()">;
def HasV7 : Predicate<"Subtarget->hasV7Ops()">,
AssemblerPredicate<"HasV7Ops", "armv7">;
def HasV8 : Predicate<"Subtarget->hasV8Ops()">,
AssemblerPredicate<"HasV8Ops", "armv8">;
def PreV8 : Predicate<"!Subtarget->hasV8Ops()">,
AssemblerPredicate<"!HasV8Ops", "armv7 or earlier">;
def NoVFP : Predicate<"!Subtarget->hasVFP2()">;
def HasVFP2 : Predicate<"Subtarget->hasVFP2()">,
AssemblerPredicate<"FeatureVFP2", "VFP2">;
def HasVFP3 : Predicate<"Subtarget->hasVFP3()">,
AssemblerPredicate<"FeatureVFP3", "VFP3">;
def HasVFP4 : Predicate<"Subtarget->hasVFP4()">,
AssemblerPredicate<"FeatureVFP4", "VFP4">;
def HasDPVFP : Predicate<"!Subtarget->isFPOnlySP()">,
AssemblerPredicate<"!FeatureVFPOnlySP",
"double precision VFP">;
def HasFPARMv8 : Predicate<"Subtarget->hasFPARMv8()">,
AssemblerPredicate<"FeatureFPARMv8", "FPARMv8">;
def HasNEON : Predicate<"Subtarget->hasNEON()">,
AssemblerPredicate<"FeatureNEON", "NEON">;
def HasCrypto : Predicate<"Subtarget->hasCrypto()">,
AssemblerPredicate<"FeatureCrypto", "crypto">;
def HasCRC : Predicate<"Subtarget->hasCRC()">,
AssemblerPredicate<"FeatureCRC", "crc">;
def HasFP16 : Predicate<"Subtarget->hasFP16()">,
AssemblerPredicate<"FeatureFP16","half-float">;
def HasDivide : Predicate<"Subtarget->hasDivide()">,
AssemblerPredicate<"FeatureHWDiv", "divide in THUMB">;
def HasDivideInARM : Predicate<"Subtarget->hasDivideInARMMode()">,
AssemblerPredicate<"FeatureHWDivARM", "divide in ARM">;
def HasT2ExtractPack : Predicate<"Subtarget->hasT2ExtractPack()">,
AssemblerPredicate<"FeatureT2XtPk",
"pack/extract">;
def HasThumb2DSP : Predicate<"Subtarget->hasThumb2DSP()">,
AssemblerPredicate<"FeatureDSPThumb2",
"thumb2-dsp">;
def HasDB : Predicate<"Subtarget->hasDataBarrier()">,
AssemblerPredicate<"FeatureDB",
"data-barriers">;
def HasMP : Predicate<"Subtarget->hasMPExtension()">,
AssemblerPredicate<"FeatureMP",
"mp-extensions">;
def HasTrustZone : Predicate<"Subtarget->hasTrustZone()">,
AssemblerPredicate<"FeatureTrustZone",
"TrustZone">;
def HasZCZ : Predicate<"Subtarget->hasZeroCycleZeroing()">;
def UseNEONForFP : Predicate<"Subtarget->useNEONForSinglePrecisionFP()">;
def DontUseNEONForFP : Predicate<"!Subtarget->useNEONForSinglePrecisionFP()">;
def IsThumb : Predicate<"Subtarget->isThumb()">,
AssemblerPredicate<"ModeThumb", "thumb">;
def IsThumb1Only : Predicate<"Subtarget->isThumb1Only()">;
def IsThumb2 : Predicate<"Subtarget->isThumb2()">,
AssemblerPredicate<"ModeThumb,FeatureThumb2",
"thumb2">;
def IsMClass : Predicate<"Subtarget->isMClass()">,
AssemblerPredicate<"FeatureMClass", "armv*m">;
def IsNotMClass : Predicate<"!Subtarget->isMClass()">,
AssemblerPredicate<"!FeatureMClass",
"!armv*m">;
def IsARM : Predicate<"!Subtarget->isThumb()">,
AssemblerPredicate<"!ModeThumb", "arm-mode">;
def IsIOS : Predicate<"Subtarget->isTargetIOS()">;
def IsNotIOS : Predicate<"!Subtarget->isTargetIOS()">;
def IsMachO : Predicate<"Subtarget->isTargetMachO()">;
def IsNotMachO : Predicate<"!Subtarget->isTargetMachO()">;
def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
def UseNaClTrap : Predicate<"Subtarget->useNaClTrap()">,
AssemblerPredicate<"FeatureNaClTrap", "NaCl">;
def DontUseNaClTrap : Predicate<"!Subtarget->useNaClTrap()">;
// FIXME: Eventually this will be just "hasV6T2Ops".
def UseMovt : Predicate<"Subtarget->useMovt()">;
def DontUseMovt : Predicate<"!Subtarget->useMovt()">;
def UseFPVMLx : Predicate<"Subtarget->useFPVMLx()">;
def UseMulOps : Predicate<"Subtarget->useMulOps()">;
// Prefer fused MAC for fp mul + add over fp VMLA / VMLS if they are available.
// But only select them if more precision in FP computation is allowed.
// Do not use them for Darwin platforms.
def UseFusedMAC : Predicate<"(TM.Options.AllowFPOpFusion =="
" FPOpFusion::Fast && "
" Subtarget->hasVFP4()) && "
"!Subtarget->isTargetDarwin()">;
def DontUseFusedMAC : Predicate<"!(TM.Options.AllowFPOpFusion =="
" FPOpFusion::Fast &&"
" Subtarget->hasVFP4()) || "
"Subtarget->isTargetDarwin()">;
// VGETLNi32 is microcoded on Swift - prefer VMOV.
def HasFastVGETLNi32 : Predicate<"!Subtarget->isSwift()">;
def HasSlowVGETLNi32 : Predicate<"Subtarget->isSwift()">;
// VDUP.32 is microcoded on Swift - prefer VMOV.
def HasFastVDUP32 : Predicate<"!Subtarget->isSwift()">;
def HasSlowVDUP32 : Predicate<"Subtarget->isSwift()">;
// Cortex-A9 prefers VMOVSR to VMOVDRR even when using NEON for scalar FP, as
// this allows more effective execution domain optimization. See
// setExecutionDomain().
def UseVMOVSR : Predicate<"Subtarget->isCortexA9() || !Subtarget->useNEONForSinglePrecisionFP()">;
def DontUseVMOVSR : Predicate<"!Subtarget->isCortexA9() && Subtarget->useNEONForSinglePrecisionFP()">;
def IsLE : Predicate<"getTargetLowering()->isLittleEndian()">;
def IsBE : Predicate<"getTargetLowering()->isBigEndian()">;
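// Note (illustrative): these Predicate records are attached to instruction
// definitions via Requires<[...]> (e.g. Requires<[IsARM, HasV6]> later in this
// file). The Predicate string gates instruction selection, while the
// AssemblerPredicate feature/description pair is what the assembly matcher
// uses to reject and diagnose instructions on unsupported subtargets.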
//===----------------------------------------------------------------------===//
// ARM Flag Definitions.
class RegConstraint<string C> {
string Constraints = C;
}
//===----------------------------------------------------------------------===//
// ARM specific transformation functions and pattern fragments.
//
// imm_neg_XFORM - Return the negation of an i32 immediate value.
def imm_neg_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(-(int)N->getZExtValue(), MVT::i32);
}]>;
// imm_not_XFORM - Return the complement of an i32 immediate value.
def imm_not_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(~(int)N->getZExtValue(), MVT::i32);
}]>;
/// imm16_31 predicate - True if the 32-bit immediate is in the range [16,31].
def imm16_31 : ImmLeaf<i32, [{
return (int32_t)Imm >= 16 && (int32_t)Imm < 32;
}]>;
def so_imm_neg_asmoperand : AsmOperandClass { let Name = "ARMSOImmNeg"; }
def so_imm_neg : Operand<i32>, PatLeaf<(imm), [{
unsigned Value = -(unsigned)N->getZExtValue();
return Value && ARM_AM::getSOImmVal(Value) != -1;
}], imm_neg_XFORM> {
let ParserMatchClass = so_imm_neg_asmoperand;
}
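// For example, so_imm_neg allows a pattern such as (add GPR:$Rn, #-42) to be
// selected as "sub Rn, #42": the PatLeaf checks that the negated constant is a
// valid so_imm, and imm_neg_XFORM rewrites the immediate for the SUB encoding.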
// Note: this pattern doesn't require an encoder method and such, as it's
// only used on aliases (Pat<> and InstAlias<>). The actual encoding
// is handled by the destination instructions, which use so_imm.
def so_imm_not_asmoperand : AsmOperandClass { let Name = "ARMSOImmNot"; }
def so_imm_not : Operand<i32>, PatLeaf<(imm), [{
return ARM_AM::getSOImmVal(~(uint32_t)N->getZExtValue()) != -1;
}], imm_not_XFORM> {
let ParserMatchClass = so_imm_not_asmoperand;
}
// sext_16_node predicate - True if the SDNode is sign-extended 16 or more bits.
def sext_16_node : PatLeaf<(i32 GPR:$a), [{
return CurDAG->ComputeNumSignBits(SDValue(N,0)) >= 17;
}]>;
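// For example, a value produced by sign-extending an i16 has bit 15 replicated
// through bits 31-16, i.e. at least 17 identical sign bits, which is what the
// ComputeNumSignBits(...) >= 17 check above tests for.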
/// Split a 32-bit immediate into two 16-bit parts.
def hi16 : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant((uint32_t)N->getZExtValue() >> 16, MVT::i32);
}]>;
def lo16AllZero : PatLeaf<(i32 imm), [{
// Returns true if all low 16-bits are 0.
return (((uint32_t)N->getZExtValue()) & 0xFFFFUL) == 0;
}], hi16>;
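// For example, hi16 turns 0x12345678 into the target constant 0x1234, and
// lo16AllZero matches 0x12340000 (low half all zero), handing its high half to
// the selected instruction via the hi16 transform.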
class BinOpWithFlagFrag<dag res> :
PatFrag<(ops node:$LHS, node:$RHS, node:$FLAG), res>;
class BinOpFrag<dag res> : PatFrag<(ops node:$LHS, node:$RHS), res>;
class UnOpFrag <dag res> : PatFrag<(ops node:$Src), res>;
// An 'and' node with a single use.
def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
return N->hasOneUse();
}]>;
// An 'xor' node with a single use.
def xor_su : PatFrag<(ops node:$lhs, node:$rhs), (xor node:$lhs, node:$rhs), [{
return N->hasOneUse();
}]>;
// An 'fmul' node with a single use.
def fmul_su : PatFrag<(ops node:$lhs, node:$rhs), (fmul node:$lhs, node:$rhs),[{
return N->hasOneUse();
}]>;
// An 'fadd' node which checks for single non-hazardous use.
def fadd_mlx : PatFrag<(ops node:$lhs, node:$rhs),(fadd node:$lhs, node:$rhs),[{
return hasNoVMLxHazardUse(N);
}]>;
// An 'fsub' node which checks for single non-hazardous use.
def fsub_mlx : PatFrag<(ops node:$lhs, node:$rhs),(fsub node:$lhs, node:$rhs),[{
return hasNoVMLxHazardUse(N);
}]>;
//===----------------------------------------------------------------------===//
// Operand Definitions.
//
// Immediate operands with a shared generic asm render method.
class ImmAsmOperand : AsmOperandClass { let RenderMethod = "addImmOperands"; }
// Branch target.
// FIXME: rename brtarget to t2_brtarget
def brtarget : Operand<OtherVT> {
let EncoderMethod = "getBranchTargetOpValue";
let OperandType = "OPERAND_PCREL";
let DecoderMethod = "DecodeT2BROperand";
}
// FIXME: get rid of this one?
def uncondbrtarget : Operand<OtherVT> {
let EncoderMethod = "getUnconditionalBranchTargetOpValue";
let OperandType = "OPERAND_PCREL";
}
// Branch target for ARM. Handles conditional/unconditional
def br_target : Operand<OtherVT> {
let EncoderMethod = "getARMBranchTargetOpValue";
let OperandType = "OPERAND_PCREL";
}
// Call target.
// FIXME: rename bltarget to t2_bl_target?
def bltarget : Operand<i32> {
// Encoded the same as branch targets.
let EncoderMethod = "getBranchTargetOpValue";
let OperandType = "OPERAND_PCREL";
}
// Call target for ARM. Handles conditional/unconditional
// FIXME: rename bl_target to t2_bltarget?
def bl_target : Operand<i32> {
let EncoderMethod = "getARMBLTargetOpValue";
let OperandType = "OPERAND_PCREL";
}
def blx_target : Operand<i32> {
let EncoderMethod = "getARMBLXTargetOpValue";
let OperandType = "OPERAND_PCREL";
}
// A list of registers separated by comma. Used by load/store multiple.
def RegListAsmOperand : AsmOperandClass { let Name = "RegList"; }
def reglist : Operand<i32> {
let EncoderMethod = "getRegisterListOpValue";
let ParserMatchClass = RegListAsmOperand;
let PrintMethod = "printRegisterList";
let DecoderMethod = "DecodeRegListOperand";
}
def GPRPairOp : RegisterOperand<GPRPair, "printGPRPairOperand">;
def DPRRegListAsmOperand : AsmOperandClass { let Name = "DPRRegList"; }
def dpr_reglist : Operand<i32> {
let EncoderMethod = "getRegisterListOpValue";
let ParserMatchClass = DPRRegListAsmOperand;
let PrintMethod = "printRegisterList";
let DecoderMethod = "DecodeDPRRegListOperand";
}
def SPRRegListAsmOperand : AsmOperandClass { let Name = "SPRRegList"; }
def spr_reglist : Operand<i32> {
let EncoderMethod = "getRegisterListOpValue";
let ParserMatchClass = SPRRegListAsmOperand;
let PrintMethod = "printRegisterList";
let DecoderMethod = "DecodeSPRRegListOperand";
}
// An operand for the CONSTPOOL_ENTRY pseudo-instruction.
def cpinst_operand : Operand<i32> {
let PrintMethod = "printCPInstOperand";
}
// Local PC labels.
def pclabel : Operand<i32> {
let PrintMethod = "printPCLabel";
}
// ADR instruction labels.
def AdrLabelAsmOperand : AsmOperandClass { let Name = "AdrLabel"; }
def adrlabel : Operand<i32> {
let EncoderMethod = "getAdrLabelOpValue";
let ParserMatchClass = AdrLabelAsmOperand;
let PrintMethod = "printAdrLabelOperand<0>";
}
def neon_vcvt_imm32 : Operand<i32> {
let EncoderMethod = "getNEONVcvtImm32OpValue";
let DecoderMethod = "DecodeVCVTImmOperand";
}
// rot_imm: An integer that encodes a rotate amount. Must be 8, 16, or 24.
def rot_imm_XFORM: SDNodeXForm<imm, [{
switch (N->getZExtValue()){
default: assert(0);
case 0: return CurDAG->getTargetConstant(0, MVT::i32);
case 8: return CurDAG->getTargetConstant(1, MVT::i32);
case 16: return CurDAG->getTargetConstant(2, MVT::i32);
case 24: return CurDAG->getTargetConstant(3, MVT::i32);
}
}]>;
def RotImmAsmOperand : AsmOperandClass {
let Name = "RotImm";
let ParserMethod = "parseRotImm";
}
def rot_imm : Operand<i32>, PatLeaf<(i32 imm), [{
int32_t v = N->getZExtValue();
return v == 8 || v == 16 || v == 24; }],
rot_imm_XFORM> {
let PrintMethod = "printRotImmOperand";
let ParserMatchClass = RotImmAsmOperand;
}
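// For example, "uxtb r0, r1, ror #16" zero-extends byte 2 of r1; the #16
// rotation is encoded in the 2-bit rot field as 16/8 == 2, per rot_imm_XFORM
// above.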
// shift_imm: An integer that encodes a shift amount and the type of shift
// (asr or lsl). The 6-bit immediate encodes as:
//    {5}     0 ==> lsl
//            1     asr
//    {4-0}   imm5  shift amount.
// asr #32 encoded as imm5 == 0.
def ShifterImmAsmOperand : AsmOperandClass {
let Name = "ShifterImm";
let ParserMethod = "parseShifterImm";
}
def shift_imm : Operand<i32> {
let PrintMethod = "printShiftImmOperand";
let ParserMatchClass = ShifterImmAsmOperand;
}
// shifter_operand operands: so_reg_reg, so_reg_imm, and so_imm.
def ShiftedRegAsmOperand : AsmOperandClass { let Name = "RegShiftedReg"; }
def so_reg_reg : Operand<i32>, // reg reg imm
ComplexPattern<i32, 3, "SelectRegShifterOperand",
[shl, srl, sra, rotr]> {
let EncoderMethod = "getSORegRegOpValue";
let PrintMethod = "printSORegRegOperand";
let DecoderMethod = "DecodeSORegRegOperand";
let ParserMatchClass = ShiftedRegAsmOperand;
let MIOperandInfo = (ops GPRnopc, GPRnopc, i32imm);
}
def ShiftedImmAsmOperand : AsmOperandClass { let Name = "RegShiftedImm"; }
def so_reg_imm : Operand<i32>, // reg imm
ComplexPattern<i32, 2, "SelectImmShifterOperand",
[shl, srl, sra, rotr]> {
let EncoderMethod = "getSORegImmOpValue";
let PrintMethod = "printSORegImmOperand";
let DecoderMethod = "DecodeSORegImmOperand";
let ParserMatchClass = ShiftedImmAsmOperand;
let MIOperandInfo = (ops GPR, i32imm);
}
// FIXME: Does this need to be distinct from so_reg?
def shift_so_reg_reg : Operand<i32>, // reg reg imm
ComplexPattern<i32, 3, "SelectShiftRegShifterOperand",
[shl,srl,sra,rotr]> {
let EncoderMethod = "getSORegRegOpValue";
let PrintMethod = "printSORegRegOperand";
let DecoderMethod = "DecodeSORegRegOperand";
let ParserMatchClass = ShiftedRegAsmOperand;
let MIOperandInfo = (ops GPR, GPR, i32imm);
}
// FIXME: Does this need to be distinct from so_reg?
def shift_so_reg_imm : Operand<i32>, // reg imm
ComplexPattern<i32, 2, "SelectShiftImmShifterOperand",
[shl,srl,sra,rotr]> {
let EncoderMethod = "getSORegImmOpValue";
let PrintMethod = "printSORegImmOperand";
let DecoderMethod = "DecodeSORegImmOperand";
let ParserMatchClass = ShiftedImmAsmOperand;
let MIOperandInfo = (ops GPR, i32imm);
}
// so_imm - Match a 32-bit shifter_operand immediate operand, which is an
// 8-bit immediate rotated by an arbitrary number of bits.
def SOImmAsmOperand: ImmAsmOperand { let Name = "ARMSOImm"; }
def so_imm : Operand<i32>, ImmLeaf<i32, [{
return ARM_AM::getSOImmVal(Imm) != -1;
}]> {
let EncoderMethod = "getSOImmOpValue";
let ParserMatchClass = SOImmAsmOperand;
let DecoderMethod = "DecodeSOImmOperand";
}
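// For example, 0x000000FF, 0xFF000000 (0xFF rotated right by 8) and 0x0003FC00
// are all valid so_imm values, whereas 0x00000101 is not: its set bits do not
// fit in any 8-bit window under an even rotation.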
// Break so_imm's up into two pieces. This handles immediates with up to 16
// bits set in them. This uses so_imm2part to match and so_imm2part_[12] to
// get the first/second pieces.
def so_imm2part : PatLeaf<(imm), [{
return ARM_AM::isSOImmTwoPartVal((unsigned)N->getZExtValue());
}]>;
/// arm_i32imm - True if MOVW/MOVT can be used (+V6T2); otherwise true only if
/// the value is a two-part so_imm (so_imm2part).
def arm_i32imm : PatLeaf<(imm), [{
if (Subtarget->useMovt())
return true;
return ARM_AM::isSOImmTwoPartVal((unsigned)N->getZExtValue());
}]>;
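// For example, 0x00FF00FF is a two-part value (0x000000FF + 0x00FF0000, each a
// valid so_imm) and can be built with two data-processing instructions. When
// MOVW/MOVT are usable (useMovt), arm_i32imm accepts any 32-bit constant,
// since every constant can then be materialized in at most two instructions.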
/// imm0_1 predicate - Immediate in the range [0,1].
def Imm0_1AsmOperand: ImmAsmOperand { let Name = "Imm0_1"; }
def imm0_1 : Operand<i32> { let ParserMatchClass = Imm0_1AsmOperand; }
/// imm0_3 predicate - Immediate in the range [0,3].
def Imm0_3AsmOperand: ImmAsmOperand { let Name = "Imm0_3"; }
def imm0_3 : Operand<i32> { let ParserMatchClass = Imm0_3AsmOperand; }
/// imm0_7 predicate - Immediate in the range [0,7].
def Imm0_7AsmOperand: ImmAsmOperand { let Name = "Imm0_7"; }
def imm0_7 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 8;
}]> {
let ParserMatchClass = Imm0_7AsmOperand;
}
/// imm8 predicate - Immediate is exactly 8.
def Imm8AsmOperand: ImmAsmOperand { let Name = "Imm8"; }
def imm8 : Operand<i32>, ImmLeaf<i32, [{ return Imm == 8; }]> {
let ParserMatchClass = Imm8AsmOperand;
}
/// imm16 predicate - Immediate is exactly 16.
def Imm16AsmOperand: ImmAsmOperand { let Name = "Imm16"; }
def imm16 : Operand<i32>, ImmLeaf<i32, [{ return Imm == 16; }]> {
let ParserMatchClass = Imm16AsmOperand;
}
/// imm32 predicate - Immediate is exactly 32.
def Imm32AsmOperand: ImmAsmOperand { let Name = "Imm32"; }
def imm32 : Operand<i32>, ImmLeaf<i32, [{ return Imm == 32; }]> {
let ParserMatchClass = Imm32AsmOperand;
}
/// imm1_7 predicate - Immediate in the range [1,7].
def Imm1_7AsmOperand: ImmAsmOperand { let Name = "Imm1_7"; }
def imm1_7 : Operand<i32>, ImmLeaf<i32, [{ return Imm > 0 && Imm < 8; }]> {
let ParserMatchClass = Imm1_7AsmOperand;
}
/// imm1_15 predicate - Immediate in the range [1,15].
def Imm1_15AsmOperand: ImmAsmOperand { let Name = "Imm1_15"; }
def imm1_15 : Operand<i32>, ImmLeaf<i32, [{ return Imm > 0 && Imm < 16; }]> {
let ParserMatchClass = Imm1_15AsmOperand;
}
/// imm1_31 predicate - Immediate in the range [1,31].
def Imm1_31AsmOperand: ImmAsmOperand { let Name = "Imm1_31"; }
def imm1_31 : Operand<i32>, ImmLeaf<i32, [{ return Imm > 0 && Imm < 32; }]> {
let ParserMatchClass = Imm1_31AsmOperand;
}
/// imm0_15 predicate - Immediate in the range [0,15].
def Imm0_15AsmOperand: ImmAsmOperand {
let Name = "Imm0_15";
let DiagnosticType = "ImmRange0_15";
}
def imm0_15 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 16;
}]> {
let ParserMatchClass = Imm0_15AsmOperand;
}
/// imm0_31 predicate - True if the 32-bit immediate is in the range [0,31].
def Imm0_31AsmOperand: ImmAsmOperand { let Name = "Imm0_31"; }
def imm0_31 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 32;
}]> {
let ParserMatchClass = Imm0_31AsmOperand;
}
/// imm0_32 predicate - True if the 32-bit immediate is in the range [0,32].
def Imm0_32AsmOperand: ImmAsmOperand { let Name = "Imm0_32"; }
def imm0_32 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 32;
}]> {
let ParserMatchClass = Imm0_32AsmOperand;
}
/// imm0_63 predicate - True if the 32-bit immediate is in the range [0,63].
def Imm0_63AsmOperand: ImmAsmOperand { let Name = "Imm0_63"; }
def imm0_63 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 64;
}]> {
let ParserMatchClass = Imm0_63AsmOperand;
}
/// imm0_239 predicate - Immediate in the range [0,239].
def Imm0_239AsmOperand : ImmAsmOperand {
let Name = "Imm0_239";
let DiagnosticType = "ImmRange0_239";
}
def imm0_239 : Operand<i32>, ImmLeaf<i32, [{ return Imm >= 0 && Imm < 240; }]> {
let ParserMatchClass = Imm0_239AsmOperand;
}
/// imm0_255 predicate - Immediate in the range [0,255].
def Imm0_255AsmOperand : ImmAsmOperand { let Name = "Imm0_255"; }
def imm0_255 : Operand<i32>, ImmLeaf<i32, [{ return Imm >= 0 && Imm < 256; }]> {
let ParserMatchClass = Imm0_255AsmOperand;
}
/// imm0_65535 - An immediate in the range [0,65535].
def Imm0_65535AsmOperand: ImmAsmOperand { let Name = "Imm0_65535"; }
def imm0_65535 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 65536;
}]> {
let ParserMatchClass = Imm0_65535AsmOperand;
}
// imm0_65535_neg - An immediate whose negative value is in the range [0,65535].
def imm0_65535_neg : Operand<i32>, ImmLeaf<i32, [{
return -Imm >= 0 && -Imm < 65536;
}]>;
// imm0_65535_expr - For movt/movw - 16-bit immediate that can also reference
// a relocatable expression.
//
// FIXME: This really needs a Thumb version separate from the ARM version.
// While the range is the same, and can thus use the same match class,
// the encoding is different so it should have a different encoder method.
def Imm0_65535ExprAsmOperand: ImmAsmOperand { let Name = "Imm0_65535Expr"; }
def imm0_65535_expr : Operand<i32> {
let EncoderMethod = "getHiLo16ImmOpValue";
let ParserMatchClass = Imm0_65535ExprAsmOperand;
}
def Imm256_65535ExprAsmOperand: ImmAsmOperand { let Name = "Imm256_65535Expr"; }
def imm256_65535_expr : Operand<i32> {
let ParserMatchClass = Imm256_65535ExprAsmOperand;
}
/// imm24b - True if the 32-bit immediate is encodable in 24 bits.
def Imm24bitAsmOperand: ImmAsmOperand { let Name = "Imm24bit"; }
def imm24b : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm <= 0xffffff;
}]> {
let ParserMatchClass = Imm24bitAsmOperand;
}
/// bf_inv_mask_imm predicate - An AND mask to clear an arbitrary width bitfield
/// e.g., 0xf000ffff
def BitfieldAsmOperand : AsmOperandClass {
let Name = "Bitfield";
let ParserMethod = "parseBitfield";
}
def bf_inv_mask_imm : Operand<i32>,
PatLeaf<(imm), [{
return ARM::isBitFieldInvertedMask(N->getZExtValue());
}] > {
let EncoderMethod = "getBitfieldInvertedMaskOpValue";
let PrintMethod = "printBitfieldInvMaskImmOperand";
let DecoderMethod = "DecodeBitfieldMaskOperand";
let ParserMatchClass = BitfieldAsmOperand;
}
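// For example, the mask 0xF000FFFF clears the 12-bit field at bits [27:16], so
// an 'and' with that mask can be selected as "bfc Rd, #16, #12".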
def imm1_32_XFORM: SDNodeXForm<imm, [{
return CurDAG->getTargetConstant((int)N->getZExtValue() - 1, MVT::i32);
}]>;
def Imm1_32AsmOperand: AsmOperandClass { let Name = "Imm1_32"; }
def imm1_32 : Operand<i32>, PatLeaf<(imm), [{
uint64_t Imm = N->getZExtValue();
return Imm > 0 && Imm <= 32;
}],
imm1_32_XFORM> {
let PrintMethod = "printImmPlusOneOperand";
let ParserMatchClass = Imm1_32AsmOperand;
}
def imm1_16_XFORM: SDNodeXForm<imm, [{
return CurDAG->getTargetConstant((int)N->getZExtValue() - 1, MVT::i32);
}]>;
def Imm1_16AsmOperand: AsmOperandClass { let Name = "Imm1_16"; }
def imm1_16 : Operand<i32>, PatLeaf<(imm), [{ return Imm > 0 && Imm <= 16; }],
imm1_16_XFORM> {
let PrintMethod = "printImmPlusOneOperand";
let ParserMatchClass = Imm1_16AsmOperand;
}
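// imm1_32 and imm1_16 describe fields encoded as "value minus one": the XFORMs
// above subtract one for the encoding and printImmPlusOneOperand adds it back
// when printing (used, e.g., by the SSAT/SSAT16 saturate immediates).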
// Define ARM specific addressing modes.
// addrmode_imm12 := reg +/- imm12
//
def MemImm12OffsetAsmOperand : AsmOperandClass { let Name = "MemImm12Offset"; }
class AddrMode_Imm12 : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrModeImm12", []> {
// 12-bit immediate operand. Note that instructions using this encode
// #0 and #-0 differently. We flag #-0 as the magic value INT32_MIN. All other
// immediate values are as normal.
let EncoderMethod = "getAddrModeImm12OpValue";
let DecoderMethod = "DecodeAddrModeImm12Operand";
let ParserMatchClass = MemImm12OffsetAsmOperand;
let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm);
}
def addrmode_imm12 : AddrMode_Imm12 {
let PrintMethod = "printAddrModeImm12Operand<false>";
}
def addrmode_imm12_pre : AddrMode_Imm12 {
let PrintMethod = "printAddrModeImm12Operand<true>";
}
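// For example, "ldr r0, [r1, #20]" uses addrmode_imm12 with base r1 and offset
// +20, while "ldr r0, [r1, #-0]" must stay distinct from #+0, which is why
// negative zero is carried as the INT32_MIN sentinel described above.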
// ldst_so_reg := reg +/- reg shop imm
//
def MemRegOffsetAsmOperand : AsmOperandClass { let Name = "MemRegOffset"; }
def ldst_so_reg : Operand<i32>,
ComplexPattern<i32, 3, "SelectLdStSOReg", []> {
let EncoderMethod = "getLdStSORegOpValue";
// FIXME: Simplify the printer
let PrintMethod = "printAddrMode2Operand";
let DecoderMethod = "DecodeSORegMemOperand";
let ParserMatchClass = MemRegOffsetAsmOperand;
let MIOperandInfo = (ops GPR:$base, GPRnopc:$offsreg, i32imm:$shift);
}
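// For example, "ldr r0, [r1, r2, lsl #2]" is a ldst_so_reg address: base r1,
// offset register r2, with the shift kind and amount packed into the $shift
// immediate operand.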
// postidx_imm8 := +/- [0,255]
//
// 9 bit value:
//   {8}     1 if imm8 is non-negative, 0 otherwise.
//   {7-0}   [0,255] imm8 value.
def PostIdxImm8AsmOperand : AsmOperandClass { let Name = "PostIdxImm8"; }
def postidx_imm8 : Operand<i32> {
let PrintMethod = "printPostIdxImm8Operand";
let ParserMatchClass = PostIdxImm8AsmOperand;
let MIOperandInfo = (ops i32imm);
}
// postidx_imm8s4 := +/- [0,1020]
//
// 9 bit value:
//   {8}     1 if imm8 is non-negative, 0 otherwise.
//   {7-0}   [0,255] imm8 value, scaled by 4.
def PostIdxImm8s4AsmOperand : AsmOperandClass { let Name = "PostIdxImm8s4"; }
def postidx_imm8s4 : Operand<i32> {
let PrintMethod = "printPostIdxImm8s4Operand";
let ParserMatchClass = PostIdxImm8s4AsmOperand;
let MIOperandInfo = (ops i32imm);
}
// postidx_reg := +/- reg
//
def PostIdxRegAsmOperand : AsmOperandClass {
let Name = "PostIdxReg";
let ParserMethod = "parsePostIdxReg";
}
def postidx_reg : Operand<i32> {
let EncoderMethod = "getPostIdxRegOpValue";
let DecoderMethod = "DecodePostIdxReg";
let PrintMethod = "printPostIdxRegOperand";
let ParserMatchClass = PostIdxRegAsmOperand;
let MIOperandInfo = (ops GPRnopc, i32imm);
}
// addrmode2 := reg +/- imm12
// := reg +/- reg shop imm
//
// FIXME: addrmode2 should be refactored the rest of the way to always
// use explicit imm vs. reg versions above (addrmode_imm12 and ldst_so_reg).
def AddrMode2AsmOperand : AsmOperandClass { let Name = "AddrMode2"; }
def addrmode2 : Operand<i32>,
ComplexPattern<i32, 3, "SelectAddrMode2", []> {
let EncoderMethod = "getAddrMode2OpValue";
let PrintMethod = "printAddrMode2Operand";
let ParserMatchClass = AddrMode2AsmOperand;
let MIOperandInfo = (ops GPR:$base, GPR:$offsreg, i32imm:$offsimm);
}
def PostIdxRegShiftedAsmOperand : AsmOperandClass {
let Name = "PostIdxRegShifted";
let ParserMethod = "parsePostIdxReg";
}
def am2offset_reg : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode2OffsetReg",
[], [SDNPWantRoot]> {
let EncoderMethod = "getAddrMode2OffsetOpValue";
let PrintMethod = "printAddrMode2OffsetOperand";
// When using this for assembly, it's always as a post-index offset.
let ParserMatchClass = PostIdxRegShiftedAsmOperand;
let MIOperandInfo = (ops GPRnopc, i32imm);
}
// FIXME: am2offset_imm should only need the immediate, not the GPR. Having
// the GPR is purely vestigial at this point.
def AM2OffsetImmAsmOperand : AsmOperandClass { let Name = "AM2OffsetImm"; }
def am2offset_imm : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode2OffsetImm",
[], [SDNPWantRoot]> {
let EncoderMethod = "getAddrMode2OffsetOpValue";
let PrintMethod = "printAddrMode2OffsetOperand";
let ParserMatchClass = AM2OffsetImmAsmOperand;
let MIOperandInfo = (ops GPRnopc, i32imm);
}
// addrmode3 := reg +/- reg
// addrmode3 := reg +/- imm8
//
// FIXME: split into imm vs. reg versions.
def AddrMode3AsmOperand : AsmOperandClass { let Name = "AddrMode3"; }
class AddrMode3 : Operand<i32>,
ComplexPattern<i32, 3, "SelectAddrMode3", []> {
let EncoderMethod = "getAddrMode3OpValue";
let ParserMatchClass = AddrMode3AsmOperand;
let MIOperandInfo = (ops GPR:$base, GPR:$offsreg, i32imm:$offsimm);
}
def addrmode3 : AddrMode3
{
let PrintMethod = "printAddrMode3Operand<false>";
}
def addrmode3_pre : AddrMode3
{
let PrintMethod = "printAddrMode3Operand<true>";
}
// FIXME: split into imm vs. reg versions.
// FIXME: parser method to handle +/- register.
def AM3OffsetAsmOperand : AsmOperandClass {
let Name = "AM3Offset";
let ParserMethod = "parseAM3Offset";
}
def am3offset : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode3Offset",
[], [SDNPWantRoot]> {
let EncoderMethod = "getAddrMode3OffsetOpValue";
let PrintMethod = "printAddrMode3OffsetOperand";
let ParserMatchClass = AM3OffsetAsmOperand;
let MIOperandInfo = (ops GPR, i32imm);
}
// ldstm_mode := {ia, ib, da, db}
//
def ldstm_mode : OptionalDefOperand<OtherVT, (ops i32), (ops (i32 1))> {
let EncoderMethod = "getLdStmModeOpValue";
let PrintMethod = "printLdStmModeOperand";
}
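// For example, "ldm r0, {r1, r2}" uses the default ia (increment-after) mode,
// while "stmdb sp!, {r4-r7, lr}" uses db (decrement-before).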
// addrmode5 := reg +/- imm8*4
//
def AddrMode5AsmOperand : AsmOperandClass { let Name = "AddrMode5"; }
class AddrMode5 : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode5", []> {
let EncoderMethod = "getAddrMode5OpValue";
let DecoderMethod = "DecodeAddrMode5Operand";
let ParserMatchClass = AddrMode5AsmOperand;
let MIOperandInfo = (ops GPR:$base, i32imm);
}
def addrmode5 : AddrMode5 {
let PrintMethod = "printAddrMode5Operand<false>";
}
def addrmode5_pre : AddrMode5 {
let PrintMethod = "printAddrMode5Operand<true>";
}
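// For example, "vldr d0, [r1, #8]" uses addrmode5: the byte offset must be a
// multiple of 4 and is encoded as imm8*4, giving a range of +/-1020.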
// addrmode6 := reg with optional alignment
//
def AddrMode6AsmOperand : AsmOperandClass { let Name = "AlignedMemory"; }
def addrmode6 : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode6", [], [SDNPWantParent]>{
let PrintMethod = "printAddrMode6Operand";
let MIOperandInfo = (ops GPR:$addr, i32imm:$align);
let EncoderMethod = "getAddrMode6AddressOpValue";
let DecoderMethod = "DecodeAddrMode6Operand";
let ParserMatchClass = AddrMode6AsmOperand;
}
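// For example, "vld1.32 {d0, d1}, [r0:128]" uses addrmode6 with a 128-bit
// alignment hint carried in the $align operand; the alignment may also be
// omitted entirely.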
def am6offset : Operand<i32>,
ComplexPattern<i32, 1, "SelectAddrMode6Offset",
[], [SDNPWantRoot]> {
let PrintMethod = "printAddrMode6OffsetOperand";
let MIOperandInfo = (ops GPR);
let EncoderMethod = "getAddrMode6OffsetOpValue";
let DecoderMethod = "DecodeGPRRegisterClass";
}
// Special version of addrmode6 to handle alignment encoding for VST1/VLD1
// (single element from one lane) for size 32.
def addrmode6oneL32 : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode6", [], [SDNPWantParent]>{
let PrintMethod = "printAddrMode6Operand";
let MIOperandInfo = (ops GPR:$addr, i32imm);
let EncoderMethod = "getAddrMode6OneLane32AddressOpValue";
}
// Base class for addrmode6 with specific alignment restrictions.
class AddrMode6Align : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode6", [], [SDNPWantParent]>{
let PrintMethod = "printAddrMode6Operand";
let MIOperandInfo = (ops GPR:$addr, i32imm:$align);
let EncoderMethod = "getAddrMode6AddressOpValue";
let DecoderMethod = "DecodeAddrMode6Operand";
}
// Special version of addrmode6 to handle no allowed alignment encoding for
// VLD/VST instructions and checking the alignment is not specified.
def AddrMode6AlignNoneAsmOperand : AsmOperandClass {
let Name = "AlignedMemoryNone";
let DiagnosticType = "AlignedMemoryRequiresNone";
}
def addrmode6alignNone : AddrMode6Align {
// The alignment specifier can only be omitted.
let ParserMatchClass = AddrMode6AlignNoneAsmOperand;
}
// Special version of addrmode6 to handle 16-bit alignment encoding for
// VLD/VST instructions and checking the alignment value.
def AddrMode6Align16AsmOperand : AsmOperandClass {
let Name = "AlignedMemory16";
let DiagnosticType = "AlignedMemoryRequires16";
}
def addrmode6align16 : AddrMode6Align {
// The alignment specifier can only be 16 or omitted.
let ParserMatchClass = AddrMode6Align16AsmOperand;
}
// Special version of addrmode6 to handle 32-bit alignment encoding for
// VLD/VST instructions and checking the alignment value.
def AddrMode6Align32AsmOperand : AsmOperandClass {
let Name = "AlignedMemory32";
let DiagnosticType = "AlignedMemoryRequires32";
}
def addrmode6align32 : AddrMode6Align {
// The alignment specifier can only be 32 or omitted.
let ParserMatchClass = AddrMode6Align32AsmOperand;
}
// Special version of addrmode6 to handle 64-bit alignment encoding for
// VLD/VST instructions and checking the alignment value.
def AddrMode6Align64AsmOperand : AsmOperandClass {
let Name = "AlignedMemory64";
let DiagnosticType = "AlignedMemoryRequires64";
}
def addrmode6align64 : AddrMode6Align {
// The alignment specifier can only be 64 or omitted.
let ParserMatchClass = AddrMode6Align64AsmOperand;
}
// Special version of addrmode6 to handle 64-bit or 128-bit alignment encoding
// for VLD/VST instructions and checking the alignment value.
def AddrMode6Align64or128AsmOperand : AsmOperandClass {
let Name = "AlignedMemory64or128";
let DiagnosticType = "AlignedMemoryRequires64or128";
}
def addrmode6align64or128 : AddrMode6Align {
// The alignment specifier can only be 64, 128 or omitted.
let ParserMatchClass = AddrMode6Align64or128AsmOperand;
}
// Special version of addrmode6 to handle 64-bit, 128-bit or 256-bit alignment
// encoding for VLD/VST instructions and checking the alignment value.
def AddrMode6Align64or128or256AsmOperand : AsmOperandClass {
let Name = "AlignedMemory64or128or256";
let DiagnosticType = "AlignedMemoryRequires64or128or256";
}
def addrmode6align64or128or256 : AddrMode6Align {
// The alignment specifier can only be 64, 128, 256 or omitted.
let ParserMatchClass = AddrMode6Align64or128or256AsmOperand;
}
// Special version of addrmode6 to handle alignment encoding for VLD-dup
// instructions, specifically VLD4-dup.
def addrmode6dup : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode6", [], [SDNPWantParent]>{
let PrintMethod = "printAddrMode6Operand";
let MIOperandInfo = (ops GPR:$addr, i32imm);
let EncoderMethod = "getAddrMode6DupAddressOpValue";
// FIXME: This is close, but not quite right. The alignment specifier is
// different.
let ParserMatchClass = AddrMode6AsmOperand;
}
// Base class for addrmode6dup with specific alignment restrictions.
class AddrMode6DupAlign : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrMode6", [], [SDNPWantParent]>{
let PrintMethod = "printAddrMode6Operand";
let MIOperandInfo = (ops GPR:$addr, i32imm);
let EncoderMethod = "getAddrMode6DupAddressOpValue";
}
// Special version of addrmode6 to handle no allowed alignment encoding for
// VLD-dup instruction and checking the alignment is not specified.
def AddrMode6dupAlignNoneAsmOperand : AsmOperandClass {
let Name = "DupAlignedMemoryNone";
let DiagnosticType = "DupAlignedMemoryRequiresNone";
}
def addrmode6dupalignNone : AddrMode6DupAlign {
// The alignment specifier can only be omitted.
let ParserMatchClass = AddrMode6dupAlignNoneAsmOperand;
}
// Special version of addrmode6 to handle 16-bit alignment encoding for VLD-dup
// instruction and checking the alignment value.
def AddrMode6dupAlign16AsmOperand : AsmOperandClass {
let Name = "DupAlignedMemory16";
let DiagnosticType = "DupAlignedMemoryRequires16";
}
def addrmode6dupalign16 : AddrMode6DupAlign {
// The alignment specifier can only be 16 or omitted.
let ParserMatchClass = AddrMode6dupAlign16AsmOperand;
}
// Special version of addrmode6 to handle 32-bit alignment encoding for VLD-dup
// instruction and checking the alignment value.
def AddrMode6dupAlign32AsmOperand : AsmOperandClass {
let Name = "DupAlignedMemory32";
let DiagnosticType = "DupAlignedMemoryRequires32";
}
def addrmode6dupalign32 : AddrMode6DupAlign {
// The alignment specifier can only be 32 or omitted.
let ParserMatchClass = AddrMode6dupAlign32AsmOperand;
}
// Special version of addrmode6 to handle 64-bit alignment encoding for VLD
// instructions and checking the alignment value.
def AddrMode6dupAlign64AsmOperand : AsmOperandClass {
let Name = "DupAlignedMemory64";
let DiagnosticType = "DupAlignedMemoryRequires64";
}
def addrmode6dupalign64 : AddrMode6DupAlign {
// The alignment specifier can only be 64 or omitted.
let ParserMatchClass = AddrMode6dupAlign64AsmOperand;
}
// Special version of addrmode6 to handle 64-bit or 128-bit alignment encoding
// for VLD instructions and checking the alignment value.
def AddrMode6dupAlign64or128AsmOperand : AsmOperandClass {
let Name = "DupAlignedMemory64or128";
let DiagnosticType = "DupAlignedMemoryRequires64or128";
}
def addrmode6dupalign64or128 : AddrMode6DupAlign {
// The alignment specifier can only be 64, 128 or omitted.
let ParserMatchClass = AddrMode6dupAlign64or128AsmOperand;
}
// addrmodepc := pc + reg
//
def addrmodepc : Operand<i32>,
ComplexPattern<i32, 2, "SelectAddrModePC", []> {
let PrintMethod = "printAddrModePCOperand";
let MIOperandInfo = (ops GPR, i32imm);
}
// addr_offset_none := reg
//
def MemNoOffsetAsmOperand : AsmOperandClass { let Name = "MemNoOffset"; }
def addr_offset_none : Operand<i32>,
ComplexPattern<i32, 1, "SelectAddrOffsetNone", []> {
let PrintMethod = "printAddrMode7Operand";
let DecoderMethod = "DecodeAddrMode7Operand";
let ParserMatchClass = MemNoOffsetAsmOperand;
let MIOperandInfo = (ops GPR:$base);
}
def nohash_imm : Operand<i32> {
let PrintMethod = "printNoHashImmediate";
}
def CoprocNumAsmOperand : AsmOperandClass {
let Name = "CoprocNum";
let ParserMethod = "parseCoprocNumOperand";
}
def p_imm : Operand<i32> {
let PrintMethod = "printPImmediate";
let ParserMatchClass = CoprocNumAsmOperand;
let DecoderMethod = "DecodeCoprocessor";
}
def CoprocRegAsmOperand : AsmOperandClass {
let Name = "CoprocReg";
let ParserMethod = "parseCoprocRegOperand";
}
def c_imm : Operand<i32> {
let PrintMethod = "printCImmediate";
let ParserMatchClass = CoprocRegAsmOperand;
}
def CoprocOptionAsmOperand : AsmOperandClass {
let Name = "CoprocOption";
let ParserMethod = "parseCoprocOptionOperand";
}
def coproc_option_imm : Operand<i32> {
let PrintMethod = "printCoprocOptionImm";
let ParserMatchClass = CoprocOptionAsmOperand;
}
//===----------------------------------------------------------------------===//
include "ARMInstrFormats.td"
//===----------------------------------------------------------------------===//
// Multiclass helpers...
//
/// AsI1_bin_irs - Defines a set of (op r, {so_imm|r|so_reg}) patterns for a
/// binop that produces a value.
let TwoOperandAliasConstraint = "$Rn = $Rd" in
multiclass AsI1_bin_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
PatFrag opnode, bit Commutable = 0> {
// The register-immediate version is re-materializable. This is useful
// in particular for taking the address of a local.
let isReMaterializable = 1 in {
def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm,
iii, opc, "\t$Rd, $Rn, $imm",
[(set GPR:$Rd, (opnode GPR:$Rn, so_imm:$imm))]>,
Sched<[WriteALU, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
bits<12> imm;
let Inst{25} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-0} = imm;
}
}
def rr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm,
iir, opc, "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (opnode GPR:$Rn, GPR:$Rm))]>,
Sched<[WriteALU, ReadALU, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
let Inst{25} = 0;
let isCommutable = Commutable;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-4} = 0b00000000;
let Inst{3-0} = Rm;
}
def rsi : AsI1<opcod, (outs GPR:$Rd),
(ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm,
iis, opc, "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, (opnode GPR:$Rn, so_reg_imm:$shift))]>,
Sched<[WriteALUsi, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-5} = shift{11-5};
let Inst{4} = 0;
let Inst{3-0} = shift{3-0};
}
def rsr : AsI1<opcod, (outs GPR:$Rd),
(ins GPR:$Rn, so_reg_reg:$shift), DPSoRegRegFrm,
iis, opc, "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, (opnode GPR:$Rn, so_reg_reg:$shift))]>,
Sched<[WriteALUsr, ReadALUsr]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-8} = shift{11-8};
let Inst{7} = 0;
let Inst{6-5} = shift{6-5};
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
}
}
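// Illustrative instantiation (the real ones appear later in this file):
// something like
//   defm AND : AsI1_bin_irs<0b0000, "and", IIC_iBITi, IIC_iBITr, IIC_iBITsr,
//                           BinOpFrag<(and node:$LHS, node:$RHS)>, 1>;
// expands the multiclass above into the ANDri, ANDrr, ANDrsi and ANDrsr
// instruction records.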
/// AsI1_rbin_irs - Same as AsI1_bin_irs except the order of operands is
/// reversed. The 'rr' form is only defined for the disassembler; for codegen
/// it is equivalent to the AsI1_bin_irs counterpart.
let TwoOperandAliasConstraint = "$Rn = $Rd" in
multiclass AsI1_rbin_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
PatFrag opnode, bit Commutable = 0> {
// The register-immediate version is re-materializable. This is useful
// in particular for taking the address of a local.
let isReMaterializable = 1 in {
def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm,
iii, opc, "\t$Rd, $Rn, $imm",
[(set GPR:$Rd, (opnode so_imm:$imm, GPR:$Rn))]>,
Sched<[WriteALU, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
bits<12> imm;
let Inst{25} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-0} = imm;
}
}
def rr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm,
iir, opc, "\t$Rd, $Rn, $Rm",
[/* pattern left blank */]>,
Sched<[WriteALU, ReadALU, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
let Inst{11-4} = 0b00000000;
let Inst{25} = 0;
let Inst{3-0} = Rm;
let Inst{15-12} = Rd;
let Inst{19-16} = Rn;
}
def rsi : AsI1<opcod, (outs GPR:$Rd),
(ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm,
iis, opc, "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, (opnode so_reg_imm:$shift, GPR:$Rn))]>,
Sched<[WriteALUsi, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-5} = shift{11-5};
let Inst{4} = 0;
let Inst{3-0} = shift{3-0};
}
def rsr : AsI1<opcod, (outs GPR:$Rd),
(ins GPR:$Rn, so_reg_reg:$shift), DPSoRegRegFrm,
iis, opc, "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, (opnode so_reg_reg:$shift, GPR:$Rn))]>,
Sched<[WriteALUsr, ReadALUsr]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-8} = shift{11-8};
let Inst{7} = 0;
let Inst{6-5} = shift{6-5};
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
}
}
/// AsI1_bin_s_irs - Same as AsI1_bin_irs except it sets the 's' bit by default.
///
/// These opcodes will be converted to the real non-S opcodes by
/// AdjustInstrPostInstrSelection after giving them an optional CPSR operand.
let hasPostISelHook = 1, Defs = [CPSR] in {
multiclass AsI1_bin_s_irs<InstrItinClass iii, InstrItinClass iir,
InstrItinClass iis, PatFrag opnode,
bit Commutable = 0> {
def ri : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm, pred:$p),
4, iii,
[(set GPR:$Rd, CPSR, (opnode GPR:$Rn, so_imm:$imm))]>,
Sched<[WriteALU, ReadALU]>;
def rr : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, pred:$p),
4, iir,
[(set GPR:$Rd, CPSR, (opnode GPR:$Rn, GPR:$Rm))]>,
Sched<[WriteALU, ReadALU, ReadALU]> {
let isCommutable = Commutable;
}
def rsi : ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$Rn, so_reg_imm:$shift, pred:$p),
4, iis,
[(set GPR:$Rd, CPSR, (opnode GPR:$Rn,
so_reg_imm:$shift))]>,
Sched<[WriteALUsi, ReadALU]>;
def rsr : ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$Rn, so_reg_reg:$shift, pred:$p),
4, iis,
[(set GPR:$Rd, CPSR, (opnode GPR:$Rn,
so_reg_reg:$shift))]>,
Sched<[WriteALUSsr, ReadALUsr]>;
}
}
/// AsI1_rbin_s_is - Same as AsI1_bin_s_irs, except selection DAG
/// operands are reversed.
let hasPostISelHook = 1, Defs = [CPSR] in {
multiclass AsI1_rbin_s_is<InstrItinClass iii, InstrItinClass iir,
InstrItinClass iis, PatFrag opnode,
bit Commutable = 0> {
def ri : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm, pred:$p),
4, iii,
[(set GPR:$Rd, CPSR, (opnode so_imm:$imm, GPR:$Rn))]>,
Sched<[WriteALU, ReadALU]>;
def rsi : ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$Rn, so_reg_imm:$shift, pred:$p),
4, iis,
[(set GPR:$Rd, CPSR, (opnode so_reg_imm:$shift,
GPR:$Rn))]>,
Sched<[WriteALUsi, ReadALU]>;
def rsr : ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$Rn, so_reg_reg:$shift, pred:$p),
4, iis,
[(set GPR:$Rd, CPSR, (opnode so_reg_reg:$shift,
GPR:$Rn))]>,
Sched<[WriteALUSsr, ReadALUsr]>;
}
}
/// AI1_cmp_irs - Defines a set of (op r, {so_imm|r|so_reg}) cmp / test
/// patterns. Similar to AsI1_bin_irs except the instruction does not produce
/// an explicit result; it only implicitly sets CPSR.
let isCompare = 1, Defs = [CPSR] in {
multiclass AI1_cmp_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
PatFrag opnode, bit Commutable = 0> {
def ri : AI1<opcod, (outs), (ins GPR:$Rn, so_imm:$imm), DPFrm, iii,
opc, "\t$Rn, $imm",
[(opnode GPR:$Rn, so_imm:$imm)]>,
Sched<[WriteCMP, ReadALU]> {
bits<4> Rn;
bits<12> imm;
let Inst{25} = 1;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = 0b0000;
let Inst{11-0} = imm;
let Unpredictable{15-12} = 0b1111;
}
def rr : AI1<opcod, (outs), (ins GPR:$Rn, GPR:$Rm), DPFrm, iir,
opc, "\t$Rn, $Rm",
[(opnode GPR:$Rn, GPR:$Rm)]>,
Sched<[WriteCMP, ReadALU, ReadALU]> {
bits<4> Rn;
bits<4> Rm;
let isCommutable = Commutable;
let Inst{25} = 0;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = 0b0000;
let Inst{11-4} = 0b00000000;
let Inst{3-0} = Rm;
let Unpredictable{15-12} = 0b1111;
}
def rsi : AI1<opcod, (outs),
(ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm, iis,
opc, "\t$Rn, $shift",
[(opnode GPR:$Rn, so_reg_imm:$shift)]>,
Sched<[WriteCMPsi, ReadALU]> {
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = 0b0000;
let Inst{11-5} = shift{11-5};
let Inst{4} = 0;
let Inst{3-0} = shift{3-0};
let Unpredictable{15-12} = 0b1111;
}
def rsr : AI1<opcod, (outs),
(ins GPRnopc:$Rn, so_reg_reg:$shift), DPSoRegRegFrm, iis,
opc, "\t$Rn, $shift",
[(opnode GPRnopc:$Rn, so_reg_reg:$shift)]>,
Sched<[WriteCMPsr, ReadALU]> {
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = 0b0000;
let Inst{11-8} = shift{11-8};
let Inst{7} = 0;
let Inst{6-5} = shift{6-5};
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
let Unpredictable{15-12} = 0b1111;
}
}
}
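// For example, a "defm CMP : AI1_cmp_irs<...>"-style instantiation (see later
// in this file) yields the ri/rr/rsi/rsr forms behind "cmp r0, #42",
// "cmp r0, r1", "cmp r0, r1, lsl #2" and "cmp r0, r1, lsl r2".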
/// AI_ext_rrot - A unary operation with two forms: one whose operand is a
/// register and one whose operand is a register rotated by 8/16/24.
/// FIXME: Remove the 'r' variant. Its rot_imm is zero.
class AI_ext_rrot<bits<8> opcod, string opc, PatFrag opnode>
: AExtI<opcod, (outs GPRnopc:$Rd), (ins GPRnopc:$Rm, rot_imm:$rot),
IIC_iEXTr, opc, "\t$Rd, $Rm$rot",
[(set GPRnopc:$Rd, (opnode (rotr GPRnopc:$Rm, rot_imm:$rot)))]>,
Requires<[IsARM, HasV6]>, Sched<[WriteALUsi]> {
bits<4> Rd;
bits<4> Rm;
bits<2> rot;
let Inst{19-16} = 0b1111;
let Inst{15-12} = Rd;
let Inst{11-10} = rot;
let Inst{3-0} = Rm;
}
class AI_ext_rrot_np<bits<8> opcod, string opc>
: AExtI<opcod, (outs GPRnopc:$Rd), (ins GPRnopc:$Rm, rot_imm:$rot),
IIC_iEXTr, opc, "\t$Rd, $Rm$rot", []>,
Requires<[IsARM, HasV6]>, Sched<[WriteALUsi]> {
bits<2> rot;
let Inst{19-16} = 0b1111;
let Inst{11-10} = rot;
}
/// AI_exta_rrot - A binary operation with two forms: one whose operand is a
/// register and one whose operand is a register rotated by 8/16/24.
class AI_exta_rrot<bits<8> opcod, string opc, PatFrag opnode>
: AExtI<opcod, (outs GPRnopc:$Rd), (ins GPR:$Rn, GPRnopc:$Rm, rot_imm:$rot),
IIC_iEXTAr, opc, "\t$Rd, $Rn, $Rm$rot",
[(set GPRnopc:$Rd, (opnode GPR:$Rn,
(rotr GPRnopc:$Rm, rot_imm:$rot)))]>,
Requires<[IsARM, HasV6]>, Sched<[WriteALUsr]> {
bits<4> Rd;
bits<4> Rm;
bits<4> Rn;
bits<2> rot;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-10} = rot;
let Inst{9-4} = 0b000111;
let Inst{3-0} = Rm;
}
class AI_exta_rrot_np<bits<8> opcod, string opc>
: AExtI<opcod, (outs GPRnopc:$Rd), (ins GPR:$Rn, GPRnopc:$Rm, rot_imm:$rot),
IIC_iEXTAr, opc, "\t$Rd, $Rn, $Rm$rot", []>,
Requires<[IsARM, HasV6]>, Sched<[WriteALUsr]> {
bits<4> Rn;
bits<2> rot;
let Inst{19-16} = Rn;
let Inst{11-10} = rot;
}
/// AI1_adde_sube_irs - Define instructions and patterns for adde and sube.
let TwoOperandAliasConstraint = "$Rn = $Rd" in
multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
bit Commutable = 0> {
let hasPostISelHook = 1, Defs = [CPSR], Uses = [CPSR] in {
def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm),
DPFrm, IIC_iALUi, opc, "\t$Rd, $Rn, $imm",
[(set GPR:$Rd, CPSR, (opnode GPR:$Rn, so_imm:$imm, CPSR))]>,
Requires<[IsARM]>,
Sched<[WriteALU, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
bits<12> imm;
let Inst{25} = 1;
let Inst{15-12} = Rd;
let Inst{19-16} = Rn;
let Inst{11-0} = imm;
}
def rr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
DPFrm, IIC_iALUr, opc, "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, CPSR, (opnode GPR:$Rn, GPR:$Rm, CPSR))]>,
Requires<[IsARM]>,
Sched<[WriteALU, ReadALU, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
let Inst{11-4} = 0b00000000;
let Inst{25} = 0;
let isCommutable = Commutable;
let Inst{3-0} = Rm;
let Inst{15-12} = Rd;
let Inst{19-16} = Rn;
}
def rsi : AsI1<opcod, (outs GPR:$Rd),
(ins GPR:$Rn, so_reg_imm:$shift),
DPSoRegImmFrm, IIC_iALUsr, opc, "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, CPSR, (opnode GPR:$Rn, so_reg_imm:$shift, CPSR))]>,
Requires<[IsARM]>,
Sched<[WriteALUsi, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-5} = shift{11-5};
let Inst{4} = 0;
let Inst{3-0} = shift{3-0};
}
def rsr : AsI1<opcod, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, so_reg_reg:$shift),
DPSoRegRegFrm, IIC_iALUsr, opc, "\t$Rd, $Rn, $shift",
[(set GPRnopc:$Rd, CPSR,
(opnode GPRnopc:$Rn, so_reg_reg:$shift, CPSR))]>,
Requires<[IsARM]>,
Sched<[WriteALUsr, ReadALUsr]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-8} = shift{11-8};
let Inst{7} = 0;
let Inst{6-5} = shift{6-5};
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
}
}
}
/// AI1_rsc_irs - Define instructions and patterns for rsc
let TwoOperandAliasConstraint = "$Rn = $Rd" in
multiclass AI1_rsc_irs<bits<4> opcod, string opc, PatFrag opnode> {
let hasPostISelHook = 1, Defs = [CPSR], Uses = [CPSR] in {
def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm),
DPFrm, IIC_iALUi, opc, "\t$Rd, $Rn, $imm",
[(set GPR:$Rd, CPSR, (opnode so_imm:$imm, GPR:$Rn, CPSR))]>,
Requires<[IsARM]>,
Sched<[WriteALU, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
bits<12> imm;
let Inst{25} = 1;
let Inst{15-12} = Rd;
let Inst{19-16} = Rn;
let Inst{11-0} = imm;
}
def rr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
DPFrm, IIC_iALUr, opc, "\t$Rd, $Rn, $Rm",
[/* pattern left blank */]>,
Sched<[WriteALU, ReadALU, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
let Inst{11-4} = 0b00000000;
let Inst{25} = 0;
let Inst{3-0} = Rm;
let Inst{15-12} = Rd;
let Inst{19-16} = Rn;
}
def rsi : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_reg_imm:$shift),
DPSoRegImmFrm, IIC_iALUsr, opc, "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, CPSR, (opnode so_reg_imm:$shift, GPR:$Rn, CPSR))]>,
Requires<[IsARM]>,
Sched<[WriteALUsi, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-5} = shift{11-5};
let Inst{4} = 0;
let Inst{3-0} = shift{3-0};
}
def rsr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_reg_reg:$shift),
DPSoRegRegFrm, IIC_iALUsr, opc, "\t$Rd, $Rn, $shift",
[(set GPR:$Rd, CPSR, (opnode so_reg_reg:$shift, GPR:$Rn, CPSR))]>,
Requires<[IsARM]>,
Sched<[WriteALUsr, ReadALUsr]> {
bits<4> Rd;
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-8} = shift{11-8};
let Inst{7} = 0;
let Inst{6-5} = shift{6-5};
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
}
}
}
let canFoldAsLoad = 1, isReMaterializable = 1 in {
multiclass AI_ldr1<bit isByte, string opc, InstrItinClass iii,
InstrItinClass iir, PatFrag opnode> {
// Note: We use the complex addrmode_imm12 rather than just an input
// GPR and a constrained immediate so that we can use this to match
// frame index references and avoid matching constant pool references.
def i12: AI2ldst<0b010, 1, isByte, (outs GPR:$Rt), (ins addrmode_imm12:$addr),
AddrMode_i12, LdFrm, iii, opc, "\t$Rt, $addr",
[(set GPR:$Rt, (opnode addrmode_imm12:$addr))]> {
bits<4> Rt;
bits<17> addr;
let Inst{23} = addr{12}; // U (add = ('U' == 1))
let Inst{19-16} = addr{16-13}; // Rn
let Inst{15-12} = Rt;
let Inst{11-0} = addr{11-0}; // imm12
}
def rs : AI2ldst<0b011, 1, isByte, (outs GPR:$Rt), (ins ldst_so_reg:$shift),
AddrModeNone, LdFrm, iir, opc, "\t$Rt, $shift",
[(set GPR:$Rt, (opnode ldst_so_reg:$shift))]> {
bits<4> Rt;
bits<17> shift;
let shift{4} = 0; // Inst{4} = 0
let Inst{23} = shift{12}; // U (add = ('U' == 1))
let Inst{19-16} = shift{16-13}; // Rn
let Inst{15-12} = Rt;
let Inst{11-0} = shift{11-0};
}
}
}
let canFoldAsLoad = 1, isReMaterializable = 1 in {
multiclass AI_ldr1nopc<bit isByte, string opc, InstrItinClass iii,
InstrItinClass iir, PatFrag opnode> {
// Note: We use the complex addrmode_imm12 rather than just an input
// GPR and a constrained immediate so that we can use this to match
// frame index references and avoid matching constant pool references.
def i12: AI2ldst<0b010, 1, isByte, (outs GPRnopc:$Rt),
(ins addrmode_imm12:$addr),
AddrMode_i12, LdFrm, iii, opc, "\t$Rt, $addr",
[(set GPRnopc:$Rt, (opnode addrmode_imm12:$addr))]> {
bits<4> Rt;
bits<17> addr;
let Inst{23} = addr{12}; // U (add = ('U' == 1))
let Inst{19-16} = addr{16-13}; // Rn
let Inst{15-12} = Rt;
let Inst{11-0} = addr{11-0}; // imm12
}
def rs : AI2ldst<0b011, 1, isByte, (outs GPRnopc:$Rt),
(ins ldst_so_reg:$shift),
AddrModeNone, LdFrm, iir, opc, "\t$Rt, $shift",
[(set GPRnopc:$Rt, (opnode ldst_so_reg:$shift))]> {
bits<4> Rt;
bits<17> shift;
let shift{4} = 0; // Inst{4} = 0
let Inst{23} = shift{12}; // U (add = ('U' == 1))
let Inst{19-16} = shift{16-13}; // Rn
let Inst{15-12} = Rt;
let Inst{11-0} = shift{11-0};
}
}
}
multiclass AI_str1<bit isByte, string opc, InstrItinClass iii,
InstrItinClass iir, PatFrag opnode> {
// Note: We use the complex addrmode_imm12 rather than just an input
// GPR and a constrained immediate so that we can use this to match
// frame index references and avoid matching constant pool references.
def i12 : AI2ldst<0b010, 0, isByte, (outs),
(ins GPR:$Rt, addrmode_imm12:$addr),
AddrMode_i12, StFrm, iii, opc, "\t$Rt, $addr",
[(opnode GPR:$Rt, addrmode_imm12:$addr)]> {
bits<4> Rt;
bits<17> addr;
let Inst{23} = addr{12}; // U (add = ('U' == 1))
let Inst{19-16} = addr{16-13}; // Rn
let Inst{15-12} = Rt;
let Inst{11-0} = addr{11-0}; // imm12
}
def rs : AI2ldst<0b011, 0, isByte, (outs), (ins GPR:$Rt, ldst_so_reg:$shift),
AddrModeNone, StFrm, iir, opc, "\t$Rt, $shift",
[(opnode GPR:$Rt, ldst_so_reg:$shift)]> {
bits<4> Rt;
bits<17> shift;
let shift{4} = 0; // Inst{4} = 0
let Inst{23} = shift{12}; // U (add = ('U' == 1))
let Inst{19-16} = shift{16-13}; // Rn
let Inst{15-12} = Rt;
let Inst{11-0} = shift{11-0};
}
}
multiclass AI_str1nopc<bit isByte, string opc, InstrItinClass iii,
InstrItinClass iir, PatFrag opnode> {
// Note: We use the complex addrmode_imm12 rather than just an input
// GPR and a constrained immediate so that we can use this to match
// frame index references and avoid matching constant pool references.
def i12 : AI2ldst<0b010, 0, isByte, (outs),
(ins GPRnopc:$Rt, addrmode_imm12:$addr),
AddrMode_i12, StFrm, iii, opc, "\t$Rt, $addr",
[(opnode GPRnopc:$Rt, addrmode_imm12:$addr)]> {
bits<4> Rt;
bits<17> addr;
let Inst{23} = addr{12}; // U (add = ('U' == 1))
let Inst{19-16} = addr{16-13}; // Rn
let Inst{15-12} = Rt;
let Inst{11-0} = addr{11-0}; // imm12
}
def rs : AI2ldst<0b011, 0, isByte, (outs),
(ins GPRnopc:$Rt, ldst_so_reg:$shift),
AddrModeNone, StFrm, iir, opc, "\t$Rt, $shift",
[(opnode GPRnopc:$Rt, ldst_so_reg:$shift)]> {
bits<4> Rt;
bits<17> shift;
let shift{4} = 0; // Inst{4} = 0
let Inst{23} = shift{12}; // U (add = ('U' == 1))
let Inst{19-16} = shift{16-13}; // Rn
let Inst{15-12} = Rt;
let Inst{11-0} = shift{11-0};
}
}
//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Miscellaneous Instructions.
//
/// CONSTPOOL_ENTRY - This instruction represents a floating constant pool in
/// the function. The first operand is the ID# for this instruction, the second
/// is the index into the MachineConstantPool that this entry corresponds to,
/// and the third is the size in bytes of this constant pool entry.
let neverHasSideEffects = 1, isNotDuplicable = 1 in
def CONSTPOOL_ENTRY :
PseudoInst<(outs), (ins cpinst_operand:$instid, cpinst_operand:$cpidx,
i32imm:$size), NoItinerary, []>;
// FIXME: Marking these as hasSideEffects is necessary to prevent machine DCE
// from removing one half of the matched pairs. That breaks PEI, which assumes
// these will always be in pairs, and asserts if it finds otherwise. Better way?
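// Roughly, each call in the DAG is bracketed as:
//   ADJCALLSTACKDOWN <amt>
//   ...outgoing argument setup...
//   BL <callee>
//   ADJCALLSTACKUP <amt>, <amt2>
// so deleting either marker would leave PEI with an unmatched half of the pair.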
let Defs = [SP], Uses = [SP], hasSideEffects = 1 in {
def ADJCALLSTACKUP :
PseudoInst<(outs), (ins i32imm:$amt1, i32imm:$amt2, pred:$p), NoItinerary,
[(ARMcallseq_end timm:$amt1, timm:$amt2)]>;
def ADJCALLSTACKDOWN :
PseudoInst<(outs), (ins i32imm:$amt, pred:$p), NoItinerary,
[(ARMcallseq_start timm:$amt)]>;
}
def HINT : AI<(outs), (ins imm0_239:$imm), MiscFrm, NoItinerary,
"hint", "\t$imm", [(int_arm_hint imm0_239:$imm)]>,
Requires<[IsARM, HasV6]> {
bits<8> imm;
let Inst{27-8} = 0b00110010000011110000;
let Inst{7-0} = imm;
}
def : InstAlias<"nop$p", (HINT 0, pred:$p)>, Requires<[IsARM, HasV6T2]>;
def : InstAlias<"yield$p", (HINT 1, pred:$p)>, Requires<[IsARM, HasV6T2]>;
def : InstAlias<"wfe$p", (HINT 2, pred:$p)>, Requires<[IsARM, HasV6T2]>;
def : InstAlias<"wfi$p", (HINT 3, pred:$p)>, Requires<[IsARM, HasV6T2]>;
def : InstAlias<"sev$p", (HINT 4, pred:$p)>, Requires<[IsARM, HasV6T2]>;
def : InstAlias<"sevl$p", (HINT 5, pred:$p)>, Requires<[IsARM, HasV8]>;
def SEL : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm, NoItinerary, "sel",
"\t$Rd, $Rn, $Rm", []>, Requires<[IsARM, HasV6]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
let Inst{3-0} = Rm;
let Inst{15-12} = Rd;
let Inst{19-16} = Rn;
let Inst{27-20} = 0b01101000;
let Inst{7-4} = 0b1011;
let Inst{11-8} = 0b1111;
let Unpredictable{11-8} = 0b1111;
}
// The 16-bit operand $val can be used by a debugger to store more information
// about the breakpoint.
def BKPT : AInoP<(outs), (ins imm0_65535:$val), MiscFrm, NoItinerary,
"bkpt", "\t$val", []>, Requires<[IsARM]> {
bits<16> val;
let Inst{3-0} = val{3-0};
let Inst{19-8} = val{15-4};
let Inst{27-20} = 0b00010010;
let Inst{31-28} = 0xe; // AL
let Inst{7-4} = 0b0111;
}
// default immediate for breakpoint mnemonic
def : InstAlias<"bkpt", (BKPT 0)>, Requires<[IsARM]>;
def HLT : AInoP<(outs), (ins imm0_65535:$val), MiscFrm, NoItinerary,
"hlt", "\t$val", []>, Requires<[IsARM, HasV8]> {
bits<16> val;
let Inst{3-0} = val{3-0};
let Inst{19-8} = val{15-4};
let Inst{27-20} = 0b00010000;
let Inst{31-28} = 0xe; // AL
let Inst{7-4} = 0b0111;
}
// Change Processor State
// FIXME: We should use InstAlias to handle the optional operands.
class CPS<dag iops, string asm_ops>
: AXI<(outs), iops, MiscFrm, NoItinerary, !strconcat("cps", asm_ops),
[]>, Requires<[IsARM]> {
bits<2> imod;
bits<3> iflags;
bits<5> mode;
bit M;
let Inst{31-28} = 0b1111;
let Inst{27-20} = 0b00010000;
let Inst{19-18} = imod;
let Inst{17} = M; // Enabled if mode is set
let Inst{16-9} = 0b00000000;
let Inst{8-6} = iflags;
let Inst{5} = 0;
let Inst{4-0} = mode;
}
let DecoderMethod = "DecodeCPSInstruction" in {
let M = 1 in
def CPS3p : CPS<(ins imod_op:$imod, iflags_op:$iflags, imm0_31:$mode),
"$imod\t$iflags, $mode">;
let mode = 0, M = 0 in
def CPS2p : CPS<(ins imod_op:$imod, iflags_op:$iflags), "$imod\t$iflags">;
let imod = 0, iflags = 0, M = 1 in
def CPS1p : CPS<(ins imm0_31:$mode), "\t$mode">;
}
// Preload hints to the memory system about a possible future data/instruction
// access.
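// For example (register and offset are illustrative), "pld [r0, #32]" hints at
// an upcoming data load, "pldw" hints at a data access with intent to write,
// and "pli" hints at an upcoming instruction fetch.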
multiclass APreLoad<bits<1> read, bits<1> data, string opc> {
def i12 : AXIM<(outs), (ins addrmode_imm12:$addr), AddrMode_i12, MiscFrm,
IIC_Preload, !strconcat(opc, "\t$addr"),
[(ARMPreload addrmode_imm12:$addr, (i32 read), (i32 data))]>,
Sched<[WritePreLd]> {
bits<4> Rt;
bits<17> addr;
let Inst{31-26} = 0b111101;
let Inst{25} = 0; // 0 for immediate form
let Inst{24} = data;
let Inst{23} = addr{12}; // U (add = ('U' == 1))
let Inst{22} = read;
let Inst{21-20} = 0b01;
let Inst{19-16} = addr{16-13}; // Rn
let Inst{15-12} = 0b1111;
let Inst{11-0} = addr{11-0}; // imm12
}
def rs : AXI<(outs), (ins ldst_so_reg:$shift), MiscFrm, IIC_Preload,
!strconcat(opc, "\t$shift"),
[(ARMPreload ldst_so_reg:$shift, (i32 read), (i32 data))]>,
Sched<[WritePreLd]> {
bits<17> shift;
let Inst{31-26} = 0b111101;
let Inst{25} = 1; // 1 for register form
let Inst{24} = data;
let Inst{23} = shift{12}; // U (add = ('U' == 1))
let Inst{22} = read;
let Inst{21-20} = 0b01;
let Inst{19-16} = shift{16-13}; // Rn
let Inst{15-12} = 0b1111;
let Inst{11-0} = shift{11-0};
let Inst{4} = 0;
}
}
defm PLD : APreLoad<1, 1, "pld">, Requires<[IsARM]>;
defm PLDW : APreLoad<0, 1, "pldw">, Requires<[IsARM,HasV7,HasMP]>;
defm PLI : APreLoad<1, 0, "pli">, Requires<[IsARM,HasV7]>;
def SETEND : AXI<(outs), (ins setend_op:$end), MiscFrm, NoItinerary,
"setend\t$end", []>, Requires<[IsARM]>, Deprecated<HasV8Ops> {
bits<1> end;
let Inst{31-10} = 0b1111000100000001000000;
let Inst{9} = end;
let Inst{8-0} = 0;
}
def DBG : AI<(outs), (ins imm0_15:$opt), MiscFrm, NoItinerary, "dbg", "\t$opt",
[]>, Requires<[IsARM, HasV7]> {
bits<4> opt;
let Inst{27-4} = 0b001100100000111100001111;
let Inst{3-0} = opt;
}
// A8.8.247 UDF - Undefined (Encoding A1)
def UDF : AInoP<(outs), (ins imm0_65535:$imm16), MiscFrm, NoItinerary,
"udf", "\t$imm16", [(int_arm_undefined imm0_65535:$imm16)]> {
bits<16> imm16;
let Inst{31-28} = 0b1110; // AL
let Inst{27-25} = 0b011;
let Inst{24-20} = 0b11111;
let Inst{19-8} = imm16{15-4};
let Inst{7-4} = 0b1111;
let Inst{3-0} = imm16{3-0};
}
/*
* A5.4 Permanently UNDEFINED instructions.
*
* For most targets use UDF #65006, for which the OS will generate SIGTRAP.
* Other UDF encodings generate SIGILL.
*
* NaCl's OS instead chooses an ARM UDF encoding that's also a UDF in Thumb.
* Encoding A1:
* 1110 0111 1111 iiii iiii iiii 1111 iiii
* Encoding T1:
* 1101 1110 iiii iiii
* It uses the following encoding:
* 1110 0111 1111 1110 1101 1110 1111 0000
* - In ARM: UDF #60896;
* - In Thumb: UDF #254 followed by a branch-to-self.
*/
let isBarrier = 1, isTerminator = 1 in
def TRAPNaCl : AXI<(outs), (ins), MiscFrm, NoItinerary,
"trap", [(trap)]>,
Requires<[IsARM,UseNaClTrap]> {
let Inst = 0xe7fedef0;
}
let isBarrier = 1, isTerminator = 1 in
def TRAP : AXI<(outs), (ins), MiscFrm, NoItinerary,
"trap", [(trap)]>,
Requires<[IsARM,DontUseNaClTrap]> {
let Inst = 0xe7ffdefe;
}
// Address computation and loads and stores in PIC mode.
let isNotDuplicable = 1 in {
def PICADD : ARMPseudoInst<(outs GPR:$dst), (ins GPR:$a, pclabel:$cp, pred:$p),
4, IIC_iALUr,
[(set GPR:$dst, (ARMpic_add GPR:$a, imm:$cp))]>,
Sched<[WriteALU, ReadALU]>;
let AddedComplexity = 10 in {
def PICLDR : ARMPseudoInst<(outs GPR:$dst), (ins addrmodepc:$addr, pred:$p),
4, IIC_iLoad_r,
[(set GPR:$dst, (load addrmodepc:$addr))]>;
def PICLDRH : ARMPseudoInst<(outs GPR:$Rt), (ins addrmodepc:$addr, pred:$p),
4, IIC_iLoad_bh_r,
[(set GPR:$Rt, (zextloadi16 addrmodepc:$addr))]>;
def PICLDRB : ARMPseudoInst<(outs GPR:$Rt), (ins addrmodepc:$addr, pred:$p),
4, IIC_iLoad_bh_r,
[(set GPR:$Rt, (zextloadi8 addrmodepc:$addr))]>;
def PICLDRSH : ARMPseudoInst<(outs GPR:$Rt), (ins addrmodepc:$addr, pred:$p),
4, IIC_iLoad_bh_r,
[(set GPR:$Rt, (sextloadi16 addrmodepc:$addr))]>;
def PICLDRSB : ARMPseudoInst<(outs GPR:$Rt), (ins addrmodepc:$addr, pred:$p),
4, IIC_iLoad_bh_r,
[(set GPR:$Rt, (sextloadi8 addrmodepc:$addr))]>;
}
let AddedComplexity = 10 in {
def PICSTR : ARMPseudoInst<(outs), (ins GPR:$src, addrmodepc:$addr, pred:$p),
4, IIC_iStore_r, [(store GPR:$src, addrmodepc:$addr)]>;
def PICSTRH : ARMPseudoInst<(outs), (ins GPR:$src, addrmodepc:$addr, pred:$p),
4, IIC_iStore_bh_r, [(truncstorei16 GPR:$src,
addrmodepc:$addr)]>;
def PICSTRB : ARMPseudoInst<(outs), (ins GPR:$src, addrmodepc:$addr, pred:$p),
4, IIC_iStore_bh_r, [(truncstorei8 GPR:$src, addrmodepc:$addr)]>;
}
} // isNotDuplicable = 1
// LEApcrel - Load a pc-relative address into a register without offending the
// assembler.
let neverHasSideEffects = 1, isReMaterializable = 1 in
// The 'adr' mnemonic encodes differently if the label is before or after
// the instruction. The {24-21} opcode bits are set by the fixup, as we don't
// know until then which form of the instruction will be used.
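// For example (illustrative operands), "adr r0, label" ends up as
// "add r0, pc, #imm" when the label follows the instruction and as
// "sub r0, pc, #imm" when it precedes it; the fixup selects the ADD or SUB
// opcode bits accordingly.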
def ADR : AI1<{0,?,?,0}, (outs GPR:$Rd), (ins adrlabel:$label),
MiscFrm, IIC_iALUi, "adr", "\t$Rd, $label", []>,
Sched<[WriteALU, ReadALU]> {
bits<4> Rd;
bits<14> label;
let Inst{27-25} = 0b001;
let Inst{24} = 0;
let Inst{23-22} = label{13-12};
let Inst{21} = 0;
let Inst{20} = 0;
let Inst{19-16} = 0b1111;
let Inst{15-12} = Rd;
let Inst{11-0} = label{11-0};
}
let hasSideEffects = 1 in {
def LEApcrel : ARMPseudoInst<(outs GPR:$Rd), (ins i32imm:$label, pred:$p),
4, IIC_iALUi, []>, Sched<[WriteALU, ReadALU]>;
def LEApcrelJT : ARMPseudoInst<(outs GPR:$Rd),
(ins i32imm:$label, nohash_imm:$id, pred:$p),
4, IIC_iALUi, []>, Sched<[WriteALU, ReadALU]>;
}
//===----------------------------------------------------------------------===//
// Control Flow Instructions.
//
let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
// ARMV4T and above
def BX_RET : AI<(outs), (ins), BrMiscFrm, IIC_Br,
"bx", "\tlr", [(ARMretflag)]>,
Requires<[IsARM, HasV4T]>, Sched<[WriteBr]> {
let Inst{27-0} = 0b0001001011111111111100011110;
}
// ARMV4 only
def MOVPCLR : AI<(outs), (ins), BrMiscFrm, IIC_Br,
"mov", "\tpc, lr", [(ARMretflag)]>,
Requires<[IsARM, NoV4T]>, Sched<[WriteBr]> {
let Inst{27-0} = 0b0001101000001111000000001110;
}
// Exception return: N.b. doesn't set CPSR as far as we're concerned (it sets
// the user-space one).
def SUBS_PC_LR : ARMPseudoInst<(outs), (ins i32imm:$offset, pred:$p),
4, IIC_Br,
[(ARMintretflag imm:$offset)]>;
}
// Indirect branches
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
// ARMV4T and above
def BX : AXI<(outs), (ins GPR:$dst), BrMiscFrm, IIC_Br, "bx\t$dst",
[(brind GPR:$dst)]>,
Requires<[IsARM, HasV4T]>, Sched<[WriteBr]> {
bits<4> dst;
let Inst{31-4} = 0b1110000100101111111111110001;
let Inst{3-0} = dst;
}
def BX_pred : AI<(outs), (ins GPR:$dst), BrMiscFrm, IIC_Br,
"bx", "\t$dst", [/* pattern left blank */]>,
Requires<[IsARM, HasV4T]>, Sched<[WriteBr]> {
bits<4> dst;
let Inst{27-4} = 0b000100101111111111110001;
let Inst{3-0} = dst;
}
}
// SP is marked as a use to prevent stack-pointer assignments that appear
// immediately before calls from potentially appearing dead.
let isCall = 1,
// FIXME: Do we really need a non-predicated version? If so, it should
// at least be a pseudo instruction expanding to the predicated version
// at MC lowering time.
Defs = [LR], Uses = [SP] in {
def BL : ABXI<0b1011, (outs), (ins bl_target:$func),
IIC_Br, "bl\t$func",
[(ARMcall tglobaladdr:$func)]>,
Requires<[IsARM]>, Sched<[WriteBrL]> {
let Inst{31-28} = 0b1110;
bits<24> func;
let Inst{23-0} = func;
let DecoderMethod = "DecodeBranchImmInstruction";
}
def BL_pred : ABI<0b1011, (outs), (ins bl_target:$func),
IIC_Br, "bl", "\t$func",
[(ARMcall_pred tglobaladdr:$func)]>,
Requires<[IsARM]>, Sched<[WriteBrL]> {
bits<24> func;
let Inst{23-0} = func;
let DecoderMethod = "DecodeBranchImmInstruction";
}
// ARMv5T and above
def BLX : AXI<(outs), (ins GPR:$func), BrMiscFrm,
IIC_Br, "blx\t$func",
[(ARMcall GPR:$func)]>,
Requires<[IsARM, HasV5T]>, Sched<[WriteBrL]> {
bits<4> func;
let Inst{31-4} = 0b1110000100101111111111110011;
let Inst{3-0} = func;
}
def BLX_pred : AI<(outs), (ins GPR:$func), BrMiscFrm,
IIC_Br, "blx", "\t$func",
[(ARMcall_pred GPR:$func)]>,
Requires<[IsARM, HasV5T]>, Sched<[WriteBrL]> {
bits<4> func;
let Inst{27-4} = 0b000100101111111111110011;
let Inst{3-0} = func;
}
// ARMv4T
// Note: Restrict $func to the tGPR regclass to prevent it being in LR.
def BX_CALL : ARMPseudoInst<(outs), (ins tGPR:$func),
8, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
Requires<[IsARM, HasV4T]>, Sched<[WriteBr]>;
// ARMv4
def BMOVPCRX_CALL : ARMPseudoInst<(outs), (ins tGPR:$func),
8, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
Requires<[IsARM, NoV4T]>, Sched<[WriteBr]>;
// mov lr, pc; b if callee is marked noreturn to avoid confusing the
// return stack predictor.
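// A sketch of the expansion (callee name illustrative):
//   mov lr, pc
//   b   <callee>
// Using a plain branch rather than bl keeps the return stack predictor from
// recording a return address for a call that never returns.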
def BMOVPCB_CALL : ARMPseudoInst<(outs), (ins bl_target:$func),
8, IIC_Br, [(ARMcall_nolink tglobaladdr:$func)]>,
Requires<[IsARM]>, Sched<[WriteBr]>;
}
let isBranch = 1, isTerminator = 1 in {
// FIXME: should be able to write a pattern for ARMBrcond, but can't use
// a two-value operand where a dag node expects two operands. :(
def Bcc : ABI<0b1010, (outs), (ins br_target:$target),
IIC_Br, "b", "\t$target",
[/*(ARMbrcond bb:$target, imm:$cc, CCR:$ccr)*/]>,
Sched<[WriteBr]> {
bits<24> target;
let Inst{23-0} = target;
let DecoderMethod = "DecodeBranchImmInstruction";
}
let isBarrier = 1 in {
// B is "predicable" since it's just a Bcc with an 'always' condition.
let isPredicable = 1 in
// FIXME: We shouldn't need this pseudo at all. Just using Bcc directly
// should be sufficient.
// FIXME: Is B really a Barrier? That doesn't seem right.
def B : ARMPseudoExpand<(outs), (ins br_target:$target), 4, IIC_Br,
[(br bb:$target)], (Bcc br_target:$target, (ops 14, zero_reg))>,
Sched<[WriteBr]>;
let isNotDuplicable = 1, isIndirectBranch = 1 in {
def BR_JTr : ARMPseudoInst<(outs),
(ins GPR:$target, i32imm:$jt, i32imm:$id),
0, IIC_Br,
[(ARMbrjt GPR:$target, tjumptable:$jt, imm:$id)]>,
Sched<[WriteBr]>;
// FIXME: This shouldn't use the generic "addrmode2," but rather be split
// into i12 and rs suffixed versions.
def BR_JTm : ARMPseudoInst<(outs),
(ins addrmode2:$target, i32imm:$jt, i32imm:$id),
0, IIC_Br,
[(ARMbrjt (i32 (load addrmode2:$target)), tjumptable:$jt,
imm:$id)]>, Sched<[WriteBrTbl]>;
def BR_JTadd : ARMPseudoInst<(outs),
(ins GPR:$target, GPR:$idx, i32imm:$jt, i32imm:$id),
0, IIC_Br,
[(ARMbrjt (add GPR:$target, GPR:$idx), tjumptable:$jt,
imm:$id)]>, Sched<[WriteBrTbl]>;
} // isNotDuplicable = 1, isIndirectBranch = 1
} // isBarrier = 1
}
// BLX (immediate)
def BLXi : AXI<(outs), (ins blx_target:$target), BrMiscFrm, NoItinerary,
"blx\t$target", []>,
Requires<[IsARM, HasV5T]>, Sched<[WriteBrL]> {
let Inst{31-25} = 0b1111101;
bits<25> target;
let Inst{23-0} = target{24-1};
let Inst{24} = target{0};
}
// Branch and Exchange Jazelle
def BXJ : ABI<0b0001, (outs), (ins GPR:$func), NoItinerary, "bxj", "\t$func",
[/* pattern left blank */]>, Sched<[WriteBr]> {
bits<4> func;
let Inst{23-20} = 0b0010;
let Inst{19-8} = 0xfff;
let Inst{7-4} = 0b0010;
let Inst{3-0} = func;
}
// Tail calls.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
def TCRETURNdi : PseudoInst<(outs), (ins i32imm:$dst), IIC_Br, []>,
Sched<[WriteBr]>;
def TCRETURNri : PseudoInst<(outs), (ins tcGPR:$dst), IIC_Br, []>,
Sched<[WriteBr]>;
def TAILJMPd : ARMPseudoExpand<(outs), (ins br_target:$dst),
4, IIC_Br, [],
(Bcc br_target:$dst, (ops 14, zero_reg))>,
Requires<[IsARM]>, Sched<[WriteBr]>;
def TAILJMPr : ARMPseudoExpand<(outs), (ins tcGPR:$dst),
4, IIC_Br, [],
(BX GPR:$dst)>, Sched<[WriteBr]>,
Requires<[IsARM]>;
}
// Secure Monitor Call is a system instruction.
def SMC : ABI<0b0001, (outs), (ins imm0_15:$opt), NoItinerary, "smc", "\t$opt",
[]>, Requires<[IsARM, HasTrustZone]> {
bits<4> opt;
let Inst{23-4} = 0b01100000000000000111;
let Inst{3-0} = opt;
}
// Supervisor Call (Software Interrupt)
let isCall = 1, Uses = [SP] in {
def SVC : ABI<0b1111, (outs), (ins imm24b:$svc), IIC_Br, "svc", "\t$svc", []>,
Sched<[WriteBr]> {
bits<24> svc;
let Inst{23-0} = svc;
}
}
// Store Return State
class SRSI<bit wb, string asm>
: XI<(outs), (ins imm0_31:$mode), AddrModeNone, 4, IndexModeNone, BrFrm,
NoItinerary, asm, "", []> {
bits<5> mode;
let Inst{31-28} = 0b1111;
let Inst{27-25} = 0b100;
let Inst{22} = 1;
let Inst{21} = wb;
let Inst{20} = 0;
let Inst{19-16} = 0b1101; // SP
let Inst{15-5} = 0b00000101000;
let Inst{4-0} = mode;
}
def SRSDA : SRSI<0, "srsda\tsp, $mode"> {
let Inst{24-23} = 0;
}
def SRSDA_UPD : SRSI<1, "srsda\tsp!, $mode"> {
let Inst{24-23} = 0;
}
def SRSDB : SRSI<0, "srsdb\tsp, $mode"> {
let Inst{24-23} = 0b10;
}
def SRSDB_UPD : SRSI<1, "srsdb\tsp!, $mode"> {
let Inst{24-23} = 0b10;
}
def SRSIA : SRSI<0, "srsia\tsp, $mode"> {
let Inst{24-23} = 0b01;
}
def SRSIA_UPD : SRSI<1, "srsia\tsp!, $mode"> {
let Inst{24-23} = 0b01;
}
def SRSIB : SRSI<0, "srsib\tsp, $mode"> {
let Inst{24-23} = 0b11;
}
def SRSIB_UPD : SRSI<1, "srsib\tsp!, $mode"> {
let Inst{24-23} = 0b11;
}
def : ARMInstAlias<"srsda $mode", (SRSDA imm0_31:$mode)>;
def : ARMInstAlias<"srsda $mode!", (SRSDA_UPD imm0_31:$mode)>;
def : ARMInstAlias<"srsdb $mode", (SRSDB imm0_31:$mode)>;
def : ARMInstAlias<"srsdb $mode!", (SRSDB_UPD imm0_31:$mode)>;
def : ARMInstAlias<"srsia $mode", (SRSIA imm0_31:$mode)>;
def : ARMInstAlias<"srsia $mode!", (SRSIA_UPD imm0_31:$mode)>;
def : ARMInstAlias<"srsib $mode", (SRSIB imm0_31:$mode)>;
def : ARMInstAlias<"srsib $mode!", (SRSIB_UPD imm0_31:$mode)>;
// Return From Exception
class RFEI<bit wb, string asm>
: XI<(outs), (ins GPR:$Rn), AddrModeNone, 4, IndexModeNone, BrFrm,
NoItinerary, asm, "", []> {
bits<4> Rn;
let Inst{31-28} = 0b1111;
let Inst{27-25} = 0b100;
let Inst{22} = 0;
let Inst{21} = wb;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-0} = 0xa00;
}
def RFEDA : RFEI<0, "rfeda\t$Rn"> {
let Inst{24-23} = 0;
}
def RFEDA_UPD : RFEI<1, "rfeda\t$Rn!"> {
let Inst{24-23} = 0;
}
def RFEDB : RFEI<0, "rfedb\t$Rn"> {
let Inst{24-23} = 0b10;
}
def RFEDB_UPD : RFEI<1, "rfedb\t$Rn!"> {
let Inst{24-23} = 0b10;
}
def RFEIA : RFEI<0, "rfeia\t$Rn"> {
let Inst{24-23} = 0b01;
}
def RFEIA_UPD : RFEI<1, "rfeia\t$Rn!"> {
let Inst{24-23} = 0b01;
}
def RFEIB : RFEI<0, "rfeib\t$Rn"> {
let Inst{24-23} = 0b11;
}
def RFEIB_UPD : RFEI<1, "rfeib\t$Rn!"> {
let Inst{24-23} = 0b11;
}
//===----------------------------------------------------------------------===//
// Load / Store Instructions.
//
// Load
defm LDR : AI_ldr1<0, "ldr", IIC_iLoad_r, IIC_iLoad_si,
UnOpFrag<(load node:$Src)>>;
defm LDRB : AI_ldr1nopc<1, "ldrb", IIC_iLoad_bh_r, IIC_iLoad_bh_si,
UnOpFrag<(zextloadi8 node:$Src)>>;
defm STR : AI_str1<0, "str", IIC_iStore_r, IIC_iStore_si,
BinOpFrag<(store node:$LHS, node:$RHS)>>;
defm STRB : AI_str1nopc<1, "strb", IIC_iStore_bh_r, IIC_iStore_bh_si,
BinOpFrag<(truncstorei8 node:$LHS, node:$RHS)>>;
// Special LDR for loads from non-pc-relative constpools.
let canFoldAsLoad = 1, mayLoad = 1, neverHasSideEffects = 1,
isReMaterializable = 1, isCodeGenOnly = 1 in
def LDRcp : AI2ldst<0b010, 1, 0, (outs GPR:$Rt), (ins addrmode_imm12:$addr),
AddrMode_i12, LdFrm, IIC_iLoad_r, "ldr", "\t$Rt, $addr",
[]> {
bits<4> Rt;
bits<17> addr;
let Inst{23} = addr{12}; // U (add = ('U' == 1))
let Inst{19-16} = 0b1111;
let Inst{15-12} = Rt;
let Inst{11-0} = addr{11-0}; // imm12
}
// Loads with zero extension
def LDRH : AI3ld<0b1011, 1, (outs GPR:$Rt), (ins addrmode3:$addr), LdMiscFrm,
IIC_iLoad_bh_r, "ldrh", "\t$Rt, $addr",
[(set GPR:$Rt, (zextloadi16 addrmode3:$addr))]>;
// Loads with sign extension
def LDRSH : AI3ld<0b1111, 1, (outs GPR:$Rt), (ins addrmode3:$addr), LdMiscFrm,
IIC_iLoad_bh_r, "ldrsh", "\t$Rt, $addr",
[(set GPR:$Rt, (sextloadi16 addrmode3:$addr))]>;
def LDRSB : AI3ld<0b1101, 1, (outs GPR:$Rt), (ins addrmode3:$addr), LdMiscFrm,
IIC_iLoad_bh_r, "ldrsb", "\t$Rt, $addr",
[(set GPR:$Rt, (sextloadi8 addrmode3:$addr))]>;
let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
// Load doubleword
def LDRD : AI3ld<0b1101, 0, (outs GPR:$Rt, GPR:$Rt2), (ins addrmode3:$addr),
LdMiscFrm, IIC_iLoad_d_r, "ldrd", "\t$Rt, $Rt2, $addr", []>,
Requires<[IsARM, HasV5TE]>;
}
def LDA : AIldracq<0b00, (outs GPR:$Rt), (ins addr_offset_none:$addr),
NoItinerary, "lda", "\t$Rt, $addr", []>;
def LDAB : AIldracq<0b10, (outs GPR:$Rt), (ins addr_offset_none:$addr),
NoItinerary, "ldab", "\t$Rt, $addr", []>;
def LDAH : AIldracq<0b11, (outs GPR:$Rt), (ins addr_offset_none:$addr),
NoItinerary, "ldah", "\t$Rt, $addr", []>;
// Indexed loads
multiclass AI2_ldridx<bit isByte, string opc,
InstrItinClass iii, InstrItinClass iir> {
def _PRE_IMM : AI2ldstidx<1, isByte, 1, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addrmode_imm12_pre:$addr), IndexModePre, LdFrm, iii,
opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> {
bits<17> addr;
let Inst{25} = 0;
let Inst{23} = addr{12};
let Inst{19-16} = addr{16-13};
let Inst{11-0} = addr{11-0};
let DecoderMethod = "DecodeLDRPreImm";
}
def _PRE_REG : AI2ldstidx<1, isByte, 1, (outs GPR:$Rt, GPR:$Rn_wb),
(ins ldst_so_reg:$addr), IndexModePre, LdFrm, iir,
opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> {
bits<17> addr;
let Inst{25} = 1;
let Inst{23} = addr{12};
let Inst{19-16} = addr{16-13};
let Inst{11-0} = addr{11-0};
let Inst{4} = 0;
let DecoderMethod = "DecodeLDRPreReg";
}
def _POST_REG : AI2ldstidx<1, isByte, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addr_offset_none:$addr, am2offset_reg:$offset),
IndexModePost, LdFrm, iir,
opc, "\t$Rt, $addr, $offset",
"$addr.base = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
bits<4> addr;
let Inst{25} = 1;
let Inst{23} = offset{12};
let Inst{19-16} = addr;
let Inst{11-0} = offset{11-0};
let Inst{4} = 0;
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
def _POST_IMM : AI2ldstidx<1, isByte, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addr_offset_none:$addr, am2offset_imm:$offset),
IndexModePost, LdFrm, iii,
opc, "\t$Rt, $addr, $offset",
"$addr.base = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
bits<4> addr;
let Inst{25} = 0;
let Inst{23} = offset{12};
let Inst{19-16} = addr;
let Inst{11-0} = offset{11-0};
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
}
let mayLoad = 1, neverHasSideEffects = 1 in {
// FIXME: for LDR_PRE_REG etc. the itinerary should be either IIC_iLoad_ru or
// IIC_iLoad_siu depending on whether the offset register is shifted.
defm LDR : AI2_ldridx<0, "ldr", IIC_iLoad_iu, IIC_iLoad_ru>;
defm LDRB : AI2_ldridx<1, "ldrb", IIC_iLoad_bh_iu, IIC_iLoad_bh_ru>;
}
multiclass AI3_ldridx<bits<4> op, string opc, InstrItinClass itin> {
def _PRE : AI3ldstidx<op, 1, 1, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addrmode3_pre:$addr), IndexModePre,
LdMiscFrm, itin,
opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> {
bits<14> addr;
let Inst{23} = addr{8}; // U bit
let Inst{22} = addr{13}; // 1 == imm8, 0 == Rm
let Inst{19-16} = addr{12-9}; // Rn
let Inst{11-8} = addr{7-4}; // imm7_4/zero
let Inst{3-0} = addr{3-0}; // imm3_0/Rm
let DecoderMethod = "DecodeAddrMode3Instruction";
}
def _POST : AI3ldstidx<op, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addr_offset_none:$addr, am3offset:$offset),
IndexModePost, LdMiscFrm, itin,
opc, "\t$Rt, $addr, $offset", "$addr.base = $Rn_wb",
[]> {
bits<10> offset;
bits<4> addr;
let Inst{23} = offset{8}; // U bit
let Inst{22} = offset{9}; // 1 == imm8, 0 == Rm
let Inst{19-16} = addr;
let Inst{11-8} = offset{7-4}; // imm7_4/zero
let Inst{3-0} = offset{3-0}; // imm3_0/Rm
let DecoderMethod = "DecodeAddrMode3Instruction";
}
}
let mayLoad = 1, neverHasSideEffects = 1 in {
defm LDRH : AI3_ldridx<0b1011, "ldrh", IIC_iLoad_bh_ru>;
defm LDRSH : AI3_ldridx<0b1111, "ldrsh", IIC_iLoad_bh_ru>;
defm LDRSB : AI3_ldridx<0b1101, "ldrsb", IIC_iLoad_bh_ru>;
let hasExtraDefRegAllocReq = 1 in {
def LDRD_PRE : AI3ldstidx<0b1101, 0, 1, (outs GPR:$Rt, GPR:$Rt2, GPR:$Rn_wb),
(ins addrmode3_pre:$addr), IndexModePre,
LdMiscFrm, IIC_iLoad_d_ru,
"ldrd", "\t$Rt, $Rt2, $addr!",
"$addr.base = $Rn_wb", []> {
bits<14> addr;
let Inst{23} = addr{8}; // U bit
let Inst{22} = addr{13}; // 1 == imm8, 0 == Rm
let Inst{19-16} = addr{12-9}; // Rn
let Inst{11-8} = addr{7-4}; // imm7_4/zero
let Inst{3-0} = addr{3-0}; // imm3_0/Rm
let DecoderMethod = "DecodeAddrMode3Instruction";
}
def LDRD_POST: AI3ldstidx<0b1101, 0, 0, (outs GPR:$Rt, GPR:$Rt2, GPR:$Rn_wb),
(ins addr_offset_none:$addr, am3offset:$offset),
IndexModePost, LdMiscFrm, IIC_iLoad_d_ru,
"ldrd", "\t$Rt, $Rt2, $addr, $offset",
"$addr.base = $Rn_wb", []> {
bits<10> offset;
bits<4> addr;
let Inst{23} = offset{8}; // U bit
let Inst{22} = offset{9}; // 1 == imm8, 0 == Rm
let Inst{19-16} = addr;
let Inst{11-8} = offset{7-4}; // imm7_4/zero
let Inst{3-0} = offset{3-0}; // imm3_0/Rm
let DecoderMethod = "DecodeAddrMode3Instruction";
}
} // hasExtraDefRegAllocReq = 1
} // mayLoad = 1, neverHasSideEffects = 1
// LDRT, LDRBT, LDRSBT, LDRHT, LDRSHT.
let mayLoad = 1, neverHasSideEffects = 1 in {
def LDRT_POST_REG : AI2ldstidx<1, 0, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addr_offset_none:$addr, am2offset_reg:$offset),
IndexModePost, LdFrm, IIC_iLoad_ru,
"ldrt", "\t$Rt, $addr, $offset",
"$addr.base = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
bits<4> addr;
let Inst{25} = 1;
let Inst{23} = offset{12};
let Inst{21} = 1; // overwrite
let Inst{19-16} = addr;
let Inst{11-5} = offset{11-5};
let Inst{4} = 0;
let Inst{3-0} = offset{3-0};
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
def LDRT_POST_IMM
: AI2ldstidx<1, 0, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addr_offset_none:$addr, am2offset_imm:$offset),
IndexModePost, LdFrm, IIC_iLoad_ru,
"ldrt", "\t$Rt, $addr, $offset", "$addr.base = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
bits<4> addr;
let Inst{25} = 0;
let Inst{23} = offset{12};
let Inst{21} = 1; // overwrite
let Inst{19-16} = addr;
let Inst{11-0} = offset{11-0};
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
def LDRBT_POST_REG : AI2ldstidx<1, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addr_offset_none:$addr, am2offset_reg:$offset),
IndexModePost, LdFrm, IIC_iLoad_bh_ru,
"ldrbt", "\t$Rt, $addr, $offset",
"$addr.base = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
bits<4> addr;
let Inst{25} = 1;
let Inst{23} = offset{12};
let Inst{21} = 1; // overwrite
let Inst{19-16} = addr;
let Inst{11-5} = offset{11-5};
let Inst{4} = 0;
let Inst{3-0} = offset{3-0};
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
def LDRBT_POST_IMM
: AI2ldstidx<1, 1, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addr_offset_none:$addr, am2offset_imm:$offset),
IndexModePost, LdFrm, IIC_iLoad_bh_ru,
"ldrbt", "\t$Rt, $addr, $offset", "$addr.base = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
bits<4> addr;
let Inst{25} = 0;
let Inst{23} = offset{12};
let Inst{21} = 1; // overwrite
let Inst{19-16} = addr;
let Inst{11-0} = offset{11-0};
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
multiclass AI3ldrT<bits<4> op, string opc> {
def i : AI3ldstidxT<op, 1, (outs GPR:$Rt, GPR:$base_wb),
(ins addr_offset_none:$addr, postidx_imm8:$offset),
IndexModePost, LdMiscFrm, IIC_iLoad_bh_ru, opc,
"\t$Rt, $addr, $offset", "$addr.base = $base_wb", []> {
bits<9> offset;
let Inst{23} = offset{8};
let Inst{22} = 1;
let Inst{11-8} = offset{7-4};
let Inst{3-0} = offset{3-0};
}
def r : AI3ldstidxT<op, 1, (outs GPRnopc:$Rt, GPRnopc:$base_wb),
(ins addr_offset_none:$addr, postidx_reg:$Rm),
IndexModePost, LdMiscFrm, IIC_iLoad_bh_ru, opc,
"\t$Rt, $addr, $Rm", "$addr.base = $base_wb", []> {
bits<5> Rm;
let Inst{23} = Rm{4};
let Inst{22} = 0;
let Inst{11-8} = 0;
let Unpredictable{11-8} = 0b1111;
let Inst{3-0} = Rm{3-0};
let DecoderMethod = "DecodeLDR";
}
}
defm LDRSBT : AI3ldrT<0b1101, "ldrsbt">;
defm LDRHT : AI3ldrT<0b1011, "ldrht">;
defm LDRSHT : AI3ldrT<0b1111, "ldrsht">;
}
def LDRT_POST
: ARMAsmPseudo<"ldrt${q} $Rt, $addr", (ins addr_offset_none:$addr, pred:$q),
(outs GPR:$Rt)>;
def LDRBT_POST
: ARMAsmPseudo<"ldrbt${q} $Rt, $addr", (ins addr_offset_none:$addr, pred:$q),
(outs GPR:$Rt)>;
// Store
// Stores with truncate
def STRH : AI3str<0b1011, (outs), (ins GPR:$Rt, addrmode3:$addr), StMiscFrm,
IIC_iStore_bh_r, "strh", "\t$Rt, $addr",
[(truncstorei16 GPR:$Rt, addrmode3:$addr)]>;
// Store doubleword
let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
def STRD : AI3str<0b1111, (outs), (ins GPR:$Rt, GPR:$Rt2, addrmode3:$addr),
StMiscFrm, IIC_iStore_d_r, "strd", "\t$Rt, $Rt2, $addr", []>,
Requires<[IsARM, HasV5TE]> {
let Inst{21} = 0;
}
}
// Indexed stores
multiclass AI2_stridx<bit isByte, string opc,
InstrItinClass iii, InstrItinClass iir> {
def _PRE_IMM : AI2ldstidx<0, isByte, 1, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addrmode_imm12_pre:$addr), IndexModePre,
StFrm, iii,
opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> {
bits<17> addr;
let Inst{25} = 0;
let Inst{23} = addr{12}; // U (add = ('U' == 1))
let Inst{19-16} = addr{16-13}; // Rn
let Inst{11-0} = addr{11-0}; // imm12
let DecoderMethod = "DecodeSTRPreImm";
}
def _PRE_REG : AI2ldstidx<0, isByte, 1, (outs GPR:$Rn_wb),
(ins GPR:$Rt, ldst_so_reg:$addr),
IndexModePre, StFrm, iir,
opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> {
bits<17> addr;
let Inst{25} = 1;
let Inst{23} = addr{12}; // U (add = ('U' == 1))
let Inst{19-16} = addr{16-13}; // Rn
let Inst{11-0} = addr{11-0};
let Inst{4} = 0; // Inst{4} = 0
let DecoderMethod = "DecodeSTRPreReg";
}
def _POST_REG : AI2ldstidx<0, isByte, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addr_offset_none:$addr, am2offset_reg:$offset),
IndexModePost, StFrm, iir,
opc, "\t$Rt, $addr, $offset",
"$addr.base = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
bits<4> addr;
let Inst{25} = 1;
let Inst{23} = offset{12};
let Inst{19-16} = addr;
let Inst{11-0} = offset{11-0};
let Inst{4} = 0;
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
def _POST_IMM : AI2ldstidx<0, isByte, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addr_offset_none:$addr, am2offset_imm:$offset),
IndexModePost, StFrm, iii,
opc, "\t$Rt, $addr, $offset",
"$addr.base = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
bits<4> addr;
let Inst{25} = 0;
let Inst{23} = offset{12};
let Inst{19-16} = addr;
let Inst{11-0} = offset{11-0};
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
}
let mayStore = 1, neverHasSideEffects = 1 in {
// FIXME: for STR_PRE_REG etc. the itinerary should be either IIC_iStore_ru or
// IIC_iStore_siu depending on whether the offset register is shifted.
defm STR : AI2_stridx<0, "str", IIC_iStore_iu, IIC_iStore_ru>;
defm STRB : AI2_stridx<1, "strb", IIC_iStore_bh_iu, IIC_iStore_bh_ru>;
}
def : ARMPat<(post_store GPR:$Rt, addr_offset_none:$addr,
am2offset_reg:$offset),
(STR_POST_REG GPR:$Rt, addr_offset_none:$addr,
am2offset_reg:$offset)>;
def : ARMPat<(post_store GPR:$Rt, addr_offset_none:$addr,
am2offset_imm:$offset),
(STR_POST_IMM GPR:$Rt, addr_offset_none:$addr,
am2offset_imm:$offset)>;
def : ARMPat<(post_truncsti8 GPR:$Rt, addr_offset_none:$addr,
am2offset_reg:$offset),
(STRB_POST_REG GPR:$Rt, addr_offset_none:$addr,
am2offset_reg:$offset)>;
def : ARMPat<(post_truncsti8 GPR:$Rt, addr_offset_none:$addr,
am2offset_imm:$offset),
(STRB_POST_IMM GPR:$Rt, addr_offset_none:$addr,
am2offset_imm:$offset)>;
// Pseudo-instructions for pattern matching the pre-indexed stores. We can't
// put the patterns on the instruction definitions directly as ISel wants
// the address base and offset to be separate operands, not a single
// complex operand like we represent the instructions themselves. The
// pseudos map between the two.
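// For example (registers and offset illustrative), ISel sees
// (pre_store $Rt, $Rn, am2offset) with the base and offset as separate
// operands; the custom inserter re-bundles them into the single
// addressing-mode operand of the real pre-indexed store, i.e. the form
// printed as "str r0, [r1, #4]!".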
let usesCustomInserter = 1,
Constraints = "$Rn = $Rn_wb,@earlyclobber $Rn_wb" in {
def STRi_preidx: ARMPseudoInst<(outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rn, am2offset_imm:$offset, pred:$p),
4, IIC_iStore_ru,
[(set GPR:$Rn_wb,
(pre_store GPR:$Rt, GPR:$Rn, am2offset_imm:$offset))]>;
def STRr_preidx: ARMPseudoInst<(outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rn, am2offset_reg:$offset, pred:$p),
4, IIC_iStore_ru,
[(set GPR:$Rn_wb,
(pre_store GPR:$Rt, GPR:$Rn, am2offset_reg:$offset))]>;
def STRBi_preidx: ARMPseudoInst<(outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rn, am2offset_imm:$offset, pred:$p),
4, IIC_iStore_ru,
[(set GPR:$Rn_wb,
(pre_truncsti8 GPR:$Rt, GPR:$Rn, am2offset_imm:$offset))]>;
def STRBr_preidx: ARMPseudoInst<(outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rn, am2offset_reg:$offset, pred:$p),
4, IIC_iStore_ru,
[(set GPR:$Rn_wb,
(pre_truncsti8 GPR:$Rt, GPR:$Rn, am2offset_reg:$offset))]>;
def STRH_preidx: ARMPseudoInst<(outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rn, am3offset:$offset, pred:$p),
4, IIC_iStore_ru,
[(set GPR:$Rn_wb,
(pre_truncsti16 GPR:$Rt, GPR:$Rn, am3offset:$offset))]>;
}
def STRH_PRE : AI3ldstidx<0b1011, 0, 1, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addrmode3_pre:$addr), IndexModePre,
StMiscFrm, IIC_iStore_bh_ru,
"strh", "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> {
bits<14> addr;
let Inst{23} = addr{8}; // U bit
let Inst{22} = addr{13}; // 1 == imm8, 0 == Rm
let Inst{19-16} = addr{12-9}; // Rn
let Inst{11-8} = addr{7-4}; // imm7_4/zero
let Inst{3-0} = addr{3-0}; // imm3_0/Rm
let DecoderMethod = "DecodeAddrMode3Instruction";
}
def STRH_POST : AI3ldstidx<0b1011, 0, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addr_offset_none:$addr, am3offset:$offset),
IndexModePost, StMiscFrm, IIC_iStore_bh_ru,
"strh", "\t$Rt, $addr, $offset", "$addr.base = $Rn_wb",
[(set GPR:$Rn_wb, (post_truncsti16 GPR:$Rt,
addr_offset_none:$addr,
am3offset:$offset))]> {
bits<10> offset;
bits<4> addr;
let Inst{23} = offset{8}; // U bit
let Inst{22} = offset{9}; // 1 == imm8, 0 == Rm
let Inst{19-16} = addr;
let Inst{11-8} = offset{7-4}; // imm7_4/zero
let Inst{3-0} = offset{3-0}; // imm3_0/Rm
let DecoderMethod = "DecodeAddrMode3Instruction";
}
let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
def STRD_PRE : AI3ldstidx<0b1111, 0, 1, (outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rt2, addrmode3_pre:$addr),
IndexModePre, StMiscFrm, IIC_iStore_d_ru,
"strd", "\t$Rt, $Rt2, $addr!",
"$addr.base = $Rn_wb", []> {
bits<14> addr;
let Inst{23} = addr{8}; // U bit
let Inst{22} = addr{13}; // 1 == imm8, 0 == Rm
let Inst{19-16} = addr{12-9}; // Rn
let Inst{11-8} = addr{7-4}; // imm7_4/zero
let Inst{3-0} = addr{3-0}; // imm3_0/Rm
let DecoderMethod = "DecodeAddrMode3Instruction";
}
def STRD_POST: AI3ldstidx<0b1111, 0, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, GPR:$Rt2, addr_offset_none:$addr,
am3offset:$offset),
IndexModePost, StMiscFrm, IIC_iStore_d_ru,
"strd", "\t$Rt, $Rt2, $addr, $offset",
"$addr.base = $Rn_wb", []> {
bits<10> offset;
bits<4> addr;
let Inst{23} = offset{8}; // U bit
let Inst{22} = offset{9}; // 1 == imm8, 0 == Rm
let Inst{19-16} = addr;
let Inst{11-8} = offset{7-4}; // imm7_4/zero
let Inst{3-0} = offset{3-0}; // imm3_0/Rm
let DecoderMethod = "DecodeAddrMode3Instruction";
}
} // mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1
// STRT, STRBT, and STRHT
def STRBT_POST_REG : AI2ldstidx<0, 1, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addr_offset_none:$addr, am2offset_reg:$offset),
IndexModePost, StFrm, IIC_iStore_bh_ru,
"strbt", "\t$Rt, $addr, $offset",
"$addr.base = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
bits<4> addr;
let Inst{25} = 1;
let Inst{23} = offset{12};
let Inst{21} = 1; // overwrite
let Inst{19-16} = addr;
let Inst{11-5} = offset{11-5};
let Inst{4} = 0;
let Inst{3-0} = offset{3-0};
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
def STRBT_POST_IMM
: AI2ldstidx<0, 1, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addr_offset_none:$addr, am2offset_imm:$offset),
IndexModePost, StFrm, IIC_iStore_bh_ru,
"strbt", "\t$Rt, $addr, $offset", "$addr.base = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
bits<4> addr;
let Inst{25} = 0;
let Inst{23} = offset{12};
let Inst{21} = 1; // overwrite
let Inst{19-16} = addr;
let Inst{11-0} = offset{11-0};
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
def STRBT_POST
: ARMAsmPseudo<"strbt${q} $Rt, $addr",
(ins GPR:$Rt, addr_offset_none:$addr, pred:$q)>;
let mayStore = 1, neverHasSideEffects = 1 in {
def STRT_POST_REG : AI2ldstidx<0, 0, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addr_offset_none:$addr, am2offset_reg:$offset),
IndexModePost, StFrm, IIC_iStore_ru,
"strt", "\t$Rt, $addr, $offset",
"$addr.base = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
bits<4> addr;
let Inst{25} = 1;
let Inst{23} = offset{12};
let Inst{21} = 1; // overwrite
let Inst{19-16} = addr;
let Inst{11-5} = offset{11-5};
let Inst{4} = 0;
let Inst{3-0} = offset{3-0};
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
def STRT_POST_IMM
: AI2ldstidx<0, 0, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addr_offset_none:$addr, am2offset_imm:$offset),
IndexModePost, StFrm, IIC_iStore_ru,
"strt", "\t$Rt, $addr, $offset", "$addr.base = $Rn_wb", []> {
// {12} isAdd
// {11-0} imm12/Rm
bits<14> offset;
bits<4> addr;
let Inst{25} = 0;
let Inst{23} = offset{12};
let Inst{21} = 1; // overwrite
let Inst{19-16} = addr;
let Inst{11-0} = offset{11-0};
let DecoderMethod = "DecodeAddrMode2IdxInstruction";
}
}
def STRT_POST
: ARMAsmPseudo<"strt${q} $Rt, $addr",
(ins GPR:$Rt, addr_offset_none:$addr, pred:$q)>;
multiclass AI3strT<bits<4> op, string opc> {
def i : AI3ldstidxT<op, 0, (outs GPR:$base_wb),
(ins GPR:$Rt, addr_offset_none:$addr, postidx_imm8:$offset),
IndexModePost, StMiscFrm, IIC_iStore_bh_ru, opc,
"\t$Rt, $addr, $offset", "$addr.base = $base_wb", []> {
bits<9> offset;
let Inst{23} = offset{8};
let Inst{22} = 1;
let Inst{11-8} = offset{7-4};
let Inst{3-0} = offset{3-0};
}
def r : AI3ldstidxT<op, 0, (outs GPR:$base_wb),
(ins GPR:$Rt, addr_offset_none:$addr, postidx_reg:$Rm),
IndexModePost, StMiscFrm, IIC_iStore_bh_ru, opc,
"\t$Rt, $addr, $Rm", "$addr.base = $base_wb", []> {
bits<5> Rm;
let Inst{23} = Rm{4};
let Inst{22} = 0;
let Inst{11-8} = 0;
let Inst{3-0} = Rm{3-0};
}
}
defm STRHT : AI3strT<0b1011, "strht">;
def STL : AIstrrel<0b00, (outs), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "stl", "\t$Rt, $addr", []>;
def STLB : AIstrrel<0b10, (outs), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "stlb", "\t$Rt, $addr", []>;
def STLH : AIstrrel<0b11, (outs), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "stlh", "\t$Rt, $addr", []>;
//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//
multiclass arm_ldst_mult<string asm, string sfx, bit L_bit, bit P_bit, Format f,
InstrItinClass itin, InstrItinClass itin_upd> {
// IA is the default, so no explicit suffix is needed on the
// mnemonic here; the form without a suffix is the canonical spelling.
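// For example (registers illustrative), "ldm r0!, {r1, r2, r3}" and
// "ldmia r0!, {r1, r2, r3}" denote the same increment-after instruction; the
// suffix-less form is what gets printed.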
def IA :
AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeNone, f, itin,
!strconcat(asm, "${p}\t$Rn, $regs", sfx), "", []> {
let Inst{24-23} = 0b01; // Increment After
let Inst{22} = P_bit;
let Inst{21} = 0; // No writeback
let Inst{20} = L_bit;
}
def IA_UPD :
AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeUpd, f, itin_upd,
!strconcat(asm, "${p}\t$Rn!, $regs", sfx), "$Rn = $wb", []> {
let Inst{24-23} = 0b01; // Increment After
let Inst{22} = P_bit;
let Inst{21} = 1; // Writeback
let Inst{20} = L_bit;
let DecoderMethod = "DecodeMemMultipleWritebackInstruction";
}
def DA :
AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeNone, f, itin,
!strconcat(asm, "da${p}\t$Rn, $regs", sfx), "", []> {
let Inst{24-23} = 0b00; // Decrement After
let Inst{22} = P_bit;
let Inst{21} = 0; // No writeback
let Inst{20} = L_bit;
}
def DA_UPD :
AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeUpd, f, itin_upd,
!strconcat(asm, "da${p}\t$Rn!, $regs", sfx), "$Rn = $wb", []> {
let Inst{24-23} = 0b00; // Decrement After
let Inst{22} = P_bit;
let Inst{21} = 1; // Writeback
let Inst{20} = L_bit;
let DecoderMethod = "DecodeMemMultipleWritebackInstruction";
}
def DB :
AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeNone, f, itin,
!strconcat(asm, "db${p}\t$Rn, $regs", sfx), "", []> {
let Inst{24-23} = 0b10; // Decrement Before
let Inst{22} = P_bit;
let Inst{21} = 0; // No writeback
let Inst{20} = L_bit;
}
def DB_UPD :
AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeUpd, f, itin_upd,
!strconcat(asm, "db${p}\t$Rn!, $regs", sfx), "$Rn = $wb", []> {
let Inst{24-23} = 0b10; // Decrement Before
let Inst{22} = P_bit;
let Inst{21} = 1; // Writeback
let Inst{20} = L_bit;
let DecoderMethod = "DecodeMemMultipleWritebackInstruction";
}
def IB :
AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeNone, f, itin,
!strconcat(asm, "ib${p}\t$Rn, $regs", sfx), "", []> {
let Inst{24-23} = 0b11; // Increment Before
let Inst{22} = P_bit;
let Inst{21} = 0; // No writeback
let Inst{20} = L_bit;
}
def IB_UPD :
AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeUpd, f, itin_upd,
!strconcat(asm, "ib${p}\t$Rn!, $regs", sfx), "$Rn = $wb", []> {
let Inst{24-23} = 0b11; // Increment Before
let Inst{22} = P_bit;
let Inst{21} = 1; // Writeback
let Inst{20} = L_bit;
let DecoderMethod = "DecodeMemMultipleWritebackInstruction";
}
}
let neverHasSideEffects = 1 in {
let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm LDM : arm_ldst_mult<"ldm", "", 1, 0, LdStMulFrm, IIC_iLoad_m,
IIC_iLoad_mu>;
let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
defm STM : arm_ldst_mult<"stm", "", 0, 0, LdStMulFrm, IIC_iStore_m,
IIC_iStore_mu>;
} // neverHasSideEffects
// FIXME: remove when we have a way of marking an MI with these properties.
// FIXME: Should pc be an implicit operand like PICADD, etc?
let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1,
hasExtraDefRegAllocReq = 1, isCodeGenOnly = 1 in
def LDMIA_RET : ARMPseudoExpand<(outs GPR:$wb), (ins GPR:$Rn, pred:$p,
reglist:$regs, variable_ops),
4, IIC_iLoad_mBr, [],
(LDMIA_UPD GPR:$wb, GPR:$Rn, pred:$p, reglist:$regs)>,
RegConstraint<"$Rn = $wb">;
let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm sysLDM : arm_ldst_mult<"ldm", " ^", 1, 1, LdStMulFrm, IIC_iLoad_m,
IIC_iLoad_mu>;
let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
defm sysSTM : arm_ldst_mult<"stm", " ^", 0, 1, LdStMulFrm, IIC_iStore_m,
IIC_iStore_mu>;
//===----------------------------------------------------------------------===//
// Move Instructions.
//
let neverHasSideEffects = 1 in
def MOVr : AsI1<0b1101, (outs GPR:$Rd), (ins GPR:$Rm), DPFrm, IIC_iMOVr,
"mov", "\t$Rd, $Rm", []>, UnaryDP, Sched<[WriteALU]> {
bits<4> Rd;
bits<4> Rm;
let Inst{19-16} = 0b0000;
let Inst{11-4} = 0b00000000;
let Inst{25} = 0;
let Inst{3-0} = Rm;
let Inst{15-12} = Rd;
}
// A version for the smaller set of tail call registers.
let neverHasSideEffects = 1 in
def MOVr_TC : AsI1<0b1101, (outs tcGPR:$Rd), (ins tcGPR:$Rm), DPFrm,
IIC_iMOVr, "mov", "\t$Rd, $Rm", []>, UnaryDP, Sched<[WriteALU]> {
bits<4> Rd;
bits<4> Rm;
let Inst{11-4} = 0b00000000;
let Inst{25} = 0;
let Inst{3-0} = Rm;
let Inst{15-12} = Rd;
}
def MOVsr : AsI1<0b1101, (outs GPRnopc:$Rd), (ins shift_so_reg_reg:$src),
DPSoRegRegFrm, IIC_iMOVsr,
"mov", "\t$Rd, $src",
[(set GPRnopc:$Rd, shift_so_reg_reg:$src)]>, UnaryDP,
Sched<[WriteALU]> {
bits<4> Rd;
bits<12> src;
let Inst{15-12} = Rd;
let Inst{19-16} = 0b0000;
let Inst{11-8} = src{11-8};
let Inst{7} = 0;
let Inst{6-5} = src{6-5};
let Inst{4} = 1;
let Inst{3-0} = src{3-0};
let Inst{25} = 0;
}
def MOVsi : AsI1<0b1101, (outs GPR:$Rd), (ins shift_so_reg_imm:$src),
DPSoRegImmFrm, IIC_iMOVsr,
"mov", "\t$Rd, $src", [(set GPR:$Rd, shift_so_reg_imm:$src)]>,
UnaryDP, Sched<[WriteALU]> {
bits<4> Rd;
bits<12> src;
let Inst{15-12} = Rd;
let Inst{19-16} = 0b0000;
let Inst{11-5} = src{11-5};
let Inst{4} = 0;
let Inst{3-0} = src{3-0};
let Inst{25} = 0;
}
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
def MOVi : AsI1<0b1101, (outs GPR:$Rd), (ins so_imm:$imm), DPFrm, IIC_iMOVi,
"mov", "\t$Rd, $imm", [(set GPR:$Rd, so_imm:$imm)]>, UnaryDP,
Sched<[WriteALU]> {
bits<4> Rd;
bits<12> imm;
let Inst{25} = 1;
let Inst{15-12} = Rd;
let Inst{19-16} = 0b0000;
let Inst{11-0} = imm;
}
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
def MOVi16 : AI1<0b1000, (outs GPR:$Rd), (ins imm0_65535_expr:$imm),
DPFrm, IIC_iMOVi,
"movw", "\t$Rd, $imm",
[(set GPR:$Rd, imm0_65535:$imm)]>,
Requires<[IsARM, HasV6T2]>, UnaryDP, Sched<[WriteALU]> {
bits<4> Rd;
bits<16> imm;
let Inst{15-12} = Rd;
let Inst{11-0} = imm{11-0};
let Inst{19-16} = imm{15-12};
let Inst{20} = 0;
let Inst{25} = 1;
let DecoderMethod = "DecodeArmMOVTWInstruction";
}
def : InstAlias<"mov${p} $Rd, $imm",
(MOVi16 GPR:$Rd, imm0_65535_expr:$imm, pred:$p)>,
Requires<[IsARM]>;
def MOVi16_ga_pcrel : PseudoInst<(outs GPR:$Rd),
(ins i32imm:$addr, pclabel:$id), IIC_iMOVi, []>,
Sched<[WriteALU]>;
let Constraints = "$src = $Rd" in {
def MOVTi16 : AI1<0b1010, (outs GPRnopc:$Rd),
(ins GPR:$src, imm0_65535_expr:$imm),
DPFrm, IIC_iMOVi,
"movt", "\t$Rd, $imm",
[(set GPRnopc:$Rd,
(or (and GPR:$src, 0xffff),
lo16AllZero:$imm))]>, UnaryDP,
Requires<[IsARM, HasV6T2]>, Sched<[WriteALU]> {
bits<4> Rd;
bits<16> imm;
let Inst{15-12} = Rd;
let Inst{11-0} = imm{11-0};
let Inst{19-16} = imm{15-12};
let Inst{20} = 0;
let Inst{25} = 1;
let DecoderMethod = "DecodeArmMOVTWInstruction";
}
def MOVTi16_ga_pcrel : PseudoInst<(outs GPR:$Rd),
(ins GPR:$src, i32imm:$addr, pclabel:$id), IIC_iMOVi, []>,
Sched<[WriteALU]>;
} // Constraints
def : ARMPat<(or GPR:$src, 0xffff0000), (MOVTi16 GPR:$src, 0xffff)>,
Requires<[IsARM, HasV6T2]>;
let Uses = [CPSR] in
def RRX: PseudoInst<(outs GPR:$Rd), (ins GPR:$Rm), IIC_iMOVsi,
[(set GPR:$Rd, (ARMrrx GPR:$Rm))]>, UnaryDP,
Requires<[IsARM]>, Sched<[WriteALU]>;
// These aren't really mov instructions, but we have to define them this way
// due to flag operands.
let Defs = [CPSR] in {
def MOVsrl_flag : PseudoInst<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVsi,
[(set GPR:$dst, (ARMsrl_flag GPR:$src))]>, UnaryDP,
Sched<[WriteALU]>, Requires<[IsARM]>;
def MOVsra_flag : PseudoInst<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVsi,
[(set GPR:$dst, (ARMsra_flag GPR:$src))]>, UnaryDP,
Sched<[WriteALU]>, Requires<[IsARM]>;
}
//===----------------------------------------------------------------------===//
// Extend Instructions.
//
// Sign extenders
def SXTB : AI_ext_rrot<0b01101010,
"sxtb", UnOpFrag<(sext_inreg node:$Src, i8)>>;
def SXTH : AI_ext_rrot<0b01101011,
"sxth", UnOpFrag<(sext_inreg node:$Src, i16)>>;
def SXTAB : AI_exta_rrot<0b01101010,
"sxtab", BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS, i8))>>;
def SXTAH : AI_exta_rrot<0b01101011,
"sxtah", BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS,i16))>>;
def SXTB16 : AI_ext_rrot_np<0b01101000, "sxtb16">;
def SXTAB16 : AI_exta_rrot_np<0b01101000, "sxtab16">;
// Zero extenders
let AddedComplexity = 16 in {
def UXTB : AI_ext_rrot<0b01101110,
"uxtb" , UnOpFrag<(and node:$Src, 0x000000FF)>>;
def UXTH : AI_ext_rrot<0b01101111,
"uxth" , UnOpFrag<(and node:$Src, 0x0000FFFF)>>;
def UXTB16 : AI_ext_rrot<0b01101100,
"uxtb16", UnOpFrag<(and node:$Src, 0x00FF00FF)>>;
// FIXME: This pattern incorrectly assumes the shl operator is a rotate.
//        The transformation should probably be done as a combiner action
//        instead, so we can include a check for masking the upper eight bits
//        of the source back into the lower eight bits of the result.
//def : ARMV6Pat<(and (shl GPR:$Src, (i32 8)), 0xFF00FF),
// (UXTB16r_rot GPR:$Src, 3)>;
def : ARMV6Pat<(and (srl GPR:$Src, (i32 8)), 0xFF00FF),
(UXTB16 GPR:$Src, 1)>;
def UXTAB : AI_exta_rrot<0b01101110, "uxtab",
BinOpFrag<(add node:$LHS, (and node:$RHS, 0x00FF))>>;
def UXTAH : AI_exta_rrot<0b01101111, "uxtah",
BinOpFrag<(add node:$LHS, (and node:$RHS, 0xFFFF))>>;
}
// This isn't safe in general: the add is two 16-bit units, not a 32-bit add.
def UXTAB16 : AI_exta_rrot_np<0b01101100, "uxtab16">;
def SBFX : I<(outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, imm0_31:$lsb, imm1_32:$width),
AddrMode1, 4, IndexModeNone, DPFrm, IIC_iUNAsi,
"sbfx", "\t$Rd, $Rn, $lsb, $width", "", []>,
Requires<[IsARM, HasV6T2]> {
bits<4> Rd;
bits<4> Rn;
bits<5> lsb;
bits<5> width;
let Inst{27-21} = 0b0111101;
let Inst{6-4} = 0b101;
let Inst{20-16} = width;
let Inst{15-12} = Rd;
let Inst{11-7} = lsb;
let Inst{3-0} = Rn;
}
def UBFX : I<(outs GPR:$Rd),
(ins GPR:$Rn, imm0_31:$lsb, imm1_32:$width),
AddrMode1, 4, IndexModeNone, DPFrm, IIC_iUNAsi,
"ubfx", "\t$Rd, $Rn, $lsb, $width", "", []>,
Requires<[IsARM, HasV6T2]> {
bits<4> Rd;
bits<4> Rn;
bits<5> lsb;
bits<5> width;
let Inst{27-21} = 0b0111111;
let Inst{6-4} = 0b101;
let Inst{20-16} = width;
let Inst{15-12} = Rd;
let Inst{11-7} = lsb;
let Inst{3-0} = Rn;
}
//===----------------------------------------------------------------------===//
// Arithmetic Instructions.
//
defm ADD : AsI1_bin_irs<0b0100, "add",
IIC_iALUi, IIC_iALUr, IIC_iALUsr,
BinOpFrag<(add node:$LHS, node:$RHS)>, 1>;
defm SUB : AsI1_bin_irs<0b0010, "sub",
IIC_iALUi, IIC_iALUr, IIC_iALUsr,
BinOpFrag<(sub node:$LHS, node:$RHS)>>;
// ADD and SUB with 's' bit set.
//
// Currently, ADDS/SUBS are pseudo opcodes that exist only in the
// selection DAG. They are "lowered" to real ADD/SUB opcodes by
// AdjustInstrPostInstrSelection where we determine whether or not to
// set the "s" bit based on CPSR liveness.
//
// FIXME: Eliminate ADDS/SUBS pseudo opcodes after adding tablegen
// support for an optional CPSR definition that corresponds to the DAG
// node's second value. We can then eliminate the implicit def of CPSR.
defm ADDS : AsI1_bin_s_irs<IIC_iALUi, IIC_iALUr, IIC_iALUsr,
BinOpFrag<(ARMaddc node:$LHS, node:$RHS)>, 1>;
defm SUBS : AsI1_bin_s_irs<IIC_iALUi, IIC_iALUr, IIC_iALUsr,
BinOpFrag<(ARMsubc node:$LHS, node:$RHS)>>;
defm ADC : AI1_adde_sube_irs<0b0101, "adc",
BinOpWithFlagFrag<(ARMadde node:$LHS, node:$RHS, node:$FLAG)>, 1>;
defm SBC : AI1_adde_sube_irs<0b0110, "sbc",
BinOpWithFlagFrag<(ARMsube node:$LHS, node:$RHS, node:$FLAG)>>;
defm RSB : AsI1_rbin_irs<0b0011, "rsb",
IIC_iALUi, IIC_iALUr, IIC_iALUsr,
BinOpFrag<(sub node:$LHS, node:$RHS)>>;
// FIXME: Eliminate them if we can write 'def : Pat' patterns which define
// CPSR, so that the implicit def of CPSR is not needed.
defm RSBS : AsI1_rbin_s_is<IIC_iALUi, IIC_iALUr, IIC_iALUsr,
BinOpFrag<(ARMsubc node:$LHS, node:$RHS)>>;
defm RSC : AI1_rsc_irs<0b0111, "rsc",
BinOpWithFlagFrag<(ARMsube node:$LHS, node:$RHS, node:$FLAG)>>;
// (sub X, imm) gets canonicalized to (add X, -imm). Match this form.
// The assume-no-carry-in form uses the negation of the input since add/sub
// assume opposite meanings of the carry flag (i.e., carry == !borrow).
// See the definition of AddWithCarry() in the ARM ARM A2.2.1 for the gory
// details.
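// For example (registers illustrative), "add r0, r1, #-8" cannot be encoded
// directly, so it is matched as "sub r0, r1, #8"; for ARMaddc the same trick
// yields a "subs".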
def : ARMPat<(add GPR:$src, so_imm_neg:$imm),
(SUBri GPR:$src, so_imm_neg:$imm)>;
def : ARMPat<(ARMaddc GPR:$src, so_imm_neg:$imm),
(SUBSri GPR:$src, so_imm_neg:$imm)>;
def : ARMPat<(add GPR:$src, imm0_65535_neg:$imm),
(SUBrr GPR:$src, (MOVi16 (imm_neg_XFORM imm:$imm)))>,
Requires<[IsARM, HasV6T2]>;
def : ARMPat<(ARMaddc GPR:$src, imm0_65535_neg:$imm),
(SUBSrr GPR:$src, (MOVi16 (imm_neg_XFORM imm:$imm)))>,
Requires<[IsARM, HasV6T2]>;
// The with-carry-in form matches bitwise not instead of the negation.
// Effectively, the inverse interpretation of the carry flag already accounts
// for part of the negation.
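// For example (register names illustrative), adding -6 with a carry-in is
// emitted as "sbc rd, rn, #5": sbc computes rn + ~5 + carry, and ~5 == -6.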
def : ARMPat<(ARMadde GPR:$src, so_imm_not:$imm, CPSR),
(SBCri GPR:$src, so_imm_not:$imm)>;
def : ARMPat<(ARMadde GPR:$src, imm0_65535_neg:$imm, CPSR),
(SBCrr GPR:$src, (MOVi16 (imm_not_XFORM imm:$imm)))>;
// Note: These are implemented in C++ code, because they have to generate
// ADD/SUBrs instructions, which use a complex pattern that an xform function
// cannot produce.
// (mul X, 2^n+1) -> (add (X << n), X)
// (mul X, 2^n-1) -> (rsb X, (X << n))
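// For example (register names illustrative), a multiply by 5 becomes
// "add rd, rn, rn, lsl #2" and a multiply by 7 becomes "rsb rd, rn, rn, lsl #3".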
// ARM Arithmetic Instruction
// GPR:$Rd = GPR:$Rn op GPR:$Rm
class AAI<bits<8> op27_20, bits<8> op11_4, string opc,
list<dag> pattern = [],
dag iops = (ins GPRnopc:$Rn, GPRnopc:$Rm),
string asm = "\t$Rd, $Rn, $Rm">
: AI<(outs GPRnopc:$Rd), iops, DPFrm, IIC_iALUr, opc, asm, pattern>,
Sched<[WriteALU, ReadALU, ReadALU]> {
bits<4> Rn;
bits<4> Rd;
bits<4> Rm;
let Inst{27-20} = op27_20;
let Inst{11-4} = op11_4;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{3-0} = Rm;
let Unpredictable{11-8} = 0b1111;
}
// Saturating add/subtract
let DecoderMethod = "DecodeQADDInstruction" in
def QADD : AAI<0b00010000, 0b00000101, "qadd",
[(set GPRnopc:$Rd, (int_arm_qadd GPRnopc:$Rm, GPRnopc:$Rn))],
(ins GPRnopc:$Rm, GPRnopc:$Rn), "\t$Rd, $Rm, $Rn">;
def QSUB : AAI<0b00010010, 0b00000101, "qsub",
[(set GPRnopc:$Rd, (int_arm_qsub GPRnopc:$Rm, GPRnopc:$Rn))],
(ins GPRnopc:$Rm, GPRnopc:$Rn), "\t$Rd, $Rm, $Rn">;
def QDADD : AAI<0b00010100, 0b00000101, "qdadd", [],
(ins GPRnopc:$Rm, GPRnopc:$Rn),
"\t$Rd, $Rm, $Rn">;
def QDSUB : AAI<0b00010110, 0b00000101, "qdsub", [],
(ins GPRnopc:$Rm, GPRnopc:$Rn),
"\t$Rd, $Rm, $Rn">;
def QADD16 : AAI<0b01100010, 0b11110001, "qadd16">;
def QADD8 : AAI<0b01100010, 0b11111001, "qadd8">;
def QASX : AAI<0b01100010, 0b11110011, "qasx">;
def QSAX : AAI<0b01100010, 0b11110101, "qsax">;
def QSUB16 : AAI<0b01100010, 0b11110111, "qsub16">;
def QSUB8 : AAI<0b01100010, 0b11111111, "qsub8">;
def UQADD16 : AAI<0b01100110, 0b11110001, "uqadd16">;
def UQADD8 : AAI<0b01100110, 0b11111001, "uqadd8">;
def UQASX : AAI<0b01100110, 0b11110011, "uqasx">;
def UQSAX : AAI<0b01100110, 0b11110101, "uqsax">;
def UQSUB16 : AAI<0b01100110, 0b11110111, "uqsub16">;
def UQSUB8 : AAI<0b01100110, 0b11111111, "uqsub8">;
// Signed/Unsigned add/subtract
def SASX : AAI<0b01100001, 0b11110011, "sasx">;
def SADD16 : AAI<0b01100001, 0b11110001, "sadd16">;
def SADD8 : AAI<0b01100001, 0b11111001, "sadd8">;
def SSAX : AAI<0b01100001, 0b11110101, "ssax">;
def SSUB16 : AAI<0b01100001, 0b11110111, "ssub16">;
def SSUB8 : AAI<0b01100001, 0b11111111, "ssub8">;
def UASX : AAI<0b01100101, 0b11110011, "uasx">;
def UADD16 : AAI<0b01100101, 0b11110001, "uadd16">;
def UADD8 : AAI<0b01100101, 0b11111001, "uadd8">;
def USAX : AAI<0b01100101, 0b11110101, "usax">;
def USUB16 : AAI<0b01100101, 0b11110111, "usub16">;
def USUB8 : AAI<0b01100101, 0b11111111, "usub8">;
// Signed/Unsigned halving add/subtract
def SHASX : AAI<0b01100011, 0b11110011, "shasx">;
def SHADD16 : AAI<0b01100011, 0b11110001, "shadd16">;
def SHADD8 : AAI<0b01100011, 0b11111001, "shadd8">;
def SHSAX : AAI<0b01100011, 0b11110101, "shsax">;
def SHSUB16 : AAI<0b01100011, 0b11110111, "shsub16">;
def SHSUB8 : AAI<0b01100011, 0b11111111, "shsub8">;
def UHASX : AAI<0b01100111, 0b11110011, "uhasx">;
def UHADD16 : AAI<0b01100111, 0b11110001, "uhadd16">;
def UHADD8 : AAI<0b01100111, 0b11111001, "uhadd8">;
def UHSAX : AAI<0b01100111, 0b11110101, "uhsax">;
def UHSUB16 : AAI<0b01100111, 0b11110111, "uhsub16">;
def UHSUB8 : AAI<0b01100111, 0b11111111, "uhsub8">;
// Unsigned Sum of Absolute Differences [and Accumulate].
def USAD8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
MulFrm /* for convenience */, NoItinerary, "usad8",
"\t$Rd, $Rn, $Rm", []>,
Requires<[IsARM, HasV6]>, Sched<[WriteALU, ReadALU, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
let Inst{27-20} = 0b01111000;
let Inst{15-12} = 0b1111;
let Inst{7-4} = 0b0001;
let Inst{19-16} = Rd;
let Inst{11-8} = Rm;
let Inst{3-0} = Rn;
}
def USADA8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
MulFrm /* for convenience */, NoItinerary, "usada8",
"\t$Rd, $Rn, $Rm, $Ra", []>,
Requires<[IsARM, HasV6]>, Sched<[WriteALU, ReadALU, ReadALU]>{
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
bits<4> Ra;
let Inst{27-20} = 0b01111000;
let Inst{7-4} = 0b0001;
let Inst{19-16} = Rd;
let Inst{15-12} = Ra;
let Inst{11-8} = Rm;
let Inst{3-0} = Rn;
}
// Signed/Unsigned saturate
def SSAT : AI<(outs GPRnopc:$Rd),
(ins imm1_32:$sat_imm, GPRnopc:$Rn, shift_imm:$sh),
SatFrm, NoItinerary, "ssat", "\t$Rd, $sat_imm, $Rn$sh", []> {
bits<4> Rd;
bits<5> sat_imm;
bits<4> Rn;
bits<8> sh;
let Inst{27-21} = 0b0110101;
let Inst{5-4} = 0b01;
let Inst{20-16} = sat_imm;
let Inst{15-12} = Rd;
let Inst{11-7} = sh{4-0};
let Inst{6} = sh{5};
let Inst{3-0} = Rn;
}
def SSAT16 : AI<(outs GPRnopc:$Rd),
(ins imm1_16:$sat_imm, GPRnopc:$Rn), SatFrm,
NoItinerary, "ssat16", "\t$Rd, $sat_imm, $Rn", []> {
bits<4> Rd;
bits<4> sat_imm;
bits<4> Rn;
let Inst{27-20} = 0b01101010;
let Inst{11-4} = 0b11110011;
let Inst{15-12} = Rd;
let Inst{19-16} = sat_imm;
let Inst{3-0} = Rn;
}
def USAT : AI<(outs GPRnopc:$Rd),
(ins imm0_31:$sat_imm, GPRnopc:$Rn, shift_imm:$sh),
SatFrm, NoItinerary, "usat", "\t$Rd, $sat_imm, $Rn$sh", []> {
bits<4> Rd;
bits<5> sat_imm;
bits<4> Rn;
bits<8> sh;
let Inst{27-21} = 0b0110111;
let Inst{5-4} = 0b01;
let Inst{15-12} = Rd;
let Inst{11-7} = sh{4-0};
let Inst{6} = sh{5};
let Inst{20-16} = sat_imm;
let Inst{3-0} = Rn;
}
def USAT16 : AI<(outs GPRnopc:$Rd),
(ins imm0_15:$sat_imm, GPRnopc:$Rn), SatFrm,
NoItinerary, "usat16", "\t$Rd, $sat_imm, $Rn", []> {
bits<4> Rd;
bits<4> sat_imm;
bits<4> Rn;
let Inst{27-20} = 0b01101110;
let Inst{11-4} = 0b11110011;
let Inst{15-12} = Rd;
let Inst{19-16} = sat_imm;
let Inst{3-0} = Rn;
}
def : ARMV6Pat<(int_arm_ssat GPRnopc:$a, imm:$pos),
(SSAT imm:$pos, GPRnopc:$a, 0)>;
def : ARMV6Pat<(int_arm_usat GPRnopc:$a, imm:$pos),
(USAT imm:$pos, GPRnopc:$a, 0)>;
//===----------------------------------------------------------------------===//
// Bitwise Instructions.
//
defm AND : AsI1_bin_irs<0b0000, "and",
IIC_iBITi, IIC_iBITr, IIC_iBITsr,
BinOpFrag<(and node:$LHS, node:$RHS)>, 1>;
defm ORR : AsI1_bin_irs<0b1100, "orr",
IIC_iBITi, IIC_iBITr, IIC_iBITsr,
BinOpFrag<(or node:$LHS, node:$RHS)>, 1>;
defm EOR : AsI1_bin_irs<0b0001, "eor",
IIC_iBITi, IIC_iBITr, IIC_iBITsr,
BinOpFrag<(xor node:$LHS, node:$RHS)>, 1>;
defm BIC : AsI1_bin_irs<0b1110, "bic",
IIC_iBITi, IIC_iBITr, IIC_iBITsr,
BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
// FIXME: bf_inv_mask_imm should be two operands, the lsb and the msb, just
// like in the actual instruction encoding. The complexity of mapping the mask
// to the lsb/msb pair should be handled by ISel, not encapsulated in the
// instruction description.
def BFC : I<(outs GPR:$Rd), (ins GPR:$src, bf_inv_mask_imm:$imm),
AddrMode1, 4, IndexModeNone, DPFrm, IIC_iUNAsi,
"bfc", "\t$Rd, $imm", "$src = $Rd",
[(set GPR:$Rd, (and GPR:$src, bf_inv_mask_imm:$imm))]>,
Requires<[IsARM, HasV6T2]> {
bits<4> Rd;
bits<10> imm;
let Inst{27-21} = 0b0111110;
let Inst{6-0} = 0b0011111;
let Inst{15-12} = Rd;
let Inst{11-7} = imm{4-0}; // lsb
let Inst{20-16} = imm{9-5}; // msb
}
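// Encoding illustration (assumed example, derived from the lets above): to
// clear bits 11..8, bf_inv_mask_imm represents the inverse mask 0xFFFFF0FF,
// which BFC encodes as lsb = 8 (Inst{11-7}) and msb = 11 (Inst{20-16}),
// i.e. "bfc rX, #8, #4" in lsb/width assembly form.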
// A8.6.18 BFI - Bitfield insert (Encoding A1)
def BFI:I<(outs GPRnopc:$Rd), (ins GPRnopc:$src, GPR:$Rn, bf_inv_mask_imm:$imm),
AddrMode1, 4, IndexModeNone, DPFrm, IIC_iUNAsi,
"bfi", "\t$Rd, $Rn, $imm", "$src = $Rd",
[(set GPRnopc:$Rd, (ARMbfi GPRnopc:$src, GPR:$Rn,
bf_inv_mask_imm:$imm))]>,
Requires<[IsARM, HasV6T2]> {
bits<4> Rd;
bits<4> Rn;
bits<10> imm;
let Inst{27-21} = 0b0111110;
let Inst{6-4} = 0b001; // Rn: Inst{3-0} != 15
let Inst{15-12} = Rd;
let Inst{11-7} = imm{4-0}; // lsb
let Inst{20-16} = imm{9-5}; // width
let Inst{3-0} = Rn;
}
def MVNr : AsI1<0b1111, (outs GPR:$Rd), (ins GPR:$Rm), DPFrm, IIC_iMVNr,
"mvn", "\t$Rd, $Rm",
[(set GPR:$Rd, (not GPR:$Rm))]>, UnaryDP, Sched<[WriteALU]> {
bits<4> Rd;
bits<4> Rm;
let Inst{25} = 0;
let Inst{19-16} = 0b0000;
let Inst{11-4} = 0b00000000;
let Inst{15-12} = Rd;
let Inst{3-0} = Rm;
}
def MVNsi : AsI1<0b1111, (outs GPR:$Rd), (ins so_reg_imm:$shift),
DPSoRegImmFrm, IIC_iMVNsr, "mvn", "\t$Rd, $shift",
[(set GPR:$Rd, (not so_reg_imm:$shift))]>, UnaryDP,
Sched<[WriteALU]> {
bits<4> Rd;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = 0b0000;
let Inst{15-12} = Rd;
let Inst{11-5} = shift{11-5};
let Inst{4} = 0;
let Inst{3-0} = shift{3-0};
}
def MVNsr : AsI1<0b1111, (outs GPR:$Rd), (ins so_reg_reg:$shift),
DPSoRegRegFrm, IIC_iMVNsr, "mvn", "\t$Rd, $shift",
[(set GPR:$Rd, (not so_reg_reg:$shift))]>, UnaryDP,
Sched<[WriteALU]> {
bits<4> Rd;
bits<12> shift;
let Inst{25} = 0;
let Inst{19-16} = 0b0000;
let Inst{15-12} = Rd;
let Inst{11-8} = shift{11-8};
let Inst{7} = 0;
let Inst{6-5} = shift{6-5};
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
}
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
def MVNi : AsI1<0b1111, (outs GPR:$Rd), (ins so_imm:$imm), DPFrm,
IIC_iMVNi, "mvn", "\t$Rd, $imm",
[(set GPR:$Rd, so_imm_not:$imm)]>,UnaryDP, Sched<[WriteALU]> {
bits<4> Rd;
bits<12> imm;
let Inst{25} = 1;
let Inst{19-16} = 0b0000;
let Inst{15-12} = Rd;
let Inst{11-0} = imm;
}
def : ARMPat<(and GPR:$src, so_imm_not:$imm),
(BICri GPR:$src, so_imm_not:$imm)>;
//===----------------------------------------------------------------------===//
// Multiply Instructions.
//
class AsMul1I32<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: AsMul1I<opcod, oops, iops, itin, opc, asm, pattern> {
bits<4> Rd;
bits<4> Rm;
bits<4> Rn;
let Inst{19-16} = Rd;
let Inst{11-8} = Rm;
let Inst{3-0} = Rn;
}
class AsMul1I64<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: AsMul1I<opcod, oops, iops, itin, opc, asm, pattern> {
bits<4> RdLo;
bits<4> RdHi;
bits<4> Rm;
bits<4> Rn;
let Inst{19-16} = RdHi;
let Inst{15-12} = RdLo;
let Inst{11-8} = Rm;
let Inst{3-0} = Rn;
}
class AsMla1I64<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: AsMul1I<opcod, oops, iops, itin, opc, asm, pattern> {
bits<4> RdLo;
bits<4> RdHi;
bits<4> Rm;
bits<4> Rn;
let Inst{19-16} = RdHi;
let Inst{15-12} = RdLo;
let Inst{11-8} = Rm;
let Inst{3-0} = Rn;
}
// FIXME: The v5 pseudos are only necessary for the additional Constraint
// property. Remove them when it's possible to add those properties
// on an individual MachineInstr, not just an instruction description.
let isCommutable = 1, TwoOperandAliasConstraint = "$Rn = $Rd" in {
def MUL : AsMul1I32<0b0000000, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm),
IIC_iMUL32, "mul", "\t$Rd, $Rn, $Rm",
[(set GPRnopc:$Rd, (mul GPRnopc:$Rn, GPRnopc:$Rm))]>,
Requires<[IsARM, HasV6]> {
let Inst{15-12} = 0b0000;
let Unpredictable{15-12} = 0b1111;
}
let Constraints = "@earlyclobber $Rd" in
def MULv5: ARMPseudoExpand<(outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm,
pred:$p, cc_out:$s),
4, IIC_iMUL32,
[(set GPRnopc:$Rd, (mul GPRnopc:$Rn, GPRnopc:$Rm))],
(MUL GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6, UseMulOps]>;
}
def MLA : AsMul1I32<0b0000001, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra),
IIC_iMAC32, "mla", "\t$Rd, $Rn, $Rm, $Ra",
[(set GPRnopc:$Rd, (add (mul GPRnopc:$Rn, GPRnopc:$Rm), GPRnopc:$Ra))]>,
Requires<[IsARM, HasV6, UseMulOps]> {
bits<4> Ra;
let Inst{15-12} = Ra;
}
let Constraints = "@earlyclobber $Rd" in
def MLAv5: ARMPseudoExpand<(outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra,
pred:$p, cc_out:$s), 4, IIC_iMAC32,
[(set GPRnopc:$Rd, (add (mul GPRnopc:$Rn, GPRnopc:$Rm), GPRnopc:$Ra))],
(MLA GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
def MLS : AMul1I<0b0000011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC32, "mls", "\t$Rd, $Rn, $Rm, $Ra",
[(set GPR:$Rd, (sub GPR:$Ra, (mul GPR:$Rn, GPR:$Rm)))]>,
Requires<[IsARM, HasV6T2, UseMulOps]> {
bits<4> Rd;
bits<4> Rm;
bits<4> Rn;
bits<4> Ra;
let Inst{19-16} = Rd;
let Inst{15-12} = Ra;
let Inst{11-8} = Rm;
let Inst{3-0} = Rn;
}
// Extra precision multiplies with low / high results
let neverHasSideEffects = 1 in {
let isCommutable = 1 in {
def SMULL : AsMul1I64<0b0000110, (outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm), IIC_iMUL64,
"smull", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
Requires<[IsARM, HasV6]>;
def UMULL : AsMul1I64<0b0000100, (outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm), IIC_iMUL64,
"umull", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
Requires<[IsARM, HasV6]>;
let Constraints = "@earlyclobber $RdLo,@earlyclobber $RdHi" in {
def SMULLv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s),
4, IIC_iMUL64, [],
(SMULL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
def UMULLv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s),
4, IIC_iMUL64, [],
(UMULL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
}
}
// Multiply + accumulate
def SMLAL : AsMla1I64<0b0000111, (outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi), IIC_iMAC64,
"smlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">, Requires<[IsARM, HasV6]>;
def UMLAL : AsMla1I64<0b0000101, (outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi), IIC_iMAC64,
"umlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">, Requires<[IsARM, HasV6]>;
def UMAAL : AMul1I <0b0000010, (outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm), IIC_iMAC64,
"umaal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
Requires<[IsARM, HasV6]> {
bits<4> RdLo;
bits<4> RdHi;
bits<4> Rm;
bits<4> Rn;
let Inst{19-16} = RdHi;
let Inst{15-12} = RdLo;
let Inst{11-8} = Rm;
let Inst{3-0} = Rn;
}
let Constraints =
"@earlyclobber $RdLo,@earlyclobber $RdHi,$RLo = $RdLo,$RHi = $RdHi" in {
def SMLALv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi, pred:$p, cc_out:$s),
4, IIC_iMAC64, [],
(SMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi,
pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
def UMLALv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi),
(ins GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi, pred:$p, cc_out:$s),
4, IIC_iMAC64, [],
(UMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi,
pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
}
} // neverHasSideEffects
// Most significant word multiply
def SMMUL : AMul2I <0b0111010, 0b0001, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL32, "smmul", "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (mulhs GPR:$Rn, GPR:$Rm))]>,
Requires<[IsARM, HasV6]> {
let Inst{15-12} = 0b1111;
}
def SMMULR : AMul2I <0b0111010, 0b0011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL32, "smmulr", "\t$Rd, $Rn, $Rm", []>,
Requires<[IsARM, HasV6]> {
let Inst{15-12} = 0b1111;
}
def SMMLA : AMul2Ia <0b0111010, 0b0001, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC32, "smmla", "\t$Rd, $Rn, $Rm, $Ra",
[(set GPR:$Rd, (add (mulhs GPR:$Rn, GPR:$Rm), GPR:$Ra))]>,
Requires<[IsARM, HasV6, UseMulOps]>;
def SMMLAR : AMul2Ia <0b0111010, 0b0011, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC32, "smmlar", "\t$Rd, $Rn, $Rm, $Ra", []>,
Requires<[IsARM, HasV6]>;
def SMMLS : AMul2Ia <0b0111010, 0b1101, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC32, "smmls", "\t$Rd, $Rn, $Rm, $Ra", []>,
Requires<[IsARM, HasV6, UseMulOps]>;
def SMMLSR : AMul2Ia <0b0111010, 0b1111, (outs GPR:$Rd),
(ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
IIC_iMAC32, "smmlsr", "\t$Rd, $Rn, $Rm, $Ra", []>,
Requires<[IsARM, HasV6]>;
multiclass AI_smul<string opc, PatFrag opnode> {
def BB : AMulxyI<0b0001011, 0b00, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL16, !strconcat(opc, "bb"), "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (opnode (sext_inreg GPR:$Rn, i16),
(sext_inreg GPR:$Rm, i16)))]>,
Requires<[IsARM, HasV5TE]>;
def BT : AMulxyI<0b0001011, 0b10, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL16, !strconcat(opc, "bt"), "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (opnode (sext_inreg GPR:$Rn, i16),
(sra GPR:$Rm, (i32 16))))]>,
Requires<[IsARM, HasV5TE]>;
def TB : AMulxyI<0b0001011, 0b01, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL16, !strconcat(opc, "tb"), "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (opnode (sra GPR:$Rn, (i32 16)),
(sext_inreg GPR:$Rm, i16)))]>,
Requires<[IsARM, HasV5TE]>;
def TT : AMulxyI<0b0001011, 0b11, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL16, !strconcat(opc, "tt"), "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (opnode (sra GPR:$Rn, (i32 16)),
(sra GPR:$Rm, (i32 16))))]>,
Requires<[IsARM, HasV5TE]>;
def WB : AMulxyI<0b0001001, 0b01, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL16, !strconcat(opc, "wb"), "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (sra (opnode GPR:$Rn,
(sext_inreg GPR:$Rm, i16)), (i32 16)))]>,
Requires<[IsARM, HasV5TE]>;
def WT : AMulxyI<0b0001001, 0b11, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL16, !strconcat(opc, "wt"), "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (sra (opnode GPR:$Rn,
(sra GPR:$Rm, (i32 16))), (i32 16)))]>,
Requires<[IsARM, HasV5TE]>;
}
multiclass AI_smla<string opc, PatFrag opnode> {
let DecoderMethod = "DecodeSMLAInstruction" in {
def BB : AMulxyIa<0b0001000, 0b00, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
IIC_iMAC16, !strconcat(opc, "bb"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPRnopc:$Rd, (add GPR:$Ra,
(opnode (sext_inreg GPRnopc:$Rn, i16),
(sext_inreg GPRnopc:$Rm, i16))))]>,
Requires<[IsARM, HasV5TE, UseMulOps]>;
def BT : AMulxyIa<0b0001000, 0b10, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
IIC_iMAC16, !strconcat(opc, "bt"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPRnopc:$Rd,
(add GPR:$Ra, (opnode (sext_inreg GPRnopc:$Rn, i16),
(sra GPRnopc:$Rm, (i32 16)))))]>,
Requires<[IsARM, HasV5TE, UseMulOps]>;
def TB : AMulxyIa<0b0001000, 0b01, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
IIC_iMAC16, !strconcat(opc, "tb"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPRnopc:$Rd,
(add GPR:$Ra, (opnode (sra GPRnopc:$Rn, (i32 16)),
(sext_inreg GPRnopc:$Rm, i16))))]>,
Requires<[IsARM, HasV5TE, UseMulOps]>;
def TT : AMulxyIa<0b0001000, 0b11, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
IIC_iMAC16, !strconcat(opc, "tt"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPRnopc:$Rd,
(add GPR:$Ra, (opnode (sra GPRnopc:$Rn, (i32 16)),
(sra GPRnopc:$Rm, (i32 16)))))]>,
Requires<[IsARM, HasV5TE, UseMulOps]>;
def WB : AMulxyIa<0b0001001, 0b00, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
IIC_iMAC16, !strconcat(opc, "wb"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPRnopc:$Rd,
(add GPR:$Ra, (sra (opnode GPRnopc:$Rn,
(sext_inreg GPRnopc:$Rm, i16)), (i32 16))))]>,
Requires<[IsARM, HasV5TE, UseMulOps]>;
def WT : AMulxyIa<0b0001001, 0b10, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
IIC_iMAC16, !strconcat(opc, "wt"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPRnopc:$Rd,
(add GPR:$Ra, (sra (opnode GPRnopc:$Rn,
(sra GPRnopc:$Rm, (i32 16))), (i32 16))))]>,
Requires<[IsARM, HasV5TE, UseMulOps]>;
}
}
defm SMUL : AI_smul<"smul", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
defm SMLA : AI_smla<"smla", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
// Halfword multiply accumulate long: SMLAL<x><y>.
def SMLALBB : AMulxyI64<0b0001010, 0b00, (outs GPRnopc:$RdLo, GPRnopc:$RdHi),
(ins GPRnopc:$Rn, GPRnopc:$Rm),
IIC_iMAC64, "smlalbb", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
Requires<[IsARM, HasV5TE]>;
def SMLALBT : AMulxyI64<0b0001010, 0b10, (outs GPRnopc:$RdLo, GPRnopc:$RdHi),
(ins GPRnopc:$Rn, GPRnopc:$Rm),
IIC_iMAC64, "smlalbt", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
Requires<[IsARM, HasV5TE]>;
def SMLALTB : AMulxyI64<0b0001010, 0b01, (outs GPRnopc:$RdLo, GPRnopc:$RdHi),
(ins GPRnopc:$Rn, GPRnopc:$Rm),
IIC_iMAC64, "smlaltb", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
Requires<[IsARM, HasV5TE]>;
def SMLALTT : AMulxyI64<0b0001010, 0b11, (outs GPRnopc:$RdLo, GPRnopc:$RdHi),
(ins GPRnopc:$Rn, GPRnopc:$Rm),
IIC_iMAC64, "smlaltt", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
Requires<[IsARM, HasV5TE]>;
// Helper class for AI_smld.
class AMulDualIbase<bit long, bit sub, bit swap, dag oops, dag iops,
InstrItinClass itin, string opc, string asm>
: AI<oops, iops, MulFrm, itin, opc, asm, []>, Requires<[IsARM, HasV6]> {
bits<4> Rn;
bits<4> Rm;
let Inst{27-23} = 0b01110;
let Inst{22} = long;
let Inst{21-20} = 0b00;
let Inst{11-8} = Rm;
let Inst{7} = 0;
let Inst{6} = sub;
let Inst{5} = swap;
let Inst{4} = 1;
let Inst{3-0} = Rn;
}
class AMulDualI<bit long, bit sub, bit swap, dag oops, dag iops,
InstrItinClass itin, string opc, string asm>
: AMulDualIbase<long, sub, swap, oops, iops, itin, opc, asm> {
bits<4> Rd;
let Inst{15-12} = 0b1111;
let Inst{19-16} = Rd;
}
class AMulDualIa<bit long, bit sub, bit swap, dag oops, dag iops,
InstrItinClass itin, string opc, string asm>
: AMulDualIbase<long, sub, swap, oops, iops, itin, opc, asm> {
bits<4> Ra;
bits<4> Rd;
let Inst{19-16} = Rd;
let Inst{15-12} = Ra;
}
class AMulDualI64<bit long, bit sub, bit swap, dag oops, dag iops,
InstrItinClass itin, string opc, string asm>
: AMulDualIbase<long, sub, swap, oops, iops, itin, opc, asm> {
bits<4> RdLo;
bits<4> RdHi;
let Inst{19-16} = RdHi;
let Inst{15-12} = RdLo;
}
multiclass AI_smld<bit sub, string opc> {
def D : AMulDualIa<0, sub, 0, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
NoItinerary, !strconcat(opc, "d"), "\t$Rd, $Rn, $Rm, $Ra">;
def DX: AMulDualIa<0, sub, 1, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
NoItinerary, !strconcat(opc, "dx"), "\t$Rd, $Rn, $Rm, $Ra">;
def LD: AMulDualI64<1, sub, 0, (outs GPRnopc:$RdLo, GPRnopc:$RdHi),
(ins GPRnopc:$Rn, GPRnopc:$Rm), NoItinerary,
!strconcat(opc, "ld"), "\t$RdLo, $RdHi, $Rn, $Rm">;
def LDX : AMulDualI64<1, sub, 1, (outs GPRnopc:$RdLo, GPRnopc:$RdHi),
(ins GPRnopc:$Rn, GPRnopc:$Rm), NoItinerary,
!strconcat(opc, "ldx"),"\t$RdLo, $RdHi, $Rn, $Rm">;
}
defm SMLA : AI_smld<0, "smla">;
defm SMLS : AI_smld<1, "smls">;
multiclass AI_sdml<bit sub, string opc> {
def D:AMulDualI<0, sub, 0, (outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm),
NoItinerary, !strconcat(opc, "d"), "\t$Rd, $Rn, $Rm">;
def DX:AMulDualI<0, sub, 1, (outs GPRnopc:$Rd),(ins GPRnopc:$Rn, GPRnopc:$Rm),
NoItinerary, !strconcat(opc, "dx"), "\t$Rd, $Rn, $Rm">;
}
defm SMUA : AI_sdml<0, "smua">;
defm SMUS : AI_sdml<1, "smus">;
//===----------------------------------------------------------------------===//
// Division Instructions (ARMv7-A with virtualization extension)
//
def SDIV : ADivA1I<0b001, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), IIC_iDIV,
"sdiv", "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (sdiv GPR:$Rn, GPR:$Rm))]>,
Requires<[IsARM, HasDivideInARM]>;
def UDIV : ADivA1I<0b011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), IIC_iDIV,
"udiv", "\t$Rd, $Rn, $Rm",
[(set GPR:$Rd, (udiv GPR:$Rn, GPR:$Rm))]>,
Requires<[IsARM, HasDivideInARM]>;
//===----------------------------------------------------------------------===//
// Misc. Arithmetic Instructions.
//
def CLZ : AMiscA1I<0b000010110, 0b0001, (outs GPR:$Rd), (ins GPR:$Rm),
IIC_iUNAr, "clz", "\t$Rd, $Rm",
[(set GPR:$Rd, (ctlz GPR:$Rm))]>, Requires<[IsARM, HasV5T]>,
Sched<[WriteALU]>;
def RBIT : AMiscA1I<0b01101111, 0b0011, (outs GPR:$Rd), (ins GPR:$Rm),
IIC_iUNAr, "rbit", "\t$Rd, $Rm",
[(set GPR:$Rd, (ARMrbit GPR:$Rm))]>,
Requires<[IsARM, HasV6T2]>,
Sched<[WriteALU]>;
def REV : AMiscA1I<0b01101011, 0b0011, (outs GPR:$Rd), (ins GPR:$Rm),
IIC_iUNAr, "rev", "\t$Rd, $Rm",
[(set GPR:$Rd, (bswap GPR:$Rm))]>, Requires<[IsARM, HasV6]>,
Sched<[WriteALU]>;
let AddedComplexity = 5 in
def REV16 : AMiscA1I<0b01101011, 0b1011, (outs GPR:$Rd), (ins GPR:$Rm),
IIC_iUNAr, "rev16", "\t$Rd, $Rm",
[(set GPR:$Rd, (rotr (bswap GPR:$Rm), (i32 16)))]>,
Requires<[IsARM, HasV6]>,
Sched<[WriteALU]>;
def : ARMV6Pat<(srl (bswap (extloadi16 addrmode3:$addr)), (i32 16)),
(REV16 (LDRH addrmode3:$addr))>;
def : ARMV6Pat<(truncstorei16 (srl (bswap GPR:$Rn), (i32 16)), addrmode3:$addr),
(STRH (REV16 GPR:$Rn), addrmode3:$addr)>;
let AddedComplexity = 5 in
def REVSH : AMiscA1I<0b01101111, 0b1011, (outs GPR:$Rd), (ins GPR:$Rm),
IIC_iUNAr, "revsh", "\t$Rd, $Rm",
[(set GPR:$Rd, (sra (bswap GPR:$Rm), (i32 16)))]>,
Requires<[IsARM, HasV6]>,
Sched<[WriteALU]>;
def : ARMV6Pat<(or (sra (shl GPR:$Rm, (i32 24)), (i32 16)),
(and (srl GPR:$Rm, (i32 8)), 0xFF)),
(REVSH GPR:$Rm)>;
def PKHBT : APKHI<0b01101000, 0, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, pkh_lsl_amt:$sh),
IIC_iALUsi, "pkhbt", "\t$Rd, $Rn, $Rm$sh",
[(set GPRnopc:$Rd, (or (and GPRnopc:$Rn, 0xFFFF),
(and (shl GPRnopc:$Rm, pkh_lsl_amt:$sh),
0xFFFF0000)))]>,
Requires<[IsARM, HasV6]>,
Sched<[WriteALUsi, ReadALU]>;
// Alternate cases for PKHBT where identities eliminate some nodes.
def : ARMV6Pat<(or (and GPRnopc:$Rn, 0xFFFF), (and GPRnopc:$Rm, 0xFFFF0000)),
(PKHBT GPRnopc:$Rn, GPRnopc:$Rm, 0)>;
def : ARMV6Pat<(or (and GPRnopc:$Rn, 0xFFFF), (shl GPRnopc:$Rm, imm16_31:$sh)),
(PKHBT GPRnopc:$Rn, GPRnopc:$Rm, imm16_31:$sh)>;
// Note: Shifts of 1-15 bits will be transformed to srl instead of sra and
// will match the pattern below.
def PKHTB : APKHI<0b01101000, 1, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, pkh_asr_amt:$sh),
IIC_iBITsi, "pkhtb", "\t$Rd, $Rn, $Rm$sh",
[(set GPRnopc:$Rd, (or (and GPRnopc:$Rn, 0xFFFF0000),
(and (sra GPRnopc:$Rm, pkh_asr_amt:$sh),
0xFFFF)))]>,
Requires<[IsARM, HasV6]>,
Sched<[WriteALUsi, ReadALU]>;
// Alternate cases for PKHTB where identities eliminate some nodes. Note that
// a shift amount of 0 is *not legal* here; that case is PKHBT instead.
// We also cannot replace an srl by 17..31 bits with the arithmetic shift we
// would use in pkhtb src1, src2, asr (17..31).
def : ARMV6Pat<(or (and GPRnopc:$src1, 0xFFFF0000),
(srl GPRnopc:$src2, imm16:$sh)),
(PKHTB GPRnopc:$src1, GPRnopc:$src2, imm16:$sh)>;
def : ARMV6Pat<(or (and GPRnopc:$src1, 0xFFFF0000),
(sra GPRnopc:$src2, imm16_31:$sh)),
(PKHTB GPRnopc:$src1, GPRnopc:$src2, imm16_31:$sh)>;
def : ARMV6Pat<(or (and GPRnopc:$src1, 0xFFFF0000),
(and (srl GPRnopc:$src2, imm1_15:$sh), 0xFFFF)),
(PKHTB GPRnopc:$src1, GPRnopc:$src2, imm1_15:$sh)>;
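// Packing illustration (not from the original):
//   (or (and r1, 0xFFFF),     (shl r2, 16))  ==>  pkhbt r0, r1, r2, lsl #16
//   (or (and r1, 0xFFFF0000), (srl r2, 16))  ==>  pkhtb r0, r1, r2, asr #16
// The srl/asr substitution is safe because for shifts of at most 16 bits the
// low halfword of a logical and an arithmetic right shift is identical; for
// shifts of 17..31 the fill bits would reach the low halfword, hence the
// restriction noted above.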
//===----------------------------------------------------------------------===//
// CRC Instructions
//
// Polynomials:
// + CRC32{B,H,W} 0x04C11DB7
// + CRC32C{B,H,W} 0x1EDC6F41
//
class AI_crc32<bit C, bits<2> sz, string suffix, SDPatternOperator builtin>
: AInoP<(outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm), MiscFrm, NoItinerary,
!strconcat("crc32", suffix), "\t$Rd, $Rn, $Rm",
[(set GPRnopc:$Rd, (builtin GPRnopc:$Rn, GPRnopc:$Rm))]>,
Requires<[IsARM, HasV8, HasCRC]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
let Inst{31-28} = 0b1110;
let Inst{27-23} = 0b00010;
let Inst{22-21} = sz;
let Inst{20} = 0;
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{11-10} = 0b00;
let Inst{9} = C;
let Inst{8} = 0;
let Inst{7-4} = 0b0100;
let Inst{3-0} = Rm;
let Unpredictable{11-8} = 0b1101;
}
def CRC32B : AI_crc32<0, 0b00, "b", int_arm_crc32b>;
def CRC32CB : AI_crc32<1, 0b00, "cb", int_arm_crc32cb>;
def CRC32H : AI_crc32<0, 0b01, "h", int_arm_crc32h>;
def CRC32CH : AI_crc32<1, 0b01, "ch", int_arm_crc32ch>;
def CRC32W : AI_crc32<0, 0b10, "w", int_arm_crc32w>;
def CRC32CW : AI_crc32<1, 0b10, "cw", int_arm_crc32cw>;
//===----------------------------------------------------------------------===//
// Comparison Instructions...
//
defm CMP : AI1_cmp_irs<0b1010, "cmp",
IIC_iCMPi, IIC_iCMPr, IIC_iCMPsr,
BinOpFrag<(ARMcmp node:$LHS, node:$RHS)>>;
// ARMcmpZ can re-use the above instruction definitions.
def : ARMPat<(ARMcmpZ GPR:$src, so_imm:$imm),
(CMPri GPR:$src, so_imm:$imm)>;
def : ARMPat<(ARMcmpZ GPR:$src, GPR:$rhs),
(CMPrr GPR:$src, GPR:$rhs)>;
def : ARMPat<(ARMcmpZ GPR:$src, so_reg_imm:$rhs),
(CMPrsi GPR:$src, so_reg_imm:$rhs)>;
def : ARMPat<(ARMcmpZ GPR:$src, so_reg_reg:$rhs),
(CMPrsr GPR:$src, so_reg_reg:$rhs)>;
// CMN register-integer
let isCompare = 1, Defs = [CPSR] in {
def CMNri : AI1<0b1011, (outs), (ins GPR:$Rn, so_imm:$imm), DPFrm, IIC_iCMPi,
"cmn", "\t$Rn, $imm",
[(ARMcmn GPR:$Rn, so_imm:$imm)]>,
Sched<[WriteCMP, ReadALU]> {
bits<4> Rn;
bits<12> imm;
let Inst{25} = 1;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = 0b0000;
let Inst{11-0} = imm;
let Unpredictable{15-12} = 0b1111;
}
// CMN register-register/shift
def CMNzrr : AI1<0b1011, (outs), (ins GPR:$Rn, GPR:$Rm), DPFrm, IIC_iCMPr,
"cmn", "\t$Rn, $Rm",
[(BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>
GPR:$Rn, GPR:$Rm)]>, Sched<[WriteCMP, ReadALU, ReadALU]> {
bits<4> Rn;
bits<4> Rm;
let isCommutable = 1;
let Inst{25} = 0;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = 0b0000;
let Inst{11-4} = 0b00000000;
let Inst{3-0} = Rm;
let Unpredictable{15-12} = 0b1111;
}
def CMNzrsi : AI1<0b1011, (outs),
(ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm, IIC_iCMPsr,
"cmn", "\t$Rn, $shift",
[(BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>
GPR:$Rn, so_reg_imm:$shift)]>,
Sched<[WriteCMPsi, ReadALU]> {
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = 0b0000;
let Inst{11-5} = shift{11-5};
let Inst{4} = 0;
let Inst{3-0} = shift{3-0};
let Unpredictable{15-12} = 0b1111;
}
def CMNzrsr : AI1<0b1011, (outs),
(ins GPRnopc:$Rn, so_reg_reg:$shift), DPSoRegRegFrm, IIC_iCMPsr,
"cmn", "\t$Rn, $shift",
[(BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>
GPRnopc:$Rn, so_reg_reg:$shift)]>,
Sched<[WriteCMPsr, ReadALU]> {
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
let Inst{20} = 1;
let Inst{19-16} = Rn;
let Inst{15-12} = 0b0000;
let Inst{11-8} = shift{11-8};
let Inst{7} = 0;
let Inst{6-5} = shift{6-5};
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
let Unpredictable{15-12} = 0b1111;
}
}
def : ARMPat<(ARMcmp GPR:$src, so_imm_neg:$imm),
(CMNri GPR:$src, so_imm_neg:$imm)>;
def : ARMPat<(ARMcmpZ GPR:$src, so_imm_neg:$imm),
(CMNri GPR:$src, so_imm_neg:$imm)>;
// Note that TST/TEQ don't set all the same flags that CMP does!
defm TST : AI1_cmp_irs<0b1000, "tst",
IIC_iTSTi, IIC_iTSTr, IIC_iTSTsr,
BinOpFrag<(ARMcmpZ (and_su node:$LHS, node:$RHS), 0)>, 1>;
defm TEQ : AI1_cmp_irs<0b1001, "teq",
IIC_iTSTi, IIC_iTSTr, IIC_iTSTsr,
BinOpFrag<(ARMcmpZ (xor_su node:$LHS, node:$RHS), 0)>, 1>;
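// Illustration of the note above (summarizing the ARM ARM): "tst r0, r1"
// updates N and Z from the AND result (and C from the shifter operand when one
// is used) but leaves V unchanged, whereas "cmp r0, r1" sets N, Z, C and V
// from the subtraction. Condition codes such as VS/VC or GE/LT are therefore
// not meaningful after a TST/TEQ.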
// Pseudo i64 compares for some floating point compares.
let usesCustomInserter = 1, isBranch = 1, isTerminator = 1,
Defs = [CPSR] in {
def BCCi64 : PseudoInst<(outs),
(ins i32imm:$cc, GPR:$lhs1, GPR:$lhs2, GPR:$rhs1, GPR:$rhs2, brtarget:$dst),
IIC_Br,
[(ARMBcci64 imm:$cc, GPR:$lhs1, GPR:$lhs2, GPR:$rhs1, GPR:$rhs2, bb:$dst)]>,
Sched<[WriteBr]>;
def BCCZi64 : PseudoInst<(outs),
(ins i32imm:$cc, GPR:$lhs1, GPR:$lhs2, brtarget:$dst), IIC_Br,
[(ARMBcci64 imm:$cc, GPR:$lhs1, GPR:$lhs2, 0, 0, bb:$dst)]>,
Sched<[WriteBr]>;
} // usesCustomInserter
// Conditional moves
let neverHasSideEffects = 1 in {
let isCommutable = 1, isSelect = 1 in
def MOVCCr : ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$false, GPR:$Rm, cmovpred:$p),
4, IIC_iCMOVr,
[(set GPR:$Rd, (ARMcmov GPR:$false, GPR:$Rm,
cmovpred:$p))]>,
RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
def MOVCCsi : ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$false, so_reg_imm:$shift, cmovpred:$p),
4, IIC_iCMOVsr,
[(set GPR:$Rd,
(ARMcmov GPR:$false, so_reg_imm:$shift,
cmovpred:$p))]>,
RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
def MOVCCsr : ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$false, so_reg_reg:$shift, cmovpred:$p),
4, IIC_iCMOVsr,
[(set GPR:$Rd, (ARMcmov GPR:$false, so_reg_reg:$shift,
cmovpred:$p))]>,
RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
let isMoveImm = 1 in
def MOVCCi16
: ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$false, imm0_65535_expr:$imm, cmovpred:$p),
4, IIC_iMOVi,
[(set GPR:$Rd, (ARMcmov GPR:$false, imm0_65535:$imm,
cmovpred:$p))]>,
RegConstraint<"$false = $Rd">, Requires<[IsARM, HasV6T2]>,
Sched<[WriteALU]>;
let isMoveImm = 1 in
def MOVCCi : ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$false, so_imm:$imm, cmovpred:$p),
4, IIC_iCMOVi,
[(set GPR:$Rd, (ARMcmov GPR:$false, so_imm:$imm,
cmovpred:$p))]>,
RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
// Two-instruction predicated mov immediate.
let isMoveImm = 1 in
def MOVCCi32imm
: ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$false, i32imm:$src, cmovpred:$p),
8, IIC_iCMOVix2,
[(set GPR:$Rd, (ARMcmov GPR:$false, imm:$src,
cmovpred:$p))]>,
RegConstraint<"$false = $Rd">, Requires<[IsARM, HasV6T2]>;
let isMoveImm = 1 in
def MVNCCi : ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$false, so_imm:$imm, cmovpred:$p),
4, IIC_iCMOVi,
[(set GPR:$Rd, (ARMcmov GPR:$false, so_imm_not:$imm,
cmovpred:$p))]>,
RegConstraint<"$false = $Rd">, Sched<[WriteALU]>;
} // neverHasSideEffects
//===----------------------------------------------------------------------===//
// Atomic operations intrinsics
//
def MemBarrierOptOperand : AsmOperandClass {
let Name = "MemBarrierOpt";
let ParserMethod = "parseMemBarrierOptOperand";
}
def memb_opt : Operand<i32> {
let PrintMethod = "printMemBOption";
let ParserMatchClass = MemBarrierOptOperand;
let DecoderMethod = "DecodeMemBarrierOption";
}
def InstSyncBarrierOptOperand : AsmOperandClass {
let Name = "InstSyncBarrierOpt";
let ParserMethod = "parseInstSyncBarrierOptOperand";
}
def instsyncb_opt : Operand<i32> {
let PrintMethod = "printInstSyncBOption";
let ParserMatchClass = InstSyncBarrierOptOperand;
let DecoderMethod = "DecodeInstSyncBarrierOption";
}
// Memory barriers protect the atomic sequences.
let hasSideEffects = 1 in {
def DMB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
"dmb", "\t$opt", [(int_arm_dmb (i32 imm0_15:$opt))]>,
Requires<[IsARM, HasDB]> {
bits<4> opt;
let Inst{31-4} = 0xf57ff05;
let Inst{3-0} = opt;
}
}
def DSB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
"dsb", "\t$opt", [(int_arm_dsb (i32 imm0_15:$opt))]>,
Requires<[IsARM, HasDB]> {
bits<4> opt;
let Inst{31-4} = 0xf57ff04;
let Inst{3-0} = opt;
}
// ISB has only the full-system option.
def ISB : AInoP<(outs), (ins instsyncb_opt:$opt), MiscFrm, NoItinerary,
"isb", "\t$opt", []>,
Requires<[IsARM, HasDB]> {
bits<4> opt;
let Inst{31-4} = 0xf57ff06;
let Inst{3-0} = opt;
}
let usesCustomInserter = 1, Defs = [CPSR] in {
// Pseudo instruction that combines movs + predicated rsbmi
// to implement integer ABS
def ABS : ARMPseudoInst<(outs GPR:$dst), (ins GPR:$src), 8, NoItinerary, []>;
}
let usesCustomInserter = 1 in {
def COPY_STRUCT_BYVAL_I32 : PseudoInst<
(outs), (ins GPR:$dst, GPR:$src, i32imm:$size, i32imm:$alignment),
NoItinerary,
[(ARMcopystructbyval GPR:$dst, GPR:$src, imm:$size, imm:$alignment)]>;
}
def ldrex_1 : PatFrag<(ops node:$ptr), (int_arm_ldrex node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
def ldrex_2 : PatFrag<(ops node:$ptr), (int_arm_ldrex node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def ldrex_4 : PatFrag<(ops node:$ptr), (int_arm_ldrex node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def strex_1 : PatFrag<(ops node:$val, node:$ptr),
(int_arm_strex node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
def strex_2 : PatFrag<(ops node:$val, node:$ptr),
(int_arm_strex node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def strex_4 : PatFrag<(ops node:$val, node:$ptr),
(int_arm_strex node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def ldaex_1 : PatFrag<(ops node:$ptr), (int_arm_ldaex node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
def ldaex_2 : PatFrag<(ops node:$ptr), (int_arm_ldaex node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def ldaex_4 : PatFrag<(ops node:$ptr), (int_arm_ldaex node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def stlex_1 : PatFrag<(ops node:$val, node:$ptr),
(int_arm_stlex node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
def stlex_2 : PatFrag<(ops node:$val, node:$ptr),
(int_arm_stlex node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def stlex_4 : PatFrag<(ops node:$val, node:$ptr),
(int_arm_stlex node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
let mayLoad = 1 in {
def LDREXB : AIldrex<0b10, (outs GPR:$Rt), (ins addr_offset_none:$addr),
NoItinerary, "ldrexb", "\t$Rt, $addr",
[(set GPR:$Rt, (ldrex_1 addr_offset_none:$addr))]>;
def LDREXH : AIldrex<0b11, (outs GPR:$Rt), (ins addr_offset_none:$addr),
NoItinerary, "ldrexh", "\t$Rt, $addr",
[(set GPR:$Rt, (ldrex_2 addr_offset_none:$addr))]>;
def LDREX : AIldrex<0b00, (outs GPR:$Rt), (ins addr_offset_none:$addr),
NoItinerary, "ldrex", "\t$Rt, $addr",
[(set GPR:$Rt, (ldrex_4 addr_offset_none:$addr))]>;
let hasExtraDefRegAllocReq = 1 in
def LDREXD : AIldrex<0b01, (outs GPRPairOp:$Rt),(ins addr_offset_none:$addr),
NoItinerary, "ldrexd", "\t$Rt, $addr", []> {
let DecoderMethod = "DecodeDoubleRegLoad";
}
def LDAEXB : AIldaex<0b10, (outs GPR:$Rt), (ins addr_offset_none:$addr),
NoItinerary, "ldaexb", "\t$Rt, $addr",
[(set GPR:$Rt, (ldaex_1 addr_offset_none:$addr))]>;
def LDAEXH : AIldaex<0b11, (outs GPR:$Rt), (ins addr_offset_none:$addr),
NoItinerary, "ldaexh", "\t$Rt, $addr",
[(set GPR:$Rt, (ldaex_2 addr_offset_none:$addr))]>;
def LDAEX : AIldaex<0b00, (outs GPR:$Rt), (ins addr_offset_none:$addr),
NoItinerary, "ldaex", "\t$Rt, $addr",
[(set GPR:$Rt, (ldaex_4 addr_offset_none:$addr))]>;
let hasExtraDefRegAllocReq = 1 in
def LDAEXD : AIldaex<0b01, (outs GPRPairOp:$Rt),(ins addr_offset_none:$addr),
NoItinerary, "ldaexd", "\t$Rt, $addr", []> {
let DecoderMethod = "DecodeDoubleRegLoad";
}
}
let mayStore = 1, Constraints = "@earlyclobber $Rd" in {
def STREXB: AIstrex<0b10, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "strexb", "\t$Rd, $Rt, $addr",
[(set GPR:$Rd, (strex_1 GPR:$Rt,
addr_offset_none:$addr))]>;
def STREXH: AIstrex<0b11, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "strexh", "\t$Rd, $Rt, $addr",
[(set GPR:$Rd, (strex_2 GPR:$Rt,
addr_offset_none:$addr))]>;
def STREX : AIstrex<0b00, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "strex", "\t$Rd, $Rt, $addr",
[(set GPR:$Rd, (strex_4 GPR:$Rt,
addr_offset_none:$addr))]>;
let hasExtraSrcRegAllocReq = 1 in
def STREXD : AIstrex<0b01, (outs GPR:$Rd),
(ins GPRPairOp:$Rt, addr_offset_none:$addr),
NoItinerary, "strexd", "\t$Rd, $Rt, $addr", []> {
let DecoderMethod = "DecodeDoubleRegStore";
}
def STLEXB: AIstlex<0b10, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "stlexb", "\t$Rd, $Rt, $addr",
[(set GPR:$Rd,
(stlex_1 GPR:$Rt, addr_offset_none:$addr))]>;
def STLEXH: AIstlex<0b11, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "stlexh", "\t$Rd, $Rt, $addr",
[(set GPR:$Rd,
(stlex_2 GPR:$Rt, addr_offset_none:$addr))]>;
def STLEX : AIstlex<0b00, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "stlex", "\t$Rd, $Rt, $addr",
[(set GPR:$Rd,
(stlex_4 GPR:$Rt, addr_offset_none:$addr))]>;
let hasExtraSrcRegAllocReq = 1 in
def STLEXD : AIstlex<0b01, (outs GPR:$Rd),
(ins GPRPairOp:$Rt, addr_offset_none:$addr),
NoItinerary, "stlexd", "\t$Rd, $Rt, $addr", []> {
let DecoderMethod = "DecodeDoubleRegStore";
}
}
def CLREX : AXI<(outs), (ins), MiscFrm, NoItinerary, "clrex",
[(int_arm_clrex)]>,
Requires<[IsARM, HasV7]> {
let Inst{31-0} = 0b11110101011111111111000000011111;
}
def : ARMPat<(strex_1 (and GPR:$Rt, 0xff), addr_offset_none:$addr),
(STREXB GPR:$Rt, addr_offset_none:$addr)>;
def : ARMPat<(strex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr),
(STREXH GPR:$Rt, addr_offset_none:$addr)>;
def : ARMPat<(stlex_1 (and GPR:$Rt, 0xff), addr_offset_none:$addr),
(STLEXB GPR:$Rt, addr_offset_none:$addr)>;
def : ARMPat<(stlex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr),
(STLEXH GPR:$Rt, addr_offset_none:$addr)>;
class acquiring_load<PatFrag base>
: PatFrag<(ops node:$ptr), (base node:$ptr), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
return Ordering == Acquire || Ordering == SequentiallyConsistent;
}]>;
def atomic_load_acquire_8 : acquiring_load<atomic_load_8>;
def atomic_load_acquire_16 : acquiring_load<atomic_load_16>;
def atomic_load_acquire_32 : acquiring_load<atomic_load_32>;
class releasing_store<PatFrag base>
: PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
return Ordering == Release || Ordering == SequentiallyConsistent;
}]>;
def atomic_store_release_8 : releasing_store<atomic_store_8>;
def atomic_store_release_16 : releasing_store<atomic_store_16>;
def atomic_store_release_32 : releasing_store<atomic_store_32>;
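// Illustration (not from the original): an i32 atomic load marked acquire or
// seq_cst satisfies the acquiring_load predicate above and is selected to LDA
// by the patterns below; weaker orderings do not match these fragments and go
// through the ordinary load selection instead.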
let AddedComplexity = 8 in {
def : ARMPat<(atomic_load_acquire_8 addr_offset_none:$addr), (LDAB addr_offset_none:$addr)>;
def : ARMPat<(atomic_load_acquire_16 addr_offset_none:$addr), (LDAH addr_offset_none:$addr)>;
def : ARMPat<(atomic_load_acquire_32 addr_offset_none:$addr), (LDA addr_offset_none:$addr)>;
def : ARMPat<(atomic_store_release_8 addr_offset_none:$addr, GPR:$val), (STLB GPR:$val, addr_offset_none:$addr)>;
def : ARMPat<(atomic_store_release_16 addr_offset_none:$addr, GPR:$val), (STLH GPR:$val, addr_offset_none:$addr)>;
def : ARMPat<(atomic_store_release_32 addr_offset_none:$addr, GPR:$val), (STL GPR:$val, addr_offset_none:$addr)>;
}
// SWP/SWPB are deprecated in V6/V7.
let mayLoad = 1, mayStore = 1 in {
def SWP : AIswp<0, (outs GPRnopc:$Rt),
(ins GPRnopc:$Rt2, addr_offset_none:$addr), "swp", []>,
Requires<[PreV8]>;
def SWPB: AIswp<1, (outs GPRnopc:$Rt),
(ins GPRnopc:$Rt2, addr_offset_none:$addr), "swpb", []>,
Requires<[PreV8]>;
}
//===----------------------------------------------------------------------===//
// Coprocessor Instructions.
//
def CDP : ABI<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1,
c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, imm0_7:$opc2),
NoItinerary, "cdp", "\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2",
[(int_arm_cdp imm:$cop, imm:$opc1, imm:$CRd, imm:$CRn,
imm:$CRm, imm:$opc2)]>,
Requires<[PreV8]> {
bits<4> opc1;
bits<4> CRn;
bits<4> CRd;
bits<4> cop;
bits<3> opc2;
bits<4> CRm;
let Inst{3-0} = CRm;
let Inst{4} = 0;
let Inst{7-5} = opc2;
let Inst{11-8} = cop;
let Inst{15-12} = CRd;
let Inst{19-16} = CRn;
let Inst{23-20} = opc1;
}
def CDP2 : ABXI<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1,
c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, imm0_7:$opc2),
NoItinerary, "cdp2\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2",
[(int_arm_cdp2 imm:$cop, imm:$opc1, imm:$CRd, imm:$CRn,
imm:$CRm, imm:$opc2)]>,
Requires<[PreV8]> {
let Inst{31-28} = 0b1111;
bits<4> opc1;
bits<4> CRn;
bits<4> CRd;
bits<4> cop;
bits<3> opc2;
bits<4> CRm;
let Inst{3-0} = CRm;
let Inst{4} = 0;
let Inst{7-5} = opc2;
let Inst{11-8} = cop;
let Inst{15-12} = CRd;
let Inst{19-16} = CRn;
let Inst{23-20} = opc1;
}
class ACI<dag oops, dag iops, string opc, string asm,
IndexMode im = IndexModeNone>
: I<oops, iops, AddrModeNone, 4, im, BrFrm, NoItinerary,
opc, asm, "", []> {
let Inst{27-25} = 0b110;
}
class ACInoP<dag oops, dag iops, string opc, string asm,
IndexMode im = IndexModeNone>
: InoP<oops, iops, AddrModeNone, 4, im, BrFrm, NoItinerary,
opc, asm, "", []> {
let Inst{31-28} = 0b1111;
let Inst{27-25} = 0b110;
}
multiclass LdStCop<bit load, bit Dbit, string asm> {
def _OFFSET : ACI<(outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5:$addr),
asm, "\t$cop, $CRd, $addr"> {
bits<13> addr;
bits<4> cop;
bits<4> CRd;
let Inst{24} = 1; // P = 1
let Inst{23} = addr{8};
let Inst{22} = Dbit;
let Inst{21} = 0; // W = 0
let Inst{20} = load;
let Inst{19-16} = addr{12-9};
let Inst{15-12} = CRd;
let Inst{11-8} = cop;
let Inst{7-0} = addr{7-0};
let DecoderMethod = "DecodeCopMemInstruction";
}
def _PRE : ACI<(outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5_pre:$addr),
asm, "\t$cop, $CRd, $addr!", IndexModePre> {
bits<13> addr;
bits<4> cop;
bits<4> CRd;
let Inst{24} = 1; // P = 1
let Inst{23} = addr{8};
let Inst{22} = Dbit;
let Inst{21} = 1; // W = 1
let Inst{20} = load;
let Inst{19-16} = addr{12-9};
let Inst{15-12} = CRd;
let Inst{11-8} = cop;
let Inst{7-0} = addr{7-0};
let DecoderMethod = "DecodeCopMemInstruction";
}
def _POST: ACI<(outs), (ins p_imm:$cop, c_imm:$CRd, addr_offset_none:$addr,
postidx_imm8s4:$offset),
asm, "\t$cop, $CRd, $addr, $offset", IndexModePost> {
bits<9> offset;
bits<4> addr;
bits<4> cop;
bits<4> CRd;
let Inst{24} = 0; // P = 0
let Inst{23} = offset{8};
let Inst{22} = Dbit;
let Inst{21} = 1; // W = 1
let Inst{20} = load;
let Inst{19-16} = addr;
let Inst{15-12} = CRd;
let Inst{11-8} = cop;
let Inst{7-0} = offset{7-0};
let DecoderMethod = "DecodeCopMemInstruction";
}
def _OPTION : ACI<(outs),
(ins p_imm:$cop, c_imm:$CRd, addr_offset_none:$addr,
coproc_option_imm:$option),
asm, "\t$cop, $CRd, $addr, $option"> {
bits<8> option;
bits<4> addr;
bits<4> cop;
bits<4> CRd;
let Inst{24} = 0; // P = 0
let Inst{23} = 1; // U = 1
let Inst{22} = Dbit;
let Inst{21} = 0; // W = 0
let Inst{20} = load;
let Inst{19-16} = addr;
let Inst{15-12} = CRd;
let Inst{11-8} = cop;
let Inst{7-0} = option;
let DecoderMethod = "DecodeCopMemInstruction";
}
}
multiclass LdSt2Cop<bit load, bit Dbit, string asm> {
def _OFFSET : ACInoP<(outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5:$addr),
asm, "\t$cop, $CRd, $addr"> {
bits<13> addr;
bits<4> cop;
bits<4> CRd;
let Inst{24} = 1; // P = 1
let Inst{23} = addr{8};
let Inst{22} = Dbit;
let Inst{21} = 0; // W = 0
let Inst{20} = load;
let Inst{19-16} = addr{12-9};
let Inst{15-12} = CRd;
let Inst{11-8} = cop;
let Inst{7-0} = addr{7-0};
let DecoderMethod = "DecodeCopMemInstruction";
}
def _PRE : ACInoP<(outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5_pre:$addr),
asm, "\t$cop, $CRd, $addr!", IndexModePre> {
bits<13> addr;
bits<4> cop;
bits<4> CRd;
let Inst{24} = 1; // P = 1
let Inst{23} = addr{8};
let Inst{22} = Dbit;
let Inst{21} = 1; // W = 1
let Inst{20} = load;
let Inst{19-16} = addr{12-9};
let Inst{15-12} = CRd;
let Inst{11-8} = cop;
let Inst{7-0} = addr{7-0};
let DecoderMethod = "DecodeCopMemInstruction";
}
def _POST: ACInoP<(outs), (ins p_imm:$cop, c_imm:$CRd, addr_offset_none:$addr,
postidx_imm8s4:$offset),
asm, "\t$cop, $CRd, $addr, $offset", IndexModePost> {
bits<9> offset;
bits<4> addr;
bits<4> cop;
bits<4> CRd;
let Inst{24} = 0; // P = 0
let Inst{23} = offset{8};
let Inst{22} = Dbit;
let Inst{21} = 1; // W = 1
let Inst{20} = load;
let Inst{19-16} = addr;
let Inst{15-12} = CRd;
let Inst{11-8} = cop;
let Inst{7-0} = offset{7-0};
let DecoderMethod = "DecodeCopMemInstruction";
}
def _OPTION : ACInoP<(outs),
(ins p_imm:$cop, c_imm:$CRd, addr_offset_none:$addr,
coproc_option_imm:$option),
asm, "\t$cop, $CRd, $addr, $option"> {
bits<8> option;
bits<4> addr;
bits<4> cop;
bits<4> CRd;
let Inst{24} = 0; // P = 0
let Inst{23} = 1; // U = 1
let Inst{22} = Dbit;
let Inst{21} = 0; // W = 0
let Inst{20} = load;
let Inst{19-16} = addr;
let Inst{15-12} = CRd;
let Inst{11-8} = cop;
let Inst{7-0} = option;
let DecoderMethod = "DecodeCopMemInstruction";
}
}
defm LDC : LdStCop <1, 0, "ldc">;
defm LDCL : LdStCop <1, 1, "ldcl">;
defm STC : LdStCop <0, 0, "stc">;
defm STCL : LdStCop <0, 1, "stcl">;
defm LDC2 : LdSt2Cop<1, 0, "ldc2">, Requires<[PreV8]>;
defm LDC2L : LdSt2Cop<1, 1, "ldc2l">, Requires<[PreV8]>;
defm STC2 : LdSt2Cop<0, 0, "stc2">, Requires<[PreV8]>;
defm STC2L : LdSt2Cop<0, 1, "stc2l">, Requires<[PreV8]>;
//===----------------------------------------------------------------------===//
// Move between coprocessor and ARM core register.
//
class MovRCopro<string opc, bit direction, dag oops, dag iops,
list<dag> pattern>
: ABI<0b1110, oops, iops, NoItinerary, opc,
"\t$cop, $opc1, $Rt, $CRn, $CRm, $opc2", pattern> {
let Inst{20} = direction;
let Inst{4} = 1;
bits<4> Rt;
bits<4> cop;
bits<3> opc1;
bits<3> opc2;
bits<4> CRm;
bits<4> CRn;
let Inst{15-12} = Rt;
let Inst{11-8} = cop;
let Inst{23-21} = opc1;
let Inst{7-5} = opc2;
let Inst{3-0} = CRm;
let Inst{19-16} = CRn;
}
def MCR : MovRCopro<"mcr", 0 /* from ARM core register to coprocessor */,
(outs),
(ins p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
c_imm:$CRm, imm0_7:$opc2),
[(int_arm_mcr imm:$cop, imm:$opc1, GPR:$Rt, imm:$CRn,
imm:$CRm, imm:$opc2)]>,
ComplexDeprecationPredicate<"MCR">;
def : ARMInstAlias<"mcr${p} $cop, $opc1, $Rt, $CRn, $CRm",
(MCR p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
c_imm:$CRm, 0, pred:$p)>;
def MRC : MovRCopro<"mrc", 1 /* from coprocessor to ARM core register */,
(outs GPRwithAPSR:$Rt),
(ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, c_imm:$CRm,
imm0_7:$opc2), []>;
def : ARMInstAlias<"mrc${p} $cop, $opc1, $Rt, $CRn, $CRm",
(MRC GPRwithAPSR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
c_imm:$CRm, 0, pred:$p)>;
def : ARMPat<(int_arm_mrc imm:$cop, imm:$opc1, imm:$CRn, imm:$CRm, imm:$opc2),
(MRC imm:$cop, imm:$opc1, imm:$CRn, imm:$CRm, imm:$opc2)>;
class MovRCopro2<string opc, bit direction, dag oops, dag iops,
list<dag> pattern>
: ABXI<0b1110, oops, iops, NoItinerary,
!strconcat(opc, "\t$cop, $opc1, $Rt, $CRn, $CRm, $opc2"), pattern> {
let Inst{31-24} = 0b11111110;
let Inst{20} = direction;
let Inst{4} = 1;
bits<4> Rt;
bits<4> cop;
bits<3> opc1;
bits<3> opc2;
bits<4> CRm;
bits<4> CRn;
let Inst{15-12} = Rt;
let Inst{11-8} = cop;
let Inst{23-21} = opc1;
let Inst{7-5} = opc2;
let Inst{3-0} = CRm;
let Inst{19-16} = CRn;
}
def MCR2 : MovRCopro2<"mcr2", 0 /* from ARM core register to coprocessor */,
(outs),
(ins p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
c_imm:$CRm, imm0_7:$opc2),
[(int_arm_mcr2 imm:$cop, imm:$opc1, GPR:$Rt, imm:$CRn,
imm:$CRm, imm:$opc2)]>,
Requires<[PreV8]>;
def : ARMInstAlias<"mcr2 $cop, $opc1, $Rt, $CRn, $CRm",
(MCR2 p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
c_imm:$CRm, 0)>;
def MRC2 : MovRCopro2<"mrc2", 1 /* from coprocessor to ARM core register */,
(outs GPRwithAPSR:$Rt),
(ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, c_imm:$CRm,
imm0_7:$opc2), []>,
Requires<[PreV8]>;
def : ARMInstAlias<"mrc2 $cop, $opc1, $Rt, $CRn, $CRm",
(MRC2 GPRwithAPSR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
c_imm:$CRm, 0)>;
def : ARMV5TPat<(int_arm_mrc2 imm:$cop, imm:$opc1, imm:$CRn,
imm:$CRm, imm:$opc2),
(MRC2 imm:$cop, imm:$opc1, imm:$CRn, imm:$CRm, imm:$opc2)>;
class MovRRCopro<string opc, bit direction, list<dag> pattern = []>
: ABI<0b1100, (outs), (ins p_imm:$cop, imm0_15:$opc1,
GPRnopc:$Rt, GPRnopc:$Rt2, c_imm:$CRm),
NoItinerary, opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm", pattern> {
let Inst{23-21} = 0b010;
let Inst{20} = direction;
bits<4> Rt;
bits<4> Rt2;
bits<4> cop;
bits<4> opc1;
bits<4> CRm;
let Inst{15-12} = Rt;
let Inst{19-16} = Rt2;
let Inst{11-8} = cop;
let Inst{7-4} = opc1;
let Inst{3-0} = CRm;
}
def MCRR : MovRRCopro<"mcrr", 0 /* from ARM core register to coprocessor */,
[(int_arm_mcrr imm:$cop, imm:$opc1, GPRnopc:$Rt,
GPRnopc:$Rt2, imm:$CRm)]>;
def MRRC : MovRRCopro<"mrrc", 1 /* from coprocessor to ARM core register */>;
class MovRRCopro2<string opc, bit direction, list<dag> pattern = []>
: ABXI<0b1100, (outs), (ins p_imm:$cop, imm0_15:$opc1,
GPRnopc:$Rt, GPRnopc:$Rt2, c_imm:$CRm), NoItinerary,
!strconcat(opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm"), pattern>,
Requires<[PreV8]> {
let Inst{31-28} = 0b1111;
let Inst{23-21} = 0b010;
let Inst{20} = direction;
bits<4> Rt;
bits<4> Rt2;
bits<4> cop;
bits<4> opc1;
bits<4> CRm;
let Inst{15-12} = Rt;
let Inst{19-16} = Rt2;
let Inst{11-8} = cop;
let Inst{7-4} = opc1;
let Inst{3-0} = CRm;
let DecoderMethod = "DecodeMRRC2";
}
def MCRR2 : MovRRCopro2<"mcrr2", 0 /* from ARM core register to coprocessor */,
[(int_arm_mcrr2 imm:$cop, imm:$opc1, GPRnopc:$Rt,
GPRnopc:$Rt2, imm:$CRm)]>;
def MRRC2 : MovRRCopro2<"mrrc2", 1 /* from coprocessor to ARM core register */>;
//===----------------------------------------------------------------------===//
// Move between special register and ARM core register
//
// Move to ARM core register from Special Register
def MRS : ABI<0b0001, (outs GPRnopc:$Rd), (ins), NoItinerary,
"mrs", "\t$Rd, apsr", []> {
bits<4> Rd;
let Inst{23-16} = 0b00001111;
let Unpredictable{19-17} = 0b111;
let Inst{15-12} = Rd;
let Inst{11-0} = 0b000000000000;
let Unpredictable{11-0} = 0b110100001111;
}
def : InstAlias<"mrs${p} $Rd, cpsr", (MRS GPRnopc:$Rd, pred:$p)>,
Requires<[IsARM]>;
// The MRSsys instruction is the MRS instruction from the ARM ARM,
// section B9.3.9, with the R bit set to 1.
def MRSsys : ABI<0b0001, (outs GPRnopc:$Rd), (ins), NoItinerary,
"mrs", "\t$Rd, spsr", []> {
bits<4> Rd;
let Inst{23-16} = 0b01001111;
let Unpredictable{19-16} = 0b1111;
let Inst{15-12} = Rd;
let Inst{11-0} = 0b000000000000;
let Unpredictable{11-0} = 0b110100001111;
}
// Move from ARM core register to Special Register
//
// No need to have both system and application versions; the encodings are the
// same, and the assembly parser has no way to distinguish between them. The
// mask operand carries the special-register selector (R bit) in bit 4, and
// bits 3-0 contain the mask of fields to be accessed in the special register.
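// Encoding illustration (assumed example): for "msr CPSR_fc, r0" the parser
// produces a mask operand with the R bit (bit 4) clear and the field bits
// f|c = 0b1001 in bits 3-0, which land in Inst{22} and Inst{19-16} below.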
def MSR : ABI<0b0001, (outs), (ins msr_mask:$mask, GPR:$Rn), NoItinerary,
"msr", "\t$mask, $Rn", []> {
bits<5> mask;
bits<4> Rn;
let Inst{23} = 0;
let Inst{22} = mask{4}; // R bit
let Inst{21-20} = 0b10;
let Inst{19-16} = mask{3-0};
let Inst{15-12} = 0b1111;
let Inst{11-4} = 0b00000000;
let Inst{3-0} = Rn;
}
def MSRi : ABI<0b0011, (outs), (ins msr_mask:$mask, so_imm:$a), NoItinerary,
"msr", "\t$mask, $a", []> {
bits<5> mask;
bits<12> a;
let Inst{23} = 0;
let Inst{22} = mask{4}; // R bit
let Inst{21-20} = 0b10;
let Inst{19-16} = mask{3-0};
let Inst{15-12} = 0b1111;
let Inst{11-0} = a;
}
// Dynamic stack allocation yields a _chkstk for Windows targets. These calls
// are needed to probe the stack when allocating more than 4K bytes in one go.
// Touching the stack at 4K increments is necessary to ensure that the guard
// pages used by the OS virtual memory manager are allocated in the correct
// sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects (compared to ordinary calls), such as the change to the stack
// pointer.
def win__chkstk : SDNode<"ARMISD::WIN__CHKSTK", SDTNone,
[SDNPHasChain, SDNPSideEffect]>;
let usesCustomInserter = 1, Uses = [R4], Defs = [R4, SP] in
def WIN__CHKSTK : PseudoInst<(outs), (ins), NoItinerary, [(win__chkstk)]>;
//===----------------------------------------------------------------------===//
// TLS Instructions
//
// __aeabi_read_tp preserves the registers r1-r3.
// This is a pseudo inst so that we can get the encoding right,
// complete with fixup for the aeabi_read_tp function.
let isCall = 1,
Defs = [R0, R12, LR, CPSR], Uses = [SP] in {
def TPsoft : PseudoInst<(outs), (ins), IIC_Br,
[(set R0, ARMthread_pointer)]>, Sched<[WriteBr]>;
}
//===----------------------------------------------------------------------===//
// SJLJ Exception handling intrinsics
// eh_sjlj_setjmp() is an instruction sequence to store the return
// address and save #0 in R0 for the non-longjmp case.
// Since by its nature we may be coming from some other function to get
// here, and we're using the stack frame for the containing function to
// save/restore registers, we can't keep anything live in regs across
// the eh_sjlj_setjmp(), else it will almost certainly have been tromped upon
// when we get here from a longjmp(). We force everything out of registers
// except for our own input by listing the relevant registers in Defs. By
// doing so, we also cause the prologue/epilogue code to actively preserve
// all of the callee-saved registers, which is exactly what we want.
// A constant value is passed in $val, and we use the location as a scratch.
//
// These are pseudo-instructions and are lowered to individual MC-insts, so
// no encoding information is necessary.
let Defs =
[ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, CPSR,
Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, Q10, Q11, Q12, Q13, Q14, Q15 ],
hasSideEffects = 1, isBarrier = 1, usesCustomInserter = 1 in {
def Int_eh_sjlj_setjmp : PseudoInst<(outs), (ins GPR:$src, GPR:$val),
NoItinerary,
[(set R0, (ARMeh_sjlj_setjmp GPR:$src, GPR:$val))]>,
Requires<[IsARM, HasVFP2]>;
}
let Defs =
[ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, CPSR ],
hasSideEffects = 1, isBarrier = 1, usesCustomInserter = 1 in {
def Int_eh_sjlj_setjmp_nofp : PseudoInst<(outs), (ins GPR:$src, GPR:$val),
NoItinerary,
[(set R0, (ARMeh_sjlj_setjmp GPR:$src, GPR:$val))]>,
Requires<[IsARM, NoVFP]>;
}
// FIXME: Non-IOS version(s)
let isBarrier = 1, hasSideEffects = 1, isTerminator = 1,
Defs = [ R7, LR, SP ] in {
def Int_eh_sjlj_longjmp : PseudoInst<(outs), (ins GPR:$src, GPR:$scratch),
NoItinerary,
[(ARMeh_sjlj_longjmp GPR:$src, GPR:$scratch)]>,
Requires<[IsARM, IsIOS]>;
}
// eh.sjlj.dispatchsetup pseudo-instruction.
// This pseudo is used for both ARM and Thumb. Any differences are handled when
// the pseudo is expanded (which happens before any passes that need the
// instruction size).
let isBarrier = 1 in
def Int_eh_sjlj_dispatchsetup : PseudoInst<(outs), (ins), NoItinerary, []>;
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//
// ARMv4 indirect branch using (MOVr PC, dst)
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in
def MOVPCRX : ARMPseudoExpand<(outs), (ins GPR:$dst),
4, IIC_Br, [(brind GPR:$dst)],
(MOVr PC, GPR:$dst, (ops 14, zero_reg), zero_reg)>,
Requires<[IsARM, NoV4T]>, Sched<[WriteBr]>;
// Large immediate handling.
// A 32-bit immediate is built either from two so_imm pieces or with movw + movt.
// This is a single pseudo-instruction; the benefit is that it can be remat'd
// as a single unit instead of having to handle reg inputs.
// FIXME: Remove this when we can do generalized remat.
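// Illustration (not from the original): MOVi32imm for 0x12345678 expands to
//   movw r0, #0x5678
//   movt r0, #0x1234
// on v6t2+ targets, or to a two-instruction mov/orr sequence of so_imm pieces
// on older cores.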
let isReMaterializable = 1, isMoveImm = 1 in
def MOVi32imm : PseudoInst<(outs GPR:$dst), (ins i32imm:$src), IIC_iMOVix2,
[(set GPR:$dst, (arm_i32imm:$src))]>,
Requires<[IsARM]>;
def LDRLIT_ga_abs : PseudoInst<(outs GPR:$dst), (ins i32imm:$src), IIC_iLoad_i,
[(set GPR:$dst, (ARMWrapper tglobaladdr:$src))]>,
Requires<[IsARM, DontUseMovt]>;
// Pseudo instruction that combines movw + movt + add pc (if PIC).
// It also makes it possible to rematerialize the instructions.
// FIXME: Remove this when we can do generalized remat and when machine licm
// can properly hoist the instructions.
let isReMaterializable = 1 in {
def MOV_ga_pcrel : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
IIC_iMOVix2addpc,
[(set GPR:$dst, (ARMWrapperPIC tglobaladdr:$addr))]>,
Requires<[IsARM, UseMovt]>;
def LDRLIT_ga_pcrel : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
IIC_iLoadiALU,
[(set GPR:$dst,
(ARMWrapperPIC tglobaladdr:$addr))]>,
Requires<[IsARM, DontUseMovt]>;
def LDRLIT_ga_pcrel_ldr : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
NoItinerary,
[(set GPR:$dst,
(load (ARMWrapperPIC tglobaladdr:$addr)))]>,
Requires<[IsARM, DontUseMovt]>;
let AddedComplexity = 10 in
def MOV_ga_pcrel_ldr : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr),
IIC_iMOVix2ld,
[(set GPR:$dst, (load (ARMWrapperPIC tglobaladdr:$addr)))]>,
Requires<[IsARM, UseMovt]>;
} // isReMaterializable
// ConstantPool, GlobalAddress, and JumpTable
def : ARMPat<(ARMWrapper tconstpool :$dst), (LEApcrel tconstpool :$dst)>;
def : ARMPat<(ARMWrapper tglobaladdr :$dst), (MOVi32imm tglobaladdr :$dst)>,
Requires<[IsARM, UseMovt]>;
def : ARMPat<(ARMWrapperJT tjumptable:$dst, imm:$id),
(LEApcrelJT tjumptable:$dst, imm:$id)>;
// TODO: add,sub,and, 3-instr forms?
// Tail calls. These patterns also apply to Thumb mode.
def : Pat<(ARMtcret tcGPR:$dst), (TCRETURNri tcGPR:$dst)>;
def : Pat<(ARMtcret (i32 tglobaladdr:$dst)), (TCRETURNdi texternalsym:$dst)>;
def : Pat<(ARMtcret (i32 texternalsym:$dst)), (TCRETURNdi texternalsym:$dst)>;
// Direct calls
def : ARMPat<(ARMcall texternalsym:$func), (BL texternalsym:$func)>;
def : ARMPat<(ARMcall_nolink texternalsym:$func),
(BMOVPCB_CALL texternalsym:$func)>;
// zextload i1 -> zextload i8
def : ARMPat<(zextloadi1 addrmode_imm12:$addr), (LDRBi12 addrmode_imm12:$addr)>;
def : ARMPat<(zextloadi1 ldst_so_reg:$addr), (LDRBrs ldst_so_reg:$addr)>;
// extload -> zextload
def : ARMPat<(extloadi1 addrmode_imm12:$addr), (LDRBi12 addrmode_imm12:$addr)>;
def : ARMPat<(extloadi1 ldst_so_reg:$addr), (LDRBrs ldst_so_reg:$addr)>;
def : ARMPat<(extloadi8 addrmode_imm12:$addr), (LDRBi12 addrmode_imm12:$addr)>;
def : ARMPat<(extloadi8 ldst_so_reg:$addr), (LDRBrs ldst_so_reg:$addr)>;
def : ARMPat<(extloadi16 addrmode3:$addr), (LDRH addrmode3:$addr)>;
def : ARMPat<(extloadi8 addrmodepc:$addr), (PICLDRB addrmodepc:$addr)>;
def : ARMPat<(extloadi16 addrmodepc:$addr), (PICLDRH addrmodepc:$addr)>;
// smul* and smla*
def : ARMV5TEPat<(mul (sra (shl GPR:$a, (i32 16)), (i32 16)),
(sra (shl GPR:$b, (i32 16)), (i32 16))),
(SMULBB GPR:$a, GPR:$b)>;
def : ARMV5TEPat<(mul sext_16_node:$a, sext_16_node:$b),
(SMULBB GPR:$a, GPR:$b)>;
def : ARMV5TEPat<(mul (sra (shl GPR:$a, (i32 16)), (i32 16)),
(sra GPR:$b, (i32 16))),
(SMULBT GPR:$a, GPR:$b)>;
def : ARMV5TEPat<(mul sext_16_node:$a, (sra GPR:$b, (i32 16))),
(SMULBT GPR:$a, GPR:$b)>;
def : ARMV5TEPat<(mul (sra GPR:$a, (i32 16)),
(sra (shl GPR:$b, (i32 16)), (i32 16))),
(SMULTB GPR:$a, GPR:$b)>;
def : ARMV5TEPat<(mul (sra GPR:$a, (i32 16)), sext_16_node:$b),
(SMULTB GPR:$a, GPR:$b)>;
def : ARMV5TEPat<(sra (mul GPR:$a, (sra (shl GPR:$b, (i32 16)), (i32 16))),
(i32 16)),
(SMULWB GPR:$a, GPR:$b)>;
def : ARMV5TEPat<(sra (mul GPR:$a, sext_16_node:$b), (i32 16)),
(SMULWB GPR:$a, GPR:$b)>;
def : ARMV5MOPat<(add GPR:$acc,
(mul (sra (shl GPR:$a, (i32 16)), (i32 16)),
(sra (shl GPR:$b, (i32 16)), (i32 16)))),
(SMLABB GPR:$a, GPR:$b, GPR:$acc)>;
def : ARMV5MOPat<(add GPR:$acc,
(mul sext_16_node:$a, sext_16_node:$b)),
(SMLABB GPR:$a, GPR:$b, GPR:$acc)>;
def : ARMV5MOPat<(add GPR:$acc,
(mul (sra (shl GPR:$a, (i32 16)), (i32 16)),
(sra GPR:$b, (i32 16)))),
(SMLABT GPR:$a, GPR:$b, GPR:$acc)>;
def : ARMV5MOPat<(add GPR:$acc,
(mul sext_16_node:$a, (sra GPR:$b, (i32 16)))),
(SMLABT GPR:$a, GPR:$b, GPR:$acc)>;
def : ARMV5MOPat<(add GPR:$acc,
(mul (sra GPR:$a, (i32 16)),
(sra (shl GPR:$b, (i32 16)), (i32 16)))),
(SMLATB GPR:$a, GPR:$b, GPR:$acc)>;
def : ARMV5MOPat<(add GPR:$acc,
(mul (sra GPR:$a, (i32 16)), sext_16_node:$b)),
(SMLATB GPR:$a, GPR:$b, GPR:$acc)>;
def : ARMV5MOPat<(add GPR:$acc,
(sra (mul GPR:$a, (sra (shl GPR:$b, (i32 16)), (i32 16))),
(i32 16))),
(SMLAWB GPR:$a, GPR:$b, GPR:$acc)>;
def : ARMV5MOPat<(add GPR:$acc,
(sra (mul GPR:$a, sext_16_node:$b), (i32 16))),
(SMLAWB GPR:$a, GPR:$b, GPR:$acc)>;
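// The (sra (shl x, 16), 16) subtrees above are the DAG form of sign-extending
// the low halfword in place, so (illustrative C, not part of this file):
//   int32_t r = (int16_t)a * (int16_t)b;        // selects SMULBB a, b
//   int32_t r = acc + (int16_t)a * (int16_t)b;  // selects SMLABB a, b, acc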
// Pre-v7 uses MCR for synchronization barriers.
def : ARMPat<(ARMMemBarrierMCR GPR:$zero), (MCR 15, 0, GPR:$zero, 7, 10, 5)>,
Requires<[IsARM, HasV6]>;
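// In assembly terms this is the CP15 data memory barrier,
//   mcr p15, #0, <Rt>, c7, c10, #5
// (with <Rt> holding zero), which pre-v7 cores provide instead of DMB.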
// SXT/UXT with no rotate
let AddedComplexity = 16 in {
def : ARMV6Pat<(and GPR:$Src, 0x000000FF), (UXTB GPR:$Src, 0)>;
def : ARMV6Pat<(and GPR:$Src, 0x0000FFFF), (UXTH GPR:$Src, 0)>;
def : ARMV6Pat<(and GPR:$Src, 0x00FF00FF), (UXTB16 GPR:$Src, 0)>;
def : ARMV6Pat<(add GPR:$Rn, (and GPR:$Rm, 0x00FF)),
(UXTAB GPR:$Rn, GPR:$Rm, 0)>;
def : ARMV6Pat<(add GPR:$Rn, (and GPR:$Rm, 0xFFFF)),
(UXTAH GPR:$Rn, GPR:$Rm, 0)>;
}
def : ARMV6Pat<(sext_inreg GPR:$Src, i8), (SXTB GPR:$Src, 0)>;
def : ARMV6Pat<(sext_inreg GPR:$Src, i16), (SXTH GPR:$Src, 0)>;
def : ARMV6Pat<(add GPR:$Rn, (sext_inreg GPRnopc:$Rm, i8)),
(SXTAB GPR:$Rn, GPRnopc:$Rm, 0)>;
def : ARMV6Pat<(add GPR:$Rn, (sext_inreg GPRnopc:$Rm, i16)),
(SXTAH GPR:$Rn, GPRnopc:$Rm, 0)>;
// Atomic load/store patterns
def : ARMPat<(atomic_load_8 ldst_so_reg:$src),
(LDRBrs ldst_so_reg:$src)>;
def : ARMPat<(atomic_load_8 addrmode_imm12:$src),
(LDRBi12 addrmode_imm12:$src)>;
def : ARMPat<(atomic_load_16 addrmode3:$src),
(LDRH addrmode3:$src)>;
def : ARMPat<(atomic_load_32 ldst_so_reg:$src),
(LDRrs ldst_so_reg:$src)>;
def : ARMPat<(atomic_load_32 addrmode_imm12:$src),
(LDRi12 addrmode_imm12:$src)>;
def : ARMPat<(atomic_store_8 ldst_so_reg:$ptr, GPR:$val),
(STRBrs GPR:$val, ldst_so_reg:$ptr)>;
def : ARMPat<(atomic_store_8 addrmode_imm12:$ptr, GPR:$val),
(STRBi12 GPR:$val, addrmode_imm12:$ptr)>;
def : ARMPat<(atomic_store_16 addrmode3:$ptr, GPR:$val),
(STRH GPR:$val, addrmode3:$ptr)>;
def : ARMPat<(atomic_store_32 ldst_so_reg:$ptr, GPR:$val),
(STRrs GPR:$val, ldst_so_reg:$ptr)>;
def : ARMPat<(atomic_store_32 addrmode_imm12:$ptr, GPR:$val),
(STRi12 GPR:$val, addrmode_imm12:$ptr)>;
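// These work because naturally aligned byte/halfword/word accesses are
// single-copy atomic on ARM; any barriers required by stronger orderings are
// emitted separately, so the plain LDR*/STR* encodings suffice here
// (general architectural note, not specific to this file).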
//===----------------------------------------------------------------------===//
// Thumb Support
//
include "ARMInstrThumb.td"
//===----------------------------------------------------------------------===//
// Thumb2 Support
//
include "ARMInstrThumb2.td"
//===----------------------------------------------------------------------===//
// Floating Point Support
//
include "ARMInstrVFP.td"
//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON) Support
//
include "ARMInstrNEON.td"
//===----------------------------------------------------------------------===//
// Assembler aliases
//
// Memory barriers
def : InstAlias<"dmb", (DMB 0xf)>, Requires<[IsARM, HasDB]>;
def : InstAlias<"dsb", (DSB 0xf)>, Requires<[IsARM, HasDB]>;
def : InstAlias<"isb", (ISB 0xf)>, Requires<[IsARM, HasDB]>;
// System instructions
def : MnemonicAlias<"swi", "svc">;
// Load / Store Multiple
def : MnemonicAlias<"ldmfd", "ldm">;
def : MnemonicAlias<"ldmia", "ldm">;
def : MnemonicAlias<"ldmea", "ldmdb">;
def : MnemonicAlias<"stmfd", "stmdb">;
def : MnemonicAlias<"stmia", "stm">;
def : MnemonicAlias<"stmea", "stm">;
// PKHBT/PKHTB with default shift amount. PKHTB is equivalent to PKHBT with the
// operands swapped when the shift amount is zero (i.e., unspecified).
def : InstAlias<"pkhbt${p} $Rd, $Rn, $Rm",
(PKHBT GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, 0, pred:$p)>,
Requires<[IsARM, HasV6]>;
def : InstAlias<"pkhtb${p} $Rd, $Rn, $Rm",
(PKHBT GPRnopc:$Rd, GPRnopc:$Rm, GPRnopc:$Rn, 0, pred:$p)>,
Requires<[IsARM, HasV6]>;
// PUSH/POP aliases for STM/LDM
def : ARMInstAlias<"push${p} $regs", (STMDB_UPD SP, pred:$p, reglist:$regs)>;
def : ARMInstAlias<"pop${p} $regs", (LDMIA_UPD SP, pred:$p, reglist:$regs)>;
// SSAT/USAT optional shift operand.
def : ARMInstAlias<"ssat${p} $Rd, $sat_imm, $Rn",
(SSAT GPRnopc:$Rd, imm1_32:$sat_imm, GPRnopc:$Rn, 0, pred:$p)>;
def : ARMInstAlias<"usat${p} $Rd, $sat_imm, $Rn",
(USAT GPRnopc:$Rd, imm0_31:$sat_imm, GPRnopc:$Rn, 0, pred:$p)>;
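// e.g. "ssat r0, #8, r1" simply supplies the default "lsl #0" shift.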
// Extend instruction optional rotate operand.
def : ARMInstAlias<"sxtab${p} $Rd, $Rn, $Rm",
(SXTAB GPRnopc:$Rd, GPR:$Rn, GPRnopc:$Rm, 0, pred:$p)>;
def : ARMInstAlias<"sxtah${p} $Rd, $Rn, $Rm",
(SXTAH GPRnopc:$Rd, GPR:$Rn, GPRnopc:$Rm, 0, pred:$p)>;
def : ARMInstAlias<"sxtab16${p} $Rd, $Rn, $Rm",
(SXTAB16 GPRnopc:$Rd, GPR:$Rn, GPRnopc:$Rm, 0, pred:$p)>;
def : ARMInstAlias<"sxtb${p} $Rd, $Rm",
(SXTB GPRnopc:$Rd, GPRnopc:$Rm, 0, pred:$p)>;
def : ARMInstAlias<"sxtb16${p} $Rd, $Rm",
(SXTB16 GPRnopc:$Rd, GPRnopc:$Rm, 0, pred:$p)>;
def : ARMInstAlias<"sxth${p} $Rd, $Rm",
(SXTH GPRnopc:$Rd, GPRnopc:$Rm, 0, pred:$p)>;
def : ARMInstAlias<"uxtab${p} $Rd, $Rn, $Rm",
(UXTAB GPRnopc:$Rd, GPR:$Rn, GPRnopc:$Rm, 0, pred:$p)>;
def : ARMInstAlias<"uxtah${p} $Rd, $Rn, $Rm",
(UXTAH GPRnopc:$Rd, GPR:$Rn, GPRnopc:$Rm, 0, pred:$p)>;
def : ARMInstAlias<"uxtab16${p} $Rd, $Rn, $Rm",
(UXTAB16 GPRnopc:$Rd, GPR:$Rn, GPRnopc:$Rm, 0, pred:$p)>;
def : ARMInstAlias<"uxtb${p} $Rd, $Rm",
(UXTB GPRnopc:$Rd, GPRnopc:$Rm, 0, pred:$p)>;
def : ARMInstAlias<"uxtb16${p} $Rd, $Rm",
(UXTB16 GPRnopc:$Rd, GPRnopc:$Rm, 0, pred:$p)>;
def : ARMInstAlias<"uxth${p} $Rd, $Rm",
(UXTH GPRnopc:$Rd, GPRnopc:$Rm, 0, pred:$p)>;
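// e.g. "uxtb r0, r1" simply supplies a zero rotation for the underlying UXTB
// encoding, and likewise for the other extend mnemonics above.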
// RFE aliases
def : MnemonicAlias<"rfefa", "rfeda">;
def : MnemonicAlias<"rfeea", "rfedb">;
def : MnemonicAlias<"rfefd", "rfeia">;
def : MnemonicAlias<"rfeed", "rfeib">;
def : MnemonicAlias<"rfe", "rfeia">;
// SRS aliases
def : MnemonicAlias<"srsfa", "srsib">;
def : MnemonicAlias<"srsea", "srsia">;
def : MnemonicAlias<"srsfd", "srsdb">;
def : MnemonicAlias<"srsed", "srsda">;
def : MnemonicAlias<"srs", "srsia">;
// QSAX == QSUBADDX
def : MnemonicAlias<"qsubaddx", "qsax">;
// SASX == SADDSUBX
def : MnemonicAlias<"saddsubx", "sasx">;
// SHASX == SHADDSUBX
def : MnemonicAlias<"shaddsubx", "shasx">;
// SHSAX == SHSUBADDX
def : MnemonicAlias<"shsubaddx", "shsax">;
// SSAX == SSUBADDX
def : MnemonicAlias<"ssubaddx", "ssax">;
// UASX == UADDSUBX
def : MnemonicAlias<"uaddsubx", "uasx">;
// UHASX == UHADDSUBX
def : MnemonicAlias<"uhaddsubx", "uhasx">;
// UHSAX == UHSUBADDX
def : MnemonicAlias<"uhsubaddx", "uhsax">;
// UQASX == UQADDSUBX
def : MnemonicAlias<"uqaddsubx", "uqasx">;
// UQSAX == UQSUBADDX
def : MnemonicAlias<"uqsubaddx", "uqsax">;
// USAX == USUBADDX
def : MnemonicAlias<"usubaddx", "usax">;
// "mov Rd, so_imm_not" can be handled via "mvn" in assembly, just like
// for isel.
def : ARMInstAlias<"mov${s}${p} $Rd, $imm",
(MVNi rGPR:$Rd, so_imm_not:$imm, pred:$p, cc_out:$s)>;
def : ARMInstAlias<"mvn${s}${p} $Rd, $imm",
(MOVi rGPR:$Rd, so_imm_not:$imm, pred:$p, cc_out:$s)>;
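// e.g. "mov r0, #0xffffff00" (not a valid modified immediate, but its
// complement is) assembles as "mvn r0, #0xff", and vice versa.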
// Same for AND <--> BIC
def : ARMInstAlias<"bic${s}${p} $Rd, $Rn, $imm",
(ANDri rGPR:$Rd, rGPR:$Rn, so_imm_not:$imm,
pred:$p, cc_out:$s)>;
def : ARMInstAlias<"bic${s}${p} $Rdn, $imm",
(ANDri rGPR:$Rdn, rGPR:$Rdn, so_imm_not:$imm,
pred:$p, cc_out:$s)>;
def : ARMInstAlias<"and${s}${p} $Rd, $Rn, $imm",
(BICri rGPR:$Rd, rGPR:$Rn, so_imm_not:$imm,
pred:$p, cc_out:$s)>;
def : ARMInstAlias<"and${s}${p} $Rdn, $imm",
(BICri rGPR:$Rdn, rGPR:$Rdn, so_imm_not:$imm,
pred:$p, cc_out:$s)>;
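// e.g. "and r0, r1, #0xffffff00" assembles as "bic r0, r1, #0xff", and
// "bic r0, r1, #0xffffff00" as "and r0, r1, #0xff".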
// Likewise, "add Rd, so_imm_neg" -> sub
def : ARMInstAlias<"add${s}${p} $Rd, $Rn, $imm",
(SUBri GPR:$Rd, GPR:$Rn, so_imm_neg:$imm, pred:$p, cc_out:$s)>;
def : ARMInstAlias<"add${s}${p} $Rd, $imm",
(SUBri GPR:$Rd, GPR:$Rd, so_imm_neg:$imm, pred:$p, cc_out:$s)>;
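// e.g. "add r0, r1, #-8" assembles as "sub r0, r1, #8".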
// Same for CMP <--> CMN via so_imm_neg
def : ARMInstAlias<"cmp${p} $Rd, $imm",
(CMNri rGPR:$Rd, so_imm_neg:$imm, pred:$p)>;
def : ARMInstAlias<"cmn${p} $Rd, $imm",
(CMPri rGPR:$Rd, so_imm_neg:$imm, pred:$p)>;
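// e.g. "cmp r0, #-1" assembles as "cmn r0, #1", and "cmn r0, #-1" as
// "cmp r0, #1".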
// The shifter forms of the MOV instruction are aliased to the ASR, LSL,
// LSR, ROR, and RRX instructions.
// FIXME: We need C++ parser hooks to map the alias to the MOV
// encoding. It seems we should be able to do that sort of thing
// in tblgen, but it could get ugly.
let TwoOperandAliasConstraint = "$Rm = $Rd" in {
def ASRi : ARMAsmPseudo<"asr${s}${p} $Rd, $Rm, $imm",
(ins GPR:$Rd, GPR:$Rm, imm0_32:$imm, pred:$p,
cc_out:$s)>;
def LSRi : ARMAsmPseudo<"lsr${s}${p} $Rd, $Rm, $imm",
(ins GPR:$Rd, GPR:$Rm, imm0_32:$imm, pred:$p,
cc_out:$s)>;
def LSLi : ARMAsmPseudo<"lsl${s}${p} $Rd, $Rm, $imm",
(ins GPR:$Rd, GPR:$Rm, imm0_31:$imm, pred:$p,
cc_out:$s)>;
def RORi : ARMAsmPseudo<"ror${s}${p} $Rd, $Rm, $imm",
(ins GPR:$Rd, GPR:$Rm, imm0_31:$imm, pred:$p,
cc_out:$s)>;
}
def RRXi : ARMAsmPseudo<"rrx${s}${p} $Rd, $Rm",
(ins GPR:$Rd, GPR:$Rm, pred:$p, cc_out:$s)>;
let TwoOperandAliasConstraint = "$Rn = $Rd" in {
def ASRr : ARMAsmPseudo<"asr${s}${p} $Rd, $Rn, $Rm",
(ins GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
cc_out:$s)>;
def LSRr : ARMAsmPseudo<"lsr${s}${p} $Rd, $Rn, $Rm",
(ins GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
cc_out:$s)>;
def LSLr : ARMAsmPseudo<"lsl${s}${p} $Rd, $Rn, $Rm",
(ins GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
cc_out:$s)>;
def RORr : ARMAsmPseudo<"ror${s}${p} $Rd, $Rn, $Rm",
(ins GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
cc_out:$s)>;
}
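// e.g. "lsl r0, r1, #2" is the UAL spelling of "mov r0, r1, lsl #2",
// "asr r0, r1, r2" of "mov r0, r1, asr r2", and "rrx r0, r1" of
// "mov r0, r1, rrx".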
// "neg" is and alias for "rsb rd, rn, #0"
def : ARMInstAlias<"neg${s}${p} $Rd, $Rm",
(RSBri GPR:$Rd, GPR:$Rm, 0, pred:$p, cc_out:$s)>;
// Pre-v6, 'mov r0, r0' was used as a NOP encoding.
def : InstAlias<"nop${p}", (MOVr R0, R0, pred:$p, zero_reg)>,
Requires<[IsARM, NoV6]>;
// MUL/UMLAL/SMLAL/UMULL/SMULL are available on all arches, but
// the instruction definitions need different constraints pre-v6.
// Use these aliases for the assembly parsing on pre-v6.
def : InstAlias<"mul${s}${p} $Rd, $Rn, $Rm",
(MUL GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
def : InstAlias<"mla${s}${p} $Rd, $Rn, $Rm, $Ra",
(MLA GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra,
pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
def : InstAlias<"smlal${s}${p} $RdLo, $RdHi, $Rn, $Rm",
(SMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
def : InstAlias<"umlal${s}${p} $RdLo, $RdHi, $Rn, $Rm",
(UMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
def : InstAlias<"smull${s}${p} $RdLo, $RdHi, $Rn, $Rm",
(SMULL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
def : InstAlias<"umull${s}${p} $RdLo, $RdHi, $Rn, $Rm",
(UMULL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
// 'it' blocks in ARM mode just validate the predicates. The IT itself
// is discarded.
def ITasm : ARMAsmPseudo<"it$mask $cc", (ins it_pred:$cc, it_mask:$mask)>,
ComplexDeprecationPredicate<"IT">;