[ARM64] Re-work parsing of ADD/SUB shifted immediate operands

The parsing of ADD/SUB shifted immediates needs to be done explicitly so
that better diagnostics can be emitted. As a side effect, this also
removes some of the hacks in the current method of handling this operand
type.

Additionally, remove the manual CMP/CMN aliasing to ADDS/SUBS and use
InstAlias instead.
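
For illustration, a minimal standalone sketch of the implicit-shift rule
the new parser applies when no shift is written (hypothetical helper, not
code from this patch):

#include <cstdint>
#include <utility>

// Hypothetical helper mirroring tryParseAddSubImm's promotion of a bare
// constant: a value wider than 12 bits that is a multiple of 0x1000 is
// rewritten as (value >> 12) with an lsl #12 shift; anything else keeps
// lsl #0.
std::pair<int64_t, unsigned> splitAddSubImm(int64_t Val) {
  if (Val > 0xfff && (Val & 0xfff) == 0)
    return {Val >> 12, 12}; // e.g. #4096 is matched as #1, lsl #12
  return {Val, 0};          // e.g. #42 stays #42, lsl #0
}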


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@208329 91177308-0d34-0410-b5e6-96231b3b80d8
Bradley Smith 2014-05-08 15:39:58 +00:00
parent 4f83e2ca5d
commit 0d2a5d26f2
3 changed files with 210 additions and 130 deletions

lib/Target/ARM64/ARM64InstrFormats.td

@@ -102,12 +102,6 @@ def ArithmeticShifterOperand : AsmOperandClass {
let Name = "ArithmeticShifter";
}
// Shifter operand for arithmetic shifted encodings for ADD/SUB instructions.
def AddSubShifterOperand : AsmOperandClass {
let SuperClasses = [ArithmeticShifterOperand];
let Name = "AddSubShifter";
}
// Shifter operand for logical vector 128/64-bit shifted encodings.
def LogicalVecShifterOperand : AsmOperandClass {
let SuperClasses = [ShifterOperand];
@@ -556,18 +550,21 @@ def move_vec_shift : Operand<i32> {
let ParserMatchClass = MoveVecShifterOperand;
}
def AddSubImmOperand : AsmOperandClass {
let Name = "AddSubImm";
let ParserMethod = "tryParseAddSubImm";
let DiagnosticType = "AddSubSecondSource";
}
// An ADD/SUB immediate shifter operand:
// second operand:
// {7-6} - shift type: 00 = lsl
// {5-0} - imm6: #0 or #12
def addsub_shift : Operand<i32> {
let ParserMatchClass = AddSubShifterOperand;
}
class addsub_shifted_imm<ValueType Ty>
: Operand<Ty>, ComplexPattern<Ty, 2, "SelectArithImmed", [imm]> {
let PrintMethod = "printAddSubImm";
let EncoderMethod = "getAddSubImmOpValue";
let MIOperandInfo = (ops i32imm, addsub_shift);
let ParserMatchClass = AddSubImmOperand;
let MIOperandInfo = (ops i32imm, i32imm);
}
def addsub_shifted_imm32 : addsub_shifted_imm<i32>;
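
In the matched instruction the operand is now carried as a pair of
immediates, the 12-bit value and its shift amount, printed by
printAddSubImm and encoded by getAddSubImmOpValue. A hypothetical sketch
of such an encoder (an illustration only, not the actual
getAddSubImmOpValue), assuming the A64 ADD/SUB (immediate) layout with a
single 'sh' flag sitting above the 12-bit field:

#include <cassert>
#include <cstdint>

// Hypothetical: pack imm12 into bits 11:0 and sh (0 = lsl #0,
// 1 = lsl #12) into bit 12.
uint32_t encodeAddSubImm(uint32_t Imm12, unsigned ShiftAmount) {
  assert((ShiftAmount == 0 || ShiftAmount == 12) && "shift must be 0 or 12");
  assert(Imm12 <= 0xfff && "immediate must fit in 12 bits");
  return Imm12 | ((ShiftAmount == 12 ? 1u : 0u) << 12);
}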
@@ -577,7 +574,8 @@ class neg_addsub_shifted_imm<ValueType Ty>
: Operand<Ty>, ComplexPattern<Ty, 2, "SelectNegArithImmed", [imm]> {
let PrintMethod = "printAddSubImm";
let EncoderMethod = "getAddSubImmOpValue";
let MIOperandInfo = (ops i32imm, addsub_shift);
let ParserMatchClass = AddSubImmOperand;
let MIOperandInfo = (ops i32imm, i32imm);
}
def neg_addsub_shifted_imm32 : neg_addsub_shifted_imm<i32>;
@@ -1522,7 +1520,7 @@ multiclass AddSub<bit isSub, string mnemonic,
GPR64sp, GPR64sp, GPR64, 24>; // UXTX #0
}
multiclass AddSubS<bit isSub, string mnemonic, SDNode OpNode> {
multiclass AddSubS<bit isSub, string mnemonic, SDNode OpNode, string cmp> {
let isCompare = 1, Defs = [NZCV] in {
// Add/Subtract immediate
def Wri : BaseAddSubImm<isSub, 1, GPR32, GPR32sp, addsub_shifted_imm32,
@@ -1568,6 +1566,28 @@ multiclass AddSubS<bit isSub, string mnemonic, SDNode OpNode> {
}
} // Defs = [NZCV]
// Compare aliases
def : InstAlias<cmp#" $src, $imm", (!cast<Instruction>(NAME#"Wri")
WZR, GPR32sp:$src, addsub_shifted_imm32:$imm)>;
def : InstAlias<cmp#" $src, $imm", (!cast<Instruction>(NAME#"Xri")
XZR, GPR64sp:$src, addsub_shifted_imm64:$imm)>;
def : InstAlias<cmp#" $src1, $src2, $sh", (!cast<Instruction>(NAME#"Wrx")
WZR, GPR32sp:$src1, GPR32:$src2, arith_extend:$sh)>;
def : InstAlias<cmp#" $src1, $src2, $sh", (!cast<Instruction>(NAME#"Xrx")
XZR, GPR64sp:$src1, GPR32:$src2, arith_extend:$sh)>;
def : InstAlias<cmp#" $src1, $src2, $sh", (!cast<Instruction>(NAME#"Xrx64")
XZR, GPR64sp:$src1, GPR64:$src2, arith_extendlsl64:$sh)>;
def : InstAlias<cmp#" $src1, $src2, $sh", (!cast<Instruction>(NAME#"Wrs")
WZR, GPR32:$src1, GPR32:$src2, arith_shift:$sh)>;
def : InstAlias<cmp#" $src1, $src2, $sh", (!cast<Instruction>(NAME#"Xrs")
XZR, GPR64:$src1, GPR64:$src2, arith_shift:$sh)>;
// Compare shorthands
def : InstAlias<cmp#" $src1, $src2", (!cast<Instruction>(NAME#"Wrs")
WZR, GPR32:$src1, GPR32:$src2, 0)>;
def : InstAlias<cmp#" $src1, $src2", (!cast<Instruction>(NAME#"Xrs")
XZR, GPR64:$src1, GPR64:$src2, 0)>;
// Register/register aliases with no shift when SP is not used.
def : AddSubRegAlias<mnemonic, !cast<Instruction>(NAME#"Wrs"),
GPR32, GPR32, GPR32, 0>;
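
At the MC layer these aliases just supply the zero register as the
destination. A sketch of what 'cmp w0, #7' expands to, written as
illustrative MCInst construction (assumes an in-tree LLVM build for the
generated ARM64 opcode and register enums; not code from this patch):

#include "llvm/MC/MCInst.h"

// cmp w0, #7  ==>  SUBSWri wzr, w0, #7, lsl #0
llvm::MCInst makeCmpW0Imm7() {
  llvm::MCInst Inst;
  Inst.setOpcode(ARM64::SUBSWri);                          // subs
  Inst.addOperand(llvm::MCOperand::CreateReg(ARM64::WZR)); // dst: zero reg
  Inst.addOperand(llvm::MCOperand::CreateReg(ARM64::W0));  // first source
  Inst.addOperand(llvm::MCOperand::CreateImm(7));          // imm12
  Inst.addOperand(llvm::MCOperand::CreateImm(0));          // shift amount
  return Inst;
}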

lib/Target/ARM64/ARM64InstrInfo.td

@@ -474,8 +474,8 @@ def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;
defm ADD : AddSub<0, "add", add>;
defm SUB : AddSub<1, "sub">;
defm ADDS : AddSubS<0, "adds", ARM64add_flag>;
defm SUBS : AddSubS<1, "subs", ARM64sub_flag>;
defm ADDS : AddSubS<0, "adds", ARM64add_flag, "cmn">;
defm SUBS : AddSubS<1, "subs", ARM64sub_flag, "cmp">;
// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),

lib/Target/ARM64/AsmParser/ARM64AsmParser.cpp

@@ -96,6 +96,7 @@ private:
OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
bool tryParseVectorRegister(OperandVector &Operands);
public:
@@ -142,6 +143,7 @@ public:
private:
enum KindTy {
k_Immediate,
k_ShiftedImm,
k_Memory,
k_Register,
k_VectorList,
@@ -184,6 +186,11 @@ private:
const MCExpr *Val;
};
struct ShiftedImmOp {
const MCExpr *Val;
unsigned ShiftAmount;
};
struct FPImmOp {
unsigned Val; // Encoded 8-bit representation.
};
@@ -232,6 +239,7 @@ private:
struct VectorListOp VectorList;
struct VectorIndexOp VectorIndex;
struct ImmOp Imm;
struct ShiftedImmOp ShiftedImm;
struct FPImmOp FPImm;
struct BarrierOp Barrier;
struct SysRegOp SysReg;
@@ -261,6 +269,9 @@ public:
case k_Immediate:
Imm = o.Imm;
break;
case k_ShiftedImm:
ShiftedImm = o.ShiftedImm;
break;
case k_FPImm:
FPImm = o.FPImm;
break;
@@ -319,6 +330,16 @@ public:
return Imm.Val;
}
const MCExpr *getShiftedImmVal() const {
assert(Kind == k_ShiftedImm && "Invalid access!");
return ShiftedImm.Val;
}
unsigned getShiftedImmShift() const {
assert(Kind == k_ShiftedImm && "Invalid access!");
return ShiftedImm.ShiftAmount;
}
unsigned getFPImm() const {
assert(Kind == k_FPImm && "Invalid access!");
return FPImm.Val;
@@ -549,6 +570,45 @@ public:
return false;
return ARM64_AM::isLogicalImmediate(MCE->getValue(), 64);
}
bool isShiftedImm() const { return Kind == k_ShiftedImm; }
bool isAddSubImm() const {
if (!isShiftedImm() && !isImm())
return false;
const MCExpr *Expr;
// An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
if (isShiftedImm()) {
unsigned Shift = ShiftedImm.ShiftAmount;
Expr = ShiftedImm.Val;
if (Shift != 0 && Shift != 12)
return false;
} else {
Expr = getImm();
}
ARM64MCExpr::VariantKind ELFRefKind;
MCSymbolRefExpr::VariantKind DarwinRefKind;
int64_t Addend;
if (ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind,
DarwinRefKind, Addend)) {
return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
|| DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
|| (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
|| ELFRefKind == ARM64MCExpr::VK_LO12
|| ELFRefKind == ARM64MCExpr::VK_DTPREL_HI12
|| ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12
|| ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC
|| ELFRefKind == ARM64MCExpr::VK_TPREL_HI12
|| ELFRefKind == ARM64MCExpr::VK_TPREL_LO12
|| ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC
|| ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12;
}
// Otherwise it should be a real immediate in range:
const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
}
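
Ignoring the symbolic-reference cases above, the constant path of this
predicate condenses to a simple range check (hypothetical restatement,
not code from the patch):

#include <cstdint>

// A constant second source is valid iff the shift is lsl #0 or lsl #12
// and the value fits in an unsigned 12-bit field.
bool isValidConstAddSubImm(int64_t Imm, unsigned Shift) {
  return (Shift == 0 || Shift == 12) && Imm >= 0 && Imm <= 0xfff;
}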
bool isSIMDImmType10() const {
if (!isImm())
return false;
@@ -805,17 +865,6 @@ public:
return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
}
bool isAddSubShifter() const {
if (!isShifter())
return false;
// An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
unsigned Val = Shifter.Val;
return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
(ARM64_AM::getShiftValue(Val) == 0 ||
ARM64_AM::getShiftValue(Val) == 12);
}
bool isLogicalVecShifter() const {
if (!isShifter())
return false;
@@ -1132,6 +1181,17 @@ public:
addExpr(Inst, getImm());
}
void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
if (isShiftedImm()) {
addExpr(Inst, getShiftedImmVal());
Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
} else {
addExpr(Inst, getImm());
Inst.addOperand(MCOperand::CreateImm(0));
}
}
void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
@@ -1399,11 +1459,6 @@ public:
Inst.addOperand(MCOperand::CreateImm(getShifter()));
}
void addAddSubShifterOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateImm(getShifter()));
}
void addLogicalVecShifterOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateImm(getShifter()));
@@ -1664,6 +1719,16 @@ public:
return Op;
}
static ARM64Operand *CreateShiftedImm(const MCExpr *Val, unsigned ShiftAmount,
SMLoc S, SMLoc E, MCContext &Ctx) {
ARM64Operand *Op = new ARM64Operand(k_ShiftedImm, Ctx);
Op->ShiftedImm.Val = Val;
Op->ShiftedImm.ShiftAmount = ShiftAmount;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static ARM64Operand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
ARM64Operand *Op = new ARM64Operand(k_FPImm, Ctx);
Op->FPImm.Val = Val;
@@ -1781,6 +1846,14 @@ void ARM64Operand::print(raw_ostream &OS) const {
case k_Immediate:
getImm()->print(OS);
break;
case k_ShiftedImm: {
unsigned Shift = getShiftedImmShift();
OS << "<shiftedimm ";
getShiftedImmVal()->print(OS);
OS << ", " << ARM64_AM::getShiftName(ARM64_AM::getShiftType(Shift)) << " #"
<< ARM64_AM::getShiftValue(Shift) << ">";
break;
}
case k_Memory:
OS << "<memory>";
break;
@@ -2232,6 +2305,72 @@ ARM64AsmParser::tryParseFPImm(OperandVector &Operands) {
return MatchOperand_ParseFail;
}
/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
ARM64AsmParser::OperandMatchResultTy
ARM64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
SMLoc S = getLoc();
if (Parser.getTok().is(AsmToken::Hash))
Parser.Lex(); // Eat '#'
else if (Parser.getTok().isNot(AsmToken::Integer))
// The operand must start with '#' or be a bare integer; otherwise this
// is not a match for us.
return MatchOperand_NoMatch;
const MCExpr *Imm;
if (parseSymbolicImmVal(Imm))
return MatchOperand_ParseFail;
else if (Parser.getTok().isNot(AsmToken::Comma)) {
uint64_t ShiftAmount = 0;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
if (MCE) {
int64_t Val = MCE->getValue();
if (Val > 0xfff && (Val & 0xfff) == 0) {
Imm = MCConstantExpr::Create(Val >> 12, getContext());
ShiftAmount = 12;
}
}
SMLoc E = Parser.getTok().getLoc();
Operands.push_back(ARM64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
getContext()));
return MatchOperand_Success;
}
// Eat ','
Parser.Lex();
// The optional operand must be "lsl #N" where N is non-negative.
if (!Parser.getTok().is(AsmToken::Identifier) ||
!Parser.getTok().getIdentifier().equals_lower("lsl")) {
Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
return MatchOperand_ParseFail;
}
// Eat 'lsl'
Parser.Lex();
if (Parser.getTok().is(AsmToken::Hash)) {
Parser.Lex();
}
if (Parser.getTok().isNot(AsmToken::Integer)) {
Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
return MatchOperand_ParseFail;
}
int64_t ShiftAmount = Parser.getTok().getIntVal();
if (ShiftAmount < 0) {
Error(Parser.getTok().getLoc(), "positive shift amount required");
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat the number
SMLoc E = Parser.getTok().getLoc();
Operands.push_back(ARM64Operand::CreateShiftedImm(Imm, ShiftAmount,
S, E, getContext()));
return MatchOperand_Success;
}
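
A few concrete cases for the promotion above, written as a hypothetical
assert-style harness (self-contained; the splitter is restated from the
sketch near the top of this page):

#include <cassert>
#include <cstdint>
#include <utility>

std::pair<int64_t, unsigned> splitAddSubImm(int64_t Val) {
  if (Val > 0xfff && (Val & 0xfff) == 0)
    return {Val >> 12, 12};
  return {Val, 0};
}

int main() {
  assert(splitAddSubImm(42) == std::make_pair(int64_t(42), 0u));   // "#42"
  assert(splitAddSubImm(4096) == std::make_pair(int64_t(1), 12u)); // "#4096" -> #1, lsl #12
  // "#4097" is left unsplit; isAddSubImm then rejects it as out of range,
  // which is where the new AddSubSecondSource diagnostic fires.
  assert(splitAddSubImm(4097) == std::make_pair(int64_t(4097), 0u));
  return 0;
}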
/// parseCondCodeString - Parse a Condition Code string.
unsigned ARM64AsmParser::parseCondCodeString(StringRef Cond) {
unsigned CC = StringSwitch<unsigned>(Cond.lower())
@@ -2301,7 +2440,7 @@ ARM64AsmParser::tryParseOptionalShift(OperandVector &Operands) {
// We expect a number here.
bool Hash = getLexer().is(AsmToken::Hash);
if (!Hash && getLexer().isNot(AsmToken::Integer)) {
TokError("immediate value expected for shifter operand");
TokError("expected #imm after shift specifier");
return MatchOperand_ParseFail;
}
@@ -2322,7 +2461,7 @@ ARM64AsmParser::tryParseOptionalShift(OperandVector &Operands) {
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
if (!MCE) {
TokError("immediate value expected for shifter operand");
TokError("expected #imm after shift specifier");
return MatchOperand_ParseFail;
}
@@ -3550,19 +3689,9 @@ bool ARM64AsmParser::validateInstruction(MCInst &Inst,
case ARM64::SUBSXri:
case ARM64::SUBWri:
case ARM64::SUBXri: {
if (!Inst.getOperand(3).isImm())
return Error(Loc[3], "immediate value expected");
int64_t shifter = Inst.getOperand(3).getImm();
if (shifter != 0 && shifter != 12)
return Error(Loc[3], "shift value out of range");
// The imm12 operand can be an expression. Validate that it's legit.
// FIXME: We really, really want to allow arbitrary expressions here
// and resolve the value and validate the result at fixup time, but
// that's hard as we have long since lost any source information we
// need to generate good diagnostics by that point.
if ((Inst.getOpcode() == ARM64::ADDXri ||
Inst.getOpcode() == ARM64::ADDWri) &&
Inst.getOperand(2).isExpr()) {
// Annoyingly we can't do this in the isAddSubImm predicate, so there is
// some slight duplication here.
if (Inst.getOperand(2).isExpr()) {
const MCExpr *Expr = Inst.getOperand(2).getExpr();
ARM64MCExpr::VariantKind ELFRefKind;
MCSymbolRefExpr::VariantKind DarwinRefKind;
@@ -3571,38 +3700,28 @@ bool ARM64AsmParser::validateInstruction(MCInst &Inst,
return Error(Loc[2], "invalid immediate expression");
}
// Note that we don't range-check the addend. It's adjusted modulo page
// size when converted, so there is no "out of range" condition when using
// @pageoff. Any validity checking for the value was done in the is*()
// predicate function.
// Only allow these with ADDXri.
if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
Inst.getOpcode() == ARM64::ADDXri)
return false;
if (ELFRefKind == ARM64MCExpr::VK_LO12 ||
// Only allow these with ADDXri/ADDWri
if ((ELFRefKind == ARM64MCExpr::VK_LO12 ||
ELFRefKind == ARM64MCExpr::VK_DTPREL_HI12 ||
ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
ELFRefKind == ARM64MCExpr::VK_TPREL_HI12 ||
ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) &&
(Inst.getOpcode() == ARM64::ADDXri ||
Inst.getOpcode() == ARM64::ADDWri))
return false;
} else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF) {
// @gotpageoff can only be used directly, not with an addend.
return Addend != 0;
}
// Otherwise, we're not sure, so don't allow it for now.
// Don't allow expressions in the immediate field otherwise
return Error(Loc[2], "invalid immediate expression");
}
// If it's anything but an immediate, it's not legit.
if (!Inst.getOperand(2).isImm())
return Error(Loc[2], "invalid immediate expression");
int64_t imm = Inst.getOperand(2).getImm();
if (imm > 4095 || imm < 0)
return Error(Loc[2], "immediate value out of range");
return false;
}
case ARM64::LDRBpre:
@@ -3834,10 +3953,8 @@ static void rewriteMOVRSP(ARM64AsmParser::OperandVector &Operands,
ARM64Operand::CreateToken("add", false, Op->getStartLoc(), Context);
const MCExpr *Imm = MCConstantExpr::Create(0, Context);
Operands.push_back(ARM64Operand::CreateImm(Imm, Op2->getStartLoc(),
Operands.push_back(ARM64Operand::CreateShiftedImm(Imm, 0, Op2->getStartLoc(),
Op2->getEndLoc(), Context));
Operands.push_back(ARM64Operand::CreateShifter(
ARM64_AM::LSL, 0, Op2->getStartLoc(), Op2->getEndLoc(), Context));
delete Op;
}
@@ -3879,6 +3996,9 @@ bool ARM64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
case Match_AddSubRegExtendLarge:
return Error(Loc,
"expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
case Match_AddSubSecondSource:
return Error(Loc,
"expected compatible register, symbol or integer in range [0, 4095]");
case Match_InvalidMemoryIndexedSImm9:
return Error(Loc, "index must be an integer in range [-256, 255].");
case Match_InvalidMemoryIndexed32SImm7:
@@ -3947,35 +4067,6 @@ bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
assert(Op->isToken() && "Leading operand should always be a mnemonic!");
StringRef Tok = Op->getToken();
// Translate CMN/CMP pseudos to ADDS/SUBS with zero register destination.
// This needs to be done before the special handling of ADD/SUB immediates.
if (Tok == "cmp" || Tok == "cmn") {
// Replace the opcode with either ADDS or SUBS.
const char *Repl = StringSwitch<const char *>(Tok)
.Case("cmp", "subs")
.Case("cmn", "adds")
.Default(nullptr);
assert(Repl && "Unknown compare instruction");
delete Operands[0];
Operands[0] = ARM64Operand::CreateToken(Repl, false, IDLoc, getContext());
// Insert WZR or XZR as destination operand.
ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
unsigned ZeroReg;
if (RegOp->isReg() &&
ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
RegOp->getReg()))
ZeroReg = ARM64::WZR;
else
ZeroReg = ARM64::XZR;
Operands.insert(
Operands.begin() + 1,
ARM64Operand::CreateReg(ZeroReg, false, IDLoc, IDLoc, getContext()));
// Update since we modified it above.
ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
Tok = Op->getToken();
}
unsigned NumOperands = Operands.size();
if (Tok == "mov" && NumOperands == 3) {
@@ -4050,39 +4141,7 @@ bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
rewriteMOVR(Operands, getContext());
}
} else if (NumOperands == 4) {
if (Tok == "add" || Tok == "adds" || Tok == "sub" || Tok == "subs") {
// Handle the uimm24 immediate form, where the shift is not specified.
ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
if (Op3->isImm()) {
if (const MCConstantExpr *CE =
dyn_cast<MCConstantExpr>(Op3->getImm())) {
uint64_t Val = CE->getValue();
if (Val >= (1 << 24)) {
Error(IDLoc, "immediate value is too large");
return true;
}
if (Val < (1 << 12)) {
Operands.push_back(ARM64Operand::CreateShifter(
ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
} else if ((Val & 0xfff) == 0) {
delete Operands[3];
CE = MCConstantExpr::Create(Val >> 12, getContext());
Operands[3] =
ARM64Operand::CreateImm(CE, IDLoc, IDLoc, getContext());
Operands.push_back(ARM64Operand::CreateShifter(
ARM64_AM::LSL, 12, IDLoc, IDLoc, getContext()));
} else {
Error(IDLoc, "immediate value is too large");
return true;
}
} else {
Operands.push_back(ARM64Operand::CreateShifter(
ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
}
}
// FIXME: Horrible hack to handle the LSL -> UBFM alias.
} else if (NumOperands == 4 && Tok == "lsl") {
if (NumOperands == 4 && Tok == "lsl") {
ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
if (Op2->isReg() && Op3->isImm()) {
@@ -4457,6 +4516,7 @@ bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
// FALL THROUGH
case Match_AddSubRegExtendSmall:
case Match_AddSubRegExtendLarge:
case Match_AddSubSecondSource:
case Match_InvalidMemoryIndexed8:
case Match_InvalidMemoryIndexed16:
case Match_InvalidMemoryIndexed32SImm7: