ARM64: separate load/store operands to simplify assembler

This changes ARM64 to use separate operands for each component of an
address, and look for separate '[', '$Rn, ..., ']' tokens when
parsing.

This allows us to do away with quite a bit of special C++ code to
handle monolithic "addressing modes" in the MC components. The more
incremental matching of the assembler operands also allows for better
diagnostics when LLVM is presented with invalid input.

Most of the complexity here is with the register-offset instructions,
which were extremely dodgy beforehand: even when the instruction used
wM, LLVM's model had xM as an operand. We papered over this
discrepancy before, but that approach doesn't work now so I split them
into separate X and W variants.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@209425 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Tim Northover 2014-05-22 11:56:09 +00:00
parent b08e03806f
commit e072ed71c8
15 changed files with 2529 additions and 2694 deletions

View File

@ -111,27 +111,18 @@ public:
return SelectAddrModeUnscaled(N, 16, Base, OffImm);
}
bool SelectAddrModeRO8(SDValue N, SDValue &Base, SDValue &Offset,
SDValue &Imm) {
return SelectAddrModeRO(N, 1, Base, Offset, Imm);
template<int Width>
bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
SDValue &SignExtend, SDValue &DoShift) {
return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
}
bool SelectAddrModeRO16(SDValue N, SDValue &Base, SDValue &Offset,
SDValue &Imm) {
return SelectAddrModeRO(N, 2, Base, Offset, Imm);
template<int Width>
bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
SDValue &SignExtend, SDValue &DoShift) {
return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
}
bool SelectAddrModeRO32(SDValue N, SDValue &Base, SDValue &Offset,
SDValue &Imm) {
return SelectAddrModeRO(N, 4, Base, Offset, Imm);
}
bool SelectAddrModeRO64(SDValue N, SDValue &Base, SDValue &Offset,
SDValue &Imm) {
return SelectAddrModeRO(N, 8, Base, Offset, Imm);
}
bool SelectAddrModeRO128(SDValue N, SDValue &Base, SDValue &Offset,
SDValue &Imm) {
return SelectAddrModeRO(N, 16, Base, Offset, Imm);
}
bool SelectAddrModeNoIndex(SDValue N, SDValue &Val);
/// Form sequences of consecutive 64/128-bit registers for use in NEON
/// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
@ -179,11 +170,15 @@ private:
SDValue &OffImm);
bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
SDValue &OffImm);
bool SelectAddrModeRO(SDValue N, unsigned Size, SDValue &Base,
SDValue &Offset, SDValue &Imm);
bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
SDValue &Offset, SDValue &SignExtend,
SDValue &DoShift);
bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
SDValue &Offset, SDValue &SignExtend,
SDValue &DoShift);
bool isWorthFolding(SDValue V) const;
bool SelectExtendedSHL(SDValue N, unsigned Size, SDValue &Offset,
SDValue &Imm);
bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
SDValue &Offset, SDValue &SignExtend);
template<unsigned RegWidth>
bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
@ -219,14 +214,6 @@ static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
isIntImmediate(N->getOperand(1).getNode(), Imm);
}
bool ARM64DAGToDAGISel::SelectAddrModeNoIndex(SDValue N, SDValue &Val) {
  // A bare, unindexed address is acceptable only as a 64-bit pointer value;
  // anything else cannot be used directly as a base register.
  if (N.getValueType() != MVT::i64)
    return false;
  Val = N;
  return true;
}
bool ARM64DAGToDAGISel::SelectInlineAsmMemoryOperand(
const SDValue &Op, char ConstraintCode, std::vector<SDValue> &OutOps) {
assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
@ -563,8 +550,8 @@ bool ARM64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
// if we're folding a (sext i8), we need the RHS to be a GPR32, even though
// there might not be an actual 32-bit value in the program. We can
// (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
if (Reg.getValueType() == MVT::i64 && Ext != ARM64_AM::UXTX &&
Ext != ARM64_AM::SXTX) {
assert(Ext != ARM64_AM::UXTX && Ext != ARM64_AM::SXTX);
if (Reg.getValueType() == MVT::i64) {
SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
MachineSDNode *Node = CurDAG->getMachineNode(
TargetOpcode::EXTRACT_SUBREG, SDLoc(N), MVT::i32, Reg, SubReg);
@ -675,47 +662,44 @@ static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
return SDValue(Node, 0);
}
/// Promote a 32-bit value to 64 bits via Widen; any other type is passed
/// through untouched.
static SDValue WidenIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() != MVT::i32)
    return N;
  return Widen(CurDAG, N);
}
/// \brief Check if the given SHL node (\p N), can be used to form an
/// extended register for an addressing mode.
bool ARM64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
SDValue &Offset, SDValue &Imm) {
bool WantExtend, SDValue &Offset,
SDValue &SignExtend) {
assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (CSD && (CSD->getZExtValue() & 0x7) == CSD->getZExtValue()) {
if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
return false;
if (WantExtend) {
ARM64_AM::ShiftExtendType Ext = getExtendTypeForNode(N.getOperand(0), true);
if (Ext == ARM64_AM::InvalidShiftExtend) {
Ext = ARM64_AM::UXTX;
Offset = WidenIfNeeded(CurDAG, N.getOperand(0));
} else {
Offset = WidenIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
}
unsigned LegalShiftVal = Log2_32(Size);
unsigned ShiftVal = CSD->getZExtValue();
if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
if (Ext == ARM64_AM::InvalidShiftExtend)
return false;
Imm = CurDAG->getTargetConstant(
ARM64_AM::getMemExtendImm(Ext, ShiftVal != 0), MVT::i32);
if (isWorthFolding(N))
return true;
Offset = N.getOperand(0).getOperand(0);
SignExtend = CurDAG->getTargetConstant(Ext == ARM64_AM::SXTW, MVT::i32);
} else {
Offset = N.getOperand(0);
SignExtend = CurDAG->getTargetConstant(0, MVT::i32);
}
unsigned LegalShiftVal = Log2_32(Size);
unsigned ShiftVal = CSD->getZExtValue();
if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
return false;
if (isWorthFolding(N))
return true;
return false;
}
bool ARM64DAGToDAGISel::SelectAddrModeRO(SDValue N, unsigned Size,
bool ARM64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
SDValue &Base, SDValue &Offset,
SDValue &Imm) {
SDValue &SignExtend,
SDValue &DoShift) {
if (N.getOpcode() != ISD::ADD)
return false;
SDValue LHS = N.getOperand(0);
@ -740,26 +724,30 @@ bool ARM64DAGToDAGISel::SelectAddrModeRO(SDValue N, unsigned Size,
// Try to match a shifted extend on the RHS.
if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
SelectExtendedSHL(RHS, Size, Offset, Imm)) {
SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
Base = LHS;
DoShift = CurDAG->getTargetConstant(true, MVT::i32);
return true;
}
// Try to match a shifted extend on the LHS.
if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
SelectExtendedSHL(LHS, Size, Offset, Imm)) {
SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
Base = RHS;
DoShift = CurDAG->getTargetConstant(true, MVT::i32);
return true;
}
ARM64_AM::ShiftExtendType Ext = ARM64_AM::UXTX;
// There was no shift, whatever else we find.
DoShift = CurDAG->getTargetConstant(false, MVT::i32);
ARM64_AM::ShiftExtendType Ext = ARM64_AM::InvalidShiftExtend;
// Try to match an unshifted extend on the LHS.
if (IsExtendedRegisterWorthFolding &&
(Ext = getExtendTypeForNode(LHS, true)) != ARM64_AM::InvalidShiftExtend) {
Base = RHS;
Offset = WidenIfNeeded(CurDAG, LHS.getOperand(0));
Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
MVT::i32);
Offset = LHS.getOperand(0);
SignExtend = CurDAG->getTargetConstant(Ext == ARM64_AM::SXTW, MVT::i32);
if (isWorthFolding(LHS))
return true;
}
@ -768,19 +756,62 @@ bool ARM64DAGToDAGISel::SelectAddrModeRO(SDValue N, unsigned Size,
if (IsExtendedRegisterWorthFolding &&
(Ext = getExtendTypeForNode(RHS, true)) != ARM64_AM::InvalidShiftExtend) {
Base = LHS;
Offset = WidenIfNeeded(CurDAG, RHS.getOperand(0));
Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
MVT::i32);
Offset = RHS.getOperand(0);
SignExtend = CurDAG->getTargetConstant(Ext == ARM64_AM::SXTW, MVT::i32);
if (isWorthFolding(RHS))
return true;
}
return false;
}
bool ARM64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
SDValue &Base, SDValue &Offset,
SDValue &SignExtend,
SDValue &DoShift) {
if (N.getOpcode() != ISD::ADD)
return false;
SDValue LHS = N.getOperand(0);
SDValue RHS = N.getOperand(1);
// We don't want to match immediate adds here, because they are better lowered
// to the register-immediate addressing modes.
if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
return false;
// Check if this particular node is reused in any non-memory related
// operation. If yes, do not try to fold this node into the address
// computation, since the computation will be kept.
const SDNode *Node = N.getNode();
for (SDNode *UI : Node->uses()) {
if (!isa<MemSDNode>(*UI))
return false;
}
// Remember if it is worth folding N when it produces extended register.
bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
// Try to match a shifted extend on the RHS.
if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
Base = LHS;
DoShift = CurDAG->getTargetConstant(true, MVT::i32);
return true;
}
// Try to match a shifted extend on the LHS.
if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
Base = RHS;
DoShift = CurDAG->getTargetConstant(true, MVT::i32);
return true;
}
// Match any non-shifted, non-extend, non-immediate add expression.
Base = LHS;
Offset = WidenIfNeeded(CurDAG, RHS);
Ext = ARM64_AM::UXTX;
Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
MVT::i32);
Offset = RHS;
SignExtend = CurDAG->getTargetConstant(false, MVT::i32);
DoShift = CurDAG->getTargetConstant(false, MVT::i32);
// Reg1 + Reg2 is free: no check needed.
return true;
}

View File

@ -43,39 +43,63 @@ class relaxed_load<PatFrag base>
// 8-bit loads
def : Pat<(acquiring_load<atomic_load_8> GPR64sp:$ptr), (LDARB GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_8> ro_indexed8:$addr),
(LDRBBro ro_indexed8:$addr)>;
def : Pat<(relaxed_load<atomic_load_8> am_indexed8:$addr),
(LDRBBui am_indexed8:$addr)>;
def : Pat<(relaxed_load<atomic_load_8> am_unscaled8:$addr),
(LDURBBi am_unscaled8:$addr)>;
def : Pat<(relaxed_load<atomic_load_8> (ro_Windexed8 GPR64sp:$Rn, GPR32:$Rm,
ro_Wextend8:$offset)),
(LDRBBroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$offset)>;
def : Pat<(relaxed_load<atomic_load_8> (ro_Xindexed8 GPR64sp:$Rn, GPR64:$Rm,
ro_Xextend8:$offset)),
(LDRBBroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$offset)>;
def : Pat<(relaxed_load<atomic_load_8> (am_indexed8 GPR64sp:$Rn,
uimm12s1:$offset)),
(LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(relaxed_load<atomic_load_8>
(am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
(LDURBBi GPR64sp:$Rn, simm9:$offset)>;
// 16-bit loads
def : Pat<(acquiring_load<atomic_load_16> GPR64sp:$ptr), (LDARH GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_16> ro_indexed16:$addr),
(LDRHHro ro_indexed16:$addr)>;
def : Pat<(relaxed_load<atomic_load_16> am_indexed16:$addr),
(LDRHHui am_indexed16:$addr)>;
def : Pat<(relaxed_load<atomic_load_16> am_unscaled16:$addr),
(LDURHHi am_unscaled16:$addr)>;
def : Pat<(relaxed_load<atomic_load_16> (ro_Windexed16 GPR64sp:$Rn, GPR32:$Rm,
ro_Wextend16:$extend)),
(LDRHHroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend16:$extend)>;
def : Pat<(relaxed_load<atomic_load_16> (ro_Xindexed16 GPR64sp:$Rn, GPR64:$Rm,
ro_Xextend16:$extend)),
(LDRHHroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend16:$extend)>;
def : Pat<(relaxed_load<atomic_load_16> (am_indexed16 GPR64sp:$Rn,
uimm12s2:$offset)),
(LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(relaxed_load<atomic_load_16>
(am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
(LDURHHi GPR64sp:$Rn, simm9:$offset)>;
// 32-bit loads
def : Pat<(acquiring_load<atomic_load_32> GPR64sp:$ptr), (LDARW GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_32> ro_indexed32:$addr),
(LDRWro ro_indexed32:$addr)>;
def : Pat<(relaxed_load<atomic_load_32> am_indexed32:$addr),
(LDRWui am_indexed32:$addr)>;
def : Pat<(relaxed_load<atomic_load_32> am_unscaled32:$addr),
(LDURWi am_unscaled32:$addr)>;
def : Pat<(relaxed_load<atomic_load_32> (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
ro_Wextend32:$extend)),
(LDRWroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend)>;
def : Pat<(relaxed_load<atomic_load_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
ro_Xextend32:$extend)),
(LDRWroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>;
def : Pat<(relaxed_load<atomic_load_32> (am_indexed32 GPR64sp:$Rn,
uimm12s4:$offset)),
(LDRWui GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(relaxed_load<atomic_load_32>
(am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
(LDURWi GPR64sp:$Rn, simm9:$offset)>;
// 64-bit loads
def : Pat<(acquiring_load<atomic_load_64> GPR64sp:$ptr), (LDARX GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_64> ro_indexed64:$addr),
(LDRXro ro_indexed64:$addr)>;
def : Pat<(relaxed_load<atomic_load_64> am_indexed64:$addr),
(LDRXui am_indexed64:$addr)>;
def : Pat<(relaxed_load<atomic_load_64> am_unscaled64:$addr),
(LDURXi am_unscaled64:$addr)>;
def : Pat<(relaxed_load<atomic_load_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
ro_Wextend64:$extend)),
(LDRXroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
def : Pat<(relaxed_load<atomic_load_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
ro_Xextend64:$extend)),
(LDRXroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
def : Pat<(relaxed_load<atomic_load_64> (am_indexed64 GPR64sp:$Rn,
uimm12s8:$offset)),
(LDRXui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(relaxed_load<atomic_load_64>
(am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
(LDURXi GPR64sp:$Rn, simm9:$offset)>;
//===----------------------------------
// Atomic stores
@ -103,42 +127,74 @@ class relaxed_store<PatFrag base>
// 8-bit stores
def : Pat<(releasing_store<atomic_store_8> GPR64sp:$ptr, GPR32:$val),
(STLRB GPR32:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_8> ro_indexed8:$ptr, GPR32:$val),
(STRBBro GPR32:$val, ro_indexed8:$ptr)>;
def : Pat<(relaxed_store<atomic_store_8> am_indexed8:$ptr, GPR32:$val),
(STRBBui GPR32:$val, am_indexed8:$ptr)>;
def : Pat<(relaxed_store<atomic_store_8> am_unscaled8:$ptr, GPR32:$val),
(STURBBi GPR32:$val, am_unscaled8:$ptr)>;
def : Pat<(relaxed_store<atomic_store_8>
(ro_Windexed8 GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$extend),
GPR32:$val),
(STRBBroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$extend)>;
def : Pat<(relaxed_store<atomic_store_8>
(ro_Xindexed8 GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$extend),
GPR32:$val),
(STRBBroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$extend)>;
def : Pat<(relaxed_store<atomic_store_8>
(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset), GPR32:$val),
(STRBBui GPR32:$val, GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(relaxed_store<atomic_store_8>
(am_unscaled8 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
(STURBBi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
// 16-bit stores
def : Pat<(releasing_store<atomic_store_16> GPR64sp:$ptr, GPR32:$val),
(STLRH GPR32:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_16> ro_indexed16:$ptr, GPR32:$val),
(STRHHro GPR32:$val, ro_indexed16:$ptr)>;
def : Pat<(relaxed_store<atomic_store_16> am_indexed16:$ptr, GPR32:$val),
(STRHHui GPR32:$val, am_indexed16:$ptr)>;
def : Pat<(relaxed_store<atomic_store_16> am_unscaled16:$ptr, GPR32:$val),
(STURHHi GPR32:$val, am_unscaled16:$ptr)>;
def : Pat<(relaxed_store<atomic_store_16> (ro_Windexed16 GPR64sp:$Rn, GPR32:$Rm,
ro_Wextend16:$extend),
GPR32:$val),
(STRHHroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend16:$extend)>;
def : Pat<(relaxed_store<atomic_store_16> (ro_Xindexed16 GPR64sp:$Rn, GPR64:$Rm,
ro_Xextend16:$extend),
GPR32:$val),
(STRHHroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend16:$extend)>;
def : Pat<(relaxed_store<atomic_store_16>
(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset), GPR32:$val),
(STRHHui GPR32:$val, GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(relaxed_store<atomic_store_16>
(am_unscaled16 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
(STURHHi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
// 32-bit stores
def : Pat<(releasing_store<atomic_store_32> GPR64sp:$ptr, GPR32:$val),
(STLRW GPR32:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_32> ro_indexed32:$ptr, GPR32:$val),
(STRWro GPR32:$val, ro_indexed32:$ptr)>;
def : Pat<(relaxed_store<atomic_store_32> am_indexed32:$ptr, GPR32:$val),
(STRWui GPR32:$val, am_indexed32:$ptr)>;
def : Pat<(relaxed_store<atomic_store_32> am_unscaled32:$ptr, GPR32:$val),
(STURWi GPR32:$val, am_unscaled32:$ptr)>;
def : Pat<(relaxed_store<atomic_store_32> (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
ro_Wextend32:$extend),
GPR32:$val),
(STRWroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend)>;
def : Pat<(relaxed_store<atomic_store_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
ro_Xextend32:$extend),
GPR32:$val),
(STRWroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>;
def : Pat<(relaxed_store<atomic_store_32>
(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset), GPR32:$val),
(STRWui GPR32:$val, GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(relaxed_store<atomic_store_32>
(am_unscaled32 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
(STURWi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
// 64-bit stores
def : Pat<(releasing_store<atomic_store_64> GPR64sp:$ptr, GPR64:$val),
(STLRX GPR64:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_64> ro_indexed64:$ptr, GPR64:$val),
(STRXro GPR64:$val, ro_indexed64:$ptr)>;
def : Pat<(relaxed_store<atomic_store_64> am_indexed64:$ptr, GPR64:$val),
(STRXui GPR64:$val, am_indexed64:$ptr)>;
def : Pat<(relaxed_store<atomic_store_64> am_unscaled64:$ptr, GPR64:$val),
(STURXi GPR64:$val, am_unscaled64:$ptr)>;
// 64-bit register-offset atomic stores. NOTE: the source pattern must use the
// 64-bit extend operand (scale #3) to match the operand rendered into
// STRXroW/STRXroX; the previous ro_Wextend16/ro_Xextend16 here was a
// copy-paste from the 16-bit patterns and left $extend unbound/mismatched.
def : Pat<(relaxed_store<atomic_store_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                           ro_Wextend64:$extend),
          GPR64:$val),
          (STRXroW GPR64:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
def : Pat<(relaxed_store<atomic_store_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                           ro_Xextend64:$extend),
          GPR64:$val),
          (STRXroX GPR64:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
def : Pat<(relaxed_store<atomic_store_64>
(am_indexed64 GPR64sp:$Rn, uimm12s8:$offset), GPR64:$val),
(STRXui GPR64:$val, GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(relaxed_store<atomic_store_64>
(am_unscaled64 GPR64sp:$Rn, simm9:$offset), GPR64:$val),
(STURXi GPR64:$val, GPR64sp:$Rn, simm9:$offset)>;
//===----------------------------------
// Low-level exclusive operations
@ -162,20 +218,20 @@ def ldxr_8 : PatFrag<(ops node:$ptr), (int_arm64_ldxr node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;
def : Pat<(ldxr_1 am_noindex:$addr),
(SUBREG_TO_REG (i64 0), (LDXRB am_noindex:$addr), sub_32)>;
def : Pat<(ldxr_2 am_noindex:$addr),
(SUBREG_TO_REG (i64 0), (LDXRH am_noindex:$addr), sub_32)>;
def : Pat<(ldxr_4 am_noindex:$addr),
(SUBREG_TO_REG (i64 0), (LDXRW am_noindex:$addr), sub_32)>;
def : Pat<(ldxr_8 am_noindex:$addr), (LDXRX am_noindex:$addr)>;
def : Pat<(ldxr_1 GPR64sp:$addr),
(SUBREG_TO_REG (i64 0), (LDXRB GPR64sp:$addr), sub_32)>;
def : Pat<(ldxr_2 GPR64sp:$addr),
(SUBREG_TO_REG (i64 0), (LDXRH GPR64sp:$addr), sub_32)>;
def : Pat<(ldxr_4 GPR64sp:$addr),
(SUBREG_TO_REG (i64 0), (LDXRW GPR64sp:$addr), sub_32)>;
def : Pat<(ldxr_8 GPR64sp:$addr), (LDXRX GPR64sp:$addr)>;
def : Pat<(and (ldxr_1 am_noindex:$addr), 0xff),
(SUBREG_TO_REG (i64 0), (LDXRB am_noindex:$addr), sub_32)>;
def : Pat<(and (ldxr_2 am_noindex:$addr), 0xffff),
(SUBREG_TO_REG (i64 0), (LDXRH am_noindex:$addr), sub_32)>;
def : Pat<(and (ldxr_4 am_noindex:$addr), 0xffffffff),
(SUBREG_TO_REG (i64 0), (LDXRW am_noindex:$addr), sub_32)>;
def : Pat<(and (ldxr_1 GPR64sp:$addr), 0xff),
(SUBREG_TO_REG (i64 0), (LDXRB GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldxr_2 GPR64sp:$addr), 0xffff),
(SUBREG_TO_REG (i64 0), (LDXRH GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldxr_4 GPR64sp:$addr), 0xffffffff),
(SUBREG_TO_REG (i64 0), (LDXRW GPR64sp:$addr), sub_32)>;
// Load-exclusives.
@ -195,20 +251,20 @@ def ldaxr_8 : PatFrag<(ops node:$ptr), (int_arm64_ldaxr node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;
def : Pat<(ldaxr_1 am_noindex:$addr),
(SUBREG_TO_REG (i64 0), (LDAXRB am_noindex:$addr), sub_32)>;
def : Pat<(ldaxr_2 am_noindex:$addr),
(SUBREG_TO_REG (i64 0), (LDAXRH am_noindex:$addr), sub_32)>;
def : Pat<(ldaxr_4 am_noindex:$addr),
(SUBREG_TO_REG (i64 0), (LDAXRW am_noindex:$addr), sub_32)>;
def : Pat<(ldaxr_8 am_noindex:$addr), (LDAXRX am_noindex:$addr)>;
def : Pat<(ldaxr_1 GPR64sp:$addr),
(SUBREG_TO_REG (i64 0), (LDAXRB GPR64sp:$addr), sub_32)>;
def : Pat<(ldaxr_2 GPR64sp:$addr),
(SUBREG_TO_REG (i64 0), (LDAXRH GPR64sp:$addr), sub_32)>;
def : Pat<(ldaxr_4 GPR64sp:$addr),
(SUBREG_TO_REG (i64 0), (LDAXRW GPR64sp:$addr), sub_32)>;
def : Pat<(ldaxr_8 GPR64sp:$addr), (LDAXRX GPR64sp:$addr)>;
def : Pat<(and (ldaxr_1 am_noindex:$addr), 0xff),
(SUBREG_TO_REG (i64 0), (LDAXRB am_noindex:$addr), sub_32)>;
def : Pat<(and (ldaxr_2 am_noindex:$addr), 0xffff),
(SUBREG_TO_REG (i64 0), (LDAXRH am_noindex:$addr), sub_32)>;
def : Pat<(and (ldaxr_4 am_noindex:$addr), 0xffffffff),
(SUBREG_TO_REG (i64 0), (LDAXRW am_noindex:$addr), sub_32)>;
def : Pat<(and (ldaxr_1 GPR64sp:$addr), 0xff),
(SUBREG_TO_REG (i64 0), (LDAXRB GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldaxr_2 GPR64sp:$addr), 0xffff),
(SUBREG_TO_REG (i64 0), (LDAXRH GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldaxr_4 GPR64sp:$addr), 0xffffffff),
(SUBREG_TO_REG (i64 0), (LDAXRW GPR64sp:$addr), sub_32)>;
// Store-exclusives.
@ -233,28 +289,28 @@ def stxr_8 : PatFrag<(ops node:$val, node:$ptr),
}]>;
def : Pat<(stxr_1 GPR64:$val, am_noindex:$addr),
(STXRB (EXTRACT_SUBREG GPR64:$val, sub_32), am_noindex:$addr)>;
def : Pat<(stxr_2 GPR64:$val, am_noindex:$addr),
(STXRH (EXTRACT_SUBREG GPR64:$val, sub_32), am_noindex:$addr)>;
def : Pat<(stxr_4 GPR64:$val, am_noindex:$addr),
(STXRW (EXTRACT_SUBREG GPR64:$val, sub_32), am_noindex:$addr)>;
def : Pat<(stxr_8 GPR64:$val, am_noindex:$addr),
(STXRX GPR64:$val, am_noindex:$addr)>;
def : Pat<(stxr_1 GPR64:$val, GPR64sp:$addr),
(STXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_2 GPR64:$val, GPR64sp:$addr),
(STXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_4 GPR64:$val, GPR64sp:$addr),
(STXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_8 GPR64:$val, GPR64sp:$addr),
(STXRX GPR64:$val, GPR64sp:$addr)>;
def : Pat<(stxr_1 (zext (and GPR32:$val, 0xff)), am_noindex:$addr),
(STXRB GPR32:$val, am_noindex:$addr)>;
def : Pat<(stxr_2 (zext (and GPR32:$val, 0xffff)), am_noindex:$addr),
(STXRH GPR32:$val, am_noindex:$addr)>;
def : Pat<(stxr_4 (zext GPR32:$val), am_noindex:$addr),
(STXRW GPR32:$val, am_noindex:$addr)>;
def : Pat<(stxr_1 (zext (and GPR32:$val, 0xff)), GPR64sp:$addr),
(STXRB GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stxr_2 (zext (and GPR32:$val, 0xffff)), GPR64sp:$addr),
(STXRH GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stxr_4 (zext GPR32:$val), GPR64sp:$addr),
(STXRW GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stxr_1 (and GPR64:$val, 0xff), am_noindex:$addr),
(STXRB (EXTRACT_SUBREG GPR64:$val, sub_32), am_noindex:$addr)>;
def : Pat<(stxr_2 (and GPR64:$val, 0xffff), am_noindex:$addr),
(STXRH (EXTRACT_SUBREG GPR64:$val, sub_32), am_noindex:$addr)>;
def : Pat<(stxr_4 (and GPR64:$val, 0xffffffff), am_noindex:$addr),
(STXRW (EXTRACT_SUBREG GPR64:$val, sub_32), am_noindex:$addr)>;
def : Pat<(stxr_1 (and GPR64:$val, 0xff), GPR64sp:$addr),
(STXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_2 (and GPR64:$val, 0xffff), GPR64sp:$addr),
(STXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_4 (and GPR64:$val, 0xffffffff), GPR64sp:$addr),
(STXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
// Store-release-exclusives.
@ -279,28 +335,28 @@ def stlxr_8 : PatFrag<(ops node:$val, node:$ptr),
}]>;
def : Pat<(stlxr_1 GPR64:$val, am_noindex:$addr),
(STLXRB (EXTRACT_SUBREG GPR64:$val, sub_32), am_noindex:$addr)>;
def : Pat<(stlxr_2 GPR64:$val, am_noindex:$addr),
(STLXRH (EXTRACT_SUBREG GPR64:$val, sub_32), am_noindex:$addr)>;
def : Pat<(stlxr_4 GPR64:$val, am_noindex:$addr),
(STLXRW (EXTRACT_SUBREG GPR64:$val, sub_32), am_noindex:$addr)>;
def : Pat<(stlxr_8 GPR64:$val, am_noindex:$addr),
(STLXRX GPR64:$val, am_noindex:$addr)>;
def : Pat<(stlxr_1 GPR64:$val, GPR64sp:$addr),
(STLXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_2 GPR64:$val, GPR64sp:$addr),
(STLXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_4 GPR64:$val, GPR64sp:$addr),
(STLXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_8 GPR64:$val, GPR64sp:$addr),
(STLXRX GPR64:$val, GPR64sp:$addr)>;
def : Pat<(stlxr_1 (zext (and GPR32:$val, 0xff)), am_noindex:$addr),
(STLXRB GPR32:$val, am_noindex:$addr)>;
def : Pat<(stlxr_2 (zext (and GPR32:$val, 0xffff)), am_noindex:$addr),
(STLXRH GPR32:$val, am_noindex:$addr)>;
def : Pat<(stlxr_4 (zext GPR32:$val), am_noindex:$addr),
(STLXRW GPR32:$val, am_noindex:$addr)>;
def : Pat<(stlxr_1 (zext (and GPR32:$val, 0xff)), GPR64sp:$addr),
(STLXRB GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stlxr_2 (zext (and GPR32:$val, 0xffff)), GPR64sp:$addr),
(STLXRH GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stlxr_4 (zext GPR32:$val), GPR64sp:$addr),
(STLXRW GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stlxr_1 (and GPR64:$val, 0xff), am_noindex:$addr),
(STLXRB (EXTRACT_SUBREG GPR64:$val, sub_32), am_noindex:$addr)>;
def : Pat<(stlxr_2 (and GPR64:$val, 0xffff), am_noindex:$addr),
(STLXRH (EXTRACT_SUBREG GPR64:$val, sub_32), am_noindex:$addr)>;
def : Pat<(stlxr_4 (and GPR64:$val, 0xffffffff), am_noindex:$addr),
(STLXRW (EXTRACT_SUBREG GPR64:$val, sub_32), am_noindex:$addr)>;
def : Pat<(stlxr_1 (and GPR64:$val, 0xff), GPR64sp:$addr),
(STLXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_2 (and GPR64:$val, 0xffff), GPR64sp:$addr),
(STLXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_4 (and GPR64:$val, 0xffffffff), GPR64sp:$addr),
(STLXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
// And clear exclusive.

File diff suppressed because it is too large Load Diff

View File

@ -1039,29 +1039,53 @@ bool ARM64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
switch (MI->getOpcode()) {
default:
break;
case ARM64::LDRBBro:
case ARM64::LDRBro:
case ARM64::LDRDro:
case ARM64::LDRHHro:
case ARM64::LDRHro:
case ARM64::LDRQro:
case ARM64::LDRSBWro:
case ARM64::LDRSBXro:
case ARM64::LDRSHWro:
case ARM64::LDRSHXro:
case ARM64::LDRSWro:
case ARM64::LDRSro:
case ARM64::LDRWro:
case ARM64::LDRXro:
case ARM64::STRBBro:
case ARM64::STRBro:
case ARM64::STRDro:
case ARM64::STRHHro:
case ARM64::STRHro:
case ARM64::STRQro:
case ARM64::STRSro:
case ARM64::STRWro:
case ARM64::STRXro:
case ARM64::LDRBBroW:
case ARM64::LDRBroW:
case ARM64::LDRDroW:
case ARM64::LDRHHroW:
case ARM64::LDRHroW:
case ARM64::LDRQroW:
case ARM64::LDRSBWroW:
case ARM64::LDRSBXroW:
case ARM64::LDRSHWroW:
case ARM64::LDRSHXroW:
case ARM64::LDRSWroW:
case ARM64::LDRSroW:
case ARM64::LDRWroW:
case ARM64::LDRXroW:
case ARM64::STRBBroW:
case ARM64::STRBroW:
case ARM64::STRDroW:
case ARM64::STRHHroW:
case ARM64::STRHroW:
case ARM64::STRQroW:
case ARM64::STRSroW:
case ARM64::STRWroW:
case ARM64::STRXroW:
case ARM64::LDRBBroX:
case ARM64::LDRBroX:
case ARM64::LDRDroX:
case ARM64::LDRHHroX:
case ARM64::LDRHroX:
case ARM64::LDRQroX:
case ARM64::LDRSBWroX:
case ARM64::LDRSBXroX:
case ARM64::LDRSHWroX:
case ARM64::LDRSHXroX:
case ARM64::LDRSWroX:
case ARM64::LDRSroX:
case ARM64::LDRWroX:
case ARM64::LDRXroX:
case ARM64::STRBBroX:
case ARM64::STRBroX:
case ARM64::STRDroX:
case ARM64::STRHHroX:
case ARM64::STRHroX:
case ARM64::STRQroX:
case ARM64::STRSroX:
case ARM64::STRWroX:
case ARM64::STRXroX:
unsigned Val = MI->getOperand(3).getImm();
ARM64_AM::ShiftExtendType ExtType = ARM64_AM::getMemExtendType(Val);
return (ExtType != ARM64_AM::UXTX) || ARM64_AM::getMemDoShift(Val);

File diff suppressed because it is too large Load Diff

View File

@ -155,6 +155,16 @@ def GPR64sp : RegisterClass<"ARM64", [i64], 64, (add GPR64common, SP)> {
def GPR32sponly : RegisterClass<"ARM64", [i32], 32, (add WSP)>;
def GPR64sponly : RegisterClass<"ARM64", [i64], 64, (add SP)>;
// Assembler operand class for a GPR64sp base register handled by the custom
// parser routine tryParseGPR64sp0Operand; rendered as a plain register
// operand. NOTE(review): the "sp0" name suggests the parser tolerates an
// optional ", #0" after the register — confirm in the AsmParser.
def GPR64spPlus0Operand : AsmOperandClass {
let Name = "GPR64sp0";
let RenderMethod = "addRegOperands";
let ParserMethod = "tryParseGPR64sp0Operand";
}
// Register operand wrapping GPR64sp with the custom parse method above.
def GPR64sp0 : RegisterOperand<GPR64sp> {
let ParserMatchClass = GPR64spPlus0Operand;
}
// GPR register classes which include WZR/XZR AND SP/WSP. This is not a
// constraint used by any instructions, it is used as a common super-class.
def GPR32all : RegisterClass<"ARM64", [i32], 32, (add GPR32common, WZR, WSP)>;

File diff suppressed because it is too large Load Diff

View File

@ -89,6 +89,8 @@ static DecodeStatus DecodeFixedPointScaleImm64(llvm::MCInst &Inst, unsigned Imm,
const void *Decoder);
static DecodeStatus DecodePCRelLabel19(llvm::MCInst &Inst, unsigned Imm,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeMemExtend(llvm::MCInst &Inst, unsigned Imm,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeMRSSystemRegister(llvm::MCInst &Inst, unsigned Imm,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeMSRSystemRegister(llvm::MCInst &Inst, unsigned Imm,
@ -114,10 +116,6 @@ static DecodeStatus DecodeExclusiveLdStInstruction(llvm::MCInst &Inst,
static DecodeStatus DecodePairLdStInstruction(llvm::MCInst &Inst, uint32_t insn,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeRegOffsetLdStInstruction(llvm::MCInst &Inst,
uint32_t insn,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeAddSubERegInstruction(llvm::MCInst &Inst,
uint32_t insn, uint64_t Address,
const void *Decoder);
@ -605,6 +603,13 @@ static DecodeStatus DecodePCRelLabel19(llvm::MCInst &Inst, unsigned Imm,
return Success;
}
// Decode the 2-bit memory-extend field into its two immediate MC operands:
// bit 1 becomes the sign-extend flag and bit 0 the do-shift flag, matching
// the $SignExtend/$DoShift operand order consumed by printMemExtend.
static DecodeStatus DecodeMemExtend(llvm::MCInst &Inst, unsigned Imm,
uint64_t Address, const void *Decoder) {
Inst.addOperand(MCOperand::CreateImm((Imm >> 1) & 1));
Inst.addOperand(MCOperand::CreateImm(Imm & 1));
return Success;
}
static DecodeStatus DecodeMRSSystemRegister(llvm::MCInst &Inst, unsigned Imm,
uint64_t Address,
const void *Decoder) {
@ -1189,81 +1194,6 @@ static DecodeStatus DecodePairLdStInstruction(llvm::MCInst &Inst, uint32_t insn,
return Success;
}
// Decoder for the monolithic register-offset load/store instructions: selects
// the register class for Rt from the opcode, then appends the base (Rn),
// offset register (Rm) and the raw extend descriptor as operands.
static DecodeStatus DecodeRegOffsetLdStInstruction(llvm::MCInst &Inst,
uint32_t insn, uint64_t Addr,
const void *Decoder) {
// Register fields: Rt = transfer register, Rn = base, Rm = offset.
unsigned Rt = fieldFromInstruction(insn, 0, 5);
unsigned Rn = fieldFromInstruction(insn, 5, 5);
unsigned Rm = fieldFromInstruction(insn, 16, 5);
// The 4-bit extend descriptor is split across bits 15:13 (hi) and 12 (lo).
unsigned extendHi = fieldFromInstruction(insn, 13, 3);
unsigned extendLo = fieldFromInstruction(insn, 12, 1);
unsigned extend = (extendHi << 1) | extendLo;
// All RO load-store instructions are undefined if option == 00x or 10x.
if (extend >> 2 == 0x0 || extend >> 2 == 0x2)
return Fail;
// Pick Rt's register class based on the access width/signedness implied by
// the opcode; unknown opcodes are a decode failure.
switch (Inst.getOpcode()) {
default:
return Fail;
case ARM64::LDRSWro:
DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
break;
case ARM64::LDRXro:
case ARM64::STRXro:
DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
break;
case ARM64::LDRWro:
case ARM64::STRWro:
DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
break;
case ARM64::LDRQro:
case ARM64::STRQro:
DecodeFPR128RegisterClass(Inst, Rt, Addr, Decoder);
break;
case ARM64::LDRDro:
case ARM64::STRDro:
DecodeFPR64RegisterClass(Inst, Rt, Addr, Decoder);
break;
case ARM64::LDRSro:
case ARM64::STRSro:
DecodeFPR32RegisterClass(Inst, Rt, Addr, Decoder);
break;
case ARM64::LDRHro:
case ARM64::STRHro:
DecodeFPR16RegisterClass(Inst, Rt, Addr, Decoder);
break;
case ARM64::LDRBro:
case ARM64::STRBro:
DecodeFPR8RegisterClass(Inst, Rt, Addr, Decoder);
break;
case ARM64::LDRBBro:
case ARM64::STRBBro:
case ARM64::LDRSBWro:
DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
break;
case ARM64::LDRHHro:
case ARM64::STRHHro:
case ARM64::LDRSHWro:
DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
break;
case ARM64::LDRSHXro:
DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
break;
case ARM64::LDRSBXro:
DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
break;
case ARM64::PRFMro:
// Prefetch: Rt encodes the prefetch operation, not a register.
Inst.addOperand(MCOperand::CreateImm(Rt));
}
// Common tail: base register (may be SP), 64-bit offset register, and the
// raw extend immediate.
DecodeGPR64spRegisterClass(Inst, Rn, Addr, Decoder);
DecodeGPR64RegisterClass(Inst, Rm, Addr, Decoder);
Inst.addOperand(MCOperand::CreateImm(extend));
return Success;
}
static DecodeStatus DecodeAddSubERegInstruction(llvm::MCInst &Inst,
uint32_t insn, uint64_t Addr,
const void *Decoder) {

View File

@ -990,11 +990,11 @@ void ARM64InstPrinter::printShiftedRegister(const MCInst *MI, unsigned OpNum,
void ARM64InstPrinter::printExtendedRegister(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
O << getRegisterName(MI->getOperand(OpNum).getReg());
printExtend(MI, OpNum + 1, O);
printArithExtend(MI, OpNum + 1, O);
}
void ARM64InstPrinter::printExtend(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
void ARM64InstPrinter::printArithExtend(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
unsigned Val = MI->getOperand(OpNum).getImm();
ARM64_AM::ShiftExtendType ExtType = ARM64_AM::getArithExtendType(Val);
unsigned ShiftVal = ARM64_AM::getArithShiftValue(Val);
@ -1019,6 +1019,23 @@ void ARM64InstPrinter::printExtend(const MCInst *MI, unsigned OpNum,
O << " #" << ShiftVal;
}
/// Print the extend/shift suffix of a register-offset address, e.g.
/// "uxtw #2" or "lsl #3".  Operand OpNum holds the sign-extend flag and
/// OpNum+1 the do-shift flag; Width is the transfer size in bits.
void ARM64InstPrinter::printMemExtend(const MCInst *MI, unsigned OpNum,
                                      raw_ostream &O, char SrcRegKind,
                                      unsigned Width) {
  bool IsSigned = MI->getOperand(OpNum).getImm() != 0;
  bool HasShift = MI->getOperand(OpNum + 1).getImm() != 0;

  // An unsigned extend of an X register is spelled "lsl"; everything else
  // is one of sxtw, sxtx, uxtw.
  bool PrintAsLSL = !IsSigned && SrcRegKind == 'x';
  if (PrintAsLSL)
    O << "lsl";
  else
    O << (IsSigned ? 's' : 'u') << "xt" << SrcRegKind;

  // "lsl" always carries an explicit amount; extends only when shifting.
  if (HasShift || PrintAsLSL)
    O << " #" << Log2_32(Width / 8);
}
void ARM64InstPrinter::printCondCode(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
ARM64CC::CondCode CC = (ARM64CC::CondCode)MI->getOperand(OpNum).getImm();
@ -1042,18 +1059,15 @@ void ARM64InstPrinter::printImmScale(const MCInst *MI, unsigned OpNum,
O << '#' << Scale * MI->getOperand(OpNum).getImm();
}
void ARM64InstPrinter::printAMIndexed(const MCInst *MI, unsigned OpNum,
unsigned Scale, raw_ostream &O) {
const MCOperand MO1 = MI->getOperand(OpNum + 1);
O << '[' << getRegisterName(MI->getOperand(OpNum).getReg());
if (MO1.isImm()) {
if (MO1.getImm() != 0)
O << ", #" << (MO1.getImm() * Scale);
/// Print a scaled 12-bit unsigned offset operand: "#imm" (scaled by the
/// access size) or a symbolic expression.  The surrounding '[' ... ']' is
/// produced by the instruction syntax, not here.
///
/// NOTE: the previous text of this span interleaved stale lines from the
/// removed printAMIndexed (references to an undefined `MO1` and a trailing
/// "']'" print); they are dropped to leave the coherent function.
void ARM64InstPrinter::printUImm12Offset(const MCInst *MI, unsigned OpNum,
                                         unsigned Scale, raw_ostream &O) {
  const MCOperand MO = MI->getOperand(OpNum);
  if (MO.isImm()) {
    O << "#" << (MO.getImm() * Scale);
  } else {
    assert(MO.isExpr() && "Unexpected operand type!");
    O << *MO.getExpr();
  }
}
void ARM64InstPrinter::printAMIndexedWB(const MCInst *MI, unsigned OpNum,
@ -1080,37 +1094,6 @@ void ARM64InstPrinter::printPrefetchOp(const MCInst *MI, unsigned OpNum,
O << '#' << prfop;
}
/// Print a post-indexed address: "[Xn], #imm" with the immediate scaled by
/// the access size.
void ARM64InstPrinter::printMemoryPostIndexed(const MCInst *MI, unsigned OpNum,
                                              raw_ostream &O, unsigned Scale) {
  unsigned BaseReg = MI->getOperand(OpNum).getReg();
  O << '[' << getRegisterName(BaseReg) << ']' << ", #"
    << Scale * MI->getOperand(OpNum + 1).getImm();
}
void ARM64InstPrinter::printMemoryRegOffset(const MCInst *MI, unsigned OpNum,
raw_ostream &O, int Scale) {
unsigned Val = MI->getOperand(OpNum + 2).getImm();
ARM64_AM::ShiftExtendType ExtType = ARM64_AM::getMemExtendType(Val);
O << '[' << getRegisterName(MI->getOperand(OpNum).getReg()) << ", ";
if (ExtType == ARM64_AM::UXTW || ExtType == ARM64_AM::SXTW)
O << getRegisterName(getWRegFromXReg(MI->getOperand(OpNum + 1).getReg()));
else
O << getRegisterName(MI->getOperand(OpNum + 1).getReg());
bool DoShift = ARM64_AM::getMemDoShift(Val);
if (ExtType == ARM64_AM::UXTX) {
if (DoShift)
O << ", lsl";
} else
O << ", " << ARM64_AM::getShiftExtendName(ExtType);
if (DoShift)
O << " #" << Log2_32(Scale);
O << "]";
}
void ARM64InstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
const MCOperand &MO = MI->getOperand(OpNum);

View File

@ -62,18 +62,26 @@ protected:
void printShifter(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printShiftedRegister(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printExtendedRegister(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printExtend(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printArithExtend(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printMemExtend(const MCInst *MI, unsigned OpNum, raw_ostream &O,
char SrcRegKind, unsigned Width);
template <char SrcRegKind, unsigned Width>
void printMemExtend(const MCInst *MI, unsigned OpNum, raw_ostream &O) {
printMemExtend(MI, OpNum, O, SrcRegKind, Width);
}
void printCondCode(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printInverseCondCode(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printAlignedLabel(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printAMIndexed(const MCInst *MI, unsigned OpNum, unsigned Scale,
raw_ostream &O);
void printUImm12Offset(const MCInst *MI, unsigned OpNum, unsigned Scale,
raw_ostream &O);
void printAMIndexedWB(const MCInst *MI, unsigned OpNum, unsigned Scale,
raw_ostream &O);
template<int BitWidth>
void printAMIndexed(const MCInst *MI, unsigned OpNum, raw_ostream &O) {
printAMIndexed(MI, OpNum, BitWidth / 8, O);
template<int Scale>
void printUImm12Offset(const MCInst *MI, unsigned OpNum, raw_ostream &O) {
printUImm12Offset(MI, OpNum, Scale, O);
}
template<int BitWidth>
@ -88,21 +96,6 @@ protected:
void printPrefetchOp(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printMemoryPostIndexed(const MCInst *MI, unsigned OpNum, raw_ostream &O,
unsigned Scale);
template<int BitWidth>
void printMemoryPostIndexed(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
printMemoryPostIndexed(MI, OpNum, O, BitWidth / 8);
}
void printMemoryRegOffset(const MCInst *MI, unsigned OpNum, raw_ostream &O,
int LegalShiftAmt);
template<int BitWidth>
void printMemoryRegOffset(const MCInst *MI, unsigned OpNum, raw_ostream &O) {
printMemoryRegOffset(MI, OpNum, O, BitWidth / 8);
}
void printFPImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printVectorList(const MCInst *MI, unsigned OpNum, raw_ostream &O,

View File

@ -56,12 +56,11 @@ public:
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
/// getAMIndexed8OpValue - Return encoding info for base register
/// and 12-bit unsigned immediate attached to a load, store or prfm
/// instruction. If operand requires a relocation, record it and
/// return zero in that part of the encoding.
/// getLdStUImm12OpValue - Return encoding info for 12-bit unsigned immediate
/// attached to a load, store or prfm instruction. If operand requires a
/// relocation, record it and return zero in that part of the encoding.
template <uint32_t FixupKind>
uint32_t getAMIndexed8OpValue(const MCInst &MI, unsigned OpIdx,
uint32_t getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
@ -89,6 +88,13 @@ public:
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
/// getMemExtendOpValue - Return the encoded value for a reg-extend load/store
/// instruction: bit 0 is whether a shift is present, bit 1 is whether the
/// operation is a sign extend (as opposed to a zero extend).
uint32_t getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
/// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
/// branch target.
uint32_t getTestBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
@ -221,15 +227,11 @@ ARM64MCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
return 0;
}
template <uint32_t FixupKind>
uint32_t
ARM64MCCodeEmitter::getAMIndexed8OpValue(const MCInst &MI, unsigned OpIdx,
template<unsigned FixupKind> uint32_t
ARM64MCCodeEmitter::getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
unsigned BaseReg = MI.getOperand(OpIdx).getReg();
BaseReg = Ctx.getRegisterInfo()->getEncodingValue(BaseReg);
const MCOperand &MO = MI.getOperand(OpIdx + 1);
const MCOperand &MO = MI.getOperand(OpIdx);
uint32_t ImmVal = 0;
if (MO.isImm())
@ -241,7 +243,7 @@ ARM64MCCodeEmitter::getAMIndexed8OpValue(const MCInst &MI, unsigned OpIdx,
++MCNumFixups;
}
return BaseReg | (ImmVal << 5);
return ImmVal;
}
/// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
@ -255,7 +257,7 @@ ARM64MCCodeEmitter::getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
// If the destination is an immediate, we have nothing to do.
if (MO.isImm())
return MO.getImm();
assert(MO.isExpr() && "Unexpected ADR target type!");
assert(MO.isExpr() && "Unexpected target type!");
const MCExpr *Expr = MO.getExpr();
MCFixupKind Kind = MI.getOpcode() == ARM64::ADR
@ -341,6 +343,15 @@ ARM64MCCodeEmitter::getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
return 0;
}
/// getMemExtendOpValue - Encode the reg-extend operand pair of a load/store:
/// bit 1 is the sign-extend flag, bit 0 is whether a shift is present.
uint32_t
ARM64MCCodeEmitter::getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  uint32_t SignExtendBit = MI.getOperand(OpIdx).getImm();
  uint32_t ShiftBit = MI.getOperand(OpIdx + 1).getImm();
  return (SignExtendBit << 1) | ShiftBit;
}
uint32_t
ARM64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,

View File

@ -1803,7 +1803,7 @@
stxrb w2, w3, [x4, #20]
stlxrh w10, w11, [w2]
// CHECK-ERROR-AARCH64: error: expected '#0'
// CHECK-ERROR-ARM64: error: invalid operand for instruction
// CHECK-ERROR-ARM64: error: index must be absent or #0
// CHECK-ERROR-NEXT: stxrb w2, w3, [x4, #20]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR: error: invalid operand for instruction
@ -1887,7 +1887,8 @@
//------------------------------------------------------------------------------
ldr x3, [x4, #25], #0
ldr x4, [x9, #0], #4
// CHECK-ERROR: error: {{expected symbolic reference or integer|index must be a multiple of 8}} in range [0, 32760]
// CHECK-ERROR-AARCH64: error: {{expected symbolic reference or integer|index must be a multiple of 8}} in range [0, 32760]
// CHECK-ERROR-ARM64: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldr x3, [x4, #25], #0
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
@ -2083,22 +2084,19 @@
strh w9, [sp, #-257]!
str w1, [x19, #256]!
str w9, [sp, #-257]!
// CHECK-ERROR-AARCH64: error: invalid operand for instruction
// CHECK-ERROR-ARM64: error: invalid offset in memory address
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: strb w1, [x19, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: strb w9, [sp, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: strh w1, [x19, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: strh w9, [sp, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: str w1, [x19, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
@ -2111,22 +2109,19 @@
ldrh w9, [sp, #-257]!
ldr w1, [x19, #256]!
ldr w9, [sp, #-257]!
// CHECK-ERROR-AARCH64: error: invalid operand for instruction
// CHECK-ERROR-ARM64: error: invalid offset in memory address
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldrb w1, [x19, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrb w9, [sp, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldrh w1, [x19, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrh w9, [sp, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldr w1, [x19, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
@ -2139,22 +2134,19 @@
ldrsh x22, [x13, #-257]!
ldrsw x2, [x3, #256]!
ldrsw x22, [x13, #-257]!
// CHECK-ERROR-AARCH64: error: invalid operand for instruction
// CHECK-ERROR-ARM64: error: invalid offset in memory address
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldrsb x2, [x3, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsb x22, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldrsh x2, [x3, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsh x22, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldrsw x2, [x3, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
@ -2165,15 +2157,13 @@
ldrsb w22, [x13, #-257]!
ldrsh w2, [x3, #256]!
ldrsh w22, [x13, #-257]!
// CHECK-ERROR-AARCH64: error: invalid operand for instruction
// CHECK-ERROR-ARM64: error: invalid offset in memory address
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldrsb w2, [x3, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrsb w22, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldrsh w2, [x3, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
@ -2188,29 +2178,25 @@
str s3, [x13, #-257]!
str d3, [x3, #256]!
str d3, [x13, #-257]!
// CHECK-ERROR-AARCH64: error: invalid operand for instruction
// CHECK-ERROR-ARM64: error: invalid offset in memory address
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: str b3, [x3, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str b3, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: str h3, [x3, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str h3, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: str s3, [x3, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str s3, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: str d3, [x3, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
@ -2225,29 +2211,25 @@
ldr s3, [x13, #-257]!
ldr d3, [x3, #256]!
ldr d3, [x13, #-257]!
// CHECK-ERROR-AARCH64: error: invalid operand for instruction
// CHECK-ERROR-ARM64: error: invalid offset in memory address
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldr b3, [x3, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr b3, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldr h3, [x3, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr h3, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldr s3, [x3, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr s3, [x13, #-257]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: invalid operand for instruction
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldr d3, [x3, #256]!
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
@ -2262,20 +2244,16 @@
sttrh w17, [x1, #256]
ldtrsw x20, [x1, #256]
ldtr x12, [sp, #256]
// CHECK-ERROR-AARCH64: error: expected integer in range [-256, 255]
// CHECK-ERROR-ARM64: error: invalid offset in memory address
// CHECK-ERROR: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldtrb w2, [sp, #256]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: sttrh w17, [x1, #256]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldtrsw x20, [x1, #256]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldtr x12, [sp, #256]
// CHECK-ERROR-NEXT: ^
@ -2290,12 +2268,10 @@
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: sttr b2, [x2, #-257]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldtrsb x9, [sp, #-257]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldtr w2, [x30, #-257]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: invalid operand for instruction
@ -2313,24 +2289,19 @@
ldr w0, [x4, #16384]
ldrh w2, [x21, #8192]
ldrb w3, [x12, #4096]
// CHECK-ERROR-AARCH64: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-ARM64: error: invalid offset in memory address
// CHECK-ERROR: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr q0, [x11, #65536]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr x0, [sp, #32768]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldr w0, [x4, #16384]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrh w2, [x21, #8192]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: ldrb w3, [x12, #4096]
// CHECK-ERROR-NEXT: ^
@ -2372,8 +2343,7 @@
// CHECK-ERROR-AARCH64-NEXT: error: too few operands for instruction
// CHECK-ERROR-AARCH64-NEXT: str x5, [x22, #12]
// CHECK-ERROR-AARCH64-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: {{expected|index must be an}} integer in range [-256, 255]
// CHECK-ERROR-NEXT: str w7, [x12, #16384]
// CHECK-ERROR-NEXT: ^
@ -2411,92 +2381,78 @@
// CHECK-ERROR-NEXT: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldr w3, [xzr, x3]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: expected #imm after shift specifier
// CHECK-ERROR-ARM64-NEXT: error: LSL extend requires immediate operand
// CHECK-ERROR-NEXT: error: expected #imm after shift specifier
// CHECK-ERROR-NEXT: ldr w4, [x0, x4, lsl]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: expected 'lsl' or 'sxtx' with optional shift of #0 or #2
// CHECK-ERROR-AARCH64-NEXT: ldr w9, [x5, x5, uxtw]
// CHECK-ERROR-AARCH64-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: expected 'lsl' or 'sxtx' with optional shift of #0 or #2
// CHECK-ERROR-AARCH64-NEXT: ldr w10, [x6, x9, sxtw #2]
// CHECK-ERROR-AARCH64-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #2
// CHECK-ERROR-ARM64-NEXT: error: 32-bit general purpose offset register requires sxtw or uxtw extend
// CHECK-ERROR-NEXT: error: expected 'lsl' or 'sxtx' with optional shift of #0 or #2
// CHECK-ERROR-NEXT: ldr w9, [x5, x5, uxtw]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: expected 'lsl' or 'sxtx' with optional shift of #0 or #2
// CHECK-ERROR-NEXT: ldr w10, [x6, x9, sxtw #2]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-NEXT: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #2
// CHECK-ERROR-NEXT: ldr w11, [x7, w2, lsl #2]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #2
// CHECK-ERROR-ARM64-NEXT: error: 32-bit general purpose offset register requires sxtw or uxtw extend
// CHECK-ERROR-NEXT: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #2
// CHECK-ERROR-NEXT: ldr w12, [x8, w1, sxtx]
// CHECK-ERROR-NEXT: ^
ldrsb w9, [x4, x2, lsl #-1]
strb w9, [x4, x2, lsl #1]
// CHECK-ERROR-AARCH64-NEXT: error: expected integer shift amount
// CHECK-ERROR-ARM64-NEXT: error: immediate operand out of range
// CHECK-ERROR-NEXT: error: expected integer shift amount
// CHECK-ERROR-NEXT: ldrsb w9, [x4, x2, lsl #-1]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: expected 'lsl' or 'sxtx' with optional shift of #0
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: expected 'lsl' or 'sxtx' with optional shift of #0
// CHECK-ERROR-NEXT: strb w9, [x4, x2, lsl #1]
// CHECK-ERROR-NEXT: ^
ldrsh w9, [x4, x2, lsl #-1]
ldr h13, [x4, w2, uxtw #2]
// CHECK-ERROR-AARCH64-NEXT: error: expected integer shift amount
// CHECK-ERROR-ARM64-NEXT: error: immediate operand out of range
// CHECK-ERROR-NEXT: error: expected integer shift amount
// CHECK-ERROR-NEXT: ldrsh w9, [x4, x2, lsl #-1]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #1
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #1
// CHECK-ERROR-NEXT: ldr h13, [x4, w2, uxtw #2]
// CHECK-ERROR-NEXT: ^
str w9, [x5, w9, sxtw #-1]
str s3, [sp, w9, uxtw #1]
ldrsw x9, [x15, x4, sxtx #3]
// CHECK-ERROR-AARCH64-NEXT: error: expected integer shift amount
// CHECK-ERROR-ARM64-NEXT: error: immediate operand out of range
// CHECK-ERROR-NEXT: error: expected integer shift amount
// CHECK-ERROR-NEXT: str w9, [x5, w9, sxtw #-1]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #2
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #2
// CHECK-ERROR-NEXT: str s3, [sp, w9, uxtw #1]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: expected 'lsl' or 'sxtx' with optional shift of #0 or #2
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: expected 'lsl' or 'sxtx' with optional shift of #0 or #2
// CHECK-ERROR-NEXT: ldrsw x9, [x15, x4, sxtx #3]
// CHECK-ERROR-NEXT: ^
str xzr, [x5, x9, sxtx #-1]
prfm pldl3keep, [sp, x20, lsl #2]
ldr d3, [x20, wzr, uxtw #4]
// CHECK-ERROR-AARCH64-NEXT: error: expected integer shift amount
// CHECK-ERROR-ARM64-NEXT: error: immediate operand out of range
// CHECK-ERROR-NEXT: error: expected integer shift amount
// CHECK-ERROR-NEXT: str xzr, [x5, x9, sxtx #-1]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: expected 'lsl' or 'sxtx' with optional shift of #0 or #3
// CHECK-ERROR-ARM64-NEXT: error: expected label or encodable integer pc offset
// CHECK-ERROR-NEXT: error: expected 'lsl' or 'sxtx' with optional shift of #0 or #3
// CHECK-ERROR-NEXT: prfm pldl3keep, [sp, x20, lsl #2]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #3
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-NEXT: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #3
// CHECK-ERROR-NEXT: ldr d3, [x20, wzr, uxtw #4]
// CHECK-ERROR-NEXT: ^
ldr q5, [sp, x2, lsl #-1]
ldr q10, [x20, w4, uxtw #2]
str q21, [x20, w4, uxtw #5]
// CHECK-ERROR-AARCH64-NEXT: error: expected integer shift amount
// CHECK-ERROR-ARM64-NEXT: error: immediate operand out of range
// CHECK-ERROR-NEXT: error: expected integer shift amount
// CHECK-ERROR-NEXT: ldr q5, [sp, x2, lsl #-1]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: expected 'lsl' or 'sxtw' with optional shift of #0 or #4
// CHECK-ERROR-ARM64-NEXT: error: invalid offset in memory address
// CHECK-ERROR-ARM64-NEXT: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #4
// CHECK-ERROR-NEXT: ldr q10, [x20, w4, uxtw #2]
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: expected 'lsl' or 'sxtw' with optional shift of #0 or #4
// CHECK-ERROR-ARM64-NEXT: error: immediate operand out of range
// CHECK-ERROR-ARM64-NEXT: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #4
// CHECK-ERROR-NEXT: str q21, [x20, w4, uxtw #5]
// CHECK-ERROR-NEXT: ^
@ -2695,16 +2651,13 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR-NEXT: ldp d3, q2, [sp], #0
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-ARM64-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-NEXT: ldp q3, q5, [sp], #8
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-ARM64-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-NEXT: stp q20, q25, [x5], #1024
// CHECK-ERROR-NEXT: ^
// CHECK-ERROR-AARCH64-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-ARM64-NEXT: error: {{expected integer|index must be a}} multiple of 8 in range [-512, 504]
// CHECK-ERROR-NEXT: error: {{expected integer|index must be a}} multiple of 16 in range [-1024, 1008]
// CHECK-ERROR-NEXT: ldp q30, q15, [x23], #-1040
// CHECK-ERROR-NEXT: ^

View File

@ -4080,8 +4080,7 @@
// CHECK-ARM64-ERROR: error: vector register expected
// CHECK-ERROR: ld1 {v32.16b}, [x0]
// CHECK-ERROR: ^
// CHECK-AARCH64-ERROR: error: invalid operand for instruction
// CHECK-ARM64-ERROR: error: register expected
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: ld1 {v15.8h}, [x32]
// CHECK-ERROR: ^
@ -4130,8 +4129,7 @@
// CHECK-ARM64-ERROR: error: registers must be sequential
// CHECK-ERROR: ld2 {v0.8b, v2.8b}, [x0]
// CHECK-ERROR: ^
// CHECK-AARCH64-ERROR: error: invalid operand for instruction
// CHECK-ARM64-ERROR: error: register expected
// CHECK-AARCH64: error: invalid operand for instruction
// CHECK-ERROR: ld2 {v15.4h, v16.4h, v17.4h}, [x32]
// CHECK-ERROR: ^
// CHECK-AARCH64-ERROR: error: expected the same vector layout
@ -4207,8 +4205,7 @@
// CHECK-ARM64-ERROR: error: vector register expected
// CHECK-ERROR: st1 {v32.16b}, [x0]
// CHECK-ERROR: ^
// CHECK-AARCH64-ERROR: error: invalid operand for instruction
// CHECK-ARM64-ERROR: error: register expected
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: st1 {v15.8h}, [x32]
// CHECK-ERROR: ^
@ -4434,8 +4431,7 @@
// CHECK-ARM64-ERROR: vector lane must be an integer in range
// CHECK-ERROR: st1 {v0.d}[16], [x0]
// CHECK-ERROR: ^
// CHECK-AARCH64-ERROR: error: invalid operand for instruction
// CHECK-ARM64-ERROR: error: register expected
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: st2 {v31.s, v0.s}[3], [8]
// CHECK-ERROR: ^
// CHECK-AARCH64-ERROR: error: expected lane number

View File

@ -9,7 +9,7 @@ foo:
ldr x3, [foo + 4]
; CHECK: ldr x3, foo+4 ; encoding: [0bAAA00011,A,A,0x58]
; CHECK: ; fixup A - offset: 0, value: foo+4, kind: fixup_arm64_ldr_pcrel_imm19
; CHECK-ERRORS: error: register expected
; CHECK-ERRORS: error: invalid operand for instruction
; The last argument should be flagged as an error. rdar://9576009
ld4.8b {v0, v1, v2, v3}, [x0], #33
@ -33,10 +33,10 @@ foo:
ldur x0, [x1, #-257]
; CHECK-ERRORS: error: invalid offset in memory address.
; CHECK-ERRORS: error: index must be an integer in range [-256, 255].
; CHECK-ERRORS: ldr x0, [x0, #804]
; CHECK-ERRORS: ^
; CHECK-ERRORS: error: invalid offset in memory address.
; CHECK-ERRORS: error: index must be an integer in range [-256, 255].
; CHECK-ERRORS: ldr w0, [x0, #802]
; CHECK-ERRORS: ^
; CHECK-ERRORS: error: index must be an integer in range [-256, 255].
@ -66,7 +66,7 @@ foo:
; CHECK-ERRORS: error: index must be a multiple of 8 in range [-512, 504].
; CHECK-ERRORS: ldp x3, x4, [x5], #12
; CHECK-ERRORS: ^
; CHECK-ERRORS: error: index must be a multiple of 8 in range [-512, 504].
; CHECK-ERRORS: error: index must be a multiple of 16 in range [-1024, 1008].
; CHECK-ERRORS: ldp q3, q4, [x5], #12
; CHECK-ERRORS: ^
; CHECK-ERRORS: error: index must be an integer in range [-256, 255].
@ -84,31 +84,31 @@ ldr s1, [x3, w3, sxtw #4]
ldr d1, [x3, w3, sxtw #4]
ldr q1, [x3, w3, sxtw #1]
; CHECK-ERRORS: error: invalid offset in memory address.
; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0
; CHECK-ERRORS:ldrb w1, [x3, w3, sxtw #4]
; CHECK-ERRORS: ^
; CHECK-ERRORS: error: invalid offset in memory address.
; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #1
; CHECK-ERRORS:ldrh w1, [x3, w3, sxtw #4]
; CHECK-ERRORS: ^
; CHECK-ERRORS: error: invalid offset in memory address.
; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #2
; CHECK-ERRORS:ldr w1, [x3, w3, sxtw #4]
; CHECK-ERRORS: ^
; CHECK-ERRORS: error: invalid offset in memory address.
; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #3
; CHECK-ERRORS:ldr x1, [x3, w3, sxtw #4]
; CHECK-ERRORS: ^
; CHECK-ERRORS: error: invalid offset in memory address.
; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0
; CHECK-ERRORS:ldr b1, [x3, w3, sxtw #4]
; CHECK-ERRORS: ^
; CHECK-ERRORS: invalid offset in memory address.
; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #1
; CHECK-ERRORS:ldr h1, [x3, w3, sxtw #4]
; CHECK-ERRORS: ^
; CHECK-ERRORS: invalid offset in memory address.
; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #2
; CHECK-ERRORS:ldr s1, [x3, w3, sxtw #4]
; CHECK-ERRORS: ^
; CHECK-ERRORS: invalid offset in memory address.
; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #3
; CHECK-ERRORS:ldr d1, [x3, w3, sxtw #4]
; CHECK-ERRORS: ^
; CHECK-ERRORS: invalid offset in memory address.
; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #4
; CHECK-ERRORS:ldr q1, [x3, w3, sxtw #1]
; CHECK-ERRORS: ^
@ -118,10 +118,10 @@ ldr q1, [x3, w3, sxtw #1]
str d1, [x3, w3, sxtx #3]
ldr s1, [x3, d3, sxtx #2]
; CHECK-ERRORS: 32-bit general purpose offset register requires sxtw or uxtw extend
; CHECK-ERRORS: error: expected 'uxtw' or 'sxtw' with optional shift of #0 or #3
; CHECK-ERRORS: str d1, [x3, w3, sxtx #3]
; CHECK-ERRORS: ^
; CHECK-ERRORS: error: 64-bit general purpose offset register expected
; CHECK-ERRORS: error: index must be an integer in range [-256, 255].
; CHECK-ERRORS: ldr s1, [x3, d3, sxtx #2]
; CHECK-ERRORS: ^

View File

@ -1718,9 +1718,9 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
DagInit *MIOpInfo = OperandNode->getValueAsDag("MIOperandInfo");
if (unsigned NumArgs = MIOpInfo->getNumArgs()) {
// But don't do that if the whole operand is being provided by
// a single ComplexPattern.
const ComplexPattern *AM = Child->getComplexPatternInfo(CDP);
if (!AM || AM->getNumOperands() < NumArgs) {
// a single ComplexPattern-related Operand.
if (Child->getNumMIResults(CDP) < NumArgs) {
// Match first sub-operand against the child we already have.
Record *SubRec = cast<DefInit>(MIOpInfo->getArg(0))->getDef();
MadeChange |=