Implement the AArch64 post-index vector load/store multiple N-element structure instructions (class SIMD lselem-post).

This includes the following 14 instructions:
4 ld1 insts: post-index load multiple 1-element structures to sequential 1/2/3/4 registers.
ld2/ld3/ld4: post-index load multiple N-element structures to sequential N registers (N = 2,3,4).
4 st1 insts: post-index store multiple 1-element structures from sequential 1/2/3/4 registers.
st2/st3/st4: post-index store multiple N-element structures from sequential N registers (N = 2,3,4).
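As an illustration (a hand-written sketch, not code from this patch; assumes
arm_neon.h and that the intrinsics lower to @llvm.arm.neon.vld2/vst2 as in the
tests below), a loop like this can now fold its pointer increments into the
memory instructions:

#include <arm_neon.h>

// De-interleaving copy of n floats, eight per iteration. After this patch the
// base-update combine can merge "src += 8" / "dst += 8" into the memory ops:
//   ld2 {v0.4s, v1.4s}, [x1], #32
//   st2 {v0.4s, v1.4s}, [x0], #32
void copy2_f32(float *dst, const float *src, int n) {
  for (int i = 0; i < n; i += 8) {
    float32x4x2_t v = vld2q_f32(src); // becomes a post-index ld2
    vst2q_f32(dst, v);                // becomes a post-index st2
    src += 8;                         // folded into the ld2 base update
    dst += 8;                         // folded into the st2 base update
  }
}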


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@194043 91177308-0d34-0410-b5e6-96231b3b80d8
Hao Liu 2013-11-05 03:39:32 +00:00
parent 8263dcdf23
commit 591c2f738a
11 changed files with 1425 additions and 15 deletions


@@ -110,10 +110,12 @@ public:
SDNode* Select(SDNode*);
private:
/// Select NEON load intrinsics. NumVecs should be 1, 2, 3 or 4.
SDNode *SelectVLD(SDNode *N, unsigned NumVecs, const uint16_t *Opcode);
SDNode *SelectVLD(SDNode *N, unsigned NumVecs, bool isUpdating,
const uint16_t *Opcode);
/// Select NEON store intrinsics. NumVecs should be 1, 2, 3 or 4.
SDNode *SelectVST(SDNode *N, unsigned NumVecs, const uint16_t *Opcodes);
SDNode *SelectVST(SDNode *N, unsigned NumVecs, bool isUpdating,
const uint16_t *Opcodes);
// Form pairs of consecutive 64-bit/128-bit registers.
SDNode *createDPairNode(SDValue V0, SDValue V1);
@@ -485,7 +487,88 @@ SDNode *AArch64DAGToDAGISel::createQQuadNode(SDValue V0, SDValue V1, SDValue V2,
Ops);
}
// Get the register stride update opcode of a VLD/VST instruction that
// is otherwise equivalent to the given fixed stride updating instruction.
static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
switch (Opc) {
default: break;
case AArch64::LD1WB_8B_fixed: return AArch64::LD1WB_8B_register;
case AArch64::LD1WB_4H_fixed: return AArch64::LD1WB_4H_register;
case AArch64::LD1WB_2S_fixed: return AArch64::LD1WB_2S_register;
case AArch64::LD1WB_1D_fixed: return AArch64::LD1WB_1D_register;
case AArch64::LD1WB_16B_fixed: return AArch64::LD1WB_16B_register;
case AArch64::LD1WB_8H_fixed: return AArch64::LD1WB_8H_register;
case AArch64::LD1WB_4S_fixed: return AArch64::LD1WB_4S_register;
case AArch64::LD1WB_2D_fixed: return AArch64::LD1WB_2D_register;
case AArch64::LD2WB_8B_fixed: return AArch64::LD2WB_8B_register;
case AArch64::LD2WB_4H_fixed: return AArch64::LD2WB_4H_register;
case AArch64::LD2WB_2S_fixed: return AArch64::LD2WB_2S_register;
case AArch64::LD1WB2V_1D_fixed: return AArch64::LD1WB2V_1D_register;
case AArch64::LD2WB_16B_fixed: return AArch64::LD2WB_16B_register;
case AArch64::LD2WB_8H_fixed: return AArch64::LD2WB_8H_register;
case AArch64::LD2WB_4S_fixed: return AArch64::LD2WB_4S_register;
case AArch64::LD2WB_2D_fixed: return AArch64::LD2WB_2D_register;
case AArch64::LD3WB_8B_fixed: return AArch64::LD3WB_8B_register;
case AArch64::LD3WB_4H_fixed: return AArch64::LD3WB_4H_register;
case AArch64::LD3WB_2S_fixed: return AArch64::LD3WB_2S_register;
case AArch64::LD1WB3V_1D_fixed: return AArch64::LD1WB3V_1D_register;
case AArch64::LD3WB_16B_fixed: return AArch64::LD3WB_16B_register;
case AArch64::LD3WB_8H_fixed: return AArch64::LD3WB_8H_register;
case AArch64::LD3WB_4S_fixed: return AArch64::LD3WB_4S_register;
case AArch64::LD3WB_2D_fixed: return AArch64::LD3WB_2D_register;
case AArch64::LD4WB_8B_fixed: return AArch64::LD4WB_8B_register;
case AArch64::LD4WB_4H_fixed: return AArch64::LD4WB_4H_register;
case AArch64::LD4WB_2S_fixed: return AArch64::LD4WB_2S_register;
case AArch64::LD1WB4V_1D_fixed: return AArch64::LD1WB4V_1D_register;
case AArch64::LD4WB_16B_fixed: return AArch64::LD4WB_16B_register;
case AArch64::LD4WB_8H_fixed: return AArch64::LD4WB_8H_register;
case AArch64::LD4WB_4S_fixed: return AArch64::LD4WB_4S_register;
case AArch64::LD4WB_2D_fixed: return AArch64::LD4WB_2D_register;
case AArch64::ST1WB_8B_fixed: return AArch64::ST1WB_8B_register;
case AArch64::ST1WB_4H_fixed: return AArch64::ST1WB_4H_register;
case AArch64::ST1WB_2S_fixed: return AArch64::ST1WB_2S_register;
case AArch64::ST1WB_1D_fixed: return AArch64::ST1WB_1D_register;
case AArch64::ST1WB_16B_fixed: return AArch64::ST1WB_16B_register;
case AArch64::ST1WB_8H_fixed: return AArch64::ST1WB_8H_register;
case AArch64::ST1WB_4S_fixed: return AArch64::ST1WB_4S_register;
case AArch64::ST1WB_2D_fixed: return AArch64::ST1WB_2D_register;
case AArch64::ST2WB_8B_fixed: return AArch64::ST2WB_8B_register;
case AArch64::ST2WB_4H_fixed: return AArch64::ST2WB_4H_register;
case AArch64::ST2WB_2S_fixed: return AArch64::ST2WB_2S_register;
case AArch64::ST1WB2V_1D_fixed: return AArch64::ST1WB2V_1D_register;
case AArch64::ST2WB_16B_fixed: return AArch64::ST2WB_16B_register;
case AArch64::ST2WB_8H_fixed: return AArch64::ST2WB_8H_register;
case AArch64::ST2WB_4S_fixed: return AArch64::ST2WB_4S_register;
case AArch64::ST2WB_2D_fixed: return AArch64::ST2WB_2D_register;
case AArch64::ST3WB_8B_fixed: return AArch64::ST3WB_8B_register;
case AArch64::ST3WB_4H_fixed: return AArch64::ST3WB_4H_register;
case AArch64::ST3WB_2S_fixed: return AArch64::ST3WB_2S_register;
case AArch64::ST1WB3V_1D_fixed: return AArch64::ST1WB3V_1D_register;
case AArch64::ST3WB_16B_fixed: return AArch64::ST3WB_16B_register;
case AArch64::ST3WB_8H_fixed: return AArch64::ST3WB_8H_register;
case AArch64::ST3WB_4S_fixed: return AArch64::ST3WB_4S_register;
case AArch64::ST3WB_2D_fixed: return AArch64::ST3WB_2D_register;
case AArch64::ST4WB_8B_fixed: return AArch64::ST4WB_8B_register;
case AArch64::ST4WB_4H_fixed: return AArch64::ST4WB_4H_register;
case AArch64::ST4WB_2S_fixed: return AArch64::ST4WB_2S_register;
case AArch64::ST1WB4V_1D_fixed: return AArch64::ST1WB4V_1D_register;
case AArch64::ST4WB_16B_fixed: return AArch64::ST4WB_16B_register;
case AArch64::ST4WB_8H_fixed: return AArch64::ST4WB_8H_register;
case AArch64::ST4WB_4S_fixed: return AArch64::ST4WB_4S_register;
case AArch64::ST4WB_2D_fixed: return AArch64::ST4WB_2D_register;
}
return Opc; // If not one we handle, return it unchanged.
}
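// For example (mapping per the opcode tables added to Select below), the two
// forms differ only in the final assembler operand:
//   ld1 {v0.8b}, [x0], #8    -> LD1WB_8B_fixed    (Rm == 0b11111, immediate)
//   ld1 {v0.8b}, [x0], x1    -> LD1WB_8B_register (increment held in Rm)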
SDNode *AArch64DAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
bool isUpdating,
const uint16_t *Opcodes) {
assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
@@ -510,7 +593,16 @@ SDNode *AArch64DAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
unsigned Opc = Opcodes[OpcodeIndex];
SmallVector<SDValue, 2> Ops;
Ops.push_back(N->getOperand(2)); // Push back the Memory Address
unsigned AddrOpIdx = isUpdating ? 1 : 2;
Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address
if (isUpdating) {
SDValue Inc = N->getOperand(AddrOpIdx + 1);
if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
Opc = getVLDSTRegisterUpdateOpcode(Opc);
Ops.push_back(Inc);
}
Ops.push_back(N->getOperand(0)); // Push back the Chain
std::vector<EVT> ResTys;
@@ -526,6 +618,8 @@ SDNode *AArch64DAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
ResTys.push_back(ResTy);
}
if (isUpdating)
ResTys.push_back(MVT::i64); // Type of the updated register
ResTys.push_back(MVT::Other); // Type of the Chain
SDLoc dl(N);
SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
@@ -548,11 +642,14 @@ SDNode *AArch64DAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
// Update users of the Chain
ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
if (isUpdating)
ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
return NULL;
}
SDNode *AArch64DAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
bool isUpdating,
const uint16_t *Opcodes) {
assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
SDLoc dl(N);
@@ -560,7 +657,8 @@ SDNode *AArch64DAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
unsigned Vec0Idx = 3;
unsigned AddrOpIdx = isUpdating ? 1 : 2;
unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
EVT VT = N->getOperand(Vec0Idx).getValueType();
unsigned OpcodeIndex;
switch (VT.getSimpleVT().SimpleTy) {
@@ -582,11 +680,19 @@ SDNode *AArch64DAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
unsigned Opc = Opcodes[OpcodeIndex];
std::vector<EVT> ResTys;
if (isUpdating)
ResTys.push_back(MVT::i64);
ResTys.push_back(MVT::Other); // Type for the Chain
SmallVector<SDValue, 6> Ops;
Ops.push_back(N->getOperand(2)); // Push back the Memory Address
Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address
if (isUpdating) {
SDValue Inc = N->getOperand(AddrOpIdx + 1);
if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
Opc = getVLDSTRegisterUpdateOpcode(Opc);
Ops.push_back(Inc);
}
bool is64BitVector = VT.is64BitVector();
SDValue V0 = N->getOperand(Vec0Idx + 0);
@@ -768,6 +874,78 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
Node = ResNode;
break;
}
case AArch64ISD::NEON_LD1_UPD: {
static const uint16_t Opcodes[] = {
AArch64::LD1WB_8B_fixed, AArch64::LD1WB_4H_fixed,
AArch64::LD1WB_2S_fixed, AArch64::LD1WB_1D_fixed,
AArch64::LD1WB_16B_fixed, AArch64::LD1WB_8H_fixed,
AArch64::LD1WB_4S_fixed, AArch64::LD1WB_2D_fixed
};
return SelectVLD(Node, 1, true, Opcodes);
}
case AArch64ISD::NEON_LD2_UPD: {
static const uint16_t Opcodes[] = {
AArch64::LD2WB_8B_fixed, AArch64::LD2WB_4H_fixed,
AArch64::LD2WB_2S_fixed, AArch64::LD1WB2V_1D_fixed,
AArch64::LD2WB_16B_fixed, AArch64::LD2WB_8H_fixed,
AArch64::LD2WB_4S_fixed, AArch64::LD2WB_2D_fixed
};
return SelectVLD(Node, 2, true, Opcodes);
}
case AArch64ISD::NEON_LD3_UPD: {
static const uint16_t Opcodes[] = {
AArch64::LD3WB_8B_fixed, AArch64::LD3WB_4H_fixed,
AArch64::LD3WB_2S_fixed, AArch64::LD1WB3V_1D_fixed,
AArch64::LD3WB_16B_fixed, AArch64::LD3WB_8H_fixed,
AArch64::LD3WB_4S_fixed, AArch64::LD3WB_2D_fixed
};
return SelectVLD(Node, 3, true, Opcodes);
}
case AArch64ISD::NEON_LD4_UPD: {
static const uint16_t Opcodes[] = {
AArch64::LD4WB_8B_fixed, AArch64::LD4WB_4H_fixed,
AArch64::LD4WB_2S_fixed, AArch64::LD1WB4V_1D_fixed,
AArch64::LD4WB_16B_fixed, AArch64::LD4WB_8H_fixed,
AArch64::LD4WB_4S_fixed, AArch64::LD4WB_2D_fixed
};
return SelectVLD(Node, 4, true, Opcodes);
}
case AArch64ISD::NEON_ST1_UPD: {
static const uint16_t Opcodes[] = {
AArch64::ST1WB_8B_fixed, AArch64::ST1WB_4H_fixed,
AArch64::ST1WB_2S_fixed, AArch64::ST1WB_1D_fixed,
AArch64::ST1WB_16B_fixed, AArch64::ST1WB_8H_fixed,
AArch64::ST1WB_4S_fixed, AArch64::ST1WB_2D_fixed
};
return SelectVST(Node, 1, true, Opcodes);
}
case AArch64ISD::NEON_ST2_UPD: {
static const uint16_t Opcodes[] = {
AArch64::ST2WB_8B_fixed, AArch64::ST2WB_4H_fixed,
AArch64::ST2WB_2S_fixed, AArch64::ST1WB2V_1D_fixed,
AArch64::ST2WB_16B_fixed, AArch64::ST2WB_8H_fixed,
AArch64::ST2WB_4S_fixed, AArch64::ST2WB_2D_fixed
};
return SelectVST(Node, 2, true, Opcodes);
}
case AArch64ISD::NEON_ST3_UPD: {
static const uint16_t Opcodes[] = {
AArch64::ST3WB_8B_fixed, AArch64::ST3WB_4H_fixed,
AArch64::ST3WB_2S_fixed, AArch64::ST1WB3V_1D_fixed,
AArch64::ST3WB_16B_fixed, AArch64::ST3WB_8H_fixed,
AArch64::ST3WB_4S_fixed, AArch64::ST3WB_2D_fixed
};
return SelectVST(Node, 3, true, Opcodes);
}
case AArch64ISD::NEON_ST4_UPD: {
static const uint16_t Opcodes[] = {
AArch64::ST4WB_8B_fixed, AArch64::ST4WB_4H_fixed,
AArch64::ST4WB_2S_fixed, AArch64::ST1WB4V_1D_fixed,
AArch64::ST4WB_16B_fixed, AArch64::ST4WB_8H_fixed,
AArch64::ST4WB_4S_fixed, AArch64::ST4WB_2D_fixed
};
return SelectVST(Node, 4, true, Opcodes);
}
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN: {
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
@@ -780,56 +958,56 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
AArch64::LD1_2S, AArch64::LD1_1D,
AArch64::LD1_16B, AArch64::LD1_8H,
AArch64::LD1_4S, AArch64::LD1_2D };
return SelectVLD(Node, 1, Opcodes);
return SelectVLD(Node, 1, false, Opcodes);
}
case Intrinsic::arm_neon_vld2: {
static const uint16_t Opcodes[] = { AArch64::LD2_8B, AArch64::LD2_4H,
AArch64::LD2_2S, AArch64::LD1_2V_1D,
AArch64::LD2_16B, AArch64::LD2_8H,
AArch64::LD2_4S, AArch64::LD2_2D };
return SelectVLD(Node, 2, Opcodes);
return SelectVLD(Node, 2, false, Opcodes);
}
case Intrinsic::arm_neon_vld3: {
static const uint16_t Opcodes[] = { AArch64::LD3_8B, AArch64::LD3_4H,
AArch64::LD3_2S, AArch64::LD1_3V_1D,
AArch64::LD3_16B, AArch64::LD3_8H,
AArch64::LD3_4S, AArch64::LD3_2D };
return SelectVLD(Node, 3, Opcodes);
return SelectVLD(Node, 3, false, Opcodes);
}
case Intrinsic::arm_neon_vld4: {
static const uint16_t Opcodes[] = { AArch64::LD4_8B, AArch64::LD4_4H,
AArch64::LD4_2S, AArch64::LD1_4V_1D,
AArch64::LD4_16B, AArch64::LD4_8H,
AArch64::LD4_4S, AArch64::LD4_2D };
return SelectVLD(Node, 4, Opcodes);
return SelectVLD(Node, 4, false, Opcodes);
}
case Intrinsic::arm_neon_vst1: {
static const uint16_t Opcodes[] = { AArch64::ST1_8B, AArch64::ST1_4H,
AArch64::ST1_2S, AArch64::ST1_1D,
AArch64::ST1_16B, AArch64::ST1_8H,
AArch64::ST1_4S, AArch64::ST1_2D };
return SelectVST(Node, 1, Opcodes);
return SelectVST(Node, 1, false, Opcodes);
}
case Intrinsic::arm_neon_vst2: {
static const uint16_t Opcodes[] = { AArch64::ST2_8B, AArch64::ST2_4H,
AArch64::ST2_2S, AArch64::ST1_2V_1D,
AArch64::ST2_16B, AArch64::ST2_8H,
AArch64::ST2_4S, AArch64::ST2_2D };
return SelectVST(Node, 2, Opcodes);
return SelectVST(Node, 2, false, Opcodes);
}
case Intrinsic::arm_neon_vst3: {
static const uint16_t Opcodes[] = { AArch64::ST3_8B, AArch64::ST3_4H,
AArch64::ST3_2S, AArch64::ST1_3V_1D,
AArch64::ST3_16B, AArch64::ST3_8H,
AArch64::ST3_4S, AArch64::ST3_2D };
return SelectVST(Node, 3, Opcodes);
return SelectVST(Node, 3, false, Opcodes);
}
case Intrinsic::arm_neon_vst4: {
static const uint16_t Opcodes[] = { AArch64::ST4_8B, AArch64::ST4_4H,
AArch64::ST4_2S, AArch64::ST1_4V_1D,
AArch64::ST4_16B, AArch64::ST4_8H,
AArch64::ST4_4S, AArch64::ST4_2D };
return SelectVST(Node, 4, Opcodes);
return SelectVST(Node, 4, false, Opcodes);
}
}
break;


@@ -90,6 +90,8 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
setTargetDAGCombine(ISD::SHL);
setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
setTargetDAGCombine(ISD::INTRINSIC_VOID);
setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
// AArch64 does not have i1 loads, or much of anything for i1 really.
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
@@ -889,6 +891,22 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
return "AArch64ISD::NEON_VDUP";
case AArch64ISD::NEON_VDUPLANE:
return "AArch64ISD::NEON_VDUPLANE";
case AArch64ISD::NEON_LD1_UPD:
return "AArch64ISD::NEON_LD1_UPD";
case AArch64ISD::NEON_LD2_UPD:
return "AArch64ISD::NEON_LD2_UPD";
case AArch64ISD::NEON_LD3_UPD:
return "AArch64ISD::NEON_LD3_UPD";
case AArch64ISD::NEON_LD4_UPD:
return "AArch64ISD::NEON_LD4_UPD";
case AArch64ISD::NEON_ST1_UPD:
return "AArch64ISD::NEON_ST1_UPD";
case AArch64ISD::NEON_ST2_UPD:
return "AArch64ISD::NEON_ST2_UPD";
case AArch64ISD::NEON_ST3_UPD:
return "AArch64ISD::NEON_ST3_UPD";
case AArch64ISD::NEON_ST4_UPD:
return "AArch64ISD::NEON_ST4_UPD";
default:
return NULL;
}
@@ -3448,6 +3466,108 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
/// Target-specific DAG combine function for NEON load/store intrinsics
/// to merge base address updates.
static SDValue CombineBaseUpdate(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
return SDValue();
SelectionDAG &DAG = DCI.DAG;
unsigned AddrOpIdx = 2;
SDValue Addr = N->getOperand(AddrOpIdx);
// Search for a use of the address operand that is an increment.
for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
SDNode *User = *UI;
if (User->getOpcode() != ISD::ADD ||
UI.getUse().getResNo() != Addr.getResNo())
continue;
// Check that the add is independent of the load/store. Otherwise, folding
// it would create a cycle.
if (User->isPredecessorOf(N) || N->isPredecessorOf(User))
continue;
// Find the new opcode for the updating load/store.
bool isLoad = true;
unsigned NewOpc = 0;
unsigned NumVecs = 0;
unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
switch (IntNo) {
default: llvm_unreachable("unexpected intrinsic for Neon base update");
case Intrinsic::arm_neon_vld1: NewOpc = AArch64ISD::NEON_LD1_UPD;
NumVecs = 1; break;
case Intrinsic::arm_neon_vld2: NewOpc = AArch64ISD::NEON_LD2_UPD;
NumVecs = 2; break;
case Intrinsic::arm_neon_vld3: NewOpc = AArch64ISD::NEON_LD3_UPD;
NumVecs = 3; break;
case Intrinsic::arm_neon_vld4: NewOpc = AArch64ISD::NEON_LD4_UPD;
NumVecs = 4; break;
case Intrinsic::arm_neon_vst1: NewOpc = AArch64ISD::NEON_ST1_UPD;
NumVecs = 1; isLoad = false; break;
case Intrinsic::arm_neon_vst2: NewOpc = AArch64ISD::NEON_ST2_UPD;
NumVecs = 2; isLoad = false; break;
case Intrinsic::arm_neon_vst3: NewOpc = AArch64ISD::NEON_ST3_UPD;
NumVecs = 3; isLoad = false; break;
case Intrinsic::arm_neon_vst4: NewOpc = AArch64ISD::NEON_ST4_UPD;
NumVecs = 4; isLoad = false; break;
}
// Find the size of memory referenced by the load/store.
EVT VecTy;
if (isLoad)
VecTy = N->getValueType(0);
else
VecTy = N->getOperand(AddrOpIdx + 1).getValueType();
unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
// If the increment is a constant, it must match the memory ref size.
SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
uint32_t IncVal = CInc->getZExtValue();
if (IncVal != NumBytes)
continue;
Inc = DAG.getTargetConstant(IncVal, MVT::i32);
}
// Create the new updating load/store node.
EVT Tys[6];
unsigned NumResultVecs = (isLoad ? NumVecs : 0);
unsigned n;
for (n = 0; n < NumResultVecs; ++n)
Tys[n] = VecTy;
Tys[n++] = MVT::i64;
Tys[n] = MVT::Other;
SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs + 2);
SmallVector<SDValue, 8> Ops;
Ops.push_back(N->getOperand(0)); // incoming chain
Ops.push_back(N->getOperand(AddrOpIdx));
Ops.push_back(Inc);
for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) {
Ops.push_back(N->getOperand(i));
}
MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys,
Ops.data(), Ops.size(),
MemInt->getMemoryVT(),
MemInt->getMemOperand());
// Update the uses.
std::vector<SDValue> NewResults;
for (unsigned i = 0; i < NumResultVecs; ++i) {
NewResults.push_back(SDValue(UpdN.getNode(), i));
}
NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain
DCI.CombineTo(N, NewResults);
DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
break;
}
return SDValue();
}
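// A conceptual before/after sketch of this combine (hand-written; not real
// SelectionDAG dump syntax):
//
//   Before:  t1: v4i16,ch = INTRINSIC_W_CHAIN t0, arm_neon_vld1, taddr
//            t2: i64      = add taddr, 8
//   After:   t3: v4i16,i64,ch = NEON_LD1_UPD t0, taddr, 8
//
// Users of t1 are rewired to result 0 of the new node, and users of the add
// to the extra i64 result (the two CombineTo calls above).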
SDValue
AArch64TargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
@@ -3461,6 +3581,21 @@ AArch64TargetLowering::PerformDAGCombine,
return PerformShiftCombine(N, DCI, getSubtarget());
case ISD::INTRINSIC_WO_CHAIN:
return PerformIntrinsicCombine(N, DCI.DAG);
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN:
switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
case Intrinsic::arm_neon_vld1:
case Intrinsic::arm_neon_vld2:
case Intrinsic::arm_neon_vld3:
case Intrinsic::arm_neon_vld4:
case Intrinsic::arm_neon_vst1:
case Intrinsic::arm_neon_vst2:
case Intrinsic::arm_neon_vst3:
case Intrinsic::arm_neon_vst4:
return CombineBaseUpdate(N, DCI);
default:
break;
}
}
return SDValue();
}


@@ -142,7 +142,19 @@ namespace AArch64ISD {
NEON_VDUP,
// Vector dup by lane
NEON_VDUPLANE
NEON_VDUPLANE,
// NEON loads with post-increment base updates:
NEON_LD1_UPD = ISD::FIRST_TARGET_MEMORY_OPCODE,
NEON_LD2_UPD,
NEON_LD3_UPD,
NEON_LD4_UPD,
// NEON stores with post-increment base updates:
NEON_ST1_UPD,
NEON_ST2_UPD,
NEON_ST3_UPD,
NEON_ST4_UPD
};
}


@@ -120,6 +120,14 @@ class A64InstRdnm<dag outs, dag ins, string asmstr,
let Inst{20-16} = Rm;
}
class A64InstRtnm<dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
: A64InstRtn<outs, ins, asmstr, patterns, itin> {
bits<5> Rm;
let Inst{20-16} = Rm;
}
//===----------------------------------------------------------------------===//
//
// Actual A64 Instruction Formats
@@ -1216,6 +1224,24 @@ class NeonI_LdStMult<bit q, bit l, bits<4> opcode, bits<2> size,
// Inherit Rt in 4-0
}
// Format AdvSIMD vector load/store multiple N-element structure (post-index)
class NeonI_LdStMult_Post<bit q, bit l, bits<4> opcode, bits<2> size,
dag outs, dag ins, string asmstr,
list<dag> patterns, InstrItinClass itin>
: A64InstRtnm<outs, ins, asmstr, patterns, itin>
{
let Inst{31} = 0b0;
let Inst{30} = q;
let Inst{29-23} = 0b0011001;
let Inst{22} = l;
let Inst{21} = 0b0;
// Inherit Rm in 20-16
let Inst{15-12} = opcode;
let Inst{11-10} = size;
// Inherit Rn in 9-5
// Inherit Rt in 4-0
}
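// Putting the fields above together, the post-index encoding is:
//   | 31 | 30 | 29-23   | 22 | 21 | 20-16 | 15-12  | 11-10 | 9-5 | 4-0 |
//   |  0 |  q | 0011001 |  l |  0 |  Rm   | opcode | size  | Rn  | Rt  |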
// Format AdvSIMD 3 scalar registers with different type
class NeonI_Scalar3Diff<bit u, bits<2> size, bits<4> opcode,


@@ -3088,6 +3088,230 @@ def ST1_4V_1D : NeonI_STVList<0, 0b0010, 0b11, VQuad1D_operand, "st1">;
// End of vector load/store multiple N-element structure (class SIMD lselem)
// The following are post-index vector load/store multiple N-element
// structure instructions (class SIMD lselem-post)
def exact8_asmoperand : AsmOperandClass {
let Name = "Exact8";
let PredicateMethod = "isExactImm<8>";
let RenderMethod = "addImmOperands";
}
def uimm_exact8 : Operand<i32>, ImmLeaf<i32, [{return Imm == 8;}]> {
let ParserMatchClass = exact8_asmoperand;
}
def exact16_asmoperand : AsmOperandClass {
let Name = "Exact16";
let PredicateMethod = "isExactImm<16>";
let RenderMethod = "addImmOperands";
}
def uimm_exact16 : Operand<i32>, ImmLeaf<i32, [{return Imm == 16;}]> {
let ParserMatchClass = exact16_asmoperand;
}
def exact24_asmoperand : AsmOperandClass {
let Name = "Exact24";
let PredicateMethod = "isExactImm<24>";
let RenderMethod = "addImmOperands";
}
def uimm_exact24 : Operand<i32>, ImmLeaf<i32, [{return Imm == 24;}]> {
let ParserMatchClass = exact24_asmoperand;
}
def exact32_asmoperand : AsmOperandClass {
let Name = "Exact32";
let PredicateMethod = "isExactImm<32>";
let RenderMethod = "addImmOperands";
}
def uimm_exact32 : Operand<i32>, ImmLeaf<i32, [{return Imm == 32;}]> {
let ParserMatchClass = exact32_asmoperand;
}
def exact48_asmoperand : AsmOperandClass {
let Name = "Exact48";
let PredicateMethod = "isExactImm<48>";
let RenderMethod = "addImmOperands";
}
def uimm_exact48 : Operand<i32>, ImmLeaf<i32, [{return Imm == 48;}]> {
let ParserMatchClass = exact48_asmoperand;
}
def exact64_asmoperand : AsmOperandClass {
let Name = "Exact64";
let PredicateMethod = "isExactImm<64>";
let RenderMethod = "addImmOperands";
}
def uimm_exact64 : Operand<i32>, ImmLeaf<i32, [{return Imm == 64;}]> {
let ParserMatchClass = exact64_asmoperand;
}
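// The exact-value immediates above exist because, in the post-index immediate
// form (Rm == 0b11111), the increment is not an encoded field: the base is
// always advanced by the size of the whole vector list,
//   increment = NumVecs * (128-bit registers ? 16 : 8) bytes,
// so each form accepts exactly one immediate, e.g.:
//   ld1 {v31.2s},        [sp], #8    // 1 x 8 bytes
//   ld2 {v31.4s, v0.4s}, [sp], #32   // 2 x 16 bytes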
multiclass NeonI_LDWB_VList<bit q, bits<4> opcode, bits<2> size,
RegisterOperand VecList, Operand ImmTy,
string asmop> {
let Constraints = "$Rn = $wb", mayLoad = 1, neverHasSideEffects = 1,
DecoderMethod = "DecodeVLDSTPostInstruction" in {
def _fixed : NeonI_LdStMult_Post<q, 1, opcode, size,
(outs VecList:$Rt, GPR64xsp:$wb),
(ins GPR64xsp:$Rn, ImmTy:$amt),
asmop # "\t$Rt, [$Rn], $amt",
[],
NoItinerary> {
let Rm = 0b11111;
}
def _register : NeonI_LdStMult_Post<q, 1, opcode, size,
(outs VecList:$Rt, GPR64xsp:$wb),
(ins GPR64xsp:$Rn, GPR64noxzr:$Rm),
asmop # "\t$Rt, [$Rn], $Rm",
[],
NoItinerary>;
}
}
multiclass LDWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
Operand ImmTy2, string asmop> {
defm _8B : NeonI_LDWB_VList<0, opcode, 0b00,
!cast<RegisterOperand>(List # "8B_operand"),
ImmTy, asmop>;
defm _4H : NeonI_LDWB_VList<0, opcode, 0b01,
!cast<RegisterOperand>(List # "4H_operand"),
ImmTy, asmop>;
defm _2S : NeonI_LDWB_VList<0, opcode, 0b10,
!cast<RegisterOperand>(List # "2S_operand"),
ImmTy, asmop>;
defm _16B : NeonI_LDWB_VList<1, opcode, 0b00,
!cast<RegisterOperand>(List # "16B_operand"),
ImmTy2, asmop>;
defm _8H : NeonI_LDWB_VList<1, opcode, 0b01,
!cast<RegisterOperand>(List # "8H_operand"),
ImmTy2, asmop>;
defm _4S : NeonI_LDWB_VList<1, opcode, 0b10,
!cast<RegisterOperand>(List # "4S_operand"),
ImmTy2, asmop>;
defm _2D : NeonI_LDWB_VList<1, opcode, 0b11,
!cast<RegisterOperand>(List # "2D_operand"),
ImmTy2, asmop>;
}
// Post-index load multiple N-element structures to N registers (N = 1,2,3,4)
defm LD1WB : LDWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "ld1">;
defm LD1WB_1D : NeonI_LDWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
"ld1">;
defm LD2WB : LDWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32, "ld2">;
defm LD3WB : LDWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
"ld3">;
defm LD4WB : LDWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "ld4">;
// Post-index load multiple 1-element structures to N consecutive registers
// (N = 2,3,4)
defm LD1WB2V : LDWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
"ld1">;
defm LD1WB2V_1D : NeonI_LDWB_VList<0, 0b1010, 0b11, VPair1D_operand,
uimm_exact16, "ld1">;
defm LD1WB3V : LDWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
"ld1">;
defm LD1WB3V_1D : NeonI_LDWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
uimm_exact24, "ld1">;
defm LD1WB4V : LDWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
"ld1">;
defm LD1WB4V_1D : NeonI_LDWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
uimm_exact32, "ld1">;
multiclass NeonI_STWB_VList<bit q, bits<4> opcode, bits<2> size,
RegisterOperand VecList, Operand ImmTy,
string asmop> {
let Constraints = "$Rn = $wb", mayStore = 1, neverHasSideEffects = 1,
DecoderMethod = "DecodeVLDSTPostInstruction" in {
def _fixed : NeonI_LdStMult_Post<q, 0, opcode, size,
(outs GPR64xsp:$wb),
(ins GPR64xsp:$Rn, ImmTy:$amt, VecList:$Rt),
asmop # "\t$Rt, [$Rn], $amt",
[],
NoItinerary> {
let Rm = 0b11111;
}
def _register : NeonI_LdStMult_Post<q, 0, opcode, size,
(outs GPR64xsp:$wb),
(ins GPR64xsp:$Rn, GPR64noxzr:$Rm, VecList:$Rt),
asmop # "\t$Rt, [$Rn], $Rm",
[],
NoItinerary>;
}
}
multiclass STWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
Operand ImmTy2, string asmop> {
defm _8B : NeonI_STWB_VList<0, opcode, 0b00,
!cast<RegisterOperand>(List # "8B_operand"), ImmTy, asmop>;
defm _4H : NeonI_STWB_VList<0, opcode, 0b01,
!cast<RegisterOperand>(List # "4H_operand"),
ImmTy, asmop>;
defm _2S : NeonI_STWB_VList<0, opcode, 0b10,
!cast<RegisterOperand>(List # "2S_operand"),
ImmTy, asmop>;
defm _16B : NeonI_STWB_VList<1, opcode, 0b00,
!cast<RegisterOperand>(List # "16B_operand"),
ImmTy2, asmop>;
defm _8H : NeonI_STWB_VList<1, opcode, 0b01,
!cast<RegisterOperand>(List # "8H_operand"),
ImmTy2, asmop>;
defm _4S : NeonI_STWB_VList<1, opcode, 0b10,
!cast<RegisterOperand>(List # "4S_operand"),
ImmTy2, asmop>;
defm _2D : NeonI_STWB_VList<1, opcode, 0b11,
!cast<RegisterOperand>(List # "2D_operand"),
ImmTy2, asmop>;
}
// Post-index store multiple N-element structures from N registers (N = 1,2,3,4)
defm ST1WB : STWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "st1">;
defm ST1WB_1D : NeonI_STWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
"st1">;
defm ST2WB : STWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32, "st2">;
defm ST3WB : STWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
"st3">;
defm ST4WB : STWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "st4">;
// Post-index store multiple 1-element structures from N consecutive registers
// (N = 2,3,4)
defm ST1WB2V : STWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
"st1">;
defm ST1WB2V_1D : NeonI_STWB_VList<0, 0b1010, 0b11, VPair1D_operand,
uimm_exact16, "st1">;
defm ST1WB3V : STWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
"st1">;
defm ST1WB3V_1D : NeonI_STWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
uimm_exact24, "st1">;
defm ST1WB4V : STWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
"st1">;
defm ST1WB4V_1D : NeonI_STWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
uimm_exact32, "st1">;
// End of post-index vector load/store multiple N-element structure
// (class SIMD lselem-post)
// Scalar Three Same
class NeonI_Scalar3Same_size<bit u, bits<2> size, bits<5> opcode, string asmop,


@@ -815,6 +815,17 @@ public:
return true;
}
// Return true if the immediate value equals N.
template<int N>
bool isExactImm() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
return CE->getValue() == N;
}
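// For instance, the exact8_asmoperand class added in the .td changes sets
// PredicateMethod = "isExactImm<8>", so the generated matcher effectively
// does (sketch):
//   bool Matches = Op.isExactImm<8>(); // only the literal #8 is accepted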
static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
unsigned ShiftAmount,
bool ImplicitAmount,


@@ -89,6 +89,11 @@ static DecodeStatus DecodeFPR128LoRegisterClass(llvm::MCInst &Inst,
unsigned RegNo, uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeGPR64noxzrRegisterClass(llvm::MCInst &Inst,
unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeDPairRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
@@ -223,6 +228,9 @@ static DecodeStatus DecodeSingleIndexedInstruction(llvm::MCInst &Inst,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeVLDSTPostInstruction(MCInst &Inst, unsigned Val,
uint64_t Address,
const void *Decoder);
static bool Check(DecodeStatus &Out, DecodeStatus In);
@@ -392,6 +400,18 @@ DecodeFPR128LoRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
return DecodeFPR128RegisterClass(Inst, RegNo, Address, Decoder);
}
static DecodeStatus DecodeGPR64noxzrRegisterClass(llvm::MCInst &Inst,
unsigned RegNo,
uint64_t Address,
const void *Decoder) {
if (RegNo > 30)
return MCDisassembler::Fail;
uint16_t Register = getReg(Decoder, AArch64::GPR64noxzrRegClassID, RegNo);
Inst.addOperand(MCOperand::CreateReg(Register));
return MCDisassembler::Success;
}
static DecodeStatus DecodeRegisterClassByID(llvm::MCInst &Inst, unsigned RegNo,
unsigned RegID,
const void *Decoder) {
@@ -984,3 +1004,91 @@ DecodeNeonMovImmShiftOperand(llvm::MCInst &Inst, unsigned ShiftAmount,
Inst.addOperand(MCOperand::CreateImm(ShiftAmount));
return MCDisassembler::Success;
}
// Decode post-index vector load/store instructions.
// This is necessary as we need to decode Rm: if Rm == 0b11111, the last
// operand is an immediate equal to the length of the vector list in bytes;
// otherwise Rm is decoded as a GPR64noxzr register.
static DecodeStatus DecodeVLDSTPostInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address,
const void *Decoder) {
unsigned Rt = fieldFromInstruction(Insn, 0, 5);
unsigned Rn = fieldFromInstruction(Insn, 5, 5);
unsigned Rm = fieldFromInstruction(Insn, 16, 5);
unsigned Opcode = fieldFromInstruction(Insn, 12, 4);
unsigned IsLoad = fieldFromInstruction(Insn, 22, 1);
// 0 for a 64-bit vector list, 1 for a 128-bit vector list
unsigned Is128BitVec = fieldFromInstruction(Insn, 30, 1);
unsigned NumVecs;
switch (Opcode) {
case 0: // ld4/st4
case 2: // ld1/st1 with 4 vectors
NumVecs = 4; break;
case 4: // ld3/st3
case 6: // ld1/st1 with 3 vectors
NumVecs = 3; break;
case 7: // ld1/st1 with 1 vector
NumVecs = 1; break;
case 8: // ld2/st2
case 10: // ld1/st1 with 2 vectors
NumVecs = 2; break;
default:
llvm_unreachable("Invalid opcode for post-index load/store instructions");
}
// Decode vector list of 1/2/3/4 vectors for load instructions.
if (IsLoad) {
switch (NumVecs) {
case 1:
Is128BitVec ? DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder)
: DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder);
break;
case 2:
Is128BitVec ? DecodeQPairRegisterClass(Inst, Rt, Address, Decoder)
: DecodeDPairRegisterClass(Inst, Rt, Address, Decoder);
break;
case 3:
Is128BitVec ? DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder)
: DecodeDTripleRegisterClass(Inst, Rt, Address, Decoder);
break;
case 4:
Is128BitVec ? DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder)
: DecodeDQuadRegisterClass(Inst, Rt, Address, Decoder);
break;
}
}
// Decode the write-back result and the base register; both are equal to Rn,
// since Rn appears as both an output ($wb) and an input ($Rn).
DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
if (Rm == 31) // If Rm is 0b11111, add the vector list length in bytes
Inst.addOperand(MCOperand::CreateImm(NumVecs * (Is128BitVec ? 16 : 8)));
else // Decode Rm
DecodeGPR64noxzrRegisterClass(Inst, Rm, Address, Decoder);
// Decode vector list of 1/2/3/4 vectors for store instructions.
if (!IsLoad) {
switch (NumVecs) {
case 1:
Is128BitVec ? DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder)
: DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder);
break;
case 2:
Is128BitVec ? DecodeQPairRegisterClass(Inst, Rt, Address, Decoder)
: DecodeDPairRegisterClass(Inst, Rt, Address, Decoder);
break;
case 3:
Is128BitVec ? DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder)
: DecodeDTripleRegisterClass(Inst, Rt, Address, Decoder);
break;
case 4:
Is128BitVec ? DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder)
: DecodeDQuadRegisterClass(Inst, Rt, Address, Decoder);
break;
}
}
return MCDisassembler::Success;
}
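// Worked example (hand-decoded; the encoding also appears in the MC tests
// below). For the word 0x4cdf7c00, i.e. the bytes [0x00,0x7c,0xdf,0x4c]:
//   Rt = insn[4:0] = 0, Rn = insn[9:5] = 0, Rm = insn[20:16] = 0b11111,
//   opcode = insn[15:12] = 7 -> NumVecs = 1, size = insn[11:10] = 0b11,
//   l = insn[22] = 1 (load), q = insn[30] = 1 (128-bit list)
// => ld1 {v0.2d}, [x0], #16, with the immediate = 1 * 16 bytes.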


@@ -0,0 +1,200 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
define <4 x i16> @test_vld1_fx_update(i16** %ptr) nounwind {
; CHECK: test_vld1_fx_update
; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}], #8
%A = load i16** %ptr
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 2)
%tmp2 = getelementptr i16* %A, i32 4
store i16* %tmp2, i16** %ptr
ret <4 x i16> %tmp1
}
define <2 x i32> @test_vld1_reg_update(i32** %ptr, i32 %inc) nounwind {
; CHECK: test_vld1_reg_update
; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
%A = load i32** %ptr
%tmp0 = bitcast i32* %A to i8*
%tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 4)
%tmp2 = getelementptr i32* %A, i32 %inc
store i32* %tmp2, i32** %ptr
ret <2 x i32> %tmp1
}
define <2 x float> @test_vld2_fx_update(float** %ptr) nounwind {
; CHECK: test_vld2_fx_update
; CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], #16
%A = load float** %ptr
%tmp0 = bitcast float* %A to i8*
%tmp1 = call { <2 x float>, <2 x float> } @llvm.arm.neon.vld2.v2f32(i8* %tmp0, i32 4)
%tmp2 = extractvalue { <2 x float>, <2 x float> } %tmp1, 0
%tmp3 = getelementptr float* %A, i32 4
store float* %tmp3, float** %ptr
ret <2 x float> %tmp2
}
define <16 x i8> @test_vld2_reg_update(i8** %ptr, i32 %inc) nounwind {
; CHECK: test_vld2_reg_update
; CHECK: ld2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
%A = load i8** %ptr
%tmp0 = call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8* %A, i32 1)
%tmp1 = extractvalue { <16 x i8>, <16 x i8> } %tmp0, 0
%tmp2 = getelementptr i8* %A, i32 %inc
store i8* %tmp2, i8** %ptr
ret <16 x i8> %tmp1
}
define <4 x i32> @test_vld3_fx_update(i32** %ptr) nounwind {
; CHECK: test_vld3_fx_update
; CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], #48
%A = load i32** %ptr
%tmp0 = bitcast i32* %A to i8*
%tmp1 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32(i8* %tmp0, i32 4)
%tmp2 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %tmp1, 0
%tmp3 = getelementptr i32* %A, i32 12
store i32* %tmp3, i32** %ptr
ret <4 x i32> %tmp2
}
define <4 x i16> @test_vld3_reg_update(i16** %ptr, i32 %inc) nounwind {
; CHECK: test_vld3_reg_update
; CHECK: ld3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
%A = load i16** %ptr
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8* %tmp0, i32 2)
%tmp2 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %tmp1, 0
%tmp3 = getelementptr i16* %A, i32 %inc
store i16* %tmp3, i16** %ptr
ret <4 x i16> %tmp2
}
define <8 x i16> @test_vld4_fx_update(i16** %ptr) nounwind {
; CHECK: test_vld4_fx_update
; CHECK: ld4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], #64
%A = load i16** %ptr
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4.v8i16(i8* %tmp0, i32 8)
%tmp2 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %tmp1, 0
%tmp3 = getelementptr i16* %A, i32 32
store i16* %tmp3, i16** %ptr
ret <8 x i16> %tmp2
}
; Check for a post-increment updating load with register increment.
define <8 x i8> @test_vld4_reg_update(i8** %ptr, i32 %inc) nounwind {
; CHECK: test_vld4_reg_update
; CHECK: ld4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
%A = load i8** %ptr
%tmp0 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8* %A, i32 1)
%tmp1 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %tmp0, 0
%tmp2 = getelementptr i8* %A, i32 %inc
store i8* %tmp2, i8** %ptr
ret <8 x i8> %tmp1
}
; Check for a post-increment updating store.
define void @test_vst1_fx_update(float** %ptr, <2 x float> %B) nounwind {
; CHECK: test_vst1_fx_update
; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}], #8
%A = load float** %ptr
%tmp0 = bitcast float* %A to i8*
call void @llvm.arm.neon.vst1.v2f32(i8* %tmp0, <2 x float> %B, i32 4)
%tmp2 = getelementptr float* %A, i32 2
store float* %tmp2, float** %ptr
ret void
}
define void @test_vst1_reg_update(i16** %ptr, <8 x i16> %B, i32 %inc) nounwind {
; CHECK: test_vst1_reg_update
; CHECK: st1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}], x{{[0-9]+}}
%A = load i16** %ptr
%tmp0 = bitcast i16* %A to i8*
call void @llvm.arm.neon.vst1.v8i16(i8* %tmp0, <8 x i16> %B, i32 2)
%tmp1 = getelementptr i16* %A, i32 %inc
store i16* %tmp1, i16** %ptr
ret void
}
define void @test_vst2_fx_update(i64** %ptr, <1 x i64> %B) nounwind {
; CHECK: test_vst2_fx_update
; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}], #16
%A = load i64** %ptr
%tmp0 = bitcast i64* %A to i8*
call void @llvm.arm.neon.vst2.v1i64(i8* %tmp0, <1 x i64> %B, <1 x i64> %B, i32 8)
%tmp1 = getelementptr i64* %A, i32 2
store i64* %tmp1, i64** %ptr
ret void
}
define void @test_vst2_reg_update(i8** %ptr, <8 x i8> %B, i32 %inc) nounwind {
; CHECK: test_vst2_reg_update
; CHECK: st2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}], x{{[0-9]+}}
%A = load i8** %ptr
call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %B, <8 x i8> %B, i32 4)
%tmp0 = getelementptr i8* %A, i32 %inc
store i8* %tmp0, i8** %ptr
ret void
}
define void @test_vst3_fx_update(i32** %ptr, <2 x i32> %B) nounwind {
; CHECK: test_vst3_fx_update
; CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}], #24
%A = load i32** %ptr
%tmp0 = bitcast i32* %A to i8*
call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %B, <2 x i32> %B, <2 x i32> %B, i32 4)
%tmp1 = getelementptr i32* %A, i32 6
store i32* %tmp1, i32** %ptr
ret void
}
define void @test_vst3_reg_update(i16** %ptr, <8 x i16> %B, i32 %inc) nounwind {
; CHECK: test_vst3_reg_update
; CHECK: st3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}], x{{[0-9]+}}
%A = load i16** %ptr
%tmp0 = bitcast i16* %A to i8*
call void @llvm.arm.neon.vst3.v8i16(i8* %tmp0, <8 x i16> %B, <8 x i16> %B, <8 x i16> %B, i32 2)
%tmp1 = getelementptr i16* %A, i32 %inc
store i16* %tmp1, i16** %ptr
ret void
}
define void @test_vst4_fx_update(float** %ptr, <4 x float> %B) nounwind {
; CHECK: test_vst4_fx_update
; CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}], #64
%A = load float** %ptr
%tmp0 = bitcast float* %A to i8*
call void @llvm.arm.neon.vst4.v4f32(i8* %tmp0, <4 x float> %B, <4 x float> %B, <4 x float> %B, <4 x float> %B, i32 4)
%tmp1 = getelementptr float* %A, i32 16
store float* %tmp1, float** %ptr
ret void
}
define void @test_vst4_reg_update(i8** %ptr, <8 x i8> %B, i32 %inc) nounwind {
; CHECK: test_vst4_reg_update
; CHECK: st4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}], x{{[0-9]+}}
%A = load i8** %ptr
call void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %B, <8 x i8> %B, <8 x i8> %B, <8 x i8> %B, i32 1)
%tmp0 = getelementptr i8* %A, i32 %inc
store i8* %tmp0, i8** %ptr
ret void
}
declare <4 x i16> @llvm.arm.neon.vld1.v4i16(i8*, i32)
declare <2 x i32> @llvm.arm.neon.vld1.v2i32(i8*, i32)
declare { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8*, i32)
declare { <2 x float>, <2 x float> } @llvm.arm.neon.vld2.v2f32(i8*, i32)
declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8*, i32)
declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32(i8*, i32)
declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4.v8i16(i8*, i32)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8*, i32)
declare void @llvm.arm.neon.vst1.v2f32(i8*, <2 x float>, i32)
declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32)
declare void @llvm.arm.neon.vst2.v1i64(i8*, <1 x i64>, <1 x i64>, i32)
declare void @llvm.arm.neon.vst2.v8i8(i8*, <8 x i8>, <8 x i8>, i32)
declare void @llvm.arm.neon.vst3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32)
declare void @llvm.arm.neon.vst3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32)
declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32)
declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)


@@ -4102,6 +4102,62 @@
// CHECK-ERROR: st4 {v31.2s-v1.2s}, [x31]
// CHECK-ERROR: ^
//----------------------------------------------------------------------
// Vector post-index load/store multiple N-element structure
// (class SIMD lselem-post)
//----------------------------------------------------------------------
ld1 {v0.16b}, [x0], #8
ld1 {v0.8h, v1.16h}, [x0], x1
ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], #24
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: ld1 {v0.16b}, [x0], #8
// CHECK-ERROR: ^
// CHECK-ERROR: error: expected vector type register
// CHECK-ERROR: ld1 {v0.8h, v1.16h}, [x0], x1
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], #24
// CHECK-ERROR: ^
ld2 {v0.16b, v1.16b}, [x0], #16
ld3 {v5.2s, v6.2s, v7.2s}, [x1], #48
ld4 {v31.2d, v0.2d, v1.2d, v2.1d}, [x3], x1
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: ld2 {v0.16b, v1.16b}, [x0], #16
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: ld3 {v5.2s, v6.2s, v7.2s}, [x1], #48
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid space between two vectors
// CHECK-ERROR: ld4 {v31.2d, v0.2d, v1.2d, v2.1d}, [x3], x1
// CHECK-ERROR: ^
st1 {v0.16b}, [x0], #8
st1 {v0.8h, v1.16h}, [x0], x1
st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], #24
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: st1 {v0.16b}, [x0], #8
// CHECK-ERROR: ^
// CHECK-ERROR: error: expected vector type register
// CHECK-ERROR: st1 {v0.8h, v1.16h}, [x0], x1
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], #24
// CHECK-ERROR: ^
st2 {v0.16b, v1.16b}, [x0], #16
st3 {v5.2s, v6.2s, v7.2s}, [x1], #48
st4 {v31.2d, v0.2d, v1.2d, v2.1d}, [x3], x1
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: st2 {v0.16b, v1.16b}, [x0], #16
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: st3 {v5.2s, v6.2s, v7.2s}, [x1], #48
// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid space between two vectors
// CHECK-ERROR: st4 {v31.2d, v0.2d, v1.2d, v2.1d}, [x3], x1
// CHECK-ERROR: ^
ins v2.b[16], w1
ins v7.h[8], w14
ins v20.s[5], w30


@@ -0,0 +1,389 @@
// RUN: llvm-mc -triple=aarch64 -mattr=+neon -show-encoding < %s | FileCheck %s
// Check that the assembler can handle the documented syntax for AArch64
//------------------------------------------------------------------------------
// Load multiple 1-element structures from one register (post-index)
//------------------------------------------------------------------------------
ld1 {v0.16b}, [x0], x1
ld1 {v15.8h}, [x15], x2
ld1 {v31.4s}, [sp], #16
ld1 {v0.2d}, [x0], #16
ld1 {v0.8b}, [x0], x2
ld1 {v15.4h}, [x15], x3
ld1 {v31.2s}, [sp], #8
ld1 {v0.1d}, [x0], #8
// CHECK: ld1 {v0.16b}, [x0], x1
// CHECK: // encoding: [0x00,0x70,0xc1,0x4c]
// CHECK: ld1 {v15.8h}, [x15], x2
// CHECK: // encoding: [0xef,0x75,0xc2,0x4c]
// CHECK: ld1 {v31.4s}, [sp], #16
// CHECK: // encoding: [0xff,0x7b,0xdf,0x4c]
// CHECK: ld1 {v0.2d}, [x0], #16
// CHECK: // encoding: [0x00,0x7c,0xdf,0x4c]
// CHECK: ld1 {v0.8b}, [x0], x2
// CHECK: // encoding: [0x00,0x70,0xc2,0x0c]
// CHECK: ld1 {v15.4h}, [x15], x3
// CHECK: // encoding: [0xef,0x75,0xc3,0x0c]
// CHECK: ld1 {v31.2s}, [sp], #8
// CHECK: // encoding: [0xff,0x7b,0xdf,0x0c]
// CHECK: ld1 {v0.1d}, [x0], #8
// CHECK: // encoding: [0x00,0x7c,0xdf,0x0c]
//------------------------------------------------------------------------------
// Load multiple 1-element structures from two consecutive registers
// (post-index)
//------------------------------------------------------------------------------
ld1 {v0.16b, v1.16b}, [x0], x1
ld1 {v15.8h, v16.8h}, [x15], x2
ld1 {v31.4s, v0.4s}, [sp], #32
ld1 {v0.2d, v1.2d}, [x0], #32
ld1 {v0.8b, v1.8b}, [x0], x2
ld1 {v15.4h, v16.4h}, [x15], x3
ld1 {v31.2s, v0.2s}, [sp], #16
ld1 {v0.1d, v1.1d}, [x0], #16
// CHECK: ld1 {v0.16b, v1.16b}, [x0], x1
// CHECK: // encoding: [0x00,0xa0,0xc1,0x4c]
// CHECK: ld1 {v15.8h, v16.8h}, [x15], x2
// CHECK: // encoding: [0xef,0xa5,0xc2,0x4c]
// CHECK: ld1 {v31.4s, v0.4s}, [sp], #32
// CHECK: // encoding: [0xff,0xab,0xdf,0x4c]
// CHECK: ld1 {v0.2d, v1.2d}, [x0], #32
// CHECK: // encoding: [0x00,0xac,0xdf,0x4c]
// CHECK: ld1 {v0.8b, v1.8b}, [x0], x2
// CHECK: // encoding: [0x00,0xa0,0xc2,0x0c]
// CHECK: ld1 {v15.4h, v16.4h}, [x15], x3
// CHECK: // encoding: [0xef,0xa5,0xc3,0x0c]
// CHECK: ld1 {v31.2s, v0.2s}, [sp], #16
// CHECK: // encoding: [0xff,0xab,0xdf,0x0c]
// CHECK: ld1 {v0.1d, v1.1d}, [x0], #16
// CHECK: // encoding: [0x00,0xac,0xdf,0x0c]
//------------------------------------------------------------------------------
// Load multiple 1-element structures from three consecutive registers
// (post-index)
//------------------------------------------------------------------------------
ld1 {v0.16b, v1.16b, v2.16b}, [x0], x1
ld1 {v15.8h, v16.8h, v17.8h}, [x15], x2
ld1 {v31.4s, v0.4s, v1.4s}, [sp], #48
ld1 {v0.2d, v1.2d, v2.2d}, [x0], #48
ld1 {v0.8b, v1.8b, v2.8b}, [x0], x2
ld1 {v15.4h, v16.4h, v17.4h}, [x15], x3
ld1 {v31.2s, v0.2s, v1.2s}, [sp], #24
ld1 {v0.1d, v1.1d, v2.1d}, [x0], #24
// CHECK: ld1 {v0.16b, v1.16b, v2.16b}, [x0], x1
// CHECK: // encoding: [0x00,0x60,0xc1,0x4c]
// CHECK: ld1 {v15.8h, v16.8h, v17.8h}, [x15], x2
// CHECK: // encoding: [0xef,0x65,0xc2,0x4c]
// CHECK: ld1 {v31.4s, v0.4s, v1.4s}, [sp], #48
// CHECK: // encoding: [0xff,0x6b,0xdf,0x4c]
// CHECK: ld1 {v0.2d, v1.2d, v2.2d}, [x0], #48
// CHECK: // encoding: [0x00,0x6c,0xdf,0x4c]
// CHECK: ld1 {v0.8b, v1.8b, v2.8b}, [x0], x2
// CHECK: // encoding: [0x00,0x60,0xc2,0x0c]
// CHECK: ld1 {v15.4h, v16.4h, v17.4h}, [x15], x3
// CHECK: // encoding: [0xef,0x65,0xc3,0x0c]
// CHECK: ld1 {v31.2s, v0.2s, v1.2s}, [sp], #24
// CHECK: // encoding: [0xff,0x6b,0xdf,0x0c]
// CHECK: ld1 {v0.1d, v1.1d, v2.1d}, [x0], #24
// CHECK: // encoding: [0x00,0x6c,0xdf,0x0c]
//------------------------------------------------------------------------------
// Load multiple 1-element structures from four consecutive registers
// (post-index)
//------------------------------------------------------------------------------
ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
ld1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], x2
ld1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64
ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
ld1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x4
ld1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], #32
ld1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0], #32
// CHECK: ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
// CHECK: // encoding: [0x00,0x20,0xc1,0x4c]
// CHECK: ld1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], x2
// CHECK: // encoding: [0xef,0x25,0xc2,0x4c]
// CHECK: ld1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
// CHECK: // encoding: [0xff,0x2b,0xdf,0x4c]
// CHECK: ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64
// CHECK: // encoding: [0x00,0x2c,0xdf,0x4c]
// CHECK: ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
// CHECK: // encoding: [0x00,0x20,0xc3,0x0c]
// CHECK: ld1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x4
// CHECK: // encoding: [0xef,0x25,0xc4,0x0c]
// CHECK: ld1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], #32
// CHECK: // encoding: [0xff,0x2b,0xdf,0x0c]
// CHECK: ld1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0], #32
// CHECK: // encoding: [0x00,0x2c,0xdf,0x0c]
//------------------------------------------------------------------------------
// Load multiple 2-element structures from two consecutive registers
// (post-index)
//------------------------------------------------------------------------------
ld2 {v0.16b, v1.16b}, [x0], x1
ld2 {v15.8h, v16.8h}, [x15], x2
ld2 {v31.4s, v0.4s}, [sp], #32
ld2 {v0.2d, v1.2d}, [x0], #32
ld2 {v0.8b, v1.8b}, [x0], x2
ld2 {v15.4h, v16.4h}, [x15], x3
ld2 {v31.2s, v0.2s}, [sp], #16
// CHECK: ld2 {v0.16b, v1.16b}, [x0], x1
// CHECK: // encoding: [0x00,0x80,0xc1,0x4c]
// CHECK: ld2 {v15.8h, v16.8h}, [x15], x2
// CHECK: // encoding: [0xef,0x85,0xc2,0x4c]
// CHECK: ld2 {v31.4s, v0.4s}, [sp], #32
// CHECK: // encoding: [0xff,0x8b,0xdf,0x4c]
// CHECK: ld2 {v0.2d, v1.2d}, [x0], #32
// CHECK: // encoding: [0x00,0x8c,0xdf,0x4c]
// CHECK: ld2 {v0.8b, v1.8b}, [x0], x2
// CHECK: // encoding: [0x00,0x80,0xc2,0x0c]
// CHECK: ld2 {v15.4h, v16.4h}, [x15], x3
// CHECK: // encoding: [0xef,0x85,0xc3,0x0c]
// CHECK: ld2 {v31.2s, v0.2s}, [sp], #16
// CHECK: // encoding: [0xff,0x8b,0xdf,0x0c]
//------------------------------------------------------------------------------
// Load multiple 3-element structures from three consecutive registers
// (post-index)
//------------------------------------------------------------------------------
ld3 {v0.16b, v1.16b, v2.16b}, [x0], x1
ld3 {v15.8h, v16.8h, v17.8h}, [x15], x2
ld3 {v31.4s, v0.4s, v1.4s}, [sp], #48
ld3 {v0.2d, v1.2d, v2.2d}, [x0], #48
ld3 {v0.8b, v1.8b, v2.8b}, [x0], x2
ld3 {v15.4h, v16.4h, v17.4h}, [x15], x3
ld3 {v31.2s, v0.2s, v1.2s}, [sp], #24
// CHECK: ld3 {v0.16b, v1.16b, v2.16b}, [x0], x1
// CHECK: // encoding: [0x00,0x40,0xc1,0x4c]
// CHECK: ld3 {v15.8h, v16.8h, v17.8h}, [x15], x2
// CHECK: // encoding: [0xef,0x45,0xc2,0x4c]
// CHECK: ld3 {v31.4s, v0.4s, v1.4s}, [sp], #48
// CHECK: // encoding: [0xff,0x4b,0xdf,0x4c]
// CHECK: ld3 {v0.2d, v1.2d, v2.2d}, [x0], #48
// CHECK: // encoding: [0x00,0x4c,0xdf,0x4c]
// CHECK: ld3 {v0.8b, v1.8b, v2.8b}, [x0], x2
// CHECK: // encoding: [0x00,0x40,0xc2,0x0c]
// CHECK: ld3 {v15.4h, v16.4h, v17.4h}, [x15], x3
// CHECK: // encoding: [0xef,0x45,0xc3,0x0c]
// CHECK: ld3 {v31.2s, v0.2s, v1.2s}, [sp], #24
// CHECK: // encoding: [0xff,0x4b,0xdf,0x0c]
//------------------------------------------------------------------------------
// Load multiple 4-element structures from four consecutive registers
// (post-index)
//------------------------------------------------------------------------------
ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
ld4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], x2
ld4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
ld4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64
ld4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
ld4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x4
ld4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], #32
// CHECK: ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
// CHECK: // encoding: [0x00,0x00,0xc1,0x4c]
// CHECK: ld4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], x2
// CHECK: // encoding: [0xef,0x05,0xc2,0x4c]
// CHECK: ld4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
// CHECK: // encoding: [0xff,0x0b,0xdf,0x4c]
// CHECK: ld4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64
// CHECK: // encoding: [0x00,0x0c,0xdf,0x4c]
// CHECK: ld4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
// CHECK: // encoding: [0x00,0x00,0xc3,0x0c]
// CHECK: ld4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x4
// CHECK: // encoding: [0xef,0x05,0xc4,0x0c]
// CHECK: ld4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], #32
// CHECK: // encoding: [0xff,0x0b,0xdf,0x0c]
//------------------------------------------------------------------------------
// Store multiple 1-element structures from one register (post-index)
//------------------------------------------------------------------------------
st1 {v0.16b}, [x0], x1
st1 {v15.8h}, [x15], x2
st1 {v31.4s}, [sp], #16
st1 {v0.2d}, [x0], #16
st1 {v0.8b}, [x0], x2
st1 {v15.4h}, [x15], x3
st1 {v31.2s}, [sp], #8
st1 {v0.1d}, [x0], #8
// CHECK: st1 {v0.16b}, [x0], x1
// CHECK: // encoding: [0x00,0x70,0x81,0x4c]
// CHECK: st1 {v15.8h}, [x15], x2
// CHECK: // encoding: [0xef,0x75,0x82,0x4c]
// CHECK: st1 {v31.4s}, [sp], #16
// CHECK: // encoding: [0xff,0x7b,0x9f,0x4c]
// CHECK: st1 {v0.2d}, [x0], #16
// CHECK: // encoding: [0x00,0x7c,0x9f,0x4c]
// CHECK: st1 {v0.8b}, [x0], x2
// CHECK: // encoding: [0x00,0x70,0x82,0x0c]
// CHECK: st1 {v15.4h}, [x15], x3
// CHECK: // encoding: [0xef,0x75,0x83,0x0c]
// CHECK: st1 {v31.2s}, [sp], #8
// CHECK: // encoding: [0xff,0x7b,0x9f,0x0c]
// CHECK: st1 {v0.1d}, [x0], #8
// CHECK: // encoding: [0x00,0x7c,0x9f,0x0c]
//------------------------------------------------------------------------------
// Store multiple 1-element structures from two consecutive registers
// (post-index)
//------------------------------------------------------------------------------
st1 {v0.16b, v1.16b}, [x0], x1
st1 {v15.8h, v16.8h}, [x15], x2
st1 {v31.4s, v0.4s}, [sp], #32
st1 {v0.2d, v1.2d}, [x0], #32
st1 {v0.8b, v1.8b}, [x0], x2
st1 {v15.4h, v16.4h}, [x15], x3
st1 {v31.2s, v0.2s}, [sp], #16
st1 {v0.1d, v1.1d}, [x0], #16
// CHECK: st1 {v0.16b, v1.16b}, [x0], x1
// CHECK: // encoding: [0x00,0xa0,0x81,0x4c]
// CHECK: st1 {v15.8h, v16.8h}, [x15], x2
// CHECK: // encoding: [0xef,0xa5,0x82,0x4c]
// CHECK: st1 {v31.4s, v0.4s}, [sp], #32
// CHECK: // encoding: [0xff,0xab,0x9f,0x4c]
// CHECK: st1 {v0.2d, v1.2d}, [x0], #32
// CHECK: // encoding: [0x00,0xac,0x9f,0x4c]
// CHECK: st1 {v0.8b, v1.8b}, [x0], x2
// CHECK: // encoding: [0x00,0xa0,0x82,0x0c]
// CHECK: st1 {v15.4h, v16.4h}, [x15], x3
// CHECK: // encoding: [0xef,0xa5,0x83,0x0c]
// CHECK: st1 {v31.2s, v0.2s}, [sp], #16
// CHECK: // encoding: [0xff,0xab,0x9f,0x0c]
// CHECK: st1 {v0.1d, v1.1d}, [x0], #16
// CHECK: // encoding: [0x00,0xac,0x9f,0x0c]
//------------------------------------------------------------------------------
// Store multiple 1-element structures from three consecutive registers
// (post-index)
//------------------------------------------------------------------------------
st1 {v0.16b, v1.16b, v2.16b}, [x0], x1
st1 {v15.8h, v16.8h, v17.8h}, [x15], x2
st1 {v31.4s, v0.4s, v1.4s}, [sp], #48
st1 {v0.2d, v1.2d, v2.2d}, [x0], #48
st1 {v0.8b, v1.8b, v2.8b}, [x0], x2
st1 {v15.4h, v16.4h, v17.4h}, [x15], x3
st1 {v31.2s, v0.2s, v1.2s}, [sp], #24
st1 {v0.1d, v1.1d, v2.1d}, [x0], #24
// CHECK: st1 {v0.16b, v1.16b, v2.16b}, [x0], x1
// CHECK: // encoding: [0x00,0x60,0x81,0x4c]
// CHECK: st1 {v15.8h, v16.8h, v17.8h}, [x15], x2
// CHECK: // encoding: [0xef,0x65,0x82,0x4c]
// CHECK: st1 {v31.4s, v0.4s, v1.4s}, [sp], #48
// CHECK: // encoding: [0xff,0x6b,0x9f,0x4c]
// CHECK: st1 {v0.2d, v1.2d, v2.2d}, [x0], #48
// CHECK: // encoding: [0x00,0x6c,0x9f,0x4c]
// CHECK: st1 {v0.8b, v1.8b, v2.8b}, [x0], x2
// CHECK: // encoding: [0x00,0x60,0x82,0x0c]
// CHECK: st1 {v15.4h, v16.4h, v17.4h}, [x15], x3
// CHECK: // encoding: [0xef,0x65,0x83,0x0c]
// CHECK: st1 {v31.2s, v0.2s, v1.2s}, [sp], #24
// CHECK: // encoding: [0xff,0x6b,0x9f,0x0c]
// CHECK: st1 {v0.1d, v1.1d, v2.1d}, [x0], #24
// CHECK: // encoding: [0x00,0x6c,0x9f,0x0c]
//------------------------------------------------------------------------------
// Store multiple 1-element structures from four consecutive registers
// (post-index)
//------------------------------------------------------------------------------
st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
st1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], x2
st1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
st1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64
st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
st1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x4
st1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], #32
st1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0], #32
// CHECK: st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
// CHECK: // encoding: [0x00,0x20,0x81,0x4c]
// CHECK: st1 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], x2
// CHECK: // encoding: [0xef,0x25,0x82,0x4c]
// CHECK: st1 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
// CHECK: // encoding: [0xff,0x2b,0x9f,0x4c]
// CHECK: st1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64
// CHECK: // encoding: [0x00,0x2c,0x9f,0x4c]
// CHECK: st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
// CHECK: // encoding: [0x00,0x20,0x83,0x0c]
// CHECK: st1 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x4
// CHECK: // encoding: [0xef,0x25,0x84,0x0c]
// CHECK: st1 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], #32
// CHECK: // encoding: [0xff,0x2b,0x9f,0x0c]
// CHECK: st1 {v0.1d, v1.1d, v2.1d, v3.1d}, [x0], #32
// CHECK: // encoding: [0x00,0x2c,0x9f,0x0c]
//------------------------------------------------------------------------------
// Store multiple 2-element structures from two consecutive registers
// (post-index)
//------------------------------------------------------------------------------
st2 {v0.16b, v1.16b}, [x0], x1
st2 {v15.8h, v16.8h}, [x15], x2
st2 {v31.4s, v0.4s}, [sp], #32
st2 {v0.2d, v1.2d}, [x0], #32
st2 {v0.8b, v1.8b}, [x0], x2
st2 {v15.4h, v16.4h}, [x15], x3
st2 {v31.2s, v0.2s}, [sp], #16
// CHECK: st2 {v0.16b, v1.16b}, [x0], x1
// CHECK: // encoding: [0x00,0x80,0x81,0x4c]
// CHECK: st2 {v15.8h, v16.8h}, [x15], x2
// CHECK: // encoding: [0xef,0x85,0x82,0x4c]
// CHECK: st2 {v31.4s, v0.4s}, [sp], #32
// CHECK: // encoding: [0xff,0x8b,0x9f,0x4c]
// CHECK: st2 {v0.2d, v1.2d}, [x0], #32
// CHECK: // encoding: [0x00,0x8c,0x9f,0x4c]
// CHECK: st2 {v0.8b, v1.8b}, [x0], x2
// CHECK: // encoding: [0x00,0x80,0x82,0x0c]
// CHECK: st2 {v15.4h, v16.4h}, [x15], x3
// CHECK: // encoding: [0xef,0x85,0x83,0x0c]
// CHECK: st2 {v31.2s, v0.2s}, [sp], #16
// CHECK: // encoding: [0xff,0x8b,0x9f,0x0c]
//------------------------------------------------------------------------------
// Store multiple 3-element structures from three consecutive registers
// (post-index)
//------------------------------------------------------------------------------
st3 {v0.16b, v1.16b, v2.16b}, [x0], x1
st3 {v15.8h, v16.8h, v17.8h}, [x15], x2
st3 {v31.4s, v0.4s, v1.4s}, [sp], #48
st3 {v0.2d, v1.2d, v2.2d}, [x0], #48
st3 {v0.8b, v1.8b, v2.8b}, [x0], x2
st3 {v15.4h, v16.4h, v17.4h}, [x15], x3
st3 {v31.2s, v0.2s, v1.2s}, [sp], #24
// CHECK: st3 {v0.16b, v1.16b, v2.16b}, [x0], x1
// CHECK: // encoding: [0x00,0x40,0x81,0x4c]
// CHECK: st3 {v15.8h, v16.8h, v17.8h}, [x15], x2
// CHECK: // encoding: [0xef,0x45,0x82,0x4c]
// CHECK: st3 {v31.4s, v0.4s, v1.4s}, [sp], #48
// CHECK: // encoding: [0xff,0x4b,0x9f,0x4c]
// CHECK: st3 {v0.2d, v1.2d, v2.2d}, [x0], #48
// CHECK: // encoding: [0x00,0x4c,0x9f,0x4c]
// CHECK: st3 {v0.8b, v1.8b, v2.8b}, [x0], x2
// CHECK: // encoding: [0x00,0x40,0x82,0x0c]
// CHECK: st3 {v15.4h, v16.4h, v17.4h}, [x15], x3
// CHECK: // encoding: [0xef,0x45,0x83,0x0c]
// CHECK: st3 {v31.2s, v0.2s, v1.2s}, [sp], #24
// CHECK: // encoding: [0xff,0x4b,0x9f,0x0c]
//------------------------------------------------------------------------------
// Store multiple 4-element structures from four consecutive registers
// (post-index)
//------------------------------------------------------------------------------
st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
st4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], x2
st4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
st4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64
st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
st4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x4
st4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], #32
// CHECK: st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
// CHECK: // encoding: [0x00,0x00,0x81,0x4c]
// CHECK: st4 {v15.8h, v16.8h, v17.8h, v18.8h}, [x15], x2
// CHECK: // encoding: [0xef,0x05,0x82,0x4c]
// CHECK: st4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
// CHECK: // encoding: [0xff,0x0b,0x9f,0x4c]
// CHECK: st4 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0], #64
// CHECK: // encoding: [0x00,0x0c,0x9f,0x4c]
// CHECK: st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
// CHECK: // encoding: [0x00,0x00,0x83,0x0c]
// CHECK: st4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x15], x4
// CHECK: // encoding: [0xef,0x05,0x84,0x0c]
// CHECK: st4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp], #32
// CHECK: // encoding: [0xff,0x0b,0x9f,0x0c]


@@ -1,4 +1,4 @@
# RUN: llvm-mc -triple aarch64-none-linux-gnu -mattr=+neon -disassemble < %s | FileCheck %s
#------------------------------------------------------------------------------
# Vector Integer Add/Sub
@@ -1971,3 +1971,74 @@
# CHECK: ucvtf d21, d14, #64
0xb6,0xe5,0x20,0x7f
0xd5,0xe5,0x40,0x7f
#----------------------------------------------------------------------
# Vector load/store multiple N-element structure
#----------------------------------------------------------------------
# CHECK: ld1 {v0.16b}, [x0]
# CHECK: ld1 {v15.8h, v16.8h}, [x15]
# CHECK: ld1 {v31.4s, v0.4s, v1.4s}, [sp]
# CHECK: ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0]
0x00,0x70,0x40,0x4c
0xef,0xa5,0x40,0x4c
0xff,0x6b,0x40,0x4c
0x00,0x2c,0x40,0x4c
# CHECK: ld2 {v0.8b, v1.8b}, [x0]
# CHECK: ld3 {v15.4h, v16.4h, v17.4h}, [x15]
# CHECK: ld4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp]
0x00,0x80,0x40,0x0c
0xef,0x45,0x40,0x0c
0xff,0x0b,0x40,0x0c
# CHECK: st1 {v0.16b}, [x0]
# CHECK: st1 {v15.8h, v16.8h}, [x15]
# CHECK: st1 {v31.4s, v0.4s, v1.4s}, [sp]
# CHECK: st1 {v0.2d, v1.2d, v2.2d, v3.2d}, [x0]
0x00,0x70,0x00,0x4c
0xef,0xa5,0x00,0x4c
0xff,0x6b,0x00,0x4c
0x00,0x2c,0x00,0x4c
# CHECK: st2 {v0.8b, v1.8b}, [x0]
# CHECK: st3 {v15.4h, v16.4h, v17.4h}, [x15]
# CHECK: st4 {v31.2s, v0.2s, v1.2s, v2.2s}, [sp]
0x00,0x80,0x00,0x0c
0xef,0x45,0x00,0x0c
0xff,0x0b,0x00,0x0c
#----------------------------------------------------------------------
# Vector load/store multiple N-element structure (post-index)
#----------------------------------------------------------------------
# CHECK: ld1 {v15.8h}, [x15], x2
# CHECK: ld1 {v31.4s, v0.4s}, [sp], #32
# CHECK: ld1 {v0.2d, v1.2d, v2.2d}, [x0], #48
# CHECK: ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
0xef,0x75,0xc2,0x4c
0xff,0xab,0xdf,0x4c
0x00,0x6c,0xdf,0x4c
0x00,0x20,0xc3,0x0c
# CHECK: ld2 {v0.16b, v1.16b}, [x0], x1
# CHECK: ld3 {v15.8h, v16.8h, v17.8h}, [x15], x2
# CHECK: ld4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
0x00,0x80,0xc1,0x4c
0xef,0x45,0xc2,0x4c
0xff,0x0b,0xdf,0x4c
# CHECK: st1 {v15.8h}, [x15], x2
# CHECK: st1 {v31.4s, v0.4s}, [sp], #32
# CHECK: st1 {v0.2d, v1.2d, v2.2d}, [x0], #48
# CHECK: st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0], x3
0xef,0x75,0x82,0x4c
0xff,0xab,0x9f,0x4c
0x00,0x6c,0x9f,0x4c
0x00,0x20,0x83,0x0c
# CHECK: st2 {v0.16b, v1.16b}, [x0], x1
# CHECK: st3 {v15.8h, v16.8h, v17.8h}, [x15], x2
# CHECK: st4 {v31.4s, v0.4s, v1.4s, v2.4s}, [sp], #64
0x00,0x80,0x81,0x4c
0xef,0x45,0x82,0x4c
0xff,0x0b,0x9f,0x4c