diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index b979232d466..154c810ea86 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -2288,17 +2288,17 @@ class N2VQPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
 // Shift by immediate,
 // both double- and quad-register.
 class N2VDSh<bit op24, bits<4> op11_8, bit op7, bit op4,
-             Format f, InstrItinClass itin, string OpcodeStr, string Dt,
-             ValueType Ty, SDNode OpNode>
+             Format f, InstrItinClass itin, Operand ImmTy,
+             string OpcodeStr, string Dt, ValueType Ty, SDNode OpNode>
   : N2VImm<op24, 0, op11_8, op7, 0, op4,
-           (outs DPR:$dst), (ins DPR:$src, i32imm:$SIMM), f, itin,
+           (outs DPR:$dst), (ins DPR:$src, ImmTy:$SIMM), f, itin,
            OpcodeStr, Dt, "$dst, $src, $SIMM", "",
            [(set DPR:$dst, (Ty (OpNode (Ty DPR:$src), (i32 imm:$SIMM))))]>;
 class N2VQSh<bit op24, bits<4> op11_8, bit op7, bit op4,
-             Format f, InstrItinClass itin, string OpcodeStr, string Dt,
-             ValueType Ty, SDNode OpNode>
+             Format f, InstrItinClass itin, Operand ImmTy,
+             string OpcodeStr, string Dt, ValueType Ty, SDNode OpNode>
   : N2VImm<op24, 0, op11_8, op7, 1, op4,
-           (outs QPR:$dst), (ins QPR:$src, i32imm:$SIMM), f, itin,
+           (outs QPR:$dst), (ins QPR:$src, ImmTy:$SIMM), f, itin,
            OpcodeStr, Dt, "$dst, $src, $SIMM", "",
            [(set QPR:$dst, (Ty (OpNode (Ty QPR:$src), (i32 imm:$SIMM))))]>;
 
@@ -3010,40 +3010,77 @@ multiclass N2VPLInt2_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
 // Neon 2-register vector shift by immediate,
 //   with f of either N2RegVShLFrm or N2RegVShRFrm
 //   element sizes of 8, 16, 32 and 64 bits:
-multiclass N2VSh_QHSD<bit op24, bits<4> op11_8, bit op4,
-                      InstrItinClass itin, string OpcodeStr, string Dt,
-                      SDNode OpNode, Format f> {
+multiclass N2VShL_QHSD<bit op24, bits<4> op11_8, bit op4,
+                       InstrItinClass itin, string OpcodeStr, string Dt,
+                       SDNode OpNode> {
   // 64-bit vector types.
-  def v8i8  : N2VDSh<op24, op11_8, 0, op4, f, itin,
+  def v8i8  : N2VDSh<op24, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
                      OpcodeStr, !strconcat(Dt, "8"), v8i8, OpNode> {
     let Inst{21-19} = 0b001;   // imm6 = 001xxx
   }
-  def v4i16 : N2VDSh<op24, op11_8, 0, op4, f, itin,
+  def v4i16 : N2VDSh<op24, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
                      OpcodeStr, !strconcat(Dt, "16"), v4i16, OpNode> {
     let Inst{21-20} = 0b01;    // imm6 = 01xxxx
   }
-  def v2i32 : N2VDSh<op24, op11_8, 0, op4, f, itin,
+  def v2i32 : N2VDSh<op24, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
                      OpcodeStr, !strconcat(Dt, "32"), v2i32, OpNode> {
     let Inst{21} = 0b1;        // imm6 = 1xxxxx
   }
-  def v1i64 : N2VDSh<op24, op11_8, 1, op4, f, itin,
+  def v1i64 : N2VDSh<op24, op11_8, 1, op4, N2RegVShLFrm, itin, i32imm,
                      OpcodeStr, !strconcat(Dt, "64"), v1i64, OpNode>;
                              // imm6 = xxxxxx
 
   // 128-bit vector types.
-  def v16i8 : N2VQSh<op24, op11_8, 0, op4, f, itin,
+  def v16i8 : N2VQSh<op24, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
                      OpcodeStr, !strconcat(Dt, "8"), v16i8, OpNode> {
     let Inst{21-19} = 0b001;   // imm6 = 001xxx
   }
-  def v8i16 : N2VQSh<op24, op11_8, 0, op4, f, itin,
+  def v8i16 : N2VQSh<op24, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
                      OpcodeStr, !strconcat(Dt, "16"), v8i16, OpNode> {
     let Inst{21-20} = 0b01;    // imm6 = 01xxxx
   }
-  def v4i32 : N2VQSh<op24, op11_8, 0, op4, f, itin,
+  def v4i32 : N2VQSh<op24, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
                      OpcodeStr, !strconcat(Dt, "32"), v4i32, OpNode> {
     let Inst{21} = 0b1;        // imm6 = 1xxxxx
   }
-  def v2i64 : N2VQSh<op24, op11_8, 1, op4, f, itin,
+  def v2i64 : N2VQSh<op24, op11_8, 1, op4, N2RegVShLFrm, itin, i32imm,
                      OpcodeStr, !strconcat(Dt, "64"), v2i64, OpNode>;
+                             // imm6 = xxxxxx
+}
+multiclass N2VShR_QHSD<bit op24, bits<4> op11_8, bit op4,
+                       InstrItinClass itin, string OpcodeStr, string Dt,
+                       SDNode OpNode> {
+  // 64-bit vector types.
+  def v8i8  : N2VDSh<op24, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm8,
+                     OpcodeStr, !strconcat(Dt, "8"), v8i8, OpNode> {
+    let Inst{21-19} = 0b001;   // imm6 = 001xxx
+  }
+  def v4i16 : N2VDSh<op24, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm16,
+                     OpcodeStr, !strconcat(Dt, "16"), v4i16, OpNode> {
+    let Inst{21-20} = 0b01;    // imm6 = 01xxxx
+  }
+  def v2i32 : N2VDSh<op24, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm32,
+                     OpcodeStr, !strconcat(Dt, "32"), v2i32, OpNode> {
+    let Inst{21} = 0b1;        // imm6 = 1xxxxx
+  }
+  def v1i64 : N2VDSh<op24, op11_8, 1, op4, N2RegVShRFrm, itin, shr_imm64,
+                     OpcodeStr, !strconcat(Dt, "64"), v1i64, OpNode>;
+                             // imm6 = xxxxxx
+
+  // 128-bit vector types.
+  def v16i8 : N2VQSh<op24, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm8,
+                     OpcodeStr, !strconcat(Dt, "8"), v16i8, OpNode> {
+    let Inst{21-19} = 0b001;   // imm6 = 001xxx
+  }
+  def v8i16 : N2VQSh<op24, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm16,
+                     OpcodeStr, !strconcat(Dt, "16"), v8i16, OpNode> {
+    let Inst{21-20} = 0b01;    // imm6 = 01xxxx
+  }
+  def v4i32 : N2VQSh<op24, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm32,
+                     OpcodeStr, !strconcat(Dt, "32"), v4i32, OpNode> {
+    let Inst{21} = 0b1;        // imm6 = 1xxxxx
+  }
+  def v2i64 : N2VQSh<op24, op11_8, 1, op4, N2RegVShRFrm, itin, shr_imm64,
+                     OpcodeStr, !strconcat(Dt, "64"), v2i64, OpNode>;
                              // imm6 = xxxxxx
 }
@@ -3920,14 +3957,13 @@ defm VSHLs    : N3VInt_QHSDSh<0, 0, 0b0100, 0, N3RegVShFrm,
 defm VSHLu    : N3VInt_QHSDSh<1, 0, 0b0100, 0, N3RegVShFrm,
                             IIC_VSHLiD, IIC_VSHLiD, IIC_VSHLiQ, IIC_VSHLiQ,
                             "vshl", "u", int_arm_neon_vshiftu>;
+
 // VSHL : Vector Shift Left (Immediate)
-defm VSHLi    : N2VSh_QHSD<0, 1, 0b0101, 1, IIC_VSHLiD, "vshl", "i", NEONvshl,
-                           N2RegVShLFrm>;
+defm VSHLi    : N2VShL_QHSD<0, 1, 0b0101, 1, IIC_VSHLiD, "vshl", "i", NEONvshl>;
+
 // VSHR : Vector Shift Right (Immediate)
-defm VSHRs    : N2VSh_QHSD<0, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "s", NEONvshrs,
-                           N2RegVShRFrm>;
-defm VSHRu    : N2VSh_QHSD<1, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "u", NEONvshru,
-                           N2RegVShRFrm>;
+defm VSHRs    : N2VShR_QHSD<0, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "s",NEONvshrs>;
+defm VSHRu    : N2VShR_QHSD<1, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "u",NEONvshru>;
 
 // VSHLL : Vector Shift Left Long
 defm VSHLLs   : N2VLSh_QHS<0, 1, 0b1010, 0, 0, 1, "vshll", "s", NEONvshlls>;
@@ -3960,10 +3996,8 @@ defm VRSHLu   : N3VInt_QHSDSh<1, 0, 0b0101, 0, N3RegVShFrm,
                             IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
                             "vrshl", "u", int_arm_neon_vrshiftu>;
 // VRSHR : Vector Rounding Shift Right
-defm VRSHRs   : N2VSh_QHSD<0,1,0b0010,1, IIC_VSHLi4D, "vrshr", "s", NEONvrshrs,
-                           N2RegVShRFrm>;
-defm VRSHRu   : N2VSh_QHSD<1,1,0b0010,1, IIC_VSHLi4D, "vrshr", "u", NEONvrshru,
-                           N2RegVShRFrm>;
+defm VRSHRs   : N2VShR_QHSD<0,1,0b0010,1, IIC_VSHLi4D, "vrshr", "s",NEONvrshrs>;
+defm VRSHRu   : N2VShR_QHSD<1,1,0b0010,1, IIC_VSHLi4D, "vrshr", "u",NEONvrshru>;
 
 // VRSHRN : Vector Rounding Shift Right and Narrow
 defm VRSHRN   : N2VNSh_HSD<0, 1, 0b1000, 0, 1, 1, IIC_VSHLi4D, "vrshrn", "i",
@@ -3977,13 +4011,11 @@ defm VQSHLu   : N3VInt_QHSDSh<1, 0, 0b0100, 1, N3RegVShFrm,
                             IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
                             "vqshl", "u", int_arm_neon_vqshiftu>;
 // VQSHL : Vector Saturating Shift Left (Immediate)
-defm VQSHLsi  : N2VSh_QHSD<0,1,0b0111,1, IIC_VSHLi4D, "vqshl", "s",NEONvqshls,
-                           N2RegVShLFrm>;
-defm VQSHLui  : N2VSh_QHSD<1,1,0b0111,1, IIC_VSHLi4D, "vqshl", "u",NEONvqshlu,
-                           N2RegVShLFrm>;
+defm VQSHLsi  : N2VShL_QHSD<0,1,0b0111,1, IIC_VSHLi4D, "vqshl", "s",NEONvqshls>;
+defm VQSHLui  : N2VShL_QHSD<1,1,0b0111,1, IIC_VSHLi4D, "vqshl", "u",NEONvqshlu>;
+
 // VQSHLU : Vector Saturating Shift Left (Immediate, Unsigned)
-defm VQSHLsu  : N2VSh_QHSD<1,1,0b0110,1, IIC_VSHLi4D,"vqshlu","s",NEONvqshlsu,
-                           N2RegVShLFrm>;
+defm VQSHLsu  : N2VShL_QHSD<1,1,0b0110,1, IIC_VSHLi4D,"vqshlu","s",NEONvqshlsu>;
 
 // VQSHRN : Vector Saturating Shift Right and Narrow
 defm VQSHRNs  : N2VNSh_HSD<0, 1, 0b1001, 0, 0, 1, IIC_VSHLi4D, "vqshrn", "s",
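The reason the left- and right-shift multiclasses want different immediate operand types is the asymmetry in the architected ranges: VSHL (immediate) takes a shift of 0..size-1, while VSHR (immediate) takes 1..size, and the imm6 field is computed differently for each. A minimal standalone illustration in GNU ARM assembly (not part of the patch; register choices are arbitrary):

@ VSHL (immediate) accepts #0..#(size-1); imm6 encodes size + shift
@ (for 64-bit elements the L bit is set and imm6 is the shift itself).
@ VSHR (immediate) accepts #1..#size; imm6 encodes 2*size - shift.
	vshl.i8 d0, d0, #7           @ imm6 = 8 + 7  = 0b001111
	vshr.u8 d0, d0, #8           @ imm6 = 16 - 8 = 0b001000

This is also why the "imm6 = 001xxx" comments above hold for both multiclasses: for 8-bit elements both formulas land in the 0b001xxx range, and only the xxx bits vary with the shift amount.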
diff --git a/test/MC/ARM/neon-shift-encoding.s b/test/MC/ARM/neon-shift-encoding.s
index 292692d007c..8b249ae99b6 100644
--- a/test/MC/ARM/neon-shift-encoding.s
+++ b/test/MC/ARM/neon-shift-encoding.s
@@ -1,174 +1,173 @@
 @ RUN: llvm-mc -mcpu=cortex-a8 -triple arm-unknown-unknown -show-encoding < %s | FileCheck %s
 
-@ CHECK: vshl.u8 d16, d17, d16 @ encoding: [0xa1,0x04,0x40,0xf3]
+_foo:
+@ CHECK: vshl.u8 d16, d17, d16 @ encoding: [0xa1,0x04,0x40,0xf3]
 	vshl.u8 d16, d17, d16
-@ CHECK: vshl.u16 d16, d17, d16 @ encoding: [0xa1,0x04,0x50,0xf3]
+@ CHECK: vshl.u16 d16, d17, d16 @ encoding: [0xa1,0x04,0x50,0xf3]
 	vshl.u16 d16, d17, d16
-@ CHECK: vshl.u32 d16, d17, d16 @ encoding: [0xa1,0x04,0x60,0xf3]
+@ CHECK: vshl.u32 d16, d17, d16 @ encoding: [0xa1,0x04,0x60,0xf3]
 	vshl.u32 d16, d17, d16
-@ CHECK: vshl.u64 d16, d17, d16 @ encoding: [0xa1,0x04,0x70,0xf3]
+@ CHECK: vshl.u64 d16, d17, d16 @ encoding: [0xa1,0x04,0x70,0xf3]
 	vshl.u64 d16, d17, d16
-@ CHECK: vshl.i8 d16, d16, #7 @ encoding: [0x30,0x05,0xcf,0xf2]
+@ CHECK: vshl.i8 d16, d16, #7 @ encoding: [0x30,0x05,0xcf,0xf2]
 	vshl.i8 d16, d16, #7
-@ CHECK: vshl.i16 d16, d16, #15 @ encoding: [0x30,0x05,0xdf,0xf2]
+@ CHECK: vshl.i16 d16, d16, #15 @ encoding: [0x30,0x05,0xdf,0xf2]
 	vshl.i16 d16, d16, #15
-@ CHECK: vshl.i32 d16, d16, #31 @ encoding: [0x30,0x05,0xff,0xf2]
+@ CHECK: vshl.i32 d16, d16, #31 @ encoding: [0x30,0x05,0xff,0xf2]
 	vshl.i32 d16, d16, #31
-@ CHECK: vshl.i64 d16, d16, #63 @ encoding: [0xb0,0x05,0xff,0xf2]
+@ CHECK: vshl.i64 d16, d16, #63 @ encoding: [0xb0,0x05,0xff,0xf2]
 	vshl.i64 d16, d16, #63
-@ CHECK: vshl.u8 q8, q9, q8 @ encoding: [0xe2,0x04,0x40,0xf3]
+@ CHECK: vshl.u8 q8, q9, q8 @ encoding: [0xe2,0x04,0x40,0xf3]
 	vshl.u8 q8, q9, q8
-@ CHECK: vshl.u16 q8, q9, q8 @ encoding: [0xe2,0x04,0x50,0xf3]
+@ CHECK: vshl.u16 q8, q9, q8 @ encoding: [0xe2,0x04,0x50,0xf3]
 	vshl.u16 q8, q9, q8
-@ CHECK: vshl.u32 q8, q9, q8 @ encoding: [0xe2,0x04,0x60,0xf3]
+@ CHECK: vshl.u32 q8, q9, q8 @ encoding: [0xe2,0x04,0x60,0xf3]
 	vshl.u32 q8, q9, q8
-@ CHECK: vshl.u64 q8, q9, q8 @ encoding: [0xe2,0x04,0x70,0xf3]
+@ CHECK: vshl.u64 q8, q9, q8 @ encoding: [0xe2,0x04,0x70,0xf3]
 	vshl.u64 q8, q9, q8
-@ CHECK: vshl.i8 q8, q8, #7 @ encoding: [0x70,0x05,0xcf,0xf2]
+@ CHECK: vshl.i8 q8, q8, #7 @ encoding: [0x70,0x05,0xcf,0xf2]
 	vshl.i8 q8, q8, #7
-@ CHECK: vshl.i16 q8, q8, #15 @ encoding: [0x70,0x05,0xdf,0xf2]
+@ CHECK: vshl.i16 q8, q8, #15 @ encoding: [0x70,0x05,0xdf,0xf2]
 	vshl.i16 q8, q8, #15
-@ CHECK: vshl.i32 q8, q8, #31 @ encoding: [0x70,0x05,0xff,0xf2]
+@ CHECK: vshl.i32 q8, q8, #31 @ encoding: [0x70,0x05,0xff,0xf2]
 	vshl.i32 q8, q8, #31
-@ CHECK: vshl.i64 q8, q8, #63 @ encoding: [0xf0,0x05,0xff,0xf2]
+@ CHECK: vshl.i64 q8, q8, #63 @ encoding: [0xf0,0x05,0xff,0xf2]
 	vshl.i64 q8, q8, #63
-@ CHECK: vshr.u8 d16, d16, #8 @ encoding: [0x30,0x00,0xc8,0xf3]
-	vshr.u8 d16, d16, #8
-@ CHECK: vshr.u16 d16, d16, #16 @ encoding: [0x30,0x00,0xd0,0xf3]
-	vshr.u16 d16, d16, #16
-@ CHECK: vshr.u32 d16, d16, #32 @ encoding: [0x30,0x00,0xe0,0xf3]
-	vshr.u32 d16, d16, #32
-@ CHECK: vshr.u64 d16, d16, #64 @ encoding: [0xb0,0x00,0xc0,0xf3]
-	vshr.u64 d16, d16, #64
-@ CHECK: vshr.u8 q8, q8, #8 @ encoding: [0x70,0x00,0xc8,0xf3]
-	vshr.u8 q8, q8, #8
-@ CHECK: vshr.u16 q8, q8, #16 @ encoding: [0x70,0x00,0xd0,0xf3]
-	vshr.u16 q8, q8, #16
-@ CHECK: vshr.u32 q8, q8, #32 @ encoding: [0x70,0x00,0xe0,0xf3]
-	vshr.u32 q8, q8, #32
-@ CHECK: vshr.u64 q8, q8, #64 @ encoding: [0xf0,0x00,0xc0,0xf3]
-	vshr.u64 q8, q8, #64
-@ CHECK: vshr.s8 d16, d16, #8 @ encoding: [0x30,0x00,0xc8,0xf2]
-	vshr.s8 d16, d16, #8
-@ CHECK: vshr.s16 d16, d16, #16 @ encoding: [0x30,0x00,0xd0,0xf2]
-	vshr.s16 d16, d16, #16
-@ CHECK: vshr.s32 d16, d16, #32 @ encoding: [0x30,0x00,0xe0,0xf2]
-	vshr.s32 d16, d16, #32
-@ CHECK: vshr.s64 d16, d16, #64 @ encoding: [0xb0,0x00,0xc0,0xf2]
-	vshr.s64 d16, d16, #64
-@ CHECK: vshr.s8 q8, q8, #8 @ encoding: [0x70,0x00,0xc8,0xf2]
-	vshr.s8 q8, q8, #8
-@ CHECK: vshr.s16 q8, q8, #16 @ encoding: [0x70,0x00,0xd0,0xf2]
-	vshr.s16 q8, q8, #16
-@ CHECK: vshr.s32 q8, q8, #32 @ encoding: [0x70,0x00,0xe0,0xf2
-	vshr.s32 q8, q8, #32
-@ CHECK: vshr.s64 q8, q8, #64 @ encoding: [0xf0,0x00,0xc0,0xf2]
-	vshr.s64 q8, q8, #64
-@ CHECK: vshll.s8 q8, d16, #7 @ encoding: [0x30,0x0a,0xcf,0xf2]
+@ CHECK: vshr.u8 d16, d16, #7 @ encoding: [0x30,0x00,0xc9,0xf3]
+	vshr.u8 d16, d16, #7
+@ CHECK: vshr.u16 d16, d16, #15 @ encoding: [0x30,0x00,0xd1,0xf3]
+	vshr.u16 d16, d16, #15
+@ CHECK: vshr.u32 d16, d16, #31 @ encoding: [0x30,0x00,0xe1,0xf3]
+	vshr.u32 d16, d16, #31
+@ CHECK: vshr.u64 d16, d16, #63 @ encoding: [0xb0,0x00,0xc1,0xf3]
+	vshr.u64 d16, d16, #63
+@ CHECK: vshr.u8 q8, q8, #7 @ encoding: [0x70,0x00,0xc9,0xf3]
+	vshr.u8 q8, q8, #7
+@ CHECK: vshr.u16 q8, q8, #15 @ encoding: [0x70,0x00,0xd1,0xf3]
+	vshr.u16 q8, q8, #15
+@ CHECK: vshr.u32 q8, q8, #31 @ encoding: [0x70,0x00,0xe1,0xf3]
+	vshr.u32 q8, q8, #31
+@ CHECK: vshr.u64 q8, q8, #63 @ encoding: [0xf0,0x00,0xc1,0xf3]
+	vshr.u64 q8, q8, #63
+@ CHECK: vshr.s8 d16, d16, #7 @ encoding: [0x30,0x00,0xc9,0xf2]
+	vshr.s8 d16, d16, #7
+@ CHECK: vshr.s16 d16, d16, #15 @ encoding: [0x30,0x00,0xd1,0xf2]
+	vshr.s16 d16, d16, #15
+@ CHECK: vshr.s32 d16, d16, #31 @ encoding: [0x30,0x00,0xe1,0xf2]
+	vshr.s32 d16, d16, #31
+@ CHECK: vshr.s64 d16, d16, #63 @ encoding: [0xb0,0x00,0xc1,0xf2]
+	vshr.s64 d16, d16, #63
+@ CHECK: vshr.s8 q8, q8, #7 @ encoding: [0x70,0x00,0xc9,0xf2]
+	vshr.s8 q8, q8, #7
+@ CHECK: vshr.s16 q8, q8, #15 @ encoding: [0x70,0x00,0xd1,0xf2]
+	vshr.s16 q8, q8, #15
+@ CHECK: vshr.s32 q8, q8, #31 @ encoding: [0x70,0x00,0xe1,0xf2]
+	vshr.s32 q8, q8, #31
+@ CHECK: vshr.s64 q8, q8, #63 @ encoding: [0xf0,0x00,0xc1,0xf2]
+	vshr.s64 q8, q8, #63
+@ CHECK: vshll.s8 q8, d16, #7 @ encoding: [0x30,0x0a,0xcf,0xf2]
 	vshll.s8 q8, d16, #7
-@ CHECK: vshll.s16 q8, d16, #15 @ encoding: [0x30,0x0a,0xdf,0xf2]
+@ CHECK: vshll.s16 q8, d16, #15 @ encoding: [0x30,0x0a,0xdf,0xf2]
 	vshll.s16 q8, d16, #15
-@ CHECK: vshll.s32 q8, d16, #31 @ encoding: [0x30,0x0a,0xff,0xf2]
+@ CHECK: vshll.s32 q8, d16, #31 @ encoding: [0x30,0x0a,0xff,0xf2]
 	vshll.s32 q8, d16, #31
-@ CHECK: vshll.u8 q8, d16, #7 @ encoding: [0x30,0x0a,0xcf,0xf3]
+@ CHECK: vshll.u8 q8, d16, #7 @ encoding: [0x30,0x0a,0xcf,0xf3]
 	vshll.u8 q8, d16, #7
-@ CHECK: vshll.u16 q8, d16, #15 @ encoding: [0x30,0x0a,0xdf,0xf3]
+@ CHECK: vshll.u16 q8, d16, #15 @ encoding: [0x30,0x0a,0xdf,0xf3]
 	vshll.u16 q8, d16, #15
-@ CHECK: vshll.u32 q8, d16, #31 @ encoding: [0x30,0x0a,0xff,0xf3]
+@ CHECK: vshll.u32 q8, d16, #31 @ encoding: [0x30,0x0a,0xff,0xf3]
 	vshll.u32 q8, d16, #31
-@ CHECK: vshll.i8 q8, d16, #8 @ encoding: [0x20,0x03,0xf2,0xf3]
+@ CHECK: vshll.i8 q8, d16, #8 @ encoding: [0x20,0x03,0xf2,0xf3]
 	vshll.i8 q8, d16, #8
-@ CHECK: vshll.i16 q8, d16, #16 @ encoding: [0x20,0x03,0xf6,0xf3]
+@ CHECK: vshll.i16 q8, d16, #16 @ encoding: [0x20,0x03,0xf6,0xf3]
 	vshll.i16 q8, d16, #16
-@ CHECK: vshll.i32 q8, d16, #32 @ encoding: [0x20,0x03,0xfa,0xf3]
+@ CHECK: vshll.i32 q8, d16, #32 @ encoding: [0x20,0x03,0xfa,0xf3]
 	vshll.i32 q8, d16, #32
-@ CHECK: vshrn.i16 d16, q8, #8 @ encoding: [0x30,0x08,0xc8,0xf2]
+@ CHECK: vshrn.i16 d16, q8, #8 @ encoding: [0x30,0x08,0xc8,0xf2]
 	vshrn.i16 d16, q8, #8
-@ CHECK: vshrn.i32 d16, q8, #16 @ encoding: [0x30,0x08,0xd0,0xf2]
+@ CHECK: vshrn.i32 d16, q8, #16 @ encoding: [0x30,0x08,0xd0,0xf2]
 	vshrn.i32 d16, q8, #16
-@ CHECK: vshrn.i64 d16, q8, #32 @ encoding: [0x30,0x08,0xe0,0xf2]
+@ CHECK: vshrn.i64 d16, q8, #32 @ encoding: [0x30,0x08,0xe0,0xf2]
 	vshrn.i64 d16, q8, #32
-@ CHECK: vrshl.s8 d16, d17, d16 @ encoding: [0xa1,0x05,0x40,0xf2]
+@ CHECK: vrshl.s8 d16, d17, d16 @ encoding: [0xa1,0x05,0x40,0xf2]
 	vrshl.s8 d16, d17, d16
-@ CHECK: vrshl.s16 d16, d17, d16 @ encoding: [0xa1,0x05,0x50,0xf2]
+@ CHECK: vrshl.s16 d16, d17, d16 @ encoding: [0xa1,0x05,0x50,0xf2]
 	vrshl.s16 d16, d17, d16
-@ CHECK: vrshl.s32 d16, d17, d16 @ encoding: [0xa1,0x05,0x60,0xf2]
+@ CHECK: vrshl.s32 d16, d17, d16 @ encoding: [0xa1,0x05,0x60,0xf2]
 	vrshl.s32 d16, d17, d16
-@ CHECK: vrshl.s64 d16, d17, d16 @ encoding: [0xa1,0x05,0x70,0
+@ CHECK: vrshl.s64 d16, d17, d16 @ encoding: [0xa1,0x05,0x70,0xf2]
 	vrshl.s64 d16, d17, d16
-@ CHECK: vrshl.u8 d16, d17, d16 @ encoding: [0xa1,0x05,0x40,0xf3]
+@ CHECK: vrshl.u8 d16, d17, d16 @ encoding: [0xa1,0x05,0x40,0xf3]
 	vrshl.u8 d16, d17, d16
-@ CHECK: vrshl.u16 d16, d17, d16 @ encoding: [0xa1,0x05,0x50,0xf3]
+@ CHECK: vrshl.u16 d16, d17, d16 @ encoding: [0xa1,0x05,0x50,0xf3]
 	vrshl.u16 d16, d17, d16
-@ CHECK: vrshl.u32 d16, d17, d16 @ encoding: [0xa1,0x05,0x60,0xf3]
+@ CHECK: vrshl.u32 d16, d17, d16 @ encoding: [0xa1,0x05,0x60,0xf3]
 	vrshl.u32 d16, d17, d16
-@ CHECK: vrshl.u64 d16, d17, d16 @ encoding: [0xa1,0x05,0x70,0xf3]
+@ CHECK: vrshl.u64 d16, d17, d16 @ encoding: [0xa1,0x05,0x70,0xf3]
 	vrshl.u64 d16, d17, d16
-@ CHECK: vrshl.s8 q8, q9, q8 @ encoding: [0xe2,0x05,0x40,0xf2]
+@ CHECK: vrshl.s8 q8, q9, q8 @ encoding: [0xe2,0x05,0x40,0xf2]
 	vrshl.s8 q8, q9, q8
-@ CHECK: vrshl.s16 q8, q9, q8 @ encoding: [0xe2,0x05,0x50,0xf2]
+@ CHECK: vrshl.s16 q8, q9, q8 @ encoding: [0xe2,0x05,0x50,0xf2]
 	vrshl.s16 q8, q9, q8
-@ CHECK: vrshl.s32 q8, q9, q8 @ encoding: [0xe2,0x05,0x60,0xf2]
+@ CHECK: vrshl.s32 q8, q9, q8 @ encoding: [0xe2,0x05,0x60,0xf2]
 	vrshl.s32 q8, q9, q8
-@ CHECK: vrshl.s64 q8, q9, q8 @ encoding: [0xe2,0x05,0x70,0xf2]
+@ CHECK: vrshl.s64 q8, q9, q8 @ encoding: [0xe2,0x05,0x70,0xf2]
 	vrshl.s64 q8, q9, q8
-@ CHECK: vrshl.u8 q8, q9, q8 @ encoding: [0xe2,0x05,0x40,0xf3]
+@ CHECK: vrshl.u8 q8, q9, q8 @ encoding: [0xe2,0x05,0x40,0xf3]
 	vrshl.u8 q8, q9, q8
-@ CHECK: vrshl.u16 q8, q9, q8 @ encoding: [0xe2,0x05,0x50,0xf3]
+@ CHECK: vrshl.u16 q8, q9, q8 @ encoding: [0xe2,0x05,0x50,0xf3]
 	vrshl.u16 q8, q9, q8
-@ CHECK: vrshl.u32 q8, q9, q8 @ encoding: [0xe2,0x05,0x60,0xf3]
+@ CHECK: vrshl.u32 q8, q9, q8 @ encoding: [0xe2,0x05,0x60,0xf3]
 	vrshl.u32 q8, q9, q8
-@ CHECK: vrshl.u64 q8, q9, q8 @ encoding: [0xe2,0x05,0x70,0xf3]
+@ CHECK: vrshl.u64 q8, q9, q8 @ encoding: [0xe2,0x05,0x70,0xf3]
 	vrshl.u64 q8, q9, q8
-@ CHECK: vrshr.s8 d16, d16, #8 @ encoding: [0x30,0x02,0xc8,0xf2]
+@ CHECK: vrshr.s8 d16, d16, #8 @ encoding: [0x30,0x02,0xc8,0xf2]
 	vrshr.s8 d16, d16, #8
-@ CHECK: vrshr.s16 d16, d16, #16 @ encoding: [0x30,0x02,0xd0,0xf2]
+@ CHECK: vrshr.s16 d16, d16, #16 @ encoding: [0x30,0x02,0xd0,0xf2]
 	vrshr.s16 d16, d16, #16
-@ CHECK: vrshr.s32 d16, d16, #32 @ encoding: [0x30,0x02,0xe0,0xf2]
+@ CHECK: vrshr.s32 d16, d16, #32 @ encoding: [0x30,0x02,0xe0,0xf2]
 	vrshr.s32 d16, d16, #32
-@ CHECK: vrshr.s64 d16, d16, #64 @ encoding: [0xb0,0x02,0xc0,0xf2]
+@ CHECK: vrshr.s64 d16, d16, #64 @ encoding: [0xb0,0x02,0xc0,0xf2]
 	vrshr.s64 d16, d16, #64
-@ CHECK: vrshr.u8 d16, d16, #8 @ encoding: [0x30,0x02,0xc8,0xf3]
+@ CHECK: vrshr.u8 d16, d16, #8 @ encoding: [0x30,0x02,0xc8,0xf3]
 	vrshr.u8 d16, d16, #8
-@ CHECK: vrshr.u16 d16, d16, #16 @ encoding: [0x30,0x02,0xd0,0xf3]
+@ CHECK: vrshr.u16 d16, d16, #16 @ encoding: [0x30,0x02,0xd0,0xf3]
 	vrshr.u16 d16, d16, #16
-@ CHECK: vrshr.u32 d16, d16, #32 @ encoding: [0x30,0x02,0xe0,0xf3]
+@ CHECK: vrshr.u32 d16, d16, #32 @ encoding: [0x30,0x02,0xe0,0xf3]
 	vrshr.u32 d16, d16, #32
-@ CHECK: vrshr.u64 d16, d16, #64 @ encoding: [0xb0,0x02,0xc0,0xf3]
+@ CHECK: vrshr.u64 d16, d16, #64 @ encoding: [0xb0,0x02,0xc0,0xf3]
 	vrshr.u64 d16, d16, #64
-@ CHECK: vrshr.s8 q8, q8, #8 @ encoding: [0x70,0x02,0xc8,0xf2]
+@ CHECK: vrshr.s8 q8, q8, #8 @ encoding: [0x70,0x02,0xc8,0xf2]
 	vrshr.s8 q8, q8, #8
-@ CHECK: vrshr.s16 q8, q8, #16 @ encoding: [0x70,0x02,0xd0,0xf2]
+@ CHECK: vrshr.s16 q8, q8, #16 @ encoding: [0x70,0x02,0xd0,0xf2]
 	vrshr.s16 q8, q8, #16
-@ CHECK: vrshr.s32 q8, q8, #32 @ encoding: [0x70,0x02,0xe0,0xf2]
+@ CHECK: vrshr.s32 q8, q8, #32 @ encoding: [0x70,0x02,0xe0,0xf2]
 	vrshr.s32 q8, q8, #32
-@ CHECK: vrshr.s64 q8, q8, #64 @ encoding: [0xf0,0x02,0xc0,0xf2]
+@ CHECK: vrshr.s64 q8, q8, #64 @ encoding: [0xf0,0x02,0xc0,0xf2]
 	vrshr.s64 q8, q8, #64
-@ CHECK: vrshr.u8 q8, q8, #8 @ encoding: [0x70,0x02,0xc8,0xf3]
+@ CHECK: vrshr.u8 q8, q8, #8 @ encoding: [0x70,0x02,0xc8,0xf3]
 	vrshr.u8 q8, q8, #8
-@ CHECK: vrshr.u16 q8, q8, #16 @ encoding: [0x70,0x02,0xd0,0xf3]
+@ CHECK: vrshr.u16 q8, q8, #16 @ encoding: [0x70,0x02,0xd0,0xf3]
 	vrshr.u16 q8, q8, #16
-@ CHECK: vrshr.u32 q8, q8, #32 @ encoding: [0x70,0x02,0xe0,0xf3]
+@ CHECK: vrshr.u32 q8, q8, #32 @ encoding: [0x70,0x02,0xe0,0xf3]
 	vrshr.u32 q8, q8, #32
-@ CHECK: vrshr.u64 q8, q8, #64 @ encoding: [0xf0,0x02,0xc0,0xf3]
+@ CHECK: vrshr.u64 q8, q8, #64 @ encoding: [0xf0,0x02,0xc0,0xf3]
 	vrshr.u64 q8, q8, #64
-@ CHECK: vrshrn.i16 d16, q8, #8 @ encoding: [0x70,0x08,0xc8,0xf2]
+@ CHECK: vrshrn.i16 d16, q8, #8 @ encoding: [0x70,0x08,0xc8,0xf2]
 	vrshrn.i16 d16, q8, #8
-@ CHECK: vrshrn.i32 d16, q8, #16 @ encoding: [0x70,0x08,0xd0,0xf2]
+@ CHECK: vrshrn.i32 d16, q8, #16 @ encoding: [0x70,0x08,0xd0,0xf2]
 	vrshrn.i32 d16, q8, #16
-@ CHECK: vrshrn.i64 d16, q8, #32 @ encoding: [0x70,0x08,0xe0,0xf2]
+@ CHECK: vrshrn.i64 d16, q8, #32 @ encoding: [0x70,0x08,0xe0,0xf2]
 	vrshrn.i64 d16, q8, #32
-
-@ CHECK: vqrshrn.s16 d16, q8, #4 @ encoding: [0x70,0x09,0xcc,0xf2]
-	vqrshrn.s16 d16, q8, #4
-@ CHECK: vqrshrn.s32 d16, q8, #13 @ encoding: [0x70,0x09,0xd3,0xf2]
-	vqrshrn.s32 d16, q8, #13
-@ CHECK: vqrshrn.s64 d16, q8, #13 @ encoding: [0x70,0x09,0xf3,0xf2]
-	vqrshrn.s64 d16, q8, #13
-
-@ CHECK: vqrshrn.u16 d16, q8, #4 @ encoding: [0x70,0x09,0xcc,0xf3]
-	vqrshrn.u16 d16, q8, #4
-@ CHECK: vqrshrn.u32 d16, q8, #13 @ encoding: [0x70,0x09,0xd3,0xf3]
-	vqrshrn.u32 d16, q8, #13
-@ CHECK: vqrshrn.u64 d16, q8, #13 @ encoding: [0x70,0x09,0xf3,0xf3]
-	vqrshrn.u64 d16, q8, #13
+@ CHECK: vqrshrn.s16 d16, q8, #4 @ encoding: [0x70,0x09,0xcc,0xf2]
+	vqrshrn.s16 d16, q8, #4
+@ CHECK: vqrshrn.s32 d16, q8, #13 @ encoding: [0x70,0x09,0xd3,0xf2]
+	vqrshrn.s32 d16, q8, #13
+@ CHECK: vqrshrn.s64 d16, q8, #13 @ encoding: [0x70,0x09,0xf3,0xf2]
+	vqrshrn.s64 d16, q8, #13
+@ CHECK: vqrshrn.u16 d16, q8, #4 @ encoding: [0x70,0x09,0xcc,0xf3]
+	vqrshrn.u16 d16, q8, #4
+@ CHECK: vqrshrn.u32 d16, q8, #13 @ encoding: [0x70,0x09,0xd3,0xf3]
+	vqrshrn.u32 d16, q8, #13
+@ CHECK: vqrshrn.u64 d16, q8, #13 @ encoding: [0x70,0x09,0xf3,0xf3]
+	vqrshrn.u64 d16, q8, #13
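As a quick arithmetic cross-check of the rewritten vshr encodings (a worked example, not part of the patch): for right shifts the imm6 field, bits 21-16 of the instruction word, holds 2*size minus the shift amount.

	vshr.u16 d16, d16, #15       @ imm6 = 32 - 15 = 17 = 0b010001
@ The CHECK bytes [0x30,0x00,0xd1,0xf3] are the little-endian form of the
@ word 0xf3d10030, whose bits 21-16 are indeed 0b010001.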