diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index 02145aefacc..d860a1416c9 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -2344,15 +2344,17 @@ class N2VQShAdd<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
 // Shift by immediate and insert,
 // both double- and quad-register.
 class N2VDShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
-                Format f, string OpcodeStr, string Dt, ValueType Ty,SDNode ShOp>
+                Operand ImmTy, Format f, string OpcodeStr, string Dt,
+                ValueType Ty,SDNode ShOp>
   : N2VImm<op24, op23, op11_8, op7, 0, op4,
-           (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vm, i32imm:$SIMM), f,
+           (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vm, ImmTy:$SIMM), f,
            NoItinerary, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
            [(set DPR:$Vd, (Ty (ShOp DPR:$src1, DPR:$Vm, (i32 imm:$SIMM))))]>;
 class N2VQShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
-                Format f, string OpcodeStr, string Dt, ValueType Ty,SDNode ShOp>
+                Operand ImmTy, Format f, string OpcodeStr, string Dt,
+                ValueType Ty,SDNode ShOp>
   : N2VImm<op24, op23, op11_8, op7, 1, op4,
-           (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vm, i32imm:$SIMM), f,
+           (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vm, ImmTy:$SIMM), f,
            NoItinerary, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
            [(set QPR:$Vd, (Ty (ShOp QPR:$src1, QPR:$Vm, (i32 imm:$SIMM))))]>;
 
@@ -3129,41 +3131,76 @@ multiclass N2VShAdd_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
 // Neon Shift-Insert vector operations,
 //   with f of either N2RegVShLFrm or N2RegVShRFrm
 //   element sizes of 8, 16, 32 and 64 bits:
-multiclass N2VShIns_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
-                         string OpcodeStr, SDNode ShOp,
-                         Format f> {
+multiclass N2VShInsL_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
+                          string OpcodeStr> {
   // 64-bit vector types.
-  def v8i8  : N2VDShIns<op24, op23, op11_8, 0, op4,
-                        f, OpcodeStr, "8", v8i8, ShOp> {
+  def v8i8  : N2VDShIns<op24, op23, op11_8, 0, op4, imm0_7,
+                        N2RegVShLFrm, OpcodeStr, "8", v8i8, NEONvsli> {
     let Inst{21-19} = 0b001; // imm6 = 001xxx
   }
-  def v4i16 : N2VDShIns<op24, op23, op11_8, 0, op4,
-                        f, OpcodeStr, "16", v4i16, ShOp> {
+  def v4i16 : N2VDShIns<op24, op23, op11_8, 0, op4, imm0_15,
+                        N2RegVShLFrm, OpcodeStr, "16", v4i16, NEONvsli> {
     let Inst{21-20} = 0b01; // imm6 = 01xxxx
   }
-  def v2i32 : N2VDShIns<op24, op23, op11_8, 0, op4,
-                        f, OpcodeStr, "32", v2i32, ShOp> {
+  def v2i32 : N2VDShIns<op24, op23, op11_8, 0, op4, imm0_31,
+                        N2RegVShLFrm, OpcodeStr, "32", v2i32, NEONvsli> {
     let Inst{21} = 0b1; // imm6 = 1xxxxx
   }
-  def v1i64 : N2VDShIns<op24, op23, op11_8, 1, op4,
-                        f, OpcodeStr, "64", v1i64, ShOp>;
+  def v1i64 : N2VDShIns<op24, op23, op11_8, 1, op4, imm0_63,
+                        N2RegVShLFrm, OpcodeStr, "64", v1i64, NEONvsli>;
                              // imm6 = xxxxxx
 
   // 128-bit vector types.
-  def v16i8 : N2VQShIns<op24, op23, op11_8, 0, op4,
-                        f, OpcodeStr, "8", v16i8, ShOp> {
+  def v16i8 : N2VQShIns<op24, op23, op11_8, 0, op4, imm0_7,
+                        N2RegVShLFrm, OpcodeStr, "8", v16i8, NEONvsli> {
     let Inst{21-19} = 0b001; // imm6 = 001xxx
   }
-  def v8i16 : N2VQShIns<op24, op23, op11_8, 0, op4,
-                        f, OpcodeStr, "16", v8i16, ShOp> {
+  def v8i16 : N2VQShIns<op24, op23, op11_8, 0, op4, imm0_15,
+                        N2RegVShLFrm, OpcodeStr, "16", v8i16, NEONvsli> {
     let Inst{21-20} = 0b01; // imm6 = 01xxxx
   }
-  def v4i32 : N2VQShIns<op24, op23, op11_8, 0, op4,
-                        f, OpcodeStr, "32", v4i32, ShOp> {
+  def v4i32 : N2VQShIns<op24, op23, op11_8, 0, op4, imm0_31,
+                        N2RegVShLFrm, OpcodeStr, "32", v4i32, NEONvsli> {
     let Inst{21} = 0b1; // imm6 = 1xxxxx
   }
-  def v2i64 : N2VQShIns<op24, op23, op11_8, 1, op4,
-                        f, OpcodeStr, "64", v2i64, ShOp>;
+  def v2i64 : N2VQShIns<op24, op23, op11_8, 1, op4, imm0_63,
+                        N2RegVShLFrm, OpcodeStr, "64", v2i64, NEONvsli>;
+                             // imm6 = xxxxxx
+}
+multiclass N2VShInsR_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
+                          string OpcodeStr> {
+  // 64-bit vector types.
+  def v8i8  : N2VDShIns<op24, op23, op11_8, 0, op4, shr_imm8,
+                        N2RegVShRFrm, OpcodeStr, "8", v8i8, NEONvsri> {
+    let Inst{21-19} = 0b001; // imm6 = 001xxx
+  }
+  def v4i16 : N2VDShIns<op24, op23, op11_8, 0, op4, shr_imm16,
+                        N2RegVShRFrm, OpcodeStr, "16", v4i16, NEONvsri> {
+    let Inst{21-20} = 0b01; // imm6 = 01xxxx
+  }
+  def v2i32 : N2VDShIns<op24, op23, op11_8, 0, op4, shr_imm32,
+                        N2RegVShRFrm, OpcodeStr, "32", v2i32, NEONvsri> {
+    let Inst{21} = 0b1; // imm6 = 1xxxxx
+  }
+  def v1i64 : N2VDShIns<op24, op23, op11_8, 1, op4, shr_imm64,
+                        N2RegVShRFrm, OpcodeStr, "64", v1i64, NEONvsri>;
+                             // imm6 = xxxxxx
+
+  // 128-bit vector types.
+  def v16i8 : N2VQShIns<op24, op23, op11_8, 0, op4, shr_imm8,
+                        N2RegVShRFrm, OpcodeStr, "8", v16i8, NEONvsri> {
+    let Inst{21-19} = 0b001; // imm6 = 001xxx
+  }
+  def v8i16 : N2VQShIns<op24, op23, op11_8, 0, op4, shr_imm16,
+                        N2RegVShRFrm, OpcodeStr, "16", v8i16, NEONvsri> {
+    let Inst{21-20} = 0b01; // imm6 = 01xxxx
+  }
+  def v4i32 : N2VQShIns<op24, op23, op11_8, 0, op4, shr_imm32,
+                        N2RegVShRFrm, OpcodeStr, "32", v4i32, NEONvsri> {
+    let Inst{21} = 0b1; // imm6 = 1xxxxx
+  }
+  def v2i64 : N2VQShIns<op24, op23, op11_8, 1, op4, shr_imm64,
+                        N2RegVShRFrm, OpcodeStr, "64", v2i64, NEONvsri>;
                              // imm6 = xxxxxx
 }
 
@@ -4054,9 +4091,10 @@ defm VRSRAs : N2VShAdd_QHSD<0, 1, 0b0011, 1, "vrsra", "s", NEONvrshrs>;
 defm VRSRAu : N2VShAdd_QHSD<1, 1, 0b0011, 1, "vrsra", "u", NEONvrshru>;
 
 // VSLI : Vector Shift Left and Insert
-defm VSLI : N2VShIns_QHSD<1, 1, 0b0101, 1, "vsli", NEONvsli, N2RegVShLFrm>;
+defm VSLI : N2VShInsL_QHSD<1, 1, 0b0101, 1, "vsli">;
+
 // VSRI : Vector Shift Right and Insert
-defm VSRI : N2VShIns_QHSD<1, 1, 0b0100, 1, "vsri", NEONvsri, N2RegVShRFrm>;
+defm VSRI : N2VShInsR_QHSD<1, 1, 0b0100, 1, "vsri">;
 
 // Vector Absolute and Saturating Absolute.
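The split above exists because VSLI and VSRI accept different immediate ranges and encode them into imm6 differently: for 8-bit elements, vsli takes #0..#7 and encodes imm6 = 8 + imm, while vsri takes #1..#8 and encodes imm6 = 16 - imm (the same pattern scales to the 16- and 32-bit element sizes; the 64-bit forms use the L bit together with the full imm6 field). The old direction-agnostic N2VShIns_QHSD, parameterized only by Format and SDNode, could not give each direction its own immediate operand class; hard-coding the direction per multiclass lets each element size carry a range-checked operand (imm0_7 ... imm0_63 on the left, shr_imm8 ... shr_imm64 on the right, reconstructed here from the upstream tree), so out-of-range immediates are rejected at parse time. The following boundary-immediate sketch is illustrative only and not part of the patch; assembling it with llvm-mc -triple armv7 -show-encoding shows the resulting imm6 values for the 8-bit forms:

@ Boundary immediates for the 8-bit shift-insert forms (illustrative sketch).
	vsli.8	d0, d1, #0	@ smallest VSLI shift: imm6 = 0b001000
	vsli.8	d0, d1, #7	@ largest VSLI shift:  imm6 = 0b001111
	vsri.8	d0, d1, #1	@ smallest VSRI shift: imm6 = 0b001111
	vsri.8	d0, d1, #8	@ largest VSRI shift:  imm6 = 0b001000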
diff --git a/test/MC/ARM/neon-shift-encoding.s b/test/MC/ARM/neon-shift-encoding.s
index 898c31dec18..a7a1b838604 100644
--- a/test/MC/ARM/neon-shift-encoding.s
+++ b/test/MC/ARM/neon-shift-encoding.s
@@ -97,6 +97,38 @@ _foo:
 	vsra.s32	q8, q8, #31
 @ CHECK: vsra.s64	q8, q8, #63 @ encoding: [0xf0,0x01,0xc1,0xf2]
 	vsra.s64	q8, q8, #63
+@ CHECK: vsri.8	d16, d16, #7 @ encoding: [0x30,0x04,0xc9,0xf3]
+	vsri.8	d16, d16, #7
+@ CHECK: vsri.16	d16, d16, #15 @ encoding: [0x30,0x04,0xd1,0xf3]
+	vsri.16	d16, d16, #15
+@ CHECK: vsri.32	d16, d16, #31 @ encoding: [0x30,0x04,0xe1,0xf3]
+	vsri.32	d16, d16, #31
+@ CHECK: vsri.64	d16, d16, #63 @ encoding: [0xb0,0x04,0xc1,0xf3]
+	vsri.64	d16, d16, #63
+@ CHECK: vsri.8	q8, q8, #7 @ encoding: [0x70,0x04,0xc9,0xf3]
+	vsri.8	q8, q8, #7
+@ CHECK: vsri.16	q8, q8, #15 @ encoding: [0x70,0x04,0xd1,0xf3]
+	vsri.16	q8, q8, #15
+@ CHECK: vsri.32	q8, q8, #31 @ encoding: [0x70,0x04,0xe1,0xf3]
+	vsri.32	q8, q8, #31
+@ CHECK: vsri.64	q8, q8, #63 @ encoding: [0xf0,0x04,0xc1,0xf3]
+	vsri.64	q8, q8, #63
+@ CHECK: vsli.8	d16, d16, #7 @ encoding: [0x30,0x05,0xcf,0xf3]
+	vsli.8	d16, d16, #7
+@ CHECK: vsli.16	d16, d16, #15 @ encoding: [0x30,0x05,0xdf,0xf3]
+	vsli.16	d16, d16, #15
+@ CHECK: vsli.32	d16, d16, #31 @ encoding: [0x30,0x05,0xff,0xf3]
+	vsli.32	d16, d16, #31
+@ CHECK: vsli.64	d16, d16, #63 @ encoding: [0xb0,0x05,0xff,0xf3]
+	vsli.64	d16, d16, #63
+@ CHECK: vsli.8	q8, q8, #7 @ encoding: [0x70,0x05,0xcf,0xf3]
+	vsli.8	q8, q8, #7
+@ CHECK: vsli.16	q8, q8, #15 @ encoding: [0x70,0x05,0xdf,0xf3]
+	vsli.16	q8, q8, #15
+@ CHECK: vsli.32	q8, q8, #31 @ encoding: [0x70,0x05,0xff,0xf3]
+	vsli.32	q8, q8, #31
+@ CHECK: vsli.64	q8, q8, #63 @ encoding: [0xf0,0x05,0xff,0xf3]
+	vsli.64	q8, q8, #63
 @ CHECK: vshll.s8	q8, d16, #7 @ encoding: [0x30,0x0a,0xcf,0xf2]
 	vshll.s8	q8, d16, #7
 @ CHECK: vshll.s16	q8, d16, #15 @ encoding: [0x30,0x0a,0xdf,0xf2]
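The tests use the same immediates (#7, #15, #31, #63) for both instructions, which are the maximum legal VSLI shifts per element size, and each CHECK line can be cross-checked by hand against the imm6 layout described above. A worked decode of one of the added encodings, as an illustrative sketch rather than part of the test:

@ Worked decode (illustrative): vsri.64 q8, q8, #63 -> [0xf0,0x04,0xc1,0xf3]
@ The little-endian bytes form the word 0xf3c104f0:
@   imm6 = bits{21-16} = 0b000001 = 1, with L (bit 7) = 1 -> 64-bit right shift
@   shift = 64 - imm6 = 63; Q (bit 6) = 1 selects the quad-register form
	vsri.64	q8, q8, #63

To run a file like this by hand, the usual MC-test pattern is llvm-mc -triple armv7 -show-encoding neon-shift-encoding.s | FileCheck neon-shift-encoding.s; the test's actual RUN line, which fixes the exact triple and CPU flags, lies outside this hunk.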