Correct the encoding for VRSRA and VSRA instructions.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@127294 91177308-0d34-0410-b5e6-96231b3b80d8
Bill Wendling 2011-03-09 00:00:35 +00:00
parent 7c6b608a7c
commit c04a9dea78
2 changed files with 46 additions and 13 deletions
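For context on what "correct the encoding" means here: NEON shift-right-by-immediate instructions (VSHR, VSRA, VRSRA and friends) do not store the shift amount literally. It lives biased in the imm6 field, bits 21-16: for element sizes 8/16/32 the encoded value is 2*esize - shift, and for 64-bit elements it is 64 - shift with the L bit (Inst{7}) set instead. The switch below from the generic i32imm operand to the shr_imm* operands is what routes the assembler through that re-encoding. A minimal sketch of the arithmetic, assuming the field layout from the ARM architecture manual; encodeImm6 is an illustrative helper, not an LLVM API:

#include <cassert>
#include <cstdint>

// Illustrative helper (not LLVM API): compute the imm6 field for a NEON
// shift-right-by-immediate instruction such as VSRA/VRSRA.
static uint32_t encodeImm6(unsigned ElemBits, unsigned Shift) {
  assert(Shift >= 1 && Shift <= ElemBits && "shift amount out of range");
  // 8/16/32-bit elements: imm6 = 2*esize - shift (patterns 001xxx, 01xxxx,
  // 1xxxxx). 64-bit elements: imm6 = 64 - shift, distinguished by L = 1.
  return (ElemBits == 64 ? 64u : 2u * ElemBits) - Shift;
}

int main() {
  // Cross-checked against the test vectors below:
  // vsra.u8 d16, d16, #7 -> 0xf3c90130; bits 21-16 = 0b001001 = 9 = 16 - 7.
  assert(encodeImm6(8, 7) == 9);
  // vsra.u64 q8, q8, #63 -> 0xf3c101f0; bits 21-16 = 1 = 64 - 63 (L = 1).
  assert(encodeImm6(64, 63) == 1);
  return 0;
}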


@@ -2325,16 +2325,18 @@ class N2VNSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
 // Shift right by immediate and accumulate,
 // both double- and quad-register.
 class N2VDShAdd<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
-                string OpcodeStr, string Dt, ValueType Ty, SDNode ShOp>
+                Operand ImmTy, string OpcodeStr, string Dt,
+                ValueType Ty, SDNode ShOp>
   : N2VImm<op24, op23, op11_8, op7, 0, op4, (outs DPR:$Vd),
-           (ins DPR:$src1, DPR:$Vm, i32imm:$SIMM), N2RegVShRFrm, IIC_VPALiD,
+           (ins DPR:$src1, DPR:$Vm, ImmTy:$SIMM), N2RegVShRFrm, IIC_VPALiD,
            OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
            [(set DPR:$Vd, (Ty (add DPR:$src1,
                                (Ty (ShOp DPR:$Vm, (i32 imm:$SIMM))))))]>;
 class N2VQShAdd<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
-                string OpcodeStr, string Dt, ValueType Ty, SDNode ShOp>
+                Operand ImmTy, string OpcodeStr, string Dt,
+                ValueType Ty, SDNode ShOp>
   : N2VImm<op24, op23, op11_8, op7, 1, op4, (outs QPR:$Vd),
-           (ins QPR:$src1, QPR:$Vm, i32imm:$SIMM), N2RegVShRFrm, IIC_VPALiD,
+           (ins QPR:$src1, QPR:$Vm, ImmTy:$SIMM), N2RegVShRFrm, IIC_VPALiD,
            OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
            [(set QPR:$Vd, (Ty (add QPR:$src1,
                                (Ty (ShOp QPR:$Vm, (i32 imm:$SIMM))))))]>;
@@ -3090,41 +3092,40 @@ multiclass N2VShR_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
 multiclass N2VShAdd_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                          string OpcodeStr, string Dt, SDNode ShOp> {
   // 64-bit vector types.
-  def v8i8  : N2VDShAdd<op24, op23, op11_8, 0, op4,
+  def v8i8  : N2VDShAdd<op24, op23, op11_8, 0, op4, shr_imm8,
                         OpcodeStr, !strconcat(Dt, "8"), v8i8, ShOp> {
     let Inst{21-19} = 0b001;   // imm6 = 001xxx
   }
-  def v4i16 : N2VDShAdd<op24, op23, op11_8, 0, op4,
+  def v4i16 : N2VDShAdd<op24, op23, op11_8, 0, op4, shr_imm16,
                         OpcodeStr, !strconcat(Dt, "16"), v4i16, ShOp> {
     let Inst{21-20} = 0b01;    // imm6 = 01xxxx
   }
-  def v2i32 : N2VDShAdd<op24, op23, op11_8, 0, op4,
+  def v2i32 : N2VDShAdd<op24, op23, op11_8, 0, op4, shr_imm32,
                         OpcodeStr, !strconcat(Dt, "32"), v2i32, ShOp> {
     let Inst{21} = 0b1;        // imm6 = 1xxxxx
   }
-  def v1i64 : N2VDShAdd<op24, op23, op11_8, 1, op4,
+  def v1i64 : N2VDShAdd<op24, op23, op11_8, 1, op4, shr_imm64,
                         OpcodeStr, !strconcat(Dt, "64"), v1i64, ShOp>;
                              // imm6 = xxxxxx

   // 128-bit vector types.
-  def v16i8 : N2VQShAdd<op24, op23, op11_8, 0, op4,
+  def v16i8 : N2VQShAdd<op24, op23, op11_8, 0, op4, shr_imm8,
                         OpcodeStr, !strconcat(Dt, "8"), v16i8, ShOp> {
     let Inst{21-19} = 0b001;   // imm6 = 001xxx
   }
-  def v8i16 : N2VQShAdd<op24, op23, op11_8, 0, op4,
+  def v8i16 : N2VQShAdd<op24, op23, op11_8, 0, op4, shr_imm16,
                         OpcodeStr, !strconcat(Dt, "16"), v8i16, ShOp> {
     let Inst{21-20} = 0b01;    // imm6 = 01xxxx
   }
-  def v4i32 : N2VQShAdd<op24, op23, op11_8, 0, op4,
+  def v4i32 : N2VQShAdd<op24, op23, op11_8, 0, op4, shr_imm32,
                         OpcodeStr, !strconcat(Dt, "32"), v4i32, ShOp> {
     let Inst{21} = 0b1;        // imm6 = 1xxxxx
   }
-  def v2i64 : N2VQShAdd<op24, op23, op11_8, 1, op4,
+  def v2i64 : N2VQShAdd<op24, op23, op11_8, 1, op4, shr_imm64,
                         OpcodeStr, !strconcat(Dt, "64"), v2i64, ShOp>;
                              // imm6 = xxxxxx
 }

 // Neon Shift-Insert vector operations,
 // with f of either N2RegVShLFrm or N2RegVShRFrm
 // element sizes of 8, 16, 32 and 64 bits:
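What makes shr_imm8/shr_imm16/shr_imm32/shr_imm64 different from i32imm is their encoder hook: each such operand names an EncoderMethod that emits the biased shift amount, while the let Inst{21-19}-style overrides above pin the high bits of imm6 that select the element size. A simplified sketch of what those hooks compute — the real methods in ARMMCCodeEmitter take an MCInst plus operand index, and the exact names and signatures here are an assumption about this era of the tree:

#include <cstdint>

// Simplified sketch (names/signatures assumed) of the per-size encoder
// hooks behind the shr_imm* operands. Each emits the shift amount biased;
// combined with the fixed high bits set via `let Inst{...}` above, the
// final imm6 comes out to 2*esize - shift (or 64 - shift for 64-bit).
uint32_t getShiftRight8Imm(uint32_t Imm)  { return 8 - Imm;  } // imm6 = 001xxx
uint32_t getShiftRight16Imm(uint32_t Imm) { return 16 - Imm; } // imm6 = 01xxxx
uint32_t getShiftRight32Imm(uint32_t Imm) { return 32 - Imm; } // imm6 = 1xxxxx
uint32_t getShiftRight64Imm(uint32_t Imm) { return 64 - Imm; } // imm6 = xxxxxx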


@@ -65,6 +65,38 @@ _foo:
 	vshr.s32 q8, q8, #31
 @ CHECK: vshr.s64 q8, q8, #63 @ encoding: [0xf0,0x00,0xc1,0xf2]
 	vshr.s64 q8, q8, #63
+@ CHECK: vsra.u8 d16, d16, #7 @ encoding: [0x30,0x01,0xc9,0xf3]
+	vsra.u8 d16, d16, #7
+@ CHECK: vsra.u16 d16, d16, #15 @ encoding: [0x30,0x01,0xd1,0xf3]
+	vsra.u16 d16, d16, #15
+@ CHECK: vsra.u32 d16, d16, #31 @ encoding: [0x30,0x01,0xe1,0xf3]
+	vsra.u32 d16, d16, #31
+@ CHECK: vsra.u64 d16, d16, #63 @ encoding: [0xb0,0x01,0xc1,0xf3]
+	vsra.u64 d16, d16, #63
+@ CHECK: vsra.u8 q8, q8, #7 @ encoding: [0x70,0x01,0xc9,0xf3]
+	vsra.u8 q8, q8, #7
+@ CHECK: vsra.u16 q8, q8, #15 @ encoding: [0x70,0x01,0xd1,0xf3]
+	vsra.u16 q8, q8, #15
+@ CHECK: vsra.u32 q8, q8, #31 @ encoding: [0x70,0x01,0xe1,0xf3]
+	vsra.u32 q8, q8, #31
+@ CHECK: vsra.u64 q8, q8, #63 @ encoding: [0xf0,0x01,0xc1,0xf3]
+	vsra.u64 q8, q8, #63
+@ CHECK: vsra.s8 d16, d16, #7 @ encoding: [0x30,0x01,0xc9,0xf2]
+	vsra.s8 d16, d16, #7
+@ CHECK: vsra.s16 d16, d16, #15 @ encoding: [0x30,0x01,0xd1,0xf2]
+	vsra.s16 d16, d16, #15
+@ CHECK: vsra.s32 d16, d16, #31 @ encoding: [0x30,0x01,0xe1,0xf2]
+	vsra.s32 d16, d16, #31
+@ CHECK: vsra.s64 d16, d16, #63 @ encoding: [0xb0,0x01,0xc1,0xf2]
+	vsra.s64 d16, d16, #63
+@ CHECK: vsra.s8 q8, q8, #7 @ encoding: [0x70,0x01,0xc9,0xf2]
+	vsra.s8 q8, q8, #7
+@ CHECK: vsra.s16 q8, q8, #15 @ encoding: [0x70,0x01,0xd1,0xf2]
+	vsra.s16 q8, q8, #15
+@ CHECK: vsra.s32 q8, q8, #31 @ encoding: [0x70,0x01,0xe1,0xf2]
+	vsra.s32 q8, q8, #31
+@ CHECK: vsra.s64 q8, q8, #63 @ encoding: [0xf0,0x01,0xc1,0xf2]
+	vsra.s64 q8, q8, #63
 @ CHECK: vshll.s8 q8, d16, #7 @ encoding: [0x30,0x0a,0xcf,0xf2]
 	vshll.s8 q8, d16, #7
 @ CHECK: vshll.s16 q8, d16, #15 @ encoding: [0x30,0x0a,0xdf,0xf2]
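The new expectations can be spot-checked against the imm6 rule directly: vsra.u16 d16, d16, #15 is 0xf3d10130, whose bits 21-16 are 0b010001 = 17 = 32 - 15, matching the 01xxxx pattern for 16-bit elements; vsra.s64 q8, q8, #63 is 0xf2c101f0, giving imm6 = 64 - 63 = 1 with the L bit (Inst{7}) set. The signed and unsigned variants differ only in the U bit (bit 24), visible as the 0xf2/0xf3 leading byte.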