diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index 7a63085a9d0..45d10e3a505 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -1668,11 +1668,25 @@ class N3VW<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
            string OpcodeStr, string Dt,
            ValueType TyQ, ValueType TyD, SDNode OpNode, SDNode ExtOp,
            bit Commutable>
   : N3V<op24, op23, op21_20, op11_8, 0, op4,
-        (outs QPR:$dst), (ins QPR:$src1, DPR:$src2), N3RegFrm, IIC_VSUBiD,
-        OpcodeStr, Dt, "$dst, $src1, $src2", "",
-        [(set QPR:$dst, (OpNode (TyQ QPR:$src1),
-                                (TyQ (ExtOp (TyD DPR:$src2)))))]> {
+        (outs QPR:$Qd), (ins QPR:$Qn, DPR:$Dm), N3RegFrm, IIC_VSUBiD,
+        OpcodeStr, Dt, "$Qd, $Qn, $Dm", "",
+        [(set QPR:$Qd, (OpNode (TyQ QPR:$Qn),
+                               (TyQ (ExtOp (TyD DPR:$Dm)))))]> {
   let isCommutable = Commutable;
+
+  // Instruction operands.
+  bits<4> Qd;
+  bits<4> Qn;
+  bits<5> Dm;
+
+  let Inst{15-13} = Qd{2-0};
+  let Inst{22} = Qd{3};
+  let Inst{12} = 0;
+  let Inst{19-17} = Qn{2-0};
+  let Inst{7} = Qn{3};
+  let Inst{16} = 0;
+  let Inst{3-0} = Dm{3-0};
+  let Inst{5} = Dm{4};
 }
 // Pairwise long 2-register intrinsics, both double- and quad-register.
diff --git a/test/MC/ARM/neon-fp-encoding.ll b/test/MC/ARM/neon-fp-encoding.ll
index 52880364339..9cd5bd77008 100644
--- a/test/MC/ARM/neon-fp-encoding.ll
+++ b/test/MC/ARM/neon-fp-encoding.ll
@@ -119,3 +119,63 @@ define <2 x i64> @vaddlu_2xi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
   %tmp5 = add <2 x i64> %tmp3, %tmp4
   ret <2 x i64> %tmp5
 }
+
+; CHECK: vaddws_8xi8
+define <8 x i16> @vaddws_8xi8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
+  %tmp1 = load <8 x i16>* %A
+  %tmp2 = load <8 x i8>* %B
+  %tmp3 = sext <8 x i8> %tmp2 to <8 x i16>
+; CHECK: vaddw.s8 q8, q8, d18 @ encoding: [0xa2,0x01,0xc0,0xf2]
+  %tmp4 = add <8 x i16> %tmp1, %tmp3
+  ret <8 x i16> %tmp4
+}
+
+; CHECK: vaddws_4xi16
+define <4 x i32> @vaddws_4xi16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
+  %tmp1 = load <4 x i32>* %A
+  %tmp2 = load <4 x i16>* %B
+  %tmp3 = sext <4 x i16> %tmp2 to <4 x i32>
+; CHECK: vaddw.s16 q8, q8, d18 @ encoding: [0xa2,0x01,0xd0,0xf2]
+  %tmp4 = add <4 x i32> %tmp1, %tmp3
+  ret <4 x i32> %tmp4
+}
+
+; CHECK: vaddws_2xi32
+define <2 x i64> @vaddws_2xi32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
+  %tmp1 = load <2 x i64>* %A
+  %tmp2 = load <2 x i32>* %B
+  %tmp3 = sext <2 x i32> %tmp2 to <2 x i64>
+; CHECK: vaddw.s32 q8, q8, d18 @ encoding: [0xa2,0x01,0xe0,0xf2]
+  %tmp4 = add <2 x i64> %tmp1, %tmp3
+  ret <2 x i64> %tmp4
+}
+
+; CHECK: vaddwu_8xi8
+define <8 x i16> @vaddwu_8xi8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
+  %tmp1 = load <8 x i16>* %A
+  %tmp2 = load <8 x i8>* %B
+  %tmp3 = zext <8 x i8> %tmp2 to <8 x i16>
+; CHECK: vaddw.u8 q8, q8, d18 @ encoding: [0xa2,0x01,0xc0,0xf3]
+  %tmp4 = add <8 x i16> %tmp1, %tmp3
+  ret <8 x i16> %tmp4
+}
+
+; CHECK: vaddwu_4xi16
+define <4 x i32> @vaddwu_4xi16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
+  %tmp1 = load <4 x i32>* %A
+  %tmp2 = load <4 x i16>* %B
+  %tmp3 = zext <4 x i16> %tmp2 to <4 x i32>
+; CHECK: vaddw.u16 q8, q8, d18 @ encoding: [0xa2,0x01,0xd0,0xf3]
+  %tmp4 = add <4 x i32> %tmp1, %tmp3
+  ret <4 x i32> %tmp4
+}
+
+; CHECK: vaddwu_2xi32
+define <2 x i64> @vaddwu_2xi32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
+  %tmp1 = load <2 x i64>* %A
+  %tmp2 = load <2 x i32>* %B
+  %tmp3 = zext <2 x i32> %tmp2 to <2 x i64>
+; CHECK: vaddw.u32 q8, q8, d18 @ encoding: [0xa2,0x01,0xe0,0xf3]
+  %tmp4 = add <2 x i64> %tmp1, %tmp3
+  ret <2 x i64> %tmp4
+}