Mirror of https://github.com/c64scene-ar/llvm-6502.git
Add correct NEON encodings for vsli and vsri.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@117459 91177308-0d34-0410-b5e6-96231b3b80d8
commit 0745c389d9
parent dd31ed67e6
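Background for the change: VSLI (shift left and insert) and VSRI (shift right and insert) shift each element of the source register by an immediate and insert the shifted value into the destination, leaving the destination bits that the shifted value does not cover unchanged. That read-modify-write behaviour is why the TableGen classes below keep the "$src1 = $Vd" constraint. The following is a minimal C sketch of the per-lane behaviour for 8-bit elements (illustration only, not part of the commit):

#include <stdint.h>
#include <stdio.h>

/* Illustration only: one 8-bit lane of VSLI/VSRI.
   VSLI keeps the low n bits of the destination, VSRI keeps the high n bits. */
static uint8_t vsli_lane8(uint8_t d, uint8_t m, unsigned n) {  /* 0 <= n <= 7 */
    uint8_t keep = (uint8_t)((1u << n) - 1u);                  /* low n bits of d survive */
    return (uint8_t)((unsigned)(m << n) | (d & keep));
}

static uint8_t vsri_lane8(uint8_t d, uint8_t m, unsigned n) {  /* 1 <= n <= 8 */
    uint8_t keep = (uint8_t)(0xFFu << (8u - n));               /* high n bits of d survive */
    return (uint8_t)((m >> n) | (d & keep));
}

int main(void) {
    printf("%02x %02x\n", vsli_lane8(0xAB, 0xCD, 4), vsri_lane8(0xAB, 0xCD, 4));  /* db ac */
    return 0;
}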
@@ -1747,16 +1747,16 @@ class N2VQShAdd<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
 // both double- and quad-register.
 class N2VDShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
                 Format f, string OpcodeStr, string Dt, ValueType Ty,SDNode ShOp>
-  : N2VImm<op24, op23, op11_8, op7, 0, op4, (outs DPR:$dst),
-           (ins DPR:$src1, DPR:$src2, i32imm:$SIMM), f, IIC_VSHLiD,
-           OpcodeStr, Dt, "$dst, $src2, $SIMM", "$src1 = $dst",
-           [(set DPR:$dst, (Ty (ShOp DPR:$src1, DPR:$src2, (i32 imm:$SIMM))))]>;
+  : N2VImm<op24, op23, op11_8, op7, 0, op4, (outs DPR:$Vd),
+           (ins DPR:$src1, DPR:$Vm, i32imm:$SIMM), f, IIC_VSHLiD,
+           OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
+           [(set DPR:$Vd, (Ty (ShOp DPR:$src1, DPR:$Vm, (i32 imm:$SIMM))))]>;
 class N2VQShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
                 Format f, string OpcodeStr, string Dt, ValueType Ty,SDNode ShOp>
-  : N2VImm<op24, op23, op11_8, op7, 1, op4, (outs QPR:$dst),
-           (ins QPR:$src1, QPR:$src2, i32imm:$SIMM), f, IIC_VSHLiQ,
-           OpcodeStr, Dt, "$dst, $src2, $SIMM", "$src1 = $dst",
-           [(set QPR:$dst, (Ty (ShOp QPR:$src1, QPR:$src2, (i32 imm:$SIMM))))]>;
+  : N2VImm<op24, op23, op11_8, op7, 1, op4, (outs QPR:$Vd),
+           (ins QPR:$src1, QPR:$Vm, i32imm:$SIMM), f, IIC_VSHLiQ,
+           OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
+           [(set QPR:$Vd, (Ty (ShOp QPR:$src1, QPR:$Vm, (i32 imm:$SIMM))))]>;
 
 // Convert, with fractional bits immediate,
 // both double- and quad-register.
@@ -307,3 +307,141 @@ declare <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8>, <16 x i8>) nounwind r
 declare <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 declare <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 declare <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <8 x i8> @vsli8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+	%tmp1 = load <8 x i8>* %A
+	%tmp2 = load <8 x i8>* %B
+; CHECK: vsli.8 d17, d16, #7 @ encoding: [0x30,0x15,0xcf,0xf3]
+	%tmp3 = call <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
+	ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @vsli16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+	%tmp1 = load <4 x i16>* %A
+	%tmp2 = load <4 x i16>* %B
+; CHECK: vsli.16 d17, d16, #15 @ encoding: [0x30,0x15,0xdf,0xf3]
+	%tmp3 = call <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
+	ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @vsli32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+	%tmp1 = load <2 x i32>* %A
+	%tmp2 = load <2 x i32>* %B
+; CHECK: vsli.32 d17, d16, #31 @ encoding: [0x30,0x15,0xff,0xf3]
+	%tmp3 = call <2 x i32> @llvm.arm.neon.vshiftins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> < i32 31, i32 31 >)
+	ret <2 x i32> %tmp3
+}
+
+define <1 x i64> @vsli64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+	%tmp1 = load <1 x i64>* %A
+	%tmp2 = load <1 x i64>* %B
+; CHECK: vsli.64 d17, d16, #63 @ encoding: [0xb0,0x15,0xff,0xf3]
+	%tmp3 = call <1 x i64> @llvm.arm.neon.vshiftins.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2, <1 x i64> < i64 63 >)
+	ret <1 x i64> %tmp3
+}
+
+define <16 x i8> @vsliQ8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+	%tmp1 = load <16 x i8>* %A
+	%tmp2 = load <16 x i8>* %B
+; CHECK: vsli.8 q8, q9, #7 @ encoding: [0x72,0x05,0xcf,0xf3]
+	%tmp3 = call <16 x i8> @llvm.arm.neon.vshiftins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
+	ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @vsliQ16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+	%tmp1 = load <8 x i16>* %A
+	%tmp2 = load <8 x i16>* %B
+; CHECK: vsli.16 q8, q9, #15 @ encoding: [0x72,0x05,0xdf,0xf3]
+	%tmp3 = call <8 x i16> @llvm.arm.neon.vshiftins.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
+	ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @vsliQ32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+	%tmp1 = load <4 x i32>* %A
+	%tmp2 = load <4 x i32>* %B
+; CHECK: vsli.32 q8, q9, #31 @ encoding: [0x72,0x05,0xff,0xf3]
+	%tmp3 = call <4 x i32> @llvm.arm.neon.vshiftins.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
+	ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @vsliQ64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+	%tmp1 = load <2 x i64>* %A
+	%tmp2 = load <2 x i64>* %B
+; CHECK: vsli.64 q8, q9, #63 @ encoding: [0xf2,0x05,0xff,0xf3]
+	%tmp3 = call <2 x i64> @llvm.arm.neon.vshiftins.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2, <2 x i64> < i64 63, i64 63 >)
+	ret <2 x i64> %tmp3
+}
+
+define <8 x i8> @vsri8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+	%tmp1 = load <8 x i8>* %A
+	%tmp2 = load <8 x i8>* %B
+; CHECK: vsri.8 d17, d16, #8 @ encoding: [0x30,0x14,0xc8,0xf3]
+	%tmp3 = call <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
+	ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @vsri16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+	%tmp1 = load <4 x i16>* %A
+	%tmp2 = load <4 x i16>* %B
+; CHECK: vsri.16 d17, d16, #16 @ encoding: [0x30,0x14,0xd0,0xf3]
+	%tmp3 = call <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
+	ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @vsri32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+	%tmp1 = load <2 x i32>* %A
+	%tmp2 = load <2 x i32>* %B
+; CHECK: vsri.32 d17, d16, #32 @ encoding: [0x30,0x14,0xe0,0xf3]
+	%tmp3 = call <2 x i32> @llvm.arm.neon.vshiftins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> < i32 -32, i32 -32 >)
+	ret <2 x i32> %tmp3
+}
+
+define <1 x i64> @vsri64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+	%tmp1 = load <1 x i64>* %A
+	%tmp2 = load <1 x i64>* %B
+; CHECK: vsri.64 d17, d16, #64 @ encoding: [0xb0,0x14,0xc0,0xf3]
+	%tmp3 = call <1 x i64> @llvm.arm.neon.vshiftins.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2, <1 x i64> < i64 -64 >)
+	ret <1 x i64> %tmp3
+}
+
+define <16 x i8> @vsriQ8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+	%tmp1 = load <16 x i8>* %A
+	%tmp2 = load <16 x i8>* %B
+; CHECK: vsri.8 q8, q9, #8 @ encoding: [0x72,0x04,0xc8,0xf3]
+	%tmp3 = call <16 x i8> @llvm.arm.neon.vshiftins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
+	ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @vsriQ16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+	%tmp1 = load <8 x i16>* %A
+	%tmp2 = load <8 x i16>* %B
+; CHECK: vsri.16 q8, q9, #16 @ encoding: [0x72,0x04,0xd0,0xf3]
+	%tmp3 = call <8 x i16> @llvm.arm.neon.vshiftins.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
+	ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @vsriQ32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+	%tmp1 = load <4 x i32>* %A
+	%tmp2 = load <4 x i32>* %B
+; CHECK: vsri.32 q8, q9, #32 @ encoding: [0x72,0x04,0xe0,0xf3]
+	%tmp3 = call <4 x i32> @llvm.arm.neon.vshiftins.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
+	ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @vsriQ64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+	%tmp1 = load <2 x i64>* %A
+	%tmp2 = load <2 x i64>* %B
+; CHECK: vsri.64 q8, q9, #64 @ encoding: [0xf2,0x04,0xc0,0xf3]
+	%tmp3 = call <2 x i64> @llvm.arm.neon.vshiftins.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
+	ret <2 x i64> %tmp3
+}
+
+declare <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vshiftins.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) nounwind readnone
+declare <1 x i64> @llvm.arm.neon.vshiftins.v1i64(<1 x i64>, <1 x i64>, <1 x i64>) nounwind readnone
+
+declare <16 x i8> @llvm.arm.neon.vshiftins.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.arm.neon.vshiftins.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vshiftins.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.arm.neon.vshiftins.v2i64(<2 x i64>, <2 x i64>, <2 x i64>) nounwind readnone
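For reference, the byte sequences in the CHECK lines above follow directly from the "two registers and shift amount" field layout in the ARM architecture manual (U=1 and fixed bits, then D, imm6, Vd, opc, L, Q, M, Vm), printed in the little-endian byte order shown in the @ encoding comments. The sketch below is not part of the commit and the helper name is made up; it just re-derives two of the listed encodings from that layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustration only: pack the VSLI/VSRI (register + immediate) fields.
   rd/rm are D-register numbers (a Q register qN occupies d(2N)/d(2N+1)),
   esize is the element width in bits, shift is the immediate. */
static uint32_t neon_shift_insert_enc(unsigned rd, unsigned rm, unsigned esize,
                                      unsigned shift, int quad, int is_vsri) {
    unsigned L = 0, imm6;
    if (esize == 64) {                       /* 64-bit elements set the L bit */
        L = 1;
        imm6 = is_vsri ? 64 - shift : shift;
    } else {                                 /* imm6 folds element size and shift together */
        imm6 = is_vsri ? 2 * esize - shift : esize + shift;
    }
    unsigned opc = is_vsri ? 0x4u : 0x5u;    /* bits 11-8: 0100 = VSRI, 0101 = VSLI */
    return 0xF3800010u                       /* 1111 0011 1... .... .... .... ...1 .... */
         | (((rd >> 4) & 1u) << 22)          /* D    */
         | ((imm6 & 0x3Fu) << 16)            /* imm6 */
         | ((rd & 0xFu) << 12)               /* Vd   */
         | (opc << 8)
         | (L << 7)
         | ((unsigned)(quad != 0) << 6)      /* Q    */
         | (((rm >> 4) & 1u) << 5)           /* M    */
         | (rm & 0xFu);                      /* Vm   */
}

int main(void) {
    /* vsli.8 d17, d16, #7  -> [0x30,0x15,0xcf,0xf3] == 0xf3cf1530 */
    assert(neon_shift_insert_enc(17, 16, 8, 7, 0, 0) == 0xF3CF1530u);
    /* vsri.64 q8, q9, #64  -> [0xf2,0x04,0xc0,0xf3] == 0xf3c004f2 */
    assert(neon_shift_insert_enc(16, 18, 64, 64, 1, 1) == 0xF3C004F2u);
    puts("ok");
    return 0;
}

Note that for VSRI the shift counts down from the element size (imm6 = 2*esize - shift), which is why the .8/.16/.32/.64 forms above all use the maximum immediate yet still encode distinct imm6 values.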