Add codegen support for NEON vld4lane intrinsics with 128-bit vectors.
Also fix some copy-and-paste errors in previous changes.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@83590 91177308-0d34-0410-b5e6-96231b3b80d8
parent 9c8068078b
commit 62e053e5a1
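
Note (not part of the commit): the new path handles NEON structured lane loads whose operands are 128-bit Q vectors. A minimal C-level sketch of the kind of source that ends up as the llvm.arm.neon.vld4lane intrinsic on <8 x i16> operands is shown below; the function and variable names are hypothetical, and it assumes <arm_neon.h> on an ARM target with NEON enabled.

// Illustrative only: a source-level use that lowers to a vld4lane
// intrinsic with 128-bit vector operands (see the tests in this diff).
#include <arm_neon.h>

int16x8x4_t load_struct_into_lane3(const int16_t *p, int16x8x4_t acc) {
  // Load one 4-element structure from p into lane 3 of each of the four
  // 128-bit Q vectors in acc; all other lanes are left unchanged.
  return vld4q_lane_s16(p, acc, 3);
}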
@@ -1596,7 +1596,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
     EVT RegVT;
     unsigned Opc2 = 0;
     switch (VT.getSimpleVT().SimpleTy) {
-    default: llvm_unreachable("unhandled vld2lane type");
+    default: llvm_unreachable("unhandled vld3lane type");
     case MVT::v8i16:
       Opc = ARM::VLD3LNq16a;
       Opc2 = ARM::VLD3LNq16b;
@@ -1650,6 +1650,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
     SDValue MemAddr, MemUpdate, MemOpc;
     if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
       return NULL;
+    if (VT.is64BitVector()) {
     switch (VT.getSimpleVT().SimpleTy) {
     default: llvm_unreachable("unhandled vld4lane type");
     case MVT::v8i8: Opc = ARM::VLD4LNd8; break;
@@ -1666,6 +1667,67 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
       ResTys.push_back(MVT::Other);
       return CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 9);
+    }
+    // Quad registers are handled by extracting subregs, doing the load,
+    // and then inserting the results as subregs.
+    EVT RegVT;
+    unsigned Opc2 = 0;
+    switch (VT.getSimpleVT().SimpleTy) {
+    default: llvm_unreachable("unhandled vld4lane type");
+    case MVT::v8i16:
+      Opc = ARM::VLD4LNq16a;
+      Opc2 = ARM::VLD4LNq16b;
+      RegVT = MVT::v4i16;
+      break;
+    case MVT::v4f32:
+      Opc = ARM::VLD4LNq32a;
+      Opc2 = ARM::VLD4LNq32b;
+      RegVT = MVT::v2f32;
+      break;
+    case MVT::v4i32:
+      Opc = ARM::VLD4LNq32a;
+      Opc2 = ARM::VLD4LNq32b;
+      RegVT = MVT::v2i32;
+      break;
+    }
+    SDValue Chain = N->getOperand(0);
+    unsigned Lane = cast<ConstantSDNode>(N->getOperand(7))->getZExtValue();
+    unsigned NumElts = RegVT.getVectorNumElements();
+    int SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
+
+    SDValue D0 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                N->getOperand(3));
+    SDValue D1 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                N->getOperand(4));
+    SDValue D2 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                N->getOperand(5));
+    SDValue D3 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                N->getOperand(6));
+    const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, D0, D1, D2, D3,
+                            getI32Imm(Lane % NumElts), Chain };
+    std::vector<EVT> ResTys(4, RegVT);
+    ResTys.push_back(MVT::Other);
+    SDNode *VLdLn = CurDAG->getMachineNode((Lane < NumElts) ? Opc : Opc2,
+                                           dl, ResTys, Ops, 9);
+    SDValue Q0 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
+                                               N->getOperand(3),
+                                               SDValue(VLdLn, 0));
+    SDValue Q1 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
+                                               N->getOperand(4),
+                                               SDValue(VLdLn, 1));
+    SDValue Q2 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
+                                               N->getOperand(5),
+                                               SDValue(VLdLn, 2));
+    SDValue Q3 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
+                                               N->getOperand(6),
+                                               SDValue(VLdLn, 3));
+    Chain = SDValue(VLdLn, 4);
+    ReplaceUses(SDValue(N, 0), Q0);
+    ReplaceUses(SDValue(N, 1), Q1);
+    ReplaceUses(SDValue(N, 2), Q2);
+    ReplaceUses(SDValue(N, 3), Q3);
+    ReplaceUses(SDValue(N, 4), Chain);
+    return NULL;
   }

   case Intrinsic::arm_neon_vst2: {
     SDValue MemAddr, MemUpdate, MemOpc;
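
Note (not part of the commit): the lane arithmetic above splits a Q-register lane into a D-subregister choice plus a sub-lane. A minimal standalone sketch of that split is below; the struct and helper names are hypothetical and only illustrate the Lane < NumElts / Lane % NumElts logic used when choosing between the "a" (DSUBREG_0) and "b" (DSUBREG_1) opcode variants.

// Not LLVM code; a self-contained illustration of the lane split above.
#include <cassert>

struct QuadLaneSplit {
  bool HighHalf;     // true selects DSUBREG_1 and the "b" (odd) opcode
  unsigned SubLane;  // lane index within the chosen 64-bit D subregister
};

QuadLaneSplit splitQuadLane(unsigned Lane, unsigned NumElts) {
  assert(Lane < 2 * NumElts && "lane out of range for a Q register");
  return QuadLaneSplit{Lane >= NumElts, Lane % NumElts};
}

// For v8i16 (NumElts == 4 per D register), lane 5 yields {true, 1}: the load
// uses the high subregister and the VLD4LNq16b opcode with lane index 1.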
@@ -299,15 +299,15 @@ def VLD3LNd16 : VLD3LN<0b0110, "vld3.16">;
 def VLD3LNd32 : VLD3LN<0b1010, "vld3.32">;

 // vld3 to double-spaced even registers.
-def VLD3LNq16a: VLD3LN<0b0101, "vld3.16">;
-def VLD3LNq32a: VLD3LN<0b1001, "vld3.32">;
+def VLD3LNq16a: VLD3LN<0b0110, "vld3.16">;
+def VLD3LNq32a: VLD3LN<0b1010, "vld3.32">;

 // vld3 to double-spaced odd registers.
-def VLD3LNq16b: VLD3LN<0b0101, "vld3.16">;
-def VLD3LNq32b: VLD3LN<0b1001, "vld3.32">;
+def VLD3LNq16b: VLD3LN<0b0110, "vld3.16">;
+def VLD3LNq32b: VLD3LN<0b1010, "vld3.32">;

 // VLD4LN : Vector Load (single 4-element structure to one lane)
-class VLD4LND<bits<4> op11_8, string OpcodeStr>
+class VLD4LN<bits<4> op11_8, string OpcodeStr>
   : NLdSt<1,0b10,op11_8,0b0000,
           (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4),
           (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4,
@@ -316,9 +316,17 @@ class VLD4LND<bits<4> op11_8, string OpcodeStr>
           "\t\\{$dst1[$lane],$dst2[$lane],$dst3[$lane],$dst4[$lane]\\}, $addr"),
           "$src1 = $dst1, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4", []>;

-def VLD4LNd8 : VLD4LND<0b0011, "vld4.8">;
-def VLD4LNd16 : VLD4LND<0b0111, "vld4.16">;
-def VLD4LNd32 : VLD4LND<0b1011, "vld4.32">;
+def VLD4LNd8 : VLD4LN<0b0011, "vld4.8">;
+def VLD4LNd16 : VLD4LN<0b0111, "vld4.16">;
+def VLD4LNd32 : VLD4LN<0b1011, "vld4.32">;
+
+// vld4 to double-spaced even registers.
+def VLD4LNq16a: VLD4LN<0b0111, "vld4.16">;
+def VLD4LNq32a: VLD4LN<0b1011, "vld4.32">;
+
+// vld4 to double-spaced odd registers.
+def VLD4LNq16b: VLD4LN<0b0111, "vld4.16">;
+def VLD4LNq32b: VLD4LN<0b1011, "vld4.32">;

 // VLD1DUP : Vector Load (single element to all lanes)
 // VLD2DUP : Vector Load (single 2-element structure to all lanes)
@@ -154,6 +154,22 @@ static bool isNEONMultiRegOp(int Opcode, unsigned &FirstOpnd, unsigned &NumRegs,
     Stride = 2;
     return true;

+  case ARM::VLD4LNq16a:
+  case ARM::VLD4LNq32a:
+    FirstOpnd = 0;
+    NumRegs = 4;
+    Offset = 0;
+    Stride = 2;
+    return true;
+
+  case ARM::VLD4LNq16b:
+  case ARM::VLD4LNq32b:
+    FirstOpnd = 0;
+    NumRegs = 4;
+    Offset = 1;
+    Stride = 2;
+    return true;
+
   case ARM::VST2d8:
   case ARM::VST2d16:
   case ARM::VST2d32:
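
Note (not part of the commit): a hedged reading of the table entries above. isNEONMultiRegOp describes operands that must be pre-allocated to a fixed pattern of D registers; assuming Offset selects the even ("a") or odd ("b") starting register and Stride is the spacing, the entries added here describe four double-spaced D registers. The helper below is purely illustrative and not the pass's actual code.

// Illustrative only: expand (NumRegs, Offset, Stride) into D-register numbers
// relative to a base register, under the assumption stated above.
#include <vector>

std::vector<unsigned> expectedDRegs(unsigned BaseDReg, unsigned NumRegs,
                                    unsigned Offset, unsigned Stride) {
  std::vector<unsigned> Regs;
  for (unsigned i = 0; i != NumRegs; ++i)
    Regs.push_back(BaseDReg + Offset + i * Stride);
  return Regs;
}

// expectedDRegs(0, 4, 0, 2) -> d0,d2,d4,d6 (the "a" form);
// expectedDRegs(0, 4, 1, 2) -> d1,d3,d5,d7 (the "b" form).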
@@ -209,6 +209,10 @@ declare %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3lane.v4f32(i8*, <4 x flo
 %struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }
 %struct.__neon_float32x2x4_t = type { <2 x float>, <2 x float>, <2 x float>, <2 x float> }

+%struct.__neon_int16x8x4_t = type { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> }
+%struct.__neon_int32x4x4_t = type { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> }
+%struct.__neon_float32x4x4_t = type { <4 x float>, <4 x float>, <4 x float>, <4 x float> }
+
 define <8 x i8> @vld4lanei8(i8* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vld4lanei8:
 ;CHECK: vld4.8
@@ -269,7 +273,56 @@ define <2 x float> @vld4lanef(float* %A, <2 x float>* %B) nounwind {
 	ret <2 x float> %tmp9
 }

+define <8 x i16> @vld4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
+;CHECK: vld4laneQi16:
+;CHECK: vld4.16
+	%tmp1 = load <8 x i16>* %B
+	%tmp2 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4lane.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
+	%tmp3 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 0
+	%tmp4 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 1
+	%tmp5 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 2
+	%tmp6 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 3
+	%tmp7 = add <8 x i16> %tmp3, %tmp4
+	%tmp8 = add <8 x i16> %tmp5, %tmp6
+	%tmp9 = add <8 x i16> %tmp7, %tmp8
+	ret <8 x i16> %tmp9
+}
+
+define <4 x i32> @vld4laneQi32(i32* %A, <4 x i32>* %B) nounwind {
+;CHECK: vld4laneQi32:
+;CHECK: vld4.32
+	%tmp1 = load <4 x i32>* %B
+	%tmp2 = call %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4lane.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 1)
+	%tmp3 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 0
+	%tmp4 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 1
+	%tmp5 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 2
+	%tmp6 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 3
+	%tmp7 = add <4 x i32> %tmp3, %tmp4
+	%tmp8 = add <4 x i32> %tmp5, %tmp6
+	%tmp9 = add <4 x i32> %tmp7, %tmp8
+	ret <4 x i32> %tmp9
+}
+
+define <4 x float> @vld4laneQf(float* %A, <4 x float>* %B) nounwind {
+;CHECK: vld4laneQf:
+;CHECK: vld4.32
+	%tmp1 = load <4 x float>* %B
+	%tmp2 = call %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4lane.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
+	%tmp3 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 0
+	%tmp4 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 1
+	%tmp5 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 2
+	%tmp6 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 3
+	%tmp7 = add <4 x float> %tmp3, %tmp4
+	%tmp8 = add <4 x float> %tmp5, %tmp6
+	%tmp9 = add <4 x float> %tmp7, %tmp8
+	ret <4 x float> %tmp9
+}
+
 declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind readonly
 declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind readonly
 declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind readonly
 declare %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32) nounwind readonly
+
+declare %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind readonly
+declare %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind readonly
+declare %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) nounwind readonly