diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp
index bdc29402b45..a9b86f02848 100644
--- a/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -1445,18 +1445,61 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
       SDValue MemAddr, MemUpdate, MemOpc;
       if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
         return NULL;
+      if (VT.is64BitVector()) {
+        switch (VT.getSimpleVT().SimpleTy) {
+        default: llvm_unreachable("unhandled vld4 type");
+        case MVT::v8i8: Opc = ARM::VLD4d8; break;
+        case MVT::v4i16: Opc = ARM::VLD4d16; break;
+        case MVT::v2f32:
+        case MVT::v2i32: Opc = ARM::VLD4d32; break;
+        }
+        SDValue Chain = N->getOperand(0);
+        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, Chain };
+        std::vector<EVT> ResTys(4, VT);
+        ResTys.push_back(MVT::Other);
+        return CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 4);
+      }
+      // Quad registers are loaded with two separate instructions, where one
+      // loads the even registers and the other loads the odd registers.
+      EVT RegVT = VT;
+      unsigned Opc2 = 0;
       switch (VT.getSimpleVT().SimpleTy) {
       default: llvm_unreachable("unhandled vld4 type");
-      case MVT::v8i8: Opc = ARM::VLD4d8; break;
-      case MVT::v4i16: Opc = ARM::VLD4d16; break;
-      case MVT::v2f32:
-      case MVT::v2i32: Opc = ARM::VLD4d32; break;
+      case MVT::v16i8:
+        Opc = ARM::VLD4q8a; Opc2 = ARM::VLD4q8b; RegVT = MVT::v8i8; break;
+      case MVT::v8i16:
+        Opc = ARM::VLD4q16a; Opc2 = ARM::VLD4q16b; RegVT = MVT::v4i16; break;
+      case MVT::v4f32:
+        Opc = ARM::VLD4q32a; Opc2 = ARM::VLD4q32b; RegVT = MVT::v2f32; break;
+      case MVT::v4i32:
+        Opc = ARM::VLD4q32a; Opc2 = ARM::VLD4q32b; RegVT = MVT::v2i32; break;
       }
       SDValue Chain = N->getOperand(0);
-      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, Chain };
-      std::vector<EVT> ResTys(4, VT);
+      // Enable writeback to the address register.
+      MemOpc = CurDAG->getTargetConstant(ARM_AM::getAM6Opc(true), MVT::i32);
+
+      std::vector<EVT> ResTys(4, RegVT);
+      ResTys.push_back(MemAddr.getValueType());
       ResTys.push_back(MVT::Other);
-      return CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 4);
+
+      const SDValue OpsA[] = { MemAddr, MemUpdate, MemOpc, Chain };
+      SDNode *VLdA = CurDAG->getMachineNode(Opc, dl, ResTys, OpsA, 4);
+      Chain = SDValue(VLdA, 5);
+
+      const SDValue OpsB[] = { SDValue(VLdA, 4), MemUpdate, MemOpc, Chain };
+      SDNode *VLdB = CurDAG->getMachineNode(Opc2, dl, ResTys, OpsB, 4);
+      Chain = SDValue(VLdB, 5);
+
+      SDNode *Q0 = PairDRegs(VT, SDValue(VLdA, 0), SDValue(VLdB, 0));
+      SDNode *Q1 = PairDRegs(VT, SDValue(VLdA, 1), SDValue(VLdB, 1));
+      SDNode *Q2 = PairDRegs(VT, SDValue(VLdA, 2), SDValue(VLdB, 2));
+      SDNode *Q3 = PairDRegs(VT, SDValue(VLdA, 3), SDValue(VLdB, 3));
+      ReplaceUses(SDValue(N, 0), SDValue(Q0, 0));
+      ReplaceUses(SDValue(N, 1), SDValue(Q1, 0));
+      ReplaceUses(SDValue(N, 2), SDValue(Q2, 0));
+      ReplaceUses(SDValue(N, 3), SDValue(Q3, 0));
+      ReplaceUses(SDValue(N, 4), Chain);
+      return NULL;
     }
 
     case Intrinsic::arm_neon_vld2lane: {
diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index c7ff523b437..75ff78a63dc 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -227,11 +227,26 @@ class VLD4D<string OpcodeStr>
   : NLdSt<(outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4),
           (ins addrmode6:$addr), IIC_VLD4,
           !strconcat(OpcodeStr, "\t\\{$dst1,$dst2,$dst3,$dst4\\}, $addr"), "", []>;
+class VLD4WB<string OpcodeStr>
+  : NLdSt<(outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
+          (ins addrmode6:$addr), IIC_VLD4,
+          !strconcat(OpcodeStr, "\t\\{$dst1,$dst2,$dst3,$dst4\\}, $addr"),
+          "$addr.addr = $wb", []>;
 
 def VLD4d8  : VLD4D<"vld4.8">;
 def VLD4d16 : VLD4D<"vld4.16">;
 def VLD4d32 : VLD4D<"vld4.32">;
 
+// vld4 to double-spaced even registers.
+def VLD4q8a  : VLD4WB<"vld4.8">;
+def VLD4q16a : VLD4WB<"vld4.16">;
+def VLD4q32a : VLD4WB<"vld4.32">;
+
+// vld4 to double-spaced odd registers.
+def VLD4q8b  : VLD4WB<"vld4.8">;
+def VLD4q16b : VLD4WB<"vld4.16">;
+def VLD4q32b : VLD4WB<"vld4.32">;
+
 // VLD2LN : Vector Load (single 2-element structure to one lane)
 class VLD2LND<string OpcodeStr>
   : NLdSt<(outs DPR:$dst1, DPR:$dst2),
diff --git a/lib/Target/ARM/NEONPreAllocPass.cpp b/lib/Target/ARM/NEONPreAllocPass.cpp
index fab62f6cfc2..1232f896c42 100644
--- a/lib/Target/ARM/NEONPreAllocPass.cpp
+++ b/lib/Target/ARM/NEONPreAllocPass.cpp
@@ -101,6 +101,24 @@ static bool isNEONMultiRegOp(int Opcode, unsigned &FirstOpnd, unsigned &NumRegs,
     NumRegs = 4;
     return true;
 
+  case ARM::VLD4q8a:
+  case ARM::VLD4q16a:
+  case ARM::VLD4q32a:
+    FirstOpnd = 0;
+    NumRegs = 4;
+    Offset = 0;
+    Stride = 2;
+    return true;
+
+  case ARM::VLD4q8b:
+  case ARM::VLD4q16b:
+  case ARM::VLD4q32b:
+    FirstOpnd = 0;
+    NumRegs = 4;
+    Offset = 1;
+    Stride = 2;
+    return true;
+
   case ARM::VST2d8:
   case ARM::VST2d16:
   case ARM::VST2d32:
diff --git a/test/CodeGen/ARM/vld4.ll b/test/CodeGen/ARM/vld4.ll
index 6e6bb75c168..08f67a793a5 100644
--- a/test/CodeGen/ARM/vld4.ll
+++ b/test/CodeGen/ARM/vld4.ll
@@ -5,6 +5,11 @@
 %struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }
 %struct.__neon_float32x2x4_t = type { <2 x float>, <2 x float>, <2 x float>, <2 x float> }
 
+%struct.__neon_int8x16x4_t = type { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> }
+%struct.__neon_int16x8x4_t = type { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> }
+%struct.__neon_int32x4x4_t = type { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> }
+%struct.__neon_float32x4x4_t = type { <4 x float>, <4 x float>, <4 x float>, <4 x float> }
+
 define <8 x i8> @vld4i8(i8* %A) nounwind {
 ;CHECK: vld4i8:
 ;CHECK: vld4.8
@@ -45,7 +50,56 @@ define <2 x float> @vld4f(float* %A) nounwind {
 	ret <2 x float> %tmp4
 }
 
+define <16 x i8> @vld4Qi8(i8* %A) nounwind {
+;CHECK: vld4Qi8:
+;CHECK: vld4.8
+;CHECK: vld4.8
+	%tmp1 = call %struct.__neon_int8x16x4_t @llvm.arm.neon.vld4.v16i8(i8* %A)
+	%tmp2 = extractvalue %struct.__neon_int8x16x4_t %tmp1, 0
+	%tmp3 = extractvalue %struct.__neon_int8x16x4_t %tmp1, 2
+	%tmp4 = add <16 x i8> %tmp2, %tmp3
+	ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @vld4Qi16(i16* %A) nounwind {
+;CHECK: vld4Qi16:
+;CHECK: vld4.16
+;CHECK: vld4.16
+	%tmp1 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4.v8i16(i16* %A)
+	%tmp2 = extractvalue %struct.__neon_int16x8x4_t %tmp1, 0
+	%tmp3 = extractvalue %struct.__neon_int16x8x4_t %tmp1, 2
+	%tmp4 = add <8 x i16> %tmp2, %tmp3
+	ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @vld4Qi32(i32* %A) nounwind {
+;CHECK: vld4Qi32:
+;CHECK: vld4.32
+;CHECK: vld4.32
+	%tmp1 = call %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4.v4i32(i32* %A)
+	%tmp2 = extractvalue %struct.__neon_int32x4x4_t %tmp1, 0
+	%tmp3 = extractvalue %struct.__neon_int32x4x4_t %tmp1, 2
+	%tmp4 = add <4 x i32> %tmp2, %tmp3
+	ret <4 x i32> %tmp4
+}
+
+define <4 x float> @vld4Qf(float* %A) nounwind {
+;CHECK: vld4Qf:
+;CHECK: vld4.32
+;CHECK: vld4.32
+	%tmp1 = call %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4.v4f32(float* %A)
+	%tmp2 = extractvalue %struct.__neon_float32x4x4_t %tmp1, 0
+	%tmp3 = extractvalue %struct.__neon_float32x4x4_t %tmp1, 2
+	%tmp4 = add <4 x float> %tmp2, %tmp3
+	ret <4 x float> %tmp4
+}
+
 declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8*) nounwind readonly
 declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4.v4i16(i8*) nounwind readonly
 declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i8*) nounwind readonly
 declare %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4.v2f32(i8*) nounwind readonly
+
+declare %struct.__neon_int8x16x4_t @llvm.arm.neon.vld4.v16i8(i8*) nounwind readonly
+declare %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4.v8i16(i8*) nounwind readonly
+declare %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4.v4i32(i8*) nounwind readonly
+declare %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4.v4f32(i8*) nounwind readonly