Do not lose mem_operands while lowering VLD / VST intrinsics.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@129738 91177308-0d34-0410-b5e6-96231b3b80d8
commit b58a340fa2
parent 05a5ff1f00
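
The patch makes one fix at two levels of the ARM backend. At the MachineInstr level, ARMExpandPseudo::ExpandVLD and ExpandVST rewrite a VLD/VST pseudo-instruction into the real NEON instruction but previously dropped the pseudo's memory operands; the first two hunks copy them onto the replacement. A minimal sketch of that step, using the same setMemRefs interface the diff itself uses; the helper name and includes are illustrative and not part of the patch:

// Sketch only: the patch adds the setMemRefs call inline in
// ARMExpandPseudo::ExpandVLD and ExpandVST rather than through a helper.
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
using namespace llvm;

// Copy the memoperands of the pseudo-instruction MI onto the replacement
// instruction being built by MIB, so post-RA passes still know which memory
// the expanded VLD/VST touches.
static void transferMemOperands(MachineInstr &MI, MachineInstrBuilder &MIB) {
  (*MIB).setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
}
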
@@ -455,6 +455,10 @@ void ARMExpandPseudo::ExpandVLD(MachineBasicBlock::iterator &MBBI) {
   // Add an implicit def for the super-register.
   MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
   TransferImpOps(MI, MIB, MIB);
+
+  // Transfer memoperands.
+  (*MIB).setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+
   MI.eraseFromParent();
 }
 
@@ -500,6 +504,10 @@ void ARMExpandPseudo::ExpandVST(MachineBasicBlock::iterator &MBBI) {
   // Add an implicit kill for the super-reg.
   (*MIB).addRegisterKilled(SrcReg, TRI, true);
   TransferImpOps(MI, MIB, MIB);
+
+  // Transfer memoperands.
+  (*MIB).setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+
   MI.eraseFromParent();
 }
 
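At the SelectionDAG level, ARMDAGToDAGISel::SelectVLD, SelectVST, SelectVLDSTLane and SelectVLDDup build MachineSDNodes for the NEON load/store intrinsics; the hunks below pull the MachineMemOperand off the intrinsic node and attach it to every machine node they create, including both halves of a quad-register store that is split into two instructions. A sketch of that step with the same allocateMemRefsArray/setMemRefs interface; again, the helper is illustrative only:

// Sketch only: in the patch this is written out inline for each newly
// created node (VLd, VSt, VStA, VStB, VLdLn, VLdDup).
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Take the single MachineMemOperand from the memory-intrinsic node N and
// attach it to the machine node NewNode produced by instruction selection.
static void transferMemOperands(MachineFunction *MF, SDNode *N,
                                SDNode *NewNode) {
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(NewNode)->setMemRefs(MemOp, MemOp + 1);
}
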
@@ -1553,6 +1553,11 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                                  Ops.data(), Ops.size());
   }
 
+  // Transfer memoperands.
+  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
+  cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);
+
   if (NumVecs == 1)
     return VLd;
 
@@ -1582,6 +1587,9 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
   if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
     return NULL;
 
+  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
+
   SDValue Chain = N->getOperand(0);
   EVT VT = N->getOperand(Vec0Idx).getValueType();
   bool is64BitVector = VT.is64BitVector();
@@ -1654,7 +1662,13 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
     Ops.push_back(Pred);
     Ops.push_back(Reg0);
     Ops.push_back(Chain);
-    return CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
+    SDNode *VSt =
+      CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
+
+    // Transfer memoperands.
+    cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);
+
+    return VSt;
   }
 
   // Otherwise, quad registers are stored with two separate instructions,
@@ -1675,6 +1689,7 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
   SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
                                         MemAddr.getValueType(),
                                         MVT::Other, OpsA, 7);
+  cast<MachineSDNode>(VStA)->setMemRefs(MemOp, MemOp + 1);
   Chain = SDValue(VStA, 1);
 
   // Store the odd D registers.
@@ -1691,8 +1706,10 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
   Ops.push_back(Pred);
   Ops.push_back(Reg0);
   Ops.push_back(Chain);
-  return CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
-                                Ops.data(), Ops.size());
+  SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
+                                        Ops.data(), Ops.size());
+  cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1);
+  return VStB;
 }
 
 SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
@@ -1708,6 +1725,9 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
   if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
     return NULL;
 
+  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
+
   SDValue Chain = N->getOperand(0);
   unsigned Lane =
     cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
@@ -1794,6 +1814,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
                                   QOpcodes[OpcodeIndex]);
   SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys,
                                          Ops.data(), Ops.size());
+  cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
   if (!IsLoad)
     return VLdLn;
 
@@ -1820,6 +1841,9 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
   if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
     return NULL;
 
+  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
+
   SDValue Chain = N->getOperand(0);
   EVT VT = N->getValueType(0);
 
@@ -1864,12 +1888,13 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
 
   unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
   std::vector<EVT> ResTys;
-  ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts));
+  ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,ResTyElts));
   if (isUpdating)
     ResTys.push_back(MVT::i32);
   ResTys.push_back(MVT::Other);
   SDNode *VLdDup =
     CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
+  cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1);
   SuperReg = SDValue(VLdDup, 0);
 
   // Extract the subregisters.
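The test updates that follow go hand in hand with the code change: once the memoperands are preserved, a NEON load whose results are never used can be recognized as dead and removed, so unused_vld1_result stops checking for a vld1.32, and test_qqqq_regsequence_subreg is rewritten to actually use (and return) the vld3lane results instead of trapping.
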
@@ -133,8 +133,6 @@ declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*, i32) nounwind readonly
 ; Do not crash if the vld1 result is not used.
 define void @unused_vld1_result() {
 entry:
-;CHECK: unused_vld1_result
-;CHECK: vld1.32
 %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1)
 call void @llvm.trap()
 unreachable
@@ -491,7 +491,7 @@ declare %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4lane.v4f32(i8*, <4 x flo
 ; in the QPR_VFP2 regclass, it needs to be copied to a QPR regclass because
 ; we don't currently have a QQQQ_VFP2 super-regclass. (The "0" for the low
 ; part of %ins67 is supposed to be loaded by a VLDRS instruction in this test.)
-define void @test_qqqq_regsequence_subreg([6 x i64] %b) nounwind {
+define <8 x i16> @test_qqqq_regsequence_subreg([6 x i64] %b) nounwind {
 ;CHECK: test_qqqq_regsequence_subreg
 ;CHECK: vld3.16
 %tmp63 = extractvalue [6 x i64] %b, 5
@@ -500,8 +500,12 @@ define void @test_qqqq_regsequence_subreg([6 x i64] %b) nounwind {
 %ins67 = or i128 %tmp65, 0
 %tmp78 = bitcast i128 %ins67 to <8 x i16>
 %vld3_lane = tail call %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3lane.v8i16(i8* undef, <8 x i16> undef, <8 x i16> undef, <8 x i16> %tmp78, i32 1, i32 2)
-call void @llvm.trap()
-unreachable
+%tmp3 = extractvalue %struct.__neon_int16x8x3_t %vld3_lane, 0
+%tmp4 = extractvalue %struct.__neon_int16x8x3_t %vld3_lane, 1
+%tmp5 = extractvalue %struct.__neon_int16x8x3_t %vld3_lane, 2
+%tmp6 = add <8 x i16> %tmp3, %tmp4
+%tmp7 = add <8 x i16> %tmp5, %tmp6
+ret <8 x i16> %tmp7
 }
 
 declare void @llvm.trap() nounwind