Add codegen support for NEON vst2lane intrinsics with 128-bit vectors.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@83596 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Bob Wilson 2009-10-08 23:38:24 +00:00
parent 4cf0189d5a
commit c5c6edb74f
4 changed files with 103 additions and 13 deletions

View File

@@ -1911,18 +1911,56 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
SDValue MemAddr, MemUpdate, MemOpc;
if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
return NULL;
switch (N->getOperand(3).getValueType().getSimpleVT().SimpleTy) {
VT = N->getOperand(3).getValueType();
if (VT.is64BitVector()) {
switch (VT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("unhandled vst2lane type");
case MVT::v8i8: Opc = ARM::VST2LNd8; break;
case MVT::v4i16: Opc = ARM::VST2LNd16; break;
case MVT::v2f32:
case MVT::v2i32: Opc = ARM::VST2LNd32; break;
}
SDValue Chain = N->getOperand(0);
const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
N->getOperand(3), N->getOperand(4),
N->getOperand(5), Chain };
return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7);
}
// Quad registers are handled by extracting subregs and then doing
// the store.
EVT RegVT;
unsigned Opc2 = 0;
switch (VT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("unhandled vst2lane type");
case MVT::v8i8: Opc = ARM::VST2LNd8; break;
case MVT::v4i16: Opc = ARM::VST2LNd16; break;
case MVT::v2f32:
case MVT::v2i32: Opc = ARM::VST2LNd32; break;
case MVT::v8i16:
Opc = ARM::VST2LNq16a;
Opc2 = ARM::VST2LNq16b;
RegVT = MVT::v4i16;
break;
case MVT::v4f32:
Opc = ARM::VST2LNq32a;
Opc2 = ARM::VST2LNq32b;
RegVT = MVT::v2f32;
break;
case MVT::v4i32:
Opc = ARM::VST2LNq32a;
Opc2 = ARM::VST2LNq32b;
RegVT = MVT::v2i32;
break;
}
SDValue Chain = N->getOperand(0);
const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
N->getOperand(3), N->getOperand(4),
N->getOperand(5), Chain };
return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7);
unsigned Lane = cast<ConstantSDNode>(N->getOperand(5))->getZExtValue();
unsigned NumElts = RegVT.getVectorNumElements();
int SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
SDValue D0 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
N->getOperand(3));
SDValue D1 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
N->getOperand(4));
const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, D0, D1,
getI32Imm(Lane % NumElts), Chain };
return CurDAG->getMachineNode((Lane < NumElts) ? Opc : Opc2,
dl, MVT::Other, Ops, 7);
}
case Intrinsic::arm_neon_vst3lane: {

View File

@@ -449,16 +449,24 @@ def VST4q32b : VST4WB<0b1000, "vst4.32">;
// FIXME: Not yet implemented.
// VST2LN : Vector Store (single 2-element structure from one lane)
class VST2LND<bits<4> op11_8, string OpcodeStr>
// VST2LN: single-lane vst2 — stores element $lane from each of two D
// registers ($src1, $src2) to $addr.  op11_8 encodes the element size
// (see the 0b0000/0b0100/0b1000 values used by the defs below).
class VST2LN<bits<4> op11_8, string OpcodeStr>
: NLdSt<1,0b00,op11_8,0b0000, (outs),
(ins addrmode6:$addr, DPR:$src1, DPR:$src2, nohash_imm:$lane),
IIC_VST,
!strconcat(OpcodeStr, "\t\\{$src1[$lane],$src2[$lane]\\}, $addr"),
"", []>;
def VST2LNd8 : VST2LND<0b0000, "vst2.8">;
def VST2LNd16 : VST2LND<0b0100, "vst2.16">;
def VST2LNd32 : VST2LND<0b1000, "vst2.32">;
def VST2LNd8 : VST2LN<0b0000, "vst2.8">;
def VST2LNd16 : VST2LN<0b0100, "vst2.16">;
def VST2LNd32 : VST2LN<0b1000, "vst2.32">;
// vst2 to double-spaced even registers.
def VST2LNq16a: VST2LN<0b0100, "vst2.16">;
def VST2LNq32a: VST2LN<0b1000, "vst2.32">;
// vst2 to double-spaced odd registers.
def VST2LNq16b: VST2LN<0b0100, "vst2.16">;
def VST2LNq32b: VST2LN<0b1000, "vst2.32">;
// VST3LN : Vector Store (single 3-element structure from one lane)
class VST3LND<bits<4> op11_8, string OpcodeStr>

View File

@@ -188,6 +188,22 @@ static bool isNEONMultiRegOp(int Opcode, unsigned &FirstOpnd, unsigned &NumRegs,
NumRegs = 4;
return true;
case ARM::VST2LNq16a:
case ARM::VST2LNq32a:
FirstOpnd = 3;
NumRegs = 2;
Offset = 0;
Stride = 2;
return true;
case ARM::VST2LNq16b:
case ARM::VST2LNq32b:
FirstOpnd = 3;
NumRegs = 2;
Offset = 1;
Stride = 2;
return true;
case ARM::VST3d8:
case ARM::VST3d16:
case ARM::VST3d32:

View File

@@ -32,11 +32,39 @@ define void @vst2lanef(float* %A, <2 x float>* %B) nounwind {
ret void
}
; Quad-register (128-bit) case: store lane 1 of two <8 x i16> vectors.
; Lane 1 falls in the low D subregister, so this should select the
; even-register form (VST2LNq16a) and still print as a plain vst2.16.
define void @vst2laneQi16(i16* %A, <8 x i16>* %B) nounwind {
;CHECK: vst2laneQi16:
;CHECK: vst2.16
%tmp1 = load <8 x i16>* %B
call void @llvm.arm.neon.vst2lane.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
ret void
}
; Quad-register case: store lane 2 of two <4 x i32> vectors.
; Lane 2 is in the high D subregister — presumably exercises the
; odd-register (VST2LNq32b) path; verify against the ISel lowering.
define void @vst2laneQi32(i32* %A, <4 x i32>* %B) nounwind {
;CHECK: vst2laneQi32:
;CHECK: vst2.32
%tmp1 = load <4 x i32>* %B
call void @llvm.arm.neon.vst2lane.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2)
ret void
}
; Quad-register floating-point case: store lane 3 of two <4 x float>
; vectors.  Uses the same vst2.32 encoding as the integer v4i32 case.
define void @vst2laneQf(float* %A, <4 x float>* %B) nounwind {
;CHECK: vst2laneQf:
;CHECK: vst2.32
%tmp1 = load <4 x float>* %B
call void @llvm.arm.neon.vst2lane.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, i32 3)
ret void
}
declare void @llvm.arm.neon.vst2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32) nounwind
declare void @llvm.arm.neon.vst2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32) nounwind
declare void @llvm.arm.neon.vst2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32) nounwind
declare void @llvm.arm.neon.vst2lane.v2f32(i8*, <2 x float>, <2 x float>, i32) nounwind
declare void @llvm.arm.neon.vst2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32) nounwind
declare void @llvm.arm.neon.vst2lane.v4i32(i8*, <4 x i32>, <4 x i32>, i32) nounwind
declare void @llvm.arm.neon.vst2lane.v4f32(i8*, <4 x float>, <4 x float>, i32) nounwind
define void @vst3lanei8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK: vst3lanei8:
;CHECK: vst3.8