diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index e8d3f58630e..3fd63f01b34 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -133,6 +133,28 @@
 def VLD1q32 : VLD1Q<"vld1.32", v4i32, int_arm_neon_vldi>;
 def VLD1qf : VLD1Q<"vld1.32", v4f32, int_arm_neon_vldf>;
 def VLD1q64 : VLD1Q<"vld1.64", v2i64, int_arm_neon_vldi>;
 
+// VST1 : Vector Store (multiple single elements)
+class VST1D<string OpcodeStr, ValueType Ty, Intrinsic IntOp>
+  : NLdSt<(outs), (ins addrmode6:$addr, DPR:$src),
+          !strconcat(OpcodeStr, "\t${src:dregsingle}, $addr"),
+          [(IntOp addrmode6:$addr, (Ty DPR:$src), 1)]>;
+class VST1Q<string OpcodeStr, ValueType Ty, Intrinsic IntOp>
+  : NLdSt<(outs), (ins addrmode6:$addr, QPR:$src),
+          !strconcat(OpcodeStr, "\t${src:dregpair}, $addr"),
+          [(IntOp addrmode6:$addr, (Ty QPR:$src), 1)]>;
+
+def VST1d8 : VST1D<"vst1.8", v8i8, int_arm_neon_vsti>;
+def VST1d16 : VST1D<"vst1.16", v4i16, int_arm_neon_vsti>;
+def VST1d32 : VST1D<"vst1.32", v2i32, int_arm_neon_vsti>;
+def VST1df : VST1D<"vst1.32", v2f32, int_arm_neon_vstf>;
+def VST1d64 : VST1D<"vst1.64", v1i64, int_arm_neon_vsti>;
+
+def VST1q8 : VST1Q<"vst1.8", v16i8, int_arm_neon_vsti>;
+def VST1q16 : VST1Q<"vst1.16", v8i16, int_arm_neon_vsti>;
+def VST1q32 : VST1Q<"vst1.32", v4i32, int_arm_neon_vsti>;
+def VST1qf : VST1Q<"vst1.32", v4f32, int_arm_neon_vstf>;
+def VST1q64 : VST1Q<"vst1.64", v2i64, int_arm_neon_vsti>;
+
 //===----------------------------------------------------------------------===//
 // NEON pattern fragments
diff --git a/test/CodeGen/ARM/vst1.ll b/test/CodeGen/ARM/vst1.ll
new file mode 100644
index 00000000000..70a05fa8036
--- /dev/null
+++ b/test/CodeGen/ARM/vst1.ll
@@ -0,0 +1,77 @@
+; RUN: llvm-as < %s | llc -march=arm -mattr=+neon > %t
+; RUN: grep {vst1\\.8} %t | count 2
+; RUN: grep {vst1\\.16} %t | count 2
+; RUN: grep {vst1\\.32} %t | count 4
+; RUN: grep {vst1\\.64} %t | count 2
+
+define void @vst1i8(i8* %A, <8 x i8>* %B) nounwind {
+	%tmp1 = load <8 x i8>* %B
+	call void @llvm.arm.neon.vsti.v8i8(i8* %A, <8 x i8> %tmp1, i32 1)
+	ret void
+}
+
+define void @vst1i16(i16* %A, <4 x i16>* %B) nounwind {
+	%tmp1 = load <4 x i16>* %B
+	call void @llvm.arm.neon.vsti.v4i16(i16* %A, <4 x i16> %tmp1, i32 1)
+	ret void
+}
+
+define void @vst1i32(i32* %A, <2 x i32>* %B) nounwind {
+	%tmp1 = load <2 x i32>* %B
+	call void @llvm.arm.neon.vsti.v2i32(i32* %A, <2 x i32> %tmp1, i32 1)
+	ret void
+}
+
+define void @vst1f(float* %A, <2 x float>* %B) nounwind {
+	%tmp1 = load <2 x float>* %B
+	call void @llvm.arm.neon.vstf.v2f32(float* %A, <2 x float> %tmp1, i32 1)
+	ret void
+}
+
+define void @vst1i64(i64* %A, <1 x i64>* %B) nounwind {
+	%tmp1 = load <1 x i64>* %B
+	call void @llvm.arm.neon.vsti.v1i64(i64* %A, <1 x i64> %tmp1, i32 1)
+	ret void
+}
+
+define void @vst1Qi8(i8* %A, <16 x i8>* %B) nounwind {
+	%tmp1 = load <16 x i8>* %B
+	call void @llvm.arm.neon.vsti.v16i8(i8* %A, <16 x i8> %tmp1, i32 1)
+	ret void
+}
+
+define void @vst1Qi16(i16* %A, <8 x i16>* %B) nounwind {
+	%tmp1 = load <8 x i16>* %B
+	call void @llvm.arm.neon.vsti.v8i16(i16* %A, <8 x i16> %tmp1, i32 1)
+	ret void
+}
+
+define void @vst1Qi32(i32* %A, <4 x i32>* %B) nounwind {
+	%tmp1 = load <4 x i32>* %B
+	call void @llvm.arm.neon.vsti.v4i32(i32* %A, <4 x i32> %tmp1, i32 1)
+	ret void
+}
+
+define void @vst1Qf(float* %A, <4 x float>* %B) nounwind {
+	%tmp1 = load <4 x float>* %B
+	call void @llvm.arm.neon.vstf.v4f32(float* %A, <4 x float> %tmp1, i32 1)
+	ret void
+}
+
+define void @vst1Qi64(i64* %A, <2 x i64>* %B) nounwind {
+	%tmp1 = load <2 x i64>* %B
+	call void @llvm.arm.neon.vsti.v2i64(i64* %A, <2 x i64> %tmp1, i32 1)
+	ret void
+}
+
+declare void @llvm.arm.neon.vsti.v8i8(i8*, <8 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vsti.v4i16(i16*, <4 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vsti.v2i32(i32*, <2 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vstf.v2f32(float*, <2 x float>, i32) nounwind
+declare void @llvm.arm.neon.vsti.v1i64(i64*, <1 x i64>, i32) nounwind
+
+declare void @llvm.arm.neon.vsti.v16i8(i8*, <16 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vsti.v8i16(i16*, <8 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vsti.v4i32(i32*, <4 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vstf.v4f32(float*, <4 x float>, i32) nounwind
+declare void @llvm.arm.neon.vsti.v2i64(i64*, <2 x i64>, i32) nounwind
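
Note (not part of the patch): the VST1D pattern stores one D register, while VST1Q uses the dregpair operand modifier to print a Q register as its two underlying D registers, so a Q-register store should come out as something like "vst1.32 {d2, d3}, [r0]". For context, here is a rough sketch of how these stores surface at the source level through the standard <arm_neon.h> intrinsics; whether a given frontend lowers these builtins to the llvm.arm.neon.vsti/vstf calls exercised in the test above is an assumption here, not something this patch guarantees:

    #include <arm_neon.h>

    /* 64-bit store: expected to select the new VST1d8 pattern and emit
       "vst1.8 {dN}, [rM]" (exact registers depend on allocation). */
    void store_d(uint8_t *dst, uint8x8_t v) {
        vst1_u8(dst, v);
    }

    /* 128-bit store: expected to select VST1q32, with the Q register
       printed as a D-register pair, e.g. "vst1.32 {d2, d3}, [r0]". */
    void store_q(uint32_t *dst, uint32x4_t v) {
        vst1q_u32(dst, v);
    }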