From a92bac64cb75853c65a6146e015c2bf60c710869 Mon Sep 17 00:00:00 2001
From: Bob Wilson
Date: Fri, 10 Dec 2010 19:37:42 +0000
Subject: [PATCH] Fix some invalid alignments for Neon vld-dup and vld/st-lane
 instructions. Alignments smaller than the total size of the memory being
 loaded or stored, unless the alignment is 8 bytes, are not allowed. Add
 tests for this, too.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@121506 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/ARM/ARMISelDAGToDAG.cpp |  4 ++++
 test/CodeGen/ARM/vlddup.ll         | 23 ++++++++++++++++++++---
 test/CodeGen/ARM/vldlane.ll        | 13 ++++++++-----
 3 files changed, 32 insertions(+), 8 deletions(-)

diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp
index bfba11449f4..f96f68fa16a 100644
--- a/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -1622,6 +1622,8 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
     unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
     if (Alignment > NumBytes)
       Alignment = NumBytes;
+    if (Alignment < 8 && Alignment < NumBytes)
+      Alignment = 0;
     // Alignment must be a power of two; make sure of that.
     Alignment = (Alignment & -Alignment);
     if (Alignment == 1)
@@ -1720,6 +1722,8 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, unsigned NumVecs,
     unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
     if (Alignment > NumBytes)
       Alignment = NumBytes;
+    if (Alignment < 8 && Alignment < NumBytes)
+      Alignment = 0;
     // Alignment must be a power of two; make sure of that.
     Alignment = (Alignment & -Alignment);
     if (Alignment == 1)
diff --git a/test/CodeGen/ARM/vlddup.ll b/test/CodeGen/ARM/vlddup.ll
index 5822d3c4ff5..bb07ce2d239 100644
--- a/test/CodeGen/ARM/vlddup.ll
+++ b/test/CodeGen/ARM/vlddup.ll
@@ -41,6 +41,7 @@ define <16 x i8> @vld1dupQi8(i8* %A) nounwind {
 }
 
 %struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
+%struct.__neon_int4x16x2_t = type { <4 x i16>, <4 x i16> }
 %struct.__neon_int2x32x2_t = type { <2 x i32>, <2 x i32> }
 
 define <8 x i8> @vld2dupi8(i8* %A) nounwind {
@@ -56,6 +57,20 @@ define <8 x i8> @vld2dupi8(i8* %A) nounwind {
   ret <8 x i8> %tmp5
 }
 
+define <4 x i16> @vld2dupi16(i16* %A) nounwind {
+;CHECK: vld2dupi16:
+;Check that a power-of-two alignment smaller than the total size of the memory
+;being loaded is ignored.
+;CHECK: vld2.16 {d16[], d17[]}, [r0]
+  %tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i16* %A, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
+  %tmp1 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 0
+  %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
+  %tmp3 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 1
+  %tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer
+  %tmp5 = add <4 x i16> %tmp2, %tmp4
+  ret <4 x i16> %tmp5
+}
+
 define <2 x i32> @vld2dupi32(i32* %A) nounwind {
 ;CHECK: vld2dupi32:
 ;Check the alignment value. Max for this instruction is 64 bits:
@@ -70,6 +85,7 @@ define <2 x i32> @vld2dupi32(i32* %A) nounwind {
 }
 
 declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly
+declare %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i16*, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
 declare %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i32*, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly
 
 %struct.__neon_int16x4x3_t = type { <4 x i16>, <4 x i16>, <4 x i16> }
@@ -96,9 +112,10 @@ declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i16*, <4 x i16>
 
 define <2 x i32> @vld4dupi32(i32* %A) nounwind {
 ;CHECK: vld4dupi32:
-;Check the alignment value. Max for this instruction is 128 bits:
-;CHECK: vld4.32 {d16[], d17[], d18[], d19[]}, [r0, :128]
-  %tmp0 = tail call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i32* %A, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 32)
+;Check the alignment value. An 8-byte alignment is allowed here even though
+;it is smaller than the total size of the memory being loaded.
+;CHECK: vld4.32 {d16[], d17[], d18[], d19[]}, [r0, :64]
+  %tmp0 = tail call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i32* %A, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 8)
   %tmp1 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 0
   %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
   %tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 1
diff --git a/test/CodeGen/ARM/vldlane.ll b/test/CodeGen/ARM/vldlane.ll
index fc14e24d867..09b68a6ffbc 100644
--- a/test/CodeGen/ARM/vldlane.ll
+++ b/test/CodeGen/ARM/vldlane.ll
@@ -306,10 +306,12 @@ define <8 x i8> @vld4lanei8(i8* %A, <8 x i8>* %B) nounwind {
 
 define <4 x i16> @vld4lanei16(i16* %A, <4 x i16>* %B) nounwind {
 ;CHECK: vld4lanei16:
-;CHECK: vld4.16
+;Check that a power-of-two alignment smaller than the total size of the memory
+;being loaded is ignored.
+;CHECK: vld4.16 {d16[1], d17[1], d18[1], d19[1]}, [r0]
   %tmp0 = bitcast i16* %A to i8*
   %tmp1 = load <4 x i16>* %B
-  %tmp2 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 1)
+  %tmp2 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 4)
   %tmp3 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 0
   %tmp4 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 1
   %tmp5 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 2
@@ -322,11 +324,12 @@ define <4 x i16> @vld4lanei16(i16* %A, <4 x i16>* %B) nounwind {
 
 define <2 x i32> @vld4lanei32(i32* %A, <2 x i32>* %B) nounwind {
 ;CHECK: vld4lanei32:
-;Check the alignment value. Max for this instruction is 128 bits:
-;CHECK: vld4.32 {d16[1], d17[1], d18[1], d19[1]}, [r0, :128]
+;Check the alignment value. An 8-byte alignment is allowed here even though
+;it is smaller than the total size of the memory being loaded.
+;CHECK: vld4.32 {d16[1], d17[1], d18[1], d19[1]}, [r0, :64]
   %tmp0 = bitcast i32* %A to i8*
   %tmp1 = load <2 x i32>* %B
-  %tmp2 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 16)
+  %tmp2 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 8)
   %tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 0
   %tmp4 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 1
   %tmp5 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 2
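
The alignment normalization that both hunks apply can be read in isolation as the small helper below. This is a minimal illustrative sketch: normalizeNeonAlignment is a hypothetical name used only for this example, and in the patch the equivalent statements live inline in ARMDAGToDAGISel::SelectVLDSTLane and SelectVLDDup, where a resulting alignment of 0 means no alignment operand is emitted on the instruction.

    #include <cstdio>

    // Sketch of the alignment normalization for NEON lane/dup accesses:
    // clamp to the total access size, drop alignments below 8 bytes that
    // do not cover the whole access, then round down to a power of two.
    static unsigned normalizeNeonAlignment(unsigned Alignment, unsigned NumVecs,
                                           unsigned ElemSizeInBits) {
      unsigned NumBytes = NumVecs * ElemSizeInBits / 8;
      if (Alignment > NumBytes)
        Alignment = NumBytes;
      if (Alignment < 8 && Alignment < NumBytes)
        Alignment = 0;                      // too small to encode: drop the hint
      Alignment = Alignment & -Alignment;   // keep only the lowest set bit
      if (Alignment == 1)
        Alignment = 0;
      return Alignment;
    }

    int main() {
      // vld4.32 lane: 4 vectors x 32-bit elements = 16 bytes total.
      printf("%u\n", normalizeNeonAlignment(8, 4, 32));  // 8 -> ":64" operand kept
      printf("%u\n", normalizeNeonAlignment(4, 4, 32));  // 0 -> no alignment operand
      // vld2.16 dup: 2 vectors x 16-bit elements = 4 bytes total.
      printf("%u\n", normalizeNeonAlignment(2, 2, 16));  // 0 -> no alignment operand
      return 0;
    }

Under these rules the updated tests come out as expected: the 4-byte-aligned vld4.16 and 2-byte-aligned vld2.16 cases lose their alignment suffix entirely, while the 8-byte-aligned vld4.32 cases keep a ":64" operand.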