[ARM] Don't break alignment when combining base updates into load/stores.

r223862/r224203 also tried to combine base-updating load/stores.
There was a mistake there: the alignment was added as-is as an operand to
the ARMISD::VLD/VST node, but the VLD/VST selection logic doesn't care
about less-than-standard alignment attributes.
For example, no matter the alignment of a v2i64 load (say, 1), SelectVLD picks
VLD1q64 (because of the memory type).  But VLD1q64 ("vld1.64 {dXX, dYY}")
requires 8-byte alignment, per ARMARMv7a 3.2.1.
For the 1-aligned load, what we really want is VLD1q8.

This commit introduces bitcasts where necessary, and changes the vld/vst type
to one whose standard alignment matches the original load/store alignment.

Differential Revision: http://reviews.llvm.org/D6759

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@224754 91177308-0d34-0410-b5e6-96231b3b80d8
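To make the failure mode concrete, here is a minimal reproducer in the style of the updated tests below; the function name is illustrative, not part of the patch:

define <2 x i64> @underaligned_load_postinc(<2 x i64>** %ptr) {
	%A = load <2 x i64>** %ptr
	; Only known 1-aligned: selecting vld1.64 (an 8-aligned access) would be wrong.
	%lA = load <2 x i64>* %A, align 1
	; The pointer increment below gets folded into the load as a post-increment.
	%inc = getelementptr <2 x i64>* %A, i32 1
	store <2 x i64>* %inc, <2 x i64>** %ptr
	; Before this fix: vld1.64 {dN, dM}, [rK]!   After: vld1.8 {dN, dM}, [rK]!
	ret <2 x i64> %lA
}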
lib/Target/ARM/ARMISelLowering.cpp
@@ -8976,13 +8976,42 @@ static SDValue CombineBaseUpdate(SDNode *N,
       continue;
     }
 
+    EVT AlignedVecTy = VecTy;
+
+    // If this is a less-than-standard-aligned load/store, change the type to
+    // match the standard alignment.
+    // The alignment is overlooked when selecting _UPD variants; and it's
+    // easier to introduce bitcasts here than fix that.
+    // There are 3 ways to get to this base-update combine:
+    // - intrinsics: they are assumed to be properly aligned (to the standard
+    //   alignment of the memory type), so we don't need to do anything.
+    // - ARMISD::VLDx nodes: they are only generated from the aforementioned
+    //   intrinsics, so, likewise, there's nothing to do.
+    // - generic load/store instructions: the alignment is specified as an
+    //   explicit operand, rather than implicitly as the standard alignment
+    //   of the memory type (like the intrinsics).  We need to change the
+    //   memory type to match the explicit alignment.  That way, we don't
+    //   generate non-standard-aligned ARMISD::VLDx nodes.
+    if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(N)) {
+      unsigned Alignment = LSN->getAlignment();
+      if (Alignment == 0)
+        Alignment = 1;
+      if (Alignment < VecTy.getScalarSizeInBits() / 8) {
+        MVT EltTy = MVT::getIntegerVT(Alignment * 8);
+        assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
+        assert(!isLaneOp && "Unexpected generic load/store lane.");
+        unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
+        AlignedVecTy = MVT::getVectorVT(EltTy, NumElts);
+      }
+    }
+
     // Create the new updating load/store node.
     // First, create an SDVTList for the new updating node's results.
     EVT Tys[6];
     unsigned NumResultVecs = (isLoad ? NumVecs : 0);
     unsigned n;
     for (n = 0; n < NumResultVecs; ++n)
-      Tys[n] = VecTy;
+      Tys[n] = AlignedVecTy;
     Tys[n++] = MVT::i32;
     Tys[n] = MVT::Other;
     SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2));
@@ -9000,9 +9029,17 @@ static SDValue CombineBaseUpdate(SDNode *N,
       for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i)
         Ops.push_back(N->getOperand(i));
     }
+
+    // If this is a non-standard-aligned store, the penultimate operand is the
+    // stored value.  Bitcast it to the aligned type.
+    if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) {
+      SDValue &StVal = Ops[Ops.size()-2];
+      StVal = DAG.getNode(ISD::BITCAST, SDLoc(N), AlignedVecTy, StVal);
+    }
+
     MemSDNode *MemInt = cast<MemSDNode>(N);
     SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys,
-                                           Ops, MemInt->getMemoryVT(),
+                                           Ops, AlignedVecTy,
                                            MemInt->getMemOperand());
 
     // Update the uses.
@@ -9010,6 +9047,14 @@ static SDValue CombineBaseUpdate(SDNode *N,
     for (unsigned i = 0; i < NumResultVecs; ++i) {
       NewResults.push_back(SDValue(UpdN.getNode(), i));
    }
+
+    // If this is a non-standard-aligned load, the first result is the loaded
+    // value.  Bitcast it to the expected result type.
+    if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) {
+      SDValue &LdVal = NewResults[0];
+      LdVal = DAG.getNode(ISD::BITCAST, SDLoc(N), VecTy, LdVal);
+    }
+
     NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
     DCI.CombineTo(N, NewResults);
     DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));

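The test changes below encode the new expectations. For a 16-byte (q-register) access, the explicit alignment now picks the access type: align 1 gives v16i8 (vld1.8/vst1.8), align 2 gives v8i16 (vld1.16/vst1.16), align 4 gives v4i32 (vld1.32/vst1.32), and align 8 or 16 keeps v2i64 (vld1.64/vst1.64, with :64/:128 address-alignment qualifiers). A store-side sketch in the same vein (again, the function name is illustrative only):

define void @underaligned_store_postinc(<2 x i64>** %ptr, <2 x i64> %val) {
	%A = load <2 x i64>** %ptr
	; With align 2 this becomes vst1.16; the combine first bitcasts the
	; stored value from <2 x i64> to <8 x i16>, as in the hunk above.
	store <2 x i64> %val, <2 x i64>* %A, align 2
	%inc = getelementptr <2 x i64>* %A, i32 1
	store <2 x i64>* %inc, <2 x i64>** %ptr
	ret void
}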
test/CodeGen/ARM/memcpy-inline.ll
@@ -46,8 +46,8 @@ entry:
 ; CHECK: movw [[REG2:r[0-9]+]], #16716
 ; CHECK: movt [[REG2:r[0-9]+]], #72
 ; CHECK: str [[REG2]], [r0, #32]
-; CHECK: vld1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]!
-; CHECK: vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]!
+; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]!
+; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]!
 ; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]
 ; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
   tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([36 x i8]* @.str2, i64 0, i64 0), i64 36, i32 1, i1 false)
@@ -57,8 +57,8 @@ entry:
 define void @t3(i8* nocapture %C) nounwind {
 entry:
 ; CHECK-LABEL: t3:
-; CHECK: vld1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]!
-; CHECK: vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]!
+; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]!
+; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]!
 ; CHECK: vld1.8 {d{{[0-9]+}}}, [r1]
 ; CHECK: vst1.8 {d{{[0-9]+}}}, [r0]
   tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([24 x i8]* @.str3, i64 0, i64 0), i64 24, i32 1, i1 false)
@@ -69,7 +69,7 @@ define void @t4(i8* nocapture %C) nounwind {
 entry:
 ; CHECK-LABEL: t4:
 ; CHECK: vld1.8 {[[REG3:d[0-9]+]], [[REG4:d[0-9]+]]}, [r1]
-; CHECK: vst1.64 {[[REG3]], [[REG4]]}, [r0]!
+; CHECK: vst1.8 {[[REG3]], [[REG4]]}, [r0]!
 ; CHECK: strh [[REG5:r[0-9]+]], [r0]
   tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([18 x i8]* @.str4, i64 0, i64 0), i64 18, i32 1, i1 false)
   ret void

test/CodeGen/ARM/vector-load.ll
@@ -31,7 +31,7 @@ define <4 x i16> @load_v4i16(<4 x i16>** %ptr) {
 
 define <4 x i16> @load_v4i16_update(<4 x i16>** %ptr) {
 ;CHECK-LABEL: load_v4i16_update:
-;CHECK: vld1.16 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
 	%A = load <4 x i16>** %ptr
 	%lA = load <4 x i16>* %A, align 1
 	%inc = getelementptr <4 x i16>* %A, i34 1
@@ -49,7 +49,7 @@ define <2 x i32> @load_v2i32(<2 x i32>** %ptr) {
 
 define <2 x i32> @load_v2i32_update(<2 x i32>** %ptr) {
 ;CHECK-LABEL: load_v2i32_update:
-;CHECK: vld1.32 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
 	%A = load <2 x i32>** %ptr
 	%lA = load <2 x i32>* %A, align 1
 	%inc = getelementptr <2 x i32>* %A, i32 1
@@ -67,7 +67,7 @@ define <2 x float> @load_v2f32(<2 x float>** %ptr) {
 
 define <2 x float> @load_v2f32_update(<2 x float>** %ptr) {
 ;CHECK-LABEL: load_v2f32_update:
-;CHECK: vld1.32 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
 	%A = load <2 x float>** %ptr
 	%lA = load <2 x float>* %A, align 1
 	%inc = getelementptr <2 x float>* %A, i32 1
@@ -85,7 +85,7 @@ define <1 x i64> @load_v1i64(<1 x i64>** %ptr) {
 
 define <1 x i64> @load_v1i64_update(<1 x i64>** %ptr) {
 ;CHECK-LABEL: load_v1i64_update:
-;CHECK: vld1.64 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
 	%A = load <1 x i64>** %ptr
 	%lA = load <1 x i64>* %A, align 1
 	%inc = getelementptr <1 x i64>* %A, i31 1
@@ -121,7 +121,7 @@ define <8 x i16> @load_v8i16(<8 x i16>** %ptr) {
 
 define <8 x i16> @load_v8i16_update(<8 x i16>** %ptr) {
 ;CHECK-LABEL: load_v8i16_update:
-;CHECK: vld1.16 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
 	%A = load <8 x i16>** %ptr
 	%lA = load <8 x i16>* %A, align 1
 	%inc = getelementptr <8 x i16>* %A, i38 1
@@ -139,7 +139,7 @@ define <4 x i32> @load_v4i32(<4 x i32>** %ptr) {
 
 define <4 x i32> @load_v4i32_update(<4 x i32>** %ptr) {
 ;CHECK-LABEL: load_v4i32_update:
-;CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
 	%A = load <4 x i32>** %ptr
 	%lA = load <4 x i32>* %A, align 1
 	%inc = getelementptr <4 x i32>* %A, i34 1
@@ -157,7 +157,7 @@ define <4 x float> @load_v4f32(<4 x float>** %ptr) {
 
 define <4 x float> @load_v4f32_update(<4 x float>** %ptr) {
 ;CHECK-LABEL: load_v4f32_update:
-;CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
 	%A = load <4 x float>** %ptr
 	%lA = load <4 x float>* %A, align 1
 	%inc = getelementptr <4 x float>* %A, i34 1
@@ -175,7 +175,7 @@ define <2 x i64> @load_v2i64(<2 x i64>** %ptr) {
 
 define <2 x i64> @load_v2i64_update(<2 x i64>** %ptr) {
 ;CHECK-LABEL: load_v2i64_update:
-;CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
 	%A = load <2 x i64>** %ptr
 	%lA = load <2 x i64>* %A, align 1
 	%inc = getelementptr <2 x i64>* %A, i32 1
@@ -183,6 +183,47 @@ define <2 x i64> @load_v2i64_update(<2 x i64>** %ptr) {
 	ret <2 x i64> %lA
 }
 
+; Make sure we change the type to match alignment if necessary.
+define <2 x i64> @load_v2i64_update_aligned2(<2 x i64>** %ptr) {
+;CHECK-LABEL: load_v2i64_update_aligned2:
+;CHECK: vld1.16 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <2 x i64>** %ptr
+	%lA = load <2 x i64>* %A, align 2
+	%inc = getelementptr <2 x i64>* %A, i32 1
+	store <2 x i64>* %inc, <2 x i64>** %ptr
+	ret <2 x i64> %lA
+}
+
+define <2 x i64> @load_v2i64_update_aligned4(<2 x i64>** %ptr) {
+;CHECK-LABEL: load_v2i64_update_aligned4:
+;CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <2 x i64>** %ptr
+	%lA = load <2 x i64>* %A, align 4
+	%inc = getelementptr <2 x i64>* %A, i32 1
+	store <2 x i64>* %inc, <2 x i64>** %ptr
+	ret <2 x i64> %lA
+}
+
+define <2 x i64> @load_v2i64_update_aligned8(<2 x i64>** %ptr) {
+;CHECK-LABEL: load_v2i64_update_aligned8:
+;CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}:64]!
+	%A = load <2 x i64>** %ptr
+	%lA = load <2 x i64>* %A, align 8
+	%inc = getelementptr <2 x i64>* %A, i32 1
+	store <2 x i64>* %inc, <2 x i64>** %ptr
+	ret <2 x i64> %lA
+}
+
+define <2 x i64> @load_v2i64_update_aligned16(<2 x i64>** %ptr) {
+;CHECK-LABEL: load_v2i64_update_aligned16:
+;CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}:128]!
+	%A = load <2 x i64>** %ptr
+	%lA = load <2 x i64>* %A, align 16
+	%inc = getelementptr <2 x i64>* %A, i32 1
+	store <2 x i64>* %inc, <2 x i64>** %ptr
+	ret <2 x i64> %lA
+}
+
 ; Make sure we don't break smaller-than-dreg extloads.
 define <4 x i32> @zextload_v8i8tov8i32(<4 x i8>** %ptr) {
 ;CHECK-LABEL: zextload_v8i8tov8i32:
@@ -190,7 +231,7 @@ define <4 x i32> @zextload_v8i8tov8i32(<4 x i8>** %ptr) {
 ;CHECK: vmovl.u8        {{q[0-9]+}}, {{d[0-9]+}}
 ;CHECK: vmovl.u16       {{q[0-9]+}}, {{d[0-9]+}}
 	%A = load <4 x i8>** %ptr
-	%lA = load <4 x i8>* %A, align 1
+	%lA = load <4 x i8>* %A, align 4
 	%zlA = zext <4 x i8> %lA to <4 x i32>
 	ret <4 x i32> %zlA
 }

test/CodeGen/ARM/vector-store.ll
@@ -31,7 +31,7 @@ define void @store_v4i16(<4 x i16>** %ptr, <4 x i16> %val) {
 
 define void @store_v4i16_update(<4 x i16>** %ptr, <4 x i16> %val) {
 ;CHECK-LABEL: store_v4i16_update:
-;CHECK: vst1.16 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
 	%A = load <4 x i16>** %ptr
 	store  <4 x i16> %val, <4 x i16>* %A, align 1
 	%inc = getelementptr <4 x i16>* %A, i34 1
@@ -49,7 +49,7 @@ define void @store_v2i32(<2 x i32>** %ptr, <2 x i32> %val) {
 
 define void @store_v2i32_update(<2 x i32>** %ptr, <2 x i32> %val) {
 ;CHECK-LABEL: store_v2i32_update:
-;CHECK: vst1.32 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
 	%A = load <2 x i32>** %ptr
 	store  <2 x i32> %val, <2 x i32>* %A, align 1
 	%inc = getelementptr <2 x i32>* %A, i32 1
@@ -67,7 +67,7 @@ define void @store_v2f32(<2 x float>** %ptr, <2 x float> %val) {
 
 define void @store_v2f32_update(<2 x float>** %ptr, <2 x float> %val) {
 ;CHECK-LABEL: store_v2f32_update:
-;CHECK: vst1.32 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
 	%A = load <2 x float>** %ptr
 	store  <2 x float> %val, <2 x float>* %A, align 1
 	%inc = getelementptr <2 x float>* %A, i32 1
@@ -85,7 +85,7 @@ define void @store_v1i64(<1 x i64>** %ptr, <1 x i64> %val) {
 
 define void @store_v1i64_update(<1 x i64>** %ptr, <1 x i64> %val) {
 ;CHECK-LABEL: store_v1i64_update:
-;CHECK: vst1.64 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
 	%A = load <1 x i64>** %ptr
 	store  <1 x i64> %val, <1 x i64>* %A, align 1
 	%inc = getelementptr <1 x i64>* %A, i31 1
@@ -121,7 +121,7 @@ define void @store_v8i16(<8 x i16>** %ptr, <8 x i16> %val) {
 
 define void @store_v8i16_update(<8 x i16>** %ptr, <8 x i16> %val) {
 ;CHECK-LABEL: store_v8i16_update:
-;CHECK: vst1.16 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
 	%A = load <8 x i16>** %ptr
 	store  <8 x i16> %val, <8 x i16>* %A, align 1
 	%inc = getelementptr <8 x i16>* %A, i38 1
@@ -139,7 +139,7 @@ define void @store_v4i32(<4 x i32>** %ptr, <4 x i32> %val) {
 
 define void @store_v4i32_update(<4 x i32>** %ptr, <4 x i32> %val) {
 ;CHECK-LABEL: store_v4i32_update:
-;CHECK: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
 	%A = load <4 x i32>** %ptr
 	store  <4 x i32> %val, <4 x i32>* %A, align 1
 	%inc = getelementptr <4 x i32>* %A, i34 1
@@ -157,7 +157,7 @@ define void @store_v4f32(<4 x float>** %ptr, <4 x float> %val) {
 
 define void @store_v4f32_update(<4 x float>** %ptr, <4 x float> %val) {
 ;CHECK-LABEL: store_v4f32_update:
-;CHECK: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
 	%A = load <4 x float>** %ptr
 	store  <4 x float> %val, <4 x float>* %A, align 1
 	%inc = getelementptr <4 x float>* %A, i34 1
@@ -175,7 +175,7 @@ define void @store_v2i64(<2 x i64>** %ptr, <2 x i64> %val) {
 
 define void @store_v2i64_update(<2 x i64>** %ptr, <2 x i64> %val) {
 ;CHECK-LABEL: store_v2i64_update:
-;CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
 	%A = load <2 x i64>** %ptr
 	store  <2 x i64> %val, <2 x i64>* %A, align 1
 	%inc = getelementptr <2 x i64>* %A, i32 1
@@ -183,6 +183,46 @@ define void @store_v2i64_update(<2 x i64>** %ptr, <2 x i64> %val) {
 	ret void
 }
 
+define void @store_v2i64_update_aligned2(<2 x i64>** %ptr, <2 x i64> %val) {
+;CHECK-LABEL: store_v2i64_update_aligned2:
+;CHECK: vst1.16 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <2 x i64>** %ptr
+	store  <2 x i64> %val, <2 x i64>* %A, align 2
+	%inc = getelementptr <2 x i64>* %A, i32 1
+	store <2 x i64>* %inc, <2 x i64>** %ptr
+	ret void
+}
+
+define void @store_v2i64_update_aligned4(<2 x i64>** %ptr, <2 x i64> %val) {
+;CHECK-LABEL: store_v2i64_update_aligned4:
+;CHECK: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <2 x i64>** %ptr
+	store  <2 x i64> %val, <2 x i64>* %A, align 4
+	%inc = getelementptr <2 x i64>* %A, i32 1
+	store <2 x i64>* %inc, <2 x i64>** %ptr
+	ret void
+}
+
+define void @store_v2i64_update_aligned8(<2 x i64>** %ptr, <2 x i64> %val) {
+;CHECK-LABEL: store_v2i64_update_aligned8:
+;CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}:64]!
+	%A = load <2 x i64>** %ptr
+	store  <2 x i64> %val, <2 x i64>* %A, align 8
+	%inc = getelementptr <2 x i64>* %A, i32 1
+	store <2 x i64>* %inc, <2 x i64>** %ptr
+	ret void
+}
+
+define void @store_v2i64_update_aligned16(<2 x i64>** %ptr, <2 x i64> %val) {
+;CHECK-LABEL: store_v2i64_update_aligned16:
+;CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}:128]!
+	%A = load <2 x i64>** %ptr
+	store  <2 x i64> %val, <2 x i64>* %A, align 16
+	%inc = getelementptr <2 x i64>* %A, i32 1
+	store <2 x i64>* %inc, <2 x i64>** %ptr
+	ret void
+}
+
 define void @truncstore_v4i32tov4i8(<4 x i8>** %ptr, <4 x i32> %val) {
 ;CHECK-LABEL: truncstore_v4i32tov4i8:
 ;CHECK: ldr.w   r9, [sp]
@@ -191,10 +231,10 @@ define void @truncstore_v4i32tov4i8(<4 x i8>** %ptr, <4 x i32> %val) {
 ;CHECK: vmovn.i32       [[VECLO:d[0-9]+]], {{q[0-9]+}}
 ;CHECK: vuzp.8  [[VECLO]], {{d[0-9]+}}
 ;CHECK: ldr     r[[PTRREG:[0-9]+]], [r0]
-;CHECK: vst1.32 {[[VECLO]][0]}, [r[[PTRREG]]]
+;CHECK: vst1.32 {[[VECLO]][0]}, [r[[PTRREG]]:32]
 	%A = load <4 x i8>** %ptr
 	%trunc = trunc <4 x i32> %val to <4 x i8>
-	store  <4 x i8> %trunc, <4 x i8>* %A, align 1
+	store  <4 x i8> %trunc, <4 x i8>* %A, align 4
 	ret void
 }