[PowerPC] VSX loads and stores support unaligned access

I've not yet updated PPCTTI because I'm not sure what the actual relative
cost is compared to the aligned uses.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@204848 91177308-0d34-0410-b5e6-96231b3b80d8
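As a rough illustration of the PPCTTI follow-up left open above (not part of this commit), the standalone helper below sketches how the cost model could stop charging the decomposition penalty once VSX handles the unaligned access directly. The function name approxPPCMemoryOpCost and its parameters are hypothetical; the real hook is PPCTTI::getMemoryOpCost, shown in the diff below.

// Hypothetical sketch only -- not part of this commit or of PPCTTI.
// It models resolving the FIXME added in getMemoryOpCost: skip the
// misalignment penalty when the access maps to lxvd2x/stxvd2x.
unsigned approxPPCMemoryOpCost(bool HasVSX, bool IsVSXVectorType,
                               unsigned SrcBytes, unsigned Alignment) {
  unsigned Cost = 1; // each load/store unit costs 1

  // VSX loads and stores support unaligned access, so no extra cost.
  if (HasVSX && IsVSXVectorType)
    return Cost;

  // Otherwise an unaligned access must be decomposed based on the
  // alignment factor (roughly one extra op per aligned chunk).
  if (Alignment && Alignment < SrcBytes)
    Cost += SrcBytes / Alignment - 1;

  return Cost;
}

For example, an align-8 load of <2 x double> (16 bytes) would come out as 2 units without VSX but 1 unit with it, which is the behavior the new test28u/test29u cases check for at the instruction-selection level.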
@@ -7960,7 +7960,6 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
     unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty);
     if (ISD::isNON_EXTLoad(N) && VT.isVector() &&
         TM.getSubtarget<PPCSubtarget>().hasAltivec() &&
-        // FIXME: Update this for VSX!
         (VT == MVT::v16i8 || VT == MVT::v8i16 ||
          VT == MVT::v4i32 || VT == MVT::v4f32) &&
         LD->getAlignment() < ABIAlignment) {

@@ -8716,8 +8715,14 @@ bool PPCTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
   if (!VT.isSimple())
     return false;
 
-  if (VT.getSimpleVT().isVector())
-    return false;
+  if (VT.getSimpleVT().isVector()) {
+    if (PPCSubTarget.hasVSX()) {
+      if (VT != MVT::v2f64 && VT != MVT::v2i64)
+        return false;
+    } else {
+      return false;
+    }
+  }
 
   if (VT == MVT::ppcf128)
     return false;

@@ -244,6 +244,8 @@ unsigned PPCTTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
   // Each load/store unit costs 1.
   unsigned Cost = LT.first * 1;
 
+  // FIXME: Update this for VSX loads/stores that support unaligned access.
+
   // PPC in general does not support unaligned loads and stores. They'll need
   // to be decomposed based on the alignment factor.
   unsigned SrcBytes = LT.second.getStoreSize();

@@ -314,6 +314,24 @@ define void @test29(<2 x double>* %a, <2 x double> %b) {
 ; CHECK: blr
 }
 
+define <2 x double> @test28u(<2 x double>* %a) {
+  %v = load <2 x double>* %a, align 8
+  ret <2 x double> %v
+
+; CHECK-LABEL: @test28u
+; CHECK: lxvd2x 34, 0, 3
+; CHECK: blr
+}
+
+define void @test29u(<2 x double>* %a, <2 x double> %b) {
+  store <2 x double> %b, <2 x double>* %a, align 8
+  ret void
+
+; CHECK-LABEL: @test29u
+; CHECK: stxvd2x 34, 0, 3
+; CHECK: blr
+}
+
 define <2 x i64> @test30(<2 x i64>* %a) {
   %v = load <2 x i64>* %a, align 16
   ret <2 x i64> %v