From 5cd95e1478ddb8f3f1efde56a1cd2db47b312d72 Mon Sep 17 00:00:00 2001
From: Nadav Rotem
Date: Wed, 11 Jul 2012 13:27:05 +0000
Subject: [PATCH] When ext-loading and trunc-storing vectors to memory, on x86
 32bit systems, allow loads/stores of 64bit values from xmm registers.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160044 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp       | 14 ++++++++++++--
 test/CodeGen/X86/2012-07-10-extload64.ll | 23 +++++++++++++++++++++--
 test/CodeGen/X86/mmx-punpckhdq.ll        |  2 +-
 test/CodeGen/X86/pointer-vector.ll       |  3 +--
 test/CodeGen/X86/widen_cast-1.ll         |  4 ++--
 test/CodeGen/X86/widen_cast-5.ll         |  3 +--
 6 files changed, 38 insertions(+), 11 deletions(-)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 0df18cb0152..4dccd40555d 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -14464,6 +14464,11 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
       }
     }
 
+    // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
+    if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
+        (64 <= MemSz))
+      SclrLoadTy = MVT::f64;
+
     // Calculate the number of scalar loads that we need to perform
     // in order to load our vector from memory.
     unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
@@ -14615,13 +14620,18 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
     for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE;
          tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) {
       MVT Tp = (MVT::SimpleValueType)tp;
-      if (TLI.isTypeLegal(Tp) && StoreType.getSizeInBits() < NumElems * ToSz)
+      if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
         StoreType = Tp;
     }
 
+    // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
+    if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
+        (64 <= NumElems * ToSz))
+      StoreType = MVT::f64;
+
     // Bitcast the original vector into a vector of store-size units
     EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
-            StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits());
+            StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
     assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
     SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
     SmallVector<SDValue, 8> Chains;
diff --git a/test/CodeGen/X86/2012-07-10-extload64.ll b/test/CodeGen/X86/2012-07-10-extload64.ll
index 3284f5e7e38..906b748fa42 100644
--- a/test/CodeGen/X86/2012-07-10-extload64.ll
+++ b/test/CodeGen/X86/2012-07-10-extload64.ll
@@ -3,11 +3,30 @@
 ; CHECK: load_store
 define void @load_store(<4 x i16>* %in) {
 entry:
+; CHECK: movsd
   %A27 = load <4 x i16>* %in, align 4
   %A28 = add <4 x i16> %A27, %A27
+; CHECK: movlpd
   store <4 x i16> %A28, <4 x i16>* %in, align 4
   ret void
-; CHECK: movd
-; CHECK: pinsrd
 ; CHECK: ret
 }
+
+; Make sure that we store a 64bit value, even on 32bit systems.
+;CHECK: store_64
+define void @store_64(<2 x i32>* %ptr) {
+BB:
+  store <2 x i32> zeroinitializer, <2 x i32>* %ptr
+  ret void
+;CHECK: movlpd
+;CHECK: ret
+}
+
+;CHECK: load_64
+define <2 x i32> @load_64(<2 x i32>* %ptr) {
+BB:
+  %t = load <2 x i32>* %ptr
+  ret <2 x i32> %t
+;CHECK: movsd
+;CHECK: ret
+}
diff --git a/test/CodeGen/X86/mmx-punpckhdq.ll b/test/CodeGen/X86/mmx-punpckhdq.ll
index 689f7bf5956..206cb33494c 100644
--- a/test/CodeGen/X86/mmx-punpckhdq.ll
+++ b/test/CodeGen/X86/mmx-punpckhdq.ll
@@ -3,7 +3,7 @@
 
 define void @bork(<1 x i64>* %x) {
 ; CHECK: bork
-; CHECK: pextrd
+; CHECK: movlpd
 entry:
 	%tmp2 = load <1 x i64>* %x		; <<1 x i64>> [#uses=1]
 	%tmp6 = bitcast <1 x i64> %tmp2 to <2 x i32>		; <<2 x i32>> [#uses=1]
diff --git a/test/CodeGen/X86/pointer-vector.ll b/test/CodeGen/X86/pointer-vector.ll
index cc1df2fffcc..800fbedb4f9 100644
--- a/test/CodeGen/X86/pointer-vector.ll
+++ b/test/CodeGen/X86/pointer-vector.ll
@@ -105,8 +105,7 @@ define <2 x i32*> @BITCAST1(<2 x i8*>* %p) nounwind {
 entry:
   %G = load <2 x i8*>* %p
 ;CHECK: movl
-;CHECK: movd
-;CHECK: pinsrd
+;CHECK: movsd
   %T = bitcast <2 x i8*> %G to <2 x i32*>
 ;CHECK: ret
   ret <2 x i32*> %T
diff --git a/test/CodeGen/X86/widen_cast-1.ll b/test/CodeGen/X86/widen_cast-1.ll
index 4330aae8ec8..d886f2c97e7 100644
--- a/test/CodeGen/X86/widen_cast-1.ll
+++ b/test/CodeGen/X86/widen_cast-1.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=x86 -mattr=+sse42 < %s | FileCheck %s
 ; CHECK: paddd
-; CHECK: pextrd
-; CHECK: movd
+; CHECK: movl
+; CHECK: movlpd
 
 ; bitcast a v4i16 to v2i32
 
diff --git a/test/CodeGen/X86/widen_cast-5.ll b/test/CodeGen/X86/widen_cast-5.ll
index 136578df1e8..9086d3a9cfd 100644
--- a/test/CodeGen/X86/widen_cast-5.ll
+++ b/test/CodeGen/X86/widen_cast-5.ll
@@ -1,9 +1,8 @@
 ; RUN: llc < %s -march=x86 -mattr=+sse42 | FileCheck %s
 ; CHECK: movl
-; CHECK: movd
+; CHECK: movlpd
 
 ; bitcast a i64 to v2i32
-
 define void @convert(<2 x i32>* %dst.addr, i64 %src) nounwind {
 entry:
 	%conv = bitcast i64 %src to <2 x i32>
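
Note on the change above: both the load path (PerformLOADCombine) and the store path (PerformSTORECombine) apply the same rule. Pick the widest legal integer type that fits the data, and if that still leaves the unit below 64 bits while the data is at least 64 bits wide (the x86 32bit case, where i64 is not a legal scalar type but f64 lives in an xmm register with SSE2), switch the unit to f64. That is why the tests now expect a single movsd for the 64bit loads and a single movlpd for the 64bit stores instead of the old movd/pinsrd/pextrd sequences. Below is a minimal standalone sketch of that selection, outside SelectionDAG; pickUnitBits and its isLegalI64/isLegalF64 flags are illustrative stand-ins for TLI.isTypeLegal, not LLVM API.

#include <cstdio>

// Sketch of the load/store unit choice made by the combines in this patch.
// 'totalBits' plays the role of MemSz (loads) or NumElems * ToSz (stores).
// On x86-32 with SSE2, isLegalI64 would be false and isLegalF64 true.
static unsigned pickUnitBits(unsigned totalBits, bool isLegalI64,
                             bool isLegalF64) {
  // Widest legal integer unit that still fits in the data.
  unsigned unit = 8;
  const unsigned intUnits[] = {16, 32, 64};
  for (unsigned bits : intUnits)
    if ((bits != 64 || isLegalI64) && bits <= totalBits)
      unit = bits;

  // i64 is not legal on 32bit x86, but a 64bit chunk can still travel
  // through an xmm register if we treat it as f64 (movsd / movlpd).
  if (isLegalF64 && unit < 64 && totalBits >= 64)
    unit = 64; // conceptually MVT::f64

  return unit;
}

int main() {
  // A <4 x i16> trunc-store on x86-32 with SSE2: 64 bits of data, i64
  // illegal, f64 legal, so one 64bit (f64) store replaces movd + pinsrd.
  std::printf("unit: %u bits\n", pickUnitBits(64, false, true));
  return 0;
}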