From 6ad251358ed5775cb94f6d4b15afa7bd745c1125 Mon Sep 17 00:00:00 2001
From: Bruno Cardoso Lopes
Date: Wed, 10 Aug 2011 17:45:17 +0000
Subject: [PATCH] The following X86 pattern is incorrect:

def : Pat<(X86Movss VR128:$src1,
                    (bc_v4i32 (v2i64 (load addr:$src2)))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;

This matches a MOVSS DAG node with a MOVLPS instruction. However, MOVSS
will replace only the low 32 bits of the register, while the MOVLPS
instruction will replace the low 64 bits. A testcase that illustrates
the bug is added, and the existing one is modified accordingly.

Patch by Tanya Lattner.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@137227 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrSSE.td      |  7 -------
 test/CodeGen/X86/vec_shuffle-37.ll | 24 ++++++++++++++++++++++--
 2 files changed, 22 insertions(+), 9 deletions(-)

diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index d8239bc9930..d2d8cd11fba 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -5933,13 +5933,6 @@ def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
           (MOVSSrr (v4f32 VR128:$src1),
                    (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
-// FIXME: Instead of a X86Movss there should be a X86Movlps here, the problem
-// is during lowering, where it's not possible to recognize the load fold cause
-// it has two uses through a bitcast. One use disappears at isel time and the
-// fold opportunity reappears.
-def : Pat<(X86Movss VR128:$src1,
-                    (bc_v4i32 (v2i64 (load addr:$src2)))),
-          (MOVLPSrm VR128:$src1, addr:$src2)>;
 
 // Shuffle with MOVSD
 def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
diff --git a/test/CodeGen/X86/vec_shuffle-37.ll b/test/CodeGen/X86/vec_shuffle-37.ll
index 2efdb14b404..950040a124a 100644
--- a/test/CodeGen/X86/vec_shuffle-37.ll
+++ b/test/CodeGen/X86/vec_shuffle-37.ll
@@ -5,8 +5,8 @@ define <4 x i32> @t00(<4 x i32>* %a0) nounwind ssp {
 entry:
 ; CHECK: movaps ({{%rdi|%rcx}}), %xmm0
-; CHECK-NEXT: movaps %xmm0, %xmm1
-; CHECK-NEXT: movlps (%rax), %xmm1
+; CHECK: movaps %xmm0, %xmm1
+; CHECK-NEXT: movss %xmm2, %xmm1
 ; CHECK-NEXT: shufps $36, %xmm1, %xmm0
   %0 = load <4 x i32>* undef, align 16
   %1 = load <4 x i32>* %a0, align 16
@@ -23,3 +23,23 @@ entry:
   store <2 x double> %vecinit94, <2 x double>* undef
   ret void
 }
+
+define void @t02(<8 x i32>* %source, <2 x i32>* %dest) nounwind noinline {
+entry:
+; CHECK: movaps 32({{%rdi|%rcx}}), %xmm0
+; CHECK-NEXT: movaps 48({{%rdi|%rcx}}), %xmm1
+; CHECK-NEXT: movss %xmm1, %xmm0
+; CHECK-NEXT: movq %xmm0, ({{%rsi|%rdx}})
+  %0 = bitcast <8 x i32>* %source to <4 x i32>*
+  %arrayidx = getelementptr inbounds <4 x i32>* %0, i64 3
+  %tmp2 = load <4 x i32>* %arrayidx, align 16
+  %tmp3 = extractelement <4 x i32> %tmp2, i32 0
+  %tmp5 = insertelement <2 x i32> undef, i32 %tmp3, i32 0
+  %arrayidx7 = getelementptr inbounds <8 x i32>* %source, i64 1
+  %1 = bitcast <8 x i32>* %arrayidx7 to <4 x i32>*
+  %tmp8 = load <4 x i32>* %1, align 16
+  %tmp9 = extractelement <4 x i32> %tmp8, i32 1
+  %tmp11 = insertelement <2 x i32> %tmp5, i32 %tmp9, i32 1
+  store <2 x i32> %tmp11, <2 x i32>* %dest, align 8
+  ret void
+}
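
For illustration only (not part of the patch): the semantic difference the commit message describes can be seen with the standard SSE intrinsics _mm_move_ss (MOVSS register form) and _mm_loadl_pi (MOVLPS load form). The C sketch below assumes nothing beyond <xmmintrin.h>; the variable names and values are arbitrary. It shows MOVSS merging only the low 32-bit lane while MOVLPS merges the low 64 bits, which is why the deleted pattern could produce wrong results.

/* Sketch: MOVSS vs MOVLPS merge semantics. */
#include <stdio.h>
#include <xmmintrin.h>

int main(void) {
  float src_mem[2] = {9.0f, 8.0f};                    /* memory operand for MOVLPS   */
  __m128 dst = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);   /* destination lanes 0..3      */
  __m128 src = _mm_setr_ps(9.0f, 8.0f, 7.0f, 6.0f);   /* register operand for MOVSS  */

  /* MOVSS semantics: only lane 0 of dst is replaced (low 32 bits). */
  __m128 movss_like = _mm_move_ss(dst, src);

  /* MOVLPS semantics: lanes 0 and 1 of dst are replaced from memory (low 64 bits). */
  __m128 movlps_like = _mm_loadl_pi(dst, (const __m64 *)src_mem);

  float a[4], b[4];
  _mm_storeu_ps(a, movss_like);
  _mm_storeu_ps(b, movlps_like);
  printf("movss : %g %g %g %g\n", a[0], a[1], a[2], a[3]);  /* 9 2 3 4 */
  printf("movlps: %g %g %g %g\n", b[0], b[1], b[2], b[3]);  /* 9 8 3 4 */
  return 0;
}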