mirror of
				https://github.com/c64scene-ar/llvm-6502.git
				synced 2025-11-04 05:17:07 +00:00 
			
		
		
		
	Summary: When inserting an element that's coming from a vector load or a broadcast of a vector (or scalar) load, combine the load into the insertps instruction. Added PerformINSERTPSCombine for the case where we need to fix the load (load of a vector + insertps with a non-zero CountS). Added patterns for the broadcasts. Also added tests for SSE4.1, AVX, and AVX2. Reviewers: delena, nadav, craig.topper Subscribers: llvm-commits Differential Revision: http://reviews.llvm.org/D3581 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@209156 91177308-0d34-0410-b5e6-96231b3b80d8
		
			
				
	
	
		
			40 lines
		
	
	
		
			1.7 KiB
		
	
	
	
		
			LLVM
		
	
	
	
	
	
			
		
		
	
	
			40 lines
		
	
	
		
			1.7 KiB
		
	
	
	
		
			LLVM
		
	
	
	
	
	
; RUN: llc < %s -march=x86-64 -mcpu=corei7 -mattr=+sse4.1 | FileCheck %s

; rdar://12721174

; We should not fold movss into pshufd since pshufd expects m128 while movss
; loads from m32.
define void @sample_test(<4 x float>* %source, <2 x float>* %dest) nounwind {
 | 
						|
; CHECK: sample_test
 | 
						|
; CHECK-NOT: movaps
 | 
						|
; CHECK: insertps
 | 
						|
entry:
 | 
						|
  %source.addr = alloca <4 x float>*, align 8
 | 
						|
  %dest.addr = alloca <2 x float>*, align 8
 | 
						|
  %tmp = alloca <2 x float>, align 8
 | 
						|
  store <4 x float>* %source, <4 x float>** %source.addr, align 8
 | 
						|
  store <2 x float>* %dest, <2 x float>** %dest.addr, align 8
 | 
						|
  store <2 x float> zeroinitializer, <2 x float>* %tmp, align 8
 | 
						|
  %0 = load <4 x float>** %source.addr, align 8
 | 
						|
  %arrayidx = getelementptr inbounds <4 x float>* %0, i64 0
 | 
						|
  %1 = load <4 x float>* %arrayidx, align 16
 | 
						|
  %2 = extractelement <4 x float> %1, i32 0
 | 
						|
  %3 = load <2 x float>* %tmp, align 8
 | 
						|
  %4 = insertelement <2 x float> %3, float %2, i32 1
 | 
						|
  store <2 x float> %4, <2 x float>* %tmp, align 8
 | 
						|
  %5 = load <2 x float>* %tmp, align 8
 | 
						|
  %6 = load <2 x float>** %dest.addr, align 8
 | 
						|
  %arrayidx1 = getelementptr inbounds <2 x float>* %6, i64 0
 | 
						|
  store <2 x float> %5, <2 x float>* %arrayidx1, align 8
 | 
						|
  %7 = load <2 x float>** %dest.addr, align 8
 | 
						|
  %arrayidx2 = getelementptr inbounds <2 x float>* %7, i64 0
 | 
						|
  %8 = load <2 x float>* %arrayidx2, align 8
 | 
						|
  %vecext = extractelement <2 x float> %8, i32 0
 | 
						|
  %9 = load <2 x float>** %dest.addr, align 8
 | 
						|
  %arrayidx3 = getelementptr inbounds <2 x float>* %9, i64 0
 | 
						|
  %10 = load <2 x float>* %arrayidx3, align 8
 | 
						|
  %vecext4 = extractelement <2 x float> %10, i32 1
 | 
						|
  call void @ext(float %vecext, float %vecext4)
 | 
						|
  ret void
 | 
						|
}
 | 
						|
declare void @ext(float, float)
 |