This is still a work in progress, but most of the NEON instruction set is supported.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@73919 91177308-0d34-0410-b5e6-96231b3b80d8
; RUN: llvm-as < %s | llc -march=arm -mattr=+neon > %t
; RUN: grep {vmull\\.s8} %t | count 1
; RUN: grep {vmull\\.s16} %t | count 1
; RUN: grep {vmull\\.s32} %t | count 1
; RUN: grep {vmull\\.u8} %t | count 1
; RUN: grep {vmull\\.u16} %t | count 1
; RUN: grep {vmull\\.u32} %t | count 1
; RUN: grep {vmull\\.p8} %t | count 1

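; vmull is a widening multiply: it multiplies two 64-bit (D register)
; vectors and produces a 128-bit (Q register) result whose elements are
; twice the width of the source elements, so each variant should be
; selected exactly once by the functions below.

; Signed widening multiplies (vmull.s8, vmull.s16, vmull.s32).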
define <8 x i16> @vmulls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i16> %tmp3
}

define <4 x i32> @vmulls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
	ret <4 x i32> %tmp3
}

define <2 x i64> @vmulls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
	ret <2 x i64> %tmp3
}

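; Unsigned widening multiplies (vmull.u8, vmull.u16, vmull.u32).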
define <8 x i16> @vmullu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i16> %tmp3
}

define <4 x i32> @vmullu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
	ret <4 x i32> %tmp3
}

define <2 x i64> @vmullu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
	ret <2 x i64> %tmp3
}

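; Polynomial widening multiply (vmull.p8): a carry-less multiply of the
; 8-bit elements, i.e. multiplication of polynomials over GF(2).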
define <8 x i16> @vmullp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i16> %tmp3
}

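; Intrinsic declarations; readnone marks them as not accessing memory.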
declare <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone

declare <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone

declare <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8>, <8 x i8>) nounwind readnone