llvm-6502/test/CodeGen/X86/mmx-arg-passing2.ll
Dale Johannesen 0488fb649a Massive rewrite of MMX:
The x86_mmx type is used for MMX intrinsics, parameters and
return values where these use MMX registers, and is also
supported in load, store, and bitcast.
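
As a hedged illustration (not part of this test; the @padd wrapper is
hypothetical, though @llvm.x86.mmx.padd.b is a real MMX intrinsic), an
intrinsic under the new scheme takes and returns x86_mmx directly:

declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)

define x86_mmx @padd(x86_mmx %a, x86_mmx %b) nounwind {
  ; the intrinsic call is one of the few operations on x86_mmx values
  %s = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %a, x86_mmx %b)
  ret x86_mmx %s
}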

Only the above operations generate MMX instructions, and optimizations
do not operate on or produce MMX intrinsics. 

MMX-sized vectors such as <2 x i32> are lowered to XMM registers or split
into smaller pieces.  Optimizations may operate on these forms, with the
result cast back to x86_mmx, provided the result feeds into a pre-existing
x86_mmx operation.
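
A minimal sketch of that pattern (@promote is hypothetical; @t2 in the
test below does the same with <8 x i8>):

define x86_mmx @promote(<2 x i32> %a, <2 x i32> %b) nounwind {
  ; an ordinary vector add, free for the optimizer to transform
  %sum = add <2 x i32> %a, %b
  ; only the bitcast reintroduces x86_mmx, here feeding an MMX-register return
  %m = bitcast <2 x i32> %sum to x86_mmx
  ret x86_mmx %m
}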

The point of all this is to prevent optimizations from introducing
MMX operations, which is unsafe due to the EMMS problem.
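
(MMX state aliases the x87 register stack, so an emms must execute before
any following x87 code; an optimizer-introduced MMX instruction could land
where no emms follows it. A sketch, using the standard emms intrinsic with
a hypothetical @mmx_done wrapper:)

declare void @llvm.x86.mmx.emms()

define void @mmx_done() nounwind {
  ; clear MMX state so subsequent x87 FP code is safe
  call void @llvm.x86.mmx.emms()
  ret void
}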

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@115243 91177308-0d34-0410-b5e6-96231b3b80d8
2010-09-30 23:57:10 +00:00

; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep movdq2q | count 2
; Since the add is not an MMX add, we don't have a movq2dq any more.
@g_v8qi = external global <8 x i8>

; t1: an <8 x i8> loaded from memory is bitcast to x86_mmx for the call;
; that bitcast is where the first movdq2q comes from.
define void @t1() nounwind {
  %tmp3 = load <8 x i8>* @g_v8qi, align 8
  %tmp3a = bitcast <8 x i8> %tmp3 to x86_mmx
  %tmp4 = tail call i32 (...)* @pass_v8qi( x86_mmx %tmp3a ) nounwind
  ret void
}
; t2: the add is an ordinary vector add, not an MMX add; only the bitcast
; of the result back to x86_mmx produces the second movdq2q.
define void @t2(x86_mmx %v1, x86_mmx %v2) nounwind {
  %v1a = bitcast x86_mmx %v1 to <8 x i8>
  %v2b = bitcast x86_mmx %v2 to <8 x i8>
  %tmp3 = add <8 x i8> %v1a, %v2b
  %tmp3a = bitcast <8 x i8> %tmp3 to x86_mmx
  %tmp4 = tail call i32 (...)* @pass_v8qi( x86_mmx %tmp3a ) nounwind
  ret void
}
; t3: a plain <1 x i64> argument needs no movdq2q.
define void @t3() nounwind {
  call void @pass_v1di( <1 x i64> zeroinitializer )
  ret void
}

declare i32 @pass_v8qi(...)
declare void @pass_v1di(<1 x i64>)