Massive rewrite of MMX:

The x86_mmx type is used for MMX intrinsics, and for parameters and
return values where these use MMX registers; it is also supported in
load, store, and bitcast.

Only the operations above generate MMX instructions, and optimizations
neither operate on nor produce MMX intrinsics.
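
A minimal IR sketch of what this means in practice (the function name
@add_pi16 is invented for illustration; llvm.x86.mmx.padd.w is one of the
intrinsics redefined below): x86_mmx values appear only as intrinsic
operands and results, as parameters and return values, and as the operands
of load, store, and bitcast.

    define x86_mmx @add_pi16(x86_mmx %a, x86_mmx %b) {
      ; the only things done with the x86_mmx arguments are to feed an
      ; MMX intrinsic and to return its result
      %r = call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %a, x86_mmx %b)
      ret x86_mmx %r
    }
    declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)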

MMX-sized vectors such as <2 x i32> are lowered to XMM registers or
split into smaller pieces.  Optimizations may operate on these forms,
and the result may be cast back to x86_mmx, provided the result feeds
into a pre-existing x86_mmx operation.
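
For example (a sketch; the value names are invented, and llvm.x86.mmx.padd.d
is taken from the intrinsic list below), the <2 x i32> arithmetic stays in
ordinary vector IR where it can be optimized and lowered through XMM, and
only the final bitcast produces an x86_mmx value, which then feeds an
x86_mmx operation that was already present:

    %sum = add <2 x i32> %x, %y                 ; optimizable, lowered via XMM
    %m   = bitcast <2 x i32> %sum to x86_mmx    ; the only new x86_mmx value
    %r   = call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %m, x86_mmx %acc)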

The point of all this is to prevent optimizations from introducing MMX
operations, which would be unsafe because of the EMMS problem: the MMX
registers alias the x87 floating-point stack, so an MMX instruction that
the compiler introduces on its own would need an EMMS that the surrounding
code never arranged to execute.



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@115243 91177308-0d34-0410-b5e6-96231b3b80d8
Dale Johannesen 2010-09-30 23:57:10 +00:00
parent a7e3b56477
commit 0488fb649a
40 changed files with 1433 additions and 1192 deletions


@@ -130,12 +130,12 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
llvm_i64_ty], [IntrNoMem]>; llvm_i64_ty], [IntrNoMem]>;
def int_x86_sse_cvtps2pi : GCCBuiltin<"__builtin_ia32_cvtps2pi">, def int_x86_sse_cvtps2pi : GCCBuiltin<"__builtin_ia32_cvtps2pi">,
Intrinsic<[llvm_v2i32_ty], [llvm_v4f32_ty], [IntrNoMem]>; Intrinsic<[llvm_x86mmx_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_x86_sse_cvttps2pi: GCCBuiltin<"__builtin_ia32_cvttps2pi">, def int_x86_sse_cvttps2pi: GCCBuiltin<"__builtin_ia32_cvttps2pi">,
Intrinsic<[llvm_v2i32_ty], [llvm_v4f32_ty], [IntrNoMem]>; Intrinsic<[llvm_x86mmx_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_x86_sse_cvtpi2ps : GCCBuiltin<"__builtin_ia32_cvtpi2ps">, def int_x86_sse_cvtpi2ps : GCCBuiltin<"__builtin_ia32_cvtpi2ps">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
llvm_v2i32_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
} }
// SIMD load ops // SIMD load ops
@@ -445,11 +445,11 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
llvm_v4f32_ty], [IntrNoMem]>; llvm_v4f32_ty], [IntrNoMem]>;
def int_x86_sse_cvtpd2pi : GCCBuiltin<"__builtin_ia32_cvtpd2pi">, def int_x86_sse_cvtpd2pi : GCCBuiltin<"__builtin_ia32_cvtpd2pi">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2f64_ty], [IntrNoMem]>; Intrinsic<[llvm_x86mmx_ty], [llvm_v2f64_ty], [IntrNoMem]>;
def int_x86_sse_cvttpd2pi: GCCBuiltin<"__builtin_ia32_cvttpd2pi">, def int_x86_sse_cvttpd2pi: GCCBuiltin<"__builtin_ia32_cvttpd2pi">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2f64_ty], [IntrNoMem]>; Intrinsic<[llvm_x86mmx_ty], [llvm_v2f64_ty], [IntrNoMem]>;
def int_x86_sse_cvtpi2pd : GCCBuiltin<"__builtin_ia32_cvtpi2pd">, def int_x86_sse_cvtpi2pd : GCCBuiltin<"__builtin_ia32_cvtpi2pd">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2i32_ty], [IntrNoMem]>; Intrinsic<[llvm_v2f64_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
} }
// SIMD load ops // SIMD load ops
@@ -563,50 +563,50 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Horizontal arithmetic ops // Horizontal arithmetic ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_ssse3_phadd_w : GCCBuiltin<"__builtin_ia32_phaddw">, def int_x86_ssse3_phadd_w : GCCBuiltin<"__builtin_ia32_phaddw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_phadd_w_128 : GCCBuiltin<"__builtin_ia32_phaddw128">, def int_x86_ssse3_phadd_w_128 : GCCBuiltin<"__builtin_ia32_phaddw128">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
llvm_v8i16_ty], [IntrNoMem]>; llvm_v8i16_ty], [IntrNoMem]>;
def int_x86_ssse3_phadd_d : GCCBuiltin<"__builtin_ia32_phaddd">, def int_x86_ssse3_phadd_d : GCCBuiltin<"__builtin_ia32_phaddd">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v2i32_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_phadd_d_128 : GCCBuiltin<"__builtin_ia32_phaddd128">, def int_x86_ssse3_phadd_d_128 : GCCBuiltin<"__builtin_ia32_phaddd128">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
llvm_v4i32_ty], [IntrNoMem]>; llvm_v4i32_ty], [IntrNoMem]>;
def int_x86_ssse3_phadd_sw : GCCBuiltin<"__builtin_ia32_phaddsw">, def int_x86_ssse3_phadd_sw : GCCBuiltin<"__builtin_ia32_phaddsw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_phadd_sw_128 : GCCBuiltin<"__builtin_ia32_phaddsw128">, def int_x86_ssse3_phadd_sw_128 : GCCBuiltin<"__builtin_ia32_phaddsw128">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
llvm_v4i32_ty], [IntrNoMem]>; llvm_v4i32_ty], [IntrNoMem]>;
def int_x86_ssse3_phsub_w : GCCBuiltin<"__builtin_ia32_phsubw">, def int_x86_ssse3_phsub_w : GCCBuiltin<"__builtin_ia32_phsubw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_phsub_w_128 : GCCBuiltin<"__builtin_ia32_phsubw128">, def int_x86_ssse3_phsub_w_128 : GCCBuiltin<"__builtin_ia32_phsubw128">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
llvm_v8i16_ty], [IntrNoMem]>; llvm_v8i16_ty], [IntrNoMem]>;
def int_x86_ssse3_phsub_d : GCCBuiltin<"__builtin_ia32_phsubd">, def int_x86_ssse3_phsub_d : GCCBuiltin<"__builtin_ia32_phsubd">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v2i32_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_phsub_d_128 : GCCBuiltin<"__builtin_ia32_phsubd128">, def int_x86_ssse3_phsub_d_128 : GCCBuiltin<"__builtin_ia32_phsubd128">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
llvm_v4i32_ty], [IntrNoMem]>; llvm_v4i32_ty], [IntrNoMem]>;
def int_x86_ssse3_phsub_sw : GCCBuiltin<"__builtin_ia32_phsubsw">, def int_x86_ssse3_phsub_sw : GCCBuiltin<"__builtin_ia32_phsubsw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_phsub_sw_128 : GCCBuiltin<"__builtin_ia32_phsubsw128">, def int_x86_ssse3_phsub_sw_128 : GCCBuiltin<"__builtin_ia32_phsubsw128">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
llvm_v8i16_ty], [IntrNoMem]>; llvm_v8i16_ty], [IntrNoMem]>;
def int_x86_ssse3_pmadd_ub_sw : GCCBuiltin<"__builtin_ia32_pmaddubsw">, def int_x86_ssse3_pmadd_ub_sw : GCCBuiltin<"__builtin_ia32_pmaddubsw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_pmadd_ub_sw_128 : GCCBuiltin<"__builtin_ia32_pmaddubsw128">, def int_x86_ssse3_pmadd_ub_sw_128 : GCCBuiltin<"__builtin_ia32_pmaddubsw128">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
llvm_v8i16_ty], [IntrNoMem]>; llvm_v8i16_ty], [IntrNoMem]>;
@@ -615,8 +615,8 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Packed multiply high with round and scale // Packed multiply high with round and scale
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_ssse3_pmul_hr_sw : GCCBuiltin<"__builtin_ia32_pmulhrsw">, def int_x86_ssse3_pmul_hr_sw : GCCBuiltin<"__builtin_ia32_pmulhrsw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_ssse3_pmul_hr_sw_128 : GCCBuiltin<"__builtin_ia32_pmulhrsw128">, def int_x86_ssse3_pmul_hr_sw_128 : GCCBuiltin<"__builtin_ia32_pmulhrsw128">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
llvm_v8i16_ty], [IntrNoMem, Commutative]>; llvm_v8i16_ty], [IntrNoMem, Commutative]>;
@@ -625,35 +625,35 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Shuffle ops // Shuffle ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_ssse3_pshuf_b : GCCBuiltin<"__builtin_ia32_pshufb">, def int_x86_ssse3_pshuf_b : GCCBuiltin<"__builtin_ia32_pshufb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v8i8_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_pshuf_b_128 : GCCBuiltin<"__builtin_ia32_pshufb128">, def int_x86_ssse3_pshuf_b_128 : GCCBuiltin<"__builtin_ia32_pshufb128">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
llvm_v16i8_ty], [IntrNoMem]>; llvm_v16i8_ty], [IntrNoMem]>;
def int_x86_ssse3_pshuf_w : GCCBuiltin<"__builtin_ia32_pshufw">, def int_x86_ssse3_pshuf_w : GCCBuiltin<"__builtin_ia32_pshufw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_i32_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_i32_ty],
[IntrNoMem]>; [IntrNoMem]>;
} }
// Sign ops // Sign ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_ssse3_psign_b : GCCBuiltin<"__builtin_ia32_psignb">, def int_x86_ssse3_psign_b : GCCBuiltin<"__builtin_ia32_psignb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v8i8_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_psign_b_128 : GCCBuiltin<"__builtin_ia32_psignb128">, def int_x86_ssse3_psign_b_128 : GCCBuiltin<"__builtin_ia32_psignb128">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
llvm_v16i8_ty], [IntrNoMem]>; llvm_v16i8_ty], [IntrNoMem]>;
def int_x86_ssse3_psign_w : GCCBuiltin<"__builtin_ia32_psignw">, def int_x86_ssse3_psign_w : GCCBuiltin<"__builtin_ia32_psignw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_psign_w_128 : GCCBuiltin<"__builtin_ia32_psignw128">, def int_x86_ssse3_psign_w_128 : GCCBuiltin<"__builtin_ia32_psignw128">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
llvm_v8i16_ty], [IntrNoMem]>; llvm_v8i16_ty], [IntrNoMem]>;
def int_x86_ssse3_psign_d : GCCBuiltin<"__builtin_ia32_psignd">, def int_x86_ssse3_psign_d : GCCBuiltin<"__builtin_ia32_psignd">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v2i32_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_psign_d_128 : GCCBuiltin<"__builtin_ia32_psignd128">, def int_x86_ssse3_psign_d_128 : GCCBuiltin<"__builtin_ia32_psignd128">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
llvm_v4i32_ty], [IntrNoMem]>; llvm_v4i32_ty], [IntrNoMem]>;
@@ -662,17 +662,17 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Absolute value ops // Absolute value ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_ssse3_pabs_b : GCCBuiltin<"__builtin_ia32_pabsb">, def int_x86_ssse3_pabs_b : GCCBuiltin<"__builtin_ia32_pabsb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty], [IntrNoMem]>; Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_pabs_b_128 : GCCBuiltin<"__builtin_ia32_pabsb128">, def int_x86_ssse3_pabs_b_128 : GCCBuiltin<"__builtin_ia32_pabsb128">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>; Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
def int_x86_ssse3_pabs_w : GCCBuiltin<"__builtin_ia32_pabsw">, def int_x86_ssse3_pabs_w : GCCBuiltin<"__builtin_ia32_pabsw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty], [IntrNoMem]>; Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_pabs_w_128 : GCCBuiltin<"__builtin_ia32_pabsw128">, def int_x86_ssse3_pabs_w_128 : GCCBuiltin<"__builtin_ia32_pabsw128">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>; Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
def int_x86_ssse3_pabs_d : GCCBuiltin<"__builtin_ia32_pabsd">, def int_x86_ssse3_pabs_d : GCCBuiltin<"__builtin_ia32_pabsd">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_pabs_d_128 : GCCBuiltin<"__builtin_ia32_pabsd128">, def int_x86_ssse3_pabs_d_128 : GCCBuiltin<"__builtin_ia32_pabsd128">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>; Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
} }
@@ -1328,281 +1328,257 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Addition // Addition
def int_x86_mmx_padd_b : GCCBuiltin<"__builtin_ia32_paddb">, def int_x86_mmx_padd_b : GCCBuiltin<"__builtin_ia32_paddb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, llvm_v8i8_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_padd_w : GCCBuiltin<"__builtin_ia32_paddw">, def int_x86_mmx_padd_w : GCCBuiltin<"__builtin_ia32_paddw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_padd_d : GCCBuiltin<"__builtin_ia32_paddd">, def int_x86_mmx_padd_d : GCCBuiltin<"__builtin_ia32_paddd">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_padd_q : GCCBuiltin<"__builtin_ia32_paddq">, def int_x86_mmx_padd_q : GCCBuiltin<"__builtin_ia32_paddq">,
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_padds_b : GCCBuiltin<"__builtin_ia32_paddsb">, def int_x86_mmx_padds_b : GCCBuiltin<"__builtin_ia32_paddsb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v8i8_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_padds_w : GCCBuiltin<"__builtin_ia32_paddsw">, def int_x86_mmx_padds_w : GCCBuiltin<"__builtin_ia32_paddsw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_paddus_b : GCCBuiltin<"__builtin_ia32_paddusb">, def int_x86_mmx_paddus_b : GCCBuiltin<"__builtin_ia32_paddusb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v8i8_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_paddus_w : GCCBuiltin<"__builtin_ia32_paddusw">, def int_x86_mmx_paddus_w : GCCBuiltin<"__builtin_ia32_paddusw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
// Subtraction // Subtraction
def int_x86_mmx_psub_b : GCCBuiltin<"__builtin_ia32_psubb">, def int_x86_mmx_psub_b : GCCBuiltin<"__builtin_ia32_psubb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, llvm_v8i8_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_psub_w : GCCBuiltin<"__builtin_ia32_psubw">, def int_x86_mmx_psub_w : GCCBuiltin<"__builtin_ia32_psubw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_psub_d : GCCBuiltin<"__builtin_ia32_psubd">, def int_x86_mmx_psub_d : GCCBuiltin<"__builtin_ia32_psubd">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_psub_q : GCCBuiltin<"__builtin_ia32_psubq">, def int_x86_mmx_psub_q : GCCBuiltin<"__builtin_ia32_psubq">,
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb">, def int_x86_mmx_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v8i8_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw">, def int_x86_mmx_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psubus_b : GCCBuiltin<"__builtin_ia32_psubusb">, def int_x86_mmx_psubus_b : GCCBuiltin<"__builtin_ia32_psubusb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v8i8_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psubus_w : GCCBuiltin<"__builtin_ia32_psubusw">, def int_x86_mmx_psubus_w : GCCBuiltin<"__builtin_ia32_psubusw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
// Multiplication // Multiplication
def int_x86_mmx_pmulh_w : GCCBuiltin<"__builtin_ia32_pmulhw">, def int_x86_mmx_pmulh_w : GCCBuiltin<"__builtin_ia32_pmulhw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pmull_w : GCCBuiltin<"__builtin_ia32_pmullw">, def int_x86_mmx_pmull_w : GCCBuiltin<"__builtin_ia32_pmullw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw">, def int_x86_mmx_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pmulu_dq : GCCBuiltin<"__builtin_ia32_pmuludq">, def int_x86_mmx_pmulu_dq : GCCBuiltin<"__builtin_ia32_pmuludq">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v2i32_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pmadd_wd : GCCBuiltin<"__builtin_ia32_pmaddwd">, def int_x86_mmx_pmadd_wd : GCCBuiltin<"__builtin_ia32_pmaddwd">,
Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
// Bitwise operations // Bitwise operations
def int_x86_mmx_pand : GCCBuiltin<"__builtin_ia32_pand">, def int_x86_mmx_pand : GCCBuiltin<"__builtin_ia32_pand">,
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_pandn : GCCBuiltin<"__builtin_ia32_pandn">, def int_x86_mmx_pandn : GCCBuiltin<"__builtin_ia32_pandn">,
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_por : GCCBuiltin<"__builtin_ia32_por">, def int_x86_mmx_por : GCCBuiltin<"__builtin_ia32_por">,
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_pxor : GCCBuiltin<"__builtin_ia32_pxor">, def int_x86_mmx_pxor : GCCBuiltin<"__builtin_ia32_pxor">,
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
// Averages // Averages
def int_x86_mmx_pavg_b : GCCBuiltin<"__builtin_ia32_pavgb">, def int_x86_mmx_pavg_b : GCCBuiltin<"__builtin_ia32_pavgb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v8i8_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pavg_w : GCCBuiltin<"__builtin_ia32_pavgw">, def int_x86_mmx_pavg_w : GCCBuiltin<"__builtin_ia32_pavgw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
// Maximum // Maximum
def int_x86_mmx_pmaxu_b : GCCBuiltin<"__builtin_ia32_pmaxub">, def int_x86_mmx_pmaxu_b : GCCBuiltin<"__builtin_ia32_pmaxub">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v8i8_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pmaxs_w : GCCBuiltin<"__builtin_ia32_pmaxsw">, def int_x86_mmx_pmaxs_w : GCCBuiltin<"__builtin_ia32_pmaxsw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
// Minimum // Minimum
def int_x86_mmx_pminu_b : GCCBuiltin<"__builtin_ia32_pminub">, def int_x86_mmx_pminu_b : GCCBuiltin<"__builtin_ia32_pminub">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v8i8_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pmins_w : GCCBuiltin<"__builtin_ia32_pminsw">, def int_x86_mmx_pmins_w : GCCBuiltin<"__builtin_ia32_pminsw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
// Packed sum of absolute differences // Packed sum of absolute differences
def int_x86_mmx_psad_bw : GCCBuiltin<"__builtin_ia32_psadbw">, def int_x86_mmx_psad_bw : GCCBuiltin<"__builtin_ia32_psadbw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v8i8_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v8i8_ty], [IntrNoMem, Commutative]>; llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
} }
// Integer shift ops. // Integer shift ops.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Shift left logical // Shift left logical
def int_x86_mmx_psll_w : GCCBuiltin<"__builtin_ia32_psllw">, def int_x86_mmx_psll_w : GCCBuiltin<"__builtin_ia32_psllw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v1i64_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psll_d : GCCBuiltin<"__builtin_ia32_pslld">, def int_x86_mmx_psll_d : GCCBuiltin<"__builtin_ia32_pslld">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v1i64_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psll_q : GCCBuiltin<"__builtin_ia32_psllq">, def int_x86_mmx_psll_q : GCCBuiltin<"__builtin_ia32_psllq">,
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v1i64_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psrl_w : GCCBuiltin<"__builtin_ia32_psrlw">, def int_x86_mmx_psrl_w : GCCBuiltin<"__builtin_ia32_psrlw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v1i64_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psrl_d : GCCBuiltin<"__builtin_ia32_psrld">, def int_x86_mmx_psrl_d : GCCBuiltin<"__builtin_ia32_psrld">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v1i64_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psrl_q : GCCBuiltin<"__builtin_ia32_psrlq">, def int_x86_mmx_psrl_q : GCCBuiltin<"__builtin_ia32_psrlq">,
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v1i64_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psra_w : GCCBuiltin<"__builtin_ia32_psraw">, def int_x86_mmx_psra_w : GCCBuiltin<"__builtin_ia32_psraw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v1i64_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_psra_d : GCCBuiltin<"__builtin_ia32_psrad">, def int_x86_mmx_psra_d : GCCBuiltin<"__builtin_ia32_psrad">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v1i64_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_pslli_w : GCCBuiltin<"__builtin_ia32_psllwi">, def int_x86_mmx_pslli_w : GCCBuiltin<"__builtin_ia32_psllwi">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty], [IntrNoMem]>; llvm_i32_ty], [IntrNoMem]>;
def int_x86_mmx_pslli_d : GCCBuiltin<"__builtin_ia32_pslldi">, def int_x86_mmx_pslli_d : GCCBuiltin<"__builtin_ia32_pslldi">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty], [IntrNoMem]>; llvm_i32_ty], [IntrNoMem]>;
def int_x86_mmx_pslli_q : GCCBuiltin<"__builtin_ia32_psllqi">, def int_x86_mmx_pslli_q : GCCBuiltin<"__builtin_ia32_psllqi">,
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty], [IntrNoMem]>; llvm_i32_ty], [IntrNoMem]>;
def int_x86_mmx_psrli_w : GCCBuiltin<"__builtin_ia32_psrlwi">, def int_x86_mmx_psrli_w : GCCBuiltin<"__builtin_ia32_psrlwi">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty], [IntrNoMem]>; llvm_i32_ty], [IntrNoMem]>;
def int_x86_mmx_psrli_d : GCCBuiltin<"__builtin_ia32_psrldi">, def int_x86_mmx_psrli_d : GCCBuiltin<"__builtin_ia32_psrldi">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty], [IntrNoMem]>; llvm_i32_ty], [IntrNoMem]>;
def int_x86_mmx_psrli_q : GCCBuiltin<"__builtin_ia32_psrlqi">, def int_x86_mmx_psrli_q : GCCBuiltin<"__builtin_ia32_psrlqi">,
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty], [IntrNoMem]>; llvm_i32_ty], [IntrNoMem]>;
def int_x86_mmx_psrai_w : GCCBuiltin<"__builtin_ia32_psrawi">, def int_x86_mmx_psrai_w : GCCBuiltin<"__builtin_ia32_psrawi">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty], [IntrNoMem]>; llvm_i32_ty], [IntrNoMem]>;
def int_x86_mmx_psrai_d : GCCBuiltin<"__builtin_ia32_psradi">, def int_x86_mmx_psrai_d : GCCBuiltin<"__builtin_ia32_psradi">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty], [IntrNoMem]>; llvm_i32_ty], [IntrNoMem]>;
} }
// Pack ops. // Pack ops.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_mmx_packsswb : GCCBuiltin<"__builtin_ia32_packsswb">, def int_x86_mmx_packsswb : GCCBuiltin<"__builtin_ia32_packsswb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_packssdw : GCCBuiltin<"__builtin_ia32_packssdw">, def int_x86_mmx_packssdw : GCCBuiltin<"__builtin_ia32_packssdw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v2i32_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v2i32_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_packuswb : GCCBuiltin<"__builtin_ia32_packuswb">, def int_x86_mmx_packuswb : GCCBuiltin<"__builtin_ia32_packuswb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
} }
// Unpacking ops. // Unpacking ops.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_mmx_punpckhbw : GCCBuiltin<"__builtin_ia32_punpckhbw">, def int_x86_mmx_punpckhbw : GCCBuiltin<"__builtin_ia32_punpckhbw">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, llvm_v8i8_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_punpckhwd : GCCBuiltin<"__builtin_ia32_punpckhwd">, def int_x86_mmx_punpckhwd : GCCBuiltin<"__builtin_ia32_punpckhwd">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_punpckhdq : GCCBuiltin<"__builtin_ia32_punpckhdq">, def int_x86_mmx_punpckhdq : GCCBuiltin<"__builtin_ia32_punpckhdq">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_punpcklbw : GCCBuiltin<"__builtin_ia32_punpcklbw">, def int_x86_mmx_punpcklbw : GCCBuiltin<"__builtin_ia32_punpcklbw">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, llvm_v8i8_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_punpcklwd : GCCBuiltin<"__builtin_ia32_punpcklwd">, def int_x86_mmx_punpcklwd : GCCBuiltin<"__builtin_ia32_punpcklwd">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_punpckldq : GCCBuiltin<"__builtin_ia32_punpckldq">, def int_x86_mmx_punpckldq : GCCBuiltin<"__builtin_ia32_punpckldq">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
[IntrNoMem]>; [IntrNoMem]>;
} }
// Integer comparison ops // Integer comparison ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_mmx_pcmpeq_b : GCCBuiltin<"__builtin_ia32_pcmpeqb">, def int_x86_mmx_pcmpeq_b : GCCBuiltin<"__builtin_ia32_pcmpeqb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v8i8_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_pcmpeq_w : GCCBuiltin<"__builtin_ia32_pcmpeqw">, def int_x86_mmx_pcmpeq_w : GCCBuiltin<"__builtin_ia32_pcmpeqw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_pcmpeq_d : GCCBuiltin<"__builtin_ia32_pcmpeqd">, def int_x86_mmx_pcmpeq_d : GCCBuiltin<"__builtin_ia32_pcmpeqd">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v2i32_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_pcmpgt_b : GCCBuiltin<"__builtin_ia32_pcmpgtb">, def int_x86_mmx_pcmpgt_b : GCCBuiltin<"__builtin_ia32_pcmpgtb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v8i8_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_pcmpgt_w : GCCBuiltin<"__builtin_ia32_pcmpgtw">, def int_x86_mmx_pcmpgt_w : GCCBuiltin<"__builtin_ia32_pcmpgtw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v4i16_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_pcmpgt_d : GCCBuiltin<"__builtin_ia32_pcmpgtd">, def int_x86_mmx_pcmpgt_d : GCCBuiltin<"__builtin_ia32_pcmpgtd">,
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v2i32_ty], [IntrNoMem]>; llvm_x86mmx_ty], [IntrNoMem]>;
} }
// Misc. // Misc.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_mmx_maskmovq : GCCBuiltin<"__builtin_ia32_maskmovq">, def int_x86_mmx_maskmovq : GCCBuiltin<"__builtin_ia32_maskmovq">,
Intrinsic<[], [llvm_v8i8_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; Intrinsic<[], [llvm_x86mmx_ty, llvm_x86mmx_ty, llvm_ptr_ty], []>;
def int_x86_mmx_pmovmskb : GCCBuiltin<"__builtin_ia32_pmovmskb">, def int_x86_mmx_pmovmskb : GCCBuiltin<"__builtin_ia32_pmovmskb">,
Intrinsic<[llvm_i32_ty], [llvm_v8i8_ty], [IntrNoMem]>; Intrinsic<[llvm_i32_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_mmx_movnt_dq : GCCBuiltin<"__builtin_ia32_movntq">, def int_x86_mmx_movnt_dq : GCCBuiltin<"__builtin_ia32_movntq">,
Intrinsic<[], [llvm_ptr_ty, llvm_v1i64_ty], []>; Intrinsic<[], [llvm_ptrx86mmx_ty, llvm_x86mmx_ty], []>;
def int_x86_mmx_palignr_b : GCCBuiltin<"__builtin_ia32_palignr">, def int_x86_mmx_palignr_b : GCCBuiltin<"__builtin_ia32_palignr">,
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_v1i64_ty, llvm_i8_ty], [IntrNoMem]>; llvm_x86mmx_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_mmx_pextr_w : def int_x86_mmx_pextr_w : GCCBuiltin<"__builtin_ia32_vec_ext_v4hi">,
Intrinsic<[llvm_i32_ty], [llvm_v1i64_ty, llvm_i32_ty], Intrinsic<[llvm_i32_ty], [llvm_x86mmx_ty, llvm_i32_ty],
[IntrNoMem]>; [IntrNoMem]>;
def int_x86_mmx_pinsr_w : def int_x86_mmx_pinsr_w : GCCBuiltin<"__builtin_ia32_vec_set_v4hi">,
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_x86_mmx_cvtsi32_si64 :
Intrinsic<[llvm_v1i64_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_x86_mmx_cvtsi64_si32 :
Intrinsic<[llvm_i32_ty], [llvm_v1i64_ty], [IntrNoMem]>;
def int_x86_mmx_vec_init_b : GCCBuiltin<"__builtin_ia32_vec_init_v8qi">,
Intrinsic<[llvm_v8i8_ty],
[llvm_i8_ty, llvm_i8_ty, llvm_i8_ty, llvm_i8_ty,
llvm_i8_ty, llvm_i8_ty, llvm_i8_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_mmx_vec_init_w : GCCBuiltin<"__builtin_ia32_vec_init_v4hi">,
Intrinsic<[llvm_v4i16_ty],
[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
[IntrNoMem]>;
def int_x86_mmx_vec_init_d : GCCBuiltin<"__builtin_ia32_vec_init_v2si">,
Intrinsic<[llvm_v2i32_ty],
[llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_x86_mmx_vec_ext_d : GCCBuiltin<"__builtin_ia32_vec_ext_v2si">,
Intrinsic<[llvm_v2i32_ty],
[llvm_v2i32_ty, llvm_i32_ty],
[IntrNoMem]>;
} }


@@ -4322,6 +4322,66 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
return 0; return 0;
} }
case Intrinsic::x86_mmx_pslli_w:
case Intrinsic::x86_mmx_pslli_d:
case Intrinsic::x86_mmx_pslli_q:
case Intrinsic::x86_mmx_psrli_w:
case Intrinsic::x86_mmx_psrli_d:
case Intrinsic::x86_mmx_psrli_q:
case Intrinsic::x86_mmx_psrai_w:
case Intrinsic::x86_mmx_psrai_d: {
SDValue ShAmt = getValue(I.getArgOperand(1));
if (isa<ConstantSDNode>(ShAmt)) {
visitTargetIntrinsic(I, Intrinsic);
return 0;
}
unsigned NewIntrinsic = 0;
EVT ShAmtVT = MVT::v2i32;
switch (Intrinsic) {
case Intrinsic::x86_mmx_pslli_w:
NewIntrinsic = Intrinsic::x86_mmx_psll_w;
break;
case Intrinsic::x86_mmx_pslli_d:
NewIntrinsic = Intrinsic::x86_mmx_psll_d;
break;
case Intrinsic::x86_mmx_pslli_q:
NewIntrinsic = Intrinsic::x86_mmx_psll_q;
break;
case Intrinsic::x86_mmx_psrli_w:
NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
break;
case Intrinsic::x86_mmx_psrli_d:
NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
break;
case Intrinsic::x86_mmx_psrli_q:
NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
break;
case Intrinsic::x86_mmx_psrai_w:
NewIntrinsic = Intrinsic::x86_mmx_psra_w;
break;
case Intrinsic::x86_mmx_psrai_d:
NewIntrinsic = Intrinsic::x86_mmx_psra_d;
break;
default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
}
// The vector shift intrinsics with scalars uses 32b shift amounts but
// the sse2/mmx shift instructions reads 64 bits. Set the upper 32 bits
// to be zero.
// We must do this early because v2i32 is not a legal type.
DebugLoc dl = getCurDebugLoc();
SDValue ShOps[2];
ShOps[0] = ShAmt;
ShOps[1] = DAG.getConstant(0, MVT::i32);
ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2);
EVT DestVT = TLI.getValueType(I.getType());
ShAmt = DAG.getNode(ISD::BIT_CONVERT, dl, DestVT, ShAmt);
Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
DAG.getConstant(NewIntrinsic, MVT::i32),
getValue(I.getArgOperand(0)), ShAmt);
setValue(&I, Res);
return 0;
}
case Intrinsic::convertff: case Intrinsic::convertff:
case Intrinsic::convertfsi: case Intrinsic::convertfsi:
case Intrinsic::convertfui: case Intrinsic::convertfui:
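
The new case above fires only when the shift count is not a constant; a
constant count still goes through visitTargetIntrinsic. A sketch of the IR
shape it handles (value names invented; llvm.x86.mmx.pslli.w and
llvm.x86.mmx.psll.w are the immediate-count and register-count forms from
the intrinsic list): the i32 count is widened to 64 bits with the upper half
zeroed, bitcast to the MMX type, and the call is rebuilt as psll.w.

    ; %n is not a compile-time constant, so this cannot be selected as-is
    %r = call x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx %a, i32 %n)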


@@ -48,7 +48,7 @@ def RetCC_X86Common : CallingConv<[
// MMX vector types are always returned in MM0. If the target doesn't have // MMX vector types are always returned in MM0. If the target doesn't have
// MM0, it doesn't support these vector types. // MM0, it doesn't support these vector types.
CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToReg<[MM0]>>, CCIfType<[x86mmx, v1i64], CCAssignToReg<[MM0]>>,
// Long double types are always returned in ST0 (even with SSE). // Long double types are always returned in ST0 (even with SSE).
CCIfType<[f80], CCAssignToReg<[ST0, ST1]>> CCIfType<[f80], CCAssignToReg<[ST0, ST1]>>
@@ -95,14 +95,14 @@ def RetCC_X86_64_C : CallingConv<[
// returned in RAX. This disagrees with ABI documentation but is bug // returned in RAX. This disagrees with ABI documentation but is bug
// compatible with gcc. // compatible with gcc.
CCIfType<[v1i64], CCAssignToReg<[RAX]>>, CCIfType<[v1i64], CCAssignToReg<[RAX]>>,
CCIfType<[v8i8, v4i16, v2i32], CCAssignToReg<[XMM0, XMM1]>>, CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,
CCDelegateTo<RetCC_X86Common> CCDelegateTo<RetCC_X86Common>
]>; ]>;
// X86-Win64 C return-value convention. // X86-Win64 C return-value convention.
def RetCC_X86_Win64_C : CallingConv<[ def RetCC_X86_Win64_C : CallingConv<[
// The X86-Win64 calling convention always returns __m64 values in RAX. // The X86-Win64 calling convention always returns __m64 values in RAX.
CCIfType<[v8i8, v4i16, v2i32, v1i64], CCBitConvertToType<i64>>, CCIfType<[x86mmx, v1i64], CCBitConvertToType<i64>>,
// And FP in XMM0 only. // And FP in XMM0 only.
CCIfType<[f32], CCAssignToReg<[XMM0]>>, CCIfType<[f32], CCAssignToReg<[XMM0]>>,
@@ -161,7 +161,7 @@ def CC_X86_64_C : CallingConv<[
// The first 8 MMX (except for v1i64) vector arguments are passed in XMM // The first 8 MMX (except for v1i64) vector arguments are passed in XMM
// registers on Darwin. // registers on Darwin.
CCIfType<[v8i8, v4i16, v2i32], CCIfType<[x86mmx],
CCIfSubtarget<"isTargetDarwin()", CCIfSubtarget<"isTargetDarwin()",
CCIfSubtarget<"hasSSE2()", CCIfSubtarget<"hasSSE2()",
CCPromoteToType<v2i64>>>>, CCPromoteToType<v2i64>>>>,
@@ -192,7 +192,7 @@ def CC_X86_64_C : CallingConv<[
CCAssignToStack<32, 32>>, CCAssignToStack<32, 32>>,
// __m64 vectors get 8-byte stack slots that are 8-byte aligned. // __m64 vectors get 8-byte stack slots that are 8-byte aligned.
CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 8>> CCIfType<[x86mmx,v1i64], CCAssignToStack<8, 8>>
]>; ]>;
// Calling convention used on Win64 // Calling convention used on Win64
@@ -210,8 +210,7 @@ def CC_X86_Win64_C : CallingConv<[
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>, CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,
// The first 4 MMX vector arguments are passed in GPRs. // The first 4 MMX vector arguments are passed in GPRs.
CCIfType<[v8i8, v4i16, v2i32, v1i64], CCIfType<[x86mmx, v1i64], CCBitConvertToType<i64>>,
CCBitConvertToType<i64>>,
// The first 4 integer arguments are passed in integer registers. // The first 4 integer arguments are passed in integer registers.
CCIfType<[i32], CCAssignToRegWithShadow<[ECX , EDX , R8D , R9D ], CCIfType<[i32], CCAssignToRegWithShadow<[ECX , EDX , R8D , R9D ],
@@ -233,7 +232,7 @@ def CC_X86_Win64_C : CallingConv<[
CCIfType<[f80], CCAssignToStack<0, 0>>, CCIfType<[f80], CCAssignToStack<0, 0>>,
// __m64 vectors get 8-byte stack slots that are 8-byte aligned. // __m64 vectors get 8-byte stack slots that are 8-byte aligned.
CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 8>> CCIfType<[x86mmx,v1i64], CCAssignToStack<8, 8>>
]>; ]>;
def CC_X86_64_GHC : CallingConv<[ def CC_X86_64_GHC : CallingConv<[
@@ -269,7 +268,7 @@ def CC_X86_32_Common : CallingConv<[
// The first 3 __m64 (except for v1i64) vector arguments are passed in mmx // The first 3 __m64 (except for v1i64) vector arguments are passed in mmx
// registers if the call is not a vararg call. // registers if the call is not a vararg call.
CCIfNotVarArg<CCIfType<[v8i8, v4i16, v2i32], CCIfNotVarArg<CCIfType<[x86mmx],
CCAssignToReg<[MM0, MM1, MM2]>>>, CCAssignToReg<[MM0, MM1, MM2]>>>,
// Integer/Float values get stored in stack slots that are 4 bytes in // Integer/Float values get stored in stack slots that are 4 bytes in
@@ -300,7 +299,7 @@ def CC_X86_32_Common : CallingConv<[
// __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are // __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
// passed in the parameter area. // passed in the parameter area.
CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 4>>]>; CCIfType<[x86mmx,v1i64], CCAssignToStack<8, 4>>]>;
def CC_X86_32_C : CallingConv<[ def CC_X86_32_C : CallingConv<[
// Promote i8/i16 arguments to i32. // Promote i8/i16 arguments to i32.
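
Taken together, a plain x86_mmx value now gets the same assignments the old
64-bit vector types did; a sketch at the IR level (function name invented):
per RetCC_X86Common the return value below lands in MM0 (XMM0/XMM1 for the
Darwin x86-64 convention above), and under CC_X86_32_Common the argument is
passed in MM0-MM2 for non-vararg calls.

    define x86_mmx @ret_mmx(x86_mmx %a) {
      ret x86_mmx %a
    }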


@@ -218,10 +218,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand); setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand);
if (Subtarget->is64Bit()) { if (Subtarget->is64Bit()) {
setOperationAction(ISD::BIT_CONVERT , MVT::f64 , Expand); setOperationAction(ISD::BIT_CONVERT , MVT::f64 , Expand);
// Without SSE, i64->f64 goes through memory; i64->MMX is Legal. // Without SSE, i64->f64 goes through memory.
if (Subtarget->hasMMX() && !DisableMMX)
setOperationAction(ISD::BIT_CONVERT , MVT::i64 , Custom);
else
setOperationAction(ISD::BIT_CONVERT , MVT::i64 , Expand); setOperationAction(ISD::BIT_CONVERT , MVT::i64 , Expand);
} }
} }
@@ -615,91 +612,41 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// with -msoft-float, disable use of MMX as well. // with -msoft-float, disable use of MMX as well.
if (!UseSoftFloat && !DisableMMX && Subtarget->hasMMX()) { if (!UseSoftFloat && !DisableMMX && Subtarget->hasMMX()) {
addRegisterClass(MVT::x86mmx, X86::VR64RegisterClass, false); addRegisterClass(MVT::x86mmx, X86::VR64RegisterClass, false);
// No operations on x86mmx supported, everything uses intrinsics.
// FIXME: Remove the rest of this stuff.
addRegisterClass(MVT::v8i8, X86::VR64RegisterClass, false);
addRegisterClass(MVT::v4i16, X86::VR64RegisterClass, false);
addRegisterClass(MVT::v2i32, X86::VR64RegisterClass, false);
addRegisterClass(MVT::v1i64, X86::VR64RegisterClass, false);
setOperationAction(ISD::ADD, MVT::v8i8, Legal);
setOperationAction(ISD::ADD, MVT::v4i16, Legal);
setOperationAction(ISD::ADD, MVT::v2i32, Legal);
setOperationAction(ISD::ADD, MVT::v1i64, Legal);
setOperationAction(ISD::SUB, MVT::v8i8, Legal);
setOperationAction(ISD::SUB, MVT::v4i16, Legal);
setOperationAction(ISD::SUB, MVT::v2i32, Legal);
setOperationAction(ISD::SUB, MVT::v1i64, Legal);
setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
setOperationAction(ISD::MUL, MVT::v4i16, Legal);
setOperationAction(ISD::AND, MVT::v8i8, Promote);
AddPromotedToType (ISD::AND, MVT::v8i8, MVT::v1i64);
setOperationAction(ISD::AND, MVT::v4i16, Promote);
AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
setOperationAction(ISD::AND, MVT::v2i32, Promote);
AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
setOperationAction(ISD::AND, MVT::v1i64, Legal);
setOperationAction(ISD::OR, MVT::v8i8, Promote);
AddPromotedToType (ISD::OR, MVT::v8i8, MVT::v1i64);
setOperationAction(ISD::OR, MVT::v4i16, Promote);
AddPromotedToType (ISD::OR, MVT::v4i16, MVT::v1i64);
setOperationAction(ISD::OR, MVT::v2i32, Promote);
AddPromotedToType (ISD::OR, MVT::v2i32, MVT::v1i64);
setOperationAction(ISD::OR, MVT::v1i64, Legal);
setOperationAction(ISD::XOR, MVT::v8i8, Promote);
AddPromotedToType (ISD::XOR, MVT::v8i8, MVT::v1i64);
setOperationAction(ISD::XOR, MVT::v4i16, Promote);
AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
setOperationAction(ISD::XOR, MVT::v2i32, Promote);
AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
setOperationAction(ISD::XOR, MVT::v1i64, Legal);
setOperationAction(ISD::LOAD, MVT::v8i8, Promote);
AddPromotedToType (ISD::LOAD, MVT::v8i8, MVT::v1i64);
setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
setOperationAction(ISD::LOAD, MVT::v1i64, Legal);
setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
setOperationAction(ISD::SELECT, MVT::v8i8, Promote);
setOperationAction(ISD::SELECT, MVT::v4i16, Promote);
setOperationAction(ISD::SELECT, MVT::v2i32, Promote);
setOperationAction(ISD::SELECT, MVT::v1i64, Custom);
setOperationAction(ISD::VSETCC, MVT::v8i8, Custom);
setOperationAction(ISD::VSETCC, MVT::v4i16, Custom);
setOperationAction(ISD::VSETCC, MVT::v2i32, Custom);
if (!X86ScalarSSEf64 && Subtarget->is64Bit()) {
setOperationAction(ISD::BIT_CONVERT, MVT::v8i8, Custom);
setOperationAction(ISD::BIT_CONVERT, MVT::v4i16, Custom);
setOperationAction(ISD::BIT_CONVERT, MVT::v2i32, Custom);
setOperationAction(ISD::BIT_CONVERT, MVT::v1i64, Custom);
}
} }
// MMX-sized vectors (other than x86mmx) are expected to be expanded
// into smaller operations.
setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
setOperationAction(ISD::AND, MVT::v8i8, Expand);
setOperationAction(ISD::AND, MVT::v4i16, Expand);
setOperationAction(ISD::AND, MVT::v2i32, Expand);
setOperationAction(ISD::AND, MVT::v1i64, Expand);
setOperationAction(ISD::OR, MVT::v8i8, Expand);
setOperationAction(ISD::OR, MVT::v4i16, Expand);
setOperationAction(ISD::OR, MVT::v2i32, Expand);
setOperationAction(ISD::OR, MVT::v1i64, Expand);
setOperationAction(ISD::XOR, MVT::v8i8, Expand);
setOperationAction(ISD::XOR, MVT::v4i16, Expand);
setOperationAction(ISD::XOR, MVT::v2i32, Expand);
setOperationAction(ISD::XOR, MVT::v1i64, Expand);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::v8i8, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::v4i16, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::v2i32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::v1i64, Expand);
if (!UseSoftFloat && Subtarget->hasSSE1()) { if (!UseSoftFloat && Subtarget->hasSSE1()) {
addRegisterClass(MVT::v4f32, X86::VR128RegisterClass); addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);
@@ -821,10 +768,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
if (!DisableMMX && Subtarget->hasMMX()) {
setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
}
} }
if (Subtarget->hasSSE41()) { if (Subtarget->hasSSE41()) {
@@ -1210,8 +1153,7 @@ X86TargetLowering::findRepresentativeClass(EVT VT) const{
RRC = (Subtarget->is64Bit() RRC = (Subtarget->is64Bit()
? X86::GR64RegisterClass : X86::GR32RegisterClass); ? X86::GR64RegisterClass : X86::GR32RegisterClass);
break; break;
case MVT::v8i8: case MVT::v4i16: case MVT::x86mmx:
case MVT::v2i32: case MVT::v1i64:
RRC = X86::VR64RegisterClass; RRC = X86::VR64RegisterClass;
break; break;
case MVT::f32: case MVT::f64: case MVT::f32: case MVT::f64:
@@ -1345,12 +1287,11 @@ X86TargetLowering::LowerReturn(SDValue Chain,
// 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
// which is returned in RAX / RDX. // which is returned in RAX / RDX.
if (Subtarget->is64Bit()) { if (Subtarget->is64Bit()) {
if (ValVT.isVector() && ValVT.getSizeInBits() == 64) { if (ValVT == MVT::x86mmx) {
ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, ValToCopy);
if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, ValToCopy);
ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
ValToCopy); ValToCopy);
// If we don't have SSE2 available, convert to v4f32 so the generated // If we don't have SSE2 available, convert to v4f32 so the generated
// register is legal. // register is legal.
if (!Subtarget->hasSSE2()) if (!Subtarget->hasSSE2())
@@ -1650,7 +1591,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
RC = X86::VR256RegisterClass; RC = X86::VR256RegisterClass;
else if (RegVT.isVector() && RegVT.getSizeInBits() == 128) else if (RegVT.isVector() && RegVT.getSizeInBits() == 128)
RC = X86::VR128RegisterClass; RC = X86::VR128RegisterClass;
else if (RegVT.isVector() && RegVT.getSizeInBits() == 64) else if (RegVT == MVT::x86mmx)
RC = X86::VR64RegisterClass; RC = X86::VR64RegisterClass;
else else
llvm_unreachable("Unknown argument type!"); llvm_unreachable("Unknown argument type!");
@@ -1673,9 +1614,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
if (VA.isExtInLoc()) { if (VA.isExtInLoc()) {
// Handle MMX values passed in XMM regs. // Handle MMX values passed in XMM regs.
if (RegVT.isVector()) { if (RegVT.isVector()) {
ArgValue = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(),
ArgValue, DAG.getConstant(0, MVT::i64)); ArgValue);
ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
} else } else
ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
} }
@@ -2876,7 +2816,7 @@ static bool isUndefOrEqual(int Val, int CmpVal) {
/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference /// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference
/// the second operand. /// the second operand.
static bool isPSHUFDMask(const SmallVectorImpl<int> &Mask, EVT VT) { static bool isPSHUFDMask(const SmallVectorImpl<int> &Mask, EVT VT) {
if (VT == MVT::v4f32 || VT == MVT::v4i32 || VT == MVT::v4i16) if (VT == MVT::v4f32 || VT == MVT::v4i32 )
return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4); return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4);
if (VT == MVT::v2f64 || VT == MVT::v2i64) if (VT == MVT::v2f64 || VT == MVT::v2i64)
return (Mask[0] < 2 && Mask[1] < 2); return (Mask[0] < 2 && Mask[1] < 2);
@@ -3548,13 +3488,10 @@ static SDValue getZeroVector(EVT VT, bool HasSSE2, SelectionDAG &DAG,
DebugLoc dl) { DebugLoc dl) {
assert(VT.isVector() && "Expected a vector type"); assert(VT.isVector() && "Expected a vector type");
// Always build zero vectors as <4 x i32> or <2 x i32> bitcasted // Always build SSE zero vectors as <4 x i32> bitcasted
// to their dest type. This ensures they get CSE'd. // to their dest type. This ensures they get CSE'd.
SDValue Vec; SDValue Vec;
if (VT.getSizeInBits() == 64) { // MMX if (VT.getSizeInBits() == 128) { // SSE
SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
} else if (VT.getSizeInBits() == 128) {
if (HasSSE2) { // SSE2 if (HasSSE2) { // SSE2
SDValue Cst = DAG.getTargetConstant(0, MVT::i32); SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
@@ -3582,9 +3519,6 @@ static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
// type. This ensures they get CSE'd. // type. This ensures they get CSE'd.
SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
SDValue Vec; SDValue Vec;
if (VT.getSizeInBits() == 64) // MMX
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
else // SSE
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec); return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
} }
@@ -4025,8 +3959,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
unsigned NumBits, SelectionDAG &DAG, unsigned NumBits, SelectionDAG &DAG,
const TargetLowering &TLI, DebugLoc dl) { const TargetLowering &TLI, DebugLoc dl) {
bool isMMX = VT.getSizeInBits() == 64; EVT ShVT = MVT::v2i64;
EVT ShVT = isMMX ? MVT::v1i64 : MVT::v2i64;
unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL; unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL;
SrcOp = DAG.getNode(ISD::BIT_CONVERT, dl, ShVT, SrcOp); SrcOp = DAG.getNode(ISD::BIT_CONVERT, dl, ShVT, SrcOp);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
@@ -4180,10 +4113,10 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (ISD::isBuildVectorAllZeros(Op.getNode()) || if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
(Op.getValueType().getSizeInBits() != 256 && (Op.getValueType().getSizeInBits() != 256 &&
ISD::isBuildVectorAllOnes(Op.getNode()))) { ISD::isBuildVectorAllOnes(Op.getNode()))) {
// Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to // Canonicalize this to <4 x i32> (SSE) to
// 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are
// eliminated on x86-32 hosts. // eliminated on x86-32 hosts.
if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32) if (Op.getValueType() == MVT::v4i32)
return Op; return Op;
if (ISD::isBuildVectorAllOnes(Op.getNode())) if (ISD::isBuildVectorAllOnes(Op.getNode()))
@@ -4234,9 +4167,10 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (ExtVT == MVT::i64 && !Subtarget->is64Bit() && if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
(!IsAllConstants || Idx == 0)) { (!IsAllConstants || Idx == 0)) {
if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
// Handle MMX and SSE both. // Handle SSE only.
EVT VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32; assert(VT == MVT::v2i64 && "Expected an SSE value type!");
unsigned VecElts = VT == MVT::v2i64 ? 4 : 2; EVT VecVT = MVT::v4i32;
unsigned VecElts = 4;
// Truncate the value (which may itself be a constant) to i32, and // Truncate the value (which may itself be a constant) to i32, and
// convert it to a vector with movd (S2V+shuffle to zero extend). // convert it to a vector with movd (S2V+shuffle to zero extend).
@@ -4275,7 +4209,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
DAG); DAG);
} else if (ExtVT == MVT::i16 || ExtVT == MVT::i8) { } else if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item); Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
EVT MiddleVT = VT.getSizeInBits() == 64 ? MVT::v2i32 : MVT::v4i32; assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!");
EVT MiddleVT = MVT::v4i32;
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MiddleVT, Item); Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MiddleVT, Item);
Item = getShuffleVectorZeroOrUndef(Item, 0, true, Item = getShuffleVectorZeroOrUndef(Item, 0, true,
Subtarget->hasSSE2(), DAG); Subtarget->hasSSE2(), DAG);
@ -5418,11 +5353,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize); bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
// FIXME: this is somehow handled during isel by MMX pattern fragments. Remove // Shuffle operations on MMX not supported.
// the check or come up with another solution when all MMX move to intrinsics, if (isMMX)
// but don't allow this to be considered legal, we don't want vector_shuffle
// operations to be matched during isel anymore.
if (isMMX && SVOp->isSplat())
return Op; return Op;
// Vector shuffle lowering takes 3 steps: // Vector shuffle lowering takes 3 steps:
@ -5456,10 +5388,10 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG); return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);
if (X86::isMOVDDUPMask(SVOp) && HasSSE3 && V2IsUndef && if (X86::isMOVDDUPMask(SVOp) && HasSSE3 && V2IsUndef &&
RelaxedMayFoldVectorLoad(V1) && !isMMX) RelaxedMayFoldVectorLoad(V1))
return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG); return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
if (!isMMX && X86::isMOVHLPS_v_undef_Mask(SVOp)) if (X86::isMOVHLPS_v_undef_Mask(SVOp))
return getMOVHighToLow(Op, dl, DAG); return getMOVHighToLow(Op, dl, DAG);
// Use to match splats // Use to match splats
@ -5507,7 +5439,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
return V2; return V2;
if (ISD::isBuildVectorAllZeros(V1.getNode())) if (ISD::isBuildVectorAllZeros(V1.getNode()))
return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl); return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
if (!isMMX && !X86::isMOVLPMask(SVOp)) { if (!X86::isMOVLPMask(SVOp)) {
if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64)) if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG); return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
@ -5517,7 +5449,6 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
} }
// FIXME: fold these into legal mask. // FIXME: fold these into legal mask.
if (!isMMX) {
if (X86::isMOVLHPSMask(SVOp) && !X86::isUNPCKLMask(SVOp)) if (X86::isMOVLHPSMask(SVOp) && !X86::isUNPCKLMask(SVOp))
return getMOVLowToHigh(Op, dl, DAG, HasSSE2); return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
@ -5532,7 +5463,6 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
if (X86::isMOVLPMask(SVOp)) if (X86::isMOVLPMask(SVOp))
return getMOVLP(Op, dl, DAG, HasSSE2); return getMOVLP(Op, dl, DAG, HasSSE2);
}
if (ShouldXformToMOVHLPS(SVOp) || if (ShouldXformToMOVHLPS(SVOp) ||
ShouldXformToMOVLP(V1.getNode(), V2.getNode(), SVOp)) ShouldXformToMOVLP(V1.getNode(), V2.getNode(), SVOp))
@ -5573,12 +5503,10 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
} }
if (X86::isUNPCKLMask(SVOp)) if (X86::isUNPCKLMask(SVOp))
return (isMMX) ? return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V2, DAG);
Op : getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V2, DAG);
if (X86::isUNPCKHMask(SVOp)) if (X86::isUNPCKHMask(SVOp))
return (isMMX) ? return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V2, DAG);
Op : getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V2, DAG);
if (V2IsSplat) { if (V2IsSplat) {
// Normalize mask so all entries that point to V2 points to its first // Normalize mask so all entries that point to V2 points to its first
@ -5602,18 +5530,14 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
ShuffleVectorSDNode *NewSVOp = cast<ShuffleVectorSDNode>(NewOp); ShuffleVectorSDNode *NewSVOp = cast<ShuffleVectorSDNode>(NewOp);
if (X86::isUNPCKLMask(NewSVOp)) if (X86::isUNPCKLMask(NewSVOp))
return (isMMX) ? return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V2, V1, DAG);
NewOp : getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V2, V1, DAG);
if (X86::isUNPCKHMask(NewSVOp)) if (X86::isUNPCKHMask(NewSVOp))
return (isMMX) ? return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V2, V1, DAG);
NewOp : getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V2, V1, DAG);
} }
// FIXME: for mmx, bitcast v2i32 to v4i16 for shuffle.
// Normalize the node to match x86 shuffle ops if needed // Normalize the node to match x86 shuffle ops if needed
if (!isMMX && V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(SVOp)) if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(SVOp))
return CommuteVectorShuffle(SVOp, DAG); return CommuteVectorShuffle(SVOp, DAG);
// The checks below are all present in isShuffleMaskLegal, but they are // The checks below are all present in isShuffleMaskLegal, but they are
@ -5627,12 +5551,6 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
X86::getShufflePALIGNRImmediate(SVOp), X86::getShufflePALIGNRImmediate(SVOp),
DAG); DAG);
// Only a few shuffle masks are handled for 64-bit vectors (MMX), and
// 64-bit vectors which made to this point can't be handled, they are
// expanded.
if (isMMX)
return SDValue();
if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) && if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
SVOp->getSplatIndex() == 0 && V2IsUndef) { SVOp->getSplatIndex() == 0 && V2IsUndef) {
if (VT == MVT::v2f64) if (VT == MVT::v2f64)
@ -5681,8 +5599,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
return NewOp; return NewOp;
} }
// Handle all 4 wide cases with a number of shuffles except for MMX. // Handle all 4 wide cases with a number of shuffles.
if (NumElems == 4 && !isMMX) if (NumElems == 4)
return LowerVECTOR_SHUFFLE_4wide(SVOp, DAG); return LowerVECTOR_SHUFFLE_4wide(SVOp, DAG);
return SDValue(); return SDValue();
@ -5824,8 +5742,6 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op,
unsigned Opc; unsigned Opc;
if (VT == MVT::v8i16) if (VT == MVT::v8i16)
Opc = X86ISD::PINSRW; Opc = X86ISD::PINSRW;
else if (VT == MVT::v4i16)
Opc = X86ISD::MMX_PINSRW;
else if (VT == MVT::v16i8) else if (VT == MVT::v16i8)
Opc = X86ISD::PINSRB; Opc = X86ISD::PINSRB;
else else
@ -5881,8 +5797,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
if (N2.getValueType() != MVT::i32) if (N2.getValueType() != MVT::i32)
N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue()); N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
return DAG.getNode(VT == MVT::v8i16 ? X86ISD::PINSRW : X86ISD::MMX_PINSRW, return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
dl, VT, N0, N1, N2);
} }
return SDValue(); return SDValue();
} }
@ -5896,16 +5811,10 @@ X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0)); return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0)); SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
EVT VT = MVT::v2i32; assert(Op.getValueType().getSimpleVT().getSizeInBits() == 128 &&
switch (Op.getValueType().getSimpleVT().SimpleTy) { "Expected an SSE type!");
default: break;
case MVT::v16i8:
case MVT::v8i16:
VT = MVT::v4i32;
break;
}
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(),
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, AnyExt)); DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
} }
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
@ -6322,11 +6231,8 @@ SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
SelectionDAG &DAG) const { SelectionDAG &DAG) const {
EVT SrcVT = Op.getOperand(0).getValueType(); EVT SrcVT = Op.getOperand(0).getValueType();
if (SrcVT.isVector()) { if (SrcVT.isVector())
if (SrcVT == MVT::v2i32 && Op.getValueType() == MVT::v2f64)
return Op;
return SDValue(); return SDValue();
}
assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 && assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 &&
"Unknown SINT_TO_FP to lower!"); "Unknown SINT_TO_FP to lower!");
@ -6702,13 +6608,8 @@ FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const {
SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
SelectionDAG &DAG) const { SelectionDAG &DAG) const {
if (Op.getValueType().isVector()) { if (Op.getValueType().isVector())
if (Op.getValueType() == MVT::v2i32 &&
Op.getOperand(0).getValueType() == MVT::v2f64) {
return Op;
}
return SDValue(); return SDValue();
}
std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, true); std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, true);
SDValue FIST = Vals.first, StackSlot = Vals.second; SDValue FIST = Vals.first, StackSlot = Vals.second;
@ -7211,11 +7112,8 @@ SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const {
switch (VT.getSimpleVT().SimpleTy) { switch (VT.getSimpleVT().SimpleTy) {
default: break; default: break;
case MVT::v8i8:
case MVT::v16i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break; case MVT::v16i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break;
case MVT::v4i16:
case MVT::v8i16: EQOpc = X86ISD::PCMPEQW; GTOpc = X86ISD::PCMPGTW; break; case MVT::v8i16: EQOpc = X86ISD::PCMPEQW; GTOpc = X86ISD::PCMPGTW; break;
case MVT::v2i32:
case MVT::v4i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break; case MVT::v4i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break;
case MVT::v2i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break; case MVT::v2i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break;
} }
@ -7930,6 +7828,7 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 4); ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 4);
} else { } else {
ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2); ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2);
// FIXME this must be lowered to get rid of the invalid type.
} }
EVT VT = Op.getValueType(); EVT VT = Op.getValueType();
@ -8840,7 +8739,6 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
case X86ISD::PINSRB: return "X86ISD::PINSRB"; case X86ISD::PINSRB: return "X86ISD::PINSRB";
case X86ISD::PINSRW: return "X86ISD::PINSRW"; case X86ISD::PINSRW: return "X86ISD::PINSRW";
case X86ISD::MMX_PINSRW: return "X86ISD::MMX_PINSRW";
case X86ISD::PSHUFB: return "X86ISD::PSHUFB"; case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
case X86ISD::FMAX: return "X86ISD::FMAX"; case X86ISD::FMAX: return "X86ISD::FMAX";
case X86ISD::FMIN: return "X86ISD::FMIN"; case X86ISD::FMIN: return "X86ISD::FMIN";
@ -9711,7 +9609,6 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::TLSCall_64: case X86::TLSCall_64:
return EmitLoweredTLSCall(MI, BB); return EmitLoweredTLSCall(MI, BB);
case X86::CMOV_GR8: case X86::CMOV_GR8:
case X86::CMOV_V1I64:
case X86::CMOV_FR32: case X86::CMOV_FR32:
case X86::CMOV_FR64: case X86::CMOV_FR64:
case X86::CMOV_V4F32: case X86::CMOV_V4F32:


@ -128,11 +128,15 @@ namespace llvm {
/// relative displacements. /// relative displacements.
WrapperRIP, WrapperRIP,
/// MOVQ2DQ - Copies a 64-bit value from a vector to another vector. /// MOVQ2DQ - Copies a 64-bit value from an MMX vector to the low word
/// Can be used to move a vector value from a MMX register to a XMM /// of an XMM vector, with the high word zero filled.
/// register.
MOVQ2DQ, MOVQ2DQ,
/// MOVDQ2Q - Copies a 64-bit value from the low word of an XMM vector
/// to an MMX vector. If you think this is too close to the previous
/// mnemonic, so do I; blame Intel.
MOVDQ2Q,
/// PEXTRB - Extract an 8-bit value from a vector and zero extend it to /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
/// i32, corresponds to X86::PEXTRB. /// i32, corresponds to X86::PEXTRB.
PEXTRB, PEXTRB,
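A small user-level illustration of the two nodes documented above, using the SSE2 intrinsics that compile to movq2dq and movdq2q (assumes an x86 compiler with MMX and SSE2 enabled; this sketches the instruction semantics, not the selection code):

#include <emmintrin.h>   // SSE2; also pulls in <mmintrin.h> for __m64
#include <stdio.h>

int main(void) {
  __m64   m  = _mm_set_pi16(4, 3, 2, 1);  // value living in an MMX register
  __m128i x  = _mm_movpi64_epi64(m);      // movq2dq: low quadword = m, high quadword zeroed
  __m64   lo = _mm_movepi64_pi64(x);      // movdq2q: copy the low quadword back to MMX
  int w0 = _mm_extract_pi16(lo, 0);       // pextrw, still an MMX operation
  _mm_empty();                            // emms before any x87 floating point
  printf("%d\n", w0);                     // prints 1
  return 0;
}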


@ -15,51 +15,8 @@
// MMX Pattern Fragments // MMX Pattern Fragments
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
def load_mmx : PatFrag<(ops node:$ptr), (v1i64 (load node:$ptr))>; def load_mmx : PatFrag<(ops node:$ptr), (x86mmx (load node:$ptr))>;
def bc_mmx : PatFrag<(ops node:$in), (x86mmx (bitconvert node:$in))>;
def bc_v8i8 : PatFrag<(ops node:$in), (v8i8 (bitconvert node:$in))>;
def bc_v4i16 : PatFrag<(ops node:$in), (v4i16 (bitconvert node:$in))>;
def bc_v2i32 : PatFrag<(ops node:$in), (v2i32 (bitconvert node:$in))>;
def bc_v1i64 : PatFrag<(ops node:$in), (v1i64 (bitconvert node:$in))>;
//===----------------------------------------------------------------------===//
// MMX Masks
//===----------------------------------------------------------------------===//
// MMX_SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to
// PSHUFW imm.
def MMX_SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;
// Patterns for: vector_shuffle v1, v2, <2, 6, 3, 7, ...>
def mmx_unpckh : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;
// Patterns for: vector_shuffle v1, v2, <0, 4, 2, 5, ...>
def mmx_unpckl : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;
// Patterns for: vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
def mmx_unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;
// Patterns for: vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
def mmx_unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;
def mmx_pshufw : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], MMX_SHUFFLE_get_shuf_imm>;
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// SSE specific DAG Nodes. // SSE specific DAG Nodes.
@ -286,9 +243,7 @@ def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() >= 8; return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>; }]>;
def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>; def memopmmx : PatFrag<(ops node:$ptr), (x86mmx (memop64 node:$ptr))>;
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
// MOVNT Support // MOVNT Support
// Like 'store', but requires the non-temporal bit to be set // Like 'store', but requires the non-temporal bit to be set


@ -21,28 +21,7 @@
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
let Constraints = "$src1 = $dst" in { let Constraints = "$src1 = $dst" in {
// MMXI_binop_rm - Simple MMX binary operator based on llvm operator. // MMXI_binop_rm_int - Simple MMX binary operator based on intrinsic.
multiclass MMXI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
ValueType OpVT, bit Commutable = 0> {
def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
(ins VR64:$src1, VR64:$src2),
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
[(set VR64:$dst, (OpVT (OpNode VR64:$src1, VR64:$src2)))]> {
let isCommutable = Commutable;
}
def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst),
(ins VR64:$src1, i64mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
[(set VR64:$dst, (OpVT (OpNode VR64:$src1,
(bitconvert
(load_mmx addr:$src2)))))]>;
}
// MMXI_binop_rm_int - Simple MMX binary operator based on intrinsic, with a
// different name for the generated instructions than MMXI_binop_rm uses.
// Thus int and rm can coexist for different implementations of the same
// instruction. This is temporary during transition to intrinsic-only
// implementation; eventually the non-intrinsic forms will go away. When
// When this is cleaned up, remove the FIXME from X86RecognizableInstr.cpp. // When this is cleaned up, remove the FIXME from X86RecognizableInstr.cpp.
multiclass MMXI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId, multiclass MMXI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
bit Commutable = 0> { bit Commutable = 0> {
@ -59,26 +38,6 @@ let Constraints = "$src1 = $dst" in {
(bitconvert (load_mmx addr:$src2))))]>; (bitconvert (load_mmx addr:$src2))))]>;
} }
// MMXI_binop_rm_v1i64 - Simple MMX binary operator whose type is v1i64.
//
// FIXME: we could eliminate this and use MMXI_binop_rm instead if tblgen knew
// to collapse (bitconvert VT to VT) into its operand.
//
multiclass MMXI_binop_rm_v1i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
bit Commutable = 0> {
def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
(ins VR64:$src1, VR64:$src2),
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
[(set VR64:$dst, (v1i64 (OpNode VR64:$src1, VR64:$src2)))]> {
let isCommutable = Commutable;
}
def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst),
(ins VR64:$src1, i64mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
[(set VR64:$dst,
(OpNode VR64:$src1,(load_mmx addr:$src2)))]>;
}
multiclass MMXI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm, multiclass MMXI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
string OpcodeStr, Intrinsic IntId, string OpcodeStr, Intrinsic IntId,
Intrinsic IntId2> { Intrinsic IntId2> {
@ -100,7 +59,7 @@ let Constraints = "$src1 = $dst" in {
/// Unary MMX instructions requiring SSSE3. /// Unary MMX instructions requiring SSSE3.
multiclass SS3I_unop_rm_int_mm<bits<8> opc, string OpcodeStr, multiclass SS3I_unop_rm_int_mm<bits<8> opc, string OpcodeStr,
PatFrag mem_frag64, Intrinsic IntId64> { Intrinsic IntId64> {
def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src), def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR64:$dst, (IntId64 VR64:$src))]>; [(set VR64:$dst, (IntId64 VR64:$src))]>;
@ -108,13 +67,13 @@ multiclass SS3I_unop_rm_int_mm<bits<8> opc, string OpcodeStr,
def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src), def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR64:$dst, [(set VR64:$dst,
(IntId64 (bitconvert (mem_frag64 addr:$src))))]>; (IntId64 (bitconvert (memopmmx addr:$src))))]>;
} }
/// Binary MMX instructions requiring SSSE3. /// Binary MMX instructions requiring SSSE3.
let ImmT = NoImm, Constraints = "$src1 = $dst" in { let ImmT = NoImm, Constraints = "$src1 = $dst" in {
multiclass SS3I_binop_rm_int_mm<bits<8> opc, string OpcodeStr, multiclass SS3I_binop_rm_int_mm<bits<8> opc, string OpcodeStr,
PatFrag mem_frag64, Intrinsic IntId64> { Intrinsic IntId64> {
let isCommutable = 0 in let isCommutable = 0 in
def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
(ins VR64:$src1, VR64:$src2), (ins VR64:$src1, VR64:$src2),
@ -125,18 +84,12 @@ multiclass SS3I_binop_rm_int_mm<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
[(set VR64:$dst, [(set VR64:$dst,
(IntId64 VR64:$src1, (IntId64 VR64:$src1,
(bitconvert (mem_frag64 addr:$src2))))]>; (bitconvert (memopmmx addr:$src2))))]>;
} }
} }
/// PALIGN MMX instructions (require SSSE3). /// PALIGN MMX instructions (require SSSE3).
multiclass ssse3_palign_mm<string asm, Intrinsic IntId> { multiclass ssse3_palign_mm<string asm, Intrinsic IntId> {
def R64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
(ins VR64:$src1, VR64:$src2, i8imm:$src3),
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
def R64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
(ins VR64:$src1, i64mem:$src2, i8imm:$src3),
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
def R64irr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst), def R64irr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
(ins VR64:$src1, VR64:$src2, i8imm:$src3), (ins VR64:$src1, VR64:$src2, i8imm:$src3),
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@ -184,12 +137,12 @@ def MMX_FEMMS : MMXI<0x0E, RawFrm, (outs), (ins), "femms",
def MMX_MOVD64rr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src), def MMX_MOVD64rr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}", "movd\t{$src, $dst|$dst, $src}",
[(set VR64:$dst, [(set VR64:$dst,
(v2i32 (scalar_to_vector GR32:$src)))]>; (x86mmx (scalar_to_vector GR32:$src)))]>;
let canFoldAsLoad = 1, isReMaterializable = 1 in let canFoldAsLoad = 1 in
def MMX_MOVD64rm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src), def MMX_MOVD64rm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}", "movd\t{$src, $dst|$dst, $src}",
[(set VR64:$dst, [(set VR64:$dst,
(v2i32 (scalar_to_vector (loadi32 addr:$src))))]>; (x86mmx (scalar_to_vector (loadi32 addr:$src))))]>;
let mayStore = 1 in let mayStore = 1 in
def MMX_MOVD64mr : MMXI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR64:$src), def MMX_MOVD64mr : MMXI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR64:$src),
"movd\t{$src, $dst|$dst, $src}", []>; "movd\t{$src, $dst|$dst, $src}", []>;
@ -201,42 +154,41 @@ def MMX_MOVD64to64rr : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
"movd\t{$src, $dst|$dst, $src}", "movd\t{$src, $dst|$dst, $src}",
[]>; []>;
let neverHasSideEffects = 1 in
// These are 64 bit moves, but since the OS X assembler doesn't // These are 64 bit moves, but since the OS X assembler doesn't
// recognize a register-register movq, we write them as // recognize a register-register movq, we write them as
// movd. // movd.
def MMX_MOVD64from64rr : MMXRI<0x7E, MRMDestReg, def MMX_MOVD64from64rr : MMXRI<0x7E, MRMDestReg,
(outs GR64:$dst), (ins VR64:$src), (outs GR64:$dst), (ins VR64:$src),
"movd\t{$src, $dst|$dst, $src}", []>; "movd\t{$src, $dst|$dst, $src}",
[(set GR64:$dst,
(bitconvert VR64:$src))]>;
def MMX_MOVD64rrv164 : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src), def MMX_MOVD64rrv164 : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
"movd\t{$src, $dst|$dst, $src}", "movd\t{$src, $dst|$dst, $src}",
[(set VR64:$dst, [(set VR64:$dst,
(v1i64 (scalar_to_vector GR64:$src)))]>; (bitconvert GR64:$src))]>;
let neverHasSideEffects = 1 in let neverHasSideEffects = 1 in
def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src), def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
"movq\t{$src, $dst|$dst, $src}", []>; "movq\t{$src, $dst|$dst, $src}", []>;
let canFoldAsLoad = 1, isReMaterializable = 1 in let canFoldAsLoad = 1 in
def MMX_MOVQ64rm : MMXI<0x6F, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src), def MMX_MOVQ64rm : MMXI<0x6F, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}", "movq\t{$src, $dst|$dst, $src}",
[(set VR64:$dst, (load_mmx addr:$src))]>; [(set VR64:$dst, (load_mmx addr:$src))]>;
def MMX_MOVQ64mr : MMXI<0x7F, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src), def MMX_MOVQ64mr : MMXI<0x7F, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
"movq\t{$src, $dst|$dst, $src}", "movq\t{$src, $dst|$dst, $src}",
[(store (v1i64 VR64:$src), addr:$dst)]>; [(store (x86mmx VR64:$src), addr:$dst)]>;
def MMX_MOVDQ2Qrr : SDIi8<0xD6, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src), def MMX_MOVDQ2Qrr : SDIi8<0xD6, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
"movdq2q\t{$src, $dst|$dst, $src}", "movdq2q\t{$src, $dst|$dst, $src}",
[(set VR64:$dst, [(set VR64:$dst,
(v1i64 (bitconvert (x86mmx (bitconvert
(i64 (vector_extract (v2i64 VR128:$src), (i64 (vector_extract (v2i64 VR128:$src),
(iPTR 0))))))]>; (iPTR 0))))))]>;
def MMX_MOVQ2DQrr : SSDIi8<0xD6, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src), def MMX_MOVQ2DQrr : SSDIi8<0xD6, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
"movq2dq\t{$src, $dst|$dst, $src}", "movq2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, [(set VR128:$dst,
(movl immAllZerosV,
(v2i64 (scalar_to_vector (v2i64 (scalar_to_vector
(i64 (bitconvert (v1i64 VR64:$src)))))))]>; (i64 (bitconvert (x86mmx VR64:$src))))))]>;
let neverHasSideEffects = 1 in let neverHasSideEffects = 1 in
def MMX_MOVQ2FR64rr: SSDIi8<0xD6, MRMSrcReg, (outs FR64:$dst), (ins VR64:$src), def MMX_MOVQ2FR64rr: SSDIi8<0xD6, MRMSrcReg, (outs FR64:$dst), (ins VR64:$src),
@ -254,54 +206,40 @@ let AddedComplexity = 15 in
def MMX_MOVZDI2PDIrr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src), def MMX_MOVZDI2PDIrr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}", "movd\t{$src, $dst|$dst, $src}",
[(set VR64:$dst, [(set VR64:$dst,
(v2i32 (X86vzmovl (v2i32 (scalar_to_vector GR32:$src)))))]>; (x86mmx (X86vzmovl (x86mmx (scalar_to_vector GR32:$src)))))]>;
let AddedComplexity = 20 in let AddedComplexity = 20 in
def MMX_MOVZDI2PDIrm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), def MMX_MOVZDI2PDIrm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst),
(ins i32mem:$src), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}", "movd\t{$src, $dst|$dst, $src}",
[(set VR64:$dst, [(set VR64:$dst,
(v2i32 (X86vzmovl (v2i32 (x86mmx (X86vzmovl (x86mmx
(scalar_to_vector (loadi32 addr:$src))))))]>; (scalar_to_vector (loadi32 addr:$src))))))]>;
// Arithmetic Instructions // Arithmetic Instructions
defm MMX_PABSB : SS3I_unop_rm_int_mm<0x1C, "pabsb", memopv8i8, defm MMX_PABSB : SS3I_unop_rm_int_mm<0x1C, "pabsb", int_x86_ssse3_pabs_b>;
int_x86_ssse3_pabs_b>; defm MMX_PABSW : SS3I_unop_rm_int_mm<0x1D, "pabsw", int_x86_ssse3_pabs_w>;
defm MMX_PABSW : SS3I_unop_rm_int_mm<0x1D, "pabsw", memopv4i16, defm MMX_PABSD : SS3I_unop_rm_int_mm<0x1E, "pabsd", int_x86_ssse3_pabs_d>;
int_x86_ssse3_pabs_w>;
defm MMX_PABSD : SS3I_unop_rm_int_mm<0x1E, "pabsd", memopv2i32,
int_x86_ssse3_pabs_d>;
// -- Addition // -- Addition
defm MMX_PADDB : MMXI_binop_rm<0xFC, "paddb", add, v8i8, 1>, defm MMX_PADDB : MMXI_binop_rm_int<0xFC, "paddb", int_x86_mmx_padd_b, 1>;
MMXI_binop_rm_int<0xFC, "paddb", int_x86_mmx_padd_b, 1>; defm MMX_PADDW : MMXI_binop_rm_int<0xFD, "paddw", int_x86_mmx_padd_w, 1>;
defm MMX_PADDW : MMXI_binop_rm<0xFD, "paddw", add, v4i16, 1>, defm MMX_PADDD : MMXI_binop_rm_int<0xFE, "paddd", int_x86_mmx_padd_d, 1>;
MMXI_binop_rm_int<0xFD, "paddw", int_x86_mmx_padd_w, 1>; defm MMX_PADDQ : MMXI_binop_rm_int<0xD4, "paddq", int_x86_mmx_padd_q, 1>;
defm MMX_PADDD : MMXI_binop_rm<0xFE, "paddd", add, v2i32, 1>,
MMXI_binop_rm_int<0xFE, "paddd", int_x86_mmx_padd_d, 1>;
defm MMX_PADDQ : MMXI_binop_rm<0xD4, "paddq", add, v1i64, 1>,
MMXI_binop_rm_int<0xD4, "paddq", int_x86_mmx_padd_q, 1>;
defm MMX_PADDSB : MMXI_binop_rm_int<0xEC, "paddsb" , int_x86_mmx_padds_b, 1>; defm MMX_PADDSB : MMXI_binop_rm_int<0xEC, "paddsb" , int_x86_mmx_padds_b, 1>;
defm MMX_PADDSW : MMXI_binop_rm_int<0xED, "paddsw" , int_x86_mmx_padds_w, 1>; defm MMX_PADDSW : MMXI_binop_rm_int<0xED, "paddsw" , int_x86_mmx_padds_w, 1>;
defm MMX_PADDUSB : MMXI_binop_rm_int<0xDC, "paddusb", int_x86_mmx_paddus_b, 1>; defm MMX_PADDUSB : MMXI_binop_rm_int<0xDC, "paddusb", int_x86_mmx_paddus_b, 1>;
defm MMX_PADDUSW : MMXI_binop_rm_int<0xDD, "paddusw", int_x86_mmx_paddus_w, 1>; defm MMX_PADDUSW : MMXI_binop_rm_int<0xDD, "paddusw", int_x86_mmx_paddus_w, 1>;
defm MMX_PHADDW : SS3I_binop_rm_int_mm<0x01, "phaddw", memopv4i16, defm MMX_PHADDW : SS3I_binop_rm_int_mm<0x01, "phaddw", int_x86_ssse3_phadd_w>;
int_x86_ssse3_phadd_w>; defm MMX_PHADD : SS3I_binop_rm_int_mm<0x02, "phaddd", int_x86_ssse3_phadd_d>;
defm MMX_PHADD : SS3I_binop_rm_int_mm<0x02, "phaddd", memopv2i32, defm MMX_PHADDSW : SS3I_binop_rm_int_mm<0x03, "phaddsw",int_x86_ssse3_phadd_sw>;
int_x86_ssse3_phadd_d>;
defm MMX_PHADDSW : SS3I_binop_rm_int_mm<0x03, "phaddsw", memopv4i16,
int_x86_ssse3_phadd_sw>;
// -- Subtraction // -- Subtraction
defm MMX_PSUBB : MMXI_binop_rm<0xF8, "psubb", sub, v8i8>, defm MMX_PSUBB : MMXI_binop_rm_int<0xF8, "psubb", int_x86_mmx_psub_b>;
MMXI_binop_rm_int<0xF8, "psubb", int_x86_mmx_psub_b>; defm MMX_PSUBW : MMXI_binop_rm_int<0xF9, "psubw", int_x86_mmx_psub_w>;
defm MMX_PSUBW : MMXI_binop_rm<0xF9, "psubw", sub, v4i16>, defm MMX_PSUBD : MMXI_binop_rm_int<0xFA, "psubd", int_x86_mmx_psub_d>;
MMXI_binop_rm_int<0xF9, "psubw", int_x86_mmx_psub_w>; defm MMX_PSUBQ : MMXI_binop_rm_int<0xFB, "psubq", int_x86_mmx_psub_q>;
defm MMX_PSUBD : MMXI_binop_rm<0xFA, "psubd", sub, v2i32>,
MMXI_binop_rm_int<0xFA, "psubd", int_x86_mmx_psub_d>;
defm MMX_PSUBQ : MMXI_binop_rm<0xFB, "psubq", sub, v1i64>,
MMXI_binop_rm_int<0xFB, "psubq", int_x86_mmx_psub_q>;
defm MMX_PSUBSB : MMXI_binop_rm_int<0xE8, "psubsb" , int_x86_mmx_psubs_b>; defm MMX_PSUBSB : MMXI_binop_rm_int<0xE8, "psubsb" , int_x86_mmx_psubs_b>;
defm MMX_PSUBSW : MMXI_binop_rm_int<0xE9, "psubsw" , int_x86_mmx_psubs_w>; defm MMX_PSUBSW : MMXI_binop_rm_int<0xE9, "psubsw" , int_x86_mmx_psubs_w>;
@ -309,28 +247,24 @@ defm MMX_PSUBSW : MMXI_binop_rm_int<0xE9, "psubsw" , int_x86_mmx_psubs_w>;
defm MMX_PSUBUSB : MMXI_binop_rm_int<0xD8, "psubusb", int_x86_mmx_psubus_b>; defm MMX_PSUBUSB : MMXI_binop_rm_int<0xD8, "psubusb", int_x86_mmx_psubus_b>;
defm MMX_PSUBUSW : MMXI_binop_rm_int<0xD9, "psubusw", int_x86_mmx_psubus_w>; defm MMX_PSUBUSW : MMXI_binop_rm_int<0xD9, "psubusw", int_x86_mmx_psubus_w>;
defm MMX_PHSUBW : SS3I_binop_rm_int_mm<0x05, "phsubw", memopv4i16, defm MMX_PHSUBW : SS3I_binop_rm_int_mm<0x05, "phsubw", int_x86_ssse3_phsub_w>;
int_x86_ssse3_phsub_w>; defm MMX_PHSUBD : SS3I_binop_rm_int_mm<0x06, "phsubd", int_x86_ssse3_phsub_d>;
defm MMX_PHSUBD : SS3I_binop_rm_int_mm<0x06, "phsubd", memopv2i32, defm MMX_PHSUBSW : SS3I_binop_rm_int_mm<0x07, "phsubsw",int_x86_ssse3_phsub_sw>;
int_x86_ssse3_phsub_d>;
defm MMX_PHSUBSW : SS3I_binop_rm_int_mm<0x07, "phsubsw", memopv4i16,
int_x86_ssse3_phsub_sw>;
// -- Multiplication // -- Multiplication
defm MMX_PMULLW : MMXI_binop_rm<0xD5, "pmullw", mul, v4i16, 1>, defm MMX_PMULLW : MMXI_binop_rm_int<0xD5, "pmullw", int_x86_mmx_pmull_w, 1>;
MMXI_binop_rm_int<0xD5, "pmullw", int_x86_mmx_pmull_w, 1>;
defm MMX_PMULHW : MMXI_binop_rm_int<0xE5, "pmulhw", int_x86_mmx_pmulh_w, 1>; defm MMX_PMULHW : MMXI_binop_rm_int<0xE5, "pmulhw", int_x86_mmx_pmulh_w, 1>;
defm MMX_PMULHUW : MMXI_binop_rm_int<0xE4, "pmulhuw", int_x86_mmx_pmulhu_w, 1>; defm MMX_PMULHUW : MMXI_binop_rm_int<0xE4, "pmulhuw", int_x86_mmx_pmulhu_w, 1>;
defm MMX_PMULUDQ : MMXI_binop_rm_int<0xF4, "pmuludq", int_x86_mmx_pmulu_dq, 1>; defm MMX_PMULUDQ : MMXI_binop_rm_int<0xF4, "pmuludq", int_x86_mmx_pmulu_dq, 1>;
let isCommutable = 1 in let isCommutable = 1 in
defm MMX_PMULHRSW : SS3I_binop_rm_int_mm<0x0B, "pmulhrsw", memopv4i16, defm MMX_PMULHRSW : SS3I_binop_rm_int_mm<0x0B, "pmulhrsw",
int_x86_ssse3_pmul_hr_sw>; int_x86_ssse3_pmul_hr_sw>;
// -- Miscellanea // -- Miscellanea
defm MMX_PMADDWD : MMXI_binop_rm_int<0xF5, "pmaddwd", int_x86_mmx_pmadd_wd, 1>; defm MMX_PMADDWD : MMXI_binop_rm_int<0xF5, "pmaddwd", int_x86_mmx_pmadd_wd, 1>;
defm MMX_PMADDUBSW : SS3I_binop_rm_int_mm<0x04, "pmaddubsw", memopv8i8, defm MMX_PMADDUBSW : SS3I_binop_rm_int_mm<0x04, "pmaddubsw",
int_x86_ssse3_pmadd_ub_sw>; int_x86_ssse3_pmadd_ub_sw>;
defm MMX_PAVGB : MMXI_binop_rm_int<0xE0, "pavgb", int_x86_mmx_pavg_b, 1>; defm MMX_PAVGB : MMXI_binop_rm_int<0xE0, "pavgb", int_x86_mmx_pavg_b, 1>;
defm MMX_PAVGW : MMXI_binop_rm_int<0xE3, "pavgw", int_x86_mmx_pavg_w, 1>; defm MMX_PAVGW : MMXI_binop_rm_int<0xE3, "pavgw", int_x86_mmx_pavg_w, 1>;
@ -343,57 +277,18 @@ defm MMX_PMAXSW : MMXI_binop_rm_int<0xEE, "pmaxsw", int_x86_mmx_pmaxs_w, 1>;
defm MMX_PSADBW : MMXI_binop_rm_int<0xF6, "psadbw", int_x86_mmx_psad_bw, 1>; defm MMX_PSADBW : MMXI_binop_rm_int<0xF6, "psadbw", int_x86_mmx_psad_bw, 1>;
defm MMX_PSIGNB : SS3I_binop_rm_int_mm<0x08, "psignb", memopv8i8, defm MMX_PSIGNB : SS3I_binop_rm_int_mm<0x08, "psignb", int_x86_ssse3_psign_b>;
int_x86_ssse3_psign_b>; defm MMX_PSIGNW : SS3I_binop_rm_int_mm<0x09, "psignw", int_x86_ssse3_psign_w>;
defm MMX_PSIGNW : SS3I_binop_rm_int_mm<0x09, "psignw", memopv4i16, defm MMX_PSIGND : SS3I_binop_rm_int_mm<0x0A, "psignd", int_x86_ssse3_psign_d>;
int_x86_ssse3_psign_w>;
defm MMX_PSIGND : SS3I_binop_rm_int_mm<0x0A, "psignd", memopv2i32,
int_x86_ssse3_psign_d>;
let Constraints = "$src1 = $dst" in let Constraints = "$src1 = $dst" in
defm MMX_PALIGN : ssse3_palign_mm<"palignr", int_x86_mmx_palignr_b>; defm MMX_PALIGN : ssse3_palign_mm<"palignr", int_x86_mmx_palignr_b>;
let AddedComplexity = 5 in {
def : Pat<(v1i64 (palign:$src3 VR64:$src1, VR64:$src2)),
(MMX_PALIGNR64rr VR64:$src2, VR64:$src1,
(SHUFFLE_get_palign_imm VR64:$src3))>,
Requires<[HasSSSE3]>;
def : Pat<(v2i32 (palign:$src3 VR64:$src1, VR64:$src2)),
(MMX_PALIGNR64rr VR64:$src2, VR64:$src1,
(SHUFFLE_get_palign_imm VR64:$src3))>,
Requires<[HasSSSE3]>;
def : Pat<(v4i16 (palign:$src3 VR64:$src1, VR64:$src2)),
(MMX_PALIGNR64rr VR64:$src2, VR64:$src1,
(SHUFFLE_get_palign_imm VR64:$src3))>,
Requires<[HasSSSE3]>;
def : Pat<(v8i8 (palign:$src3 VR64:$src1, VR64:$src2)),
(MMX_PALIGNR64rr VR64:$src2, VR64:$src1,
(SHUFFLE_get_palign_imm VR64:$src3))>,
Requires<[HasSSSE3]>;
}
// Logical Instructions // Logical Instructions
defm MMX_PAND : MMXI_binop_rm_v1i64<0xDB, "pand", and, 1>, defm MMX_PAND : MMXI_binop_rm_int<0xDB, "pand", int_x86_mmx_pand, 1>;
MMXI_binop_rm_int<0xDB, "pand", int_x86_mmx_pand, 1>; defm MMX_POR : MMXI_binop_rm_int<0xEB, "por" , int_x86_mmx_por, 1>;
defm MMX_POR : MMXI_binop_rm_v1i64<0xEB, "por" , or, 1>, defm MMX_PXOR : MMXI_binop_rm_int<0xEF, "pxor", int_x86_mmx_pxor, 1>;
MMXI_binop_rm_int<0xEB, "por" , int_x86_mmx_por, 1>;
defm MMX_PXOR : MMXI_binop_rm_v1i64<0xEF, "pxor", xor, 1>,
MMXI_binop_rm_int<0xEF, "pxor", int_x86_mmx_pxor, 1>;
defm MMX_PANDN : MMXI_binop_rm_int<0xDF, "pandn", int_x86_mmx_pandn, 1>; defm MMX_PANDN : MMXI_binop_rm_int<0xDF, "pandn", int_x86_mmx_pandn, 1>;
let Constraints = "$src1 = $dst" in {
def MMX_PANDNrr : MMXI<0xDF, MRMSrcReg,
(outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
"pandn\t{$src2, $dst|$dst, $src2}",
[(set VR64:$dst, (v1i64 (and (vnot VR64:$src1),
VR64:$src2)))]>;
def MMX_PANDNrm : MMXI<0xDF, MRMSrcMem,
(outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
"pandn\t{$src2, $dst|$dst, $src2}",
[(set VR64:$dst, (v1i64 (and (vnot VR64:$src1),
(load addr:$src2))))]>;
}
// Shift Instructions // Shift Instructions
defm MMX_PSRLW : MMXI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw", defm MMX_PSRLW : MMXI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
int_x86_mmx_psrl_w, int_x86_mmx_psrli_w>; int_x86_mmx_psrl_w, int_x86_mmx_psrli_w>;
@ -414,12 +309,6 @@ defm MMX_PSRAW : MMXI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
defm MMX_PSRAD : MMXI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad", defm MMX_PSRAD : MMXI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
int_x86_mmx_psra_d, int_x86_mmx_psrai_d>; int_x86_mmx_psra_d, int_x86_mmx_psrai_d>;
// Shift up / down and insert zero's.
def : Pat<(v1i64 (X86vshl VR64:$src, (i8 imm:$amt))),
(MMX_PSLLQri VR64:$src, (GetLo32XForm imm:$amt))>;
def : Pat<(v1i64 (X86vshr VR64:$src, (i8 imm:$amt))),
(MMX_PSRLQri VR64:$src, (GetLo32XForm imm:$amt))>;
// Comparison Instructions // Comparison Instructions
defm MMX_PCMPEQB : MMXI_binop_rm_int<0x74, "pcmpeqb", int_x86_mmx_pcmpeq_b>; defm MMX_PCMPEQB : MMXI_binop_rm_int<0x74, "pcmpeqb", int_x86_mmx_pcmpeq_b>;
defm MMX_PCMPEQW : MMXI_binop_rm_int<0x75, "pcmpeqw", int_x86_mmx_pcmpeq_w>; defm MMX_PCMPEQW : MMXI_binop_rm_int<0x75, "pcmpeqw", int_x86_mmx_pcmpeq_w>;
@ -429,84 +318,7 @@ defm MMX_PCMPGTB : MMXI_binop_rm_int<0x64, "pcmpgtb", int_x86_mmx_pcmpgt_b>;
defm MMX_PCMPGTW : MMXI_binop_rm_int<0x65, "pcmpgtw", int_x86_mmx_pcmpgt_w>; defm MMX_PCMPGTW : MMXI_binop_rm_int<0x65, "pcmpgtw", int_x86_mmx_pcmpgt_w>;
defm MMX_PCMPGTD : MMXI_binop_rm_int<0x66, "pcmpgtd", int_x86_mmx_pcmpgt_d>; defm MMX_PCMPGTD : MMXI_binop_rm_int<0x66, "pcmpgtd", int_x86_mmx_pcmpgt_d>;
// Conversion Instructions
// -- Unpack Instructions // -- Unpack Instructions
let Constraints = "$src1 = $dst" in {
// Unpack High Packed Data Instructions
def MMX_PUNPCKHBWrr : MMXI<0x68, MRMSrcReg,
(outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
"punpckhbw\t{$src2, $dst|$dst, $src2}",
[(set VR64:$dst,
(v8i8 (mmx_unpckh VR64:$src1, VR64:$src2)))]>;
def MMX_PUNPCKHBWrm : MMXI<0x68, MRMSrcMem,
(outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
"punpckhbw\t{$src2, $dst|$dst, $src2}",
[(set VR64:$dst,
(v8i8 (mmx_unpckh VR64:$src1,
(bc_v8i8 (load_mmx addr:$src2)))))]>;
def MMX_PUNPCKHWDrr : MMXI<0x69, MRMSrcReg,
(outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
"punpckhwd\t{$src2, $dst|$dst, $src2}",
[(set VR64:$dst,
(v4i16 (mmx_unpckh VR64:$src1, VR64:$src2)))]>;
def MMX_PUNPCKHWDrm : MMXI<0x69, MRMSrcMem,
(outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
"punpckhwd\t{$src2, $dst|$dst, $src2}",
[(set VR64:$dst,
(v4i16 (mmx_unpckh VR64:$src1,
(bc_v4i16 (load_mmx addr:$src2)))))]>;
def MMX_PUNPCKHDQrr : MMXI<0x6A, MRMSrcReg,
(outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
"punpckhdq\t{$src2, $dst|$dst, $src2}",
[(set VR64:$dst,
(v2i32 (mmx_unpckh VR64:$src1, VR64:$src2)))]>;
def MMX_PUNPCKHDQrm : MMXI<0x6A, MRMSrcMem,
(outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
"punpckhdq\t{$src2, $dst|$dst, $src2}",
[(set VR64:$dst,
(v2i32 (mmx_unpckh VR64:$src1,
(bc_v2i32 (load_mmx addr:$src2)))))]>;
// Unpack Low Packed Data Instructions
def MMX_PUNPCKLBWrr : MMXI<0x60, MRMSrcReg,
(outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
"punpcklbw\t{$src2, $dst|$dst, $src2}",
[(set VR64:$dst,
(v8i8 (mmx_unpckl VR64:$src1, VR64:$src2)))]>;
def MMX_PUNPCKLBWrm : MMXI<0x60, MRMSrcMem,
(outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
"punpcklbw\t{$src2, $dst|$dst, $src2}",
[(set VR64:$dst,
(v8i8 (mmx_unpckl VR64:$src1,
(bc_v8i8 (load_mmx addr:$src2)))))]>;
def MMX_PUNPCKLWDrr : MMXI<0x61, MRMSrcReg,
(outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
"punpcklwd\t{$src2, $dst|$dst, $src2}",
[(set VR64:$dst,
(v4i16 (mmx_unpckl VR64:$src1, VR64:$src2)))]>;
def MMX_PUNPCKLWDrm : MMXI<0x61, MRMSrcMem,
(outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
"punpcklwd\t{$src2, $dst|$dst, $src2}",
[(set VR64:$dst,
(v4i16 (mmx_unpckl VR64:$src1,
(bc_v4i16 (load_mmx addr:$src2)))))]>;
def MMX_PUNPCKLDQrr : MMXI<0x62, MRMSrcReg,
(outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
"punpckldq\t{$src2, $dst|$dst, $src2}",
[(set VR64:$dst,
(v2i32 (mmx_unpckl VR64:$src1, VR64:$src2)))]>;
def MMX_PUNPCKLDQrm : MMXI<0x62, MRMSrcMem,
(outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
"punpckldq\t{$src2, $dst|$dst, $src2}",
[(set VR64:$dst,
(v2i32 (mmx_unpckl VR64:$src1,
(bc_v2i32 (load_mmx addr:$src2)))))]>;
}
defm MMX_PUNPCKHBW : MMXI_binop_rm_int<0x68, "punpckhbw", defm MMX_PUNPCKHBW : MMXI_binop_rm_int<0x68, "punpckhbw",
int_x86_mmx_punpckhbw>; int_x86_mmx_punpckhbw>;
defm MMX_PUNPCKHWD : MMXI_binop_rm_int<0x69, "punpckhwd", defm MMX_PUNPCKHWD : MMXI_binop_rm_int<0x69, "punpckhwd",
@ -526,61 +338,9 @@ defm MMX_PACKSSDW : MMXI_binop_rm_int<0x6B, "packssdw", int_x86_mmx_packssdw>;
defm MMX_PACKUSWB : MMXI_binop_rm_int<0x67, "packuswb", int_x86_mmx_packuswb>; defm MMX_PACKUSWB : MMXI_binop_rm_int<0x67, "packuswb", int_x86_mmx_packuswb>;
// -- Shuffle Instructions // -- Shuffle Instructions
def MMX_PSHUFWri : MMXIi8<0x70, MRMSrcReg, defm MMX_PSHUFB : SS3I_binop_rm_int_mm<0x00, "pshufb", int_x86_ssse3_pshuf_b>;
(outs VR64:$dst), (ins VR64:$src1, i8imm:$src2),
"pshufw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR64:$dst,
(v4i16 (mmx_pshufw:$src2 VR64:$src1, (undef))))]>;
def MMX_PSHUFWmi : MMXIi8<0x70, MRMSrcMem,
(outs VR64:$dst), (ins i64mem:$src1, i8imm:$src2),
"pshufw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR64:$dst,
(mmx_pshufw:$src2 (bc_v4i16 (load_mmx addr:$src1)),
(undef)))]>;
defm MMX_PSHUFB : SS3I_binop_rm_int_mm<0x00, "pshufb", memopv8i8,
int_x86_ssse3_pshuf_b>;
// Shuffle with PALIGN
def : Pat<(v1i64 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))),
(MMX_PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>;
def : Pat<(v2i32 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))),
(MMX_PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>;
def : Pat<(v4i16 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))),
(MMX_PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>;
def : Pat<(v8i8 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))),
(MMX_PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>;
// -- Conversion Instructions // -- Conversion Instructions
let neverHasSideEffects = 1 in {
def MMX_CVTPI2PDrr : MMX2I<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
"cvtpi2pd\t{$src, $dst|$dst, $src}", []>;
let mayLoad = 1 in
def MMX_CVTPI2PDrm : MMX2I<0x2A, MRMSrcMem, (outs VR128:$dst),
(ins i64mem:$src),
"cvtpi2pd\t{$src, $dst|$dst, $src}", []>;
def MMX_CVTPI2PSrr : MMXI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
"cvtpi2ps\t{$src, $dst|$dst, $src}", []>;
let mayLoad = 1 in
def MMX_CVTPI2PSrm : MMXI<0x2A, MRMSrcMem, (outs VR128:$dst),
(ins i64mem:$src),
"cvtpi2ps\t{$src, $dst|$dst, $src}", []>;
def MMX_CVTTPD2PIrr : MMX2I<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
"cvttpd2pi\t{$src, $dst|$dst, $src}", []>;
let mayLoad = 1 in
def MMX_CVTTPD2PIrm : MMX2I<0x2C, MRMSrcMem, (outs VR64:$dst),
(ins f128mem:$src),
"cvttpd2pi\t{$src, $dst|$dst, $src}", []>;
def MMX_CVTTPS2PIrr : MMXI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
"cvttps2pi\t{$src, $dst|$dst, $src}", []>;
let mayLoad = 1 in
def MMX_CVTTPS2PIrm : MMXI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
"cvttps2pi\t{$src, $dst|$dst, $src}", []>;
} // end neverHasSideEffects
// Intrinsic forms.
defm MMX_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi, defm MMX_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi,
f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}", f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}",
SSEPackedSingle>, TB; SSEPackedSingle>, TB;
@ -602,43 +362,14 @@ let Constraints = "$src1 = $dst" in {
i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}", i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
SSEPackedSingle>, TB; SSEPackedSingle>, TB;
} }
// MMX->MMX vector casts.
def : Pat<(v2f64 (sint_to_fp (v2i32 VR64:$src))),
(MMX_CVTPI2PDrr VR64:$src)>, Requires<[HasSSE2]>;
def : Pat<(v2i32 (fp_to_sint (v2f64 VR128:$src))),
(MMX_CVTTPD2PIrr VR128:$src)>, Requires<[HasSSE2]>;
// Extract / Insert // Extract / Insert
def MMX_X86pinsrw : SDNode<"X86ISD::MMX_PINSRW",
SDTypeProfile<1, 3, [SDTCisVT<0, v4i16>, SDTCisSameAs<0,1>,
SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def MMX_PEXTRWri : MMXIi8<0xC5, MRMSrcReg,
(outs GR32:$dst), (ins VR64:$src1, i16i8imm:$src2),
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (X86pextrw (v4i16 VR64:$src1),
(iPTR imm:$src2)))]>;
def MMX_PEXTRWirri: MMXIi8<0xC5, MRMSrcReg, def MMX_PEXTRWirri: MMXIi8<0xC5, MRMSrcReg,
(outs GR32:$dst), (ins VR64:$src1, i32i8imm:$src2), (outs GR32:$dst), (ins VR64:$src1, i32i8imm:$src2),
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}", "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (int_x86_mmx_pextr_w VR64:$src1, [(set GR32:$dst, (int_x86_mmx_pextr_w VR64:$src1,
(iPTR imm:$src2)))]>; (iPTR imm:$src2)))]>;
let Constraints = "$src1 = $dst" in { let Constraints = "$src1 = $dst" in {
def MMX_PINSRWrri : MMXIi8<0xC4, MRMSrcReg,
(outs VR64:$dst),
(ins VR64:$src1, GR32:$src2,i16i8imm:$src3),
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR64:$dst, (v4i16 (MMX_X86pinsrw (v4i16 VR64:$src1),
GR32:$src2,(iPTR imm:$src3))))]>;
def MMX_PINSRWrmi : MMXIi8<0xC4, MRMSrcMem,
(outs VR64:$dst),
(ins VR64:$src1, i16mem:$src2, i16i8imm:$src3),
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR64:$dst,
(v4i16 (MMX_X86pinsrw (v4i16 VR64:$src1),
(i32 (anyext (loadi16 addr:$src2))),
(iPTR imm:$src3))))]>;
def MMX_PINSRWirri : MMXIi8<0xC4, MRMSrcReg, def MMX_PINSRWirri : MMXIi8<0xC4, MRMSrcReg,
(outs VR64:$dst), (outs VR64:$dst),
(ins VR64:$src1, GR32:$src2, i32i8imm:$src3), (ins VR64:$src1, GR32:$src2, i32i8imm:$src3),
@ -655,9 +386,16 @@ let Constraints = "$src1 = $dst" in {
(iPTR imm:$src3)))]>; (iPTR imm:$src3)))]>;
} }
// Mask creation
def MMX_PMOVMSKBrr : MMXI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR64:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
[(set GR32:$dst,
(int_x86_mmx_pmovmskb VR64:$src))]>;
// MMX to XMM for vector types // MMX to XMM for vector types
def MMX_X86movq2dq : SDNode<"X86ISD::MOVQ2DQ", SDTypeProfile<1, 1, def MMX_X86movq2dq : SDNode<"X86ISD::MOVQ2DQ", SDTypeProfile<1, 1,
[SDTCisVT<0, v2i64>, SDTCisVT<1, v1i64>]>>; [SDTCisVT<0, v2i64>, SDTCisVT<1, x86mmx>]>>;
def : Pat<(v2i64 (MMX_X86movq2dq VR64:$src)), def : Pat<(v2i64 (MMX_X86movq2dq VR64:$src)),
(v2i64 (MMX_MOVQ2DQrr VR64:$src))>; (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
@ -665,14 +403,19 @@ def : Pat<(v2i64 (MMX_X86movq2dq VR64:$src)),
def : Pat<(v2i64 (MMX_X86movq2dq (load_mmx addr:$src))), def : Pat<(v2i64 (MMX_X86movq2dq (load_mmx addr:$src))),
(v2i64 (MOVQI2PQIrm addr:$src))>; (v2i64 (MOVQI2PQIrm addr:$src))>;
def : Pat<(v2i64 (MMX_X86movq2dq (v1i64 (bitconvert def : Pat<(v2i64 (MMX_X86movq2dq
(v2i32 (scalar_to_vector (loadi32 addr:$src))))))), (x86mmx (scalar_to_vector (loadi32 addr:$src))))),
(v2i64 (MOVDI2PDIrm addr:$src))>; (v2i64 (MOVDI2PDIrm addr:$src))>;
// Mask creation // Low word of XMM to MMX.
def MMX_PMOVMSKBrr : MMXI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR64:$src), def MMX_X86movdq2q : SDNode<"X86ISD::MOVDQ2Q", SDTypeProfile<1, 1,
"pmovmskb\t{$src, $dst|$dst, $src}", [SDTCisVT<0, x86mmx>, SDTCisVT<1, v2i64>]>>;
[(set GR32:$dst, (int_x86_mmx_pmovmskb VR64:$src))]>;
def : Pat<(x86mmx (MMX_X86movdq2q VR128:$src)),
(x86mmx (MMX_MOVDQ2Qrr VR128:$src))>;
def : Pat<(x86mmx (MMX_X86movdq2q (loadv2i64 addr:$src))),
(x86mmx (MMX_MOVQ64rm addr:$src))>;
// Misc. // Misc.
let Uses = [EDI] in let Uses = [EDI] in
@ -684,181 +427,14 @@ def MMX_MASKMOVQ64: MMXI64<0xF7, MRMSrcReg, (outs), (ins VR64:$src, VR64:$mask),
"maskmovq\t{$mask, $src|$src, $mask}", "maskmovq\t{$mask, $src|$src, $mask}",
[(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, RDI)]>; [(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, RDI)]>;
//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//
// Alias instructions that map zero vector to pxor.
let isReMaterializable = 1, isCodeGenOnly = 1 in {
// FIXME: Change encoding to pseudo.
def MMX_V_SET0 : MMXI<0xEF, MRMInitReg, (outs VR64:$dst), (ins), "",
[(set VR64:$dst, (v2i32 immAllZerosV))]>;
def MMX_V_SETALLONES : MMXI<0x76, MRMInitReg, (outs VR64:$dst), (ins), "",
[(set VR64:$dst, (v2i32 immAllOnesV))]>;
}
let Predicates = [HasMMX] in {
def : Pat<(v1i64 immAllZerosV), (MMX_V_SET0)>;
def : Pat<(v4i16 immAllZerosV), (MMX_V_SET0)>;
def : Pat<(v8i8 immAllZerosV), (MMX_V_SET0)>;
}
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//
// Store 64-bit integer vector values.
def : Pat<(store (v8i8 VR64:$src), addr:$dst),
(MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(store (v4i16 VR64:$src), addr:$dst),
(MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(store (v2i32 VR64:$src), addr:$dst),
(MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(store (v1i64 VR64:$src), addr:$dst),
(MMX_MOVQ64mr addr:$dst, VR64:$src)>;
// Bit convert.
def : Pat<(v8i8 (bitconvert (v1i64 VR64:$src))), (v8i8 VR64:$src)>;
def : Pat<(v8i8 (bitconvert (v2i32 VR64:$src))), (v8i8 VR64:$src)>;
def : Pat<(v8i8 (bitconvert (v4i16 VR64:$src))), (v8i8 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1i64 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1i64 VR64:$src))), (v2i32 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 VR64:$src))), (v2i32 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8 VR64:$src))), (v2i32 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v2i32 VR64:$src))), (v1i64 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 VR64:$src))), (v1i64 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8 VR64:$src))), (v1i64 VR64:$src)>;
// 64-bit bit convert. // 64-bit bit convert.
def : Pat<(v1i64 (bitconvert (i64 GR64:$src))), def : Pat<(x86mmx (bitconvert (i64 GR64:$src))),
(MMX_MOVD64to64rr GR64:$src)>; (MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v2i32 (bitconvert (i64 GR64:$src))), def : Pat<(i64 (bitconvert (x86mmx VR64:$src))),
(MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v4i16 (bitconvert (i64 GR64:$src))),
(MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v8i8 (bitconvert (i64 GR64:$src))),
(MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(i64 (bitconvert (v1i64 VR64:$src))),
(MMX_MOVD64from64rr VR64:$src)>; (MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v2i32 VR64:$src))), def : Pat<(f64 (bitconvert (x86mmx VR64:$src))),
(MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v4i16 VR64:$src))),
(MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v8i8 VR64:$src))),
(MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(f64 (bitconvert (v1i64 VR64:$src))),
(MMX_MOVQ2FR64rr VR64:$src)>; (MMX_MOVQ2FR64rr VR64:$src)>;
def : Pat<(f64 (bitconvert (v2i32 VR64:$src))), def : Pat<(x86mmx (bitconvert (f64 FR64:$src))),
(MMX_MOVQ2FR64rr VR64:$src)>;
def : Pat<(f64 (bitconvert (v4i16 VR64:$src))),
(MMX_MOVQ2FR64rr VR64:$src)>;
def : Pat<(f64 (bitconvert (v8i8 VR64:$src))),
(MMX_MOVQ2FR64rr VR64:$src)>;
def : Pat<(v1i64 (bitconvert (f64 FR64:$src))),
(MMX_MOVFR642Qrr FR64:$src)>;
def : Pat<(v2i32 (bitconvert (f64 FR64:$src))),
(MMX_MOVFR642Qrr FR64:$src)>;
def : Pat<(v4i16 (bitconvert (f64 FR64:$src))),
(MMX_MOVFR642Qrr FR64:$src)>;
def : Pat<(v8i8 (bitconvert (f64 FR64:$src))),
(MMX_MOVFR642Qrr FR64:$src)>; (MMX_MOVFR642Qrr FR64:$src)>;
let AddedComplexity = 20 in {
def : Pat<(v2i32 (X86vzmovl (bc_v2i32 (load_mmx addr:$src)))),
(MMX_MOVZDI2PDIrm addr:$src)>;
}
// Clear top half.
let AddedComplexity = 15 in {
def : Pat<(v2i32 (X86vzmovl VR64:$src)),
(MMX_PUNPCKLDQrr VR64:$src, (v2i32 (MMX_V_SET0)))>;
}
// Patterns to perform canonical versions of vector shuffling.
let AddedComplexity = 10 in {
def : Pat<(v8i8 (mmx_unpckl_undef VR64:$src, (undef))),
(MMX_PUNPCKLBWrr VR64:$src, VR64:$src)>;
def : Pat<(v4i16 (mmx_unpckl_undef VR64:$src, (undef))),
(MMX_PUNPCKLWDrr VR64:$src, VR64:$src)>;
def : Pat<(v2i32 (mmx_unpckl_undef VR64:$src, (undef))),
(MMX_PUNPCKLDQrr VR64:$src, VR64:$src)>;
}
let AddedComplexity = 10 in {
def : Pat<(v8i8 (mmx_unpckh_undef VR64:$src, (undef))),
(MMX_PUNPCKHBWrr VR64:$src, VR64:$src)>;
def : Pat<(v4i16 (mmx_unpckh_undef VR64:$src, (undef))),
(MMX_PUNPCKHWDrr VR64:$src, VR64:$src)>;
def : Pat<(v2i32 (mmx_unpckh_undef VR64:$src, (undef))),
(MMX_PUNPCKHDQrr VR64:$src, VR64:$src)>;
}
// Some special case PANDN patterns.
// FIXME: Get rid of these.
def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v2i32 immAllOnesV))),
VR64:$src2)),
(MMX_PANDNrr VR64:$src1, VR64:$src2)>;
def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v2i32 immAllOnesV))),
(load addr:$src2))),
(MMX_PANDNrm VR64:$src1, addr:$src2)>;
// Move MMX to lower 64-bit of XMM
def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert (v8i8 VR64:$src))))),
(v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert (v4i16 VR64:$src))))),
(v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert (v2i32 VR64:$src))))),
(v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert (v1i64 VR64:$src))))),
(v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
// Move lower 64-bit of XMM to MMX.
def : Pat<(v2i32 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
(iPTR 0))))),
(v2i32 (MMX_MOVDQ2Qrr VR128:$src))>;
def : Pat<(v4i16 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
(iPTR 0))))),
(v4i16 (MMX_MOVDQ2Qrr VR128:$src))>;
def : Pat<(v8i8 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
(iPTR 0))))),
(v8i8 (MMX_MOVDQ2Qrr VR128:$src))>;
// Patterns for vector comparisons
def : Pat<(v8i8 (X86pcmpeqb VR64:$src1, VR64:$src2)),
(MMX_PCMPEQBirr VR64:$src1, VR64:$src2)>;
def : Pat<(v8i8 (X86pcmpeqb VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
(MMX_PCMPEQBirm VR64:$src1, addr:$src2)>;
def : Pat<(v4i16 (X86pcmpeqw VR64:$src1, VR64:$src2)),
(MMX_PCMPEQWirr VR64:$src1, VR64:$src2)>;
def : Pat<(v4i16 (X86pcmpeqw VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
(MMX_PCMPEQWirm VR64:$src1, addr:$src2)>;
def : Pat<(v2i32 (X86pcmpeqd VR64:$src1, VR64:$src2)),
(MMX_PCMPEQDirr VR64:$src1, VR64:$src2)>;
def : Pat<(v2i32 (X86pcmpeqd VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
(MMX_PCMPEQDirm VR64:$src1, addr:$src2)>;
def : Pat<(v8i8 (X86pcmpgtb VR64:$src1, VR64:$src2)),
(MMX_PCMPGTBirr VR64:$src1, VR64:$src2)>;
def : Pat<(v8i8 (X86pcmpgtb VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
(MMX_PCMPGTBirm VR64:$src1, addr:$src2)>;
def : Pat<(v4i16 (X86pcmpgtw VR64:$src1, VR64:$src2)),
(MMX_PCMPGTWirr VR64:$src1, VR64:$src2)>;
def : Pat<(v4i16 (X86pcmpgtw VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
(MMX_PCMPGTWirm VR64:$src1, addr:$src2)>;
def : Pat<(v2i32 (X86pcmpgtd VR64:$src1, VR64:$src2)),
(MMX_PCMPGTDirr VR64:$src1, VR64:$src2)>;
def : Pat<(v2i32 (X86pcmpgtd VR64:$src1, (bitconvert (load_mmx addr:$src2)))),
(MMX_PCMPGTDirm VR64:$src1, addr:$src2)>;
// CMOV* - Used to implement the SELECT DAG operation. Expanded after
// instruction selection into a branch sequence.
let Uses = [EFLAGS], usesCustomInserter = 1 in {
def CMOV_V1I64 : I<0, Pseudo,
(outs VR64:$dst), (ins VR64:$t, VR64:$f, i8imm:$cond),
"#CMOV_V1I64 PSEUDO!",
[(set VR64:$dst,
(v1i64 (X86cmov VR64:$t, VR64:$f, imm:$cond,
EFLAGS)))]>;
}
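The bit-convert patterns retained above collapse to one pair of GPR<->MMX moves for i64 and one pair of FR64<->MMX moves for f64. The GPR direction can be exercised from user code with the 64-bit conversion intrinsics (x86-64 only; assumes the compiler's <mmintrin.h> provides _mm_cvtsi64_m64 and _mm_cvtm64_si64, as GCC and Clang do):

#include <mmintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint64_t bits = 0x0001000200030004ULL;
  __m64    m    = _mm_cvtsi64_m64((long long)bits); // movd GR64 -> MM, i.e. bitcast i64 to x86mmx
  uint64_t back = (uint64_t)_mm_cvtm64_si64(m);     // movd MM -> GR64, i.e. bitcast x86mmx to i64
  _mm_empty();                                      // emms
  printf("round trip %s\n", back == bits ? "ok" : "broken");
  return back != bits;
}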


@ -377,9 +377,6 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
case X86::SETB_C64r: LowerUnaryToTwoAddr(OutMI, X86::SBB64rr); break; case X86::SETB_C64r: LowerUnaryToTwoAddr(OutMI, X86::SBB64rr); break;
case X86::MOV8r0: LowerUnaryToTwoAddr(OutMI, X86::XOR8rr); break; case X86::MOV8r0: LowerUnaryToTwoAddr(OutMI, X86::XOR8rr); break;
case X86::MOV32r0: LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); break; case X86::MOV32r0: LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); break;
case X86::MMX_V_SET0: LowerUnaryToTwoAddr(OutMI, X86::MMX_PXORrr); break;
case X86::MMX_V_SETALLONES:
LowerUnaryToTwoAddr(OutMI, X86::MMX_PCMPEQDirr); break;
case X86::FsFLD0SS: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break; case X86::FsFLD0SS: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
case X86::FsFLD0SD: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break; case X86::FsFLD0SD: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
case X86::V_SET0PS: LowerUnaryToTwoAddr(OutMI, X86::XORPSrr); break; case X86::V_SET0PS: LowerUnaryToTwoAddr(OutMI, X86::XORPSrr); break;


@ -792,7 +792,7 @@ def RST : RegisterClass<"X86", [f80, f64, f32], 32,
} }
// Generic vector registers: VR64 and VR128. // Generic vector registers: VR64 and VR128.
def VR64 : RegisterClass<"X86", [v8i8, v4i16, v2i32, v1i64], 64, def VR64: RegisterClass<"X86", [x86mmx], 64,
[MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7]>; [MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7]>;
def VR128 : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],128, def VR128 : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],128,
[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,


@ -791,6 +791,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
} else if (New->getType()->isVoidTy()) { } else if (New->getType()->isVoidTy()) {
// Our return value has uses, but they will get removed later on. // Our return value has uses, but they will get removed later on.
// Replace by null for now. // Replace by null for now.
if (!Call->getType()->isX86_MMXTy())
Call->replaceAllUsesWith(Constant::getNullValue(Call->getType())); Call->replaceAllUsesWith(Constant::getNullValue(Call->getType()));
} else { } else {
assert(RetTy->isStructTy() && assert(RetTy->isStructTy() &&
@ -854,6 +855,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
} else { } else {
// If this argument is dead, replace any uses of it with null constants // If this argument is dead, replace any uses of it with null constants
// (these are guaranteed to become unused later on). // (these are guaranteed to become unused later on).
if (!I->getType()->isX86_MMXTy())
I->replaceAllUsesWith(Constant::getNullValue(I->getType())); I->replaceAllUsesWith(Constant::getNullValue(I->getType()));
} }
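The guard above exists because, presumably, there is no way to spell a literal constant of type x86_mmx, so a dead x86_mmx value cannot simply be replaced with Constant::getNullValue the way other types can; the updated tests materialize an MMX zero through a bitcast of a 64-bit vector constant instead. A minimal sketch of that idiom (function name is illustrative):

define x86_mmx @mmx_zero() {
entry:
  ; x86_mmx has no literal constants; zero is built by bitcasting
  ; a 64-bit vector constant.
  %z = bitcast <1 x i64> <i64 0> to x86_mmx
  ret x86_mmx %z
}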

View File

@ -321,6 +321,9 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) {
// Don't break volatile loads. // Don't break volatile loads.
if (LI->isVolatile()) if (LI->isVolatile())
return false; return false;
// Don't touch MMX operations.
if (LI->getType()->isX86_MMXTy())
return false;
MergeInType(LI->getType(), Offset); MergeInType(LI->getType(), Offset);
continue; continue;
} }
@ -328,6 +331,9 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) {
if (StoreInst *SI = dyn_cast<StoreInst>(User)) { if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
// Storing the pointer, not into the value? // Storing the pointer, not into the value?
if (SI->getOperand(0) == V || SI->isVolatile()) return false; if (SI->getOperand(0) == V || SI->isVolatile()) return false;
// Don't touch MMX operations.
if (SI->getOperand(0)->getType()->isX86_MMXTy())
return false;
MergeInType(SI->getOperand(0)->getType(), Offset); MergeInType(SI->getOperand(0)->getType(), Offset);
continue; continue;
} }
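With these checks, scalar replacement of aggregates no longer tries to rewrite an alloca whose loads or stores involve x86_mmx. A minimal sketch of the kind of pattern CanConvertToScalar now rejects (names are illustrative):

define x86_mmx @keep_mmx_slot(x86_mmx %v) {
entry:
  ; The x86_mmx load and store keep CanConvertToScalar from
  ; converting this alloca, so it survives scalarrepl as-is.
  %slot = alloca x86_mmx
  store x86_mmx %v, x86_mmx* %slot
  %r = load x86_mmx* %slot
  ret x86_mmx %r
}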

View File

@ -288,18 +288,39 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
break; break;
case 'x': case 'x':
// This fixes all MMX shift intrinsic instructions to take a // This fixes all MMX shift intrinsic instructions to take a
// v1i64 instead of a v2i32 as the second parameter. // x86_mmx instead of a v1i64, v2i32, v4i16, or v8i8.
if (Name.compare(5,10,"x86.mmx.ps",10) == 0 && if (Name.compare(5, 8, "x86.mmx.", 8) == 0) {
(Name.compare(13,4,"psll", 4) == 0 || const Type *X86_MMXTy = VectorType::getX86_MMXTy(FTy->getContext());
if (Name.compare(13, 4, "padd", 4) == 0 ||
Name.compare(13, 4, "psub", 4) == 0 ||
Name.compare(13, 4, "pmul", 4) == 0 ||
Name.compare(13, 5, "pmadd", 5) == 0 ||
Name.compare(13, 4, "pand", 4) == 0 ||
Name.compare(13, 3, "por", 3) == 0 ||
Name.compare(13, 4, "pxor", 4) == 0 ||
Name.compare(13, 4, "pavg", 4) == 0 ||
Name.compare(13, 4, "pmax", 4) == 0 ||
Name.compare(13, 4, "pmin", 4) == 0 ||
Name.compare(13, 4, "psad", 4) == 0 ||
Name.compare(13, 4, "psll", 4) == 0 ||
Name.compare(13, 4, "psrl", 4) == 0 ||
Name.compare(13, 4, "psra", 4) == 0 || Name.compare(13, 4, "psra", 4) == 0 ||
Name.compare(13,4,"psrl", 4) == 0) && Name[17] != 'i') { Name.compare(13, 4, "pack", 4) == 0 ||
Name.compare(13, 6, "punpck", 6) == 0 ||
Name.compare(13, 4, "pcmp", 4) == 0) {
assert(FTy->getNumParams() == 2 && "MMX intrinsic takes 2 args!");
const Type *SecondParamTy = X86_MMXTy;
const llvm::Type *VT = if (Name.compare(13, 5, "pslli", 5) == 0 ||
VectorType::get(IntegerType::get(FTy->getContext(), 64), 1); Name.compare(13, 5, "psrli", 5) == 0 ||
Name.compare(13, 5, "psrai", 5) == 0)
SecondParamTy = FTy->getParamType(1);
// We don't have to do anything if the parameter already has // Don't do anything if it has the correct types.
// the correct type. if (FTy->getReturnType() == X86_MMXTy &&
if (FTy->getParamType(1) == VT) FTy->getParamType(0) == X86_MMXTy &&
FTy->getParamType(1) == SecondParamTy)
break; break;
// We first need to change the name of the old (bad) intrinsic, because // We first need to change the name of the old (bad) intrinsic, because
@ -308,17 +329,183 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
// and typed function below. // and typed function below.
F->setName(""); F->setName("");
assert(FTy->getNumParams() == 2 && "MMX shift intrinsics take 2 args!");
// Now construct the new intrinsic with the correct name and type. We // Now construct the new intrinsic with the correct name and type. We
// leave the old function around in order to query its type, whatever it // leave the old function around in order to query its type, whatever it
// may be, and correctly convert up to the new type. // may be, and correctly convert up to the new type.
NewFn = cast<Function>(M->getOrInsertFunction(Name,
X86_MMXTy, X86_MMXTy,
SecondParamTy, (Type*)0));
return true;
}
if (Name.compare(13, 8, "maskmovq", 8) == 0) {
// Don't do anything if it has the correct types.
if (FTy->getParamType(0) == X86_MMXTy &&
FTy->getParamType(1) == X86_MMXTy)
break;
F->setName("");
NewFn = cast<Function>(M->getOrInsertFunction(Name, NewFn = cast<Function>(M->getOrInsertFunction(Name,
FTy->getReturnType(), FTy->getReturnType(),
FTy->getParamType(0), X86_MMXTy,
VT, X86_MMXTy,
FTy->getParamType(2),
(Type*)0)); (Type*)0));
return true; return true;
}
if (Name.compare(13, 8, "pmovmskb", 8) == 0) {
if (FTy->getParamType(0) == X86_MMXTy)
break;
F->setName("");
NewFn = cast<Function>(M->getOrInsertFunction(Name,
FTy->getReturnType(),
X86_MMXTy,
(Type*)0));
return true;
}
if (Name.compare(13, 5, "movnt", 5) == 0) {
if (FTy->getParamType(1) == X86_MMXTy)
break;
F->setName("");
NewFn = cast<Function>(M->getOrInsertFunction(Name,
FTy->getReturnType(),
FTy->getParamType(0),
X86_MMXTy,
(Type*)0));
return true;
}
if (Name.compare(13, 7, "palignr", 7) == 0) {
if (FTy->getReturnType() == X86_MMXTy &&
FTy->getParamType(0) == X86_MMXTy &&
FTy->getParamType(1) == X86_MMXTy)
break;
F->setName("");
NewFn = cast<Function>(M->getOrInsertFunction(Name,
X86_MMXTy,
X86_MMXTy,
X86_MMXTy,
FTy->getParamType(2),
(Type*)0));
return true;
}
if (Name.compare(13, 5, "pextr", 5) == 0) {
if (FTy->getParamType(0) == X86_MMXTy)
break;
F->setName("");
NewFn = cast<Function>(M->getOrInsertFunction(Name,
FTy->getReturnType(),
X86_MMXTy,
FTy->getParamType(1),
(Type*)0));
return true;
}
if (Name.compare(13, 5, "pinsr", 5) == 0) {
if (FTy->getReturnType() == X86_MMXTy &&
FTy->getParamType(0) == X86_MMXTy)
break;
F->setName("");
NewFn = cast<Function>(M->getOrInsertFunction(Name,
X86_MMXTy,
X86_MMXTy,
FTy->getParamType(1),
FTy->getParamType(2),
(Type*)0));
return true;
}
if (Name.compare(13, 12, "cvtsi32.si64", 12) == 0) {
if (FTy->getReturnType() == X86_MMXTy)
break;
F->setName("");
NewFn = cast<Function>(M->getOrInsertFunction(Name,
X86_MMXTy,
FTy->getParamType(0),
(Type*)0));
return true;
}
if (Name.compare(13, 12, "cvtsi64.si32", 12) == 0) {
if (FTy->getParamType(0) == X86_MMXTy)
break;
F->setName("");
NewFn = cast<Function>(M->getOrInsertFunction(Name,
FTy->getReturnType(),
X86_MMXTy,
(Type*)0));
return true;
}
if (Name.compare(13, 8, "vec.init", 8) == 0) {
if (FTy->getReturnType() == X86_MMXTy)
break;
F->setName("");
if (Name.compare(21, 2, ".b", 2) == 0)
NewFn = cast<Function>(M->getOrInsertFunction(Name,
X86_MMXTy,
FTy->getParamType(0),
FTy->getParamType(1),
FTy->getParamType(2),
FTy->getParamType(3),
FTy->getParamType(4),
FTy->getParamType(5),
FTy->getParamType(6),
FTy->getParamType(7),
(Type*)0));
else if (Name.compare(21, 2, ".w", 2) == 0)
NewFn = cast<Function>(M->getOrInsertFunction(Name,
X86_MMXTy,
FTy->getParamType(0),
FTy->getParamType(1),
FTy->getParamType(2),
FTy->getParamType(3),
(Type*)0));
else if (Name.compare(21, 2, ".d", 2) == 0)
NewFn = cast<Function>(M->getOrInsertFunction(Name,
X86_MMXTy,
FTy->getParamType(0),
FTy->getParamType(1),
(Type*)0));
return true;
}
if (Name.compare(13, 9, "vec.ext.d", 9) == 0) {
if (FTy->getReturnType() == X86_MMXTy &&
FTy->getParamType(0) == X86_MMXTy)
break;
F->setName("");
NewFn = cast<Function>(M->getOrInsertFunction(Name,
X86_MMXTy,
X86_MMXTy,
FTy->getParamType(1),
(Type*)0));
return true;
}
if (Name.compare(13, 9, "emms", 4) == 0 ||
Name.compare(13, 9, "femms", 5) == 0) {
NewFn = 0;
break;
}
// We really shouldn't get here ever.
assert(0 && "Invalid MMX intrinsic!");
break;
} else if (Name.compare(5,17,"x86.sse2.loadh.pd",17) == 0 || } else if (Name.compare(5,17,"x86.sse2.loadh.pd",17) == 0 ||
Name.compare(5,17,"x86.sse2.loadl.pd",17) == 0 || Name.compare(5,17,"x86.sse2.loadl.pd",17) == 0 ||
Name.compare(5,16,"x86.sse2.movl.dq",16) == 0 || Name.compare(5,16,"x86.sse2.movl.dq",16) == 0 ||
@ -432,6 +619,38 @@ static Instruction *CallVABD(CallInst *CI, Value *Arg0, Value *Arg1) {
"upgraded."+CI->getName(), CI); "upgraded."+CI->getName(), CI);
} }
/// ConstructNewCallInst - Construct a new CallInst with the signature of NewFn.
static void ConstructNewCallInst(Function *NewFn, CallInst *OldCI,
Value **Operands, unsigned NumOps,
bool AssignName = true) {
// Construct a new CallInst.
CallInst *NewCI =
CallInst::Create(NewFn, Operands, Operands + NumOps,
AssignName ? "upgraded." + OldCI->getName() : "", OldCI);
NewCI->setTailCall(OldCI->isTailCall());
NewCI->setCallingConv(OldCI->getCallingConv());
// Handle any uses of the old CallInst.
if (!OldCI->use_empty()) {
// If the type has changed, add a cast.
Instruction *I = OldCI;
if (OldCI->getType() != NewCI->getType()) {
Function *OldFn = OldCI->getCalledFunction();
CastInst *RetCast =
CastInst::Create(CastInst::getCastOpcode(NewCI, true,
OldFn->getReturnType(), true),
NewCI, OldFn->getReturnType(), NewCI->getName(),OldCI);
I = RetCast;
}
// Replace all uses of the old call with the new cast which has the
// correct type.
OldCI->replaceAllUsesWith(I);
}
// Clean up the old call now that it has been completely upgraded.
OldCI->eraseFromParent();
}
// UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call the // UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call the
// upgraded intrinsic. All argument and return casting must be provided in // upgraded intrinsic. All argument and return casting must be provided in
// order to seamlessly integrate with existing context. // order to seamlessly integrate with existing context.
@ -759,40 +978,246 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
break; break;
} }
case Intrinsic::x86_mmx_padd_b:
case Intrinsic::x86_mmx_padd_w:
case Intrinsic::x86_mmx_padd_d:
case Intrinsic::x86_mmx_padd_q:
case Intrinsic::x86_mmx_padds_b:
case Intrinsic::x86_mmx_padds_w:
case Intrinsic::x86_mmx_paddus_b:
case Intrinsic::x86_mmx_paddus_w:
case Intrinsic::x86_mmx_psub_b:
case Intrinsic::x86_mmx_psub_w:
case Intrinsic::x86_mmx_psub_d:
case Intrinsic::x86_mmx_psub_q:
case Intrinsic::x86_mmx_psubs_b:
case Intrinsic::x86_mmx_psubs_w:
case Intrinsic::x86_mmx_psubus_b:
case Intrinsic::x86_mmx_psubus_w:
case Intrinsic::x86_mmx_pmulh_w:
case Intrinsic::x86_mmx_pmull_w:
case Intrinsic::x86_mmx_pmulhu_w:
case Intrinsic::x86_mmx_pmulu_dq:
case Intrinsic::x86_mmx_pmadd_wd:
case Intrinsic::x86_mmx_pand:
case Intrinsic::x86_mmx_pandn:
case Intrinsic::x86_mmx_por:
case Intrinsic::x86_mmx_pxor:
case Intrinsic::x86_mmx_pavg_b:
case Intrinsic::x86_mmx_pavg_w:
case Intrinsic::x86_mmx_pmaxu_b:
case Intrinsic::x86_mmx_pmaxs_w:
case Intrinsic::x86_mmx_pminu_b:
case Intrinsic::x86_mmx_pmins_w:
case Intrinsic::x86_mmx_psad_bw:
case Intrinsic::x86_mmx_psll_w:
case Intrinsic::x86_mmx_psll_d: case Intrinsic::x86_mmx_psll_d:
case Intrinsic::x86_mmx_psll_q: case Intrinsic::x86_mmx_psll_q:
case Intrinsic::x86_mmx_psll_w: case Intrinsic::x86_mmx_pslli_w:
case Intrinsic::x86_mmx_psra_d: case Intrinsic::x86_mmx_pslli_d:
case Intrinsic::x86_mmx_psra_w: case Intrinsic::x86_mmx_pslli_q:
case Intrinsic::x86_mmx_psrl_w:
case Intrinsic::x86_mmx_psrl_d: case Intrinsic::x86_mmx_psrl_d:
case Intrinsic::x86_mmx_psrl_q: case Intrinsic::x86_mmx_psrl_q:
case Intrinsic::x86_mmx_psrl_w: { case Intrinsic::x86_mmx_psrli_w:
case Intrinsic::x86_mmx_psrli_d:
case Intrinsic::x86_mmx_psrli_q:
case Intrinsic::x86_mmx_psra_w:
case Intrinsic::x86_mmx_psra_d:
case Intrinsic::x86_mmx_psrai_w:
case Intrinsic::x86_mmx_psrai_d:
case Intrinsic::x86_mmx_packsswb:
case Intrinsic::x86_mmx_packssdw:
case Intrinsic::x86_mmx_packuswb:
case Intrinsic::x86_mmx_punpckhbw:
case Intrinsic::x86_mmx_punpckhwd:
case Intrinsic::x86_mmx_punpckhdq:
case Intrinsic::x86_mmx_punpcklbw:
case Intrinsic::x86_mmx_punpcklwd:
case Intrinsic::x86_mmx_punpckldq:
case Intrinsic::x86_mmx_pcmpeq_b:
case Intrinsic::x86_mmx_pcmpeq_w:
case Intrinsic::x86_mmx_pcmpeq_d:
case Intrinsic::x86_mmx_pcmpgt_b:
case Intrinsic::x86_mmx_pcmpgt_w:
case Intrinsic::x86_mmx_pcmpgt_d: {
Value *Operands[2];
// Cast the operand to the X86 MMX type.
Operands[0] = new BitCastInst(CI->getArgOperand(0),
NewFn->getFunctionType()->getParamType(0),
"upgraded.", CI);
switch (NewFn->getIntrinsicID()) {
default:
// Cast to the X86 MMX type.
Operands[1] = new BitCastInst(CI->getArgOperand(1),
NewFn->getFunctionType()->getParamType(1),
"upgraded.", CI);
break;
case Intrinsic::x86_mmx_pslli_w:
case Intrinsic::x86_mmx_pslli_d:
case Intrinsic::x86_mmx_pslli_q:
case Intrinsic::x86_mmx_psrli_w:
case Intrinsic::x86_mmx_psrli_d:
case Intrinsic::x86_mmx_psrli_q:
case Intrinsic::x86_mmx_psrai_w:
case Intrinsic::x86_mmx_psrai_d:
// These take an i32 as their second parameter.
Operands[1] = CI->getArgOperand(1);
break;
}
ConstructNewCallInst(NewFn, CI, Operands, 2);
break;
}
case Intrinsic::x86_mmx_maskmovq: {
Value *Operands[3];
// Cast the operands to the X86 MMX type.
Operands[0] = new BitCastInst(CI->getArgOperand(0),
NewFn->getFunctionType()->getParamType(0),
"upgraded.", CI);
Operands[1] = new BitCastInst(CI->getArgOperand(1),
NewFn->getFunctionType()->getParamType(1),
"upgraded.", CI);
Operands[2] = CI->getArgOperand(2);
ConstructNewCallInst(NewFn, CI, Operands, 3, false);
break;
}
case Intrinsic::x86_mmx_pmovmskb: {
Value *Operands[1];
// Cast the operand to the X86 MMX type.
Operands[0] = new BitCastInst(CI->getArgOperand(0),
NewFn->getFunctionType()->getParamType(0),
"upgraded.", CI);
ConstructNewCallInst(NewFn, CI, Operands, 1);
break;
}
case Intrinsic::x86_mmx_movnt_dq: {
Value *Operands[2]; Value *Operands[2];
Operands[0] = CI->getArgOperand(0); Operands[0] = CI->getArgOperand(0);
// Cast the second parameter to the correct type. // Cast the operand to the X86 MMX type.
BitCastInst *BC = new BitCastInst(CI->getArgOperand(1), Operands[1] = new BitCastInst(CI->getArgOperand(1),
NewFn->getFunctionType()->getParamType(1), NewFn->getFunctionType()->getParamType(1),
"upgraded.", CI); "upgraded.", CI);
Operands[1] = BC;
// Construct a new CallInst ConstructNewCallInst(NewFn, CI, Operands, 2, false);
CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+2,
"upgraded."+CI->getName(), CI);
NewCI->setTailCall(CI->isTailCall());
NewCI->setCallingConv(CI->getCallingConv());
// Handle any uses of the old CallInst.
if (!CI->use_empty())
// Replace all uses of the old call with the new cast which has the
// correct type.
CI->replaceAllUsesWith(NewCI);
// Clean up the old call now that it has been completely upgraded.
CI->eraseFromParent();
break; break;
} }
case Intrinsic::x86_mmx_palignr_b: {
Value *Operands[3];
// Cast the operands to the X86 MMX type.
Operands[0] = new BitCastInst(CI->getArgOperand(0),
NewFn->getFunctionType()->getParamType(0),
"upgraded.", CI);
Operands[1] = new BitCastInst(CI->getArgOperand(1),
NewFn->getFunctionType()->getParamType(1),
"upgraded.", CI);
Operands[2] = CI->getArgOperand(2);
ConstructNewCallInst(NewFn, CI, Operands, 3);
break;
}
case Intrinsic::x86_mmx_pextr_w: {
Value *Operands[2];
// Cast the operands to the X86 MMX type.
Operands[0] = new BitCastInst(CI->getArgOperand(0),
NewFn->getFunctionType()->getParamType(0),
"upgraded.", CI);
Operands[1] = CI->getArgOperand(1);
ConstructNewCallInst(NewFn, CI, Operands, 2);
break;
}
case Intrinsic::x86_mmx_pinsr_w: {
Value *Operands[3];
// Cast the operands to the X86 MMX type.
Operands[0] = new BitCastInst(CI->getArgOperand(0),
NewFn->getFunctionType()->getParamType(0),
"upgraded.", CI);
Operands[1] = CI->getArgOperand(1);
Operands[2] = CI->getArgOperand(2);
ConstructNewCallInst(NewFn, CI, Operands, 3);
break;
}
#if 0
case Intrinsic::x86_mmx_cvtsi32_si64: {
// The return type needs to be changed.
Value *Operands[1];
Operands[0] = CI->getArgOperand(0);
ConstructNewCallInst(NewFn, CI, Operands, 1);
break;
}
case Intrinsic::x86_mmx_cvtsi64_si32: {
Value *Operands[1];
// Cast the operand to the X86 MMX type.
Operands[0] = new BitCastInst(CI->getArgOperand(0),
NewFn->getFunctionType()->getParamType(0),
"upgraded.", CI);
ConstructNewCallInst(NewFn, CI, Operands, 1);
break;
}
case Intrinsic::x86_mmx_vec_init_b:
case Intrinsic::x86_mmx_vec_init_w:
case Intrinsic::x86_mmx_vec_init_d: {
// The return type needs to be changed.
Value *Operands[8];
unsigned NumOps = 0;
switch (NewFn->getIntrinsicID()) {
default: break;
case Intrinsic::x86_mmx_vec_init_b: NumOps = 8; break;
case Intrinsic::x86_mmx_vec_init_w: NumOps = 4; break;
case Intrinsic::x86_mmx_vec_init_d: NumOps = 2; break;
}
switch (NewFn->getIntrinsicID()) {
default: break;
case Intrinsic::x86_mmx_vec_init_b:
Operands[7] = CI->getArgOperand(7);
Operands[6] = CI->getArgOperand(6);
Operands[5] = CI->getArgOperand(5);
Operands[4] = CI->getArgOperand(4);
// FALLTHRU
case Intrinsic::x86_mmx_vec_init_w:
Operands[3] = CI->getArgOperand(3);
Operands[2] = CI->getArgOperand(2);
// FALLTHRU
case Intrinsic::x86_mmx_vec_init_d:
Operands[1] = CI->getArgOperand(1);
Operands[0] = CI->getArgOperand(0);
break;
}
ConstructNewCallInst(NewFn, CI, Operands, NumOps);
break;
}
case Intrinsic::x86_mmx_vec_ext_d: {
Value *Operands[2];
// Cast the operand to the X86 MMX type.
Operands[0] = new BitCastInst(CI->getArgOperand(0),
NewFn->getFunctionType()->getParamType(0),
"upgraded.", CI);
Operands[1] = CI->getArgOperand(1);
ConstructNewCallInst(NewFn, CI, Operands, 2);
break;
}
#endif
case Intrinsic::ctlz: case Intrinsic::ctlz:
case Intrinsic::ctpop: case Intrinsic::ctpop:
case Intrinsic::cttz: { case Intrinsic::cttz: {

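The net effect of the auto-upgrade machinery above: a call written against an old MMX intrinsic signature is rewritten into bitcasts to x86_mmx, a call to the retyped intrinsic, and a cast of the result back to the old type for any remaining uses. A sketch of the before/after shape (value names are illustrative, not what the upgrader actually emits):

define <4 x i16> @upgraded_padds(<4 x i16> %a, <4 x i16> %b) {
entry:
  ; Old bitcode called:
  ;   %r = call <4 x i16> @llvm.x86.mmx.padds.w(<4 x i16> %a, <4 x i16> %b)
  ; After upgrade, operands are cast to x86_mmx, the retyped intrinsic is
  ; called, and the result is cast back for the old uses.
  %a.mmx = bitcast <4 x i16> %a to x86_mmx
  %b.mmx = bitcast <4 x i16> %b to x86_mmx
  %r.mmx = call x86_mmx @llvm.x86.mmx.padds.w(x86_mmx %a.mmx, x86_mmx %b.mmx)
  %r = bitcast x86_mmx %r.mmx to <4 x i16>
  ret <4 x i16> %r
}

declare x86_mmx @llvm.x86.mmx.padds.w(x86_mmx, x86_mmx)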
View File

@ -2360,6 +2360,8 @@ bool CastInst::isCastable(const Type *SrcTy, const Type *DestTy) {
} else { // Casting from something else } else { // Casting from something else
return false; return false;
} }
} else if (DestTy->isX86_MMXTy()) {
return SrcBits == 64;
} else { // Casting to something else } else { // Casting to something else
return false; return false;
} }
@ -2441,6 +2443,10 @@ CastInst::getCastOpcode(
return BitCast; // vector -> vector return BitCast; // vector -> vector
} else if (DestPTy->getBitWidth() == SrcBits) { } else if (DestPTy->getBitWidth() == SrcBits) {
return BitCast; // float/int -> vector return BitCast; // float/int -> vector
} else if (SrcTy->isX86_MMXTy()) {
assert(DestPTy->getBitWidth()==64 &&
"Casting X86_MMX to vector of wrong width");
return BitCast; // MMX to 64-bit vector
} else { } else {
assert(!"Illegal cast to vector (wrong type or size)"); assert(!"Illegal cast to vector (wrong type or size)");
} }
@ -2452,6 +2458,14 @@ CastInst::getCastOpcode(
} else { } else {
assert(!"Casting pointer to other than pointer or int"); assert(!"Casting pointer to other than pointer or int");
} }
} else if (DestTy->isX86_MMXTy()) {
if (const VectorType *SrcPTy = dyn_cast<VectorType>(SrcTy)) {
assert(SrcPTy->getBitWidth()==64 &&
"Casting vector of wrong width to X86_MMX");
return BitCast; // 64-bit vector to MMX
} else {
assert(!"Illegal cast to X86_MMX");
}
} else { } else {
assert(!"Casting to type that is not first-class"); assert(!"Casting to type that is not first-class");
} }

View File

@ -173,10 +173,20 @@ bool Type::canLosslesslyBitCastTo(const Type *Ty) const {
return false; return false;
// Vector -> Vector conversions are always lossless if the two vector types // Vector -> Vector conversions are always lossless if the two vector types
// have the same size, otherwise not. // have the same size, otherwise not. Also, 64-bit vector types can be
if (const VectorType *thisPTy = dyn_cast<VectorType>(this)) // converted to x86mmx.
if (const VectorType *thisPTy = dyn_cast<VectorType>(this)) {
if (const VectorType *thatPTy = dyn_cast<VectorType>(Ty)) if (const VectorType *thatPTy = dyn_cast<VectorType>(Ty))
return thisPTy->getBitWidth() == thatPTy->getBitWidth(); return thisPTy->getBitWidth() == thatPTy->getBitWidth();
if (Ty->getTypeID() == Type::X86_MMXTyID &&
thisPTy->getBitWidth() == 64)
return true;
}
if (this->getTypeID() == Type::X86_MMXTyID)
if (const VectorType *thatPTy = dyn_cast<VectorType>(Ty))
if (thatPTy->getBitWidth() == 64)
return true;
// At this point we have only various mismatches of the first class types // At this point we have only various mismatches of the first class types
// remaining and ptr->ptr. Just select the lossless conversions. Everything // remaining and ptr->ptr. Just select the lossless conversions. Everything

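Taken together with the CastInst::isCastable and getCastOpcode changes above, any 64-bit first-class value can now be bitcast to x86_mmx and back. A minimal round-trip sketch in the style of the updated tests:

define <1 x i64> @mmx_roundtrip(double %d) {
entry:
  ; double (64 bits) -> x86_mmx -> <1 x i64>; both directions are plain bitcasts.
  %m = bitcast double %d to x86_mmx
  %v = bitcast x86_mmx %m to <1 x i64>
  ret <1 x i64> %v
}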
View File

@ -7,7 +7,7 @@
; RUN: llvm-as < %s | llvm-dis | \ ; RUN: llvm-as < %s | llvm-dis | \
; RUN: not grep {llvm\\.bswap\\.i\[0-9\]*\\.i\[0-9\]*} ; RUN: not grep {llvm\\.bswap\\.i\[0-9\]*\\.i\[0-9\]*}
; RUN: llvm-as < %s | llvm-dis | \ ; RUN: llvm-as < %s | llvm-dis | \
; RUN: grep {llvm\\.x86\\.mmx\\.ps} | grep {\\\<2 x i32\\\>} | count 6 ; RUN: grep {llvm\\.x86\\.mmx\\.ps} | grep {x86_mmx} | count 16
declare i32 @llvm.ctpop.i28(i28 %val) declare i32 @llvm.ctpop.i28(i28 %val)
declare i32 @llvm.cttz.i29(i29 %val) declare i32 @llvm.cttz.i29(i29 %val)

Binary file not shown.

View File

@ -1,4 +1,5 @@
; RUN: llc < %s -o - -march=x86 -mattr=+mmx | FileCheck %s ; RUN: llc < %s -o - -march=x86 -mattr=+mmx | FileCheck %s
; There are no MMX instructions here. We use addl+adcl for the adds.
define <1 x i64> @unsigned_add3(<1 x i64>* %a, <1 x i64>* %b, i32 %count) nounwind { define <1 x i64> @unsigned_add3(<1 x i64>* %a, <1 x i64>* %b, i32 %count) nounwind {
entry: entry:
@ -7,9 +8,8 @@ entry:
bb26: ; preds = %bb26, %entry bb26: ; preds = %bb26, %entry
; CHECK: movq ({{.*}},8), %mm ; CHECK: addl %eax, %ebx
; CHECK: paddq ({{.*}},8), %mm ; CHECK: adcl %edx, %ebp
; CHECK: paddq %mm{{[0-7]}}, %mm
%i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ] ; <i32> [#uses=3] %i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ] ; <i32> [#uses=3]
%sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ] ; <<1 x i64>> [#uses=1] %sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ] ; <<1 x i64>> [#uses=1]
@ -27,3 +27,38 @@ bb31: ; preds = %bb26, %entry
%sum.035.1 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ] ; <<1 x i64>> [#uses=1] %sum.035.1 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ] ; <<1 x i64>> [#uses=1]
ret <1 x i64> %sum.035.1 ret <1 x i64> %sum.035.1
} }
; This is the original test converted to use MMX intrinsics.
define <1 x i64> @unsigned_add3a(x86_mmx* %a, x86_mmx* %b, i32 %count) nounwind {
entry:
%tmp2943 = bitcast <1 x i64><i64 0> to x86_mmx
%tmp2942 = icmp eq i32 %count, 0 ; <i1> [#uses=1]
br i1 %tmp2942, label %bb31, label %bb26
bb26: ; preds = %bb26, %entry
; CHECK: movq ({{.*}},8), %mm
; CHECK: paddq ({{.*}},8), %mm
; CHECK: paddq %mm{{[0-7]}}, %mm
%i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ] ; <i32> [#uses=3]
%sum.035.0 = phi x86_mmx [ %tmp2943, %entry ], [ %tmp22, %bb26 ] ; <x86_mmx> [#uses=1]
%tmp13 = getelementptr x86_mmx* %b, i32 %i.037.0 ; <x86_mmx*> [#uses=1]
%tmp14 = load x86_mmx* %tmp13 ; <x86_mmx> [#uses=1]
%tmp18 = getelementptr x86_mmx* %a, i32 %i.037.0 ; <x86_mmx*> [#uses=1]
%tmp19 = load x86_mmx* %tmp18 ; <x86_mmx> [#uses=1]
%tmp21 = call x86_mmx @llvm.x86.mmx.padd.q (x86_mmx %tmp19, x86_mmx %tmp14) ; <x86_mmx> [#uses=1]
%tmp22 = call x86_mmx @llvm.x86.mmx.padd.q (x86_mmx %tmp21, x86_mmx %sum.035.0) ; <x86_mmx> [#uses=2]
%tmp25 = add i32 %i.037.0, 1 ; <i32> [#uses=2]
%tmp29 = icmp ult i32 %tmp25, %count ; <i1> [#uses=1]
br i1 %tmp29, label %bb26, label %bb31
bb31: ; preds = %bb26, %entry
%sum.035.1 = phi x86_mmx [ %tmp2943, %entry ], [ %tmp22, %bb26 ] ; <x86_mmx> [#uses=1]
%t = bitcast x86_mmx %sum.035.1 to <1 x i64>
ret <1 x i64> %t
}
declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)

View File

@ -5,10 +5,10 @@ target triple = "i686-apple-darwin8"
define void @test(<1 x i64> %c64, <1 x i64> %mask1, i8* %P) { define void @test(<1 x i64> %c64, <1 x i64> %mask1, i8* %P) {
entry: entry:
%tmp4 = bitcast <1 x i64> %mask1 to <8 x i8> ; <<8 x i8>> [#uses=1] %tmp4 = bitcast <1 x i64> %mask1 to x86_mmx ; <x86_mmx> [#uses=1]
%tmp6 = bitcast <1 x i64> %c64 to <8 x i8> ; <<8 x i8>> [#uses=1] %tmp6 = bitcast <1 x i64> %c64 to x86_mmx ; <x86_mmx> [#uses=1]
tail call void @llvm.x86.mmx.maskmovq( <8 x i8> %tmp6, <8 x i8> %tmp4, i8* %P ) tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp4, x86_mmx %tmp6, i8* %P )
ret void ret void
} }
declare void @llvm.x86.mmx.maskmovq(<8 x i8>, <8 x i8>, i8*) declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*)

View File

@ -1,17 +1,16 @@
; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep paddusw ; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep paddusw
@R = external global <1 x i64> ; <<1 x i64>*> [#uses=1] @R = external global x86_mmx ; <x86_mmx*> [#uses=1]
define void @foo(<1 x i64> %A, <1 x i64> %B) { define void @foo(<1 x i64> %A, <1 x i64> %B) {
entry: entry:
%tmp4 = bitcast <1 x i64> %B to <4 x i16> ; <<4 x i16>> [#uses=1] %tmp2 = bitcast <1 x i64> %A to x86_mmx
%tmp6 = bitcast <1 x i64> %A to <4 x i16> ; <<4 x i16>> [#uses=1] %tmp3 = bitcast <1 x i64> %B to x86_mmx
%tmp7 = tail call <4 x i16> @llvm.x86.mmx.paddus.w( <4 x i16> %tmp6, <4 x i16> %tmp4 ) ; <<4 x i16>> [#uses=1] %tmp7 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp2, x86_mmx %tmp3 ) ; <x86_mmx> [#uses=1]
%tmp8 = bitcast <4 x i16> %tmp7 to <1 x i64> ; <<1 x i64>> [#uses=1] store x86_mmx %tmp7, x86_mmx* @R
store <1 x i64> %tmp8, <1 x i64>* @R
tail call void @llvm.x86.mmx.emms( ) tail call void @llvm.x86.mmx.emms( )
ret void ret void
} }
declare <4 x i16> @llvm.x86.mmx.paddus.w(<4 x i16>, <4 x i16>) declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
declare void @llvm.x86.mmx.emms() declare void @llvm.x86.mmx.emms()

View File

@ -2,19 +2,17 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx | grep {movd %rdi, %mm1} ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx | grep {movd %rdi, %mm1}
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx | grep {paddusw %mm0, %mm1} ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx | grep {paddusw %mm0, %mm1}
@R = external global <1 x i64> ; <<1 x i64>*> [#uses=1] @R = external global x86_mmx ; <x86_mmx*> [#uses=1]
define void @foo(<1 x i64> %A, <1 x i64> %B) nounwind { define void @foo(<1 x i64> %A, <1 x i64> %B) nounwind {
entry: entry:
%tmp4 = bitcast <1 x i64> %B to <4 x i16> ; <<4 x i16>> [#uses=1] %tmp4 = bitcast <1 x i64> %B to x86_mmx ; <x86_mmx> [#uses=1]
%tmp6 = bitcast <1 x i64> %A to <4 x i16> ; <<4 x i16>> [#uses=1] %tmp6 = bitcast <1 x i64> %A to x86_mmx ; <x86_mmx> [#uses=1]
%tmp7 = tail call <4 x i16> @llvm.x86.mmx.paddus.w( <4 x i16> %tmp6, <4 x i16> %tmp4 ) ; <<4 x i16>> [#uses=1] %tmp7 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp6, x86_mmx %tmp4 ) ; <x86_mmx> [#uses=1]
%tmp8 = bitcast <4 x i16> %tmp7 to <1 x i64> ; <<1 x i64>> [#uses=1] store x86_mmx %tmp7, x86_mmx* @R
store <1 x i64> %tmp8, <1 x i64>* @R
tail call void @llvm.x86.mmx.emms( ) tail call void @llvm.x86.mmx.emms( )
ret void ret void
} }
declare <4 x i16> @llvm.x86.mmx.paddus.w(<4 x i16>, <4 x i16>) declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
declare void @llvm.x86.mmx.emms() declare void @llvm.x86.mmx.emms()

View File

@ -5,15 +5,15 @@ entry:
tail call void asm sideeffect "# top of block", "~{dirflag},~{fpsr},~{flags},~{di},~{si},~{dx},~{cx},~{ax}"( ) nounwind tail call void asm sideeffect "# top of block", "~{dirflag},~{fpsr},~{flags},~{di},~{si},~{dx},~{cx},~{ax}"( ) nounwind
tail call void asm sideeffect ".file \224443946.c\22", "~{dirflag},~{fpsr},~{flags}"( ) nounwind tail call void asm sideeffect ".file \224443946.c\22", "~{dirflag},~{fpsr},~{flags}"( ) nounwind
tail call void asm sideeffect ".line 8", "~{dirflag},~{fpsr},~{flags}"( ) nounwind tail call void asm sideeffect ".line 8", "~{dirflag},~{fpsr},~{flags}"( ) nounwind
%tmp1 = tail call <2 x i32> asm sideeffect "movd $1, $0", "=={mm4},{bp},~{dirflag},~{fpsr},~{flags},~{memory}"( i32 undef ) nounwind ; <<2 x i32>> [#uses=1] %tmp1 = tail call x86_mmx asm sideeffect "movd $1, $0", "=={mm4},{bp},~{dirflag},~{fpsr},~{flags},~{memory}"( i32 undef ) nounwind ; <x86_mmx> [#uses=1]
tail call void asm sideeffect ".file \224443946.c\22", "~{dirflag},~{fpsr},~{flags}"( ) nounwind tail call void asm sideeffect ".file \224443946.c\22", "~{dirflag},~{fpsr},~{flags}"( ) nounwind
tail call void asm sideeffect ".line 9", "~{dirflag},~{fpsr},~{flags}"( ) nounwind tail call void asm sideeffect ".line 9", "~{dirflag},~{fpsr},~{flags}"( ) nounwind
%tmp3 = tail call i32 asm sideeffect "movd $1, $0", "=={bp},{mm3},~{dirflag},~{fpsr},~{flags},~{memory}"( <2 x i32> undef ) nounwind ; <i32> [#uses=1] %tmp3 = tail call i32 asm sideeffect "movd $1, $0", "=={bp},{mm3},~{dirflag},~{fpsr},~{flags},~{memory}"( x86_mmx undef ) nounwind ; <i32> [#uses=1]
tail call void asm sideeffect ".file \224443946.c\22", "~{dirflag},~{fpsr},~{flags}"( ) nounwind tail call void asm sideeffect ".file \224443946.c\22", "~{dirflag},~{fpsr},~{flags}"( ) nounwind
tail call void asm sideeffect ".line 10", "~{dirflag},~{fpsr},~{flags}"( ) nounwind tail call void asm sideeffect ".line 10", "~{dirflag},~{fpsr},~{flags}"( ) nounwind
tail call void asm sideeffect "movntq $0, 0($1,$2)", "{mm0},{di},{bp},~{dirflag},~{fpsr},~{flags},~{memory}"( <2 x i32> undef, i32 undef, i32 %tmp3 ) nounwind tail call void asm sideeffect "movntq $0, 0($1,$2)", "{mm0},{di},{bp},~{dirflag},~{fpsr},~{flags},~{memory}"( x86_mmx undef, i32 undef, i32 %tmp3 ) nounwind
tail call void asm sideeffect ".file \224443946.c\22", "~{dirflag},~{fpsr},~{flags}"( ) nounwind tail call void asm sideeffect ".file \224443946.c\22", "~{dirflag},~{fpsr},~{flags}"( ) nounwind
tail call void asm sideeffect ".line 11", "~{dirflag},~{fpsr},~{flags}"( ) nounwind tail call void asm sideeffect ".line 11", "~{dirflag},~{fpsr},~{flags}"( ) nounwind
%tmp8 = tail call i32 asm sideeffect "movd $1, $0", "=={bp},{mm4},~{dirflag},~{fpsr},~{flags},~{memory}"( <2 x i32> %tmp1 ) nounwind ; <i32> [#uses=0] %tmp8 = tail call i32 asm sideeffect "movd $1, $0", "=={bp},{mm4},~{dirflag},~{fpsr},~{flags},~{memory}"( x86_mmx %tmp1 ) nounwind ; <i32> [#uses=0]
ret i32 undef ret i32 undef
} }

View File

@ -17,11 +17,13 @@ entry:
br i1 false, label %bb.nph144.split, label %bb133 br i1 false, label %bb.nph144.split, label %bb133
bb.nph144.split: ; preds = %entry bb.nph144.split: ; preds = %entry
tail call void @llvm.x86.mmx.maskmovq( <8 x i8> zeroinitializer, <8 x i8> zeroinitializer, i8* null ) nounwind %tmp = bitcast <8 x i8> zeroinitializer to x86_mmx
%tmp2 = bitcast <8 x i8> zeroinitializer to x86_mmx
tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp, x86_mmx %tmp2, i8* null ) nounwind
unreachable unreachable
bb133: ; preds = %entry bb133: ; preds = %entry
ret void ret void
} }
declare void @llvm.x86.mmx.maskmovq(<8 x i8>, <8 x i8>, i8*) nounwind declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*) nounwind

View File

@ -1,6 +1,9 @@
; RUN: llc < %s -march=x86 -mattr=+sse2 -mattr=+mmx | grep unpcklpd
; RUN: llc < %s -march=x86 -mattr=+sse2 -mattr=+mmx | grep unpckhpd
; RUN: llc < %s -march=x86 -mattr=+sse2 | grep cvttpd2pi | count 1 ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep cvttpd2pi | count 1
; RUN: llc < %s -march=x86 -mattr=+sse2 | grep cvtpi2pd | count 1 ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep cvtpi2pd | count 1
; PR2687 ; originally from PR2687, but things don't work that way any more.
; there are no MMX instructions here; we use XMM.
define <2 x double> @a(<2 x i32> %x) nounwind { define <2 x double> @a(<2 x i32> %x) nounwind {
entry: entry:
@ -13,3 +16,20 @@ entry:
%y = fptosi <2 x double> %x to <2 x i32> %y = fptosi <2 x double> %x to <2 x i32>
ret <2 x i32> %y ret <2 x i32> %y
} }
; This is how to get MMX instructions.
define <2 x double> @a2(x86_mmx %x) nounwind {
entry:
%y = tail call <2 x double> @llvm.x86.sse.cvtpi2pd(x86_mmx %x)
ret <2 x double> %y
}
define x86_mmx @b2(<2 x double> %x) nounwind {
entry:
%y = tail call x86_mmx @llvm.x86.sse.cvttpd2pi (<2 x double> %x)
ret x86_mmx %y
}
declare <2 x double> @llvm.x86.sse.cvtpi2pd(x86_mmx)
declare x86_mmx @llvm.x86.sse.cvttpd2pi(<2 x double>)

View File

@ -1,10 +1,12 @@
; RUN: llc < %s -march=x86-64 ; RUN: llc < %s -march=x86-64
; PR4669 ; PR4669
declare <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64>, i32) declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)
define <1 x i64> @test(i64 %t) { define <1 x i64> @test(i64 %t) {
entry: entry:
%t1 = insertelement <1 x i64> undef, i64 %t, i32 0 %t1 = insertelement <1 x i64> undef, i64 %t, i32 0
%t2 = tail call <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64> %t1, i32 48) %t0 = bitcast <1 x i64> %t1 to x86_mmx
ret <1 x i64> %t2 %t2 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %t0, i32 48)
%t3 = bitcast x86_mmx %t2 to <1 x i64>
ret <1 x i64> %t3
} }

View File

@ -1,12 +1,12 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | FileCheck %s ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | FileCheck %s
; There are no MMX operations here, so we use XMM or i64.
define void @ti8(double %a, double %b) nounwind { define void @ti8(double %a, double %b) nounwind {
entry: entry:
%tmp1 = bitcast double %a to <8 x i8> %tmp1 = bitcast double %a to <8 x i8>
; CHECK: movdq2q
%tmp2 = bitcast double %b to <8 x i8> %tmp2 = bitcast double %b to <8 x i8>
; CHECK: movdq2q
%tmp3 = add <8 x i8> %tmp1, %tmp2 %tmp3 = add <8 x i8> %tmp1, %tmp2
; CHECK: paddb %xmm1, %xmm0
store <8 x i8> %tmp3, <8 x i8>* null store <8 x i8> %tmp3, <8 x i8>* null
ret void ret void
} }
@ -14,10 +14,9 @@ entry:
define void @ti16(double %a, double %b) nounwind { define void @ti16(double %a, double %b) nounwind {
entry: entry:
%tmp1 = bitcast double %a to <4 x i16> %tmp1 = bitcast double %a to <4 x i16>
; CHECK: movdq2q
%tmp2 = bitcast double %b to <4 x i16> %tmp2 = bitcast double %b to <4 x i16>
; CHECK: movdq2q
%tmp3 = add <4 x i16> %tmp1, %tmp2 %tmp3 = add <4 x i16> %tmp1, %tmp2
; CHECK: paddw %xmm1, %xmm0
store <4 x i16> %tmp3, <4 x i16>* null store <4 x i16> %tmp3, <4 x i16>* null
ret void ret void
} }
@ -25,10 +24,9 @@ entry:
define void @ti32(double %a, double %b) nounwind { define void @ti32(double %a, double %b) nounwind {
entry: entry:
%tmp1 = bitcast double %a to <2 x i32> %tmp1 = bitcast double %a to <2 x i32>
; CHECK: movdq2q
%tmp2 = bitcast double %b to <2 x i32> %tmp2 = bitcast double %b to <2 x i32>
; CHECK: movdq2q
%tmp3 = add <2 x i32> %tmp1, %tmp2 %tmp3 = add <2 x i32> %tmp1, %tmp2
; CHECK: paddd %xmm1, %xmm0
store <2 x i32> %tmp3, <2 x i32>* null store <2 x i32> %tmp3, <2 x i32>* null
ret void ret void
} }
@ -36,10 +34,60 @@ entry:
define void @ti64(double %a, double %b) nounwind { define void @ti64(double %a, double %b) nounwind {
entry: entry:
%tmp1 = bitcast double %a to <1 x i64> %tmp1 = bitcast double %a to <1 x i64>
; CHECK: movdq2q
%tmp2 = bitcast double %b to <1 x i64> %tmp2 = bitcast double %b to <1 x i64>
; CHECK: movdq2q
%tmp3 = add <1 x i64> %tmp1, %tmp2 %tmp3 = add <1 x i64> %tmp1, %tmp2
; CHECK: addq %rax, %rcx
store <1 x i64> %tmp3, <1 x i64>* null store <1 x i64> %tmp3, <1 x i64>* null
ret void ret void
} }
; MMX intrinsic calls get us MMX instructions.
define void @ti8a(double %a, double %b) nounwind {
entry:
%tmp1 = bitcast double %a to x86_mmx
; CHECK: movdq2q
%tmp2 = bitcast double %b to x86_mmx
; CHECK: movdq2q
%tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %tmp1, x86_mmx %tmp2)
store x86_mmx %tmp3, x86_mmx* null
ret void
}
define void @ti16a(double %a, double %b) nounwind {
entry:
%tmp1 = bitcast double %a to x86_mmx
; CHECK: movdq2q
%tmp2 = bitcast double %b to x86_mmx
; CHECK: movdq2q
%tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %tmp1, x86_mmx %tmp2)
store x86_mmx %tmp3, x86_mmx* null
ret void
}
define void @ti32a(double %a, double %b) nounwind {
entry:
%tmp1 = bitcast double %a to x86_mmx
; CHECK: movdq2q
%tmp2 = bitcast double %b to x86_mmx
; CHECK: movdq2q
%tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %tmp1, x86_mmx %tmp2)
store x86_mmx %tmp3, x86_mmx* null
ret void
}
define void @ti64a(double %a, double %b) nounwind {
entry:
%tmp1 = bitcast double %a to x86_mmx
; CHECK: movdq2q
%tmp2 = bitcast double %b to x86_mmx
; CHECK: movdq2q
%tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %tmp1, x86_mmx %tmp2)
store x86_mmx %tmp3, x86_mmx* null
ret void
}
declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)

View File

@ -5,15 +5,19 @@ target datalayout =
"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-apple-darwin9.8" target triple = "x86_64-apple-darwin9.8"
declare void @func2(<1 x i64>) declare void @func2(x86_mmx)
define void @func1() nounwind { define void @func1() nounwind {
; This isn't spectacular, but it's MMX code at -O0... ; This isn't spectacular, but it's MMX code at -O0...
; CHECK: movl $2, %eax ; CHECK: movq2dq %mm0, %xmm0
; CHECK: movd %rax, %mm0 ; For now, handling of x86_mmx parameters in fast Isel is unimplemented,
; CHECK: movd %mm0, %rdi ; so we get pretty poor code. The below is preferable.
; CHEK: movl $2, %eax
; CHEK: movd %rax, %mm0
; CHEK: movd %mm0, %rdi
call void @func2(<1 x i64> <i64 2>) %tmp0 = bitcast <2 x i32><i32 0, i32 2> to x86_mmx
call void @func2(x86_mmx %tmp0)
ret void ret void
} }

View File

@ -1,24 +1,27 @@
; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+mmx | grep mm0 | count 3 ; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+mmx | grep mm0 | count 1
; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+mmx | grep esp | count 1 ; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+mmx | grep esp | count 2
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep xmm0 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep xmm0
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep rdi ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep rdi
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | not grep movups ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | not grep movups
; ;
; On Darwin x86-32, v8i8, v4i16, v2i32 values are passed in MM[0-2]. ; On Darwin x86-32, v8i8, v4i16, v2i32 values are passed in MM[0-2].
; On Darwin x86-32, v1i64 values are passed in memory. ; On Darwin x86-32, v1i64 values are passed in memory. In this example, they
; are never moved into an MM register at all.
; On Darwin x86-64, v8i8, v4i16, v2i32 values are passed in XMM[0-7]. ; On Darwin x86-64, v8i8, v4i16, v2i32 values are passed in XMM[0-7].
; On Darwin x86-64, v1i64 values are passed in 64-bit GPRs. ; On Darwin x86-64, v1i64 values are passed in 64-bit GPRs.
@u1 = external global <8 x i8> @u1 = external global x86_mmx
define void @t1(<8 x i8> %v1) nounwind { define void @t1(x86_mmx %v1) nounwind {
store <8 x i8> %v1, <8 x i8>* @u1, align 8 store x86_mmx %v1, x86_mmx* @u1, align 8
ret void ret void
} }
@u2 = external global <1 x i64> @u2 = external global x86_mmx
define void @t2(<1 x i64> %v1) nounwind { define void @t2(<1 x i64> %v1) nounwind {
store <1 x i64> %v1, <1 x i64>* @u2, align 8 %tmp = bitcast <1 x i64> %v1 to x86_mmx
store x86_mmx %tmp, x86_mmx* @u2, align 8
ret void ret void
} }

View File

@ -1,17 +1,21 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep movq2dq | count 1
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep movdq2q | count 2 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep movdq2q | count 2
; Since the add is not an MMX add, we don't have a movq2dq any more.
@g_v8qi = external global <8 x i8> @g_v8qi = external global <8 x i8>
define void @t1() nounwind { define void @t1() nounwind {
%tmp3 = load <8 x i8>* @g_v8qi, align 8 %tmp3 = load <8 x i8>* @g_v8qi, align 8
%tmp4 = tail call i32 (...)* @pass_v8qi( <8 x i8> %tmp3 ) nounwind %tmp3a = bitcast <8 x i8> %tmp3 to x86_mmx
%tmp4 = tail call i32 (...)* @pass_v8qi( x86_mmx %tmp3a ) nounwind
ret void ret void
} }
define void @t2(<8 x i8> %v1, <8 x i8> %v2) nounwind { define void @t2(x86_mmx %v1, x86_mmx %v2) nounwind {
%tmp3 = add <8 x i8> %v1, %v2 %v1a = bitcast x86_mmx %v1 to <8 x i8>
%tmp4 = tail call i32 (...)* @pass_v8qi( <8 x i8> %tmp3 ) nounwind %v2b = bitcast x86_mmx %v2 to <8 x i8>
%tmp3 = add <8 x i8> %v1a, %v2b
%tmp3a = bitcast <8 x i8> %tmp3 to x86_mmx
%tmp4 = tail call i32 (...)* @pass_v8qi( x86_mmx %tmp3a ) nounwind
ret void ret void
} }

View File

@ -1,131 +1,309 @@
; RUN: llc < %s -march=x86 -mattr=+mmx ; RUN: llc < %s -march=x86 -mattr=+mmx
;; A basic sanity check to make sure that MMX arithmetic actually compiles. ;; A basic sanity check to make sure that MMX arithmetic actually compiles.
;; First is a straight translation of the original with bitcasts as needed.
define void @foo(<8 x i8>* %A, <8 x i8>* %B) { define void @foo(x86_mmx* %A, x86_mmx* %B) {
entry: entry:
%tmp1 = load <8 x i8>* %A ; <<8 x i8>> [#uses=1] %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
%tmp3 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1] %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp4 = add <8 x i8> %tmp1, %tmp3 ; <<8 x i8>> [#uses=2] %tmp1a = bitcast x86_mmx %tmp1 to <8 x i8>
store <8 x i8> %tmp4, <8 x i8>* %A %tmp3a = bitcast x86_mmx %tmp3 to <8 x i8>
%tmp7 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1] %tmp4 = add <8 x i8> %tmp1a, %tmp3a ; <<8 x i8>> [#uses=2]
%tmp12 = tail call <8 x i8> @llvm.x86.mmx.padds.b( <8 x i8> %tmp4, <8 x i8> %tmp7 ) ; <<8 x i8>> [#uses=2] %tmp4a = bitcast <8 x i8> %tmp4 to x86_mmx
store <8 x i8> %tmp12, <8 x i8>* %A store x86_mmx %tmp4a, x86_mmx* %A
%tmp16 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1] %tmp7 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp21 = tail call <8 x i8> @llvm.x86.mmx.paddus.b( <8 x i8> %tmp12, <8 x i8> %tmp16 ) ; <<8 x i8>> [#uses=2] %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b( x86_mmx %tmp4a, x86_mmx %tmp7 ) ; <x86_mmx> [#uses=2]
store <8 x i8> %tmp21, <8 x i8>* %A store x86_mmx %tmp12, x86_mmx* %A
%tmp27 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1] %tmp16 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp28 = sub <8 x i8> %tmp21, %tmp27 ; <<8 x i8>> [#uses=2] %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b( x86_mmx %tmp12, x86_mmx %tmp16 ) ; <x86_mmx> [#uses=2]
store <8 x i8> %tmp28, <8 x i8>* %A store x86_mmx %tmp21, x86_mmx* %A
%tmp31 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1] %tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp36 = tail call <8 x i8> @llvm.x86.mmx.psubs.b( <8 x i8> %tmp28, <8 x i8> %tmp31 ) ; <<8 x i8>> [#uses=2] %tmp21a = bitcast x86_mmx %tmp21 to <8 x i8>
store <8 x i8> %tmp36, <8 x i8>* %A %tmp27a = bitcast x86_mmx %tmp27 to <8 x i8>
%tmp40 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1] %tmp28 = sub <8 x i8> %tmp21a, %tmp27a ; <<8 x i8>> [#uses=2]
%tmp45 = tail call <8 x i8> @llvm.x86.mmx.psubus.b( <8 x i8> %tmp36, <8 x i8> %tmp40 ) ; <<8 x i8>> [#uses=2] %tmp28a = bitcast <8 x i8> %tmp28 to x86_mmx
store <8 x i8> %tmp45, <8 x i8>* %A store x86_mmx %tmp28a, x86_mmx* %A
%tmp51 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1] %tmp31 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp52 = mul <8 x i8> %tmp45, %tmp51 ; <<8 x i8>> [#uses=2] %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b( x86_mmx %tmp28a, x86_mmx %tmp31 ) ; <x86_mmx> [#uses=2]
store <8 x i8> %tmp52, <8 x i8>* %A store x86_mmx %tmp36, x86_mmx* %A
%tmp57 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1] %tmp40 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp58 = and <8 x i8> %tmp52, %tmp57 ; <<8 x i8>> [#uses=2] %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b( x86_mmx %tmp36, x86_mmx %tmp40 ) ; <x86_mmx> [#uses=2]
store <8 x i8> %tmp58, <8 x i8>* %A store x86_mmx %tmp45, x86_mmx* %A
%tmp63 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1] %tmp51 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp64 = or <8 x i8> %tmp58, %tmp63 ; <<8 x i8>> [#uses=2] %tmp45a = bitcast x86_mmx %tmp45 to <8 x i8>
store <8 x i8> %tmp64, <8 x i8>* %A %tmp51a = bitcast x86_mmx %tmp51 to <8 x i8>
%tmp69 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1] %tmp52 = mul <8 x i8> %tmp45a, %tmp51a ; <<8 x i8>> [#uses=2]
%tmp70 = xor <8 x i8> %tmp64, %tmp69 ; <<8 x i8>> [#uses=1] %tmp52a = bitcast <8 x i8> %tmp52 to x86_mmx
store <8 x i8> %tmp70, <8 x i8>* %A store x86_mmx %tmp52a, x86_mmx* %A
%tmp57 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp57a = bitcast x86_mmx %tmp57 to <8 x i8>
%tmp58 = and <8 x i8> %tmp52, %tmp57a ; <<8 x i8>> [#uses=2]
%tmp58a = bitcast <8 x i8> %tmp58 to x86_mmx
store x86_mmx %tmp58a, x86_mmx* %A
%tmp63 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp63a = bitcast x86_mmx %tmp63 to <8 x i8>
%tmp64 = or <8 x i8> %tmp58, %tmp63a ; <<8 x i8>> [#uses=2]
%tmp64a = bitcast <8 x i8> %tmp64 to x86_mmx
store x86_mmx %tmp64a, x86_mmx* %A
%tmp69 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp69a = bitcast x86_mmx %tmp69 to <8 x i8>
%tmp64b = bitcast x86_mmx %tmp64a to <8 x i8>
%tmp70 = xor <8 x i8> %tmp64b, %tmp69a ; <<8 x i8>> [#uses=1]
%tmp70a = bitcast <8 x i8> %tmp70 to x86_mmx
store x86_mmx %tmp70a, x86_mmx* %A
tail call void @llvm.x86.mmx.emms( ) tail call void @llvm.x86.mmx.emms( )
ret void ret void
} }
define void @baz(<2 x i32>* %A, <2 x i32>* %B) { define void @baz(x86_mmx* %A, x86_mmx* %B) {
entry: entry:
%tmp1 = load <2 x i32>* %A ; <<2 x i32>> [#uses=1] %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
%tmp3 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1] %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp4 = add <2 x i32> %tmp1, %tmp3 ; <<2 x i32>> [#uses=2] %tmp1a = bitcast x86_mmx %tmp1 to <2 x i32>
store <2 x i32> %tmp4, <2 x i32>* %A %tmp3a = bitcast x86_mmx %tmp3 to <2 x i32>
%tmp9 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1] %tmp4 = add <2 x i32> %tmp1a, %tmp3a ; <<2 x i32>> [#uses=2]
%tmp10 = sub <2 x i32> %tmp4, %tmp9 ; <<2 x i32>> [#uses=2] %tmp4a = bitcast <2 x i32> %tmp4 to x86_mmx
store <2 x i32> %tmp10, <2 x i32>* %A store x86_mmx %tmp4a, x86_mmx* %A
%tmp15 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1] %tmp9 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp16 = mul <2 x i32> %tmp10, %tmp15 ; <<2 x i32>> [#uses=2] %tmp9a = bitcast x86_mmx %tmp9 to <2 x i32>
store <2 x i32> %tmp16, <2 x i32>* %A %tmp10 = sub <2 x i32> %tmp4, %tmp9a ; <<2 x i32>> [#uses=2]
%tmp21 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1] %tmp10a = bitcast <2 x i32> %tmp10 to x86_mmx
%tmp22 = and <2 x i32> %tmp16, %tmp21 ; <<2 x i32>> [#uses=2] store x86_mmx %tmp10a, x86_mmx* %A
store <2 x i32> %tmp22, <2 x i32>* %A %tmp15 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp27 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1] %tmp10b = bitcast x86_mmx %tmp10a to <2 x i32>
%tmp28 = or <2 x i32> %tmp22, %tmp27 ; <<2 x i32>> [#uses=2] %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
store <2 x i32> %tmp28, <2 x i32>* %A %tmp16 = mul <2 x i32> %tmp10b, %tmp15a ; <<2 x i32>> [#uses=2]
%tmp33 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1] %tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
%tmp34 = xor <2 x i32> %tmp28, %tmp33 ; <<2 x i32>> [#uses=1] store x86_mmx %tmp16a, x86_mmx* %A
store <2 x i32> %tmp34, <2 x i32>* %A %tmp21 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp16b = bitcast x86_mmx %tmp16a to <2 x i32>
%tmp21a = bitcast x86_mmx %tmp21 to <2 x i32>
%tmp22 = and <2 x i32> %tmp16b, %tmp21a ; <<2 x i32>> [#uses=2]
%tmp22a = bitcast <2 x i32> %tmp22 to x86_mmx
store x86_mmx %tmp22a, x86_mmx* %A
%tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp22b = bitcast x86_mmx %tmp22a to <2 x i32>
%tmp27a = bitcast x86_mmx %tmp27 to <2 x i32>
%tmp28 = or <2 x i32> %tmp22b, %tmp27a ; <<2 x i32>> [#uses=2]
%tmp28a = bitcast <2 x i32> %tmp28 to x86_mmx
store x86_mmx %tmp28a, x86_mmx* %A
%tmp33 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp28b = bitcast x86_mmx %tmp28a to <2 x i32>
%tmp33a = bitcast x86_mmx %tmp33 to <2 x i32>
%tmp34 = xor <2 x i32> %tmp28b, %tmp33a ; <<2 x i32>> [#uses=1]
%tmp34a = bitcast <2 x i32> %tmp34 to x86_mmx
store x86_mmx %tmp34a, x86_mmx* %A
tail call void @llvm.x86.mmx.emms( ) tail call void @llvm.x86.mmx.emms( )
ret void ret void
} }
define void @bar(<4 x i16>* %A, <4 x i16>* %B) { define void @bar(x86_mmx* %A, x86_mmx* %B) {
entry: entry:
%tmp1 = load <4 x i16>* %A ; <<4 x i16>> [#uses=1] %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
%tmp3 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1] %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp4 = add <4 x i16> %tmp1, %tmp3 ; <<4 x i16>> [#uses=2] %tmp1a = bitcast x86_mmx %tmp1 to <4 x i16>
store <4 x i16> %tmp4, <4 x i16>* %A %tmp3a = bitcast x86_mmx %tmp3 to <4 x i16>
%tmp7 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1] %tmp4 = add <4 x i16> %tmp1a, %tmp3a ; <<4 x i16>> [#uses=2]
%tmp12 = tail call <4 x i16> @llvm.x86.mmx.padds.w( <4 x i16> %tmp4, <4 x i16> %tmp7 ) ; <<4 x i16>> [#uses=2] %tmp4a = bitcast <4 x i16> %tmp4 to x86_mmx
store <4 x i16> %tmp12, <4 x i16>* %A store x86_mmx %tmp4a, x86_mmx* %A
%tmp16 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1] %tmp7 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp21 = tail call <4 x i16> @llvm.x86.mmx.paddus.w( <4 x i16> %tmp12, <4 x i16> %tmp16 ) ; <<4 x i16>> [#uses=2] %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w( x86_mmx %tmp4a, x86_mmx %tmp7 ) ; <x86_mmx> [#uses=2]
store <4 x i16> %tmp21, <4 x i16>* %A store x86_mmx %tmp12, x86_mmx* %A
%tmp27 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1] %tmp16 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp28 = sub <4 x i16> %tmp21, %tmp27 ; <<4 x i16>> [#uses=2] %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp12, x86_mmx %tmp16 ) ; <x86_mmx> [#uses=2]
store <4 x i16> %tmp28, <4 x i16>* %A store x86_mmx %tmp21, x86_mmx* %A
%tmp31 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1] %tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp36 = tail call <4 x i16> @llvm.x86.mmx.psubs.w( <4 x i16> %tmp28, <4 x i16> %tmp31 ) ; <<4 x i16>> [#uses=2] %tmp21a = bitcast x86_mmx %tmp21 to <4 x i16>
store <4 x i16> %tmp36, <4 x i16>* %A %tmp27a = bitcast x86_mmx %tmp27 to <4 x i16>
%tmp40 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1] %tmp28 = sub <4 x i16> %tmp21a, %tmp27a ; <<4 x i16>> [#uses=2]
%tmp45 = tail call <4 x i16> @llvm.x86.mmx.psubus.w( <4 x i16> %tmp36, <4 x i16> %tmp40 ) ; <<4 x i16>> [#uses=2] %tmp28a = bitcast <4 x i16> %tmp28 to x86_mmx
store <4 x i16> %tmp45, <4 x i16>* %A store x86_mmx %tmp28a, x86_mmx* %A
%tmp51 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1] %tmp31 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp52 = mul <4 x i16> %tmp45, %tmp51 ; <<4 x i16>> [#uses=2] %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w( x86_mmx %tmp28a, x86_mmx %tmp31 ) ; <x86_mmx> [#uses=2]
store <4 x i16> %tmp52, <4 x i16>* %A store x86_mmx %tmp36, x86_mmx* %A
%tmp55 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1] %tmp40 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp60 = tail call <4 x i16> @llvm.x86.mmx.pmulh.w( <4 x i16> %tmp52, <4 x i16> %tmp55 ) ; <<4 x i16>> [#uses=2] %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w( x86_mmx %tmp36, x86_mmx %tmp40 ) ; <x86_mmx> [#uses=2]
store <4 x i16> %tmp60, <4 x i16>* %A store x86_mmx %tmp45, x86_mmx* %A
%tmp64 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1] %tmp51 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp69 = tail call <2 x i32> @llvm.x86.mmx.pmadd.wd( <4 x i16> %tmp60, <4 x i16> %tmp64 ) ; <<2 x i32>> [#uses=1] %tmp45a = bitcast x86_mmx %tmp45 to <4 x i16>
%tmp70 = bitcast <2 x i32> %tmp69 to <4 x i16> ; <<4 x i16>> [#uses=2] %tmp51a = bitcast x86_mmx %tmp51 to <4 x i16>
store <4 x i16> %tmp70, <4 x i16>* %A %tmp52 = mul <4 x i16> %tmp45a, %tmp51a ; <<4 x i16>> [#uses=2]
%tmp75 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1] %tmp52a = bitcast <4 x i16> %tmp52 to x86_mmx
%tmp76 = and <4 x i16> %tmp70, %tmp75 ; <<4 x i16>> [#uses=2] store x86_mmx %tmp52a, x86_mmx* %A
store <4 x i16> %tmp76, <4 x i16>* %A %tmp55 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp81 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1] %tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w( x86_mmx %tmp52a, x86_mmx %tmp55 ) ; <x86_mmx> [#uses=2]
%tmp82 = or <4 x i16> %tmp76, %tmp81 ; <<4 x i16>> [#uses=2] store x86_mmx %tmp60, x86_mmx* %A
store <4 x i16> %tmp82, <4 x i16>* %A %tmp64 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp87 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1] %tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd( x86_mmx %tmp60, x86_mmx %tmp64 ) ; <x86_mmx> [#uses=1]
%tmp88 = xor <4 x i16> %tmp82, %tmp87 ; <<4 x i16>> [#uses=1] %tmp70 = bitcast x86_mmx %tmp69 to x86_mmx ; <x86_mmx> [#uses=2]
store <4 x i16> %tmp88, <4 x i16>* %A store x86_mmx %tmp70, x86_mmx* %A
%tmp75 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp70a = bitcast x86_mmx %tmp70 to <4 x i16>
%tmp75a = bitcast x86_mmx %tmp75 to <4 x i16>
%tmp76 = and <4 x i16> %tmp70a, %tmp75a ; <<4 x i16>> [#uses=2]
%tmp76a = bitcast <4 x i16> %tmp76 to x86_mmx
store x86_mmx %tmp76a, x86_mmx* %A
%tmp81 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp76b = bitcast x86_mmx %tmp76a to <4 x i16>
%tmp81a = bitcast x86_mmx %tmp81 to <4 x i16>
%tmp82 = or <4 x i16> %tmp76b, %tmp81a ; <<4 x i16>> [#uses=2]
%tmp82a = bitcast <4 x i16> %tmp82 to x86_mmx
store x86_mmx %tmp82a, x86_mmx* %A
%tmp87 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp82b = bitcast x86_mmx %tmp82a to <4 x i16>
%tmp87a = bitcast x86_mmx %tmp87 to <4 x i16>
%tmp88 = xor <4 x i16> %tmp82b, %tmp87a ; <<4 x i16>> [#uses=1]
%tmp88a = bitcast <4 x i16> %tmp88 to x86_mmx
store x86_mmx %tmp88a, x86_mmx* %A
tail call void @llvm.x86.mmx.emms( ) tail call void @llvm.x86.mmx.emms( )
ret void ret void
} }
declare <8 x i8> @llvm.x86.mmx.padds.b(<8 x i8>, <8 x i8>) ;; The following is modified to use MMX intrinsics everywhere they work.
declare <8 x i8> @llvm.x86.mmx.paddus.b(<8 x i8>, <8 x i8>) define void @fooa(x86_mmx* %A, x86_mmx* %B) {
entry:
%tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
%tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.b( x86_mmx %tmp1, x86_mmx %tmp3 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp4, x86_mmx* %A
%tmp7 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b( x86_mmx %tmp4, x86_mmx %tmp7 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp12, x86_mmx* %A
%tmp16 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b( x86_mmx %tmp12, x86_mmx %tmp16 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp21, x86_mmx* %A
%tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp28 = tail call x86_mmx @llvm.x86.mmx.psub.b( x86_mmx %tmp21, x86_mmx %tmp27 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp28, x86_mmx* %A
%tmp31 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b( x86_mmx %tmp28, x86_mmx %tmp31 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp36, x86_mmx* %A
%tmp40 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b( x86_mmx %tmp36, x86_mmx %tmp40 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp45, x86_mmx* %A
%tmp51 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp51a = bitcast x86_mmx %tmp51 to i64
%tmp51aa = bitcast i64 %tmp51a to <8 x i8>
%tmp51b = bitcast x86_mmx %tmp45 to <8 x i8>
%tmp52 = mul <8 x i8> %tmp51b, %tmp51aa ; <<8 x i8>> [#uses=2]
%tmp52a = bitcast <8 x i8> %tmp52 to i64
%tmp52aa = bitcast i64 %tmp52a to x86_mmx
store x86_mmx %tmp52aa, x86_mmx* %A
%tmp57 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp58 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp51, x86_mmx %tmp57 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp58, x86_mmx* %A
%tmp63 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp64 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp58, x86_mmx %tmp63 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp64, x86_mmx* %A
%tmp69 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp70 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp64, x86_mmx %tmp69 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp70, x86_mmx* %A
tail call void @llvm.x86.mmx.emms( )
ret void
}
declare <8 x i8> @llvm.x86.mmx.psubs.b(<8 x i8>, <8 x i8>) define void @baza(x86_mmx* %A, x86_mmx* %B) {
entry:
%tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
%tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.d( x86_mmx %tmp1, x86_mmx %tmp3 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp4, x86_mmx* %A
%tmp9 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp10 = tail call x86_mmx @llvm.x86.mmx.psub.d( x86_mmx %tmp4, x86_mmx %tmp9 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp10, x86_mmx* %A
%tmp15 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp10a = bitcast x86_mmx %tmp10 to <2 x i32>
%tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
%tmp16 = mul <2 x i32> %tmp10a, %tmp15a ; <x86_mmx> [#uses=2]
%tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
store x86_mmx %tmp16a, x86_mmx* %A
%tmp21 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp22 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp16a, x86_mmx %tmp21 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp22, x86_mmx* %A
%tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp28 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp22, x86_mmx %tmp27 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp28, x86_mmx* %A
%tmp33 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp34 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp28, x86_mmx %tmp33 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp34, x86_mmx* %A
tail call void @llvm.x86.mmx.emms( )
ret void
}
-declare <8 x i8> @llvm.x86.mmx.psubus.b(<8 x i8>, <8 x i8>)
define void @bara(x86_mmx* %A, x86_mmx* %B) {
entry:
%tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
%tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.w( x86_mmx %tmp1, x86_mmx %tmp3 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp4, x86_mmx* %A
%tmp7 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w( x86_mmx %tmp4, x86_mmx %tmp7 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp12, x86_mmx* %A
%tmp16 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp12, x86_mmx %tmp16 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp21, x86_mmx* %A
%tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp28 = tail call x86_mmx @llvm.x86.mmx.psub.w( x86_mmx %tmp21, x86_mmx %tmp27 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp28, x86_mmx* %A
%tmp31 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w( x86_mmx %tmp28, x86_mmx %tmp31 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp36, x86_mmx* %A
%tmp40 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w( x86_mmx %tmp36, x86_mmx %tmp40 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp45, x86_mmx* %A
%tmp51 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp52 = tail call x86_mmx @llvm.x86.mmx.pmull.w( x86_mmx %tmp45, x86_mmx %tmp51 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp52, x86_mmx* %A
%tmp55 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w( x86_mmx %tmp52, x86_mmx %tmp55 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp60, x86_mmx* %A
%tmp64 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd( x86_mmx %tmp60, x86_mmx %tmp64 ) ; <x86_mmx> [#uses=1]
%tmp70 = bitcast x86_mmx %tmp69 to x86_mmx ; <x86_mmx> [#uses=2]
store x86_mmx %tmp70, x86_mmx* %A
%tmp75 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp76 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp70, x86_mmx %tmp75 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp76, x86_mmx* %A
%tmp81 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp82 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp76, x86_mmx %tmp81 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp82, x86_mmx* %A
%tmp87 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
%tmp88 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp82, x86_mmx %tmp87 ) ; <x86_mmx> [#uses=2]
store x86_mmx %tmp88, x86_mmx* %A
tail call void @llvm.x86.mmx.emms( )
ret void
}
-declare <4 x i16> @llvm.x86.mmx.padds.w(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.x86.mmx.paddus.w(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.x86.mmx.psubs.w(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.x86.mmx.psubus.w(<4 x i16>, <4 x i16>)
-declare <4 x i16> @llvm.x86.mmx.pmulh.w(<4 x i16>, <4 x i16>)
-declare <2 x i32> @llvm.x86.mmx.pmadd.wd(<4 x i16>, <4 x i16>)
declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx, x86_mmx)
declare void @llvm.x86.mmx.emms()
declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padds.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padds.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padds.d(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psubs.d(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psub.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psub.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.psub.d(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.pmull.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.pand(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.por(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.pxor(x86_mmx, x86_mmx)
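;; A hypothetical sketch (not part of the original test; the name @sketch is
;; made up) of the pattern @fooa, @baza, and @bara exercise above: generic
;; vector arithmetic is kept in <2 x i32> form, and values are bitcast to
;; x86_mmx only at the intrinsic and load/store boundaries. It reuses the
;; declarations directly above.
define void @sketch(x86_mmx* %A, x86_mmx* %B) {
entry:
%a = load x86_mmx* %A
%b = load x86_mmx* %B
%a32 = bitcast x86_mmx %a to <2 x i32>
%b32 = bitcast x86_mmx %b to <2 x i32>
%sum = add <2 x i32> %a32, %b32 ; generic IR, open to the optimizers
%sum.mmx = bitcast <2 x i32> %sum to x86_mmx ; back to x86_mmx for the intrinsic
%r = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %sum.mmx, x86_mmx %b )
store x86_mmx %r, x86_mmx* %A
tail call void @llvm.x86.mmx.emms( )
ret void
}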


@ -1,26 +1,31 @@
; RUN: llc < %s -march=x86-64 | grep movd | count 4
-define i64 @foo(<1 x i64>* %p) {
-%t = load <1 x i64>* %p
-%u = add <1 x i64> %t, %t
-%s = bitcast <1 x i64> %u to i64
define i64 @foo(x86_mmx* %p) {
%t = load x86_mmx* %p
%u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %t)
%s = bitcast x86_mmx %u to i64
ret i64 %s
}
-define i64 @goo(<2 x i32>* %p) {
-%t = load <2 x i32>* %p
-%u = add <2 x i32> %t, %t
-%s = bitcast <2 x i32> %u to i64
define i64 @goo(x86_mmx* %p) {
%t = load x86_mmx* %p
%u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %t)
%s = bitcast x86_mmx %u to i64
ret i64 %s
}
-define i64 @hoo(<4 x i16>* %p) {
-%t = load <4 x i16>* %p
-%u = add <4 x i16> %t, %t
-%s = bitcast <4 x i16> %u to i64
define i64 @hoo(x86_mmx* %p) {
%t = load x86_mmx* %p
%u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %t)
%s = bitcast x86_mmx %u to i64
ret i64 %s
}
-define i64 @ioo(<8 x i8>* %p) {
-%t = load <8 x i8>* %p
-%u = add <8 x i8> %t, %t
-%s = bitcast <8 x i8> %u to i64
define i64 @ioo(x86_mmx* %p) {
%t = load x86_mmx* %p
%u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %t)
%s = bitcast x86_mmx %u to i64
ret i64 %s
}
declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)


@ -1,7 +1,9 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx | not grep movq
-; RUN: llc < %s -march=x86 -mattr=+mmx | grep psllq
; RUN: llc < %s -march=x86 -mattr=+mmx,+sse | grep movq
; RUN: llc < %s -march=x86 -mattr=+mmx,+sse | grep pshufd
; This is not an MMX operation; promoted to XMM.
-define <2 x i32> @qux(i32 %A) nounwind {
define x86_mmx @qux(i32 %A) nounwind {
%tmp3 = insertelement <2 x i32> < i32 0, i32 undef >, i32 %A, i32 1 ; <<2 x i32>> [#uses=1]
-ret <2 x i32> %tmp3
%tmp4 = bitcast <2 x i32> %tmp3 to x86_mmx
ret x86_mmx %tmp4
}
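;; A hypothetical contrast (not part of the test; the name @qux_mmx is made
;; up): per the comment above, the insertelement in @qux is promoted to XMM.
;; Routing the result through an x86_mmx intrinsic, as the MMX tests elsewhere
;; in this commit do, is what actually produces an MMX operation.
define x86_mmx @qux_mmx(i32 %A) nounwind {
%tmp3 = insertelement <2 x i32> < i32 0, i32 undef >, i32 %A, i32 1
%tmp4 = bitcast <2 x i32> %tmp3 to x86_mmx
%tmp5 = tail call x86_mmx @llvm.x86.mmx.padd.d( x86_mmx %tmp4, x86_mmx %tmp4 )
ret x86_mmx %tmp5
}
declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)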


@ -1,4 +1,6 @@
; RUN: llc < %s -march=x86 -mattr=+mmx | grep pextrd
; RUN: llc < %s -march=x86 -mattr=+mmx | grep punpckhdq | count 1
; There are no MMX operations in bork; promoted to XMM.
define void @bork(<1 x i64>* %x) {
entry:
@ -11,4 +13,16 @@ entry:
ret void
}
; pork uses MMX.
define void @pork(x86_mmx* %x) {
entry:
%tmp2 = load x86_mmx* %x ; <x86_mmx> [#uses=1]
%tmp9 = tail call x86_mmx @llvm.x86.mmx.punpckhdq (x86_mmx %tmp2, x86_mmx %tmp2)
store x86_mmx %tmp9, x86_mmx* %x
tail call void @llvm.x86.mmx.emms( )
ret void
}
declare x86_mmx @llvm.x86.mmx.punpckhdq(x86_mmx, x86_mmx)
declare void @llvm.x86.mmx.emms()
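;; A hypothetical generic-IR counterpart to @pork (not part of the test; the
;; name @pork_generic is made up): the same unpack written as a shufflevector.
;; Like @bork above, it contains no MMX operations and is promoted to XMM.
define void @pork_generic(<2 x i32>* %x) {
entry:
%t = load <2 x i32>* %x
%hi = shufflevector <2 x i32> %t, <2 x i32> %t, <2 x i32> < i32 1, i32 3 >
store <2 x i32> %hi, <2 x i32>* %x
ret void
}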


@ -5,28 +5,28 @@
define i64 @t1(<1 x i64> %mm1) nounwind {
entry:
-%tmp6 = tail call <1 x i64> @llvm.x86.mmx.pslli.q( <1 x i64> %mm1, i32 32 ) ; <<1 x i64>> [#uses=1]
-%retval1112 = bitcast <1 x i64> %tmp6 to i64 ; <i64> [#uses=1]
%tmp = bitcast <1 x i64> %mm1 to x86_mmx
%tmp6 = tail call x86_mmx @llvm.x86.mmx.pslli.q( x86_mmx %tmp, i32 32 ) ; <x86_mmx> [#uses=1]
%retval1112 = bitcast x86_mmx %tmp6 to i64
ret i64 %retval1112
}
-declare <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64>, i32) nounwind readnone
declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32) nounwind readnone
-define i64 @t2(<2 x i32> %mm1, <2 x i32> %mm2) nounwind {
define i64 @t2(x86_mmx %mm1, x86_mmx %mm2) nounwind {
entry:
-%tmp7 = tail call <2 x i32> @llvm.x86.mmx.psra.d( <2 x i32> %mm1, <2 x i32> %mm2 ) nounwind readnone ; <<2 x i32>> [#uses=1]
-%retval1112 = bitcast <2 x i32> %tmp7 to i64 ; <i64> [#uses=1]
%tmp7 = tail call x86_mmx @llvm.x86.mmx.psra.d( x86_mmx %mm1, x86_mmx %mm2 ) nounwind readnone ; <x86_mmx> [#uses=1]
%retval1112 = bitcast x86_mmx %tmp7 to i64
ret i64 %retval1112
}
-declare <2 x i32> @llvm.x86.mmx.psra.d(<2 x i32>, <2 x i32>) nounwind readnone
declare x86_mmx @llvm.x86.mmx.psra.d(x86_mmx, x86_mmx) nounwind readnone
-define i64 @t3(<1 x i64> %mm1, i32 %bits) nounwind {
define i64 @t3(x86_mmx %mm1, i32 %bits) nounwind {
entry:
-%tmp6 = bitcast <1 x i64> %mm1 to <4 x i16> ; <<4 x i16>> [#uses=1]
-%tmp8 = tail call <4 x i16> @llvm.x86.mmx.psrli.w( <4 x i16> %tmp6, i32 %bits ) nounwind readnone ; <<4 x i16>> [#uses=1]
-%retval1314 = bitcast <4 x i16> %tmp8 to i64 ; <i64> [#uses=1]
%tmp8 = tail call x86_mmx @llvm.x86.mmx.psrli.w( x86_mmx %mm1, i32 %bits ) nounwind readnone ; <x86_mmx> [#uses=1]
%retval1314 = bitcast x86_mmx %tmp8 to i64
ret i64 %retval1314
}
-declare <4 x i16> @llvm.x86.mmx.psrli.w(<4 x i16>, i32) nounwind readnone
declare x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx, i32) nounwind readnone


@ -22,8 +22,10 @@ entry:
%tmp542 = bitcast <2 x i32> %tmp529 to <4 x i16> ; <<4 x i16>> [#uses=1]
%tmp543 = add <4 x i16> %tmp542, < i16 0, i16 16448, i16 24672, i16 28784 > ; <<4 x i16>> [#uses=1]
%tmp555 = bitcast <4 x i16> %tmp543 to <8 x i8> ; <<8 x i8>> [#uses=1]
-tail call void @llvm.x86.mmx.maskmovq( <8 x i8> zeroinitializer, <8 x i8> %tmp555, i8* null )
%tmp556 = bitcast <8 x i8> %tmp555 to x86_mmx
%tmp557 = bitcast <8 x i8> zeroinitializer to x86_mmx
tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp557, x86_mmx %tmp556, i8* null )
ret void
}
-declare void @llvm.x86.mmx.maskmovq(<8 x i8>, <8 x i8>, i8*)
declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*)


@ -4,7 +4,7 @@
%struct.vS1024 = type { [8 x <4 x i32>] }
%struct.vS512 = type { [4 x <4 x i32>] }
-declare <1 x i64> @llvm.x86.mmx.psrli.q(<1 x i64>, i32) nounwind readnone
declare x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx, i32) nounwind readnone
define void @t() nounwind {
entry:
@ -12,14 +12,18 @@ entry:
bb554: ; preds = %bb554, %entry
%sum.0.reg2mem.0 = phi <1 x i64> [ %tmp562, %bb554 ], [ zeroinitializer, %entry ] ; <<1 x i64>> [#uses=1]
-%0 = load <1 x i64>* null, align 8 ; <<1 x i64>> [#uses=2]
-%1 = bitcast <1 x i64> %0 to <2 x i32> ; <<2 x i32>> [#uses=1]
%0 = load x86_mmx* null, align 8 ; <<1 x i64>> [#uses=2]
%1 = bitcast x86_mmx %0 to <2 x i32> ; <<2 x i32>> [#uses=1]
%tmp555 = and <2 x i32> %1, < i32 -1, i32 0 > ; <<2 x i32>> [#uses=1]
-%2 = bitcast <2 x i32> %tmp555 to <1 x i64> ; <<1 x i64>> [#uses=1]
-%3 = call <1 x i64> @llvm.x86.mmx.psrli.q(<1 x i64> %0, i32 32) nounwind readnone ; <<1 x i64>> [#uses=1]
%2 = bitcast <2 x i32> %tmp555 to x86_mmx ; <<1 x i64>> [#uses=1]
%3 = call x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx %0, i32 32) nounwind readnone ; <<1 x i64>> [#uses=1]
store <1 x i64> %sum.0.reg2mem.0, <1 x i64>* null
-%tmp558 = add <1 x i64> %sum.0.reg2mem.0, %2 ; <<1 x i64>> [#uses=1]
-%4 = call <1 x i64> @llvm.x86.mmx.psrli.q(<1 x i64> %tmp558, i32 32) nounwind readnone ; <<1 x i64>> [#uses=1]
-%tmp562 = add <1 x i64> %4, %3 ; <<1 x i64>> [#uses=1]
%tmp3 = bitcast x86_mmx %2 to <1 x i64>
%tmp558 = add <1 x i64> %sum.0.reg2mem.0, %tmp3 ; <<1 x i64>> [#uses=1]
%tmp5 = bitcast <1 x i64> %tmp558 to x86_mmx
%4 = call x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx %tmp5, i32 32) nounwind readnone ; <<1 x i64>> [#uses=1]
%tmp6 = bitcast x86_mmx %4 to <1 x i64>
%tmp7 = bitcast x86_mmx %3 to <1 x i64>
%tmp562 = add <1 x i64> %tmp6, %tmp7 ; <<1 x i64>> [#uses=1]
br label %bb554
}


@ -1,5 +1,5 @@
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep movd
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep movq
; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep movq | count 2
; There are no MMX operations here; this is promoted to XMM.
define void @foo(<1 x i64>* %a, <1 x i64>* %b) nounwind {
entry:


@ -1,15 +1,16 @@
; RUN: llc < %s -march=x86 -mattr=+sse2 > %t
-; RUN: grep psllq %t | grep 32
; RUN: grep shll %t | grep 12
; RUN: grep pslldq %t | grep 12
; RUN: grep psrldq %t | grep 8
; RUN: grep psrldq %t | grep 12
; There are no MMX operations in @t1
-define void @t1(i32 %a, <1 x i64>* %P) nounwind {
define void @t1(i32 %a, x86_mmx* %P) nounwind {
%tmp12 = shl i32 %a, 12
%tmp21 = insertelement <2 x i32> undef, i32 %tmp12, i32 1
%tmp22 = insertelement <2 x i32> %tmp21, i32 0, i32 0
-%tmp23 = bitcast <2 x i32> %tmp22 to <1 x i64>
-store <1 x i64> %tmp23, <1 x i64>* %P
%tmp23 = bitcast <2 x i32> %tmp22 to x86_mmx
store x86_mmx %tmp23, x86_mmx* %P
ret void
}


@ -1,8 +1,12 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx -mtriple=i686-apple-darwin9 -o - | grep punpckldq
; RUN: llc < %s -march=x86 -mattr=+mmx,+sse -mtriple=i686-apple-darwin9 -o - | grep pinsrd | count 2
; MMX insertelement is not available; these are promoted to XMM.
; (Without SSE they are split to two ints, and the code is much better.)
-define <2 x i32> @mmx_movzl(<2 x i32> %x) nounwind {
define x86_mmx @mmx_movzl(x86_mmx %x) nounwind {
entry:
-%tmp3 = insertelement <2 x i32> %x, i32 32, i32 0 ; <<2 x i32>> [#uses=1]
%tmp = bitcast x86_mmx %x to <2 x i32>
%tmp3 = insertelement <2 x i32> %tmp, i32 32, i32 0 ; <<2 x i32>> [#uses=1]
%tmp8 = insertelement <2 x i32> %tmp3, i32 0, i32 1 ; <<2 x i32>> [#uses=1]
-ret <2 x i32> %tmp8
%tmp9 = bitcast <2 x i32> %tmp8 to x86_mmx
ret x86_mmx %tmp9
}


@ -1,5 +1,6 @@
-; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | grep pxor | count 2
-; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | grep pcmpeqd | count 2
; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | grep pxor | count 1
; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | grep pcmpeqd | count 1
; 64-bit stores here do not use MMX.
@M1 = external global <1 x i64>
@M2 = external global <2 x i32>


@ -368,12 +368,6 @@ RecognizableInstr::filter_ret RecognizableInstr::filter() const {
(Name.find("to") != Name.npos)))
return FILTER_WEAK;
-// Filter out the intrinsic form of instructions that also have an llvm
-// operator form. FIXME this is temporary.
-if (Name.find("irm") != Name.npos ||
-    Name.find("irr") != Name.npos)
-  return FILTER_WEAK;
return FILTER_NORMAL;
}