AVX 256-bit conversion instructions

Add the x86 VEX_L form to handle special cases where VEX_L must be set.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@108274 91177308-0d34-0410-b5e6-96231b3b80d8
Bruno Cardoso Lopes 2010-07-13 21:07:28 +00:00
parent db20d257bc
commit 87a85c7ef0
6 changed files with 313 additions and 15 deletions
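Background (editor's note, not part of the patch): VEX.L is the vector-length bit of the AVX VEX prefix. L = 0 selects 128-bit (xmm) operation and L = 1 selects 256-bit (ymm) operation, which is what the new VEX_L flag below ultimately controls. In the two-byte form of the prefix (C5 xx) the bit sits at bit 2 of the byte following C5. A minimal sketch of that byte layout, with an illustrative function name that is not part of LLVM:

#include <cstdint>

// Second byte of a 2-byte VEX prefix (0xC5 <byte>); field layout per the
// Intel manual. NotR and NotVVVV are the bit-inverted REX.R and vvvv fields.
static uint8_t vex2Byte1(uint8_t NotR, uint8_t NotVVVV, uint8_t L, uint8_t PP) {
  // bit 7: ~R   bits 6-3: ~vvvv   bit 2: L (0 = 128-bit, 1 = 256-bit)
  // bits 1-0: pp (implied 66/F3/F2 prefix)
  return (uint8_t)((NotR << 7) | (NotVVVV << 3) | (L << 2) | PP);
}

For example, vex2Byte1(1, 0xF, 1, 0) yields 0xFC, which is the byte that shows up after C5 in the ymm vcvtdq2ps encodings added to the tests below (c5 fc 5b ...).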

View File

@@ -106,6 +106,7 @@ class VEX { bit hasVEXPrefix = 1; }
class VEX_W { bit hasVEX_WPrefix = 1; }
class VEX_4V : VEX { bit hasVEX_4VPrefix = 1; }
class VEX_I8IMM { bit hasVEX_i8ImmReg = 1; }
class VEX_L { bit hasVEX_L = 1; }
class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
string AsmStr, Domain d = GenericDomain>
@@ -138,6 +139,7 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
bit hasVEX_4VPrefix = 0; // Does this inst require the VEX.VVVV field?
bit hasVEX_i8ImmReg = 0; // Does this inst require the last source register
// to be encoded in an immediate field?
bit hasVEX_L = 0; // Does this inst use large (256-bit) registers?
// TSFlags layout should be kept in sync with X86InstrInfo.h.
let TSFlags{5-0} = FormBits;
@@ -155,6 +157,7 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
let TSFlags{33} = hasVEX_WPrefix;
let TSFlags{34} = hasVEX_4VPrefix;
let TSFlags{35} = hasVEX_i8ImmReg;
let TSFlags{36} = hasVEX_L;
}
class I<bits<8> o, Format f, dag outs, dag ins, string asm,

View File

@@ -453,7 +453,13 @@ namespace X86II {
// VEX_I8IMM - Specifies that the last register used in an AVX instruction
// must be encoded in the i8 immediate field. This usually happens in
// instructions with 4 operands.
VEX_I8IMM = 1ULL << 35,
// VEX_L - Stands for a bit in the VEX opcode prefix meaning the current
// instruction uses 256-bit wide registers. This is usually auto-detected if
// a VR256 register is used, but some AVX instructions also have this field
// marked when using an f256 memory reference.
VEX_L = 1ULL << 36
};
// getBaseOpcodeFor - This function returns the "base" X86 opcode for the
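Editor's note: the 1ULL << 36 mask added here must stay in sync with the let TSFlags{36} = hasVEX_L assignment in X86InstrFormats.td above. A minimal sketch of the test a consumer performs, using an illustrative local constant rather than the real X86II enum:

#include <cstdint>

// Mirrors the flag introduced in this hunk: bit 36 of the TSFlags word.
constexpr uint64_t kVEX_L = 1ULL << 36;

// True when the instruction must be encoded with VEX.L = 1 (256-bit).
inline bool hasVEX_L(uint64_t TSFlags) { return (TSFlags & kVEX_L) != 0; }

The code emitter change further down performs exactly this check against X86II::VEX_L before setting the L bit in the prefix.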

View File

@@ -666,6 +666,9 @@ defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, f128mem, load,
"cvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle>, TB, VEX;
defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, f256mem, load,
"cvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle>, TB, VEX;
}
let Pattern = []<dag> in {
defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
@@ -806,9 +809,13 @@ def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
// Convert packed single/double fp to doubleword
let isAsmParserOnly = 1 in {
def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
"cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
"cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}", []>;
@@ -862,6 +869,10 @@ def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}", []>;
@@ -912,14 +923,39 @@ def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
[(set VR128:$dst, (int_x86_sse2_cvttpd2dq
(memop addr:$src)))]>;
let isAsmParserOnly = 1 in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
"cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
// XMM only
def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
"cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
"cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
}
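Editor's note on the comment above: a ymm register operand is enough for the assembler to pick the 256-bit rr form, but a bare memory operand has no size, so vcvttpd2dq (%eax), %xmm1 would be ambiguous between a 128-bit and a 256-bit load. The explicit vcvttpd2dqx / vcvttpd2dqy mnemonics defined here resolve that, and both spellings are exercised in the encoding tests at the end of this commit. The same pattern repeats below for cvtpd2ps and cvtpd2dq.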
// Convert packed single to packed double
let isAsmParserOnly = 1, Predicates = [HasAVX] in {
// SSE2 instructions without OpSize prefix
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX,
Requires<[HasAVX]>;
"vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX,
Requires<[HasAVX]>;
"vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
@@ -949,10 +985,25 @@ def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
// Convert packed double to packed single
let isAsmParserOnly = 1 in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
// FIXME: the memory form of this instruction should be described using
// extra asm syntax
def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
// XMM only
def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
// YMM only
def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
"cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
"cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
}
def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
@@ -2935,19 +2986,46 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
// SSE3 - Conversion Instructions
//===---------------------------------------------------------------------===//
// Convert Packed Double FP to Packed DW Integers
let isAsmParserOnly = 1, Predicates = [HasAVX] in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
"vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
// XMM only
def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
// YMM only
def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
"vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
"vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
}
def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
// Convert Packed DW Integers to Packed Double FP
let isAsmParserOnly = 1, Predicates = [HasAVX] in {
def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),

View File

@@ -432,6 +432,9 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
if (TSFlags & X86II::VEX_W)
VEX_W = 1;
if (TSFlags & X86II::VEX_L)
VEX_L = 1;
switch (TSFlags & X86II::Op0Mask) {
default: assert(0 && "Invalid prefix!");
case X86II::T8: // 0F 38

View File

@@ -12598,3 +12598,107 @@
// CHECK: encoding: [0xc5,0xed,0x55,0x6c,0xcb,0xfc]
vandnpd -4(%ebx,%ecx,8), %ymm2, %ymm5
// CHECK: vcvtps2pd %xmm3, %ymm2
// CHECK: encoding: [0xc5,0xfc,0x5a,0xd3]
vcvtps2pd %xmm3, %ymm2
// CHECK: vcvtps2pd (%eax), %ymm2
// CHECK: encoding: [0xc5,0xfc,0x5a,0x10]
vcvtps2pd (%eax), %ymm2
// CHECK: vcvtdq2pd %xmm3, %ymm2
// CHECK: encoding: [0xc5,0xfe,0xe6,0xd3]
vcvtdq2pd %xmm3, %ymm2
// CHECK: vcvtdq2pd (%eax), %ymm2
// CHECK: encoding: [0xc5,0xfe,0xe6,0x10]
vcvtdq2pd (%eax), %ymm2
// CHECK: vcvtdq2ps %ymm2, %ymm5
// CHECK: encoding: [0xc5,0xfc,0x5b,0xea]
vcvtdq2ps %ymm2, %ymm5
// CHECK: vcvtdq2ps (%eax), %ymm2
// CHECK: encoding: [0xc5,0xfc,0x5b,0x10]
vcvtdq2ps (%eax), %ymm2
// CHECK: vcvtps2dq %ymm2, %ymm5
// CHECK: encoding: [0xc5,0xfd,0x5b,0xea]
vcvtps2dq %ymm2, %ymm5
// CHECK: vcvtps2dq (%eax), %ymm5
// CHECK: encoding: [0xc5,0xfd,0x5b,0x28]
vcvtps2dq (%eax), %ymm5
// CHECK: vcvttps2dq %ymm2, %ymm5
// CHECK: encoding: [0xc5,0xfe,0x5b,0xea]
vcvttps2dq %ymm2, %ymm5
// CHECK: vcvttps2dq (%eax), %ymm5
// CHECK: encoding: [0xc5,0xfe,0x5b,0x28]
vcvttps2dq (%eax), %ymm5
// CHECK: vcvttpd2dq %xmm1, %xmm5
// CHECK: encoding: [0xc5,0xf9,0xe6,0xe9]
vcvttpd2dq %xmm1, %xmm5
// CHECK: vcvttpd2dq %ymm2, %xmm5
// CHECK: encoding: [0xc5,0xfd,0xe6,0xea]
vcvttpd2dq %ymm2, %xmm5
// CHECK: vcvttpd2dqx %xmm1, %xmm5
// CHECK: encoding: [0xc5,0xf9,0xe6,0xe9]
vcvttpd2dqx %xmm1, %xmm5
// CHECK: vcvttpd2dqx (%eax), %xmm1
// CHECK: encoding: [0xc5,0xf9,0xe6,0x08]
vcvttpd2dqx (%eax), %xmm1
// CHECK: vcvttpd2dqy %ymm2, %xmm1
// CHECK: encoding: [0xc5,0xfd,0xe6,0xca]
vcvttpd2dqy %ymm2, %xmm1
// CHECK: vcvttpd2dqy (%eax), %xmm1
// CHECK: encoding: [0xc5,0xfd,0xe6,0x08]
vcvttpd2dqy (%eax), %xmm1
// CHECK: vcvtpd2ps %ymm2, %xmm5
// CHECK: encoding: [0xc5,0xfd,0x5a,0xea]
vcvtpd2ps %ymm2, %xmm5
// CHECK: vcvtpd2psx %xmm1, %xmm5
// CHECK: encoding: [0xc5,0xf9,0x5a,0xe9]
vcvtpd2psx %xmm1, %xmm5
// CHECK: vcvtpd2psx (%eax), %xmm1
// CHECK: encoding: [0xc5,0xf9,0x5a,0x08]
vcvtpd2psx (%eax), %xmm1
// CHECK: vcvtpd2psy %ymm2, %xmm1
// CHECK: encoding: [0xc5,0xfd,0x5a,0xca]
vcvtpd2psy %ymm2, %xmm1
// CHECK: vcvtpd2psy (%eax), %xmm1
// CHECK: encoding: [0xc5,0xfd,0x5a,0x08]
vcvtpd2psy (%eax), %xmm1
// CHECK: vcvtpd2dq %ymm2, %xmm5
// CHECK: encoding: [0xc5,0xff,0xe6,0xea]
vcvtpd2dq %ymm2, %xmm5
// CHECK: vcvtpd2dqy %ymm2, %xmm1
// CHECK: encoding: [0xc5,0xff,0xe6,0xca]
vcvtpd2dqy %ymm2, %xmm1
// CHECK: vcvtpd2dqy (%eax), %xmm1
// CHECK: encoding: [0xc5,0xff,0xe6,0x08]
vcvtpd2dqy (%eax), %xmm1
// CHECK: vcvtpd2dqx %xmm1, %xmm5
// CHECK: encoding: [0xc5,0xfb,0xe6,0xe9]
vcvtpd2dqx %xmm1, %xmm5
// CHECK: vcvtpd2dqx (%eax), %xmm1
// CHECK: encoding: [0xc5,0xfb,0xe6,0x08]
vcvtpd2dqx (%eax), %xmm1

View File

@@ -2672,3 +2672,107 @@ pshufb CPI1_0(%rip), %xmm1
// CHECK: encoding: [0xc5,0x1d,0x55,0x54,0xcb,0xfc]
vandnpd -4(%rbx,%rcx,8), %ymm12, %ymm10
// CHECK: vcvtps2pd %xmm13, %ymm12
// CHECK: encoding: [0xc4,0x41,0x7c,0x5a,0xe5]
vcvtps2pd %xmm13, %ymm12
// CHECK: vcvtps2pd (%rax), %ymm12
// CHECK: encoding: [0xc5,0x7c,0x5a,0x20]
vcvtps2pd (%rax), %ymm12
// CHECK: vcvtdq2pd %xmm13, %ymm12
// CHECK: encoding: [0xc4,0x41,0x7e,0xe6,0xe5]
vcvtdq2pd %xmm13, %ymm12
// CHECK: vcvtdq2pd (%rax), %ymm12
// CHECK: encoding: [0xc5,0x7e,0xe6,0x20]
vcvtdq2pd (%rax), %ymm12
// CHECK: vcvtdq2ps %ymm12, %ymm10
// CHECK: encoding: [0xc4,0x41,0x7c,0x5b,0xd4]
vcvtdq2ps %ymm12, %ymm10
// CHECK: vcvtdq2ps (%rax), %ymm12
// CHECK: encoding: [0xc5,0x7c,0x5b,0x20]
vcvtdq2ps (%rax), %ymm12
// CHECK: vcvtps2dq %ymm12, %ymm10
// CHECK: encoding: [0xc4,0x41,0x7d,0x5b,0xd4]
vcvtps2dq %ymm12, %ymm10
// CHECK: vcvtps2dq (%rax), %ymm10
// CHECK: encoding: [0xc5,0x7d,0x5b,0x10]
vcvtps2dq (%rax), %ymm10
// CHECK: vcvttps2dq %ymm12, %ymm10
// CHECK: encoding: [0xc4,0x41,0x7e,0x5b,0xd4]
vcvttps2dq %ymm12, %ymm10
// CHECK: vcvttps2dq (%rax), %ymm10
// CHECK: encoding: [0xc5,0x7e,0x5b,0x10]
vcvttps2dq (%rax), %ymm10
// CHECK: vcvttpd2dq %xmm11, %xmm10
// CHECK: encoding: [0xc4,0x41,0x79,0xe6,0xd3]
vcvttpd2dq %xmm11, %xmm10
// CHECK: vcvttpd2dq %ymm12, %xmm10
// CHECK: encoding: [0xc4,0x41,0x7d,0xe6,0xd4]
vcvttpd2dq %ymm12, %xmm10
// CHECK: vcvttpd2dqx %xmm11, %xmm10
// CHECK: encoding: [0xc4,0x41,0x79,0xe6,0xd3]
vcvttpd2dqx %xmm11, %xmm10
// CHECK: vcvttpd2dqx (%rax), %xmm11
// CHECK: encoding: [0xc5,0x79,0xe6,0x18]
vcvttpd2dqx (%rax), %xmm11
// CHECK: vcvttpd2dqy %ymm12, %xmm11
// CHECK: encoding: [0xc4,0x41,0x7d,0xe6,0xdc]
vcvttpd2dqy %ymm12, %xmm11
// CHECK: vcvttpd2dqy (%rax), %xmm11
// CHECK: encoding: [0xc5,0x7d,0xe6,0x18]
vcvttpd2dqy (%rax), %xmm11
// CHECK: vcvtpd2ps %ymm12, %xmm10
// CHECK: encoding: [0xc4,0x41,0x7d,0x5a,0xd4]
vcvtpd2ps %ymm12, %xmm10
// CHECK: vcvtpd2psx %xmm11, %xmm10
// CHECK: encoding: [0xc4,0x41,0x79,0x5a,0xd3]
vcvtpd2psx %xmm11, %xmm10
// CHECK: vcvtpd2psx (%rax), %xmm11
// CHECK: encoding: [0xc5,0x79,0x5a,0x18]
vcvtpd2psx (%rax), %xmm11
// CHECK: vcvtpd2psy %ymm12, %xmm11
// CHECK: encoding: [0xc4,0x41,0x7d,0x5a,0xdc]
vcvtpd2psy %ymm12, %xmm11
// CHECK: vcvtpd2psy (%rax), %xmm11
// CHECK: encoding: [0xc5,0x7d,0x5a,0x18]
vcvtpd2psy (%rax), %xmm11
// CHECK: vcvtpd2dq %ymm12, %xmm10
// CHECK: encoding: [0xc4,0x41,0x7f,0xe6,0xd4]
vcvtpd2dq %ymm12, %xmm10
// CHECK: vcvtpd2dqy %ymm12, %xmm11
// CHECK: encoding: [0xc4,0x41,0x7f,0xe6,0xdc]
vcvtpd2dqy %ymm12, %xmm11
// CHECK: vcvtpd2dqy (%rax), %xmm11
// CHECK: encoding: [0xc5,0x7f,0xe6,0x18]
vcvtpd2dqy (%rax), %xmm11
// CHECK: vcvtpd2dqx %xmm11, %xmm10
// CHECK: encoding: [0xc4,0x41,0x7b,0xe6,0xd3]
vcvtpd2dqx %xmm11, %xmm10
// CHECK: vcvtpd2dqx (%rax), %xmm11
// CHECK: encoding: [0xc5,0x7b,0xe6,0x18]
vcvtpd2dqx (%rax), %xmm11