[x86] Teach both sext and zext vector tests to cover a nice wide range
of architectures: SSE2, SSSE3, SSE4.1, AVX, and AVX2.

Unfortunately, this exposes the absolute horror of the code we generate
for many of these patterns. Anyone wanting to familiarize themselves
with the x86 backend and improve performance could do a lot of good
sitting down and making these test cases not look so terrible. While the
new vector shuffle code I'm working on will help some, it won't fix all
of the crimes here.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218807 91177308-0d34-0410-b5e6-96231b3b80d8
Chandler Carruth 2014-10-01 20:41:36 +00:00
parent c9038d9c1b
commit 3916c2642d
2 changed files with 662 additions and 184 deletions
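
As context for the diffs below: each RUN line compiles the same IR through llc for one target configuration, and the --check-prefix flags select which CHECK lines FileCheck enforces, so assertions shared by every SSE level can be written once under the SSE prefix while level-specific codegen lives under SSE2, SSSE3, or SSE41. A minimal sketch of that layering, using a hypothetical function name (illustrative only, not part of this commit):

; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
define <4 x i32> @sext_example(<4 x i16>* %ptr) {
; SSE4.1 sign-extends the loaded words with a single instruction:
; SSE41: pmovsxwd (%rdi), %xmm0
; Pre-SSE4.1 targets unpack the words and shift them back down instead:
; SSSE3: psrad $16, %xmm0
; A check under the shared SSE prefix fires for both RUN lines above:
; SSE: retq
  %v = load <4 x i16>* %ptr          ; pre-3.7 load syntax, matching the era of this test
  %s = sext <4 x i16> %v to <4 x i32>
  ret <4 x i32> %s
}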


@@ -1,20 +1,44 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=pentium4 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core2 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
define <8 x i32> @sext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE-LABEL: sext_8i16_to_8i32:
; SSE: ## BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: ## kill: XMM0<def> XMM1<kill>
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: pslld $16, %xmm0
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT: pslld $16, %xmm1
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: retq
; SSE2-LABEL: sext_8i16_to_8i32:
; SSE2: ## BB#0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: ## kill: XMM0<def> XMM1<kill>
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $16, %xmm0
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pslld $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_8i16_to_8i32:
; SSSE3: ## BB#0:
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: ## kill: XMM0<def> XMM1<kill>
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $16, %xmm0
; SSSE3-NEXT: psrad $16, %xmm0
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: pslld $16, %xmm1
; SSSE3-NEXT: psrad $16, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_8i16_to_8i32:
; SSE41: ## BB#0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pmovzxwd %xmm1, %xmm0
; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE41-NEXT: pslld $16, %xmm1
; SSE41-NEXT: psrad $16, %xmm1
; SSE41-NEXT: pslld $16, %xmm0
; SSE41-NEXT: psrad $16, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_8i16_to_8i32:
; AVX1: ## BB#0:
@@ -34,28 +58,72 @@ define <8 x i32> @sext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
}
define <4 x i64> @sext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE-LABEL: sext_4i32_to_4i64:
; SSE: ## BB#0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,0]
; SSE-NEXT: movd %xmm1, %rax
; SSE-NEXT: cltq
; SSE-NEXT: movd %rax, %xmm2
; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: movd %xmm1, %rax
; SSE-NEXT: cltq
; SSE-NEXT: movd %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,3,0]
; SSE-NEXT: movd %xmm0, %rax
; SSE-NEXT: cltq
; SSE-NEXT: movd %rax, %xmm1
; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: movd %xmm0, %rax
; SSE-NEXT: cltq
; SSE-NEXT: movd %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
; SSE2-LABEL: sext_4i32_to_4i64:
; SSE2: ## BB#0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,0]
; SSE2-NEXT: movd %xmm1, %rax
; SSE2-NEXT: cltq
; SSE2-NEXT: movd %rax, %xmm2
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1,1]
; SSE2-NEXT: movd %xmm1, %rax
; SSE2-NEXT: cltq
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,3,0]
; SSE2-NEXT: movd %xmm0, %rax
; SSE2-NEXT: cltq
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: movd %xmm0, %rax
; SSE2-NEXT: cltq
; SSE2-NEXT: movd %rax, %xmm0
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_4i32_to_4i64:
; SSSE3: ## BB#0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,0]
; SSSE3-NEXT: movd %xmm1, %rax
; SSSE3-NEXT: cltq
; SSSE3-NEXT: movd %rax, %xmm2
; SSSE3-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1,1]
; SSSE3-NEXT: movd %xmm1, %rax
; SSSE3-NEXT: cltq
; SSSE3-NEXT: movd %rax, %xmm1
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,3,0]
; SSSE3-NEXT: movd %xmm0, %rax
; SSSE3-NEXT: cltq
; SSSE3-NEXT: movd %rax, %xmm1
; SSSE3-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSSE3-NEXT: movd %xmm0, %rax
; SSSE3-NEXT: cltq
; SSSE3-NEXT: movd %rax, %xmm0
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSSE3-NEXT: movdqa %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_4i32_to_4i64:
; SSE41: ## BB#0:
; SSE41-NEXT: pmovzxdq %xmm0, %xmm1
; SSE41-NEXT: pextrq $1, %xmm1, %rax
; SSE41-NEXT: cltq
; SSE41-NEXT: movd %rax, %xmm3
; SSE41-NEXT: movd %xmm1, %rax
; SSE41-NEXT: cltq
; SSE41-NEXT: movd %rax, %xmm2
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,3,0]
; SSE41-NEXT: pextrq $1, %xmm0, %rax
; SSE41-NEXT: cltq
; SSE41-NEXT: movd %rax, %xmm3
; SSE41-NEXT: movd %xmm0, %rax
; SSE41-NEXT: cltq
; SSE41-NEXT: movd %rax, %xmm1
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_4i32_to_4i64:
; AVX1: ## BB#0:
@@ -75,12 +143,24 @@ define <4 x i64> @sext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
}
define <4 x i32> @load_sext_test1(<4 x i16> *%ptr) {
; SSE-LABEL: load_sext_test1:
; SSE: ## BB#0:
; SSE-NEXT: movq (%rdi), %xmm0
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: retq
; SSE2-LABEL: load_sext_test1:
; SSE2: ## BB#0:
; SSE2-NEXT: movq (%rdi), %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_test1:
; SSSE3: ## BB#0:
; SSSE3-NEXT: movq (%rdi), %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: psrad $16, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_test1:
; SSE41: ## BB#0:
; SSE41-NEXT: pmovsxwd (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_test1:
; AVX: ## BB#0:
@@ -116,6 +196,11 @@ define <4 x i32> @load_sext_test2(<4 x i8> *%ptr) {
; SSSE3-NEXT: psrad $24, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_test2:
; SSE41: ## BB#0:
; SSE41-NEXT: pmovsxbd (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_test2:
; AVX: ## BB#0:
; AVX-NEXT: vpmovsxbd (%rdi), %xmm0
@@ -126,14 +211,28 @@ define <4 x i32> @load_sext_test2(<4 x i8> *%ptr) {
}
define <2 x i64> @load_sext_test3(<2 x i8> *%ptr) {
; SSE-LABEL: load_sext_test3:
; SSE: ## BB#0:
; SSE-NEXT: movsbq 1(%rdi), %rax
; SSE-NEXT: movd %rax, %xmm1
; SSE-NEXT: movsbq (%rdi), %rax
; SSE-NEXT: movd %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
; SSE2-LABEL: load_sext_test3:
; SSE2: ## BB#0:
; SSE2-NEXT: movsbq 1(%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: movsbq (%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm0
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_test3:
; SSSE3: ## BB#0:
; SSSE3-NEXT: movsbq 1(%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm1
; SSSE3-NEXT: movsbq (%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm0
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_test3:
; SSE41: ## BB#0:
; SSE41-NEXT: pmovsxbq (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_test3:
; AVX: ## BB#0:
@@ -145,14 +244,28 @@ define <2 x i64> @load_sext_test3(<2 x i8> *%ptr) {
}
define <2 x i64> @load_sext_test4(<2 x i16> *%ptr) {
; SSE-LABEL: load_sext_test4:
; SSE: ## BB#0:
; SSE-NEXT: movswq 2(%rdi), %rax
; SSE-NEXT: movd %rax, %xmm1
; SSE-NEXT: movswq (%rdi), %rax
; SSE-NEXT: movd %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
; SSE2-LABEL: load_sext_test4:
; SSE2: ## BB#0:
; SSE2-NEXT: movswq 2(%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: movswq (%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm0
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_test4:
; SSSE3: ## BB#0:
; SSSE3-NEXT: movswq 2(%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm1
; SSSE3-NEXT: movswq (%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm0
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_test4:
; SSE41: ## BB#0:
; SSE41-NEXT: pmovsxwq (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_test4:
; AVX: ## BB#0:
@@ -164,14 +277,28 @@ define <2 x i64> @load_sext_test4(<2 x i16> *%ptr) {
}
define <2 x i64> @load_sext_test5(<2 x i32> *%ptr) {
; SSE-LABEL: load_sext_test5:
; SSE: ## BB#0:
; SSE-NEXT: movslq 4(%rdi), %rax
; SSE-NEXT: movd %rax, %xmm1
; SSE-NEXT: movslq (%rdi), %rax
; SSE-NEXT: movd %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
; SSE2-LABEL: load_sext_test5:
; SSE2: ## BB#0:
; SSE2-NEXT: movslq 4(%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: movslq (%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm0
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_test5:
; SSSE3: ## BB#0:
; SSSE3-NEXT: movslq 4(%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm1
; SSSE3-NEXT: movslq (%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm0
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_test5:
; SSE41: ## BB#0:
; SSE41-NEXT: pmovsxdq (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_test5:
; AVX: ## BB#0:
@@ -183,12 +310,24 @@ define <2 x i64> @load_sext_test5(<2 x i32> *%ptr) {
}
define <8 x i16> @load_sext_test6(<8 x i8> *%ptr) {
; SSE-LABEL: load_sext_test6:
; SSE: ## BB#0:
; SSE-NEXT: movq (%rdi), %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: psraw $8, %xmm0
; SSE-NEXT: retq
; SSE2-LABEL: load_sext_test6:
; SSE2: ## BB#0:
; SSE2-NEXT: movq (%rdi), %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_test6:
; SSSE3: ## BB#0:
; SSSE3-NEXT: movq (%rdi), %xmm0
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: psraw $8, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_test6:
; SSE41: ## BB#0:
; SSE41-NEXT: pmovsxbw (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_test6:
; AVX: ## BB#0:
@@ -200,30 +339,78 @@ define <8 x i16> @load_sext_test6(<8 x i8> *%ptr) {
}
define <4 x i64> @sext_4i1_to_4i64(<4 x i1> %mask) {
; SSE-LABEL: sext_4i1_to_4i64:
; SSE: ## BB#0:
; SSE-NEXT: pslld $31, %xmm0
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,0]
; SSE-NEXT: movd %xmm1, %rax
; SSE-NEXT: cltq
; SSE-NEXT: movd %rax, %xmm2
; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: movd %xmm1, %rax
; SSE-NEXT: cltq
; SSE-NEXT: movd %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,3,0]
; SSE-NEXT: movd %xmm0, %rax
; SSE-NEXT: cltq
; SSE-NEXT: movd %rax, %xmm1
; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: movd %xmm0, %rax
; SSE-NEXT: cltq
; SSE-NEXT: movd %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
; SSE2-LABEL: sext_4i1_to_4i64:
; SSE2: ## BB#0:
; SSE2-NEXT: pslld $31, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,0]
; SSE2-NEXT: movd %xmm1, %rax
; SSE2-NEXT: cltq
; SSE2-NEXT: movd %rax, %xmm2
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1,1]
; SSE2-NEXT: movd %xmm1, %rax
; SSE2-NEXT: cltq
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,3,0]
; SSE2-NEXT: movd %xmm0, %rax
; SSE2-NEXT: cltq
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: movd %xmm0, %rax
; SSE2-NEXT: cltq
; SSE2-NEXT: movd %rax, %xmm0
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_4i1_to_4i64:
; SSSE3: ## BB#0:
; SSSE3-NEXT: pslld $31, %xmm0
; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,0]
; SSSE3-NEXT: movd %xmm1, %rax
; SSSE3-NEXT: cltq
; SSSE3-NEXT: movd %rax, %xmm2
; SSSE3-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1,1]
; SSSE3-NEXT: movd %xmm1, %rax
; SSSE3-NEXT: cltq
; SSSE3-NEXT: movd %rax, %xmm1
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,3,0]
; SSSE3-NEXT: movd %xmm0, %rax
; SSSE3-NEXT: cltq
; SSSE3-NEXT: movd %rax, %xmm1
; SSSE3-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSSE3-NEXT: movd %xmm0, %rax
; SSSE3-NEXT: cltq
; SSSE3-NEXT: movd %rax, %xmm0
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSSE3-NEXT: movdqa %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_4i1_to_4i64:
; SSE41: ## BB#0:
; SSE41-NEXT: pslld $31, %xmm0
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: pmovzxdq %xmm0, %xmm1
; SSE41-NEXT: pextrq $1, %xmm1, %rax
; SSE41-NEXT: cltq
; SSE41-NEXT: movd %rax, %xmm3
; SSE41-NEXT: movd %xmm1, %rax
; SSE41-NEXT: cltq
; SSE41-NEXT: movd %rax, %xmm2
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,3,0]
; SSE41-NEXT: pextrq $1, %xmm0, %rax
; SSE41-NEXT: cltq
; SSE41-NEXT: movd %rax, %xmm3
; SSE41-NEXT: movd %xmm0, %rax
; SSE41-NEXT: cltq
; SSE41-NEXT: movd %rax, %xmm1
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_4i1_to_4i64:
; AVX1: ## BB#0:
@@ -246,17 +433,40 @@ define <4 x i64> @sext_4i1_to_4i64(<4 x i1> %mask) {
}
define <16 x i16> @sext_16i8_to_16i16(<16 x i8> *%ptr) {
; SSE-LABEL: sext_16i8_to_16i16:
; SSE: ## BB#0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: psllw $8, %xmm0
; SSE-NEXT: psraw $8, %xmm0
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE-NEXT: psllw $8, %xmm1
; SSE-NEXT: psraw $8, %xmm1
; SSE-NEXT: retq
; SSE2-LABEL: sext_16i8_to_16i16:
; SSE2: ## BB#0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psllw $8, %xmm0
; SSE2-NEXT: psraw $8, %xmm0
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-NEXT: psllw $8, %xmm1
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_16i8_to_16i16:
; SSSE3: ## BB#0:
; SSSE3-NEXT: movdqa (%rdi), %xmm1
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: psllw $8, %xmm0
; SSSE3-NEXT: psraw $8, %xmm0
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSSE3-NEXT: psllw $8, %xmm1
; SSSE3-NEXT: psraw $8, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_16i8_to_16i16:
; SSE41: ## BB#0:
; SSE41-NEXT: movdqa (%rdi), %xmm1
; SSE41-NEXT: pmovzxbw %xmm1, %xmm0
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE41-NEXT: psllw $8, %xmm1
; SSE41-NEXT: psraw $8, %xmm1
; SSE41-NEXT: psllw $8, %xmm0
; SSE41-NEXT: psraw $8, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_16i8_to_16i16:
; AVX1: ## BB#0:
@@ -278,30 +488,78 @@ define <16 x i16> @sext_16i8_to_16i16(<16 x i8> *%ptr) {
}
define <4 x i64> @sext_4i8_to_4i64(<4 x i8> %mask) {
; SSE-LABEL: sext_4i8_to_4i64:
; SSE: ## BB#0:
; SSE-NEXT: pslld $24, %xmm0
; SSE-NEXT: psrad $24, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,0]
; SSE-NEXT: movd %xmm1, %rax
; SSE-NEXT: cltq
; SSE-NEXT: movd %rax, %xmm2
; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: movd %xmm1, %rax
; SSE-NEXT: cltq
; SSE-NEXT: movd %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,3,0]
; SSE-NEXT: movd %xmm0, %rax
; SSE-NEXT: cltq
; SSE-NEXT: movd %rax, %xmm1
; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: movd %xmm0, %rax
; SSE-NEXT: cltq
; SSE-NEXT: movd %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
; SSE2-LABEL: sext_4i8_to_4i64:
; SSE2: ## BB#0:
; SSE2-NEXT: pslld $24, %xmm0
; SSE2-NEXT: psrad $24, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,0]
; SSE2-NEXT: movd %xmm1, %rax
; SSE2-NEXT: cltq
; SSE2-NEXT: movd %rax, %xmm2
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1,1]
; SSE2-NEXT: movd %xmm1, %rax
; SSE2-NEXT: cltq
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,3,0]
; SSE2-NEXT: movd %xmm0, %rax
; SSE2-NEXT: cltq
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: movd %xmm0, %rax
; SSE2-NEXT: cltq
; SSE2-NEXT: movd %rax, %xmm0
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_4i8_to_4i64:
; SSSE3: ## BB#0:
; SSSE3-NEXT: pslld $24, %xmm0
; SSSE3-NEXT: psrad $24, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,0]
; SSSE3-NEXT: movd %xmm1, %rax
; SSSE3-NEXT: cltq
; SSSE3-NEXT: movd %rax, %xmm2
; SSSE3-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1,1]
; SSSE3-NEXT: movd %xmm1, %rax
; SSSE3-NEXT: cltq
; SSSE3-NEXT: movd %rax, %xmm1
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,3,0]
; SSSE3-NEXT: movd %xmm0, %rax
; SSSE3-NEXT: cltq
; SSSE3-NEXT: movd %rax, %xmm1
; SSSE3-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
; SSSE3-NEXT: movd %xmm0, %rax
; SSSE3-NEXT: cltq
; SSSE3-NEXT: movd %rax, %xmm0
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSSE3-NEXT: movdqa %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_4i8_to_4i64:
; SSE41: ## BB#0:
; SSE41-NEXT: pslld $24, %xmm0
; SSE41-NEXT: psrad $24, %xmm0
; SSE41-NEXT: pmovzxdq %xmm0, %xmm1
; SSE41-NEXT: pextrq $1, %xmm1, %rax
; SSE41-NEXT: cltq
; SSE41-NEXT: movd %rax, %xmm3
; SSE41-NEXT: movd %xmm1, %rax
; SSE41-NEXT: cltq
; SSE41-NEXT: movd %rax, %xmm2
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,3,0]
; SSE41-NEXT: pextrq $1, %xmm0, %rax
; SSE41-NEXT: cltq
; SSE41-NEXT: movd %rax, %xmm3
; SSE41-NEXT: movd %xmm0, %rax
; SSE41-NEXT: cltq
; SSE41-NEXT: movd %rax, %xmm1
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_4i8_to_4i64:
; AVX1: ## BB#0:
@@ -379,6 +637,28 @@ define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8> *%ptr) {
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i8_to_4i64:
; SSE41: ## BB#0:
; SSE41-NEXT: movd (%rdi), %xmm0
; SSE41-NEXT: pmovzxbd %xmm0, %xmm1
; SSE41-NEXT: pmovzxbq %xmm0, %xmm0
; SSE41-NEXT: pextrq $1, %xmm0, %rax
; SSE41-NEXT: movsbq %al, %rax
; SSE41-NEXT: movd %rax, %xmm2
; SSE41-NEXT: movd %xmm0, %rax
; SSE41-NEXT: movsbq %al, %rax
; SSE41-NEXT: movd %rax, %xmm0
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,0,3,0]
; SSE41-NEXT: pextrq $1, %xmm1, %rax
; SSE41-NEXT: movsbq %al, %rax
; SSE41-NEXT: movd %rax, %xmm2
; SSE41-NEXT: movd %xmm1, %rax
; SSE41-NEXT: movsbq %al, %rax
; SSE41-NEXT: movd %rax, %xmm1
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_4i8_to_4i64:
; AVX1: ## BB#0:
; AVX1-NEXT: vpmovsxbd (%rdi), %xmm0
@@ -398,29 +678,75 @@ define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8> *%ptr) {
}
define <4 x i64> @load_sext_4i16_to_4i64(<4 x i16> *%ptr) {
; SSE-LABEL: load_sext_4i16_to_4i64:
; SSE: ## BB#0:
; SSE-NEXT: movq (%rdi), %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,1,0]
; SSE-NEXT: movd %xmm2, %rax
; SSE-NEXT: movswq %ax, %rax
; SSE-NEXT: movd %rax, %xmm0
; SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: movd %xmm2, %rax
; SSE-NEXT: movswq %ax, %rax
; SSE-NEXT: movd %rax, %xmm2
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,0,3,0]
; SSE-NEXT: movd %xmm2, %rax
; SSE-NEXT: movswq %ax, %rax
; SSE-NEXT: movd %rax, %xmm1
; SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: movd %xmm2, %rax
; SSE-NEXT: movswq %ax, %rax
; SSE-NEXT: movd %rax, %xmm2
; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT: retq
; SSE2-LABEL: load_sext_4i16_to_4i64:
; SSE2: ## BB#0:
; SSE2-NEXT: movq (%rdi), %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,1,0]
; SSE2-NEXT: movd %xmm2, %rax
; SSE2-NEXT: movswq %ax, %rax
; SSE2-NEXT: movd %rax, %xmm0
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1,1]
; SSE2-NEXT: movd %xmm2, %rax
; SSE2-NEXT: movswq %ax, %rax
; SSE2-NEXT: movd %rax, %xmm2
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,0,3,0]
; SSE2-NEXT: movd %xmm2, %rax
; SSE2-NEXT: movswq %ax, %rax
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1,1]
; SSE2-NEXT: movd %xmm2, %rax
; SSE2-NEXT: movswq %ax, %rax
; SSE2-NEXT: movd %rax, %xmm2
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_4i16_to_4i64:
; SSSE3: ## BB#0:
; SSSE3-NEXT: movq (%rdi), %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,1,0]
; SSSE3-NEXT: movd %xmm2, %rax
; SSSE3-NEXT: movswq %ax, %rax
; SSSE3-NEXT: movd %rax, %xmm0
; SSSE3-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1,1]
; SSSE3-NEXT: movd %xmm2, %rax
; SSSE3-NEXT: movswq %ax, %rax
; SSSE3-NEXT: movd %rax, %xmm2
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,0,3,0]
; SSSE3-NEXT: movd %xmm2, %rax
; SSSE3-NEXT: movswq %ax, %rax
; SSSE3-NEXT: movd %rax, %xmm1
; SSSE3-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1,1]
; SSSE3-NEXT: movd %xmm2, %rax
; SSSE3-NEXT: movswq %ax, %rax
; SSSE3-NEXT: movd %rax, %xmm2
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i16_to_4i64:
; SSE41: ## BB#0:
; SSE41-NEXT: movq (%rdi), %xmm0
; SSE41-NEXT: pmovzxwd %xmm0, %xmm1
; SSE41-NEXT: pmovzxwq %xmm0, %xmm0
; SSE41-NEXT: pextrq $1, %xmm0, %rax
; SSE41-NEXT: movswq %ax, %rax
; SSE41-NEXT: movd %rax, %xmm2
; SSE41-NEXT: movd %xmm0, %rax
; SSE41-NEXT: movswq %ax, %rax
; SSE41-NEXT: movd %rax, %xmm0
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,0,3,0]
; SSE41-NEXT: pextrq $1, %xmm1, %rax
; SSE41-NEXT: movswq %ax, %rax
; SSE41-NEXT: movd %rax, %xmm2
; SSE41-NEXT: movd %xmm1, %rax
; SSE41-NEXT: movswq %ax, %rax
; SSE41-NEXT: movd %rax, %xmm1
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_4i16_to_4i64:
; AVX1: ## BB#0:


@@ -1,39 +1,154 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
; CHECK-LABEL: zext_8i16_to_8i32:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; CHECK-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; CHECK-NEXT: retq
; SSE2-LABEL: zext_8i16_to_8i32:
; SSE2: ## BB#0: ## %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i16_to_8i32:
; SSSE3: ## BB#0: ## %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSSE3-NEXT: pand %xmm1, %xmm2
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pand %xmm0, %xmm1
; SSSE3-NEXT: movdqa %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i16_to_8i32:
; SSE41: ## BB#0: ## %entry
; SSE41-NEXT: pmovzxwd %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE41-NEXT: pand %xmm1, %xmm2
; SSE41-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE41-NEXT: pand %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i16_to_8i32:
; AVX1: ## BB#0: ## %entry
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i16_to_8i32:
; AVX2: ## BB#0: ## %entry
; AVX2-NEXT: vpmovzxwd %xmm0, %ymm0
; AVX2-NEXT: retq
entry:
%B = zext <8 x i16> %A to <8 x i32>
ret <8 x i32>%B
}
define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
; CHECK-LABEL: zext_4i32_to_4i64:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; CHECK-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; CHECK-NEXT: retq
; SSE2-LABEL: zext_4i32_to_4i64:
; SSE2: ## BB#0: ## %entry
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,1,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [4294967295,4294967295]
; SSE2-NEXT: pand %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,0,3,0]
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_4i32_to_4i64:
; SSSE3: ## BB#0: ## %entry
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,1,0]
; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [4294967295,4294967295]
; SSSE3-NEXT: pand %xmm3, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,0,3,0]
; SSSE3-NEXT: pand %xmm3, %xmm1
; SSSE3-NEXT: movdqa %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_4i32_to_4i64:
; SSE41: ## BB#0: ## %entry
; SSE41-NEXT: pmovzxdq %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [4294967295,4294967295]
; SSE41-NEXT: pand %xmm3, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,0,3,0]
; SSE41-NEXT: pand %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_4i32_to_4i64:
; AVX1: ## BB#0: ## %entry
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_4i32_to_4i64:
; AVX2: ## BB#0: ## %entry
; AVX2-NEXT: vpmovzxdq %xmm0, %ymm0
; AVX2-NEXT: retq
entry:
%B = zext <4 x i32> %A to <4 x i64>
ret <4 x i64>%B
}
define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
; CHECK-LABEL: zext_8i8_to_8i32:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
; CHECK-NEXT: vpmovzxwd %xmm0, %xmm0
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: vandps {{.*}}, %ymm0, %ymm0
; CHECK-NEXT: retq
; SSE2-LABEL: zext_8i8_to_8i32:
; SSE2: ## BB#0: ## %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255]
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i8_to_8i32:
; SSSE3: ## BB#0: ## %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255]
; SSSE3-NEXT: pand %xmm1, %xmm2
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pand %xmm0, %xmm1
; SSSE3-NEXT: movdqa %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i8_to_8i32:
; SSE41: ## BB#0: ## %entry
; SSE41-NEXT: pmovzxwd %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255]
; SSE41-NEXT: pand %xmm1, %xmm2
; SSE41-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE41-NEXT: pand %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i8_to_8i32:
; AVX1: ## BB#0: ## %entry
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpmovzxwd %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vandps LCPI2_0(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i8_to_8i32:
; AVX2: ## BB#0: ## %entry
; AVX2-NEXT: vpmovzxwd %xmm0, %ymm0
; AVX2-NEXT: vpbroadcastd LCPI2_0(%rip), %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
entry:
%t = zext <8 x i8> %z to <8 x i32>
ret <8 x i32> %t
@@ -41,13 +156,50 @@ entry:
; PR17654
define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %z) {
; CHECK-LABEL: zext_16i8_to_16i16:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; CHECK-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; CHECK-NEXT: retq
; SSE2-LABEL: zext_16i8_to_16i16:
; SSE2: ## BB#0: ## %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_16i16:
; SSSE3: ## BB#0: ## %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSSE3-NEXT: pand %xmm1, %xmm2
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: pand %xmm0, %xmm1
; SSSE3-NEXT: movdqa %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_16i16:
; SSE41: ## BB#0: ## %entry
; SSE41-NEXT: pmovzxbw %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm1, %xmm2
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pand %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i8_to_16i16:
; AVX1: ## BB#0: ## %entry
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i8_to_16i16:
; AVX2: ## BB#0: ## %entry
; AVX2-NEXT: vpmovzxbw %xmm0, %ymm0
; AVX2-NEXT: retq
entry:
%t = zext <16 x i8> %z to <16 x i16>
ret <16 x i16> %t