[x86] Add an SSSE3 run and check mode to the 128-bit v2 tests of the new
vector shuffle lowering. This will be needed for upcoming palignr tests.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218037 91177308-0d34-0410-b5e6-96231b3b80d8
Chandler Carruth 2014-09-18 08:33:04 +00:00
parent b2f2aa9329
commit 088aa097d5

@@ -1,5 +1,6 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse3 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSSE3
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE41
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -57,6 +58,10 @@ define <2 x double> @shuffle_v2f64_00(<2 x double> %a, <2 x double> %b) {
 ; SSE3: unpcklpd {{.*}} # xmm0 = xmm0[0,0]
 ; SSE3-NEXT: retq
 ;
+; SSSE3-LABEL: @shuffle_v2f64_00
+; SSSE3: unpcklpd {{.*}} # xmm0 = xmm0[0,0]
+; SSSE3-NEXT: retq
+;
 ; SSE41-LABEL: @shuffle_v2f64_00
 ; SSE41: unpcklpd {{.*}} # xmm0 = xmm0[0,0]
 ; SSE41-NEXT: retq
@@ -88,6 +93,11 @@ define <2 x double> @shuffle_v2f64_22(<2 x double> %a, <2 x double> %b) {
 ; SSE3-NEXT: movapd %xmm1, %xmm0
 ; SSE3-NEXT: retq
 ;
+; SSSE3-LABEL: @shuffle_v2f64_22
+; SSSE3: unpcklpd {{.*}} # xmm1 = xmm1[0,0]
+; SSSE3-NEXT: movapd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
 ; SSE41-LABEL: @shuffle_v2f64_22
 ; SSE41: unpcklpd {{.*}} # xmm1 = xmm1[0,0]
 ; SSE41-NEXT: movapd %xmm1, %xmm0
@@ -119,6 +129,10 @@ define <2 x double> @shuffle_v2f64_03(<2 x double> %a, <2 x double> %b) {
 ; SSE3: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
 ; SSE3-NEXT: retq
 ;
+; SSSE3-LABEL: @shuffle_v2f64_03
+; SSSE3: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
+; SSSE3-NEXT: retq
+;
 ; SSE41-LABEL: @shuffle_v2f64_03
 ; SSE41: blendpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
 ; SSE41-NEXT: retq
@@ -136,6 +150,11 @@ define <2 x double> @shuffle_v2f64_21(<2 x double> %a, <2 x double> %b) {
 ; SSE3-NEXT: movapd %xmm1, %xmm0
 ; SSE3-NEXT: retq
 ;
+; SSSE3-LABEL: @shuffle_v2f64_21
+; SSSE3: shufpd {{.*}} # xmm1 = xmm1[0],xmm0[1]
+; SSSE3-NEXT: movapd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
 ; SSE41-LABEL: @shuffle_v2f64_21
 ; SSE41: blendpd {{.*}} # xmm1 = xmm1[0],xmm0[1]
 ; SSE41-NEXT: movapd %xmm1, %xmm0
@@ -169,6 +188,10 @@ define <2 x i64> @shuffle_v2i64_03(<2 x i64> %a, <2 x i64> %b) {
 ; SSE3: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
 ; SSE3-NEXT: retq
 ;
+; SSSE3-LABEL: @shuffle_v2i64_03
+; SSSE3: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
+; SSSE3-NEXT: retq
+;
 ; SSE41-LABEL: @shuffle_v2i64_03
 ; SSE41: pblendw {{.*}} # xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT: retq
@@ -186,6 +209,11 @@ define <2 x i64> @shuffle_v2i64_03_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
 ; SSE3-NEXT: movapd %xmm1, %xmm0
 ; SSE3-NEXT: retq
 ;
+; SSSE3-LABEL: @shuffle_v2i64_03_copy
+; SSSE3: shufpd {{.*}} # xmm1 = xmm1[0],xmm2[1]
+; SSSE3-NEXT: movapd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
 ; SSE41-LABEL: @shuffle_v2i64_03_copy
 ; SSE41: pblendw {{.*}} # xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
@@ -250,6 +278,11 @@ define <2 x i64> @shuffle_v2i64_21(<2 x i64> %a, <2 x i64> %b) {
 ; SSE3-NEXT: movapd %xmm1, %xmm0
 ; SSE3-NEXT: retq
 ;
+; SSSE3-LABEL: @shuffle_v2i64_21
+; SSSE3: shufpd {{.*}} # xmm1 = xmm1[0],xmm0[1]
+; SSSE3-NEXT: movapd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
 ; SSE41-LABEL: @shuffle_v2i64_21
 ; SSE41: pblendw {{.*}} # xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
@@ -268,6 +301,11 @@ define <2 x i64> @shuffle_v2i64_21_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
 ; SSE3-NEXT: movapd %xmm2, %xmm0
 ; SSE3-NEXT: retq
 ;
+; SSSE3-LABEL: @shuffle_v2i64_21_copy
+; SSSE3: shufpd {{.*}} # xmm2 = xmm2[0],xmm1[1]
+; SSSE3-NEXT: movapd %xmm2, %xmm0
+; SSSE3-NEXT: retq
+;
 ; SSE41-LABEL: @shuffle_v2i64_21_copy
 ; SSE41: pblendw {{.*}} # xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT: movdqa %xmm2, %xmm0
@@ -320,6 +358,11 @@ define <2 x double> @insert_dup_reg_v2f64(double %a) {
 ; SSE3-NEXT: retq
 ;
+; FIXME: This should match movddup as well!
+; SSSE3-LABEL: @insert_dup_reg_v2f64
+; SSSE3: unpcklpd {{.*}} # xmm0 = xmm0[0,0]
+; SSSE3-NEXT: retq
+;
 ; FIXME: This should match movddup as well!
 ; SSE41-LABEL: @insert_dup_reg_v2f64
 ; SSE41: unpcklpd {{.*}} # xmm0 = xmm0[0,0]
 ; SSE41-NEXT: retq
@@ -337,6 +380,10 @@ define <2 x double> @insert_dup_mem_v2f64(double* %ptr) {
 ; SSE3: movddup {{.*}}, %xmm0
 ; SSE3-NEXT: retq
 ;
+; SSSE3-LABEL: @insert_dup_mem_v2f64
+; SSSE3: movddup {{.*}}, %xmm0
+; SSSE3-NEXT: retq
+;
 ; SSE41-LABEL: @insert_dup_mem_v2f64
 ; SSE41: movddup {{.*}}, %xmm0
 ; SSE41-NEXT: retq
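
For context on the idiom this commit extends: each RUN line drives llc at a different ISA level via -mattr and pipes the assembly through FileCheck with both the shared ALL prefix and an ISA-specific prefix, so one test body can carry common checks alongside per-ISA instruction checks. A minimal self-contained sketch of the pattern, using a hypothetical function name that is not part of this commit (the expected unpcklpd splat matches the shuffle_v2f64_00 checks above):

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSSE3

define <2 x double> @splat_lo_v2f64(<2 x double> %a) {
; SSSE3-LABEL: @splat_lo_v2f64
; SSSE3: unpcklpd {{.*}} # xmm0 = xmm0[0,0]
; SSSE3-NEXT: retq
  ; Broadcast element 0 of %a into both lanes.
  %s = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> zeroinitializer
  ret <2 x double> %s
}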