; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=AVX1

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"
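
; AVX1 lowering tests for 256-bit shuffles of <4 x i64> and <4 x double>
; vectors under the experimental vector shuffle lowering. Each function checks
; the exact instruction sequence expected for a single shuffle mask.
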
define <4 x i64> @shuffle_v4i64_0001(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @shuffle_v4i64_0001
; AVX1: # BB#0:
; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm1 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
  ret <4 x i64> %shuffle
}

define <4 x i64> @shuffle_v4i64_0020(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @shuffle_v4i64_0020
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm1 = xmm1[0],xmm0[0]
; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
  ret <4 x i64> %shuffle
}

define <4 x i64> @shuffle_v4i64_0112(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @shuffle_v4i64_0112
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufpd {{.*}} # xmm1 = xmm0[1],xmm1[0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 1, i32 1, i32 2>
  ret <4 x i64> %shuffle
}

define <4 x i64> @shuffle_v4i64_0300(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @shuffle_v4i64_0300
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufpd {{.*}} # xmm1 = xmm0[0],xmm1[1]
; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
  ret <4 x i64> %shuffle
}

define <4 x i64> @shuffle_v4i64_1000(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @shuffle_v4i64_1000
; AVX1: # BB#0:
; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
  ret <4 x i64> %shuffle
}

define <4 x i64> @shuffle_v4i64_2200(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @shuffle_v4i64_2200
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm1 = xmm1[0,0]
; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
  ret <4 x i64> %shuffle
}

define <4 x i64> @shuffle_v4i64_3330(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @shuffle_v4i64_3330
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufpd {{.*}} # xmm0 = xmm1[1],xmm0[0]
; AVX1-NEXT: vpunpckhqdq {{.*}} # xmm1 = xmm1[1,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
  ret <4 x i64> %shuffle
}

define <4 x i64> @shuffle_v4i64_3210(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @shuffle_v4i64_3210
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm1[2,3,0,1]
; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ret <4 x i64> %shuffle
}
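
; The same single-input shuffles on <4 x double>. These stay in the
; floating-point domain, using the vunpcklpd/vunpckhpd and vshufpd forms.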
define <4 x double> @shuffle_v4f64_0001(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_0001
; AVX1: # BB#0:
; AVX1-NEXT: vunpcklpd {{.*}} # xmm1 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_0020(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_0020
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vunpcklpd {{.*}} # xmm1 = xmm1[0],xmm0[0]
; AVX1-NEXT: vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 0>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_0300(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_0300
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufpd {{.*}} # xmm1 = xmm0[0],xmm1[1]
; AVX1-NEXT: vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 3, i32 0, i32 0>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_1000(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_1000
; AVX1: # BB#0:
; AVX1-NEXT: vshufpd {{.*}} # xmm1 = xmm0[1,0]
; AVX1-NEXT: vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 0, i32 0, i32 0>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_2200(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_2200
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vunpcklpd {{.*}} # xmm1 = xmm1[0,0]
; AVX1-NEXT: vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_3330(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_3330
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufpd {{.*}} # xmm0 = xmm1[1],xmm0[0]
; AVX1-NEXT: vunpckhpd {{.*}} # xmm1 = xmm1[1,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 3, i32 3, i32 3, i32 0>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_3210(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_3210
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vshufpd {{.*}} # xmm1 = xmm1[1,0]
; AVX1-NEXT: vshufpd {{.*}} # xmm0 = xmm0[1,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ret <4 x double> %shuffle
}
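
; In-lane <4 x double> shuffles that lower to a single vpermilpd.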
define <4 x double> @shuffle_v4f64_0023(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_0023
; AVX1: # BB#0:
; AVX1-NEXT: vpermilpd {{.*}} # ymm0 = ymm0[0,0,2,3]
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 3>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_0022(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_0022
; AVX1: # BB#0:
; AVX1-NEXT: vpermilpd {{.*}} # ymm0 = ymm0[0,0,2,2]
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_1032(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_1032
; AVX1: # BB#0:
; AVX1-NEXT: vpermilpd {{.*}} # ymm0 = ymm0[1,0,3,2]
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_1133(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_1133
; AVX1: # BB#0:
; AVX1-NEXT: vpermilpd {{.*}} # ymm0 = ymm0[1,1,3,3]
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_1023(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_1023
; AVX1: # BB#0:
; AVX1-NEXT: vpermilpd {{.*}} # ymm0 = ymm0[1,0,2,3]
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_1022(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_1022
; AVX1: # BB#0:
; AVX1-NEXT: vpermilpd {{.*}} # ymm0 = ymm0[1,0,2,2]
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 0, i32 2, i32 2>
  ret <4 x double> %shuffle
}
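
; <4 x double> shuffles that pull elements from both inputs.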
define <4 x double> @shuffle_v4f64_0423(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_0423
; AVX1: # BB#0:
; AVX1-NEXT: vpermilpd {{.*}} # ymm1 = ymm1[{{[0-9]}},0,{{[0-9],[0-9]}}]
; AVX1-NEXT: vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_0462(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_0462
; AVX1: # BB#0:
; AVX1-NEXT: vpermilpd {{.*}} # ymm1 = ymm1[{{[0-9]}},0,2,{{[0-9]}}]
; AVX1-NEXT: vpermilpd {{.*}} # ymm0 = ymm0[0,{{[0-9],[0-9]}},2]
; AVX1-NEXT: vblendpd {{.*}} # ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 4, i32 6, i32 2>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_0426(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_0426
; AVX1: # BB#0:
; AVX1-NEXT: vunpcklpd {{.*}} # ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_1537(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_1537
; AVX1: # BB#0:
; AVX1-NEXT: vunpckhpd {{.*}} # ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_4062(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_4062
; AVX1: # BB#0:
; AVX1-NEXT: vunpcklpd {{.*}} # ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 4, i32 0, i32 6, i32 2>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_5173(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_5173
; AVX1: # BB#0:
; AVX1-NEXT: vunpckhpd {{.*}} # ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 5, i32 1, i32 7, i32 3>
  ret <4 x double> %shuffle
}

define <4 x double> @shuffle_v4f64_5163(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_5163
; AVX1: # BB#0:
; AVX1-NEXT: vshufpd {{.*}} # ymm0 = ymm1[1],ymm0[1],ymm1[2],ymm0[3]
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 5, i32 1, i32 6, i32 3>
  ret <4 x double> %shuffle
}
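
; <4 x i64> shuffles that pull elements from both inputs.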
define <4 x i64> @shuffle_v4i64_0124(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @shuffle_v4i64_0124
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm1 = xmm1[0,0]
; AVX1-NEXT: vshufpd {{.*}} # xmm1 = xmm2[0],xmm1[1]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
  ret <4 x i64> %shuffle
}

define <4 x i64> @shuffle_v4i64_0142(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @shuffle_v4i64_0142
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm2 = xmm2[0,0]
; AVX1-NEXT: vshufpd {{.*}} # xmm1 = xmm1[0],xmm2[1]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 2>
  ret <4 x i64> %shuffle
}

define <4 x i64> @shuffle_v4i64_0412(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @shuffle_v4i64_0412
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vshufpd {{.*}} # xmm2 = xmm0[1],xmm2[0]
; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm1 = xmm1[0,0]
; AVX1-NEXT: vshufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 2>
  ret <4 x i64> %shuffle
}

define <4 x i64> @shuffle_v4i64_4012(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @shuffle_v4i64_4012
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vshufpd {{.*}} # xmm2 = xmm0[1],xmm2[0]
; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm0 = xmm0[0,0]
; AVX1-NEXT: vshufpd {{.*}} # xmm0 = xmm1[0],xmm0[1]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
  ret <4 x i64> %shuffle
}

define <4 x i64> @shuffle_v4i64_0145(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @shuffle_v4i64_0145
; AVX1: # BB#0:
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  ret <4 x i64> %shuffle
}

define <4 x i64> @shuffle_v4i64_0451(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @shuffle_v4i64_0451
; AVX1: # BB#0:
; AVX1-NEXT: vpshufd {{.*}} # xmm2 = xmm1[2,3,0,1]
; AVX1-NEXT: vshufpd {{.*}} # xmm2 = xmm2[0],xmm0[1]
; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm1 = xmm1[0,0]
; AVX1-NEXT: vshufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 4, i32 5, i32 1>
  ret <4 x i64> %shuffle
}

define <4 x i64> @shuffle_v4i64_4501(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @shuffle_v4i64_4501
; AVX1: # BB#0:
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
  ret <4 x i64> %shuffle
}

define <4 x i64> @shuffle_v4i64_4015(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @shuffle_v4i64_4015
; AVX1: # BB#0:
; AVX1-NEXT: vpshufd {{.*}} # xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vshufpd {{.*}} # xmm2 = xmm2[0],xmm1[1]
; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm0 = xmm0[0,0]
; AVX1-NEXT: vshufpd {{.*}} # xmm0 = xmm1[0],xmm0[1]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 0, i32 1, i32 5>
  ret <4 x i64> %shuffle
}
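
; Regression test distilled from fuzzer-generated, heavily DAG-combined shuffle
; sequences that exposed a subtle RAUW/CSE miscompile in the x86 shuffle
; combining (see r216319).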
define <4 x i64> @stress_test1(<4 x i64> %a, <4 x i64> %b) {
; AVX1-LABEL: @stress_test1
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
; AVX1-NEXT: vpunpckhqdq {{.*}} # xmm0 = xmm0[1,1]
; AVX1-NEXT: vpshufd {{.*}} # xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vshufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
; AVX1-NEXT: vpshufd {{.*}} # xmm1 = xmm1[2,3,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
  %c = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> <i32 3, i32 1, i32 1, i32 0>
  %d = shufflevector <4 x i64> %c, <4 x i64> undef, <4 x i32> <i32 3, i32 undef, i32 2, i32 undef>
  %e = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> <i32 3, i32 3, i32 1, i32 undef>
  %f = shufflevector <4 x i64> %d, <4 x i64> %e, <4 x i32> <i32 5, i32 1, i32 1, i32 0>

  ret <4 x i64> %f
}