llvm-6502/test/CodeGen/X86/avx2-conversions.ll
Chandler Carruth 3d4542ce3d
[x86] Remove the insanely over-aggressive unpack lowering strategy for
v16i8 shuffles, and replace it with new facilities.

This uses precise patterns to match exact unpacks, and falls back to the
new generalized unpack lowering only when we detect a case where we will
have to shuffle both inputs anyway and they terminate in exactly a blend.
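
As a sketch of the kind of case the generalized path targets (the function
name and mask below are purely illustrative, not taken from any test in this
file, and no particular instruction sequence is asserted for it), consider a
v16i8 shuffle that picks the even bytes of both inputs in interleaved order.
Neither input matches an exact unpack by itself, so both inputs have to be
shuffled first and the two results then merge in a single unpack-style step:

define <16 x i8> @interleave_even_bytes(<16 x i8> %a, <16 x i8> %b) nounwind {
  ; Result is a0,b0,a2,b2,...,a14,b14 -- not a plain punpcklbw/punpckhbw of
  ; %a and %b, so each input needs its own byte shuffle before the merge.
  %s = shufflevector <16 x i8> %a, <16 x i8> %b,
                     <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20,
                                 i32 6, i32 22, i32 8, i32 24, i32 10, i32 26,
                                 i32 12, i32 28, i32 14, i32 30>
  ret <16 x i8> %s
}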

This fixes all of the blend horrors that I uncovered by always lowering
blends through the vector shuffle lowering. It also removes *sooooo*
much of the crazy instruction sequences required for v16i8 lowering
previously. Much cleaner now.

The only "meh" aspect is that we sometimes use pshufb+pshufb+unpck when
it would be marginally nicer to use pshufb+pshufb+por. However, the
difference there is *tiny*. In many cases it's a win because we re-use
the pshufb mask. In others, we get to avoid the pshufb entirely. I've
left a FIXME, but I'm dubious we can really do better than this. I'm
actually pretty happy with this lowering now.
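
To make that tradeoff concrete (a sketch only; the register numbers are
arbitrary and neither sequence is claimed as the exact output for any test
in this file), the two merging idioms being weighed look like:

  vpshufb     %xmm2, %xmm0, %xmm0   ; pack the wanted bytes of input 0 low
  vpshufb     %xmm3, %xmm1, %xmm1   ; pack the wanted bytes of input 1 low
  vpunpcklqdq %xmm1, %xmm0, %xmm0   ; merge the two low halves with an unpack

versus

  vpshufb %xmm2, %xmm0, %xmm0       ; masks chosen to zero the unwanted bytes
  vpshufb %xmm3, %xmm1, %xmm1       ; (pshufb indices with the high bit set
  vpor    %xmm1, %xmm0, %xmm0       ;  write zero), then merge with an or

Note that in the unpack form both inputs can often share one shuffle control,
which is the mask re-use mentioned above.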

For SSE2 this exposes some horrors that were really already there. Those
will have to be fixed by changing a different path through the v16i8
lowering.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@229846 91177308-0d34-0410-b5e6-96231b3b80d8
2015-02-19 12:10:37 +00:00

; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s
; CHECK-LABEL: trunc4:
; CHECK: vpermd
; CHECK-NOT: vinsert
; CHECK: ret
define <4 x i32> @trunc4(<4 x i64> %A) nounwind {
  %B = trunc <4 x i64> %A to <4 x i32>
  ret <4 x i32> %B
}
; CHECK-LABEL: trunc8:
; CHECK: vpshufb
; CHECK-NOT: vinsert
; CHECK: ret
define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
  %B = trunc <8 x i32> %A to <8 x i16>
  ret <8 x i16> %B
}
; CHECK-LABEL: sext4:
; CHECK: vpmovsxdq
; CHECK-NOT: vinsert
; CHECK: ret
define <4 x i64> @sext4(<4 x i32> %A) nounwind {
  %B = sext <4 x i32> %A to <4 x i64>
  ret <4 x i64> %B
}
; CHECK-LABEL: sext8:
; CHECK: vpmovsxwd
; CHECK-NOT: vinsert
; CHECK: ret
define <8 x i32> @sext8(<8 x i16> %A) nounwind {
  %B = sext <8 x i16> %A to <8 x i32>
  ret <8 x i32> %B
}
; CHECK-LABEL: zext4:
; CHECK: vpmovzxdq
; CHECK-NOT: vinsert
; CHECK: ret
define <4 x i64> @zext4(<4 x i32> %A) nounwind {
  %B = zext <4 x i32> %A to <4 x i64>
  ret <4 x i64> %B
}
; CHECK-LABEL: zext8:
; CHECK: vpmovzxwd
; CHECK-NOT: vinsert
; CHECK: ret
define <8 x i32> @zext8(<8 x i16> %A) nounwind {
  %B = zext <8 x i16> %A to <8 x i32>
  ret <8 x i32> %B
}
; CHECK-LABEL: zext_8i8_8i32:
; CHECK: vpmovzxwd
; CHECK: vpand
; CHECK: ret
define <8 x i32> @zext_8i8_8i32(<8 x i8> %A) nounwind {
  %B = zext <8 x i8> %A to <8 x i32>
  ret <8 x i32> %B
}
; CHECK-LABEL: zext_16i8_16i16:
; CHECK: vpmovzxbw
; CHECK-NOT: vinsert
; CHECK: ret
define <16 x i16> @zext_16i8_16i16(<16 x i8> %z) {
  %t = zext <16 x i8> %z to <16 x i16>
  ret <16 x i16> %t
}
; CHECK-LABEL: sext_16i8_16i16:
; CHECK: vpmovsxbw
; CHECK-NOT: vinsert
; CHECK: ret
define <16 x i16> @sext_16i8_16i16(<16 x i8> %z) {
  %t = sext <16 x i8> %z to <16 x i16>
  ret <16 x i16> %t
}
; CHECK-LABEL: trunc_16i16_16i8:
; CHECK: vpshufb
; CHECK: vpshufb
; CHECK: vpunpcklqdq
; CHECK: ret
define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
  %t = trunc <16 x i16> %z to <16 x i8>
  ret <16 x i8> %t
}
; CHECK-LABEL: load_sext_test1:
; CHECK: vpmovsxdq (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <4 x i64> @load_sext_test1(<4 x i32> *%ptr) {
  %X = load <4 x i32>* %ptr
  %Y = sext <4 x i32> %X to <4 x i64>
  ret <4 x i64> %Y
}
; CHECK-LABEL: load_sext_test2:
; CHECK: vpmovsxbq (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <4 x i64> @load_sext_test2(<4 x i8> *%ptr) {
  %X = load <4 x i8>* %ptr
  %Y = sext <4 x i8> %X to <4 x i64>
  ret <4 x i64> %Y
}
; CHECK-LABEL: load_sext_test3:
; CHECK: vpmovsxwq (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <4 x i64> @load_sext_test3(<4 x i16> *%ptr) {
  %X = load <4 x i16>* %ptr
  %Y = sext <4 x i16> %X to <4 x i64>
  ret <4 x i64> %Y
}
; CHECK-LABEL: load_sext_test4:
; CHECK: vpmovsxwd (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <8 x i32> @load_sext_test4(<8 x i16> *%ptr) {
  %X = load <8 x i16>* %ptr
  %Y = sext <8 x i16> %X to <8 x i32>
  ret <8 x i32> %Y
}
; CHECK-LABEL: load_sext_test5:
; CHECK: vpmovsxbd (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <8 x i32> @load_sext_test5(<8 x i8> *%ptr) {
  %X = load <8 x i8>* %ptr
  %Y = sext <8 x i8> %X to <8 x i32>
  ret <8 x i32> %Y
}