llvm-6502/test/CodeGen/PowerPC/vec_shuffle_le.ll

[PowerPC 1/4] Little-endian adjustments for VSX loads/stores

This patch addresses the inherent big-endian bias of the lxvd2x, lxvw4x, stxvd2x, and stxvw4x instructions. These instructions load vector elements into registers left-to-right (with the first element loaded into the high-order bits of the register), regardless of the endian setting of the processor. However, they are the only vector memory instructions that permit unaligned storage accesses, so we want to use them for little endian.

To make this work, an lxvd2x or lxvw4x is replaced with an lxvd2x followed by an xxswapd, which swaps the doublewords. This works for lxvw4x as well as lxvd2x because, for lxvw4x on an LE system, the vector elements are in LE order (right-to-left) within each doubleword. (Thus after lxvw4x of a <4 x float> the elements will appear as 1, 0, 3, 2. Following the swap, they will appear as 3, 2, 1, 0, as desired.) For stores, an stxvd2x or stxvw4x is replaced with an stxvd2x preceded by an xxswapd.

Introducing extra swap instructions provides correctness, but is obviously not ideal from a performance perspective. Future patches will address this with optimizations to remove most of the introduced swaps, which have proven effective in other implementations.

The swaps are introduced during lowering of LOAD, STORE, INTRINSIC_W_CHAIN, and INTRINSIC_VOID operations. The latter two are used to translate intrinsics that specify the VSX loads and stores directly into equivalent sequences for little endian. Thus code that uses vec_vsx_ld and vec_vsx_st does not have to be modified to be ported from BE to LE.

We introduce new PPCISD opcodes for LXVD2X, STXVD2X, and XXSWAPD for use during this lowering step. In PPCInstrVSX.td, we add new SDType and SDNode definitions for these (PPClxvd2x, PPCstxvd2x, PPCxxswapd). These are recognized during instruction selection and mapped to the correct instructions.

Several tests that were written to use -mcpu=pwr7 or pwr8 are modified to disable VSX on LE variants, because code generation changes with this and subsequent patches in this set. I chose to include all of these in the first patch rather than try to rigorously sort out which tests were broken by which of the patches. Sorry about that.

The new test vsx-ldst-builtin-le.ll, and the changes to vsx-ldst.ll, are disabled until LE support is enabled, because of breakages that occur as noted in those tests. They are re-enabled in patch 4/4.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@223783 91177308-0d34-0410-b5e6-96231b3b80d8
2014-12-09 16:35:51 +00:00
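
Before the test file itself, here is a minimal sketch (not part of the original file) of what the lowering described in the commit message looks like in a FileCheck test. It assumes VSX is enabled on a little-endian target; the function names are hypothetical, the RUN-line flags are illustrative, and later patches in this series may optimize away the xxswapd instructions in cases like these.

; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -mattr=+vsx | FileCheck %s

; A plain vector load is lowered to lxvd2x followed by xxswapd ...
define <2 x double> @load_then_swap(<2 x double>* %p) {
entry:
; CHECK: load_then_swap:
; CHECK: lxvd2x
; CHECK: xxswapd
  %v = load <2 x double>* %p
  ret <2 x double> %v
}

; ... and a plain vector store to xxswapd followed by stxvd2x.
define void @swap_then_store(<2 x double> %v, <2 x double>* %p) {
entry:
; CHECK: swap_then_store:
; CHECK: xxswapd
; CHECK: stxvd2x
  store <2 x double> %v, <2 x double>* %p
  ret void
}

The test file below exercises the complementary path: its RUN line disables VSX (-mattr=-vsx), so the shuffles must be matched to the classic Altivec pack/merge/shift instructions instead.
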
; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec -mattr=-vsx -mcpu=pwr7 | FileCheck %s
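; The tests below check the little-endian mapping from shufflevector masks
; to Altivec instructions. The expected operand order is reversed relative
; to big endian, since LE element 0 lives in the low-order end of the
; register.
; Selecting the even-numbered bytes of the concatenation picks the
; low-order byte of each halfword, which is vpkuhum.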
define void @VPKUHUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VPKUHUM_xy:
%tmp = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vpkuhum [[REG3:[0-9]+]], [[REG2]], [[REG1]]
store <16 x i8> %tmp3, <16 x i8>* %A
ret void
}
define void @VPKUHUM_xx(<16 x i8>* %A) {
entry:
; CHECK: VPKUHUM_xx:
%tmp = load <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; CHECK: vpkuhum
store <16 x i8> %tmp2, <16 x i8>* %A
ret void
}
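; Selecting bytes {0,1} of each word picks the low-order halfword of each
; word, which is vpkuwum.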
define void @VPKUWUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VPKUWUM_xy:
%tmp = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 16, i32 17, i32 20, i32 21, i32 24, i32 25, i32 28, i32 29>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vpkuwum [[REG3:[0-9]+]], [[REG2]], [[REG1]]
store <16 x i8> %tmp3, <16 x i8>* %A
ret void
}
define void @VPKUWUM_xx(<16 x i8>* %A) {
entry:
; CHECK: VPKUWUM_xx:
%tmp = load <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13>
; CHECK: vpkuwum
store <16 x i8> %tmp2, <16 x i8>* %A
ret void
}
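; Interleaving the low bytes (elements 0-7) of the two inputs maps to
; vmrglb on LE (it would be vmrghb on BE), again with reversed operands.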
define void @VMRGLB_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGLB_xy:
%tmp = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vmrglb [[REG3:[0-9]+]], [[REG2]], [[REG1]]
store <16 x i8> %tmp3, <16 x i8>* %A
ret void
}
define void @VMRGLB_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGLB_xx:
%tmp = load <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3, i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
; CHECK: vmrglb
store <16 x i8> %tmp2, <16 x i8>* %A
ret void
}
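; Interleaving the high bytes (elements 8-15) maps to vmrghb on LE.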
define void @VMRGHB_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGHB_xy:
%tmp = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vmrghb [[REG3:[0-9]+]], [[REG2]], [[REG1]]
store <16 x i8> %tmp3, <16 x i8>* %A
ret void
}
define void @VMRGHB_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGHB_xx:
%tmp = load <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11, i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
; CHECK: vmrghb
store <16 x i8> %tmp2, <16 x i8>* %A
ret void
}
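; The same pattern at halfword granularity: interleaving halfwords 0-3
; maps to vmrglh.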
define void @VMRGLH_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGLH_xy:
%tmp = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 16, i32 17, i32 2, i32 3, i32 18, i32 19, i32 4, i32 5, i32 20, i32 21, i32 6, i32 7, i32 22, i32 23>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vmrglh [[REG3:[0-9]+]], [[REG2]], [[REG1]]
store <16 x i8> %tmp3, <16 x i8>* %A
ret void
}
define void @VMRGLH_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGLH_xx:
%tmp = load <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 4, i32 5, i32 4, i32 5, i32 6, i32 7, i32 6, i32 7>
; CHECK: vmrglh
store <16 x i8> %tmp2, <16 x i8>* %A
ret void
}
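; Interleaving halfwords 4-7 maps to vmrghh.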
define void @VMRGHH_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGHH_xy:
%tmp = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 24, i32 25, i32 10, i32 11, i32 26, i32 27, i32 12, i32 13, i32 28, i32 29, i32 14, i32 15, i32 30, i32 31>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vmrghh [[REG3:[0-9]+]], [[REG2]], [[REG1]]
store <16 x i8> %tmp3, <16 x i8>* %A
ret void
}
define void @VMRGHH_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGHH_xx:
%tmp = load <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 8, i32 9, i32 10, i32 11, i32 10, i32 11, i32 12, i32 13, i32 12, i32 13, i32 14, i32 15, i32 14, i32 15>
; CHECK: vmrghh
store <16 x i8> %tmp2, <16 x i8>* %A
ret void
}
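; And at word granularity: interleaving words 0-1 maps to vmrglw.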
define void @VMRGLW_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGLW_xy:
%tmp = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vmrglw [[REG3:[0-9]+]], [[REG2]], [[REG1]]
store <16 x i8> %tmp3, <16 x i8>* %A
ret void
}
define void @VMRGLW_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGLW_xx:
%tmp = load <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
; CHECK: vmrglw
store <16 x i8> %tmp2, <16 x i8>* %A
ret void
}
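; Interleaving words 2-3 maps to vmrghw.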
define void @VMRGHW_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGHW_xy:
%tmp = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 24, i32 25, i32 26, i32 27, i32 12, i32 13, i32 14, i32 15, i32 28, i32 29, i32 30, i32 31>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vmrghw [[REG3:[0-9]+]], [[REG2]], [[REG1]]
store <16 x i8> %tmp3, <16 x i8>* %A
ret void
}
define void @VMRGHW_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGHW_xx:
%tmp = load <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 12, i32 13, i32 14, i32 15>
; CHECK: vmrghw
store <16 x i8> %tmp2, <16 x i8>* %A
ret void
}
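; Extracting 16 bytes starting at byte 12 of the LE concatenation maps to
; vsldoi with reversed operands and shift amount 16 - 12 = 4.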
define void @VSLDOI_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VSLDOI_xy:
%tmp = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vsldoi [[REG3:[0-9]+]], [[REG2]], [[REG1]], 4
store <16 x i8> %tmp3, <16 x i8>* %A
ret void
}
define void @VSLDOI_xx(<16 x i8>* %A) {
entry:
; CHECK: VSLDOI_xx:
%tmp = load <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
; CHECK: vsldoi
store <16 x i8> %tmp2, <16 x i8>* %A
ret void
}