[mips][msa] Added support for matching vshf from normal IR (i.e. not intrinsics)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@191301 91177308-0d34-0410-b5e6-96231b3b80d8
Daniel Sanders 2013-09-24 14:02:15 +00:00
parent acfa5a203c
commit 7e0df9aa29
8 changed files with 450 additions and 20 deletions
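In practice this lets ordinary shufflevector IR select MSA's vshf.{b,h,w,d} instructions directly, instead of requiring the __builtin_msa_vshf_* intrinsics. A minimal sketch of the kind of IR that now matches (the function name is illustrative; the shuffle mirrors vshf_v16i8_3 in the new test file added below):

define void @vshf_example(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
  %1 = load <16 x i8>* %a
  %2 = load <16 x i8>* %b
  ; Indices reference both source vectors, so a generic vshf.b shuffle is expected.
  %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 17, i32 24, i32 25, i32 18, i32 19, i32 20, i32 28, i32 19, i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
  ; The shuffle indices become a constant vector that is loaded and used as the
  ; vshf.b mask operand (see the CHECK-DAG lines in the new tests below).
  store <16 x i8> %3, <16 x i8>* %c
  ret void
}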

View File

@@ -1728,13 +1728,17 @@ def int_mips_subvi_d : GCCBuiltin<"__builtin_msa_subvi_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_vshf_b : GCCBuiltin<"__builtin_msa_vshf_b">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
[IntrNoMem]>;
def int_mips_vshf_h : GCCBuiltin<"__builtin_msa_vshf_h">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
[IntrNoMem]>;
def int_mips_vshf_w : GCCBuiltin<"__builtin_msa_vshf_w">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
[IntrNoMem]>;
def int_mips_vshf_d : GCCBuiltin<"__builtin_msa_vshf_d">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
[IntrNoMem]>;
def int_mips_xor_v : GCCBuiltin<"__builtin_msa_xor_v">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;

View File

@@ -224,6 +224,7 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
case MipsISD::VEXTRACT_SEXT_ELT: return "MipsISD::VEXTRACT_SEXT_ELT";
case MipsISD::VEXTRACT_ZEXT_ELT: return "MipsISD::VEXTRACT_ZEXT_ELT";
case MipsISD::VNOR: return "MipsISD::VNOR";
case MipsISD::VSHF: return "MipsISD::VSHF";
default: return NULL;
}
}

View File

@@ -172,6 +172,9 @@ namespace llvm {
VUMAX,
VUMIN,
// Vector Shuffle with mask as an operand
VSHF, // Generic shuffle
// Combined (XOR (OR $a, $b), -1)
VNOR,

View File

@@ -20,6 +20,9 @@ def SDT_VFSetCC : SDTypeProfile<1, 3, [SDTCisInt<0>,
SDTCisFP<1>,
SDTCisSameAs<1, 2>,
SDTCisVT<3, OtherVT>]>;
def SDT_VSHF : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisVec<0>,
SDTCisInt<1>, SDTCisVec<1>,
SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>]>;
def MipsVAllNonZero : SDNode<"MipsISD::VALL_NONZERO", SDT_MipsVecCond>;
def MipsVAnyNonZero : SDNode<"MipsISD::VANY_NONZERO", SDT_MipsVecCond>;
@@ -35,6 +38,7 @@ def MipsVUMin : SDNode<"MipsISD::VUMIN", SDTIntBinOp,
[SDNPCommutative, SDNPAssociative]>;
def MipsVNOR : SDNode<"MipsISD::VNOR", SDTIntBinOp,
[SDNPCommutative, SDNPAssociative]>;
def MipsVSHF : SDNode<"MipsISD::VSHF", SDT_VSHF>;
def vsetcc : SDNode<"ISD::SETCC", SDT_VSetCC>;
def vfsetcc : SDNode<"ISD::SETCC", SDT_VFSetCC>;
@@ -1119,6 +1123,19 @@ class MSA_3R_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass Itinerary = itin;
}
class MSA_3R_VSHF_DESC_BASE<string instr_asm, RegisterClass RCWD,
RegisterClass RCWS = RCWD,
RegisterClass RCWT = RCWD,
InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs RCWD:$wd);
dag InOperandList = (ins RCWD:$wd_in, RCWS:$ws, RCWT:$wt);
string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $wt");
list<dag> Pattern = [(set RCWD:$wd, (MipsVSHF RCWD:$wd_in, RCWS:$ws,
RCWT:$wt))];
string Constraints = "$wd = $wd_in";
InstrItinClass Itinerary = itin;
}
class MSA_3R_4R_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
RegisterClass RCWD, RegisterClass RCWS = RCWD,
RegisterClass RCWT = RCWD,
@@ -2213,10 +2230,10 @@ class SUBVI_H_DESC : MSA_I5_DESC_BASE<"subvi.h", sub, vsplati16_uimm5, MSA128H>;
class SUBVI_W_DESC : MSA_I5_DESC_BASE<"subvi.w", sub, vsplati32_uimm5, MSA128W>;
class SUBVI_D_DESC : MSA_I5_DESC_BASE<"subvi.d", sub, vsplati64_uimm5, MSA128D>;
class VSHF_B_DESC : MSA_3R_DESC_BASE<"vshf.b", int_mips_vshf_b, MSA128B>;
class VSHF_H_DESC : MSA_3R_DESC_BASE<"vshf.h", int_mips_vshf_h, MSA128H>;
class VSHF_W_DESC : MSA_3R_DESC_BASE<"vshf.w", int_mips_vshf_w, MSA128W>;
class VSHF_D_DESC : MSA_3R_DESC_BASE<"vshf.d", int_mips_vshf_d, MSA128D>;
class VSHF_B_DESC : MSA_3R_VSHF_DESC_BASE<"vshf.b", MSA128B>;
class VSHF_H_DESC : MSA_3R_VSHF_DESC_BASE<"vshf.h", MSA128H>;
class VSHF_W_DESC : MSA_3R_VSHF_DESC_BASE<"vshf.w", MSA128W>;
class VSHF_D_DESC : MSA_3R_VSHF_DESC_BASE<"vshf.d", MSA128D>;
class XOR_V_DESC : MSA_VEC_DESC_BASE<"xor.v", xor, MSA128B>;
class XOR_V_H_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<xor, MSA128H>;
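The Constraints = "$wd = $wd_in" string in MSA_3R_VSHF_DESC_BASE above encodes vshf's read-modify-write behaviour: $wd holds the shuffle indices on input and is overwritten with the shuffled result, so the mask register doubles as the destination. A hedged sketch in the style of the tests below (the function name is illustrative and the CHECK-DAG lines are expectations, not captured output):

define void @vshf_mask_is_dest(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
  %1 = load <8 x i16>* %a
  %2 = load <8 x i16>* %b
  %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
  ; CHECK-DAG: ld.h   [[MASK:\$w[0-9]+]], %lo
  ; CHECK-DAG: vshf.h [[MASK]], [[WS:\$w[0-9]+]], [[WT:\$w[0-9]+]]
  ; CHECK-DAG: st.h   [[MASK]], 0($4)
  store <8 x i16> %3, <8 x i16>* %c
  ret void
}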

View File

@@ -180,6 +180,7 @@ addMSAIntType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
setOperationAction(ISD::SRL, Ty, Legal);
setOperationAction(ISD::SUB, Ty, Legal);
setOperationAction(ISD::UDIV, Ty, Legal);
setOperationAction(ISD::VECTOR_SHUFFLE, Ty, Custom);
setOperationAction(ISD::VSELECT, Ty, Legal);
setOperationAction(ISD::XOR, Ty, Legal);
@@ -259,6 +260,7 @@ SDValue MipsSETargetLowering::LowerOperation(SDValue Op,
case ISD::INTRINSIC_VOID: return lowerINTRINSIC_VOID(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT: return lowerEXTRACT_VECTOR_ELT(Op, DAG);
case ISD::BUILD_VECTOR: return lowerBUILD_VECTOR(Op, DAG);
case ISD::VECTOR_SHUFFLE: return lowerVECTOR_SHUFFLE(Op, DAG);
}
return MipsTargetLowering::LowerOperation(Op, DAG);
@@ -1470,6 +1472,12 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::mips_subvi_d:
return lowerMSABinaryImmIntr(Op, DAG, ISD::SUB,
lowerMSASplatImm(Op, 2, DAG));
case Intrinsic::mips_vshf_b:
case Intrinsic::mips_vshf_h:
case Intrinsic::mips_vshf_w:
case Intrinsic::mips_vshf_d:
return DAG.getNode(MipsISD::VSHF, SDLoc(Op), Op->getValueType(0),
Op->getOperand(1), Op->getOperand(2), Op->getOperand(3));
case Intrinsic::mips_xor_v:
return lowerMSABinaryIntr(Op, DAG, ISD::XOR);
case Intrinsic::mips_xori_b:
@@ -1727,6 +1735,76 @@ SDValue MipsSETargetLowering::lowerBUILD_VECTOR(SDValue Op,
return SDValue();
}
// Lower VECTOR_SHUFFLE into VSHF.
//
// This mostly consists of converting the shuffle indices in Indices into a
// BUILD_VECTOR and adding it as an operand to the resulting VSHF. There is
// also code to eliminate unused operands of the VECTOR_SHUFFLE. For example,
// if the type is v8i16 and all the indices are less than 8 then the second
// operand is unused and can be replaced with anything. We choose to replace it
// with the used operand since this reduces the number of instructions overall.
static SDValue lowerVECTOR_SHUFFLE_VSHF(SDValue Op, EVT ResTy,
SmallVector<int, 16> Indices,
SelectionDAG &DAG) {
SmallVector<SDValue, 16> Ops;
SDValue Op0;
SDValue Op1;
EVT MaskVecTy = ResTy.changeVectorElementTypeToInteger();
EVT MaskEltTy = MaskVecTy.getVectorElementType();
bool Using1stVec = false;
bool Using2ndVec = false;
SDLoc DL(Op);
int ResTyNumElts = ResTy.getVectorNumElements();
for (int i = 0; i < ResTyNumElts; ++i) {
// Idx == -1 means UNDEF
int Idx = Indices[i];
if (0 <= Idx && Idx < ResTyNumElts)
Using1stVec = true;
if (ResTyNumElts <= Idx && Idx < ResTyNumElts * 2)
Using2ndVec = true;
}
for (SmallVector<int, 16>::iterator I = Indices.begin(); I != Indices.end();
++I)
Ops.push_back(DAG.getTargetConstant(*I, MaskEltTy));
SDValue MaskVec = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskVecTy, &Ops[0],
Ops.size());
if (Using1stVec && Using2ndVec) {
Op0 = Op->getOperand(0);
Op1 = Op->getOperand(1);
} else if (Using1stVec)
Op0 = Op1 = Op->getOperand(0);
else if (Using2ndVec)
Op0 = Op1 = Op->getOperand(1);
else
llvm_unreachable("shuffle vector mask references neither vector operand?");
return DAG.getNode(MipsISD::VSHF, DL, ResTy, MaskVec, Op0, Op1);
}
// Lower VECTOR_SHUFFLE into one of a number of instructions depending on the
// indices in the shuffle.
SDValue MipsSETargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
SelectionDAG &DAG) const {
ShuffleVectorSDNode *Node = cast<ShuffleVectorSDNode>(Op);
EVT ResTy = Op->getValueType(0);
if (!ResTy.is128BitVector())
return SDValue();
int ResTyNumElts = ResTy.getVectorNumElements();
SmallVector<int, 16> Indices;
for (int i = 0; i < ResTyNumElts; ++i)
Indices.push_back(Node->getMaskElt(i));
return lowerVECTOR_SHUFFLE_VSHF(Op, ResTy, Indices, DAG);
}
MachineBasicBlock * MipsSETargetLowering::
emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
// $bb:

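To make the operand elimination in lowerVECTOR_SHUFFLE_VSHF above concrete: when every index selects from the first vector, Using2ndVec stays false and both data operands of the VSHF node become operand 0 of the shuffle, so only one source register is needed. A minimal sketch (mirroring vshf_v8i16_0 in the new test file below; the function name is illustrative):

define void @vshf_first_operand_only(<8 x i16>* %c, <8 x i16>* %a) nounwind {
  %1 = load <8 x i16>* %a
  ; All indices are < 8, so the second shufflevector operand is never referenced...
  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
  ; ...and the lowering emits (MipsISD::VSHF mask, %1, %1), which selects
  ;   vshf.h $wd, $ws, $ws
  store <8 x i16> %2, <8 x i16>* %c
  ret void
}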
View File

@@ -75,6 +75,9 @@ namespace llvm {
SDValue lowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
/// \brief Lower VECTOR_SHUFFLE into one of a number of instructions
/// depending on the indices in the shuffle.
SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
MachineBasicBlock *emitBPOSGE32(MachineInstr *MI,
MachineBasicBlock *BB) const;

View File

@@ -5,84 +5,95 @@
@llvm_mips_vshf_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_vshf_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_vshf_b_ARG3 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_vshf_b_RES = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16
define void @llvm_mips_vshf_b_test() nounwind {
entry:
%0 = load <16 x i8>* @llvm_mips_vshf_b_ARG1
%1 = load <16 x i8>* @llvm_mips_vshf_b_ARG2
%2 = tail call <16 x i8> @llvm.mips.vshf.b(<16 x i8> %0, <16 x i8> %1)
store <16 x i8> %2, <16 x i8>* @llvm_mips_vshf_b_RES
%2 = load <16 x i8>* @llvm_mips_vshf_b_ARG3
%3 = tail call <16 x i8> @llvm.mips.vshf.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
store <16 x i8> %3, <16 x i8>* @llvm_mips_vshf_b_RES
ret void
}
declare <16 x i8> @llvm.mips.vshf.b(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.vshf.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
; CHECK: llvm_mips_vshf_b_test:
; CHECK: ld.b
; CHECK: ld.b
; CHECK: ld.b
; CHECK: vshf.b
; CHECK: st.b
; CHECK: .size llvm_mips_vshf_b_test
;
@llvm_mips_vshf_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_vshf_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_vshf_h_ARG3 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_vshf_h_RES = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16
define void @llvm_mips_vshf_h_test() nounwind {
entry:
%0 = load <8 x i16>* @llvm_mips_vshf_h_ARG1
%1 = load <8 x i16>* @llvm_mips_vshf_h_ARG2
%2 = tail call <8 x i16> @llvm.mips.vshf.h(<8 x i16> %0, <8 x i16> %1)
store <8 x i16> %2, <8 x i16>* @llvm_mips_vshf_h_RES
%2 = load <8 x i16>* @llvm_mips_vshf_h_ARG3
%3 = tail call <8 x i16> @llvm.mips.vshf.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
store <8 x i16> %3, <8 x i16>* @llvm_mips_vshf_h_RES
ret void
}
declare <8 x i16> @llvm.mips.vshf.h(<8 x i16>, <8 x i16>) nounwind
declare <8 x i16> @llvm.mips.vshf.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
; CHECK: llvm_mips_vshf_h_test:
; CHECK: ld.h
; CHECK: ld.h
; CHECK: ld.h
; CHECK: vshf.h
; CHECK: st.h
; CHECK: .size llvm_mips_vshf_h_test
;
@llvm_mips_vshf_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_vshf_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_vshf_w_ARG3 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_vshf_w_RES = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16
define void @llvm_mips_vshf_w_test() nounwind {
entry:
%0 = load <4 x i32>* @llvm_mips_vshf_w_ARG1
%1 = load <4 x i32>* @llvm_mips_vshf_w_ARG2
%2 = tail call <4 x i32> @llvm.mips.vshf.w(<4 x i32> %0, <4 x i32> %1)
store <4 x i32> %2, <4 x i32>* @llvm_mips_vshf_w_RES
%2 = load <4 x i32>* @llvm_mips_vshf_w_ARG3
%3 = tail call <4 x i32> @llvm.mips.vshf.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
store <4 x i32> %3, <4 x i32>* @llvm_mips_vshf_w_RES
ret void
}
declare <4 x i32> @llvm.mips.vshf.w(<4 x i32>, <4 x i32>) nounwind
declare <4 x i32> @llvm.mips.vshf.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
; CHECK: llvm_mips_vshf_w_test:
; CHECK: ld.w
; CHECK: ld.w
; CHECK: ld.w
; CHECK: vshf.w
; CHECK: st.w
; CHECK: .size llvm_mips_vshf_w_test
;
@llvm_mips_vshf_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_vshf_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_vshf_d_ARG3 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_vshf_d_RES = global <2 x i64> <i64 0, i64 0>, align 16
define void @llvm_mips_vshf_d_test() nounwind {
entry:
%0 = load <2 x i64>* @llvm_mips_vshf_d_ARG1
%1 = load <2 x i64>* @llvm_mips_vshf_d_ARG2
%2 = tail call <2 x i64> @llvm.mips.vshf.d(<2 x i64> %0, <2 x i64> %1)
store <2 x i64> %2, <2 x i64>* @llvm_mips_vshf_d_RES
%2 = load <2 x i64>* @llvm_mips_vshf_d_ARG3
%3 = tail call <2 x i64> @llvm.mips.vshf.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
store <2 x i64> %3, <2 x i64>* @llvm_mips_vshf_d_RES
ret void
}
declare <2 x i64> @llvm.mips.vshf.d(<2 x i64>, <2 x i64>) nounwind
declare <2 x i64> @llvm.mips.vshf.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
; CHECK: llvm_mips_vshf_d_test:
; CHECK: ld.d

View File

@@ -0,0 +1,313 @@
; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
define void @vshf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: vshf_v16i8_0:
%1 = load <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo
; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R1]]
store <16 x i8> %2, <16 x i8>* %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size vshf_v16i8_0
}
define void @vshf_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: vshf_v16i8_1:
%1 = load <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: ldi.b [[R3:\$w[0-9]+]], 1
; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R1]]
store <16 x i8> %2, <16 x i8>* %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size vshf_v16i8_1
}
define void @vshf_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: vshf_v16i8_2:
%1 = load <16 x i8>* %a
%2 = load <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 16>
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo
; CHECK-DAG: vshf.b [[R3]], [[R2]], [[R2]]
store <16 x i8> %3, <16 x i8>* %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size vshf_v16i8_2
}
define void @vshf_v16i8_3(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: vshf_v16i8_3:
%1 = load <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = load <16 x i8>* %b
; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 17, i32 24, i32 25, i32 18, i32 19, i32 20, i32 28, i32 19, i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], %lo
; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R2]]
store <16 x i8> %3, <16 x i8>* %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size vshf_v16i8_3
}
define void @vshf_v16i8_4(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
; CHECK: vshf_v16i8_4:
%1 = load <16 x i8>* %a
; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <16 x i8> %1, <16 x i8> %1, <16 x i32> <i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17>
; CHECK-DAG: ldi.b [[R3:\$w[0-9]+]], 1
; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R1]]
store <16 x i8> %2, <16 x i8>* %c
; CHECK-DAG: st.b [[R3]], 0($4)
ret void
; CHECK: .size vshf_v16i8_4
}
define void @vshf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: vshf_v8i16_0:
%1 = load <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo
; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R1]]
store <8 x i16> %2, <8 x i16>* %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size vshf_v8i16_0
}
define void @vshf_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: vshf_v8i16_1:
%1 = load <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R1]]
store <8 x i16> %2, <8 x i16>* %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size vshf_v8i16_1
}
define void @vshf_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: vshf_v8i16_2:
%1 = load <8 x i16>* %a
%2 = load <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 8>
; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo
; CHECK-DAG: vshf.h [[R3]], [[R2]], [[R2]]
store <8 x i16> %3, <8 x i16>* %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size vshf_v8i16_2
}
define void @vshf_v8i16_3(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: vshf_v8i16_3:
%1 = load <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = load <8 x i16>* %b
; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], %lo
; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R2]]
store <8 x i16> %3, <8 x i16>* %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size vshf_v8i16_3
}
define void @vshf_v8i16_4(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
; CHECK: vshf_v8i16_4:
%1 = load <8 x i16>* %a
; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <8 x i16> %1, <8 x i16> %1, <8 x i32> <i32 1, i32 9, i32 1, i32 9, i32 1, i32 9, i32 1, i32 9>
; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R1]]
store <8 x i16> %2, <8 x i16>* %c
; CHECK-DAG: st.h [[R3]], 0($4)
ret void
; CHECK: .size vshf_v8i16_4
}
define void @vshf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: vshf_v4i32_0:
%1 = load <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], %lo
; CHECK-DAG: vshf.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
store <4 x i32> %2, <4 x i32>* %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size vshf_v4i32_0
}
define void @vshf_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: vshf_v4i32_1:
%1 = load <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
; CHECK-DAG: vshf.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
store <4 x i32> %2, <4 x i32>* %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size vshf_v4i32_1
}
define void @vshf_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: vshf_v4i32_2:
%1 = load <4 x i32>* %a
%2 = load <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 4, i32 5, i32 6, i32 4>
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], %lo
; CHECK-DAG: vshf.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
store <4 x i32> %3, <4 x i32>* %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size vshf_v4i32_2
}
define void @vshf_v4i32_3(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: vshf_v4i32_3:
%1 = load <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = load <4 x i32>* %b
; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 6, i32 4>
; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], %lo
; CHECK-DAG: vshf.w [[R3]], [[R1]], [[R2]]
store <4 x i32> %3, <4 x i32>* %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size vshf_v4i32_3
}
define void @vshf_v4i32_4(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
; CHECK: vshf_v4i32_4:
%1 = load <4 x i32>* %a
; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <4 x i32> %1, <4 x i32> %1, <4 x i32> <i32 1, i32 5, i32 5, i32 1>
; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
; CHECK-DAG: vshf.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
store <4 x i32> %2, <4 x i32>* %c
; CHECK-DAG: st.w [[R3]], 0($4)
ret void
; CHECK: .size vshf_v4i32_4
}
define void @vshf_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: vshf_v2i64_0:
%1 = load <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo
; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R1]]
store <2 x i64> %2, <2 x i64>* %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size vshf_v2i64_0
}
define void @vshf_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: vshf_v2i64_1:
%1 = load <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R1]]
store <2 x i64> %2, <2 x i64>* %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size vshf_v2i64_1
}
define void @vshf_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: vshf_v2i64_2:
%1 = load <2 x i64>* %a
%2 = load <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 3, i32 2>
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo
; CHECK-DAG: vshf.d [[R3]], [[R2]], [[R2]]
store <2 x i64> %3, <2 x i64>* %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size vshf_v2i64_2
}
define void @vshf_v2i64_3(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: vshf_v2i64_3:
%1 = load <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = load <2 x i64>* %b
; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 2>
; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], %lo
; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R2]]
store <2 x i64> %3, <2 x i64>* %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size vshf_v2i64_3
}
define void @vshf_v2i64_4(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
; CHECK: vshf_v2i64_4:
%1 = load <2 x i64>* %a
; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
%2 = shufflevector <2 x i64> %1, <2 x i64> %1, <2 x i32> <i32 1, i32 3>
; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R1]]
store <2 x i64> %2, <2 x i64>* %c
; CHECK-DAG: st.d [[R3]], 0($4)
ret void
; CHECK: .size vshf_v2i64_4
}