From bc4e0586bdd7f75352889e776e7c9470b9137e7a Mon Sep 17 00:00:00 2001
From: Daniel Sanders
Date: Tue, 27 Aug 2013 10:16:17 +0000
Subject: [PATCH] [mips][msa] Added tests for and.v, bmnz.v, bmz.v, bsel.v,
 nor.v, or.v, xor.v when non-byte vectors are used.

Note that all of these tests use ld.b and st.b for the loads and stores
regardless of the data size. This is because the definition of bitcast is
equivalent to a store/load sequence, and the DAG combiner accordingly folds
bitcasts to/from v16i8 into the load/store nodes to produce load/store nodes
with type v16i8.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@189333 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/Mips/msa/vec.ll | 566 ++++++++++++++++++++++++++++++++---
 1 file changed, 523 insertions(+), 43 deletions(-)

diff --git a/test/CodeGen/Mips/msa/vec.ll b/test/CodeGen/Mips/msa/vec.ll
index 8a9d3896872..eb293e8607b 100644
--- a/test/CodeGen/Mips/msa/vec.ll
+++ b/test/CodeGen/Mips/msa/vec.ll
@@ -1,4 +1,5 @@
-; RUN: llc -march=mips -mattr=+msa < %s | FileCheck %s
+; RUN: llc -march=mips -mattr=+msa < %s | FileCheck -check-prefix=ANYENDIAN %s
+; RUN: llc -march=mipsel -mattr=+msa < %s | FileCheck -check-prefix=ANYENDIAN %s
 ;
 ; Test the MSA intrinsics that are encoded with the VEC instruction format.

@@ -18,12 +19,81 @@ entry:
   ret void
 }

-; CHECK: llvm_mips_and_v_b_test:
-; CHECK: ld.b
-; CHECK: ld.b
-; CHECK: and.v
-; CHECK: st.b
-; CHECK: .size llvm_mips_and_v_b_test
+; ANYENDIAN: llvm_mips_and_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: and.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_and_v_b_test
+;
+@llvm_mips_and_v_h_ARG1 = global <8 x i16> , align 16
+@llvm_mips_and_v_h_ARG2 = global <8 x i16> , align 16
+@llvm_mips_and_v_h_RES = global <8 x i16> , align 16
+
+define void @llvm_mips_and_v_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2
+  %2 = bitcast <8 x i16> %0 to <16 x i8>
+  %3 = bitcast <8 x i16> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <8 x i16>
+  store <8 x i16> %5, <8 x i16>* @llvm_mips_and_v_h_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_and_v_h_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: and.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_and_v_h_test
+;
+@llvm_mips_and_v_w_ARG1 = global <4 x i32> , align 16
+@llvm_mips_and_v_w_ARG2 = global <4 x i32> , align 16
+@llvm_mips_and_v_w_RES = global <4 x i32> , align 16
+
+define void @llvm_mips_and_v_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2
+  %2 = bitcast <4 x i32> %0 to <16 x i8>
+  %3 = bitcast <4 x i32> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <4 x i32>
+  store <4 x i32> %5, <4 x i32>* @llvm_mips_and_v_w_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_and_v_w_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: and.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_and_v_w_test
+;
+@llvm_mips_and_v_d_ARG1 = global <2 x i64> , align 16
+@llvm_mips_and_v_d_ARG2 = global <2 x i64> , align 16
+@llvm_mips_and_v_d_RES = global <2 x i64> , align 16
+
+define void @llvm_mips_and_v_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2
+  %2 = bitcast <2 x i64> %0 to <16 x i8>
+  %3 = bitcast <2 x i64> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <2 x i64>
+  store <2 x i64> %5, <2 x i64>* @llvm_mips_and_v_d_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_and_v_d_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: and.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_and_v_d_test
 ;
 @llvm_mips_bmnz_v_b_ARG1 = global <16 x i8> , align 16
 @llvm_mips_bmnz_v_b_ARG2 = global <16 x i8> , align 16
@@ -41,12 +111,81 @@ entry:
   ret void
 }

-; CHECK: llvm_mips_bmnz_v_b_test:
-; CHECK: ld.b
-; CHECK: ld.b
-; CHECK: bmnz.v
-; CHECK: st.b
-; CHECK: .size llvm_mips_bmnz_v_b_test
+; ANYENDIAN: llvm_mips_bmnz_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: bmnz.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_bmnz_v_b_test
+;
+@llvm_mips_bmnz_v_h_ARG1 = global <8 x i16> , align 16
+@llvm_mips_bmnz_v_h_ARG2 = global <8 x i16> , align 16
+@llvm_mips_bmnz_v_h_RES = global <8 x i16> , align 16
+
+define void @llvm_mips_bmnz_v_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG2
+  %2 = bitcast <8 x i16> %0 to <16 x i8>
+  %3 = bitcast <8 x i16> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <8 x i16>
+  store <8 x i16> %5, <8 x i16>* @llvm_mips_bmnz_v_h_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_bmnz_v_h_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: bmnz.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_bmnz_v_h_test
+;
+@llvm_mips_bmnz_v_w_ARG1 = global <4 x i32> , align 16
+@llvm_mips_bmnz_v_w_ARG2 = global <4 x i32> , align 16
+@llvm_mips_bmnz_v_w_RES = global <4 x i32> , align 16
+
+define void @llvm_mips_bmnz_v_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG2
+  %2 = bitcast <4 x i32> %0 to <16 x i8>
+  %3 = bitcast <4 x i32> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <4 x i32>
+  store <4 x i32> %5, <4 x i32>* @llvm_mips_bmnz_v_w_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_bmnz_v_w_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: bmnz.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_bmnz_v_w_test
+;
+@llvm_mips_bmnz_v_d_ARG1 = global <2 x i64> , align 16
+@llvm_mips_bmnz_v_d_ARG2 = global <2 x i64> , align 16
+@llvm_mips_bmnz_v_d_RES = global <2 x i64> , align 16
+
+define void @llvm_mips_bmnz_v_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG2
+  %2 = bitcast <2 x i64> %0 to <16 x i8>
+  %3 = bitcast <2 x i64> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <2 x i64>
+  store <2 x i64> %5, <2 x i64>* @llvm_mips_bmnz_v_d_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_bmnz_v_d_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: bmnz.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_bmnz_v_d_test
 ;
 @llvm_mips_bmz_v_b_ARG1 = global <16 x i8> , align 16
 @llvm_mips_bmz_v_b_ARG2 = global <16 x i8> , align 16
@@ -64,17 +203,82 @@ entry:
   ret void
 }

-; CHECK: llvm_mips_bmz_v_b_test:
-; CHECK: ld.b
-; CHECK: ld.b
-; CHECK: bmz.v
-; CHECK: st.b
-; CHECK: .size llvm_mips_bmz_v_b_test
+; ANYENDIAN: llvm_mips_bmz_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: bmz.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_bmz_v_b_test
 ;
 @llvm_mips_bmz_v_h_ARG1 = global <8 x i16> , align 16
 @llvm_mips_bmz_v_h_ARG2 = global <8 x i16> , align 16
 @llvm_mips_bmz_v_h_RES = global <8 x i16> , align 16

+define void @llvm_mips_bmz_v_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG2
+  %2 = bitcast <8 x i16> %0 to <16 x i8>
+  %3 = bitcast <8 x i16> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <8 x i16>
+  store <8 x i16> %5, <8 x i16>* @llvm_mips_bmz_v_h_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_bmz_v_h_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: bmz.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_bmz_v_h_test
+;
+@llvm_mips_bmz_v_w_ARG1 = global <4 x i32> , align 16
+@llvm_mips_bmz_v_w_ARG2 = global <4 x i32> , align 16
+@llvm_mips_bmz_v_w_RES = global <4 x i32> , align 16
+
+define void @llvm_mips_bmz_v_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG2
+  %2 = bitcast <4 x i32> %0 to <16 x i8>
+  %3 = bitcast <4 x i32> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <4 x i32>
+  store <4 x i32> %5, <4 x i32>* @llvm_mips_bmz_v_w_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_bmz_v_w_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: bmz.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_bmz_v_w_test
+;
+@llvm_mips_bmz_v_d_ARG1 = global <2 x i64> , align 16
+@llvm_mips_bmz_v_d_ARG2 = global <2 x i64> , align 16
+@llvm_mips_bmz_v_d_RES = global <2 x i64> , align 16
+
+define void @llvm_mips_bmz_v_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG2
+  %2 = bitcast <2 x i64> %0 to <16 x i8>
+  %3 = bitcast <2 x i64> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <2 x i64>
+  store <2 x i64> %5, <2 x i64>* @llvm_mips_bmz_v_d_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_bmz_v_d_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: bmz.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_bmz_v_d_test
+;
 @llvm_mips_bsel_v_b_ARG1 = global <16 x i8> , align 16
 @llvm_mips_bsel_v_b_ARG2 = global <16 x i8> , align 16
 @llvm_mips_bsel_v_b_RES = global <16 x i8> , align 16
@@ -91,12 +295,81 @@ entry:
   ret void
 }

-; CHECK: llvm_mips_bsel_v_b_test:
-; CHECK: ld.b
-; CHECK: ld.b
-; CHECK: bsel.v
-; CHECK: st.b
-; CHECK: .size llvm_mips_bsel_v_b_test
+; ANYENDIAN: llvm_mips_bsel_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: bsel.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_bsel_v_b_test
+;
+@llvm_mips_bsel_v_h_ARG1 = global <8 x i16> , align 16
+@llvm_mips_bsel_v_h_ARG2 = global <8 x i16> , align 16
+@llvm_mips_bsel_v_h_RES = global <8 x i16> , align 16
+
+define void @llvm_mips_bsel_v_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG2
+  %2 = bitcast <8 x i16> %0 to <16 x i8>
+  %3 = bitcast <8 x i16> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <8 x i16>
+  store <8 x i16> %5, <8 x i16>* @llvm_mips_bsel_v_h_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_bsel_v_h_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: bsel.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_bsel_v_h_test
+;
+@llvm_mips_bsel_v_w_ARG1 = global <4 x i32> , align 16
+@llvm_mips_bsel_v_w_ARG2 = global <4 x i32> , align 16
+@llvm_mips_bsel_v_w_RES = global <4 x i32> , align 16
+
+define void @llvm_mips_bsel_v_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG2
+  %2 = bitcast <4 x i32> %0 to <16 x i8>
+  %3 = bitcast <4 x i32> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <4 x i32>
+  store <4 x i32> %5, <4 x i32>* @llvm_mips_bsel_v_w_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_bsel_v_w_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: bsel.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_bsel_v_w_test
+;
+@llvm_mips_bsel_v_d_ARG1 = global <2 x i64> , align 16
+@llvm_mips_bsel_v_d_ARG2 = global <2 x i64> , align 16
+@llvm_mips_bsel_v_d_RES = global <2 x i64> , align 16
+
+define void @llvm_mips_bsel_v_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG2
+  %2 = bitcast <2 x i64> %0 to <16 x i8>
+  %3 = bitcast <2 x i64> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <2 x i64>
+  store <2 x i64> %5, <2 x i64>* @llvm_mips_bsel_v_d_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_bsel_v_d_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: bsel.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_bsel_v_d_test
 ;
 @llvm_mips_nor_v_b_ARG1 = global <16 x i8> , align 16
 @llvm_mips_nor_v_b_ARG2 = global <16 x i8> , align 16
@@ -114,12 +387,81 @@ entry:
   ret void
 }

-; CHECK: llvm_mips_nor_v_b_test:
-; CHECK: ld.b
-; CHECK: ld.b
-; CHECK: nor.v
-; CHECK: st.b
-; CHECK: .size llvm_mips_nor_v_b_test
+; ANYENDIAN: llvm_mips_nor_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: nor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_nor_v_b_test
+;
+@llvm_mips_nor_v_h_ARG1 = global <8 x i16> , align 16
+@llvm_mips_nor_v_h_ARG2 = global <8 x i16> , align 16
+@llvm_mips_nor_v_h_RES = global <8 x i16> , align 16
+
+define void @llvm_mips_nor_v_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_nor_v_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_nor_v_h_ARG2
+  %2 = bitcast <8 x i16> %0 to <16 x i8>
+  %3 = bitcast <8 x i16> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <8 x i16>
+  store <8 x i16> %5, <8 x i16>* @llvm_mips_nor_v_h_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_nor_v_h_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: nor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_nor_v_h_test
+;
+@llvm_mips_nor_v_w_ARG1 = global <4 x i32> , align 16
+@llvm_mips_nor_v_w_ARG2 = global <4 x i32> , align 16
+@llvm_mips_nor_v_w_RES = global <4 x i32> , align 16
+
+define void @llvm_mips_nor_v_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_nor_v_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_nor_v_w_ARG2
+  %2 = bitcast <4 x i32> %0 to <16 x i8>
+  %3 = bitcast <4 x i32> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <4 x i32>
+  store <4 x i32> %5, <4 x i32>* @llvm_mips_nor_v_w_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_nor_v_w_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: nor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_nor_v_w_test
+;
+@llvm_mips_nor_v_d_ARG1 = global <2 x i64> , align 16
+@llvm_mips_nor_v_d_ARG2 = global <2 x i64> , align 16
+@llvm_mips_nor_v_d_RES = global <2 x i64> , align 16
+
+define void @llvm_mips_nor_v_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_nor_v_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_nor_v_d_ARG2
+  %2 = bitcast <2 x i64> %0 to <16 x i8>
+  %3 = bitcast <2 x i64> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <2 x i64>
+  store <2 x i64> %5, <2 x i64>* @llvm_mips_nor_v_d_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_nor_v_d_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: nor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_nor_v_d_test
 ;
 @llvm_mips_or_v_b_ARG1 = global <16 x i8> , align 16
 @llvm_mips_or_v_b_ARG2 = global <16 x i8> , align 16
@@ -137,12 +479,81 @@ entry:
   ret void
 }

-; CHECK: llvm_mips_or_v_b_test:
-; CHECK: ld.b
-; CHECK: ld.b
-; CHECK: or.v
-; CHECK: st.b
-; CHECK: .size llvm_mips_or_v_b_test
+; ANYENDIAN: llvm_mips_or_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: or.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_or_v_b_test
+;
+@llvm_mips_or_v_h_ARG1 = global <8 x i16> , align 16
+@llvm_mips_or_v_h_ARG2 = global <8 x i16> , align 16
+@llvm_mips_or_v_h_RES = global <8 x i16> , align 16
+
+define void @llvm_mips_or_v_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2
+  %2 = bitcast <8 x i16> %0 to <16 x i8>
+  %3 = bitcast <8 x i16> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <8 x i16>
+  store <8 x i16> %5, <8 x i16>* @llvm_mips_or_v_h_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_or_v_h_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: or.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_or_v_h_test
+;
+@llvm_mips_or_v_w_ARG1 = global <4 x i32> , align 16
+@llvm_mips_or_v_w_ARG2 = global <4 x i32> , align 16
+@llvm_mips_or_v_w_RES = global <4 x i32> , align 16
+
+define void @llvm_mips_or_v_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2
+  %2 = bitcast <4 x i32> %0 to <16 x i8>
+  %3 = bitcast <4 x i32> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <4 x i32>
+  store <4 x i32> %5, <4 x i32>* @llvm_mips_or_v_w_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_or_v_w_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: or.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_or_v_w_test
+;
+@llvm_mips_or_v_d_ARG1 = global <2 x i64> , align 16
+@llvm_mips_or_v_d_ARG2 = global <2 x i64> , align 16
+@llvm_mips_or_v_d_RES = global <2 x i64> , align 16
+
+define void @llvm_mips_or_v_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2
+  %2 = bitcast <2 x i64> %0 to <16 x i8>
+  %3 = bitcast <2 x i64> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <2 x i64>
+  store <2 x i64> %5, <2 x i64>* @llvm_mips_or_v_d_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_or_v_d_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: or.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_or_v_d_test
 ;
 @llvm_mips_xor_v_b_ARG1 = global <16 x i8> , align 16
 @llvm_mips_xor_v_b_ARG2 = global <16 x i8> , align 16
@@ -160,12 +571,81 @@ entry:
   ret void
 }

-; CHECK: llvm_mips_xor_v_b_test:
-; CHECK: ld.b
-; CHECK: ld.b
-; CHECK: xor.v
-; CHECK: st.b
-; CHECK: .size llvm_mips_xor_v_b_test
+; ANYENDIAN: llvm_mips_xor_v_b_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_xor_v_b_test
+;
+@llvm_mips_xor_v_h_ARG1 = global <8 x i16> , align 16
+@llvm_mips_xor_v_h_ARG2 = global <8 x i16> , align 16
+@llvm_mips_xor_v_h_RES = global <8 x i16> , align 16
+
+define void @llvm_mips_xor_v_h_test() nounwind {
+entry:
+  %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1
+  %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2
+  %2 = bitcast <8 x i16> %0 to <16 x i8>
+  %3 = bitcast <8 x i16> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <8 x i16>
+  store <8 x i16> %5, <8 x i16>* @llvm_mips_xor_v_h_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_xor_v_h_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_xor_v_h_test
+;
+@llvm_mips_xor_v_w_ARG1 = global <4 x i32> , align 16
+@llvm_mips_xor_v_w_ARG2 = global <4 x i32> , align 16
+@llvm_mips_xor_v_w_RES = global <4 x i32> , align 16
+
+define void @llvm_mips_xor_v_w_test() nounwind {
+entry:
+  %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1
+  %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2
+  %2 = bitcast <4 x i32> %0 to <16 x i8>
+  %3 = bitcast <4 x i32> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <4 x i32>
+  store <4 x i32> %5, <4 x i32>* @llvm_mips_xor_v_w_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_xor_v_w_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_xor_v_w_test
+;
+@llvm_mips_xor_v_d_ARG1 = global <2 x i64> , align 16
+@llvm_mips_xor_v_d_ARG2 = global <2 x i64> , align 16
+@llvm_mips_xor_v_d_RES = global <2 x i64> , align 16
+
+define void @llvm_mips_xor_v_d_test() nounwind {
+entry:
+  %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1
+  %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2
+  %2 = bitcast <2 x i64> %0 to <16 x i8>
+  %3 = bitcast <2 x i64> %1 to <16 x i8>
+  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
+  %5 = bitcast <16 x i8> %4 to <2 x i64>
+  store <2 x i64> %5, <2 x i64>* @llvm_mips_xor_v_d_RES
+  ret void
+}
+
+; ANYENDIAN: llvm_mips_xor_v_d_test:
+; ANYENDIAN: ld.b
+; ANYENDIAN: ld.b
+; ANYENDIAN: xor.v
+; ANYENDIAN: st.b
+; ANYENDIAN: .size llvm_mips_xor_v_d_test
 ;
 declare <16 x i8> @llvm.mips.and.v(<16 x i8>, <16 x i8>) nounwind
 declare <16 x i8> @llvm.mips.bmnz.v(<16 x i8>, <16 x i8>) nounwind