From d0c5b6170f97aff20dbc1e7f24e56a7cfdcb653c Mon Sep 17 00:00:00 2001 From: Owen Anderson Date: Mon, 25 Oct 2010 18:03:59 +0000 Subject: [PATCH] Add NEON encoding tests for vcgt and vacgt. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@117276 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/ARM/ARMInstrNEON.td | 2 + test/MC/ARM/neon-cmp-encoding.ll | 163 +++++++++++++++++++++++++++++++ 2 files changed, 165 insertions(+) diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td index 1e123c5fe47..60750edab7d 100644 --- a/lib/Target/ARM/ARMInstrNEON.td +++ b/lib/Target/ARM/ARMInstrNEON.td @@ -2824,9 +2824,11 @@ def VCGTfd : N3VD<1,0,0b10,0b1110,0, IIC_VBIND, "vcgt", "f32", v2i32, v2f32, def VCGTfq : N3VQ<1,0,0b10,0b1110,0, IIC_VBINQ, "vcgt", "f32", v4i32, v4f32, NEONvcgt, 0>; // For disassembly only. +// FIXME: This instruction's encoding MAY NOT BE correct. defm VCGTz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00000, 0, "vcgt", "s", "$dst, $src, #0">; // For disassembly only. +// FIXME: This instruction's encoding MAY NOT BE correct. defm VCLTz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00100, 0, "vclt", "s", "$dst, $src, #0">; diff --git a/test/MC/ARM/neon-cmp-encoding.ll b/test/MC/ARM/neon-cmp-encoding.ll index 590535abd5e..4f7337434fd 100644 --- a/test/MC/ARM/neon-cmp-encoding.ll +++ b/test/MC/ARM/neon-cmp-encoding.ll @@ -4,6 +4,7 @@ ; currently marked as for-disassembly only in the .td files: ; - VCEQz ; - VCGEz, VCLEz +; - VCGTz, VCLTz ; CHECK: vceq_8xi8 define <8 x i8> @vceq_8xi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { @@ -245,3 +246,165 @@ define <4 x i32> @vacge_4xfloat(<4 x float>* %A, <4 x float>* %B) nounwind { %tmp3 = call <4 x i32> @llvm.arm.neon.vacgeq(<4 x float> %tmp1, <4 x float> %tmp2) ret <4 x i32> %tmp3 } + +; CHECK: vcgts_8xi8 +define <8 x i8> @vcgts_8xi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { + %tmp1 = load <8 x i8>* %A + %tmp2 = load <8 x i8>* %B +; CHECK: vcgt.s8 d16, d16, d17 @ encoding: [0xa1,0x03,0x40,0xf2] + %tmp3 = icmp sgt <8 x i8> %tmp1, %tmp2 + %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> + ret <8 x i8> %tmp4 +} + +; CHECK: vcgts_4xi16 +define <4 x i16> @vcgts_4xi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { + %tmp1 = load <4 x i16>* %A + %tmp2 = load <4 x i16>* %B +; CHECK: vcgt.s16 d16, d16, d17 @ encoding: [0xa1,0x03,0x50,0xf2] + %tmp3 = icmp sgt <4 x i16> %tmp1, %tmp2 + %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> + ret <4 x i16> %tmp4 +} + +; CHECK: vcgts_2xi32 +define <2 x i32> @vcgts_2xi32(<2 x i32>* %A, <2 x i32>* %B) nounwind { + %tmp1 = load <2 x i32>* %A + %tmp2 = load <2 x i32>* %B +; CHECK: vcgt.s32 d16, d16, d17 @ encoding: [0xa1,0x03,0x60,0xf2] + %tmp3 = icmp sgt <2 x i32> %tmp1, %tmp2 + %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> + ret <2 x i32> %tmp4 +} + +; CHECK: vcgtu_8xi8 +define <8 x i8> @vcgtu_8xi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { + %tmp1 = load <8 x i8>* %A + %tmp2 = load <8 x i8>* %B +; CHECK: vcgt.u8 d16, d16, d17 @ encoding: [0xa1,0x03,0x40,0xf3] + %tmp3 = icmp ugt <8 x i8> %tmp1, %tmp2 + %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> + ret <8 x i8> %tmp4 +} + +; CHECK: vcgtu_4xi16 +define <4 x i16> @vcgtu_4xi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { + %tmp1 = load <4 x i16>* %A + %tmp2 = load <4 x i16>* %B +; CHECK: vcgt.u16 d16, d16, d17 @ encoding: [0xa1,0x03,0x50,0xf3] + %tmp3 = icmp ugt <4 x i16> %tmp1, %tmp2 + %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> + ret <4 x i16> %tmp4 +} + +; CHECK: vcgtu_2xi32 +define <2 x i32> @vcgtu_2xi32(<2 x i32>* %A, <2 x i32>* %B) nounwind { + %tmp1 = load <2 x i32>* %A + %tmp2 = load <2 x i32>* %B 
+; CHECK: vcgt.u32 d16, d16, d17 @ encoding: [0xa1,0x03,0x60,0xf3]
+	%tmp3 = icmp ugt <2 x i32> %tmp1, %tmp2
+	%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+	ret <2 x i32> %tmp4
+}
+
+; CHECK: vcgt_2xfloat
+define <2 x i32> @vcgt_2xfloat(<2 x float>* %A, <2 x float>* %B) nounwind {
+	%tmp1 = load <2 x float>* %A
+	%tmp2 = load <2 x float>* %B
+; CHECK: vcgt.f32 d16, d16, d17 @ encoding: [0xa1,0x0e,0x60,0xf3]
+	%tmp3 = fcmp ogt <2 x float> %tmp1, %tmp2
+	%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+	ret <2 x i32> %tmp4
+}
+
+; CHECK: vcgts_16xi8
+define <16 x i8> @vcgts_16xi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+	%tmp1 = load <16 x i8>* %A
+	%tmp2 = load <16 x i8>* %B
+; CHECK: vcgt.s8 q8, q8, q9 @ encoding: [0xe2,0x03,0x40,0xf2]
+	%tmp3 = icmp sgt <16 x i8> %tmp1, %tmp2
+	%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+	ret <16 x i8> %tmp4
+}
+
+; CHECK: vcgts_8xi16
+define <8 x i16> @vcgts_8xi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+	%tmp1 = load <8 x i16>* %A
+	%tmp2 = load <8 x i16>* %B
+; CHECK: vcgt.s16 q8, q8, q9 @ encoding: [0xe2,0x03,0x50,0xf2]
+	%tmp3 = icmp sgt <8 x i16> %tmp1, %tmp2
+	%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+	ret <8 x i16> %tmp4
+}
+
+; CHECK: vcgts_4xi32
+define <4 x i32> @vcgts_4xi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+	%tmp1 = load <4 x i32>* %A
+	%tmp2 = load <4 x i32>* %B
+; CHECK: vcgt.s32 q8, q8, q9 @ encoding: [0xe2,0x03,0x60,0xf2]
+	%tmp3 = icmp sgt <4 x i32> %tmp1, %tmp2
+	%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+	ret <4 x i32> %tmp4
+}
+
+; CHECK: vcgtu_16xi8
+define <16 x i8> @vcgtu_16xi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+	%tmp1 = load <16 x i8>* %A
+	%tmp2 = load <16 x i8>* %B
+; CHECK: vcgt.u8 q8, q8, q9 @ encoding: [0xe2,0x03,0x40,0xf3]
+	%tmp3 = icmp ugt <16 x i8> %tmp1, %tmp2
+	%tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+	ret <16 x i8> %tmp4
+}
+
+; CHECK: vcgtu_8xi16
+define <8 x i16> @vcgtu_8xi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+	%tmp1 = load <8 x i16>* %A
+	%tmp2 = load <8 x i16>* %B
+; CHECK: vcgt.u16 q8, q8, q9 @ encoding: [0xe2,0x03,0x50,0xf3]
+	%tmp3 = icmp ugt <8 x i16> %tmp1, %tmp2
+	%tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+	ret <8 x i16> %tmp4
+}
+
+; CHECK: vcgtu_4xi32
+define <4 x i32> @vcgtu_4xi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+	%tmp1 = load <4 x i32>* %A
+	%tmp2 = load <4 x i32>* %B
+; CHECK: vcgt.u32 q8, q8, q9 @ encoding: [0xe2,0x03,0x60,0xf3]
+	%tmp3 = icmp ugt <4 x i32> %tmp1, %tmp2
+	%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+	ret <4 x i32> %tmp4
+}
+
+; CHECK: vcgt_4xfloat
+define <4 x i32> @vcgt_4xfloat(<4 x float>* %A, <4 x float>* %B) nounwind {
+	%tmp1 = load <4 x float>* %A
+	%tmp2 = load <4 x float>* %B
+; CHECK: vcgt.f32 q8, q8, q9 @ encoding: [0xe2,0x0e,0x60,0xf3]
+	%tmp3 = fcmp ogt <4 x float> %tmp1, %tmp2
+	%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+	ret <4 x i32> %tmp4
+}
+
+declare <2 x i32> @llvm.arm.neon.vacgtd(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vacgtq(<4 x float>, <4 x float>) nounwind readnone
+
+; CHECK: vacgt_2xfloat
+define <2 x i32> @vacgt_2xfloat(<2 x float>* %A, <2 x float>* %B) nounwind {
+	%tmp1 = load <2 x float>* %A
+	%tmp2 = load <2 x float>* %B
+; CHECK: vacgt.f32 d16, d16, d17 @ encoding: [0xb1,0x0e,0x60,0xf3]
+	%tmp3 = call <2 x i32> @llvm.arm.neon.vacgtd(<2 x float> %tmp1, <2 x float> %tmp2)
+	ret <2 x i32> %tmp3
+}
+
+; CHECK: vacgt_4xfloat
+define <4 x i32> @vacgt_4xfloat(<4 x float>* %A, <4 x float>* %B) nounwind {
+	%tmp1 = load <4 x float>* %A
+	%tmp2 = load <4 x float>* %B
+; CHECK: vacgt.f32 q8, q8, q9 @ encoding: [0xf2,0x0e,0x60,0xf3]
+	%tmp3 = call <4 x i32> @llvm.arm.neon.vacgtq(<4 x float> %tmp1, <4 x float> %tmp2)
+	ret <4 x i32> %tmp3
+}
+
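
The expected byte sequences in the CHECK lines above can also be reproduced by
assembling a mnemonic directly with llvm-mc in its -show-encoding mode. A
minimal sketch for the q-register vacgt case, assuming an ARMv7 triple (the
exact output formatting varies with the LLVM revision):

  $ echo "vacgt.f32 q8, q8, q9" | llvm-mc -triple armv7 -show-encoding
  	vacgt.f32	q8, q8, q9      @ encoding: [0xf2,0x0e,0x60,0xf3]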