From 78c47fa50b903d5dcb4e07a5c048a35cbc2add9e Mon Sep 17 00:00:00 2001 From: Scott Michel Date: Mon, 10 Mar 2008 16:58:52 +0000 Subject: [PATCH] Integer comparison tests for CellSPU. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@48152 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/CellSPU/SPUISelDAGToDAG.cpp | 4 +- lib/Target/CellSPU/SPUISelLowering.cpp | 24 +- lib/Target/CellSPU/SPUISelLowering.h | 2 +- lib/Target/CellSPU/SPUInstrInfo.td | 215 ++++++++++++---- test/CodeGen/CellSPU/icmp16.ll | 340 +++++++++++++++++++++++++ test/CodeGen/CellSPU/icmp32.ll | 340 +++++++++++++++++++++++++ test/CodeGen/CellSPU/icmp8.ll | 277 ++++++++++++++++++++ 7 files changed, 1133 insertions(+), 69 deletions(-) create mode 100644 test/CodeGen/CellSPU/icmp16.ll create mode 100644 test/CodeGen/CellSPU/icmp32.ll create mode 100644 test/CodeGen/CellSPU/icmp8.ll diff --git a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp index 6fad71471dc..1d4b28b4c53 100644 --- a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp +++ b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp @@ -41,14 +41,14 @@ namespace { bool isI64IntS10Immediate(ConstantSDNode *CN) { - return isS10Constant(CN->getValue()); + return isS10Constant(CN->getSignExtended()); } //! ConstantSDNode predicate for i32 sign-extended, 10-bit immediates bool isI32IntS10Immediate(ConstantSDNode *CN) { - return isS10Constant((int) CN->getValue()); + return isS10Constant(CN->getSignExtended()); } #if 0 diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp index 6d0bd779e30..e04722ffe1e 100644 --- a/lib/Target/CellSPU/SPUISelLowering.cpp +++ b/lib/Target/CellSPU/SPUISelLowering.cpp @@ -244,21 +244,21 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM) setOperationAction(ISD::CTLZ , MVT::i32, Legal); // SPU has a version of select - setOperationAction(ISD::SELECT, MVT::i1, Expand); - setOperationAction(ISD::SELECT, MVT::i8, Expand); + setOperationAction(ISD::SELECT, MVT::i1, Promote); + setOperationAction(ISD::SELECT, MVT::i8, Legal); setOperationAction(ISD::SELECT, MVT::i16, Legal); setOperationAction(ISD::SELECT, MVT::i32, Legal); setOperationAction(ISD::SELECT, MVT::i64, Expand); setOperationAction(ISD::SELECT, MVT::f32, Expand); setOperationAction(ISD::SELECT, MVT::f64, Expand); - setOperationAction(ISD::SETCC, MVT::i1, Expand); - setOperationAction(ISD::SETCC, MVT::i8, Expand); - setOperationAction(ISD::SETCC, MVT::i16, Legal); - setOperationAction(ISD::SETCC, MVT::i32, Legal); - setOperationAction(ISD::SETCC, MVT::i64, Expand); - setOperationAction(ISD::SETCC, MVT::f32, Expand); - setOperationAction(ISD::SETCC, MVT::f64, Expand); + setOperationAction(ISD::SETCC, MVT::i1, Promote); + setOperationAction(ISD::SETCC, MVT::i8, Legal); + setOperationAction(ISD::SETCC, MVT::i16, Legal); + setOperationAction(ISD::SETCC, MVT::i32, Legal); + setOperationAction(ISD::SETCC, MVT::i64, Expand); + setOperationAction(ISD::SETCC, MVT::f32, Expand); + setOperationAction(ISD::SETCC, MVT::f64, Expand); // Zero extension and sign extension for i64 have to be // custom legalized @@ -380,7 +380,6 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM) setOperationAction(ISD::XOR, MVT::v16i8, Custom); setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom); - setSetCCResultType(MVT::i32); setShiftAmountType(MVT::i32); setSetCCResultContents(ZeroOrOneSetCCResult); @@ -449,6 +448,11 @@ SPUTargetLowering::getTargetNodeName(unsigned Opcode) const return ((i != node_names.end()) ? 
i->second : 0); } +MVT::ValueType +SPUTargetLowering::getSetCCResultType(const SDOperand &Op) const { + return Op.getValueType(); +} + //===----------------------------------------------------------------------===// // Calling convention code: //===----------------------------------------------------------------------===// diff --git a/lib/Target/CellSPU/SPUISelLowering.h b/lib/Target/CellSPU/SPUISelLowering.h index b8f6ee3ea5b..d5bfac4ca75 100644 --- a/lib/Target/CellSPU/SPUISelLowering.h +++ b/lib/Target/CellSPU/SPUISelLowering.h @@ -104,7 +104,7 @@ namespace llvm { virtual const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType - Return the ValueType for ISD::SETCC - MVT::ValueType getSetCCResultType(const SDOperand &) const; + virtual MVT::ValueType getSetCCResultType(const SDOperand &) const; /// LowerOperation - Provide custom lowering hooks for some operations. /// diff --git a/lib/Target/CellSPU/SPUInstrInfo.td b/lib/Target/CellSPU/SPUInstrInfo.td index b76e03dc345..bd288d3bd6a 100644 --- a/lib/Target/CellSPU/SPUInstrInfo.td +++ b/lib/Target/CellSPU/SPUInstrInfo.td @@ -1392,7 +1392,7 @@ def XORHIr16: [(set R16C:$rT, (xor R16C:$rA, i16ImmSExt10:$val))]>; def XORIv4i32: - RI10Form<0b00100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val), + RI10Form<0b00100000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm_i32:$val), "xori\t$rT, $rA, $val", IntegerOp, [(set (v4i32 VECREG:$rT), (xor (v4i32 VECREG:$rA), v4i32SExt10Imm:$val))]>; @@ -1515,7 +1515,7 @@ def : SPUselbPat; class SelectConditional: Pat<(select rclass:$rCond, rclass:$rTrue, rclass:$rFalse), - (inst rclass:$rCond, rclass:$rFalse, rclass:$rTrue)>; + (inst rclass:$rFalse, rclass:$rTrue, rclass:$rCond)>; def : SelectConditional; def : SelectConditional; @@ -2875,12 +2875,33 @@ defm CLGTHI : CmpLGtrHalfwordImm; defm CLGT : CmpLGtrWord; defm CLGTI : CmpLGtrWordImm; +//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ // For SETCC primitives not supported above (setlt, setle, setge, etc.) // define a pattern to generate the right code, as a binary operator // (in a manner of speaking.) +// +// N.B.: This only matches the setcc set of conditionals. Special pattern +// matching is used for select conditionals. 
+//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ -class SETCCNegCond: - Pat<(cond rclass:$rA, rclass:$rB), pattern>; +class SETCCNegCondReg: + Pat<(cond rclass:$rA, rclass:$rB), + (xorinst (cmpare rclass:$rA, rclass:$rB), (inttype -1))>; + +class SETCCNegCondImm: + Pat<(cond rclass:$rA, (inttype immpred:$imm)), + (xorinst (cmpare rclass:$rA, (inttype immpred:$imm)), (inttype -1))>; + +def : SETCCNegCondReg; +def : SETCCNegCondImm; + +def : SETCCNegCondReg; +def : SETCCNegCondImm; + +def : SETCCNegCondReg; +def : SETCCNegCondImm; class SETCCBinOpReg: @@ -2895,64 +2916,146 @@ class SETCCBinOpImm; -def CGTEQBr8: SETCCBinOpReg; -def CGTEQBIr8: SETCCBinOpImm; -def CLTBr8: SETCCBinOpReg; -def CLTBIr8: SETCCBinOpImm; -def CLTEQr8: Pat<(setle R8C:$rA, R8C:$rB), - (XORBIr8 (CGTBr8 R8C:$rA, R8C:$rB), 0xff)>; -def CLTEQIr8: Pat<(setle R8C:$rA, immU8:$imm), - (XORBIr8 (CGTBIr8 R8C:$rA, immU8:$imm), 0xff)>; +def : SETCCBinOpReg; +def : SETCCBinOpImm; +def : SETCCBinOpReg; +def : SETCCBinOpImm; +def : Pat<(setle R8C:$rA, R8C:$rB), + (XORBIr8 (CGTBr8 R8C:$rA, R8C:$rB), 0xff)>; +def : Pat<(setle R8C:$rA, immU8:$imm), + (XORBIr8 (CGTBIr8 R8C:$rA, immU8:$imm), 0xff)>; -def CGTEQHr16: SETCCBinOpReg; -def CGTEQHIr16: SETCCBinOpImm; -def CLTHr16: SETCCBinOpReg; -def CLTHIr16: SETCCBinOpImm; -def CLTEQr16: Pat<(setle R16C:$rA, R16C:$rB), - (XORHIr16 (CGTHr16 R16C:$rA, R16C:$rB), 0xffff)>; -def CLTEQIr16: Pat<(setle R16C:$rA, i16ImmSExt10:$imm), - (XORHIr16 (CGTHIr16 R16C:$rA, i16ImmSExt10:$imm), 0xffff)>; +def : SETCCBinOpReg; +def : SETCCBinOpImm; +def : SETCCBinOpReg; +def : SETCCBinOpImm; +def : Pat<(setle R16C:$rA, R16C:$rB), + (XORHIr16 (CGTHr16 R16C:$rA, R16C:$rB), 0xffff)>; +def : Pat<(setle R16C:$rA, i16ImmSExt10:$imm), + (XORHIr16 (CGTHIr16 R16C:$rA, i16ImmSExt10:$imm), 0xffff)>; -def CGTEQHr32: SETCCBinOpReg; -def CGTEQHIr32: SETCCBinOpImm; -def CLTr32: SETCCBinOpReg; -def CLTIr32: SETCCBinOpImm; -def CLTEQr32: Pat<(setle R32C:$rA, R32C:$rB), - (XORIr32 (CGTr32 R32C:$rA, R32C:$rB), 0xffffffff)>; -def CLTEQIr32: Pat<(setle R32C:$rA, i32ImmSExt10:$imm), - (XORIr32 (CGTIr32 R32C:$rA, i32ImmSExt10:$imm), 0xffffffff)>; +def : SETCCBinOpReg; +def : SETCCBinOpImm; +def : SETCCBinOpReg; +def : SETCCBinOpImm; +def : Pat<(setle R32C:$rA, R32C:$rB), + (XORIr32 (CGTr32 R32C:$rA, R32C:$rB), 0xffffffff)>; +def : Pat<(setle R32C:$rA, i32ImmSExt10:$imm), + (XORIr32 (CGTIr32 R32C:$rA, i32ImmSExt10:$imm), 0xffffffff)>; -def CLGTEQBr8: SETCCBinOpReg; -def CLGTEQBIr8: SETCCBinOpImm; -def CLLTBr8: SETCCBinOpReg; -def CLLTBIr8: SETCCBinOpImm; -def CLLTEQr8: Pat<(setule R8C:$rA, R8C:$rB), - (XORBIr8 (CLGTBr8 R8C:$rA, R8C:$rB), 0xff)>; -def CLLTEQIr8: Pat<(setule R8C:$rA, immU8:$imm), - (XORBIr8 (CLGTBIr8 R8C:$rA, immU8:$imm), 0xff)>; +def : SETCCBinOpReg; +def : SETCCBinOpImm; +def : SETCCBinOpReg; +def : SETCCBinOpImm; +def : Pat<(setule R8C:$rA, R8C:$rB), + (XORBIr8 (CLGTBr8 R8C:$rA, R8C:$rB), 0xff)>; +def : Pat<(setule R8C:$rA, immU8:$imm), + (XORBIr8 (CLGTBIr8 R8C:$rA, immU8:$imm), 0xff)>; -def CLGTEQHr16: SETCCBinOpReg; -def CLGTEQHIr16: SETCCBinOpImm; -def CLLTHr16: SETCCBinOpReg; -def CLLTHIr16: SETCCBinOpImm; -def CLLTEQr16: Pat<(setule R16C:$rA, R16C:$rB), - (XORHIr16 (CLGTHr16 R16C:$rA, R16C:$rB), 0xffff)>; -def CLLTEQIr16: Pat<(setule R16C:$rA, i16ImmUns10:$imm), - (XORHIr16 (CLGTHIr16 R16C:$rA, i16ImmSExt10:$imm), 0xffff)>; +def : SETCCBinOpReg; +def : SETCCBinOpImm; +def : SETCCBinOpReg; +def : SETCCBinOpImm; +def : Pat<(setule R16C:$rA, R16C:$rB), + (XORHIr16 (CLGTHr16 R16C:$rA, 
R16C:$rB), 0xffff)>; +def : Pat<(setule R16C:$rA, i16ImmUns10:$imm), + (XORHIr16 (CLGTHIr16 R16C:$rA, i16ImmSExt10:$imm), 0xffff)>; +def : SETCCBinOpReg; +def : SETCCBinOpImm; +def : SETCCBinOpReg; +def : SETCCBinOpImm; +def : Pat<(setule R32C:$rA, R32C:$rB), + (XORIr32 (CLGTr32 R32C:$rA, R32C:$rB), 0xffffffff)>; +def : Pat<(setule R32C:$rA, i32ImmSExt10:$imm), + (XORIr32 (CLGTIr32 R32C:$rA, i32ImmSExt10:$imm), 0xffffffff)>; -def CLGTEQHr32: SETCCBinOpReg; -def CLGTEQHIr32: SETCCBinOpImm; -def CLLTr32: SETCCBinOpReg; -def CLLTIr32: SETCCBinOpImm; -def CLLTEQr32: Pat<(setule R32C:$rA, R32C:$rB), - (XORIr32 (CLGTr32 R32C:$rA, R32C:$rB), 0xffffffff)>; -def CLLTEQIr32: Pat<(setule R32C:$rA, i32ImmSExt10:$imm), - (XORIr32 (CLGTIr32 R32C:$rA, i32ImmSExt10:$imm), 0xffffffff)>; +//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ +// select conditional patterns: +//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ + +class SELECTNegCondReg: + Pat<(select (inttype (cond rclass:$rA, rclass:$rB)), + rclass:$rTrue, rclass:$rFalse), + (selinstr rclass:$rTrue, rclass:$rFalse, + (cmpare rclass:$rA, rclass:$rB))>; + +class SELECTNegCondImm: + Pat<(select (inttype (cond rclass:$rA, immpred:$imm)), + rclass:$rTrue, rclass:$rFalse), + (selinstr rclass:$rTrue, rclass:$rFalse, + (cmpare rclass:$rA, immpred:$imm))>; + +def : SELECTNegCondReg; +def : SELECTNegCondImm; +def : SELECTNegCondReg; +def : SELECTNegCondImm; +def : SELECTNegCondReg; +def : SELECTNegCondImm; + +def : SELECTNegCondReg; +def : SELECTNegCondImm; +def : SELECTNegCondReg; +def : SELECTNegCondImm; +def : SELECTNegCondReg; +def : SELECTNegCondImm; + +def : SELECTNegCondReg; +def : SELECTNegCondImm; +def : SELECTNegCondReg; +def : SELECTNegCondImm; +def : SELECTNegCondReg; +def : SELECTNegCondImm; + +class SELECTBinOpReg: + Pat<(select (inttype (cond rclass:$rA, rclass:$rB)), + rclass:$rFalse, rclass:$rTrue), + (selinstr rclass:$rTrue, rclass:$rFalse, + (binop (cmpOp1 rclass:$rA, rclass:$rB), + (cmpOp2 rclass:$rA, rclass:$rB)))>; + +class SELECTBinOpImm: + Pat<(select (inttype (cond rclass:$rA, (inttype immpred:$imm))), + rclass:$rTrue, rclass:$rFalse), + (selinstr rclass:$rFalse, rclass:$rTrue, + (binop (cmpOp1 rclass:$rA, (inttype immpred:$imm)), + (cmpOp2 rclass:$rA, (inttype immpred:$imm))))>; + +def : SELECTBinOpReg; +def : SELECTBinOpImm; + +def : SELECTBinOpReg; +def : SELECTBinOpImm; + +def : SELECTBinOpReg; +def : SELECTBinOpImm; + +def : SELECTBinOpReg; +def : SELECTBinOpImm; + +def : SELECTBinOpReg; +def : SELECTBinOpImm; + +def : SELECTBinOpReg; +def : SELECTBinOpImm; //-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ diff --git a/test/CodeGen/CellSPU/icmp16.ll b/test/CodeGen/CellSPU/icmp16.ll new file mode 100644 index 00000000000..4dc0ad89a4e --- /dev/null +++ b/test/CodeGen/CellSPU/icmp16.ll @@ -0,0 +1,340 @@ +; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s + +target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128" +target triple = "spu" + +; $3 = %arg1, $4 = %arg2, $5 = %val1, $6 = %val2 +; $3 = %arg1, $4 = %val1, $5 = %val2 +; +; For "positive" comparisons: +; selb $3, $6, $5, +; selb $3, $5, $4, +; +; For "negative" comparisons, i.e., those where the result of the comparison +; must be inverted (setne, for example): +; selb $3, $5, $6, +; selb $3, $4, $5, + +; i16 integer comparisons: +define i16 @icmp_eq_select_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { 
+entry: + %A = icmp eq i16 %arg1, %arg2 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i1 @icmp_eq_setcc_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp eq i16 %arg1, %arg2 + ret i1 %A +} + +define i16 @icmp_eq_immed01_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp eq i16 %arg1, 511 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_eq_immed02_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp eq i16 %arg1, -512 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_eq_immed03_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp eq i16 %arg1, -1 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_eq_immed04_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp eq i16 %arg1, 32768 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_ne_select_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ne i16 %arg1, %arg2 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i1 @icmp_ne_setcc_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ne i16 %arg1, %arg2 + ret i1 %A +} + +define i16 @icmp_ne_immed01_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ne i16 %arg1, 511 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_ne_immed02_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ne i16 %arg1, -512 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_ne_immed03_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ne i16 %arg1, -1 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_ne_immed04_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ne i16 %arg1, 32768 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_ugt_select_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ugt i16 %arg1, %arg2 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i1 @icmp_ugt_setcc_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ugt i16 %arg1, %arg2 + ret i1 %A +} + +define i16 @icmp_ugt_immed01_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ugt i16 %arg1, 511 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_ugt_immed02_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ugt i16 %arg1, 65534 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_ugt_immed03_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ugt i16 %arg1, 65024 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_ugt_immed04_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ugt i16 %arg1, 32768 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_uge_select_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp uge i16 %arg1, %arg2 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i1 @icmp_uge_setcc_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp uge i16 %arg1, %arg2 + ret i1 %A +} + +;; Note: icmp uge i16 %arg1, can always be transformed into +;; icmp ugt i16 %arg1, -1 +;; +;; Consequently, even though the patterns exist to match, it's unlikely +;; they'll ever be generated. 
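+;;
+;; That is, the immediate operand is decremented by one.  A concrete instance
+;; of that rewrite, shown for illustration only (it is not one of the tests
+;; in this file):
+;;   icmp uge i16 %arg1, 511   ==>   icmp ugt i16 %arg1, 510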
+ +define i16 @icmp_ult_select_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ult i16 %arg1, %arg2 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i1 @icmp_ult_setcc_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ult i16 %arg1, %arg2 + ret i1 %A +} + +define i16 @icmp_ult_immed01_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ult i16 %arg1, 511 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_ult_immed02_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ult i16 %arg1, 65534 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_ult_immed03_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ult i16 %arg1, 65024 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_ult_immed04_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ult i16 %arg1, 32768 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_ule_select_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ule i16 %arg1, %arg2 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i1 @icmp_ule_setcc_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp ule i16 %arg1, %arg2 + ret i1 %A +} + +;; Note: icmp ule i16 %arg1, can always be transformed into +;; icmp ult i16 %arg1, +1 +;; +;; Consequently, even though the patterns exist to match, it's unlikely +;; they'll ever be generated. + +define i16 @icmp_sgt_select_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp sgt i16 %arg1, %arg2 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i1 @icmp_sgt_setcc_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp sgt i16 %arg1, %arg2 + ret i1 %A +} + +define i16 @icmp_sgt_immed01_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp sgt i16 %arg1, 511 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_sgt_immed02_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp sgt i16 %arg1, -1 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_sgt_immed03_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp sgt i16 %arg1, -512 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_sgt_immed04_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp sgt i16 %arg1, 32768 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_sge_select_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp sge i16 %arg1, %arg2 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i1 @icmp_sge_setcc_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp sge i16 %arg1, %arg2 + ret i1 %A +} + +;; Note: icmp sge i16 %arg1, can always be transformed into +;; icmp sgt i16 %arg1, -1 +;; +;; Consequently, even though the patterns exist to match, it's unlikely +;; they'll ever be generated. 
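+;;
+;; That is, the immediate is decremented by one, e.g. (illustration only,
+;; not an actual test below):
+;;   icmp sge i16 %arg1, 511   ==>   icmp sgt i16 %arg1, 510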
+ +define i16 @icmp_slt_select_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp slt i16 %arg1, %arg2 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i1 @icmp_slt_setcc_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp slt i16 %arg1, %arg2 + ret i1 %A +} + +define i16 @icmp_slt_immed01_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp slt i16 %arg1, 511 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_slt_immed02_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp slt i16 %arg1, -512 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_slt_immed03_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp slt i16 %arg1, -1 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_slt_immed04_i16(i16 %arg1, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp slt i16 %arg1, 32768 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i16 @icmp_sle_select_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp sle i16 %arg1, %arg2 + %B = select i1 %A, i16 %val1, i16 %val2 + ret i16 %B +} + +define i1 @icmp_sle_setcc_i16(i16 %arg1, i16 %arg2, i16 %val1, i16 %val2) nounwind { +entry: + %A = icmp sle i16 %arg1, %arg2 + ret i1 %A +} + +;; Note: icmp sle i16 %arg1, can always be transformed into +;; icmp slt i16 %arg1, +1 +;; +;; Consequently, even though the patterns exist to match, it's unlikely +;; they'll ever be generated. + diff --git a/test/CodeGen/CellSPU/icmp32.ll b/test/CodeGen/CellSPU/icmp32.ll new file mode 100644 index 00000000000..daa673b8d61 --- /dev/null +++ b/test/CodeGen/CellSPU/icmp32.ll @@ -0,0 +1,340 @@ +; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s + +target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128" +target triple = "spu" + +; $3 = %arg1, $4 = %arg2, $5 = %val1, $6 = %val2 +; $3 = %arg1, $4 = %val1, $5 = %val2 +; +; For "positive" comparisons: +; selb $3, $6, $5, +; selb $3, $5, $4, +; +; For "negative" comparisons, i.e., those where the result of the comparison +; must be inverted (setne, for example): +; selb $3, $5, $6, +; selb $3, $4, $5, + +; i32 integer comparisons: +define i32 @icmp_eq_select_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp eq i32 %arg1, %arg2 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i1 @icmp_eq_setcc_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp eq i32 %arg1, %arg2 + ret i1 %A +} + +define i32 @icmp_eq_immed01_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp eq i32 %arg1, 511 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_eq_immed02_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp eq i32 %arg1, -512 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_eq_immed03_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp eq i32 %arg1, -1 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_eq_immed04_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp eq i32 %arg1, 32768 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_ne_select_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ne i32 %arg1, %arg2 + %B = select i1 %A, i32 %val1, i32 %val2 + 
ret i32 %B +} + +define i1 @icmp_ne_setcc_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ne i32 %arg1, %arg2 + ret i1 %A +} + +define i32 @icmp_ne_immed01_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ne i32 %arg1, 511 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_ne_immed02_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ne i32 %arg1, -512 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_ne_immed03_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ne i32 %arg1, -1 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_ne_immed04_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ne i32 %arg1, 32768 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_ugt_select_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ugt i32 %arg1, %arg2 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i1 @icmp_ugt_setcc_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ugt i32 %arg1, %arg2 + ret i1 %A +} + +define i32 @icmp_ugt_immed01_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ugt i32 %arg1, 511 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_ugt_immed02_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ugt i32 %arg1, 4294966784 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_ugt_immed03_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ugt i32 %arg1, 4294967293 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_ugt_immed04_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ugt i32 %arg1, 32768 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_uge_select_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp uge i32 %arg1, %arg2 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i1 @icmp_uge_setcc_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp uge i32 %arg1, %arg2 + ret i1 %A +} + +;; Note: icmp uge i32 %arg1, can always be transformed into +;; icmp ugt i32 %arg1, -1 +;; +;; Consequently, even though the patterns exist to match, it's unlikely +;; they'll ever be generated. 
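+;;
+;; That is, the immediate is decremented by one, e.g. (illustration only,
+;; not an actual test below):
+;;   icmp uge i32 %arg1, 511   ==>   icmp ugt i32 %arg1, 510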
+ +define i32 @icmp_ult_select_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ult i32 %arg1, %arg2 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i1 @icmp_ult_setcc_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ult i32 %arg1, %arg2 + ret i1 %A +} + +define i32 @icmp_ult_immed01_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ult i32 %arg1, 511 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_ult_immed02_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ult i32 %arg1, 4294966784 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_ult_immed03_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ult i32 %arg1, 4294967293 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_ult_immed04_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ult i32 %arg1, 32768 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_ule_select_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ule i32 %arg1, %arg2 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i1 @icmp_ule_setcc_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp ule i32 %arg1, %arg2 + ret i1 %A +} + +;; Note: icmp ule i32 %arg1, can always be transformed into +;; icmp ult i32 %arg1, +1 +;; +;; Consequently, even though the patterns exist to match, it's unlikely +;; they'll ever be generated. + +define i32 @icmp_sgt_select_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp sgt i32 %arg1, %arg2 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i1 @icmp_sgt_setcc_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp sgt i32 %arg1, %arg2 + ret i1 %A +} + +define i32 @icmp_sgt_immed01_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp sgt i32 %arg1, 511 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_sgt_immed02_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp sgt i32 %arg1, 4294966784 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_sgt_immed03_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp sgt i32 %arg1, 4294967293 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_sgt_immed04_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp sgt i32 %arg1, 32768 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_sge_select_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp sge i32 %arg1, %arg2 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i1 @icmp_sge_setcc_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp sge i32 %arg1, %arg2 + ret i1 %A +} + +;; Note: icmp sge i32 %arg1, can always be transformed into +;; icmp sgt i32 %arg1, -1 +;; +;; Consequently, even though the patterns exist to match, it's unlikely +;; they'll ever be generated. 
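+;;
+;; That is, the immediate is decremented by one, e.g. (illustration only,
+;; not an actual test below):
+;;   icmp sge i32 %arg1, 511   ==>   icmp sgt i32 %arg1, 510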
+ +define i32 @icmp_slt_select_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp slt i32 %arg1, %arg2 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i1 @icmp_slt_setcc_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp slt i32 %arg1, %arg2 + ret i1 %A +} + +define i32 @icmp_slt_immed01_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp slt i32 %arg1, 511 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_slt_immed02_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp slt i32 %arg1, -512 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_slt_immed03_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp slt i32 %arg1, -1 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_slt_immed04_i32(i32 %arg1, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp slt i32 %arg1, 32768 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i32 @icmp_sle_select_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp sle i32 %arg1, %arg2 + %B = select i1 %A, i32 %val1, i32 %val2 + ret i32 %B +} + +define i1 @icmp_sle_setcc_i32(i32 %arg1, i32 %arg2, i32 %val1, i32 %val2) nounwind { +entry: + %A = icmp sle i32 %arg1, %arg2 + ret i1 %A +} + +;; Note: icmp sle i32 %arg1, can always be transformed into +;; icmp slt i32 %arg1, +1 +;; +;; Consequently, even though the patterns exist to match, it's unlikely +;; they'll ever be generated. + diff --git a/test/CodeGen/CellSPU/icmp8.ll b/test/CodeGen/CellSPU/icmp8.ll new file mode 100644 index 00000000000..fd556d01131 --- /dev/null +++ b/test/CodeGen/CellSPU/icmp8.ll @@ -0,0 +1,277 @@ +; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s + +target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128" +target triple = "spu" + +; $3 = %arg1, $4 = %arg2, $5 = %val1, $6 = %val2 +; $3 = %arg1, $4 = %val1, $5 = %val2 +; +; For "positive" comparisons: +; selb $3, $6, $5, +; selb $3, $5, $4, +; +; For "negative" comparisons, i.e., those where the result of the comparison +; must be inverted (setne, for example): +; selb $3, $5, $6, +; selb $3, $4, $5, + +; i8 integer comparisons: +define i8 @icmp_eq_select_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp eq i8 %arg1, %arg2 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i1 @icmp_eq_setcc_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp eq i8 %arg1, %arg2 + ret i1 %A +} + +define i8 @icmp_eq_immed01_i8(i8 %arg1, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp eq i8 %arg1, 127 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i8 @icmp_eq_immed02_i8(i8 %arg1, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp eq i8 %arg1, -128 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i8 @icmp_eq_immed03_i8(i8 %arg1, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp eq i8 %arg1, -1 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i8 @icmp_ne_select_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp ne i8 %arg1, %arg2 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i1 @icmp_ne_setcc_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp ne i8 %arg1, %arg2 + ret i1 %A +} + +define i8 @icmp_ne_immed01_i8(i8 %arg1, i8 %val1, i8 %val2) nounwind { +entry: + 
%A = icmp ne i8 %arg1, 127 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i8 @icmp_ne_immed02_i8(i8 %arg1, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp ne i8 %arg1, -128 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i8 @icmp_ne_immed03_i8(i8 %arg1, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp ne i8 %arg1, -1 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i8 @icmp_ugt_select_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp ugt i8 %arg1, %arg2 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i1 @icmp_ugt_setcc_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp ugt i8 %arg1, %arg2 + ret i1 %A +} + +define i8 @icmp_ugt_immed01_i8(i8 %arg1, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp ugt i8 %arg1, 126 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i8 @icmp_uge_select_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp uge i8 %arg1, %arg2 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i1 @icmp_uge_setcc_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp uge i8 %arg1, %arg2 + ret i1 %A +} + +;; Note: icmp uge i8 %arg1, can always be transformed into +;; icmp ugt i8 %arg1, -1 +;; +;; Consequently, even though the patterns exist to match, it's unlikely +;; they'll ever be generated. + +define i8 @icmp_ult_select_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp ult i8 %arg1, %arg2 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i1 @icmp_ult_setcc_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp ult i8 %arg1, %arg2 + ret i1 %A +} + +define i8 @icmp_ult_immed01_i8(i8 %arg1, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp ult i8 %arg1, 253 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i8 @icmp_ult_immed02_i8(i8 %arg1, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp ult i8 %arg1, 129 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i8 @icmp_ule_select_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp ule i8 %arg1, %arg2 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i1 @icmp_ule_setcc_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp ule i8 %arg1, %arg2 + ret i1 %A +} + +;; Note: icmp ule i8 %arg1, can always be transformed into +;; icmp ult i8 %arg1, +1 +;; +;; Consequently, even though the patterns exist to match, it's unlikely +;; they'll ever be generated. 
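+;;
+;; That is, the immediate is incremented by one, e.g. (illustration only,
+;; not an actual test below):
+;;   icmp ule i8 %arg1, 126   ==>   icmp ult i8 %arg1, 127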
+ +define i8 @icmp_sgt_select_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp sgt i8 %arg1, %arg2 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i1 @icmp_sgt_setcc_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp sgt i8 %arg1, %arg2 + ret i1 %A +} + +define i8 @icmp_sgt_immed01_i8(i8 %arg1, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp sgt i8 %arg1, 127 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i8 @icmp_sgt_immed02_i8(i8 %arg1, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp sgt i8 %arg1, -1 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i8 @icmp_sgt_immed03_i8(i8 %arg1, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp sgt i8 %arg1, -128 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i8 @icmp_sge_select_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp sge i8 %arg1, %arg2 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i1 @icmp_sge_setcc_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp sge i8 %arg1, %arg2 + ret i1 %A +} + +;; Note: icmp sge i8 %arg1, can always be transformed into +;; icmp sgt i8 %arg1, -1 +;; +;; Consequently, even though the patterns exist to match, it's unlikely +;; they'll ever be generated. + +define i8 @icmp_slt_select_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp slt i8 %arg1, %arg2 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i1 @icmp_slt_setcc_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp slt i8 %arg1, %arg2 + ret i1 %A +} + +define i8 @icmp_slt_immed01_i8(i8 %arg1, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp slt i8 %arg1, 127 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i8 @icmp_slt_immed02_i8(i8 %arg1, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp slt i8 %arg1, -128 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i8 @icmp_slt_immed03_i8(i8 %arg1, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp slt i8 %arg1, -1 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i8 @icmp_sle_select_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp sle i8 %arg1, %arg2 + %B = select i1 %A, i8 %val1, i8 %val2 + ret i8 %B +} + +define i1 @icmp_sle_setcc_i8(i8 %arg1, i8 %arg2, i8 %val1, i8 %val2) nounwind { +entry: + %A = icmp sle i8 %arg1, %arg2 + ret i1 %A +} + +;; Note: icmp sle i8 %arg1, can always be transformed into +;; icmp slt i8 %arg1, +1 +;; +;; Consequently, even though the patterns exist to match, it's unlikely +;; they'll ever be generated. +
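+;;
+;; That is, the immediate is incremented by one, e.g. (illustration only,
+;; not an actual test above):
+;;   icmp sle i8 %arg1, 126   ==>   icmp slt i8 %arg1, 127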