diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index 7e75dcab1df..97b7cebbb68 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -4795,8 +4795,9 @@ Instruction *InstCombiner::visitSetCondInstWithCastAndCast(SetCondInst &SCI) {
     // %B = setgt short %X, 1330
     //
     // because %A may have negative value.
-    // However, it is OK if SrcTy is bool. See cast-set.ll testcase.
-    if (isSignSrc == isSignDest || SrcTy == Type::BoolTy)
+    // However, it is OK if SrcTy is bool (see cast-set.ll testcase)
+    // OR the operation is EQ/NE.
+    if (isSignSrc == isSignDest || SrcTy == Type::BoolTy || SCI.isEquality())
       RHSCIOp = Res;
     else
       return 0;
diff --git a/test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst-2.ll b/test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst-2.ll
new file mode 100644
index 00000000000..c10e62db8e8
--- /dev/null
+++ b/test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst-2.ll
@@ -0,0 +1,9 @@
+; The optimizer should be able to remove the cast operation here.
+; RUN: llvm-as %s -o - | opt -instcombine | llvm-dis | not grep 'cast.*int'
+
+bool %eq_signed_to_small_unsigned(sbyte %SB) {
+  %Y = cast sbyte %SB to uint   ; <uint> [#uses=1]
+  %C = seteq uint %Y, 17        ; <bool> [#uses=1]
+  ret bool %C
+}
+