Constant folding support for calls to umul.with.overflow(), basically identical to the smul.with.overflow() code.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@128379 91177308-0d34-0410-b5e6-96231b3b80d8
Frits van Bommel 2011-03-27 14:26:13 +00:00
parent f0bf9dfc1f
commit 6208610fd6
4 changed files with 52 additions and 11 deletions
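For context, a minimal usage sketch (not part of this commit) of the APInt::umul_ov entry point the patch adds; it mirrors the umul_1 test case below, where an 8-bit 100 * 3 wraps to 44 with the overflow bit set. It assumes an LLVM tree that already includes this change:

// Hedged usage sketch, not part of the patch: exercising the new
// APInt::umul_ov the same way the ConstantFolding change does.
#include "llvm/ADT/APInt.h"
#include <cstdio>

int main() {
  bool Overflow = false;
  // 100 * 3 = 300 does not fit in 8 unsigned bits, so the product wraps
  // to 44 (300 mod 256) and Overflow is set -- matching the umul_1 test.
  llvm::APInt Res = llvm::APInt(8, 100).umul_ov(llvm::APInt(8, 3), Overflow);
  std::printf("res=%llu overflow=%d\n",
              (unsigned long long)Res.getZExtValue(), (int)Overflow);
  return 0;
}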

include/llvm/ADT/APInt.h

@@ -818,6 +818,7 @@ public:
   APInt usub_ov(const APInt &RHS, bool &Overflow) const;
   APInt sdiv_ov(const APInt &RHS, bool &Overflow) const;
   APInt smul_ov(const APInt &RHS, bool &Overflow) const;
+  APInt umul_ov(const APInt &RHS, bool &Overflow) const;
   APInt sshl_ov(unsigned Amt, bool &Overflow) const;
 
   /// @returns the bit value at bitPosition

lib/Analysis/ConstantFolding.cpp

@@ -1048,11 +1048,12 @@ llvm::canConstantFoldCallTo(const Function *F) {
   case Intrinsic::ctpop:
   case Intrinsic::ctlz:
   case Intrinsic::cttz:
+  case Intrinsic::uadd_with_overflow:
+  case Intrinsic::usub_with_overflow:
   case Intrinsic::sadd_with_overflow:
-  case Intrinsic::uadd_with_overflow:
   case Intrinsic::ssub_with_overflow:
-  case Intrinsic::usub_with_overflow:
   case Intrinsic::smul_with_overflow:
+  case Intrinsic::umul_with_overflow:
   case Intrinsic::convert_from_fp16:
   case Intrinsic::convert_to_fp16:
   case Intrinsic::x86_sse_cvtss2si:
@@ -1362,7 +1363,8 @@ llvm::ConstantFoldCall(Function *F,
   case Intrinsic::uadd_with_overflow:
   case Intrinsic::ssub_with_overflow:
   case Intrinsic::usub_with_overflow:
-  case Intrinsic::smul_with_overflow: {
+  case Intrinsic::smul_with_overflow:
+  case Intrinsic::umul_with_overflow: {
     APInt Res;
     bool Overflow;
     switch (F->getIntrinsicID()) {
@@ -1382,6 +1384,9 @@ llvm::ConstantFoldCall(Function *F,
     case Intrinsic::smul_with_overflow:
       Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
       break;
+    case Intrinsic::umul_with_overflow:
+      Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow);
+      break;
     }
     Constant *Ops[] = {
       ConstantInt::get(F->getContext(), Res),

lib/Support/APInt.cpp

@@ -2078,6 +2078,16 @@ APInt APInt::smul_ov(const APInt &RHS, bool &Overflow) const {
   return Res;
 }
 
+APInt APInt::umul_ov(const APInt &RHS, bool &Overflow) const {
+  APInt Res = *this * RHS;
+  if (*this != 0 && RHS != 0)
+    Overflow = Res.udiv(RHS) != *this || Res.udiv(*this) != RHS;
+  else
+    Overflow = false;
+  return Res;
+}
+
 APInt APInt::sshl_ov(unsigned ShAmt, bool &Overflow) const {
   Overflow = ShAmt >= getBitWidth();
   if (Overflow)

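The umul_ov implementation above detects wrapping by dividing the truncated product back out: if the multiplication overflowed, the wrapped result is smaller than the true product, so at least one of the divisions no longer recovers the other factor. A standalone sketch of the same round-trip check on plain uint8_t values (the helper name here is illustrative, not from the patch):

// Hedged illustration, not part of the patch: the division round-trip
// overflow check from umul_ov, written out for 8-bit unsigned arithmetic.
#include <cstdint>
#include <cstdio>

static bool mul_overflows_u8(uint8_t a, uint8_t b, uint8_t &res) {
  res = static_cast<uint8_t>(a * b);  // wrapping product, like APInt's operator*
  if (a == 0 || b == 0)
    return false;                     // 0 * x never overflows
  // If the full product fit, dividing it by either factor recovers the other;
  // if it wrapped, at least one of the divisions comes out short.
  return res / b != a || res / a != b;
}

int main() {
  uint8_t r;
  bool ovf = mul_overflows_u8(100, 3, r);
  std::printf("100*3 -> res=%u overflow=%d\n", (unsigned)r, (int)ovf);  // res=44 overflow=1
  ovf = mul_overflows_u8(100, 2, r);
  std::printf("100*2 -> res=%u overflow=%d\n", (unsigned)r, (int)ovf);  // res=200 overflow=0
  return 0;
}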
test/Transforms/ConstProp/overflow-ops.ll

@@ -2,6 +2,14 @@
 
 %i8i1 = type {i8, i1}
 
+declare {i8, i1} @llvm.uadd.with.overflow.i8(i8, i8)
+declare {i8, i1} @llvm.usub.with.overflow.i8(i8, i8)
+declare {i8, i1} @llvm.umul.with.overflow.i8(i8, i8)
+declare {i8, i1} @llvm.sadd.with.overflow.i8(i8, i8)
+declare {i8, i1} @llvm.ssub.with.overflow.i8(i8, i8)
+declare {i8, i1} @llvm.smul.with.overflow.i8(i8, i8)
+
 ;;-----------------------------
 ;; uadd
 ;;-----------------------------
@@ -46,6 +54,28 @@ entry:
 ; CHECK: ret %i8i1 { i8 -2, i1 true }
 }
 
+;;-----------------------------
+;; umul
+;;-----------------------------
+
+define {i8, i1} @umul_1() nounwind {
+entry:
+  %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 100, i8 3)
+  ret {i8, i1} %t
+
+; CHECK: @umul_1
+; CHECK: ret %i8i1 { i8 44, i1 true }
+}
+
+define {i8, i1} @umul_2() nounwind {
+entry:
+  %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 100, i8 2)
+  ret {i8, i1} %t
+
+; CHECK: @umul_2
+; CHECK: ret %i8i1 { i8 -56, i1 false }
+}
+
 ;;-----------------------------
 ;; sadd
 ;;-----------------------------
@@ -163,14 +193,9 @@ entry:
 ; CHECK: ret %i8i1 { i8 -10, i1 false }
 }
 
-
-declare {i8, i1} @llvm.uadd.with.overflow.i8(i8, i8)
-declare {i8, i1} @llvm.usub.with.overflow.i8(i8, i8)
-declare {i8, i1} @llvm.sadd.with.overflow.i8(i8, i8)
-declare {i8, i1} @llvm.ssub.with.overflow.i8(i8, i8)
-declare {i8, i1} @llvm.smul.with.overflow.i8(i8, i8)
-
+;;-----------------------------
+;; smul
+;;-----------------------------
 
 ; rdar://8501501
 define {i8, i1} @smul_1() nounwind {