optimize cttz and ctlz when we can prove something about the
leading/trailing bits. Patch by Alastair Lynn!

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@92706 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent f87cd93e0c
commit 16507fe9fd
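The idea behind the change, in a standalone C++ sketch (illustrative only: a fixed 32-bit width and plain masks stand in for LLVM's APInt, and the names KnownBits32/foldCttz are invented for this note): once known-zero/known-one masks are available for the operand, cttz folds to a constant exactly when every bit below the lowest bit known to be one is also known to be zero.

#include <cstdint>
#include <iostream>

struct KnownBits32 {
  uint32_t Zero; // bits proven to be 0
  uint32_t One;  // bits proven to be 1
};

// Mirrors the "(Mask & KnownZero) == Mask" test in the new cttz case:
// returns true and sets Result when the count is fully determined.
static bool foldCttz(KnownBits32 K, unsigned &Result) {
  unsigned TrailingZeros = K.One ? __builtin_ctz(K.One) : 32u; // lowest known-one bit
  uint32_t Mask = TrailingZeros >= 32 ? ~0u
                                      : ((1u << TrailingZeros) - 1); // bits below it
  if ((Mask & K.Zero) != Mask)
    return false; // some lower bit could still be one
  Result = TrailingZeros;
  return true;
}

int main() {
  // For %and = and i32 (or i32 %a, 8), -8 (the @cttz test below):
  // bit 3 is known one, bits 0-2 are known zero.
  KnownBits32 K{/*Zero=*/0x7u, /*One=*/0x8u};
  unsigned R;
  if (foldCttz(K, R))
    std::cout << "cttz folds to " << R << "\n"; // prints 3
  return 0;
}

The ctlz case in the patch is the mirror image, using countLeadingZeros and a high-bits mask. Note the two degenerate ends of the fold: if the low bit is already known one, TrailingZeros is 0 and the call folds to 0; if the operand is provably zero, the mask covers the whole width and the fold returns BitWidth, which lines up with how llvm.cttz/llvm.ctlz of zero were defined at the time (they returned the operand's bit width).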
@@ -3144,7 +3144,40 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
                                  II->getOperand(1));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));

    }
    break;
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));

    }
    break;
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
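For the patterns exercised by the new tests below, the known bits come from or/and with constants. A rough standalone model of just those two rules (illustrative only; this is not LLVM's ComputeMaskedBits, and the helper names are invented):

#include <cstdint>
#include <cassert>

struct Known {
  uint32_t Zero; // bits proven 0
  uint32_t One;  // bits proven 1
};

// or X, C : C's set bits become known one, and stop being known zero.
static Known orConst(Known K, uint32_t C) {
  return {K.Zero & ~C, K.One | C};
}

// and X, C : C's clear bits become known zero, and stop being known one.
static Known andConst(Known K, uint32_t C) {
  return {K.Zero | ~C, K.One & C};
}

int main() {
  // %or = or i32 %a, 8 ; %and = and i32 %or, -8  (the @cttz test below)
  Known K{0, 0};        // nothing is known about %a
  K = orConst(K, 8);    // bit 3 becomes known one
  K = andConst(K, ~7u); // bits 0-2 become known zero (-8 == ~7)
  assert(K.One == 0x8u && (K.Zero & 0x7u) == 0x7u);
  // With these masks, the new Intrinsic::cttz case above proves the
  // count is 3 and replaces the call with that constant.
  return 0;
}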
@@ -5,6 +5,8 @@
declare %overflow.result @llvm.uadd.with.overflow.i8(i8, i8)
declare %overflow.result @llvm.umul.with.overflow.i8(i8, i8)
declare double @llvm.powi.f64(double, i32) nounwind readonly
declare i32 @llvm.cttz.i32(i32) nounwind readnone
declare i8 @llvm.ctlz.i8(i8) nounwind readnone

define i8 @test1(i8 %A, i8 %B) {
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
@@ -79,7 +81,6 @@ define i8 @test6(i8 %A, i1* %overflowPtr) {
; CHECK-NEXT: ret i8 %A
}


define void @powi(double %V, double *%P) {
entry:
  %A = tail call double @llvm.powi.f64(double %V, i32 -1) nounwind
@@ -98,4 +99,26 @@ entry:
; CHECK: volatile store double %V
}

define i32 @cttz(i32 %a)
{
entry:
  %or = or i32 %a, 8
  %and = and i32 %or, -8
  %count = tail call i32 @llvm.cttz.i32(i32 %and) nounwind readnone
  ret i32 %count
; CHECK: @cttz
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i32 3
}

define i8 @ctlz(i8 %a)
{
entry:
  %or = or i8 %a, 32
  %and = and i8 %or, 63
  %count = tail call i8 @llvm.ctlz.i8(i8 %and) nounwind readnone
  ret i8 %count
; CHECK: @ctlz
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i8 2
}
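As a quick cross-check of the constants the CHECK lines expect, a throwaway host-side program (not part of the patch): (a | 8) & -8 always has exactly three trailing zero bits, and (a | 32) & 63, viewed as an i8, always has exactly two leading zero bits, so folding the calls to 3 and 2 is correct. The tests themselves are presumably driven by the file's existing RUN line, i.e. opt -instcombine piped into FileCheck.

#include <cstdint>
#include <cassert>

int main() {
  for (uint32_t a = 0; a < 256; ++a) {    // sample the low-byte inputs
    uint32_t v32 = (a | 8u) & ~7u;        // mirrors %and in @cttz (-8 == ~7)
    assert(__builtin_ctz(v32) == 3);      // bit 3 set, bits 0-2 clear

    uint32_t v8 = (a | 32u) & 63u;        // mirrors %and in @ctlz
    // __builtin_clz counts leading zeros of a 32-bit value; subtract 24
    // to get the count for an i8.
    assert(__builtin_clz(v8) - 24 == 2);  // bit 5 set, bits 6-7 clear
  }
  return 0;
}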