Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2024-12-14 11:32:34 +00:00)
Implement shift.ll:test23.  If we are shifting right and then immediately
truncating the result, turn signed shift rights into unsigned shift rights
if possible.  This leads to later simplification and happens *often* in
176.gcc.  For example, this testcase:

struct xxx { unsigned int code : 8; };
enum codes { A, B, C, D, E, F };
int foo(struct xxx *P) {
  if ((enum codes)P->code == A)
    bar();
}

used to be compiled to:

int %foo(%struct.xxx* %P) {
        %tmp.1 = getelementptr %struct.xxx* %P, int 0, uint 0   ; <uint*> [#uses=1]
        %tmp.2 = load uint* %tmp.1                              ; <uint> [#uses=1]
        %tmp.3 = cast uint %tmp.2 to int                        ; <int> [#uses=1]
        %tmp.4 = shl int %tmp.3, ubyte 24                       ; <int> [#uses=1]
        %tmp.5 = shr int %tmp.4, ubyte 24                       ; <int> [#uses=1]
        %tmp.6 = cast int %tmp.5 to sbyte                       ; <sbyte> [#uses=1]
        %tmp.8 = seteq sbyte %tmp.6, 0                          ; <bool> [#uses=1]
        br bool %tmp.8, label %then, label %UnifiedReturnBlock

Now it is compiled to:

        %tmp.1 = getelementptr %struct.xxx* %P, int 0, uint 0   ; <uint*> [#uses=1]
        %tmp.2 = load uint* %tmp.1                              ; <uint> [#uses=1]
        %tmp.2 = cast uint %tmp.2 to sbyte                      ; <sbyte> [#uses=1]
        %tmp.8 = seteq sbyte %tmp.2, 0                          ; <bool> [#uses=1]
        br bool %tmp.8, label %then, label %UnifiedReturnBlock

which is the difference between this:

foo:
        subl $4, %esp
        movl 8(%esp), %eax
        movl (%eax), %eax
        shll $24, %eax
        sarl $24, %eax
        testb %al, %al
        jne .LBBfoo_2

and this:

foo:
        subl $4, %esp
        movl 8(%esp), %eax
        movl (%eax), %eax
        testb %al, %al
        jne .LBBfoo_2

This occurs 3243 times in total in the External tests: 215x in povray,
6x in each f2c'd program, 1451x in 176.gcc, 7x in crafty, 20x in perl,
25x in gap, 3x in m88ksim, and 25x in ijpeg.

Maybe this will cause a little jump on gcc tomorrow :)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@21715 91177308-0d34-0410-b5e6-96231b3b80d8
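For illustration only (not part of the commit), here is a minimal standalone
C++ check of the equivalence the transform relies on: if the destination is
d bits wide and the shift amount k satisfies SrcBits - k >= d, then bits
0..d-1 of the shifted value all come from the source, so arithmetic and
logical right shifts truncate identically. The sketch assumes arithmetic
behavior for signed >> (implementation-defined before C++20, but universal
in practice):

#include <cassert>
#include <cstdint>

// Hypothetical standalone check, not code from the patch: with a 32-bit
// source and an 8-bit destination, any shift amount k <= 24 leaves
// 32 - k >= 8 source bits, so the sign bits smeared in by an arithmetic
// shift are all truncated away and "shr int" equals "shr uint".
int main() {
  for (unsigned k = 0; k <= 24; ++k) {
    for (int32_t x : {0, 1, -1, 0x7f, -0x80, INT32_MIN, INT32_MAX}) {
      uint8_t from_signed = static_cast<uint8_t>(x >> k);           // sarl, then truncate
      uint8_t from_unsigned =
          static_cast<uint8_t>(static_cast<uint32_t>(x) >> k);      // shrl, then truncate
      assert(from_signed == from_unsigned);
    }
  }
  return 0;
}

With k = 25 the assertion would fail for negative inputs, which is exactly
what the SrcBitSize-ShiftAmt >= DestBitSize guard in the patch below rules out.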
commit d7115b01a0
parent 1b50d062c8
@@ -3571,6 +3571,25 @@ Instruction *InstCombiner::visitCastInst(CastInst &CI) {
       return new ShiftInst(Instruction::Shl, Op0c, Op1);
     }
     break;
+  case Instruction::Shr:
+    // If this is a signed shr, and if all bits shifted in are about to be
+    // truncated off, turn it into an unsigned shr to allow greater
+    // simplifications.
+    if (DestBitSize < SrcBitSize && Src->getType()->isSigned() &&
+        isa<ConstantInt>(Op1)) {
+      unsigned ShiftAmt = cast<ConstantUInt>(Op1)->getValue();
+      if (SrcBitSize > ShiftAmt && SrcBitSize-ShiftAmt >= DestBitSize) {
+        // Convert to unsigned.
+        Value *N1 = InsertOperandCastBefore(Op0,
+                        Op0->getType()->getUnsignedVersion(), &CI);
+        // Insert the new shift, which is now unsigned.
+        N1 = InsertNewInstBefore(new ShiftInst(Instruction::Shr, N1,
+                                               Op1, Src->getName()), CI);
+        return new CastInst(N1, CI.getType());
+      }
+    }
+    break;
+
   case Instruction::SetNE:
     if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
       if (Op1C->getRawValue() == 0) {
@@ -3601,9 +3620,6 @@ Instruction *InstCombiner::visitCastInst(CastInst &CI) {
           In = InsertNewInstBefore(new ShiftInst(Instruction::Shr, In,
                           ConstantInt::get(Type::UByteTy, ShiftAmt),
                           In->getName()+".lobit"), CI);
-          std::cerr << "In1 = " << *Op0;
-          std::cerr << "In2 = " << *CI.getOperand(0);
-          std::cerr << "In3 = " << CI;
           if (CI.getType() == In->getType())
             return ReplaceInstUsesWith(CI, In);
           else
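As a reading aid (hypothetical names, not code from the tree), the guard in
the first hunk can be restated as a standalone predicate, with the boundary
cases spelled out:

#include <cassert>

// Hypothetical restatement of the guard above: a signed shr feeding a
// truncating cast may become an unsigned shr only when every
// sign-extension bit lands at or above the destination width.
static bool canDemoteSignedShr(unsigned SrcBitSize, unsigned DestBitSize,
                               unsigned ShiftAmt) {
  return SrcBitSize > ShiftAmt && SrcBitSize - ShiftAmt >= DestBitSize;
}

int main() {
  assert(canDemoteSignedShr(32, 8, 24));   // the 8-bit bitfield case above
  assert(!canDemoteSignedShr(32, 8, 25));  // bit 7 would be a sign-fill bit
  assert(!canDemoteSignedShr(32, 32, 1));  // no truncation; sign bits survive
  return 0;
}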