Teach the DataLayout-aware constant folder to be much more aggressive towards 'and' instructions. This pattern shows up a lot in ubsan binaries.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@175128 91177308-0d34-0410-b5e6-96231b3b80d8
Nick Lewycky 2013-02-14 03:23:37 +00:00
parent 38f85c5b9f
commit 9d90163bc3
2 changed files with 42 additions and 8 deletions
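
For context, a hand-written sketch (not part of the commit) of the kind of C++ source whose ubsan alignment instrumentation produces this pattern; the struct, global, and function names are invented for illustration, and whether the frontend emits a check for this exact access depends on the ubsan implementation and optimization level.

// Illustrative only -- names are invented.
struct Pair { int a, b; };
Pair g;                        // global, known to be at least 4-byte aligned

int load_b() {
  // With -fsanitize=alignment the access is guarded by a check that boils
  // down to "and (ptrtoint &g.b to i64), 3" at the IR level. Because &g.b is
  // a constant address with alignment 4, the 'and' now folds to 0 and the
  // check becomes trivially dead.
  return g.b;
}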

@@ -536,10 +536,10 @@ static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
 /// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
 /// Attempt to symbolically evaluate the result of a binary operator merging
-/// these together. If target data info is available, it is provided as TD,
-/// otherwise TD is null.
+/// these together. If target data info is available, it is provided as DL,
+/// otherwise DL is null.
 static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
-                                           Constant *Op1, const DataLayout *TD){
+                                           Constant *Op1, const DataLayout *DL){
   // SROA

   // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
   // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
@@ -547,16 +547,38 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
   // bits.

+  if (Opc == Instruction::And && DL) {
+    unsigned BitWidth = DL->getTypeSizeInBits(Op0->getType());
+    APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0);
+    APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0);
+    ComputeMaskedBits(Op0, KnownZero0, KnownOne0, DL);
+    ComputeMaskedBits(Op1, KnownZero1, KnownOne1, DL);
+    if ((KnownOne1 | KnownZero0).isAllOnesValue()) {
+      // All the bits of Op0 that the 'and' could be masking are already zero.
+      return Op0;
+    }
+    if ((KnownOne0 | KnownZero1).isAllOnesValue()) {
+      // All the bits of Op1 that the 'and' could be masking are already zero.
+      return Op1;
+    }
+
+    APInt KnownZero = KnownZero0 | KnownZero1;
+    APInt KnownOne = KnownOne0 & KnownOne1;
+    if ((KnownZero | KnownOne).isAllOnesValue()) {
+      return ConstantInt::get(Op0->getType(), KnownOne);
+    }
+  }
+
   // If the constant expr is something like &A[123] - &A[4].f, fold this into a
   // constant. This happens frequently when iterating over a global array.
-  if (Opc == Instruction::Sub && TD) {
+  if (Opc == Instruction::Sub && DL) {
     GlobalValue *GV1, *GV2;
-    unsigned PtrSize = TD->getPointerSizeInBits();
-    unsigned OpSize = TD->getTypeSizeInBits(Op0->getType());
+    unsigned PtrSize = DL->getPointerSizeInBits();
+    unsigned OpSize = DL->getTypeSizeInBits(Op0->getType());
     APInt Offs1(PtrSize, 0), Offs2(PtrSize, 0);

-    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *TD))
-      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *TD) &&
+    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *DL))
+      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *DL) &&
           GV1 == GV2) {
         // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
         // PtrToInt may change the bitwidth so we have convert to the right size
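
To make the new logic concrete, here is a minimal worked example (not from the commit) of the known-bits arithmetic, written directly against llvm::APInt as it looked in this era. The concrete values are assumptions mirroring the test added below: Op0 is a ptrtoint of a 4-byte-aligned global, Op1 is the constant mask 3.

// Minimal sketch of the known-bits reasoning; values are illustrative.
#include "llvm/ADT/APInt.h"
#include <cassert>

using llvm::APInt;

int main() {
  const unsigned BitWidth = 64;

  // Op0: low two bits known zero (alignment 4), everything else unknown.
  APInt KnownZero0(BitWidth, 3), KnownOne0(BitWidth, 0);
  // Op1: the constant 3, so every bit is known.
  APInt KnownOne1(BitWidth, 3);
  APInt KnownZero1 = ~KnownOne1;

  // Neither early exit fires: the 'and' is not provably a no-op on Op0 or Op1.
  assert(!(KnownOne1 | KnownZero0).isAllOnesValue());
  assert(!(KnownOne0 | KnownZero1).isAllOnesValue());

  // But every result bit is known, so the whole 'and' folds to a constant:
  // KnownOne0 & KnownOne1 == 0, which is exactly what the new test checks.
  APInt KnownZero = KnownZero0 | KnownZero1;
  APInt KnownOne  = KnownOne0 & KnownOne1;
  assert((KnownZero | KnownOne).isAllOnesValue());
  assert(KnownOne == 0);
  return 0;
}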


@@ -0,0 +1,12 @@
+; RUN: opt -instcombine %s -S -o - | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%test1.struct = type { i32, i32 }
+@test1.aligned_glbl = global %test1.struct zeroinitializer, align 4
+
+define void @test1(i64 *%ptr) {
+  store i64 and (i64 ptrtoint (i32* getelementptr (%test1.struct* @test1.aligned_glbl, i32 0, i32 1) to i64), i64 3), i64* %ptr
+; CHECK: store i64 0, i64* %ptr
+  ret void
+}
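
The test above only exercises the fold-to-a-constant path. As a further sketch (an assumption for illustration, not something the commit tests), the same known-bits check also lets the folder return an operand unchanged when the mask's known-one bits cover every bit of the other operand that could possibly be non-zero.

// Sketch of the return-Op0 early exit, again with invented values: Op0 can
// only have its low nibble set, and Op1 is the constant mask 0x0F.
#include "llvm/ADT/APInt.h"
#include <cassert>

using llvm::APInt;

int main() {
  const unsigned BitWidth = 8;

  // Op0: high four bits known zero, low four bits unknown.
  APInt KnownZero0(BitWidth, 0xF0), KnownOne0(BitWidth, 0);
  // Op1: the constant 0x0F, fully known.
  APInt KnownOne1(BitWidth, 0x0F), KnownZero1(BitWidth, 0xF0);

  // Every bit is either kept by the mask (KnownOne1) or already zero in Op0
  // (KnownZero0), so "and Op0, Op1" is just Op0 and the fold returns it.
  assert((KnownOne1 | KnownZero0).isAllOnesValue());
  (void)KnownOne0;
  (void)KnownZero1;
  return 0;
}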