Teach BasicAA about the LLVM IR rules that allow reading past the end of an object given sufficient alignment. Fixes PR12098.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@151553 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Eli Friedman 2012-02-27 20:46:07 +00:00
parent 9e2a79c287
commit 1680a24e53
3 changed files with 52 additions and 10 deletions

View File

@@ -84,42 +84,59 @@ static bool isEscapeSource(const Value *V) {
/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
///
/// If RoundToAlign is true, the returned size is rounded up to the object's
/// alignment: LLVM IR permits reads a bit past the end of an object given
/// sufficient alignment, so alias queries must use the aligned size.  When
/// RoundToAlign is set and no alignment is known, UnknownSize is returned.
static uint64_t getObjectSize(const Value *V, const TargetData &TD,
                              bool RoundToAlign = false) {
  Type *AccessTy;
  unsigned Align;
  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
    // Only a definitive initializer pins down the global's actual size.
    if (!GV->hasDefinitiveInitializer())
      return AliasAnalysis::UnknownSize;
    AccessTy = GV->getType()->getElementType();
    Align = GV->getAlignment();
  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    if (!AI->isArrayAllocation())
      AccessTy = AI->getType()->getElementType();
    else
      return AliasAnalysis::UnknownSize;
    Align = AI->getAlignment();
  } else if (const CallInst* CI = extractMallocCall(V)) {
    // malloc gives no alignment guarantee usable for rounding, so bail out
    // of this path when RoundToAlign is requested.
    if (!RoundToAlign && !isArrayMalloc(V, &TD))
      // The size is the argument to the malloc call.
      if (const ConstantInt* C = dyn_cast<ConstantInt>(CI->getArgOperand(0)))
        return C->getZExtValue();
    return AliasAnalysis::UnknownSize;
  } else if (const Argument *A = dyn_cast<Argument>(V)) {
    if (A->hasByValAttr()) {
      AccessTy = cast<PointerType>(A->getType())->getElementType();
      Align = A->getParamAlignment();
    } else {
      return AliasAnalysis::UnknownSize;
    }
  } else {
    return AliasAnalysis::UnknownSize;
  }

  if (!AccessTy->isSized())
    return AliasAnalysis::UnknownSize;

  uint64_t Size = TD.getTypeAllocSize(AccessTy);
  if (RoundToAlign) {
    // Unknown (zero) alignment means we cannot justify reading past the end.
    if (!Align)
      return AliasAnalysis::UnknownSize;
    Size = RoundUpToAlignment(Size, Align);
  }
  return Size;
}
/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const TargetData &TD) {
  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, TD, /*RoundToAlign*/true);
  return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
}

View File

@@ -0,0 +1,25 @@
; RUN: opt < %s -basicaa -dse -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
%struct.S0 = type <{ i8, [4 x i8] }>
@a = global { i8, i8, i8, i8, i8 } { i8 undef, i8 0, i8 0, i8 0, i8 0 }, align 8
; Regression test for PR12098: with sufficient alignment BasicAA lets a load
; read a bit past the end of an object, so the 8-byte load of @a (5 bytes of
; data, align 8) may alias the byte at offset 4.  DSE must therefore keep
; both stores to that byte.
define i32 @main() nounwind uwtable ssp {
entry:
%tmp = load i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4
%tmp1 = or i8 %tmp, -128
store i8 %tmp1, i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4
; The wide load below overlaps the stored byte when over-read is allowed.
%tmp2 = load i64* bitcast ({ i8, i8, i8, i8, i8 }* @a to i64*), align 8
store i8 11, i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4
%tmp3 = trunc i64 %tmp2 to i32
ret i32 %tmp3
; Make sure we don't delete either store here
; CHECK: @main
; CHECK: store i8 %tmp1
; CHECK: store i8 11
}

View File

@@ -4,7 +4,7 @@
; RUN: opt < %s -basicaa -gvn -S | FileCheck %s
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
@B = global i16 8
@B = global i16 8, align 2
; CHECK: @test1
define i16 @test1(i32* %P) {