Revert r176408 and r176407 to address PR15540.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@179111 91177308-0d34-0410-b5e6-96231b3b80d8
parent b976e407dc
commit 8e4df489d0
@@ -146,14 +146,6 @@ static inline CallInst *isFreeCall(Value *I, const TargetLibraryInfo *TLI) {
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout *TD,
                   const TargetLibraryInfo *TLI, bool RoundToAlign = false);

/// \brief Compute the size of the underlying object pointed by Ptr. Returns
/// true and the object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the aligment of allocas,
/// byval arguments, and global variables.
bool getUnderlyingObjectSize(const Value *Ptr, uint64_t &Size,
                             const DataLayout *TD, const TargetLibraryInfo *TLI,
                             bool RoundToAlign = false);


typedef std::pair<APInt, APInt> SizeOffsetType;
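The hunk above (apparently from the MemoryBuiltins.h header) drops the getUnderlyingObjectSize() declaration and keeps getObjectSize(). For orientation only, here is a minimal sketch of driving the surviving entry point; the helper name objectSizeOrZero is invented, TD and TLI are assumed to come from the enclosing pass, and the include paths reflect the LLVM tree around this revision:

// Not part of this commit: a minimal, hypothetical caller of getObjectSize().
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"

using namespace llvm;

// Returns the number of accessible bytes starting at Ptr, or 0 if unknown.
static uint64_t objectSizeOrZero(const Value *Ptr, const DataLayout *TD,
                                 const TargetLibraryInfo *TLI) {
  uint64_t Size;
  // RoundToAlign=false reports the exact size instead of rounding up to the
  // alignment of allocas, byval arguments, and global variables.
  if (getObjectSize(Ptr, Size, TD, TLI, /*RoundToAlign=*/false))
    return Size;
  return 0;
}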
@@ -163,14 +155,12 @@ typedef std::pair<APInt, APInt> SizeOffsetType;
class ObjectSizeOffsetVisitor
  : public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> {

  typedef DenseMap<const Value*, SizeOffsetType> CacheMapTy;

  const DataLayout *TD;
  const TargetLibraryInfo *TLI;
  bool RoundToAlign;
  unsigned IntTyBits;
  APInt Zero;
  CacheMapTy CacheMap;
  SmallPtrSet<Instruction *, 8> SeenInsts;

  APInt align(APInt Size, uint64_t Align);
@@ -88,7 +88,7 @@ static uint64_t getObjectSize(const Value *V, const DataLayout &TD,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  if (getUnderlyingObjectSize(V, Size, &TD, &TLI, RoundToAlign))
  if (getObjectSize(V, Size, &TD, &TLI, RoundToAlign))
    return Size;
  return AliasAnalysis::UnknownSize;
}
@@ -98,6 +98,35 @@ static uint64_t getObjectSize(const Value *V, const DataLayout &TD,
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &TD,
                                const TargetLibraryInfo &TLI) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //    char *p = (char*)malloc(100)
  //    char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the middle
  // the "object". In case q is passed to isObjectSmallerThan() as the 1st
  // parameter, before the llvm::getObjectSize() is called to get the size of
  // entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to caller to make sure the pointer is pointing
  //     to the base address the object.
  //
  // We go for 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, TD, TLI, /*RoundToAlign*/true);
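The comment above distinguishes the per-pointer view of an object's size from the whole-allocation view. Purely as an illustration at the source level (plain C++ using the compiler builtin __builtin_object_size, not LLVM API code; the constants mirror the comment's example), the distinction looks like this:

#include <cstdio>
#include <cstdlib>

int main() {
  char *p = (char *)malloc(100);
  char *q = p + 80;
  // The getObjectSize()/llvm.objectsize view: bytes reachable from q to the
  // end of the allocation. With optimizations on, compilers that can see the
  // malloc(100) usually fold this to 20; otherwise it yields (size_t)-1.
  printf("from q to end of object: %zu\n", __builtin_object_size(q, 0));
  // The isObjectSmallerThan() view: the entire 100-byte allocation, which an
  // interior pointer like q does not identify on its own, hence the
  // isIdentifiedObject() bail-out above.
  printf("entire object:           %d\n", 100);
  free(p);
  return 0;
}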
@@ -364,26 +364,6 @@ bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout *TD,
  return true;
}

/// \brief Compute the size of the underlying object pointed by Ptr. Returns
/// true and the object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the aligment of allocas,
/// byval arguments, and global variables.
bool llvm::getUnderlyingObjectSize(const Value *Ptr, uint64_t &Size,
                                   const DataLayout *TD,
                                   const TargetLibraryInfo *TLI,
                                   bool RoundToAlign) {
  if (!TD)
    return false;

  ObjectSizeOffsetVisitor Visitor(TD, TLI, Ptr->getContext(), RoundToAlign);
  SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr));
  if (!Visitor.knownSize(Data))
    return false;

  Size = Data.first.getZExtValue();
  return true;
}


STATISTIC(ObjectVisitorArgument,
          "Number of arguments with unsolved size and offset");
@@ -409,23 +389,16 @@ ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout *TD,

SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
  V = V->stripPointerCasts();
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If we have already seen this instruction, bail out. Cycles can happen in
    // unreachable code after constant propagation.
    if (!SeenInsts.insert(I))
      return unknown();

  if (isa<Instruction>(V) || isa<GEPOperator>(V)) {
    // Return cached value or insert unknown in cache if size of V was not
    // computed yet in order to avoid recursions in PHis.
    std::pair<CacheMapTy::iterator, bool> CacheVal =
      CacheMap.insert(std::make_pair(V, unknown()));
    if (!CacheVal.second)
      return CacheVal.first->second;

    SizeOffsetType Result;
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
      Result = visitGEPOperator(*GEP);
    else
      Result = visit(cast<Instruction>(*V));
    return CacheMap[V] = Result;
      return visitGEPOperator(*GEP);
    return visit(*I);
  }

  if (Argument *A = dyn_cast<Argument>(V))
    return visitArgument(*A);
  if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
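The comment in the cache-based branch above describes a memoization pattern: an "unknown" sentinel is inserted into the cache before recursing, so a cycle through a PHI-like value terminates instead of looping forever, and the entry is overwritten with the real result afterwards. A self-contained sketch of that pattern in plain C++ (illustrative only; Node, computeSize, and the -1 sentinel are invented names, not LLVM code):

#include <cstdint>
#include <map>
#include <utility>
#include <vector>

struct Node {
  int64_t size;                  // intrinsically known size, or -1
  std::vector<Node *> operands;  // may form cycles, like PHI nodes
};

static const int64_t Unknown = -1;

int64_t computeSize(Node *N, std::map<Node *, int64_t> &Cache) {
  // Insert a sentinel before recursing; if N is reached again through a
  // cycle, the cached sentinel is returned and the recursion terminates.
  std::pair<std::map<Node *, int64_t>::iterator, bool> Ins =
      Cache.insert(std::make_pair(N, Unknown));
  if (!Ins.second)
    return Ins.first->second;

  int64_t Result = N->size;
  // Simplified: only follows the first operand; the real visitor dispatches
  // per instruction kind.
  if (Result == Unknown && !N->operands.empty())
    Result = computeSize(N->operands.front(), Cache);

  // Overwrite the sentinel with the real answer before returning it.
  return Cache[N] = Result;
}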
@@ -439,6 +412,8 @@ SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::IntToPtr)
      return unknown(); // clueless
    if (CE->getOpcode() == Instruction::GetElementPtr)
      return visitGEPOperator(cast<GEPOperator>(*CE));
  }

  DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: " << *V
@@ -572,21 +547,9 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst&) {
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode &PHI) {
  if (PHI.getNumIncomingValues() == 0)
    return unknown();

  SizeOffsetType Ret = compute(PHI.getIncomingValue(0));
  if (!bothKnown(Ret))
    return unknown();

  // Verify that all PHI incoming pointers have the same size and offset.
  for (unsigned i = 1, e = PHI.getNumIncomingValues(); i != e; ++i) {
    SizeOffsetType EdgeData = compute(PHI.getIncomingValue(i));
    if (!bothKnown(EdgeData) || EdgeData != Ret)
      return unknown();
  }
  return Ret;
SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode&) {
  // too complex to analyze statically.
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
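The longer visitPHINode() body shown above only reported a result when every incoming pointer produced the same known (size, offset) pair; the tests that follow (test14 vs. test15) exercise exactly that condition. A plain-C++ sketch of the agreement rule, with invented names (SizeOffset, mergePhiEdges) and no LLVM dependencies:

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

typedef std::pair<int64_t, int64_t> SizeOffset;   // (size, offset)
static const SizeOffset Unknown(-1, -1);

// A PHI-like merge is analyzable only if every incoming edge agrees.
SizeOffset mergePhiEdges(const std::vector<SizeOffset> &Edges) {
  if (Edges.empty() || Edges.front() == Unknown)
    return Unknown;
  for (std::size_t i = 1, e = Edges.size(); i != e; ++i)
    if (Edges[i] == Unknown || Edges[i] != Edges.front())
      return Unknown;          // any disagreement gives up, as the code above did
  return Edges.front();
}

Under that rule, test14 below folds to 6 because all three incoming allocations are 6 bytes, while test15 stays unfolded because its incoming allocations are 3, 2, and 3 bytes.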
@@ -256,131 +256,3 @@ xpto:
return:
  ret i32 7
}

declare noalias i8* @valloc(i32) nounwind

; CHECK: @test14
; CHECK: ret i32 6
define i32 @test14(i32 %a) nounwind {
  switch i32 %a, label %sw.default [
    i32 1, label %sw.bb
    i32 2, label %sw.bb1
  ]

sw.bb:
  %call = tail call noalias i8* @malloc(i32 6) nounwind
  br label %sw.epilog

sw.bb1:
  %call2 = tail call noalias i8* @calloc(i32 3, i32 2) nounwind
  br label %sw.epilog

sw.default:
  %call3 = tail call noalias i8* @valloc(i32 6) nounwind
  br label %sw.epilog

sw.epilog:
  %b.0 = phi i8* [ %call3, %sw.default ], [ %call2, %sw.bb1 ], [ %call, %sw.bb ]
  %1 = tail call i32 @llvm.objectsize.i32(i8* %b.0, i1 false)
  ret i32 %1
}

; CHECK: @test15
; CHECK: llvm.objectsize
define i32 @test15(i32 %a) nounwind {
  switch i32 %a, label %sw.default [
    i32 1, label %sw.bb
    i32 2, label %sw.bb1
  ]

sw.bb:
  %call = tail call noalias i8* @malloc(i32 3) nounwind
  br label %sw.epilog

sw.bb1:
  %call2 = tail call noalias i8* @calloc(i32 2, i32 1) nounwind
  br label %sw.epilog

sw.default:
  %call3 = tail call noalias i8* @valloc(i32 3) nounwind
  br label %sw.epilog

sw.epilog:
  %b.0 = phi i8* [ %call3, %sw.default ], [ %call2, %sw.bb1 ], [ %call, %sw.bb ]
  %1 = tail call i32 @llvm.objectsize.i32(i8* %b.0, i1 false)
  ret i32 %1
}

; CHECK: @test16
; CHECK: llvm.objectsize
define i32 @test16(i8* %a, i32 %n) nounwind {
  %b = alloca [5 x i8], align 1
  %c = alloca [5 x i8], align 1
  switch i32 %n, label %sw.default [
    i32 1, label %sw.bb
    i32 2, label %sw.bb1
  ]

sw.bb:
  %bp = bitcast [5 x i8]* %b to i8*
  br label %sw.epilog

sw.bb1:
  %cp = bitcast [5 x i8]* %c to i8*
  br label %sw.epilog

sw.default:
  br label %sw.epilog

sw.epilog:
  %phi = phi i8* [ %a, %sw.default ], [ %cp, %sw.bb1 ], [ %bp, %sw.bb ]
  %sz = call i32 @llvm.objectsize.i32(i8* %phi, i1 false)
  ret i32 %sz
}

; CHECK: @test17
; CHECK: ret i32 5
define i32 @test17(i32 %n) nounwind {
  %b = alloca [5 x i8], align 1
  %c = alloca [5 x i8], align 1
  %bp = bitcast [5 x i8]* %b to i8*
  switch i32 %n, label %sw.default [
    i32 1, label %sw.bb
    i32 2, label %sw.bb1
  ]

sw.bb:
  br label %sw.epilog

sw.bb1:
  %cp = bitcast [5 x i8]* %c to i8*
  br label %sw.epilog

sw.default:
  br label %sw.epilog

sw.epilog:
  %phi = phi i8* [ %bp, %sw.default ], [ %cp, %sw.bb1 ], [ %bp, %sw.bb ]
  %sz = call i32 @llvm.objectsize.i32(i8* %phi, i1 false)
  ret i32 %sz
}

@globalalias = alias internal [60 x i8]* @a

; CHECK: @test18
; CHECK-NEXT: ret i32 60
define i32 @test18() {
  %bc = bitcast [60 x i8]* @globalalias to i8*
  %1 = call i32 @llvm.objectsize.i32(i8* %bc, i1 false)
  ret i32 %1
}

@globalalias2 = alias weak [60 x i8]* @a

; CHECK: @test19
; CHECK: llvm.objectsize
define i32 @test19() {
  %bc = bitcast [60 x i8]* @globalalias2 to i8*
  %1 = call i32 @llvm.objectsize.i32(i8* %bc, i1 false)
  ret i32 %1
}