refactor the MemoryBuiltin analysis:

- provide a more extensive set of functions to detect library allocation functions (e.g., malloc, calloc, strdup, etc.)
- provide an API to compute the size and offset of an object pointed to by a pointer

Move a few clients (GVN, AA, instcombine, ...) to the new API.
This implementation is a lot more aggressive than each of the custom implementations being replaced.

Patch reviewed by Nick Lewycky and Chandler Carruth, thanks.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@158919 91177308-0d34-0410-b5e6-96231b3b80d8
Nuno Lopes 2012-06-21 15:45:28 +00:00
parent 2114a8aaba
commit 9e72a79ef4
11 changed files with 716 additions and 289 deletions
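
As a quick illustration of the new detection predicates, here is a minimal sketch of a hypothetical client (not part of the patch; the helper name classifyAllocation is invented, and an LLVM tree of this era is assumed):

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Value.h"
using namespace llvm;

// Hypothetical helper: map a value to a rough allocation category using the
// predicates declared in the new MemoryBuiltins.h below.
static const char *classifyAllocation(const Value *V) {
  if (isMallocLikeFn(V))  return "malloc-like (uninitialized memory)";
  if (isCallocLikeFn(V))  return "calloc-like (zero-filled memory)";
  if (isReallocLikeFn(V)) return "realloc-like";
  if (isAllocationFn(V))  return "other allocation (e.g. strdup-like)";
  return "not a recognized allocation call";
}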

View File

@@ -15,6 +15,14 @@
#ifndef LLVM_ANALYSIS_MEMORYBUILTINS_H
#define LLVM_ANALYSIS_MEMORYBUILTINS_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Operator.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/TargetFolder.h"
namespace llvm {
class CallInst;
class PointerType;
@@ -22,24 +30,50 @@ class TargetData;
class Type;
class Value;
/// \brief Tests if a value is a call to a library function that allocates or
/// reallocates memory (either malloc, calloc, realloc, or strdup like).
bool isAllocationFn(const Value *V, bool LookThroughBitCast = false);
/// \brief Tests if a value is a call to a function that returns a NoAlias
/// pointer (including malloc/calloc/strdup-like functions).
bool isNoAliasFn(const Value *V, bool LookThroughBitCast = false);
/// \brief Tests if a value is a call to a library function that allocates
/// uninitialized memory (such as malloc).
bool isMallocLikeFn(const Value *V, bool LookThroughBitCast = false);
/// \brief Tests if a value is a call to a library function that allocates
/// zero-filled memory (such as calloc).
bool isCallocLikeFn(const Value *V, bool LookThroughBitCast = false);
/// \brief Tests if a value is a call to a library function that allocates
/// memory (either malloc, calloc, or strdup like).
bool isAllocLikeFn(const Value *V, bool LookThroughBitCast = false);
/// \brief Tests if a value is a call to a library function that reallocates
/// memory (such as realloc).
bool isReallocLikeFn(const Value *V, bool LookThroughBitCast = false);
//===----------------------------------------------------------------------===//
// malloc Call Utility Functions.
//
/// isMalloc - Returns true if the value is either a malloc call or a bitcast of
/// the result of a malloc call
bool isMalloc(const Value *I);
/// extractMallocCall - Returns the corresponding CallInst if the instruction
/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
/// ignore InvokeInst here.
const CallInst *extractMallocCall(const Value *I);
CallInst *extractMallocCall(Value *I);
static inline CallInst *extractMallocCall(Value *I) {
return const_cast<CallInst*>(extractMallocCall((const Value*)I));
}
/// extractMallocCallFromBitCast - Returns the corresponding CallInst if the
/// instruction is a bitcast of the result of a malloc call.
const CallInst *extractMallocCallFromBitCast(const Value *I);
CallInst *extractMallocCallFromBitCast(Value *I);
static inline CallInst *extractMallocCallFromBitCast(Value *I) {
return const_cast<CallInst*>(extractMallocCallFromBitCast((const Value*)I));
}
/// isArrayMalloc - Returns the corresponding CallInst if the instruction
/// is a call to malloc whose array size can be determined and the array size
@@ -67,7 +101,7 @@ Type *getMallocAllocatedType(const CallInst *CI);
/// determined.
Value *getMallocArraySize(CallInst *CI, const TargetData *TD,
bool LookThroughSExt = false);
//===----------------------------------------------------------------------===//
// calloc Call Utility Functions.
@@ -76,7 +110,9 @@ Value *getMallocArraySize(CallInst *CI, const TargetData *TD,
/// extractCallocCall - Returns the corresponding CallInst if the instruction
/// is a calloc call.
const CallInst *extractCallocCall(const Value *I);
CallInst *extractCallocCall(Value *I);
static inline CallInst *extractCallocCall(Value *I) {
return const_cast<CallInst*>(extractCallocCall((const Value*)I));
}
//===----------------------------------------------------------------------===//
@@ -90,6 +126,126 @@ static inline CallInst *isFreeCall(Value *I) {
return const_cast<CallInst*>(isFreeCall((const Value*)I));
}
//===----------------------------------------------------------------------===//
// Utility functions to compute size of objects.
//
/// \brief Compute the size of the object pointed to by Ptr. Returns true and
/// the object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of
/// allocas, byval arguments, and global variables.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD,
bool RoundToAlign = false);
typedef std::pair<APInt, APInt> SizeOffsetType;
/// \brief Evaluate the size and offset of an object pointed to by a Value*
/// statically. Fails if size or offset are not known at compile time.
class ObjectSizeOffsetVisitor
: public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> {
const TargetData *TD;
bool RoundToAlign;
unsigned IntTyBits;
APInt Zero;
APInt align(APInt Size, uint64_t Align);
SizeOffsetType unknown() {
return std::make_pair(APInt(), APInt());
}
public:
ObjectSizeOffsetVisitor(const TargetData *TD, LLVMContext &Context,
bool RoundToAlign = false);
SizeOffsetType compute(Value *V);
bool knownSize(SizeOffsetType &SizeOffset) {
return SizeOffset.first.getBitWidth() > 1;
}
bool knownOffset(SizeOffsetType &SizeOffset) {
return SizeOffset.second.getBitWidth() > 1;
}
bool bothKnown(SizeOffsetType &SizeOffset) {
return knownSize(SizeOffset) && knownOffset(SizeOffset);
}
SizeOffsetType visitAllocaInst(AllocaInst &I);
SizeOffsetType visitArgument(Argument &A);
SizeOffsetType visitCallSite(CallSite CS);
SizeOffsetType visitConstantPointerNull(ConstantPointerNull&);
SizeOffsetType visitExtractValueInst(ExtractValueInst &I);
SizeOffsetType visitGEPOperator(GEPOperator &GEP);
SizeOffsetType visitGlobalVariable(GlobalVariable &GV);
SizeOffsetType visitIntToPtrInst(IntToPtrInst&);
SizeOffsetType visitLoadInst(LoadInst &I);
SizeOffsetType visitPHINode(PHINode&);
SizeOffsetType visitSelectInst(SelectInst &I);
SizeOffsetType visitUndefValue(UndefValue&);
SizeOffsetType visitInstruction(Instruction &I);
};
typedef std::pair<Value*, Value*> SizeOffsetEvalType;
typedef IRBuilder<true, TargetFolder> BuilderTy;
typedef DenseMap<const Value*, SizeOffsetEvalType> CacheMapTy;
typedef SmallPtrSet<const Value*, 8> PtrSetTy;
/// \brief Evaluate the size and offset of an object pointed to by a Value*.
/// May create code to compute the result at run-time.
class ObjectSizeOffsetEvaluator
: public InstVisitor<ObjectSizeOffsetEvaluator, SizeOffsetEvalType> {
const TargetData *TD;
LLVMContext &Context;
BuilderTy Builder;
ObjectSizeOffsetVisitor Visitor;
IntegerType *IntTy;
Value *Zero;
CacheMapTy CacheMap;
PtrSetTy SeenVals;
SizeOffsetEvalType unknown() {
return std::make_pair((Value*)0, (Value*)0);
}
SizeOffsetEvalType compute_(Value *V);
public:
ObjectSizeOffsetEvaluator(const TargetData *TD, LLVMContext &Context);
SizeOffsetEvalType compute(Value *V);
bool knownSize(SizeOffsetEvalType &SizeOffset) {
return SizeOffset.first;
}
bool knownOffset(SizeOffsetEvalType &SizeOffset) {
return SizeOffset.second;
}
bool anyKnown(SizeOffsetEvalType &SizeOffset) {
return knownSize(SizeOffset) || knownOffset(SizeOffset);
}
bool bothKnown(SizeOffsetEvalType &SizeOffset) {
return knownSize(SizeOffset) && knownOffset(SizeOffset);
}
SizeOffsetEvalType visitAllocaInst(AllocaInst &I);
SizeOffsetEvalType visitCallSite(CallSite CS);
SizeOffsetEvalType visitGEPOperator(GEPOperator &GEP);
SizeOffsetEvalType visitIntToPtrInst(IntToPtrInst&);
SizeOffsetEvalType visitLoadInst(LoadInst &I);
SizeOffsetEvalType visitPHINode(PHINode &PHI);
SizeOffsetEvalType visitSelectInst(SelectInst &I);
SizeOffsetEvalType visitInstruction(Instruction &I);
};
} // End llvm namespace
#endif
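
The entry points declared above can be exercised as in this minimal sketch (not part of the patch). Ptr, TD, and Ctx are assumptions supplied by the caller (a pointer-typed Value*, a non-null TargetData*, and the enclosing LLVMContext); the helper names are invented:

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/LLVMContext.h"
#include "llvm/Value.h"
using namespace llvm;

// Constant-only query: succeeds only if both size and offset are known at
// compile time; on success OutSize receives the byte size of the object.
static bool staticObjectSize(const Value *Ptr, const TargetData *TD,
                             uint64_t &OutSize) {
  return getObjectSize(Ptr, OutSize, TD, /*RoundToAlign=*/true);
}

// Same analysis, but exposing the raw (size, offset) pair as APInts.
// A 1-bit APInt in either slot means that component is unknown
// (see knownSize/knownOffset above).
static SizeOffsetType staticSizeAndOffset(Value *Ptr, const TargetData *TD,
                                          LLVMContext &Ctx) {
  ObjectSizeOffsetVisitor Visitor(TD, Ctx, /*RoundToAlign=*/false);
  return Visitor.compute(Ptr);
}

// Run-time variant: may insert IR (multiplies, PHIs, selects) that computes
// the size and offset as Values; null entries in the pair mean "unknown".
static SizeOffsetEvalType runtimeSizeAndOffset(Value *Ptr, const TargetData *TD,
                                               LLVMContext &Ctx) {
  ObjectSizeOffsetEvaluator Eval(TD, Ctx);
  return Eval.compute(Ptr);
}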

View File

@@ -86,47 +86,10 @@ static bool isEscapeSource(const Value *V) {
/// UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const TargetData &TD,
bool RoundToAlign = false) {
Type *AccessTy;
unsigned Align;
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
if (!GV->hasDefinitiveInitializer())
return AliasAnalysis::UnknownSize;
AccessTy = GV->getType()->getElementType();
Align = GV->getAlignment();
} else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
if (!AI->isArrayAllocation())
AccessTy = AI->getType()->getElementType();
else
return AliasAnalysis::UnknownSize;
Align = AI->getAlignment();
} else if (const CallInst* CI = extractMallocCall(V)) {
if (!RoundToAlign && !isArrayMalloc(V, &TD))
// The size is the argument to the malloc call.
if (const ConstantInt* C = dyn_cast<ConstantInt>(CI->getArgOperand(0)))
return C->getZExtValue();
return AliasAnalysis::UnknownSize;
} else if (const Argument *A = dyn_cast<Argument>(V)) {
if (A->hasByValAttr()) {
AccessTy = cast<PointerType>(A->getType())->getElementType();
Align = A->getParamAlignment();
} else {
return AliasAnalysis::UnknownSize;
}
} else {
return AliasAnalysis::UnknownSize;
}
if (!AccessTy->isSized())
return AliasAnalysis::UnknownSize;
uint64_t Size = TD.getTypeAllocSize(AccessTy);
// If there is an explicitly specified alignment, and we need to
// take alignment into account, round up the size. (If the alignment
// is implicit, getTypeAllocSize is sufficient.)
if (RoundToAlign && Align)
Size = RoundUpToAlignment(Size, Align);
return Size;
uint64_t Size;
if (getObjectSize(V, Size, &TD, RoundToAlign))
return Size;
return AliasAnalysis::UnknownSize;
}
/// isObjectSmallerThan - Return true if we can prove that the object specified

View File

@@ -329,15 +329,8 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) {
// Check the value being stored.
Value *Ptr = GetUnderlyingObject(SI->getOperand(0));
if (isMalloc(Ptr)) {
// Okay, easy case.
} else if (CallInst *CI = dyn_cast<CallInst>(Ptr)) {
Function *F = CI->getCalledFunction();
if (!F || !F->isDeclaration()) return false; // Too hard to analyze.
if (F->getName() != "calloc") return false; // Not calloc.
} else {
if (!isAllocLikeFn(Ptr))
return false; // Too hard to analyze.
}
// Analyze all uses of the allocation. If any of them are used in a
// non-simple way (e.g. stored to another global) bail out.
@@ -454,19 +447,18 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {
for (inst_iterator II = inst_begin(SCC[i]->getFunction()),
E = inst_end(SCC[i]->getFunction());
II != E && FunctionEffect != ModRef; ++II)
if (isa<LoadInst>(*II)) {
if (LoadInst *LI = dyn_cast<LoadInst>(&*II)) {
FunctionEffect |= Ref;
if (cast<LoadInst>(*II).isVolatile())
if (LI->isVolatile())
// Volatile loads may have side-effects, so mark them as writing
// memory (for example, a flag inside the processor).
FunctionEffect |= Mod;
} else if (isa<StoreInst>(*II)) {
} else if (StoreInst *SI = dyn_cast<StoreInst>(&*II)) {
FunctionEffect |= Mod;
if (cast<StoreInst>(*II).isVolatile())
if (SI->isVolatile())
// Treat volatile stores as reading memory somewhere.
FunctionEffect |= Ref;
} else if (isMalloc(&cast<Instruction>(*II)) ||
isFreeCall(&cast<Instruction>(*II))) {
} else if (isAllocationFn(&*II) || isFreeCall(&*II)) {
FunctionEffect |= ModRef;
} else if (IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(&*II)) {
// The callgraph doesn't include intrinsic calls.

View File

@@ -12,80 +12,165 @@
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "memory-builtins"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Constants.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Metadata.h"
#include "llvm/Module.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
// malloc Call Utility Functions.
//
enum AllocType {
MallocLike = 1<<0, // allocates
CallocLike = 1<<1, // allocates + bzero
ReallocLike = 1<<2, // reallocates
StrDupLike = 1<<3, // allocates + copies a string
AllocLike = MallocLike | CallocLike | StrDupLike,
AnyAlloc = MallocLike | CallocLike | ReallocLike | StrDupLike
};
/// isMalloc - Returns true if the value is either a malloc call or a
/// bitcast of the result of a malloc call.
bool llvm::isMalloc(const Value *I) {
return extractMallocCall(I) || extractMallocCallFromBitCast(I);
}
struct AllocFnsTy {
const char *Name;
AllocType AllocTy;
unsigned char NumParams;
// First and Second size parameters (or -1 if unused)
unsigned char FstParam, SndParam;
};
static bool isMallocCall(const CallInst *CI) {
static const AllocFnsTy AllocationFnData[] = {
{"malloc", MallocLike, 1, 0, -1},
{"valloc", MallocLike, 1, 0, -1},
{"_Znwj", MallocLike, 1, 0, -1}, // operator new(unsigned int)
{"_Znwm", MallocLike, 1, 0, -1}, // operator new(unsigned long)
{"_Znaj", MallocLike, 1, 0, -1}, // operator new[](unsigned int)
{"_Znam", MallocLike, 1, 0, -1}, // operator new[](unsigned long)
{"posix_memalign", MallocLike, 3, 2, -1},
{"calloc", CallocLike, 2, 0, 1},
{"realloc", ReallocLike, 2, 1, -1},
{"reallocf", ReallocLike, 2, 1, -1},
{"strdup", StrDupLike, 1, -1, -1},
{"strndup", StrDupLike, 2, -1, -1}
};
static Function *getCalledFunction(const Value *V, bool LookThroughBitCast) {
if (LookThroughBitCast)
V = V->stripPointerCasts();
const CallInst *CI = dyn_cast<CallInst>(V);
if (!CI)
return false;
return 0;
Function *Callee = CI->getCalledFunction();
if (Callee == 0 || !Callee->isDeclaration())
return false;
if (Callee->getName() != "malloc" &&
Callee->getName() != "_Znwj" && // operator new(unsigned int)
Callee->getName() != "_Znwm" && // operator new(unsigned long)
Callee->getName() != "_Znaj" && // operator new[](unsigned int)
Callee->getName() != "_Znam") // operator new[](unsigned long)
return false;
if (!Callee || !Callee->isDeclaration())
return 0;
return Callee;
}
// Check malloc prototype.
// FIXME: workaround for PR5130, this will be obsolete when a nobuiltin
// attribute will exist.
/// \brief Returns the allocation data for the given value if it is a call to a
/// known allocation function, and NULL otherwise.
static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
bool LookThroughBitCast = false) {
Function *Callee = getCalledFunction(V, LookThroughBitCast);
if (!Callee)
return 0;
unsigned i = 0;
bool found = false;
for ( ; i < array_lengthof(AllocationFnData); ++i) {
if (Callee->getName() == AllocationFnData[i].Name) {
found = true;
break;
}
}
if (!found)
return 0;
const AllocFnsTy *FnData = &AllocationFnData[i];
if ((FnData->AllocTy & AllocTy) == 0)
return 0;
// Check function prototype.
// FIXME: Check the nobuiltin metadata?? (PR5130)
unsigned FstParam = FnData->FstParam;
unsigned SndParam = FnData->SndParam;
FunctionType *FTy = Callee->getFunctionType();
return FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
FTy->getNumParams() == 1 &&
(FTy->getParamType(0)->isIntegerTy(32) ||
FTy->getParamType(0)->isIntegerTy(64));
if (FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
FTy->getNumParams() == FnData->NumParams &&
(FstParam == (unsigned char)-1 ||
(FTy->getParamType(FstParam)->isIntegerTy(32) ||
FTy->getParamType(FstParam)->isIntegerTy(64))) &&
(SndParam == (unsigned char)-1 ||
FTy->getParamType(SndParam)->isIntegerTy(32) ||
FTy->getParamType(SndParam)->isIntegerTy(64)))
return FnData;
return 0;
}
static bool hasNoAliasAttr(const Value *V, bool LookThroughBitCast) {
Function *Callee = getCalledFunction(V, LookThroughBitCast);
return Callee && Callee->hasFnAttr(Attribute::NoAlias);
}
/// \brief Tests if a value is a call to a library function that allocates or
/// reallocates memory (either malloc, calloc, realloc, or strdup like).
bool llvm::isAllocationFn(const Value *V, bool LookThroughBitCast) {
return getAllocationData(V, AnyAlloc, LookThroughBitCast);
}
/// \brief Tests if a value is a call to a function that returns a NoAlias
/// pointer (including malloc/calloc/strdup-like functions).
bool llvm::isNoAliasFn(const Value *V, bool LookThroughBitCast) {
return isAllocLikeFn(V, LookThroughBitCast) ||
hasNoAliasAttr(V, LookThroughBitCast);
}
/// \brief Tests if a value is a call to a library function that allocates
/// uninitialized memory (such as malloc).
bool llvm::isMallocLikeFn(const Value *V, bool LookThroughBitCast) {
return getAllocationData(V, MallocLike, LookThroughBitCast);
}
/// \brief Tests if a value is a call to a library function that allocates
/// zero-filled memory (such as calloc).
bool llvm::isCallocLikeFn(const Value *V, bool LookThroughBitCast) {
return getAllocationData(V, CallocLike, LookThroughBitCast);
}
/// \brief Tests if a value is a call to a library function that allocates
/// memory (either malloc, calloc, or strdup like).
bool llvm::isAllocLikeFn(const Value *V, bool LookThroughBitCast) {
return getAllocationData(V, AllocLike, LookThroughBitCast);
}
/// \brief Tests if a value is a call to a library function that reallocates
/// memory (such as realloc).
bool llvm::isReallocLikeFn(const Value *V, bool LookThroughBitCast) {
return getAllocationData(V, ReallocLike, LookThroughBitCast);
}
/// extractMallocCall - Returns the corresponding CallInst if the instruction
/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
/// ignore InvokeInst here.
const CallInst *llvm::extractMallocCall(const Value *I) {
const CallInst *CI = dyn_cast<CallInst>(I);
return (isMallocCall(CI)) ? CI : NULL;
}
CallInst *llvm::extractMallocCall(Value *I) {
CallInst *CI = dyn_cast<CallInst>(I);
return (isMallocCall(CI)) ? CI : NULL;
}
static bool isBitCastOfMallocCall(const BitCastInst *BCI) {
if (!BCI)
return false;
return isMallocCall(dyn_cast<CallInst>(BCI->getOperand(0)));
return isMallocLikeFn(I) ? cast<CallInst>(I) : 0;
}
/// extractMallocCallFromBitCast - Returns the corresponding CallInst if the
/// instruction is a bitcast of the result of a malloc call.
CallInst *llvm::extractMallocCallFromBitCast(Value *I) {
BitCastInst *BCI = dyn_cast<BitCastInst>(I);
return (isBitCastOfMallocCall(BCI)) ? cast<CallInst>(BCI->getOperand(0))
: NULL;
}
const CallInst *llvm::extractMallocCallFromBitCast(const Value *I) {
const BitCastInst *BCI = dyn_cast<BitCastInst>(I);
return (isBitCastOfMallocCall(BCI)) ? cast<CallInst>(BCI->getOperand(0))
: NULL;
return BCI ? extractMallocCall(BCI->getOperand(0)) : 0;
}
static Value *computeArraySize(const CallInst *CI, const TargetData *TD,
@@ -134,7 +219,7 @@ const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) {
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
PointerType *llvm::getMallocType(const CallInst *CI) {
assert(isMalloc(CI) && "getMallocType and not malloc call");
assert(isMallocLikeFn(CI) && "getMallocType and not malloc call");
PointerType *MallocType = NULL;
unsigned NumOfBitCastUses = 0;
@@ -176,53 +261,17 @@ Type *llvm::getMallocAllocatedType(const CallInst *CI) {
/// determined.
Value *llvm::getMallocArraySize(CallInst *CI, const TargetData *TD,
bool LookThroughSExt) {
assert(isMalloc(CI) && "getMallocArraySize and not malloc call");
assert(isMallocLikeFn(CI) && "getMallocArraySize and not malloc call");
return computeArraySize(CI, TD, LookThroughSExt);
}
//===----------------------------------------------------------------------===//
// calloc Call Utility Functions.
//
static bool isCallocCall(const CallInst *CI) {
if (!CI)
return false;
Function *Callee = CI->getCalledFunction();
if (Callee == 0 || !Callee->isDeclaration())
return false;
if (Callee->getName() != "calloc")
return false;
// Check malloc prototype.
// FIXME: workaround for PR5130, this will be obsolete when a nobuiltin
// attribute exists.
FunctionType *FTy = Callee->getFunctionType();
return FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
FTy->getNumParams() == 2 &&
((FTy->getParamType(0)->isIntegerTy(32) &&
FTy->getParamType(1)->isIntegerTy(32)) ||
(FTy->getParamType(0)->isIntegerTy(64) &&
FTy->getParamType(1)->isIntegerTy(64)));
}
/// extractCallocCall - Returns the corresponding CallInst if the instruction
/// is a calloc call.
const CallInst *llvm::extractCallocCall(const Value *I) {
const CallInst *CI = dyn_cast<CallInst>(I);
return isCallocCall(CI) ? CI : 0;
return isCallocLikeFn(I) ? cast<CallInst>(I) : 0;
}
CallInst *llvm::extractCallocCall(Value *I) {
CallInst *CI = dyn_cast<CallInst>(I);
return isCallocCall(CI) ? CI : 0;
}
//===----------------------------------------------------------------------===//
// free Call Utility Functions.
//
/// isFreeCall - Returns non-null if the value is a call to the builtin free()
const CallInst *llvm::isFreeCall(const Value *I) {
@@ -251,3 +300,388 @@ const CallInst *llvm::isFreeCall(const Value *I) {
return CI;
}
//===----------------------------------------------------------------------===//
// Utility functions to compute size of objects.
//
/// \brief Compute the size of the object pointed to by Ptr. Returns true and
/// the object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of
/// allocas, byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD,
bool RoundToAlign) {
if (!TD)
return false;
ObjectSizeOffsetVisitor Visitor(TD, Ptr->getContext(), RoundToAlign);
SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr));
if (!Visitor.bothKnown(Data))
return false;
APInt ObjSize = Data.first, Offset = Data.second;
// check for overflow
if (Offset.slt(0) || ObjSize.ult(Offset))
Size = 0;
else
Size = (ObjSize - Offset).getZExtValue();
return true;
}
STATISTIC(ObjectVisitorArgument,
"Number of arguments with unsolved size and offset");
STATISTIC(ObjectVisitorLoad,
"Number of load instructions with unsolved size and offset");
APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
if (RoundToAlign && Align)
return APInt(IntTyBits, RoundUpToAlignment(Size.getZExtValue(), Align));
return Size;
}
ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const TargetData *TD,
LLVMContext &Context,
bool RoundToAlign)
: TD(TD), RoundToAlign(RoundToAlign) {
IntegerType *IntTy = TD->getIntPtrType(Context);
IntTyBits = IntTy->getBitWidth();
Zero = APInt::getNullValue(IntTyBits);
}
SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
V = V->stripPointerCasts();
if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
return visitGEPOperator(*GEP);
if (Instruction *I = dyn_cast<Instruction>(V))
return visit(*I);
if (Argument *A = dyn_cast<Argument>(V))
return visitArgument(*A);
if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
return visitConstantPointerNull(*P);
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
return visitGlobalVariable(*GV);
if (UndefValue *UV = dyn_cast<UndefValue>(V))
return visitUndefValue(*UV);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
if (CE->getOpcode() == Instruction::IntToPtr)
return unknown(); // clueless
DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: " << *V
<< '\n');
return unknown();
}
SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
if (!I.getAllocatedType()->isSized())
return unknown();
APInt Size(IntTyBits, TD->getTypeAllocSize(I.getAllocatedType()));
if (!I.isArrayAllocation())
return std::make_pair(align(Size, I.getAlignment()), Zero);
Value *ArraySize = I.getArraySize();
if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
Size *= C->getValue().zextOrSelf(IntTyBits);
return std::make_pair(align(Size, I.getAlignment()), Zero);
}
return unknown();
}
SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
// no interprocedural analysis is done at the moment
if (!A.hasByValAttr()) {
++ObjectVisitorArgument;
return unknown();
}
PointerType *PT = cast<PointerType>(A.getType());
APInt Size(IntTyBits, TD->getTypeAllocSize(PT->getElementType()));
return std::make_pair(align(Size, A.getParamAlignment()), Zero);
}
SizeOffsetType ObjectSizeOffsetVisitor::visitCallSite(CallSite CS) {
const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc);
if (!FnData)
return unknown();
// handle strdup-like functions separately
if (FnData->AllocTy == StrDupLike) {
// TODO
return unknown();
}
ConstantInt *Arg = dyn_cast<ConstantInt>(CS.getArgument(FnData->FstParam));
if (!Arg)
return unknown();
APInt Size = Arg->getValue();
// size determined by just 1 parameter
if (FnData->SndParam == (unsigned char)-1)
return std::make_pair(Size, Zero);
Arg = dyn_cast<ConstantInt>(CS.getArgument(FnData->SndParam));
if (!Arg)
return unknown();
Size *= Arg->getValue();
return std::make_pair(Size, Zero);
// TODO: handle more standard functions (+ wchar cousins):
// - strdup / strndup
// - strcpy / strncpy
// - strcat / strncat
// - memcpy / memmove
// - memset
}
SizeOffsetType
ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull&) {
return std::make_pair(Zero, Zero);
}
SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {
// Easy cases were already folded by previous passes.
return unknown();
}
SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) {
SizeOffsetType PtrData = compute(GEP.getPointerOperand());
if (!bothKnown(PtrData) || !GEP.hasAllConstantIndices())
return unknown();
SmallVector<Value*, 8> Ops(GEP.idx_begin(), GEP.idx_end());
APInt Offset(IntTyBits,TD->getIndexedOffset(GEP.getPointerOperandType(),Ops));
return std::make_pair(PtrData.first, PtrData.second + Offset);
}
SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV){
if (!GV.hasDefinitiveInitializer())
return unknown();
APInt Size(IntTyBits, TD->getTypeAllocSize(GV.getType()->getElementType()));
return std::make_pair(align(Size, GV.getAlignment()), Zero);
}
SizeOffsetType ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst&) {
// clueless
return unknown();
}
SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst&) {
++ObjectVisitorLoad;
return unknown();
}
SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode&) {
// too complex to analyze statically.
return unknown();
}
SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
SizeOffsetType TrueSide = compute(I.getTrueValue());
SizeOffsetType FalseSide = compute(I.getFalseValue());
if (bothKnown(TrueSide) && bothKnown(FalseSide) && TrueSide == FalseSide)
return TrueSide;
return unknown();
}
SizeOffsetType ObjectSizeOffsetVisitor::visitUndefValue(UndefValue&) {
return std::make_pair(Zero, Zero);
}
SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I << '\n');
return unknown();
}
ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const TargetData *TD,
LLVMContext &Context)
: TD(TD), Context(Context), Builder(Context, TargetFolder(TD)),
Visitor(TD, Context) {
IntTy = TD->getIntPtrType(Context);
Zero = ConstantInt::get(IntTy, 0);
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
SizeOffsetEvalType Result = compute_(V);
if (!bothKnown(Result)) {
// erase everything that was computed in this iteration from the cache, so
// that no dangling references are left behind. We could be a bit smarter if
// we kept a dependency graph. It's probably not worth the complexity.
for (PtrSetTy::iterator I=SeenVals.begin(), E=SeenVals.end(); I != E; ++I) {
CacheMapTy::iterator CacheIt = CacheMap.find(*I);
// non-computable results can be safely cached
if (CacheIt != CacheMap.end() && anyKnown(CacheIt->second))
CacheMap.erase(CacheIt);
}
}
SeenVals.clear();
return Result;
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
SizeOffsetType Const = Visitor.compute(V);
if (Visitor.bothKnown(Const))
return std::make_pair(ConstantInt::get(Context, Const.first),
ConstantInt::get(Context, Const.second));
V = V->stripPointerCasts();
// check cache
CacheMapTy::iterator CacheIt = CacheMap.find(V);
if (CacheIt != CacheMap.end())
return CacheIt->second;
// always generate code immediately before the instruction being
// processed, so that the generated code dominates the same BBs
Instruction *PrevInsertPoint = Builder.GetInsertPoint();
if (Instruction *I = dyn_cast<Instruction>(V))
Builder.SetInsertPoint(I);
// record the pointers that were handled in this run, so that they can be
// cleaned later if something fails
SeenVals.insert(V);
// now compute the size and offset
SizeOffsetEvalType Result;
if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
Result = visitGEPOperator(*GEP);
} else if (Instruction *I = dyn_cast<Instruction>(V)) {
Result = visit(*I);
} else if (isa<Argument>(V) ||
(isa<ConstantExpr>(V) &&
cast<ConstantExpr>(V)->getOpcode() == Instruction::IntToPtr) ||
isa<GlobalVariable>(V)) {
// ignore values where we cannot do more than what ObjectSizeVisitor can
Result = unknown();
} else {
DEBUG(dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: "
<< *V << '\n');
Result = unknown();
}
if (PrevInsertPoint)
Builder.SetInsertPoint(PrevInsertPoint);
// Don't reuse CacheIt since it may be invalid at this point.
CacheMap[V] = Result;
return Result;
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
if (!I.getAllocatedType()->isSized())
return unknown();
// must be a VLA
assert(I.isArrayAllocation());
Value *ArraySize = I.getArraySize();
Value *Size = ConstantInt::get(ArraySize->getType(),
TD->getTypeAllocSize(I.getAllocatedType()));
Size = Builder.CreateMul(Size, ArraySize);
return std::make_pair(Size, Zero);
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallSite(CallSite CS) {
const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc);
if (!FnData)
return unknown();
// handle strdup-like functions separately
if (FnData->AllocTy == StrDupLike) {
// TODO
return unknown();
}
Value *FirstArg = CS.getArgument(FnData->FstParam);
if (FnData->SndParam == (unsigned char)-1)
return std::make_pair(FirstArg, Zero);
Value *SecondArg = CS.getArgument(FnData->SndParam);
Value *Size = Builder.CreateMul(FirstArg, SecondArg);
return std::make_pair(Size, Zero);
// TODO: handle more standard functions (+ wchar cousins):
// - strdup / strndup
// - strcpy / strncpy
// - strcat / strncat
// - memcpy / memmove
// - memset
}
SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) {
SizeOffsetEvalType PtrData = compute_(GEP.getPointerOperand());
if (!bothKnown(PtrData))
return unknown();
Value *Offset = EmitGEPOffset(&Builder, *TD, &GEP);
Offset = Builder.CreateAdd(PtrData.second, Offset);
return std::make_pair(PtrData.first, Offset);
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst&) {
// clueless
return unknown();
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst&) {
return unknown();
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
// create 2 PHIs: one for size and another for offset
PHINode *SizePHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
PHINode *OffsetPHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
// insert right away in the cache to handle recursive PHIs
CacheMap[&PHI] = std::make_pair(SizePHI, OffsetPHI);
// compute offset/size for each PHI incoming pointer
for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
Builder.SetInsertPoint(PHI.getIncomingBlock(i)->getFirstInsertionPt());
SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i));
if (!bothKnown(EdgeData)) {
OffsetPHI->replaceAllUsesWith(UndefValue::get(IntTy));
OffsetPHI->eraseFromParent();
SizePHI->replaceAllUsesWith(UndefValue::get(IntTy));
SizePHI->eraseFromParent();
return unknown();
}
SizePHI->addIncoming(EdgeData.first, PHI.getIncomingBlock(i));
OffsetPHI->addIncoming(EdgeData.second, PHI.getIncomingBlock(i));
}
return std::make_pair(SizePHI, OffsetPHI);
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) {
SizeOffsetEvalType TrueSide = compute_(I.getTrueValue());
SizeOffsetEvalType FalseSide = compute_(I.getFalseValue());
if (!bothKnown(TrueSide) || !bothKnown(FalseSide))
return unknown();
if (TrueSide == FalseSide)
return TrueSide;
Value *Size = Builder.CreateSelect(I.getCondition(), TrueSide.first,
FalseSide.first);
Value *Offset = Builder.CreateSelect(I.getCondition(), TrueSide.second,
FalseSide.second);
return std::make_pair(Size, Offset);
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) {
DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I <<'\n');
return unknown();
}
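
One detail of the predicates defined in this file, shown as an illustrative sketch only (the helper name is invented; not part of the patch): passing LookThroughBitCast=true makes the lookup strip pointer casts before matching the callee, so a bitcast of an allocation call is classified the same way as the underlying call.

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Instructions.h"
using namespace llvm;

// Hypothetical helper: recognize a bitcast whose stripped operand is an
// allocation call (e.g. the result of a malloc-like call cast to another
// pointer type).
static bool isCastOfAllocation(const Value *V) {
  return isa<BitCastInst>(V) &&
         isAllocLikeFn(V, /*LookThroughBitCast=*/true);
}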

View File

@@ -474,8 +474,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// a subsequent bitcast of the malloc call result. There can be stores to
// the malloced memory between the malloc call and its bitcast uses, and we
// need to continue scanning until the malloc call.
if (isa<AllocaInst>(Inst) ||
(isa<CallInst>(Inst) && extractMallocCall(Inst))) {
if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst)) {
const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);
if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))

View File

@@ -172,7 +172,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (isFreeCall(&CI))
return visitFree(CI);
if (extractMallocCall(&CI) || extractCallocCall(&CI))
if (isAllocLikeFn(&CI))
return visitMalloc(CI);
// If the caller function is nounwind, mark the call as nounwind, even if the
@@ -246,84 +246,10 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
switch (II->getIntrinsicID()) {
default: break;
case Intrinsic::objectsize: {
// We need target data for just about everything so depend on it.
if (!TD) return 0;
Type *ReturnTy = CI.getType();
uint64_t DontKnow = II->getArgOperand(1) == Builder->getTrue() ? 0 : -1ULL;
// Get to the real allocated thing and offset as fast as possible.
Value *Op1 = II->getArgOperand(0)->stripPointerCasts();
uint64_t Offset = 0;
uint64_t Size = -1ULL;
// Try to look through constant GEPs.
if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1)) {
if (!GEP->hasAllConstantIndices()) return 0;
// Get the current byte offset into the thing. Use the original
// operand in case we're looking through a bitcast.
SmallVector<Value*, 8> Ops(GEP->idx_begin(), GEP->idx_end());
if (!GEP->getPointerOperandType()->isPointerTy())
return 0;
Offset = TD->getIndexedOffset(GEP->getPointerOperandType(), Ops);
Op1 = GEP->getPointerOperand()->stripPointerCasts();
// Make sure we're not a constant offset from an external
// global.
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1))
if (!GV->hasDefinitiveInitializer()) return 0;
}
// If we've stripped down to a single global variable that we
// can know the size of then just return that.
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
if (GV->hasDefinitiveInitializer()) {
Constant *C = GV->getInitializer();
Size = TD->getTypeAllocSize(C->getType());
} else {
// Can't determine size of the GV.
Constant *RetVal = ConstantInt::get(ReturnTy, DontKnow);
return ReplaceInstUsesWith(CI, RetVal);
}
} else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
// Get alloca size.
if (AI->getAllocatedType()->isSized()) {
Size = TD->getTypeAllocSize(AI->getAllocatedType());
if (AI->isArrayAllocation()) {
const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
if (!C) return 0;
Size *= C->getZExtValue();
}
}
} else if (CallInst *MI = extractMallocCall(Op1)) {
// Get allocation size.
Value *Arg = MI->getArgOperand(0);
if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg))
Size = CI->getZExtValue();
} else if (CallInst *MI = extractCallocCall(Op1)) {
// Get allocation size.
Value *Arg1 = MI->getArgOperand(0);
Value *Arg2 = MI->getArgOperand(1);
if (ConstantInt *CI1 = dyn_cast<ConstantInt>(Arg1))
if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Arg2))
Size = (CI1->getValue() * CI2->getValue()).getZExtValue();
}
// Do not return "I don't know" here. Later optimization passes could
// make it possible to evaluate objectsize to a constant.
if (Size == -1ULL)
return 0;
if (Size < Offset) {
// Out of bound reference? Negative index normalized to large
// index? Just return "I don't know".
return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, DontKnow));
}
return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, Size-Offset));
uint64_t Size;
if (getObjectSize(II->getArgOperand(0), Size, TD))
return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
return 0;
}
case Intrinsic::bswap:
// bswap(bswap(x)) -> x
@@ -768,7 +694,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
TerminatorInst *TI = II->getParent()->getTerminator();
bool CannotRemove = false;
for (++BI; &*BI != TI; ++BI) {
if (isa<AllocaInst>(BI) || isMalloc(BI)) {
if (isa<AllocaInst>(BI)) {
CannotRemove = true;
break;
}

View File

@@ -1068,7 +1068,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// If the bitcast is of an allocation, and the allocation will be
// converted to match the type of the cast, don't touch this.
if (isa<AllocaInst>(BCI->getOperand(0)) ||
isMalloc(BCI->getOperand(0))) {
isAllocationFn(BCI->getOperand(0))) {
// See if the bitcast simplifies, if so, don't nuke this GEP yet.
if (Instruction *I = visitBitCast(*BCI)) {
if (I != BCI) {

View File

@@ -275,39 +275,9 @@ static Value *getStoredPointerOperand(Instruction *I) {
}
static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
const TargetData *TD = AA.getTargetData();
if (const CallInst *CI = extractMallocCall(V)) {
if (const ConstantInt *C = dyn_cast<ConstantInt>(CI->getArgOperand(0)))
return C->getZExtValue();
}
if (const CallInst *CI = extractCallocCall(V)) {
if (const ConstantInt *C1 = dyn_cast<ConstantInt>(CI->getArgOperand(0)))
if (const ConstantInt *C2 = dyn_cast<ConstantInt>(CI->getArgOperand(1)))
return (C1->getValue() * C2->getValue()).getZExtValue();
}
if (TD == 0)
return AliasAnalysis::UnknownSize;
if (const AllocaInst *A = dyn_cast<AllocaInst>(V)) {
// Get size information for the alloca
if (const ConstantInt *C = dyn_cast<ConstantInt>(A->getArraySize()))
return C->getZExtValue() * TD->getTypeAllocSize(A->getAllocatedType());
}
if (const Argument *A = dyn_cast<Argument>(V)) {
if (A->hasByValAttr())
if (PointerType *PT = dyn_cast<PointerType>(A->getType()))
return TD->getTypeAllocSize(PT->getElementType());
}
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
if (!GV->mayBeOverridden())
return TD->getTypeAllocSize(GV->getType()->getElementType());
}
uint64_t Size;
if (getObjectSize(V, Size, AA.getTargetData()))
return Size;
return AliasAnalysis::UnknownSize;
}
@@ -705,16 +675,13 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// Find all of the alloca'd pointers in the entry block.
BasicBlock *Entry = BB.getParent()->begin();
for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
DeadStackObjects.insert(AI);
if (isa<AllocaInst>(I))
DeadStackObjects.insert(I);
// Okay, so these are dead heap objects, but if the pointer never escapes
// then it's leaked by this function anyways.
CallInst *CI = extractMallocCall(I);
if (!CI)
CI = extractCallocCall(I);
if (CI && !PointerMayBeCaptured(CI, true, true))
DeadStackObjects.insert(CI);
else if (isAllocLikeFn(I) && !PointerMayBeCaptured(I, true, true))
DeadStackObjects.insert(I);
}
// Treat byval arguments the same, stores to them are dead at the end of the
@@ -773,18 +740,8 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
continue;
}
if (AllocaInst *A = dyn_cast<AllocaInst>(BBI)) {
DeadStackObjects.remove(A);
continue;
}
if (CallInst *CI = extractMallocCall(BBI)) {
DeadStackObjects.remove(CI);
continue;
}
if (CallInst *CI = extractCallocCall(BBI)) {
DeadStackObjects.remove(CI);
if (isa<AllocaInst>(BBI) || isAllocLikeFn(BBI)) {
DeadStackObjects.remove(BBI);
continue;
}

View File

@@ -1436,7 +1436,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
Instruction *DepInst = DepInfo.getInst();
// Loading the allocation -> undef.
if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst) ||
// Loading immediately after lifetime begin -> undef.
isLifetimeStart(DepInst)) {
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
@@ -1951,7 +1951,7 @@ bool GVN::processLoad(LoadInst *L) {
// If this load really doesn't depend on anything, then we must be loading an
// undef value. This can happen when loading for a fresh allocation with no
// intervening stores, for example.
if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst)) {
L->replaceAllUsesWith(UndefValue::get(L->getType()));
markInstructionForDeletion(L);
++NumGVNLoad;

View File

@@ -266,7 +266,7 @@ bool llvm::isInstructionTriviallyDead(Instruction *I) {
return isa<UndefValue>(II->getArgOperand(1));
}
if (extractMallocCall(I) || extractCallocCall(I)) return true;
if (isAllocLikeFn(I)) return true;
if (CallInst *CI = isFreeCall(I))
if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))

View File

@@ -42,7 +42,7 @@ define i32 @f() nounwind {
define i1 @baz() nounwind {
; CHECK: @baz
; CHECK-NEXT: ret i1 true
; CHECK-NEXT: objectsize
%1 = tail call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([0 x i8]* @window, i32 0, i32 0), i1 false)
%2 = icmp eq i32 %1, -1
ret i1 %2