//===- BasicAliasAnalysis.cpp - Local Alias Analysis Impl -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the default implementation of the Alias Analysis interface
// that simply implements a few identities (two different globals cannot alias,
// etc), but otherwise does no analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/ParameterAttributes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/ManagedStatic.h"
#include <algorithm>

using namespace llvm;

namespace {
  /// NoAA - This class implements the -no-aa pass, which always returns "I
  /// don't know" for alias queries.  NoAA is unlike other alias analysis
  /// implementations, in that it does not chain to a previous analysis.  As
  /// such it doesn't follow many of the rules that other alias analyses must.
  ///
  struct VISIBILITY_HIDDEN NoAA : public ImmutablePass, public AliasAnalysis {
    static char ID; // Class identification, replacement for typeinfo
    NoAA() : ImmutablePass((intptr_t)&ID) {}
    explicit NoAA(intptr_t PID) : ImmutablePass(PID) { }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<TargetData>();
    }

    virtual void initializePass() {
      TD = &getAnalysis<TargetData>();
    }

    virtual AliasResult alias(const Value *V1, unsigned V1Size,
                              const Value *V2, unsigned V2Size) {
      return MayAlias;
    }

    virtual ModRefBehavior getModRefBehavior(Function *F, CallSite CS,
                                         std::vector<PointerAccessInfo> *Info) {
      return UnknownModRefBehavior;
    }

    virtual void getArgumentAccesses(Function *F, CallSite CS,
                                     std::vector<PointerAccessInfo> &Info) {
      assert(0 && "This method may not be called on this function!");
    }

    virtual void getMustAliases(Value *P, std::vector<Value*> &RetVals) { }
    virtual bool pointsToConstantMemory(const Value *P) { return false; }
    virtual ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size) {
      return ModRef;
    }
    virtual ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
      return ModRef;
    }
    virtual bool hasNoModRefInfoForCalls() const { return true; }

    virtual void deleteValue(Value *V) {}
    virtual void copyValue(Value *From, Value *To) {}
  };

  // Register this pass...
  char NoAA::ID = 0;
  RegisterPass<NoAA>
  U("no-aa", "No Alias Analysis (always returns 'may' alias)");

  // Declare that we implement the AliasAnalysis interface
  RegisterAnalysisGroup<AliasAnalysis> V(U);
}  // End of anonymous namespace

ImmutablePass *llvm::createNoAAPass() { return new NoAA(); }

namespace {
  /// BasicAliasAnalysis - This is the default alias analysis implementation.
  /// Because it doesn't chain to a previous alias analysis (like -no-aa), it
  /// derives from the NoAA class.
  struct VISIBILITY_HIDDEN BasicAliasAnalysis : public NoAA {
    static char ID; // Class identification, replacement for typeinfo
    BasicAliasAnalysis() : NoAA((intptr_t)&ID) { }
    AliasResult alias(const Value *V1, unsigned V1Size,
                      const Value *V2, unsigned V2Size);

    ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
    ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
      return NoAA::getModRefInfo(CS1,CS2);
    }

    /// hasNoModRefInfoForCalls - We can provide mod/ref information against
    /// non-escaping allocations.
    virtual bool hasNoModRefInfoForCalls() const { return false; }

    /// pointsToConstantMemory - Chase pointers until we find a (constant
    /// global) or not.
    bool pointsToConstantMemory(const Value *P);

  private:
    // CheckGEPInstructions - Check two GEP instructions with known
    // must-aliasing base pointers.  This checks to see if the index expressions
    // preclude the pointers from aliasing...
    AliasResult
    CheckGEPInstructions(const Type* BasePtr1Ty,
                         Value **GEP1Ops, unsigned NumGEP1Ops, unsigned G1Size,
                         const Type *BasePtr2Ty,
                         Value **GEP2Ops, unsigned NumGEP2Ops, unsigned G2Size);
  };

  // Register this pass...
  char BasicAliasAnalysis::ID = 0;
  RegisterPass<BasicAliasAnalysis>
  X("basicaa", "Basic Alias Analysis (default AA impl)");

  // Declare that we implement the AliasAnalysis interface
  RegisterAnalysisGroup<AliasAnalysis, true> Y(X);
}  // End of anonymous namespace

ImmutablePass *llvm::createBasicAliasAnalysisPass() {
  return new BasicAliasAnalysis();
}

/// getUnderlyingObject - This traverses the use chain to figure out what object
/// the specified value points to.  If the value points to, or is derived from,
/// a unique object or an argument, return it.  This returns:
///    Arguments, GlobalVariables, Functions, Allocas, Mallocs.
static const Value *getUnderlyingObject(const Value *V) {
  if (!isa<PointerType>(V->getType())) return 0;

  // If we are at some type of object, return it.  GlobalValues and Allocations
  // have unique addresses.
  if (isa<GlobalValue>(V) || isa<AllocationInst>(V) || isa<Argument>(V))
    return V;

  // Traverse through different addressing mechanisms...
  if (const Instruction *I = dyn_cast<Instruction>(V)) {
    if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I))
      return getUnderlyingObject(I->getOperand(0));
  } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return getUnderlyingObject(CE->getOperand(0));
  }
  return 0;
}

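/// isGEP - Return V as a User if it is a getelementptr instruction or a
/// getelementptr constant expression, and null otherwise.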
static const User *isGEP(const Value *V) {
  if (isa<GetElementPtrInst>(V) ||
      (isa<ConstantExpr>(V) &&
       cast<ConstantExpr>(V)->getOpcode() == Instruction::GetElementPtr))
    return cast<User>(V);
  return 0;
}

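/// GetGEPOperands - Flatten the index operands of V (and of any chained GEPs
/// whose remaining leading index is a constant zero) into GEPOps, and return
/// the base pointer that the collected indexes apply to.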
static const Value *GetGEPOperands(const Value *V,
                                   SmallVector<Value*, 16> &GEPOps) {
  assert(GEPOps.empty() && "Expect empty list to populate!");
  GEPOps.insert(GEPOps.end(), cast<User>(V)->op_begin()+1,
                cast<User>(V)->op_end());

  // Accumulate all of the chained indexes into the operand array
  V = cast<User>(V)->getOperand(0);

  while (const User *G = isGEP(V)) {
    if (!isa<Constant>(GEPOps[0]) || isa<GlobalValue>(GEPOps[0]) ||
        !cast<Constant>(GEPOps[0])->isNullValue())
      break;  // Don't handle folding arbitrary pointer offsets yet...
    GEPOps.erase(GEPOps.begin());   // Drop the zero index
    GEPOps.insert(GEPOps.begin(), G->op_begin()+1, G->op_end());
    V = G->getOperand(0);
  }
  return V;
}

/// pointsToConstantMemory - Chase pointers until we find a (constant
/// global) or not.
bool BasicAliasAnalysis::pointsToConstantMemory(const Value *P) {
  if (const Value *V = getUnderlyingObject(P))
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
      return GV->isConstant();
  return false;
}

// Determine if an AllocationInst instruction escapes from the function it is
// contained in.  If it does not escape, there is no way for another function
// to mod/ref it.  We do this by looking at its uses and determining if the
// uses can escape (recursively).
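// For example, an alloca whose address is only loaded from, returned, or
// passed to known-safe memory intrinsics does not escape; storing the pointer
// itself or passing it to an arbitrary call does.  GEPs and bitcasts of the
// pointer are checked recursively.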
static bool AddressMightEscape(const Value *V) {
  for (Value::use_const_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    const Instruction *I = cast<Instruction>(*UI);
    switch (I->getOpcode()) {
    case Instruction::Load:
      break;  // next use.
    case Instruction::Store:
      if (I->getOperand(0) == V)
        return true; // Escapes if the pointer is stored.
      break;  // next use.
    case Instruction::GetElementPtr:
      if (AddressMightEscape(I))
        return true;
      break;  // next use.
    case Instruction::BitCast:
      if (AddressMightEscape(I))
        return true;
      break;  // next use
    case Instruction::Ret:
      // If returned, the address will escape to calling functions, but no
      // callees could modify it.
      break;  // next use
    case Instruction::Call:
      // If the call is to a few known safe intrinsics, we know that it does
      // not escape
      if (!isa<MemIntrinsic>(I))
        return true;
      break;  // next use
    default:
      return true;
    }
  }
  return false;
}

// getModRefInfo - Check to see if the specified callsite can clobber the
// specified memory object.  Since we only look at local properties of this
// function, we really can't say much about this query.  We do, however, use
// simple "address taken" analysis on local objects.
//
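// For example, a call cannot mod/ref a non-escaping alloca or byval/noalias
// argument unless that pointer (or something aliasing it) is passed to the
// call as an argument.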
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
  if (!isa<Constant>(P)) {
    const Value *Object = getUnderlyingObject(P);
    // Allocations and byval arguments are "new" objects.
    if (Object &&
        (isa<AllocationInst>(Object) || isa<Argument>(Object))) {
      // Okay, the pointer is to a stack allocated (or effectively so, for
      // noalias parameters) object.  If the address of this object doesn't
      // escape from this function body to a callee, then we know that no
      // callees can mod/ref it unless they are actually passed it.
      if (isa<AllocationInst>(Object) ||
          cast<Argument>(Object)->hasByValAttr() ||
          cast<Argument>(Object)->hasNoAliasAttr())
        if (!AddressMightEscape(Object)) {
          bool passedAsArg = false;
          for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
               CI != CE; ++CI)
            if (isa<PointerType>((*CI)->getType()) &&
                (getUnderlyingObject(*CI) == P ||
                 alias(cast<Value>(CI), ~0UL, P, ~0UL) != NoAlias))
              passedAsArg = true;

          if (!passedAsArg)
            return NoModRef;
        }

      // If this is a tail call and P points to a stack location, we know that
      // the tail call cannot access or modify the local stack.
      if (isa<AllocaInst>(Object) ||
          (isa<Argument>(Object) && cast<Argument>(Object)->hasByValAttr()))
        if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
          if (CI->isTailCall())
            return NoModRef;
    }
  }

  // The AliasAnalysis base class has some smarts, lets use them.
  return AliasAnalysis::getModRefInfo(CS, P, Size);
}

// alias - Provide a bunch of ad-hoc rules to disambiguate in common cases,
// such as array references.  Note that this function is heavily tail
// recursive.  Hopefully we have a smart C++ compiler.  :)
//
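// For example, two distinct global variables can never alias, and pointers
// into the same object whose constant GEP offsets are provably far enough
// apart for the given access sizes are reported as NoAlias.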
AliasAnalysis::AliasResult
BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size,
                          const Value *V2, unsigned V2Size) {
  // Strip off any constant expression casts if they exist
  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V1))
    if (CE->isCast() && isa<PointerType>(CE->getOperand(0)->getType()))
      V1 = CE->getOperand(0);
  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V2))
    if (CE->isCast() && isa<PointerType>(CE->getOperand(0)->getType()))
      V2 = CE->getOperand(0);

  // Are we checking for alias of the same value?
  if (V1 == V2) return MustAlias;

  if ((!isa<PointerType>(V1->getType()) || !isa<PointerType>(V2->getType())) &&
      V1->getType() != Type::Int64Ty && V2->getType() != Type::Int64Ty)
    return NoAlias;  // Scalars cannot alias each other

  // Strip off cast instructions...
  if (const BitCastInst *I = dyn_cast<BitCastInst>(V1))
    return alias(I->getOperand(0), V1Size, V2, V2Size);
  if (const BitCastInst *I = dyn_cast<BitCastInst>(V2))
    return alias(V1, V1Size, I->getOperand(0), V2Size);

  // Figure out what objects these things are pointing to if we can...
  const Value *O1 = getUnderlyingObject(V1);
  const Value *O2 = getUnderlyingObject(V2);

  // Pointing at a discernible object?
  if (O1) {
    if (O2) {
      if (const Argument *O1Arg = dyn_cast<Argument>(O1)) {
        // Incoming argument cannot alias locally allocated object!
        if (isa<AllocationInst>(O2)) return NoAlias;

        // If they are two different objects, and one is a noalias argument
        // then they do not alias.
        if (O1 != O2 && O1Arg->hasNoAliasAttr())
          return NoAlias;

        // Byval arguments can't alias globals or other arguments.
        if (O1 != O2 && O1Arg->hasByValAttr()) return NoAlias;

        // Otherwise, nothing is known...
      }

      if (const Argument *O2Arg = dyn_cast<Argument>(O2)) {
        // Incoming argument cannot alias locally allocated object!
        if (isa<AllocationInst>(O1)) return NoAlias;

        // If they are two different objects, and one is a noalias argument
        // then they do not alias.
        if (O1 != O2 && O2Arg->hasNoAliasAttr())
          return NoAlias;

        // Byval arguments can't alias globals or other arguments.
        if (O1 != O2 && O2Arg->hasByValAttr()) return NoAlias;

        // Otherwise, nothing is known...

      } else if (O1 != O2 && !isa<Argument>(O1)) {
        // If they are two different objects, and neither is an argument,
        // we know that we have no alias.
        return NoAlias;
      }

      // If they are the same object, then we can look at the indexes.  If the
      // index off of the object is the same for both pointers, they must
      // alias.  If they are provably different, they must not alias.
      // Otherwise, we can't tell anything.
    }

    // Unique values don't alias null, except non-byval arguments.
    if (isa<ConstantPointerNull>(V2)) {
      if (const Argument *O1Arg = dyn_cast<Argument>(O1)) {
        if (O1Arg->hasByValAttr())
          return NoAlias;
      } else {
        return NoAlias;
      }
    }

    if (isa<GlobalVariable>(O1) ||
        (isa<AllocationInst>(O1) &&
         !cast<AllocationInst>(O1)->isArrayAllocation()))
      if (cast<PointerType>(O1->getType())->getElementType()->isSized()) {
        // If the size of the other access is larger than the total size of the
        // global/alloca/malloc, it cannot be accessing the global (it's
        // undefined to load or store bytes before or after an object).
        const Type *ElTy = cast<PointerType>(O1->getType())->getElementType();
        unsigned GlobalSize = getTargetData().getABITypeSize(ElTy);
        if (GlobalSize < V2Size && V2Size != ~0U)
          return NoAlias;
      }
  }

  if (O2) {
    if (!isa<Argument>(O2) && isa<ConstantPointerNull>(V1))
      return NoAlias;                    // Unique values don't alias null

    if (isa<GlobalVariable>(O2) ||
        (isa<AllocationInst>(O2) &&
         !cast<AllocationInst>(O2)->isArrayAllocation()))
      if (cast<PointerType>(O2->getType())->getElementType()->isSized()) {
        // If the size of the other access is larger than the total size of the
        // global/alloca/malloc, it cannot be accessing the object (it's
        // undefined to load or store bytes before or after an object).
        const Type *ElTy = cast<PointerType>(O2->getType())->getElementType();
        unsigned GlobalSize = getTargetData().getABITypeSize(ElTy);
        if (GlobalSize < V1Size && V1Size != ~0U)
          return NoAlias;
      }
  }

  // If we have two gep instructions with must-alias'ing base pointers, figure
  // out if the indexes to the GEP tell us anything about the derived pointer.
  // Note that we also handle chains of getelementptr instructions as well as
  // constant expression getelementptrs here.
  //
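  // For example, two GEPs off the same must-aliased base that differ only in a
  // constant struct field index can be proven not to alias once the field
  // offsets and access sizes are taken into account.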
  if (isGEP(V1) && isGEP(V2)) {
    // Drill down into the first non-gep value, to test for must-aliasing of
    // the base pointers.
    const User *G = cast<User>(V1);
    while (isGEP(G->getOperand(0)) &&
           G->getOperand(1) ==
           Constant::getNullValue(G->getOperand(1)->getType()))
      G = cast<User>(G->getOperand(0));
    const Value *BasePtr1 = G->getOperand(0);

    G = cast<User>(V2);
    while (isGEP(G->getOperand(0)) &&
           G->getOperand(1) ==
           Constant::getNullValue(G->getOperand(1)->getType()))
      G = cast<User>(G->getOperand(0));
    const Value *BasePtr2 = G->getOperand(0);

    // Do the base pointers alias?
    AliasResult BaseAlias = alias(BasePtr1, ~0U, BasePtr2, ~0U);
    if (BaseAlias == NoAlias) return NoAlias;
    if (BaseAlias == MustAlias) {
      // If the base pointers alias each other exactly, check to see if we can
      // figure out anything about the resultant pointers, to try to prove
      // non-aliasing.

      // Collect all of the chained GEP operands together into one simple place
      SmallVector<Value*, 16> GEP1Ops, GEP2Ops;
      BasePtr1 = GetGEPOperands(V1, GEP1Ops);
      BasePtr2 = GetGEPOperands(V2, GEP2Ops);

      // If GetGEPOperands were able to fold to the same must-aliased pointer,
      // do the comparison.
      if (BasePtr1 == BasePtr2) {
        AliasResult GAlias =
          CheckGEPInstructions(BasePtr1->getType(),
                               &GEP1Ops[0], GEP1Ops.size(), V1Size,
                               BasePtr2->getType(),
                               &GEP2Ops[0], GEP2Ops.size(), V2Size);
        if (GAlias != MayAlias)
          return GAlias;
      }
    }
  }

  // Check to see if these two pointers are related by a getelementptr
  // instruction.  If one pointer is a GEP with a non-zero index of the other
  // pointer, we know they cannot alias.
  //
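  // For example, if V1 is "getelementptr V2, 1" then V1 and V2 are at least
  // one element apart, so the accesses cannot overlap as long as their sizes
  // do not span that constant offset.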
  if (isGEP(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
  }

  if (V1Size != ~0U && V2Size != ~0U)
    if (isGEP(V1)) {
      SmallVector<Value*, 16> GEPOperands;
      const Value *BasePtr = GetGEPOperands(V1, GEPOperands);

      AliasResult R = alias(BasePtr, V1Size, V2, V2Size);
      if (R == MustAlias) {
        // If there is at least one non-zero constant index, we know they
        // cannot alias.
        bool ConstantFound = false;
        bool AllZerosFound = true;
        for (unsigned i = 0, e = GEPOperands.size(); i != e; ++i)
          if (const Constant *C = dyn_cast<Constant>(GEPOperands[i])) {
            if (!C->isNullValue()) {
              ConstantFound = true;
              AllZerosFound = false;
              break;
            }
          } else {
            AllZerosFound = false;
          }

        // If we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2 must-aliases
        // the ptr, the end result is a must alias also.
        if (AllZerosFound)
          return MustAlias;

        if (ConstantFound) {
          if (V2Size <= 1 && V1Size <= 1)  // Just pointer check?
            return NoAlias;

          // Otherwise we have to check to see that the distance is more than
          // the size of the argument... build an index vector that is equal to
          // the arguments provided, except substitute 0's for any variable
          // indexes we find...
          if (cast<PointerType>(
                BasePtr->getType())->getElementType()->isSized()) {
            for (unsigned i = 0; i != GEPOperands.size(); ++i)
              if (!isa<ConstantInt>(GEPOperands[i]))
                GEPOperands[i] =
                  Constant::getNullValue(GEPOperands[i]->getType());
            int64_t Offset =
              getTargetData().getIndexedOffset(BasePtr->getType(),
                                               &GEPOperands[0],
                                               GEPOperands.size());

            if (Offset >= (int64_t)V2Size || Offset <= -(int64_t)V1Size)
              return NoAlias;
          }
        }
      }
    }

  return MayAlias;
}

// This function is used to determine if the indices of two GEP instructions
// are equal.  V1 and V2 are the indices.
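// For example, an i32 1 and an i64 1 compare equal once both constants are
// sign-extended to i64.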
static bool IndexOperandsEqual(Value *V1, Value *V2) {
  if (V1->getType() == V2->getType())
    return V1 == V2;
  if (Constant *C1 = dyn_cast<Constant>(V1))
    if (Constant *C2 = dyn_cast<Constant>(V2)) {
      // Sign extend the constants to long types, if necessary
      if (C1->getType() != Type::Int64Ty)
        C1 = ConstantExpr::getSExt(C1, Type::Int64Ty);
      if (C2->getType() != Type::Int64Ty)
        C2 = ConstantExpr::getSExt(C2, Type::Int64Ty);
      return C1 == C2;
    }
  return false;
}

/// CheckGEPInstructions - Check two GEP instructions with known must-aliasing
/// base pointers.  This checks to see if the index expressions preclude the
/// pointers from aliasing...
AliasAnalysis::AliasResult
BasicAliasAnalysis::CheckGEPInstructions(
  const Type* BasePtr1Ty, Value **GEP1Ops, unsigned NumGEP1Ops, unsigned G1S,
  const Type *BasePtr2Ty, Value **GEP2Ops, unsigned NumGEP2Ops, unsigned G2S) {
  // We currently can't handle the case when the base pointers have different
  // primitive types.  Since this is uncommon anyway, we are happy being
  // extremely conservative.
  if (BasePtr1Ty != BasePtr2Ty)
    return MayAlias;

  const PointerType *GEPPointerTy = cast<PointerType>(BasePtr1Ty);

  // Find the (possibly empty) initial sequence of equal values... which are not
  // necessarily constants.
  unsigned NumGEP1Operands = NumGEP1Ops, NumGEP2Operands = NumGEP2Ops;
  unsigned MinOperands = std::min(NumGEP1Operands, NumGEP2Operands);
  unsigned MaxOperands = std::max(NumGEP1Operands, NumGEP2Operands);
  unsigned UnequalOper = 0;
  while (UnequalOper != MinOperands &&
         IndexOperandsEqual(GEP1Ops[UnequalOper], GEP2Ops[UnequalOper])) {
    // Advance through the type as we go...
    ++UnequalOper;
    if (const CompositeType *CT = dyn_cast<CompositeType>(BasePtr1Ty))
      BasePtr1Ty = CT->getTypeAtIndex(GEP1Ops[UnequalOper-1]);
    else {
      // If all operands equal each other, then the derived pointers must
      // alias each other...
      BasePtr1Ty = 0;
      assert(UnequalOper == NumGEP1Operands && UnequalOper == NumGEP2Operands &&
             "Ran out of type nesting, but not out of operands?");
      return MustAlias;
    }
  }

  // If we have seen all constant operands, and run out of indexes on one of the
  // getelementptrs, check to see if the tail of the leftover one is all zeros.
  // If so, return mustalias.
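  // For example, "gep P, 0, 1" and "gep P, 0, 1, 0, 0" produce the same
  // address, since the extra trailing zero indexes do not move the pointer.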
  if (UnequalOper == MinOperands) {
    if (NumGEP1Ops < NumGEP2Ops) {
      std::swap(GEP1Ops, GEP2Ops);
      std::swap(NumGEP1Ops, NumGEP2Ops);
    }

    bool AllAreZeros = true;
    for (unsigned i = UnequalOper; i != MaxOperands; ++i)
      if (!isa<Constant>(GEP1Ops[i]) ||
          !cast<Constant>(GEP1Ops[i])->isNullValue()) {
        AllAreZeros = false;
        break;
      }
    if (AllAreZeros) return MustAlias;
  }

  // So now we know that the indexes derived from the base pointers,
  // which are known to alias, are different.  We can still determine a
  // no-alias result if there are differing constant pairs in the index
  // chain.  For example:
  //  A[i][0] != A[j][1] iff (&A[0][1]-&A[0][0] >= std::max(G1S, G2S))
  //
  // We have to be careful here about array accesses.  In particular, consider:
  //       A[1][0] vs A[0][i]
  // In this case, we don't *know* that the array will be accessed in bounds:
  // the index could even be negative.  Because of this, we have to
  // conservatively *give up* and return may alias.  We disregard differing
  // array subscripts that are followed by a variable index without going
  // through a struct.
  //
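  // As a concrete instance, for "[10 x [10 x i32]] A" the delta
  // &A[0][1]-&A[0][0] is 4 bytes, so A[i][0] and A[j][1] cannot overlap for
  // accesses of 4 bytes or less.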
  unsigned SizeMax = std::max(G1S, G2S);
  if (SizeMax == ~0U) return MayAlias; // Avoid frivolous work.

  // Scan for the first operand that is constant and unequal in the
  // two getelementptrs...
  unsigned FirstConstantOper = UnequalOper;
  for (; FirstConstantOper != MinOperands; ++FirstConstantOper) {
    const Value *G1Oper = GEP1Ops[FirstConstantOper];
    const Value *G2Oper = GEP2Ops[FirstConstantOper];

    if (G1Oper != G2Oper)   // Found non-equal constant indexes...
      if (Constant *G1OC = dyn_cast<ConstantInt>(const_cast<Value*>(G1Oper)))
        if (Constant *G2OC = dyn_cast<ConstantInt>(const_cast<Value*>(G2Oper))){
          if (G1OC->getType() != G2OC->getType()) {
            // Sign extend both operands to long.
            if (G1OC->getType() != Type::Int64Ty)
              G1OC = ConstantExpr::getSExt(G1OC, Type::Int64Ty);
            if (G2OC->getType() != Type::Int64Ty)
              G2OC = ConstantExpr::getSExt(G2OC, Type::Int64Ty);
            GEP1Ops[FirstConstantOper] = G1OC;
            GEP2Ops[FirstConstantOper] = G2OC;
          }

          if (G1OC != G2OC) {
            // Handle the "be careful" case above: if this is an array/vector
            // subscript, scan for a subsequent variable array index.
            if (isa<SequentialType>(BasePtr1Ty)) {
              const Type *NextTy =
                cast<SequentialType>(BasePtr1Ty)->getElementType();
              bool isBadCase = false;

              for (unsigned Idx = FirstConstantOper+1;
                   Idx != MinOperands && isa<SequentialType>(NextTy); ++Idx) {
                const Value *V1 = GEP1Ops[Idx], *V2 = GEP2Ops[Idx];
                if (!isa<Constant>(V1) || !isa<Constant>(V2)) {
                  isBadCase = true;
                  break;
                }
                NextTy = cast<SequentialType>(NextTy)->getElementType();
              }

              if (isBadCase) G1OC = 0;
            }

            // Make sure they are comparable (ie, not constant expressions), and
            // make sure the GEP with the smaller leading constant is GEP1.
            if (G1OC) {
              Constant *Compare = ConstantExpr::getICmp(ICmpInst::ICMP_SGT,
                                                        G1OC, G2OC);
              if (ConstantInt *CV = dyn_cast<ConstantInt>(Compare)) {
                if (CV->getZExtValue()) {  // If they are comparable and G2 > G1
                  std::swap(GEP1Ops, GEP2Ops);  // Make GEP1 < GEP2
                  std::swap(NumGEP1Ops, NumGEP2Ops);
                }
                break;
              }
            }
          }
        }
    BasePtr1Ty = cast<CompositeType>(BasePtr1Ty)->getTypeAtIndex(G1Oper);
  }

  // No shared constant operands, and we ran out of common operands.  At this
  // point, the GEP instructions have run through all of their operands, and we
  // haven't found evidence that there are any deltas between the GEP's.
  // However, one GEP may have more operands than the other.  If this is the
  // case, there may still be hope.  Check this now.
  if (FirstConstantOper == MinOperands) {
    // Make GEP1Ops be the longer one if there is a longer one.
    if (NumGEP1Ops < NumGEP2Ops) {
      std::swap(GEP1Ops, GEP2Ops);
      std::swap(NumGEP1Ops, NumGEP2Ops);
    }

    // Is there anything to check?
    if (NumGEP1Ops > MinOperands) {
      for (unsigned i = FirstConstantOper; i != MaxOperands; ++i)
        if (isa<ConstantInt>(GEP1Ops[i]) &&
            !cast<ConstantInt>(GEP1Ops[i])->isZero()) {
          // Yup, there's a constant in the tail.  Set all variables to
          // constants in the GEP instruction to make it suitable for
          // TargetData::getIndexedOffset.
          for (i = 0; i != MaxOperands; ++i)
            if (!isa<ConstantInt>(GEP1Ops[i]))
              GEP1Ops[i] = Constant::getNullValue(GEP1Ops[i]->getType());
          // Okay, now get the offset.  This is the relative offset for the full
          // instruction.
          const TargetData &TD = getTargetData();
          int64_t Offset1 = TD.getIndexedOffset(GEPPointerTy, GEP1Ops,
                                                NumGEP1Ops);

          // Now check without any constants at the end.
          int64_t Offset2 = TD.getIndexedOffset(GEPPointerTy, GEP1Ops,
                                                MinOperands);

          // If the tail provided a big enough offset, return noalias!
          if ((uint64_t)(Offset2-Offset1) >= SizeMax)
            return NoAlias;
        }
    }

    // Couldn't find anything useful.
    return MayAlias;
  }

  // If there are non-equal constant arguments, then we can figure
  // out a minimum known delta between the two index expressions... at
  // this point we know that the first constant index of GEP1 is less
  // than the first constant index of GEP2.

  // Advance BasePtr[12]Ty over this first differing constant operand.
  BasePtr2Ty = cast<CompositeType>(BasePtr1Ty)->
      getTypeAtIndex(GEP2Ops[FirstConstantOper]);
  BasePtr1Ty = cast<CompositeType>(BasePtr1Ty)->
      getTypeAtIndex(GEP1Ops[FirstConstantOper]);

  // We are going to be using TargetData::getIndexedOffset to determine the
  // offset that each of the GEP's is reaching.  To do this, we have to convert
  // all variable references to constant references.  We start by converting
  // the initial sequence of array subscripts into constant zeros.
  const Type *ZeroIdxTy = GEPPointerTy;
  for (unsigned i = 0; i != FirstConstantOper; ++i) {
    if (!isa<StructType>(ZeroIdxTy))
      GEP1Ops[i] = GEP2Ops[i] = Constant::getNullValue(Type::Int32Ty);

    if (const CompositeType *CT = dyn_cast<CompositeType>(ZeroIdxTy))
      ZeroIdxTy = CT->getTypeAtIndex(GEP1Ops[i]);
  }

  // We know that GEP1Ops[FirstConstantOper] & GEP2Ops[FirstConstantOper] are ok

  // Loop over the rest of the operands...
  for (unsigned i = FirstConstantOper+1; i != MaxOperands; ++i) {
    const Value *Op1 = i < NumGEP1Ops ? GEP1Ops[i] : 0;
    const Value *Op2 = i < NumGEP2Ops ? GEP2Ops[i] : 0;
    // If they are equal, use a zero index...
    if (Op1 == Op2 && BasePtr1Ty == BasePtr2Ty) {
      if (!isa<ConstantInt>(Op1))
        GEP1Ops[i] = GEP2Ops[i] = Constant::getNullValue(Op1->getType());
      // Otherwise, just keep the constants we have.
    } else {
      if (Op1) {
        if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
          // If this is an array index, make sure the array element is in range.
          if (const ArrayType *AT = dyn_cast<ArrayType>(BasePtr1Ty)) {
            if (Op1C->getZExtValue() >= AT->getNumElements())
              return MayAlias;  // Be conservative with out-of-range accesses
          } else if (const VectorType *VT = dyn_cast<VectorType>(BasePtr1Ty)) {
            if (Op1C->getZExtValue() >= VT->getNumElements())
              return MayAlias;  // Be conservative with out-of-range accesses
          }

        } else {
          // GEP1 is known to produce a value less than GEP2.  To be
          // conservatively correct, we must assume the largest possible
          // constant is used in this position.  This cannot be the initial
          // index to the GEP instructions (because we know we have at least one
          // element before this one with the different constant arguments), so
          // we know that the current index must be into either a struct or
          // array.  Because we know it's not constant, this cannot be a
          // structure index.  Because of this, we can calculate the maximum
          // value possible.
          //
          if (const ArrayType *AT = dyn_cast<ArrayType>(BasePtr1Ty))
            GEP1Ops[i] = ConstantInt::get(Type::Int64Ty,AT->getNumElements()-1);
          else if (const VectorType *VT = dyn_cast<VectorType>(BasePtr1Ty))
            GEP1Ops[i] = ConstantInt::get(Type::Int64Ty,VT->getNumElements()-1);
        }
      }

      if (Op2) {
        if (const ConstantInt *Op2C = dyn_cast<ConstantInt>(Op2)) {
          // If this is an array index, make sure the array element is in range.
          if (const ArrayType *AT = dyn_cast<ArrayType>(BasePtr2Ty)) {
            if (Op2C->getZExtValue() >= AT->getNumElements())
              return MayAlias;  // Be conservative with out-of-range accesses
          } else if (const VectorType *VT = dyn_cast<VectorType>(BasePtr2Ty)) {
            if (Op2C->getZExtValue() >= VT->getNumElements())
              return MayAlias;  // Be conservative with out-of-range accesses
          }
        } else {  // Conservatively assume the minimum value for this index
          GEP2Ops[i] = Constant::getNullValue(Op2->getType());
        }
      }
    }

    if (BasePtr1Ty && Op1) {
      if (const CompositeType *CT = dyn_cast<CompositeType>(BasePtr1Ty))
        BasePtr1Ty = CT->getTypeAtIndex(GEP1Ops[i]);
      else
        BasePtr1Ty = 0;
    }

    if (BasePtr2Ty && Op2) {
      if (const CompositeType *CT = dyn_cast<CompositeType>(BasePtr2Ty))
        BasePtr2Ty = CT->getTypeAtIndex(GEP2Ops[i]);
      else
        BasePtr2Ty = 0;
    }
  }

  if (GEPPointerTy->getElementType()->isSized()) {
    int64_t Offset1 =
      getTargetData().getIndexedOffset(GEPPointerTy, GEP1Ops, NumGEP1Ops);
    int64_t Offset2 =
      getTargetData().getIndexedOffset(GEPPointerTy, GEP2Ops, NumGEP2Ops);
    assert(Offset1 != Offset2 &&
           "There is at least one different constant here!");

    // Make sure we compare the absolute difference.
    if (Offset1 > Offset2)
      std::swap(Offset1, Offset2);

    if ((uint64_t)(Offset2-Offset1) >= SizeMax) {
      //cerr << "Determined that these two GEP's don't alias ["
      //     << SizeMax << " bytes]: \n" << *GEP1 << *GEP2;
      return NoAlias;
    }
  }
  return MayAlias;
}

// Make sure that anything that uses AliasAnalysis pulls in this file...
DEFINING_FILE_FOR(BasicAliasAnalysis)