Remove trailing spaces.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@189173 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Jakub Staszak
Date:   2013-08-24 14:16:00 +00:00
Parent: 0b6962f4be
Commit: 394e5a9ee8

lib/Analysis/BasicAliasAnalysis.cpp

@@ -122,7 +122,7 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
  //     question (in this case rewind to p), or
  //   - just give up. It is up to caller to make sure the pointer is pointing
  //     to the base address of the object.
  //
  // We go for 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;
@@ -130,7 +130,7 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, TD, TLI, /*RoundToAlign*/true);

  return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
}
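Note on /*RoundToAlign*/: with that flag set, getObjectSize reports the allocation size rounded up to its alignment, which is what lets a small read past the nominal end still count as in-bounds here. A minimal sketch of the rounding rule (standalone illustration, not the LLVM helper itself):

    #include <cstdint>

    // Round Size up to the next multiple of Align (Align a power of two).
    // E.g. a 10-byte object with 16-byte alignment is treated as 16 bytes.
    static uint64_t roundUpToAlign(uint64_t Size, uint64_t Align) {
      return (Size + Align - 1) & ~(Align - 1);
    }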
@@ -163,7 +163,7 @@ namespace {
    EK_SignExt,
    EK_ZeroExt
  };

  struct VariableGEPIndex {
    const Value *V;
    ExtensionKind Extension;
@@ -200,7 +200,7 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
    Offset = 0;
    return V;
  }

  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      switch (BOp->getOpcode()) {
@@ -231,7 +231,7 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
      }
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
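Note on why the extensions must be consistent: sign and zero extension disagree on negative values, so terms widened one way cannot be merged with terms widened the other way. A small illustration (assumed 8-bit index widened to 16 bits):

    #include <cstdint>

    static void extensionMismatch() {
      int8_t   x  = -1;                     // bit pattern 0xFF
      int16_t  sx = (int16_t)x;             // sign extend:  -1   (0xFFFF)
      uint16_t zx = (uint16_t)(uint8_t)x;   // zero extend:  255  (0x00FF)
      (void)sx; (void)zx;                   // same bits, different values
    }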
@@ -248,10 +248,10 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
                                        TD, Depth+1);
    Scale = Scale.zext(OldWidth);
    Offset = Offset.zext(OldWidth);

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
@@ -276,7 +276,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
                       const DataLayout *TD) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = 6;

  BaseOffs = 0;
  do {
    // See if this is a bitcast or GEP.
@@ -291,7 +291,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
      }
      return V;
    }

    if (Op->getOpcode() == Instruction::BitCast) {
      V = Op->getOperand(0);
      continue;
@@ -308,15 +308,15 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
        V = Simplified;
        continue;
      }

      return V;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!cast<PointerType>(GEPOp->getOperand(0)->getType())
        ->getElementType()->isSized())
      return V;

    // If we are lacking DataLayout information, we can't compute the offsets
    // of elements computed by GEPs. However, we can handle bitcast equivalent
    // GEPs.
@@ -326,7 +326,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
      V = GEPOp->getOperand(0);
      continue;
    }

    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin()+1,
@@ -337,38 +337,37 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0) continue;

        BaseOffs += TD->getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero()) continue;
        BaseOffs += TD->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
        continue;
      }

      uint64_t Scale = TD->getTypeAllocSize(*GTI);
      ExtensionKind Extension = EK_NotExtended;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
      if (TD->getPointerSizeInBits() > Width)
        Extension = EK_SignExt;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
                                  *TD, 0);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      BaseOffs += IndexOffset.getSExtValue()*Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
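Note on the decomposition arithmetic: if an index is x*4 + 1 and the element size is 8 bytes, then (4*x + 1)*8 splits into a constant part 8, folded into BaseOffs, and a variable part with scale 32, recorded in VarIndices. Worked with hypothetical numbers:

    // (C1*x + C2) * ElemSize == (C1*ElemSize)*x + C2*ElemSize
    int64_t C1 = 4, C2 = 1, ElemSize = 8;
    int64_t constPart = C2 * ElemSize;   // 8  -> BaseOffs += 8
    int64_t varScale  = C1 * ElemSize;   // 32 -> VarIndices gets {x, 32}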
@@ -381,25 +380,25 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
        Scale <<= ShiftBits;
        Scale = (int64_t)Scale >> ShiftBits;
      }

      if (Scale) {
        VariableGEPIndex Entry = {Index, Extension,
                                  static_cast<int64_t>(Scale)};
        VarIndices.push_back(Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  return V;
}
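Note on the ShiftBits pair: shifting left and then arithmetic-shifting right is the usual idiom for truncating a value to the pointer width while sign extending the result. On an assumed 32-bit target:

    uint64_t Scale = 0x00000001FFFFFFFFULL;  // low 32 bits are all ones
    unsigned ShiftBits = 64 - 32;            // 64 - pointer size in bits
    Scale <<= ShiftBits;                     // 0xFFFFFFFF00000000
    Scale = (int64_t)Scale >> ShiftBits;     // 0xFFFFFFFFFFFFFFFF, i.e. -1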
@@ -407,7 +406,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
/// GetIndexDifference - Dest and Src are the variable indices from two
/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
/// difference between the two pointers.
static void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
                               const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty()) return;
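Note: the loop below effectively computes Dest -= Src over the variable indices, cancelling matching terms and appending negated leftovers. A simplified model of that behavior (illustration only; a map of names stands in for the real VariableGEPIndex entries):

    #include <cstdint>
    #include <map>
    #include <string>

    typedef std::map<std::string, int64_t> Indices;  // variable -> scale

    static void indexDifference(Indices &Dest, const Indices &Src) {
      for (Indices::const_iterator I = Src.begin(), E = Src.end(); I != E; ++I) {
        Dest[I->first] -= I->second;       // subtract (inserts 0 if absent)
        if (Dest[I->first] == 0)
          Dest.erase(I->first);            // drop cancelled entries
      }
    }
    // { x:4, y:8 } minus { x:4, z:2 } leaves { y:8, z:-2 }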
@@ -416,12 +415,12 @@ static void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
    const Value *V = Src[i].V;
    ExtensionKind Extension = Src[i].Extension;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (Dest[j].V != V || Dest[j].Extension != Extension) continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
@@ -431,7 +430,7 @@ static void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = { V, Extension, -Scale };
@@ -526,7 +525,7 @@ namespace {
        return (AliasAnalysis*)this;
      return this;
    }

  private:
    // AliasCache - Track alias queries to guard against recursion.
    typedef std::pair<Location, Location> LocPair;
@@ -696,7 +695,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
"AliasAnalysis query involving multiple functions!"); "AliasAnalysis query involving multiple functions!");
const Value *Object = GetUnderlyingObject(Loc.Ptr, TD); const Value *Object = GetUnderlyingObject(Loc.Ptr, TD);
// If this is a tail call and Loc.Ptr points to a stack location, we know that // If this is a tail call and Loc.Ptr points to a stack location, we know that
// the tail call cannot access or modify the local stack. // the tail call cannot access or modify the local stack.
// We cannot exclude byval arguments here; these belong to the caller of // We cannot exclude byval arguments here; these belong to the caller of
@@ -706,7 +705,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
  if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
    if (CI->isTailCall())
      return NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call can not mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
@@ -722,7 +721,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking. If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
@@ -732,7 +731,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
        break;
      }
    }

    if (!PassedAsArg)
      return NoModRef;
  }
@@ -821,7 +820,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
  }

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  else if (TLI.has(LibFunc::memset_pattern16) &&
@@ -925,22 +924,22 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
        GEP1VariableIndices.clear();
      }
    }

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) return BaseAlias;

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    const Value *GEP1BasePtr =
      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);

    int64_t GEP2BaseOffset;
    SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
    const Value *GEP2BasePtr =
      DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
@@ -948,12 +947,12 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
"DecomposeGEPExpression and GetUnderlyingObject disagree!"); "DecomposeGEPExpression and GetUnderlyingObject disagree!");
return MayAlias; return MayAlias;
} }
// Subtract the GEP2 pointer from the GEP1 pointer to find out their // Subtract the GEP2 pointer from the GEP1 pointer to find out their
// symbolic difference. // symbolic difference.
GEP1BaseOffset -= GEP2BaseOffset; GEP1BaseOffset -= GEP2BaseOffset;
GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices); GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);
} else { } else {
// Check to see if these two pointers are related by the getelementptr // Check to see if these two pointers are related by the getelementptr
// instruction. If one pointer is a GEP with a non-zero index of the other // instruction. If one pointer is a GEP with a non-zero index of the other
@@ -975,7 +974,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
    const Value *GEP1BasePtr =
      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1) {
@@ -984,7 +983,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
      return MayAlias;
    }
  }

  // In the two GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEP's (for example).
@@ -1226,7 +1225,7 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
  if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
      (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
    return NoAlias;

  // If one pointer is the result of a call/invoke or load and the other is a
  // non-escaping local object within the same function, then we know the
  // object couldn't escape to a point where the call could return it.
@@ -1248,7 +1247,7 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
  if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD, *TLI)) ||
      (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD, *TLI)))
    return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(Location(V1, V1Size, V1TBAAInfo),