Rename a few more DataLayout variables from TD to DL.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@201870 91177308-0d34-0410-b5e6-96231b3b80d8
Rafael Espindola 2014-02-21 18:34:28 +00:00
parent 4449ed2a70
commit 75ece7a355
4 changed files with 41 additions and 41 deletions

lib/Analysis/BasicAliasAnalysis.cpp

@@ -93,11 +93,11 @@ static bool isEscapeSource(const Value *V) {
/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
-static uint64_t getObjectSize(const Value *V, const DataLayout &TD,
+static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
const TargetLibraryInfo &TLI,
bool RoundToAlign = false) {
uint64_t Size;
-if (getObjectSize(V, Size, &TD, &TLI, RoundToAlign))
+if (getObjectSize(V, Size, &DL, &TLI, RoundToAlign))
return Size;
return AliasAnalysis::UnknownSize;
}
@@ -105,7 +105,7 @@ static uint64_t getObjectSize(const Value *V, const DataLayout &TD,
/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
-const DataLayout &TD,
+const DataLayout &DL,
const TargetLibraryInfo &TLI) {
// Note that the meanings of the "object" are slightly different in the
// following contexts:
@@ -138,7 +138,7 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
// This function needs to use the aligned object size because we allow
// reads a bit past the end given sufficient alignment.
-uint64_t ObjectSize = getObjectSize(V, TD, TLI, /*RoundToAlign*/true);
+uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/true);
return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
}
@@ -146,8 +146,8 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
/// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size,
-const DataLayout &TD, const TargetLibraryInfo &TLI) {
-uint64_t ObjectSize = getObjectSize(V, TD, TLI);
+const DataLayout &DL, const TargetLibraryInfo &TLI) {
+uint64_t ObjectSize = getObjectSize(V, DL, TLI);
return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
}
@@ -200,7 +200,7 @@ namespace {
/// represented in the result.
static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
ExtensionKind &Extension,
-const DataLayout &TD, unsigned Depth) {
+const DataLayout &DL, unsigned Depth) {
assert(V->getType()->isIntegerTy() && "Not an integer value");
// Limit our recursion depth.
@@ -217,23 +217,23 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
case Instruction::Or:
// X|C == X+C if all the bits in C are unset in X. Otherwise we can't
// analyze it.
-if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), &TD))
+if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), &DL))
break;
// FALL THROUGH.
case Instruction::Add:
V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
-TD, Depth+1);
+DL, Depth+1);
Offset += RHSC->getValue();
return V;
case Instruction::Mul:
V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
-TD, Depth+1);
+DL, Depth+1);
Offset *= RHSC->getValue();
Scale *= RHSC->getValue();
return V;
case Instruction::Shl:
V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
-TD, Depth+1);
+DL, Depth+1);
Offset <<= RHSC->getValue().getLimitedValue();
Scale <<= RHSC->getValue().getLimitedValue();
return V;
@@ -254,7 +254,7 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;
Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension,
-TD, Depth+1);
+DL, Depth+1);
Scale = Scale.zext(OldWidth);
Offset = Offset.zext(OldWidth);
@@ -282,7 +282,7 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
SmallVectorImpl<VariableGEPIndex> &VarIndices,
-const DataLayout *TD) {
+const DataLayout *DL) {
// Limit recursion depth to limit compile time in crazy cases.
unsigned MaxLookup = 6;
@@ -313,7 +313,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
if (const Instruction *I = dyn_cast<Instruction>(V))
// TODO: Get a DominatorTree and use it here.
if (const Value *Simplified =
-SimplifyInstruction(const_cast<Instruction *>(I), TD)) {
+SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
V = Simplified;
continue;
}
@@ -328,7 +328,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
// If we are lacking DataLayout information, we can't compute the offets of
// elements computed by GEPs. However, we can handle bitcast equivalent
// GEPs.
-if (TD == 0) {
+if (DL == 0) {
if (!GEPOp->hasAllZeroIndices())
return V;
V = GEPOp->getOperand(0);
@@ -347,30 +347,30 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
if (FieldNo == 0) continue;
-BaseOffs += TD->getStructLayout(STy)->getElementOffset(FieldNo);
+BaseOffs += DL->getStructLayout(STy)->getElementOffset(FieldNo);
continue;
}
// For an array/pointer, add the element offset, explicitly scaled.
if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
if (CIdx->isZero()) continue;
-BaseOffs += TD->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
+BaseOffs += DL->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
continue;
}
-uint64_t Scale = TD->getTypeAllocSize(*GTI);
+uint64_t Scale = DL->getTypeAllocSize(*GTI);
ExtensionKind Extension = EK_NotExtended;
// If the integer type is smaller than the pointer size, it is implicitly
// sign extended to pointer size.
unsigned Width = Index->getType()->getIntegerBitWidth();
-if (TD->getPointerSizeInBits(AS) > Width)
+if (DL->getPointerSizeInBits(AS) > Width)
Extension = EK_SignExt;
// Use GetLinearExpression to decompose the index into a C1*V+C2 form.
APInt IndexScale(Width, 0), IndexOffset(Width, 0);
Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
-*TD, 0);
+*DL, 0);
// The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
// This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
@@ -392,7 +392,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
// Make sure that we have a scale that makes sense for this target's
// pointer size.
-if (unsigned ShiftBits = 64 - TD->getPointerSizeInBits(AS)) {
+if (unsigned ShiftBits = 64 - DL->getPointerSizeInBits(AS)) {
Scale <<= ShiftBits;
Scale = (int64_t)Scale >> ShiftBits;
}

lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp

@@ -874,7 +874,7 @@ void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
AA = &aa;
GFI = gfi;
LibInfo = li;
-TD = DAG.getTarget().getDataLayout();
+DL = DAG.getTarget().getDataLayout();
Context = DAG.getContext();
LPadToCallSiteMap.clear();
}
@@ -3328,7 +3328,7 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
if (Field) {
// N = N + Offset
-uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
+uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
N = DAG.getNode(ISD::ADD, getCurSDLoc(), N.getValueType(), N,
DAG.getConstant(Offset, N.getValueType()));
}
@@ -3342,7 +3342,7 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->isZero()) continue;
uint64_t Offs =
-TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+DL->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
SDValue OffsVal;
EVT PTy = TLI->getPointerTy(AS);
unsigned PtrBits = PTy.getSizeInBits();
@@ -3359,7 +3359,7 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
// N = N + Idx * ElementSize;
APInt ElementSize = APInt(TLI->getPointerSizeInBits(AS),
-TD->getTypeAllocSize(Ty));
+DL->getTypeAllocSize(Ty));
SDValue IdxN = getValue(Idx);
// If the index is smaller or larger than intptr_t, truncate or extend
@@ -5355,7 +5355,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
return 0;
SmallVector<Value *, 4> Allocas;
-GetUnderlyingObjects(I.getArgOperand(1), Allocas, TD);
+GetUnderlyingObjects(I.getArgOperand(1), Allocas, DL);
for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
E = Allocas.end(); Object != E; ++Object) {
@@ -5608,7 +5608,7 @@ static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
if (const Constant *LoadCst =
ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
-Builder.TD))
+Builder.DL))
return Builder.getValue(LoadCst);
}
@@ -6104,7 +6104,7 @@ public:
/// MVT::Other.
EVT getCallOperandValEVT(LLVMContext &Context,
const TargetLowering &TLI,
-const DataLayout *TD) const {
+const DataLayout *DL) const {
if (CallOperandVal == 0) return MVT::Other;
if (isa<BasicBlock>(CallOperandVal))
@@ -6130,7 +6130,7 @@ public:
// If OpTy is not a single value, it may be a struct/union that we
// can tile with integers.
if (!OpTy->isSingleValueType() && OpTy->isSized()) {
-unsigned BitSize = TD->getTypeSizeInBits(OpTy);
+unsigned BitSize = DL->getTypeSizeInBits(OpTy);
switch (BitSize) {
default: break;
case 1:
@@ -6319,7 +6319,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
}
-OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), *TLI, TD).
+OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), *TLI, DL).
getSimpleVT();
}
@@ -6794,11 +6794,11 @@ void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
const TargetLowering *TLI = TM.getTargetLowering();
-const DataLayout &TD = *TLI->getDataLayout();
+const DataLayout &DL = *TLI->getDataLayout();
SDValue V = DAG.getVAArg(TLI->getValueType(I.getType()), getCurSDLoc(),
getRoot(), getValue(I.getOperand(0)),
DAG.getSrcValue(I.getOperand(0)),
-TD.getABITypeAlignment(I.getType()));
+DL.getABITypeAlignment(I.getType()));
setValue(&I, V);
DAG.setRoot(V.getValue(1));
}
@@ -7341,7 +7341,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
SelectionDAG &DAG = SDB->DAG;
SDLoc dl = SDB->getCurSDLoc();
const TargetLowering *TLI = getTargetLowering();
-const DataLayout *TD = TLI->getDataLayout();
+const DataLayout *DL = TLI->getDataLayout();
SmallVector<ISD::InputArg, 16> Ins;
if (!FuncInfo->CanLowerReturn) {
@@ -7373,7 +7373,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
ISD::ArgFlagsTy Flags;
unsigned OriginalAlignment =
-TD->getABITypeAlignment(ArgTy);
+DL->getABITypeAlignment(ArgTy);
if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
Flags.setZExt();
@@ -7397,7 +7397,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
if (Flags.isByVal() || Flags.isInAlloca()) {
PointerType *Ty = cast<PointerType>(I->getType());
Type *ElementTy = Ty->getElementType();
-Flags.setByValSize(TD->getTypeAllocSize(ElementTy));
+Flags.setByValSize(DL->getTypeAllocSize(ElementTy));
// For ByVal, alignment should be passed from FE. BE will guess if
// this info is not there but there are cases it cannot get right.
unsigned FrameAlign;

lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h

@@ -493,7 +493,7 @@ public:
static const unsigned LowestSDNodeOrder = 1;
SelectionDAG &DAG;
-const DataLayout *TD;
+const DataLayout *DL;
AliasAnalysis *AA;
const TargetLibraryInfo *LibInfo;

lib/Transforms/Scalar/DeadStoreElimination.cpp

@@ -379,10 +379,10 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// Check to see if the later store is to the entire object (either a global,
// an alloca, or a byval/inalloca argument). If so, then it clearly
// overwrites any other store to the same object.
-const DataLayout *TD = AA.getDataLayout();
+const DataLayout *DL = AA.getDataLayout();
-const Value *UO1 = GetUnderlyingObject(P1, TD),
-*UO2 = GetUnderlyingObject(P2, TD);
+const Value *UO1 = GetUnderlyingObject(P1, DL),
+*UO2 = GetUnderlyingObject(P2, DL);
// If we can't resolve the same pointers to the same object, then we can't
// analyze them at all.
@@ -400,8 +400,8 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// pointers are equal, then we can reason about the two stores.
EarlierOff = 0;
LaterOff = 0;
-const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, TD);
-const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, TD);
+const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
+const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);
// If the base pointers still differ, we have two completely different stores.
if (BP1 != BP2)
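
The hunks above only rename the variable; the DataLayout queries they flow through (getStructLayout()->getElementOffset, getTypeAllocSize, getPointerSizeInBits, getABITypeAlignment) are unchanged. As a minimal illustrative sketch, assuming nothing beyond LLVM's public DataLayout API of this era, the snippet below shows those queries under the DL naming the commit adopts; the helper name exampleGEPOffset and its parameters are hypothetical and not part of this commit.

// Illustrative only -- not part of this commit. Shows the DataLayout calls
// used in the hunks above, with the DL naming convention they adopt.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"

// Byte offset of struct field FieldNo plus Idx trailing elements of ElemTy:
// the same arithmetic DecomposeGEPExpression and visitGetElementPtr perform.
static uint64_t exampleGEPOffset(const llvm::DataLayout &DL,
                                 llvm::StructType *STy, unsigned FieldNo,
                                 llvm::Type *ElemTy, int64_t Idx) {
  uint64_t Off = DL.getStructLayout(STy)->getElementOffset(FieldNo);
  Off += DL.getTypeAllocSize(ElemTy) * Idx; // element offset, explicitly scaled
  return Off;
}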