Make DataLayout Non-Optional in the Module

Summary:
DataLayout keeps the string used for its creation.

As a side effect, the string no longer needs to be stored in the Module.
This is "almost" NFC: the string is no longer canonicalized, so you
can't rely on two "equal" DataLayouts returning the same string from
getStringRepresentation().
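
A minimal sketch of that caveat (the two layout strings below are
illustrative, and this assumes DataLayout::operator== compares the
parsed layout rather than the stored string, which is what the note
above relies on):

// Illustrative only: both strings describe the same layout (a missing
// preferred alignment defaults to the ABI alignment), yet each DataLayout
// keeps the exact string it was built from.
#include "llvm/IR/DataLayout.h"
#include <cassert>

using namespace llvm;

int main() {
  DataLayout A("e-p:64:64");     // preferred pointer alignment defaulted
  DataLayout B("e-p:64:64:64");  // preferred pointer alignment spelled out
  assert(A == B);                // equal as layouts...
  assert(A.getStringRepresentation() !=
         B.getStringRepresentation()); // ...but the strings differ
}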

Get rid of DataLayoutPass: the DataLayout is in the Module

The DataLayout is "per-module"; let's enforce this by not duplicating
it more than necessary. This is one more step toward making the
DataLayout non-optional in the Module.
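
For a pass, the migration boils down to reading the layout off the
Module instead of querying DataLayoutPass; a hedged sketch (the helper
below is hypothetical, only the getDataLayout() access reflects the
actual change):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Hypothetical stand-in for a pass body: no DataLayoutPass lookup and no
// null check; the Module is the single source of truth for the layout.
static bool runOnFunctionSketch(Function &F) {
  const DataLayout &DL = F.getParent()->getDataLayout();
  return DL.isLittleEndian(); // any layout query is unconditionally valid
}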

Make DataLayout Non-Optional in the Module

Module->getDataLayout() never returns nullptr anymore: it now returns a reference.
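
Callers can therefore hold the result by reference and drop their null
checks; a minimal sketch (the helper is hypothetical, but
Module::getDataLayout() and DataLayout::getTypeStoreSize() are the
interfaces this patch relies on):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include <cstdint>

using namespace llvm;

// Hypothetical helper: queries the layout without guarding for absence.
static uint64_t storeSizeOf(const Module &M, Type *Ty) {
  const DataLayout &DL = M.getDataLayout(); // always a valid reference now
  return DL.getTypeStoreSize(Ty);
}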

Reviewers: echristo

Subscribers: resistor, llvm-commits, jholewinski

Differential Revision: http://reviews.llvm.org/D7992

From: Mehdi Amini <mehdi.amini@apple.com>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@231270 91177308-0d34-0410-b5e6-96231b3b80d8


@@ -622,8 +622,9 @@ static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
 /// If the inlined function has non-byval align arguments, then
 /// add @llvm.assume-based alignment assumptions to preserve this information.
 static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
-  if (!PreserveAlignmentAssumptions || !IFI.DL)
+  if (!PreserveAlignmentAssumptions)
     return;
+  auto &DL = CS.getCaller()->getParent()->getDataLayout();
 
   // To avoid inserting redundant assumptions, we should check for assumptions
   // already in the caller. To do this, we might need a DT of the caller.
@@ -645,13 +646,12 @@ static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
       // If we can already prove the asserted alignment in the context of the
       // caller, then don't bother inserting the assumption.
       Value *Arg = CS.getArgument(I->getArgNo());
-      if (getKnownAlignment(Arg, IFI.DL,
-                            &IFI.ACT->getAssumptionCache(*CalledFunc),
+      if (getKnownAlignment(Arg, &DL, &IFI.ACT->getAssumptionCache(*CalledFunc),
                             CS.getInstruction(), &DT) >= Align)
         continue;
 
-      IRBuilder<>(CS.getInstruction()).CreateAlignmentAssumption(*IFI.DL, Arg,
-                                                                 Align);
+      IRBuilder<>(CS.getInstruction())
+          .CreateAlignmentAssumption(DL, Arg, Align);
     }
   }
 }
@@ -726,11 +726,7 @@ static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
   Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
   IRBuilder<> Builder(InsertBlock->begin());
 
-  Value *Size;
-  if (IFI.DL == nullptr)
-    Size = ConstantExpr::getSizeOf(AggTy);
-  else
-    Size = Builder.getInt64(IFI.DL->getTypeStoreSize(AggTy));
+  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
 
   // Always generate a memcpy of alignment 1 here because we don't know
   // the alignment of the src pointer. Other optimizations can infer
@@ -761,7 +757,8 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
     // If the pointer is already known to be sufficiently aligned, or if we can
     // round it up to a larger alignment, then we don't need a temporary.
-    if (getOrEnforceKnownAlignment(Arg, ByValAlignment, IFI.DL,
+    auto &DL = Caller->getParent()->getDataLayout();
+    if (getOrEnforceKnownAlignment(Arg, ByValAlignment, &DL,
                                    &IFI.ACT->getAssumptionCache(*Caller),
                                    TheCall) >= ByValAlignment)
       return Arg;
@@ -771,10 +768,9 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
   }
 
   // Create the alloca.  If we have DataLayout, use nice alignment.
-  unsigned Align = 1;
-  if (IFI.DL)
-    Align = IFI.DL->getPrefTypeAlignment(AggTy);
+  unsigned Align =
+      Caller->getParent()->getDataLayout().getPrefTypeAlignment(AggTy);
 
   // If the byval had an alignment specified, we *must* use at least that
   // alignment, as it is required by the byval argument (and uses of the
   // pointer inside the callee).
@@ -1008,6 +1004,8 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
   // Keep a list of pair (dst, src) to emit byval initializations.
   SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
 
+  auto &DL = Caller->getParent()->getDataLayout();
+
   assert(CalledFunc->arg_size() == CS.arg_size() &&
          "No varargs calls can be inlined!");
@@ -1042,9 +1040,9 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
     // have no dead or constant instructions leftover after inlining occurs
     // (which can happen, e.g., because an argument was constant), but we'll be
     // happy with whatever the cloner can do.
     CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                               /*ModuleLevelChanges=*/false, Returns, ".i",
-                              &InlinedFunctionInfo, IFI.DL, TheCall);
+                              &InlinedFunctionInfo, &DL, TheCall);
 
     // Remember the first block that is newly cloned over.
     FirstNewBlock = LastBlock; ++FirstNewBlock;
@@ -1065,7 +1063,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
   CloneAliasScopeMetadata(CS, VMap);
 
   // Add noalias metadata if necessary.
-  AddAliasScopeMetadata(CS, VMap, IFI.DL, IFI.AA);
+  AddAliasScopeMetadata(CS, VMap, &DL, IFI.AA);
 
   // FIXME: We could register any cloned assumptions instead of clearing the
   // whole function's cache.
@@ -1173,18 +1171,17 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
       ConstantInt *AllocaSize = nullptr;
       if (ConstantInt *AIArraySize =
               dyn_cast<ConstantInt>(AI->getArraySize())) {
-        if (IFI.DL) {
-          Type *AllocaType = AI->getAllocatedType();
-          uint64_t AllocaTypeSize = IFI.DL->getTypeAllocSize(AllocaType);
-          uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
-          assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
-          // Check that array size doesn't saturate uint64_t and doesn't
-          // overflow when it's multiplied by type size.
-          if (AllocaArraySize != ~0ULL &&
-              UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
-            AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
-                                          AllocaArraySize * AllocaTypeSize);
-          }
-        }
+        auto &DL = Caller->getParent()->getDataLayout();
+        Type *AllocaType = AI->getAllocatedType();
+        uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
+        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
+        assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
+        // Check that array size doesn't saturate uint64_t and doesn't
+        // overflow when it's multiplied by type size.
+        if (AllocaArraySize != ~0ULL &&
+            UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
+          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
+                                        AllocaArraySize * AllocaTypeSize);
+        }
       }
@@ -1445,7 +1442,8 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
   // the entries are the same or undef). If so, remove the PHI so it doesn't
   // block other optimizations.
   if (PHI) {
-    if (Value *V = SimplifyInstruction(PHI, IFI.DL, nullptr, nullptr,
+    auto &DL = Caller->getParent()->getDataLayout();
+    if (Value *V = SimplifyInstruction(PHI, &DL, nullptr, nullptr,
                                        &IFI.ACT->getAssumptionCache(*Caller))) {
       PHI->replaceAllUsesWith(V);
       PHI->eraseFromParent();