mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2026-04-21 08:17:40 +00:00
DataLayout is mandatory, update the API to reflect it with references.
Summary: Now that the DataLayout is a mandatory part of the module, let's start cleaning the codebase. This patch is a first attempt at doing that. This patch is not exactly NFC as, for instance, some places were passing a nullptr instead of the DataLayout, possibly just because there was a default value on the DataLayout argument to many functions in the API. Even though it is not purely NFC, there is no change in the validation. I turned as many pointers to DataLayout into references as possible; this helped in figuring out all the places where a nullptr could come up. I initially had a local version of this patch broken into over 30 independent commits, but some later commits were cleaning the API and touching parts of the code modified in the previous commits, so it seemed cleaner without the intermediate state. Test Plan: Reviewers: echristo Subscribers: llvm-commits From: Mehdi Amini <mehdi.amini@apple.com> git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@231740 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
@@ -75,7 +75,7 @@ STATISTIC(NumFactor , "Number of factorizations");
|
||||
STATISTIC(NumReassoc , "Number of reassociations");
|
||||
|
||||
Value *InstCombiner::EmitGEPOffset(User *GEP) {
|
||||
return llvm::EmitGEPOffset(Builder, *getDataLayout(), GEP);
|
||||
return llvm::EmitGEPOffset(Builder, DL, GEP);
|
||||
}
|
||||
|
||||
/// ShouldChangeType - Return true if it is desirable to convert a computation
|
||||
@@ -84,13 +84,10 @@ Value *InstCombiner::EmitGEPOffset(User *GEP) {
|
||||
bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
|
||||
assert(From->isIntegerTy() && To->isIntegerTy());
|
||||
|
||||
// If we don't have DL, we don't know if the source/dest are legal.
|
||||
if (!DL) return false;
|
||||
|
||||
unsigned FromWidth = From->getPrimitiveSizeInBits();
|
||||
unsigned ToWidth = To->getPrimitiveSizeInBits();
|
||||
bool FromLegal = DL->isLegalInteger(FromWidth);
|
||||
bool ToLegal = DL->isLegalInteger(ToWidth);
|
||||
bool FromLegal = DL.isLegalInteger(FromWidth);
|
||||
bool ToLegal = DL.isLegalInteger(ToWidth);
|
||||
|
||||
// If this is a legal integer from type, and the result would be an illegal
|
||||
// type, don't do the transformation.
|
||||
@@ -445,7 +442,7 @@ getBinOpsForFactorization(Instruction::BinaryOps TopLevelOpcode,
|
||||
/// This tries to simplify binary operations by factorizing out common terms
|
||||
/// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
|
||||
static Value *tryFactorization(InstCombiner::BuilderTy *Builder,
|
||||
const DataLayout *DL, BinaryOperator &I,
|
||||
const DataLayout &DL, BinaryOperator &I,
|
||||
Instruction::BinaryOps InnerOpcode, Value *A,
|
||||
Value *B, Value *C, Value *D) {
|
||||
|
||||
@@ -872,12 +869,9 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
|
||||
/// will land us at the specified offset. If so, fill them into NewIndices and
|
||||
/// return the resultant element type, otherwise return null.
|
||||
Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
|
||||
SmallVectorImpl<Value*> &NewIndices) {
|
||||
SmallVectorImpl<Value *> &NewIndices) {
|
||||
assert(PtrTy->isPtrOrPtrVectorTy());
|
||||
|
||||
if (!DL)
|
||||
return nullptr;
|
||||
|
||||
Type *Ty = PtrTy->getPointerElementType();
|
||||
if (!Ty->isSized())
|
||||
return nullptr;
|
||||
@@ -885,9 +879,9 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
|
||||
// Start with the index over the outer type. Note that the type size
|
||||
// might be zero (even if the offset isn't zero) if the indexed type
|
||||
// is something like [0 x {int, int}]
|
||||
Type *IntPtrTy = DL->getIntPtrType(PtrTy);
|
||||
Type *IntPtrTy = DL.getIntPtrType(PtrTy);
|
||||
int64_t FirstIdx = 0;
|
||||
if (int64_t TySize = DL->getTypeAllocSize(Ty)) {
|
||||
if (int64_t TySize = DL.getTypeAllocSize(Ty)) {
|
||||
FirstIdx = Offset/TySize;
|
||||
Offset -= FirstIdx*TySize;
|
||||
|
||||
@@ -905,11 +899,11 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
|
||||
// Index into the types. If we fail, set OrigBase to null.
|
||||
while (Offset) {
|
||||
// Indexing into tail padding between struct/array elements.
|
||||
if (uint64_t(Offset*8) >= DL->getTypeSizeInBits(Ty))
|
||||
if (uint64_t(Offset * 8) >= DL.getTypeSizeInBits(Ty))
|
||||
return nullptr;
|
||||
|
||||
if (StructType *STy = dyn_cast<StructType>(Ty)) {
|
||||
const StructLayout *SL = DL->getStructLayout(STy);
|
||||
const StructLayout *SL = DL.getStructLayout(STy);
|
||||
assert(Offset < (int64_t)SL->getSizeInBytes() &&
|
||||
"Offset must stay within the indexed type");
|
||||
|
||||
@@ -920,7 +914,7 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
|
||||
Offset -= SL->getElementOffset(Elt);
|
||||
Ty = STy->getElementType(Elt);
|
||||
} else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
|
||||
uint64_t EltSize = DL->getTypeAllocSize(AT->getElementType());
|
||||
uint64_t EltSize = DL.getTypeAllocSize(AT->getElementType());
|
||||
assert(EltSize && "Cannot index into a zero-sized array");
|
||||
NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
|
||||
Offset %= EltSize;
|
||||
@@ -1214,7 +1208,8 @@ Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
|
||||
// It may not be safe to reorder shuffles and things like div, urem, etc.
|
||||
// because we may trap when executing those ops on unknown vector elements.
|
||||
// See PR20059.
|
||||
if (!isSafeToSpeculativelyExecute(&Inst, DL)) return nullptr;
|
||||
if (!isSafeToSpeculativelyExecute(&Inst))
|
||||
return nullptr;
|
||||
|
||||
unsigned VWidth = cast<VectorType>(Inst.getType())->getNumElements();
|
||||
Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
|
||||
@@ -1300,37 +1295,37 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
|
||||
|
||||
// Eliminate unneeded casts for indices, and replace indices which displace
|
||||
// by multiples of a zero size type with zero.
|
||||
if (DL) {
|
||||
bool MadeChange = false;
|
||||
Type *IntPtrTy = DL->getIntPtrType(GEP.getPointerOperandType());
|
||||
bool MadeChange = false;
|
||||
Type *IntPtrTy = DL.getIntPtrType(GEP.getPointerOperandType());
|
||||
|
||||
gep_type_iterator GTI = gep_type_begin(GEP);
|
||||
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
|
||||
I != E; ++I, ++GTI) {
|
||||
// Skip indices into struct types.
|
||||
SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
|
||||
if (!SeqTy) continue;
|
||||
gep_type_iterator GTI = gep_type_begin(GEP);
|
||||
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
|
||||
++I, ++GTI) {
|
||||
// Skip indices into struct types.
|
||||
SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
|
||||
if (!SeqTy)
|
||||
continue;
|
||||
|
||||
// If the element type has zero size then any index over it is equivalent
|
||||
// to an index of zero, so replace it with zero if it is not zero already.
|
||||
if (SeqTy->getElementType()->isSized() &&
|
||||
DL->getTypeAllocSize(SeqTy->getElementType()) == 0)
|
||||
if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
|
||||
*I = Constant::getNullValue(IntPtrTy);
|
||||
MadeChange = true;
|
||||
}
|
||||
|
||||
Type *IndexTy = (*I)->getType();
|
||||
if (IndexTy != IntPtrTy) {
|
||||
// If we are using a wider index than needed for this platform, shrink
|
||||
// it to what we need. If narrower, sign-extend it to what we need.
|
||||
// This explicit cast can make subsequent optimizations more obvious.
|
||||
*I = Builder->CreateIntCast(*I, IntPtrTy, true);
|
||||
// If the element type has zero size then any index over it is equivalent
|
||||
// to an index of zero, so replace it with zero if it is not zero already.
|
||||
if (SeqTy->getElementType()->isSized() &&
|
||||
DL.getTypeAllocSize(SeqTy->getElementType()) == 0)
|
||||
if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
|
||||
*I = Constant::getNullValue(IntPtrTy);
|
||||
MadeChange = true;
|
||||
}
|
||||
|
||||
Type *IndexTy = (*I)->getType();
|
||||
if (IndexTy != IntPtrTy) {
|
||||
// If we are using a wider index than needed for this platform, shrink
|
||||
// it to what we need. If narrower, sign-extend it to what we need.
|
||||
// This explicit cast can make subsequent optimizations more obvious.
|
||||
*I = Builder->CreateIntCast(*I, IntPtrTy, true);
|
||||
MadeChange = true;
|
||||
}
|
||||
if (MadeChange) return &GEP;
|
||||
}
|
||||
if (MadeChange)
|
||||
return &GEP;
|
||||
|
||||
// Check to see if the inputs to the PHI node are getelementptr instructions.
|
||||
if (PHINode *PN = dyn_cast<PHINode>(PtrOp)) {
|
||||
@@ -1487,13 +1482,13 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
|
||||
GetElementPtrInst::Create(Src->getOperand(0), Indices, GEP.getName());
|
||||
}
|
||||
|
||||
if (DL && GEP.getNumIndices() == 1) {
|
||||
if (GEP.getNumIndices() == 1) {
|
||||
unsigned AS = GEP.getPointerAddressSpace();
|
||||
if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
|
||||
DL->getPointerSizeInBits(AS)) {
|
||||
DL.getPointerSizeInBits(AS)) {
|
||||
Type *PtrTy = GEP.getPointerOperandType();
|
||||
Type *Ty = PtrTy->getPointerElementType();
|
||||
uint64_t TyAllocSize = DL->getTypeAllocSize(Ty);
|
||||
uint64_t TyAllocSize = DL.getTypeAllocSize(Ty);
|
||||
|
||||
bool Matched = false;
|
||||
uint64_t C;
|
||||
@@ -1612,10 +1607,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
|
||||
// into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
|
||||
Type *SrcElTy = StrippedPtrTy->getElementType();
|
||||
Type *ResElTy = PtrOp->getType()->getPointerElementType();
|
||||
if (DL && SrcElTy->isArrayTy() &&
|
||||
DL->getTypeAllocSize(SrcElTy->getArrayElementType()) ==
|
||||
DL->getTypeAllocSize(ResElTy)) {
|
||||
Type *IdxType = DL->getIntPtrType(GEP.getType());
|
||||
if (SrcElTy->isArrayTy() &&
|
||||
DL.getTypeAllocSize(SrcElTy->getArrayElementType()) ==
|
||||
DL.getTypeAllocSize(ResElTy)) {
|
||||
Type *IdxType = DL.getIntPtrType(GEP.getType());
|
||||
Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
|
||||
Value *NewGEP = GEP.isInBounds() ?
|
||||
Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
|
||||
@@ -1630,11 +1625,11 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
|
||||
// %V = mul i64 %N, 4
|
||||
// %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V
|
||||
// into: %t1 = getelementptr i32* %arr, i32 %N; bitcast
|
||||
if (DL && ResElTy->isSized() && SrcElTy->isSized()) {
|
||||
if (ResElTy->isSized() && SrcElTy->isSized()) {
|
||||
// Check that changing the type amounts to dividing the index by a scale
|
||||
// factor.
|
||||
uint64_t ResSize = DL->getTypeAllocSize(ResElTy);
|
||||
uint64_t SrcSize = DL->getTypeAllocSize(SrcElTy);
|
||||
uint64_t ResSize = DL.getTypeAllocSize(ResElTy);
|
||||
uint64_t SrcSize = DL.getTypeAllocSize(SrcElTy);
|
||||
if (ResSize && SrcSize % ResSize == 0) {
|
||||
Value *Idx = GEP.getOperand(1);
|
||||
unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
|
||||
@@ -1642,7 +1637,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
|
||||
|
||||
// Earlier transforms ensure that the index has type IntPtrType, which
|
||||
// considerably simplifies the logic by eliminating implicit casts.
|
||||
assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) &&
|
||||
assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) &&
|
||||
"Index not cast to pointer width?");
|
||||
|
||||
bool NSW;
|
||||
@@ -1665,13 +1660,12 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
|
||||
// getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
|
||||
// (where tmp = 8*tmp2) into:
|
||||
// getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
|
||||
if (DL && ResElTy->isSized() && SrcElTy->isSized() &&
|
||||
SrcElTy->isArrayTy()) {
|
||||
if (ResElTy->isSized() && SrcElTy->isSized() && SrcElTy->isArrayTy()) {
|
||||
// Check that changing to the array element type amounts to dividing the
|
||||
// index by a scale factor.
|
||||
uint64_t ResSize = DL->getTypeAllocSize(ResElTy);
|
||||
uint64_t ArrayEltSize
|
||||
= DL->getTypeAllocSize(SrcElTy->getArrayElementType());
|
||||
uint64_t ResSize = DL.getTypeAllocSize(ResElTy);
|
||||
uint64_t ArrayEltSize =
|
||||
DL.getTypeAllocSize(SrcElTy->getArrayElementType());
|
||||
if (ResSize && ArrayEltSize % ResSize == 0) {
|
||||
Value *Idx = GEP.getOperand(1);
|
||||
unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
|
||||
@@ -1679,7 +1673,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
|
||||
|
||||
// Earlier transforms ensure that the index has type IntPtrType, which
|
||||
// considerably simplifies the logic by eliminating implicit casts.
|
||||
assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) &&
|
||||
assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) &&
|
||||
"Index not cast to pointer width?");
|
||||
|
||||
bool NSW;
|
||||
@@ -1688,9 +1682,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
|
||||
// If the multiplication NewIdx * Scale may overflow then the new
|
||||
// GEP may not be "inbounds".
|
||||
Value *Off[2] = {
|
||||
Constant::getNullValue(DL->getIntPtrType(GEP.getType())),
|
||||
NewIdx
|
||||
};
|
||||
Constant::getNullValue(DL.getIntPtrType(GEP.getType())),
|
||||
NewIdx};
|
||||
|
||||
Value *NewGEP = GEP.isInBounds() && NSW ?
|
||||
Builder->CreateInBoundsGEP(StrippedPtr, Off, GEP.getName()) :
|
||||
@@ -1704,9 +1697,6 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
|
||||
}
|
||||
}
|
||||
|
||||
if (!DL)
|
||||
return nullptr;
|
||||
|
||||
// addrspacecast between types is canonicalized as a bitcast, then an
|
||||
// addrspacecast. To take advantage of the below bitcast + struct GEP, look
|
||||
// through the addrspacecast.
|
||||
@@ -1727,10 +1717,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
|
||||
if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
|
||||
Value *Operand = BCI->getOperand(0);
|
||||
PointerType *OpType = cast<PointerType>(Operand->getType());
|
||||
unsigned OffsetBits = DL->getPointerTypeSizeInBits(GEP.getType());
|
||||
unsigned OffsetBits = DL.getPointerTypeSizeInBits(GEP.getType());
|
||||
APInt Offset(OffsetBits, 0);
|
||||
if (!isa<BitCastInst>(Operand) &&
|
||||
GEP.accumulateConstantOffset(*DL, Offset)) {
|
||||
GEP.accumulateConstantOffset(DL, Offset)) {
|
||||
|
||||
// If this GEP instruction doesn't move the pointer, just replace the GEP
|
||||
// with a bitcast of the real input to the dest type.
|
||||
@@ -2051,7 +2041,7 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
|
||||
Value *Cond = SI.getCondition();
|
||||
unsigned BitWidth = cast<IntegerType>(Cond->getType())->getBitWidth();
|
||||
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
|
||||
computeKnownBits(Cond, KnownZero, KnownOne);
|
||||
computeKnownBits(Cond, KnownZero, KnownOne, 0, &SI);
|
||||
unsigned LeadingKnownZeros = KnownZero.countLeadingOnes();
|
||||
unsigned LeadingKnownOnes = KnownOne.countLeadingOnes();
|
||||
|
||||
@@ -2070,8 +2060,7 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
|
||||
// x86 generates redundant zero-extenstion instructions if the operand is
|
||||
// truncated to i8 or i16.
|
||||
bool TruncCond = false;
|
||||
if (DL && BitWidth > NewWidth &&
|
||||
NewWidth >= DL->getLargestLegalIntTypeSize()) {
|
||||
if (BitWidth > NewWidth && NewWidth >= DL.getLargestLegalIntTypeSize()) {
|
||||
TruncCond = true;
|
||||
IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
|
||||
Builder->SetInsertPoint(&SI);
|
||||
@@ -2632,7 +2621,7 @@ bool InstCombiner::run() {
|
||||
}
|
||||
|
||||
// Instruction isn't dead, see if we can constant propagate it.
|
||||
if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
|
||||
if (!I->use_empty() && isa<Constant>(I->getOperand(0))) {
|
||||
if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
|
||||
DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
|
||||
|
||||
@@ -2643,6 +2632,7 @@ bool InstCombiner::run() {
|
||||
MadeIRChange = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// See if we can trivially sink this instruction to a successor basic block.
|
||||
if (I->hasOneUse()) {
|
||||
@@ -2756,10 +2746,9 @@ bool InstCombiner::run() {
|
||||
/// many instructions are dead or constant). Additionally, if we find a branch
|
||||
/// whose condition is a known constant, we only visit the reachable successors.
|
||||
///
|
||||
static bool AddReachableCodeToWorklist(BasicBlock *BB,
|
||||
SmallPtrSetImpl<BasicBlock*> &Visited,
|
||||
static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,
|
||||
SmallPtrSetImpl<BasicBlock *> &Visited,
|
||||
InstCombineWorklist &ICWorklist,
|
||||
const DataLayout *DL,
|
||||
const TargetLibraryInfo *TLI) {
|
||||
bool MadeIRChange = false;
|
||||
SmallVector<BasicBlock*, 256> Worklist;
|
||||
@@ -2797,23 +2786,22 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
|
||||
continue;
|
||||
}
|
||||
|
||||
if (DL) {
|
||||
// See if we can constant fold its operands.
|
||||
for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
|
||||
i != e; ++i) {
|
||||
ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
|
||||
if (CE == nullptr) continue;
|
||||
// See if we can constant fold its operands.
|
||||
for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end(); i != e;
|
||||
++i) {
|
||||
ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
|
||||
if (CE == nullptr)
|
||||
continue;
|
||||
|
||||
Constant*& FoldRes = FoldedConstants[CE];
|
||||
if (!FoldRes)
|
||||
FoldRes = ConstantFoldConstantExpression(CE, DL, TLI);
|
||||
if (!FoldRes)
|
||||
FoldRes = CE;
|
||||
Constant *&FoldRes = FoldedConstants[CE];
|
||||
if (!FoldRes)
|
||||
FoldRes = ConstantFoldConstantExpression(CE, DL, TLI);
|
||||
if (!FoldRes)
|
||||
FoldRes = CE;
|
||||
|
||||
if (FoldRes != CE) {
|
||||
*i = FoldRes;
|
||||
MadeIRChange = true;
|
||||
}
|
||||
if (FoldRes != CE) {
|
||||
*i = FoldRes;
|
||||
MadeIRChange = true;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2867,7 +2855,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
|
||||
///
|
||||
/// This also does basic constant propagation and other forward fixing to make
|
||||
/// the combiner itself run much faster.
|
||||
static bool prepareICWorklistFromFunction(Function &F, const DataLayout *DL,
|
||||
static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL,
|
||||
TargetLibraryInfo *TLI,
|
||||
InstCombineWorklist &ICWorklist) {
|
||||
bool MadeIRChange = false;
|
||||
@@ -2877,7 +2865,7 @@ static bool prepareICWorklistFromFunction(Function &F, const DataLayout *DL,
|
||||
// track of which blocks we visit.
|
||||
SmallPtrSet<BasicBlock *, 64> Visited;
|
||||
MadeIRChange |=
|
||||
AddReachableCodeToWorklist(F.begin(), Visited, ICWorklist, DL, TLI);
|
||||
AddReachableCodeToWorklist(F.begin(), DL, Visited, ICWorklist, TLI);
|
||||
|
||||
// Do a quick scan over the function. If we find any blocks that are
|
||||
// unreachable, remove any instructions inside of them. This prevents
|
||||
@@ -2916,12 +2904,12 @@ combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
|
||||
DominatorTree &DT, LoopInfo *LI = nullptr) {
|
||||
// Minimizing size?
|
||||
bool MinimizeSize = F.hasFnAttribute(Attribute::MinSize);
|
||||
const DataLayout &DL = F.getParent()->getDataLayout();
|
||||
auto &DL = F.getParent()->getDataLayout();
|
||||
|
||||
/// Builder - This is an IRBuilder that automatically inserts new
|
||||
/// instructions into the worklist when they are created.
|
||||
IRBuilder<true, TargetFolder, InstCombineIRInserter> Builder(
|
||||
F.getContext(), TargetFolder(&DL), InstCombineIRInserter(Worklist, &AC));
|
||||
F.getContext(), TargetFolder(DL), InstCombineIRInserter(Worklist, &AC));
|
||||
|
||||
// Lower dbg.declare intrinsics otherwise their value may be clobbered
|
||||
// by instcombiner.
|
||||
@@ -2935,10 +2923,10 @@ combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
|
||||
<< F.getName() << "\n");
|
||||
|
||||
bool Changed = false;
|
||||
if (prepareICWorklistFromFunction(F, &DL, &TLI, Worklist))
|
||||
if (prepareICWorklistFromFunction(F, DL, &TLI, Worklist))
|
||||
Changed = true;
|
||||
|
||||
InstCombiner IC(Worklist, &Builder, MinimizeSize, &AC, &TLI, &DT, &DL, LI);
|
||||
InstCombiner IC(Worklist, &Builder, MinimizeSize, &AC, &TLI, &DT, DL, LI);
|
||||
if (IC.run())
|
||||
Changed = true;
|
||||
|
||||
|
||||
Reference in New Issue
Block a user