diff --git a/include/llvm/IR/IRBuilder.h b/include/llvm/IR/IRBuilder.h
index e4652874407..b6994d9c464 100644
--- a/include/llvm/IR/IRBuilder.h
+++ b/include/llvm/IR/IRBuilder.h
@@ -1055,12 +1055,12 @@ public:
         return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
     return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
   }
-  Value *CreateInBoundsGEP(Value *Ptr, Value *Idx, const Twine &Name = "") {
+  Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
+                           const Twine &Name = "") {
     if (Constant *PC = dyn_cast<Constant>(Ptr))
       if (Constant *IC = dyn_cast<Constant>(Idx))
-        return Insert(Folder.CreateInBoundsGetElementPtr(nullptr, PC, IC),
-                      Name);
-    return Insert(GetElementPtrInst::CreateInBounds(nullptr, Ptr, Idx), Name);
+        return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
+    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
   }
   Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") {
     Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index 1bdddebf9c9..61ce513a3d3 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -524,7 +524,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
     }

     // Emit a GEP.
-    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
+    Value *GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
     rememberInstruction(GEP);
     return GEP;
diff --git a/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp b/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
index 578401a34ea..32646ee1133 100644
--- a/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
+++ b/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
@@ -84,9 +84,11 @@ static void convertTransferToLoop(
   ind->addIncoming(ConstantInt::get(indType, 0), origBB);

   // load from srcAddr+ind
-  Value *val = loop.CreateLoad(loop.CreateGEP(srcAddr, ind), srcVolatile);
+  Value *val = loop.CreateLoad(loop.CreateGEP(loop.getInt8Ty(), srcAddr, ind),
+                               srcVolatile);
   // store at dstAddr+ind
-  loop.CreateStore(val, loop.CreateGEP(dstAddr, ind), dstVolatile);
+  loop.CreateStore(val, loop.CreateGEP(loop.getInt8Ty(), dstAddr, ind),
+                   dstVolatile);

   // The value for ind coming from backedge is (ind + 1)
   Value *newind = loop.CreateAdd(ind, ConstantInt::get(indType, 1));
@@ -116,7 +118,7 @@ static void convertMemSetToLoop(Instruction *splitAt, Value *dstAddr,
   PHINode *ind = loop.CreatePHI(len->getType(), 0);
   ind->addIncoming(ConstantInt::get(len->getType(), 0), origBB);

-  loop.CreateStore(val, loop.CreateGEP(dstAddr, ind), false);
+  loop.CreateStore(val, loop.CreateGEP(val->getType(), dstAddr, ind), false);

   Value *newind = loop.CreateAdd(ind, ConstantInt::get(len->getType(), 1));
   ind->addIncoming(newind, loopBB);
diff --git a/lib/Transforms/IPO/LowerBitSets.cpp b/lib/Transforms/IPO/LowerBitSets.cpp
index fae75a8ac41..f3f8529911c 100644
--- a/lib/Transforms/IPO/LowerBitSets.cpp
+++ b/lib/Transforms/IPO/LowerBitSets.cpp
@@ -396,16 +396,17 @@ Value *LowerBitSets::createBitSetTest(IRBuilder<> &B, BitSetInfo &BSI,
   }

   Constant *ByteArray = BAI->ByteArray;
+  Type *Ty = BAI->ByteArray->getValueType();
   if (!LinkerSubsectionsViaSymbols && AvoidReuse) {
     // Each use of the byte array uses a different alias. This makes the
     // backend less likely to reuse previously computed byte array addresses,
     // improving the security of the CFI mechanism based on this pass.
-    ByteArray = GlobalAlias::create(
-        BAI->ByteArray->getType()->getElementType(), 0,
-        GlobalValue::PrivateLinkage, "bits_use", ByteArray, M);
+    ByteArray = GlobalAlias::create(BAI->ByteArray->getValueType(), 0,
+                                    GlobalValue::PrivateLinkage, "bits_use",
+                                    ByteArray, M);
   }

-  Value *ByteAddr = B.CreateGEP(ByteArray, BitOffset);
+  Value *ByteAddr = B.CreateGEP(Ty, ByteArray, BitOffset);
   Value *Byte = B.CreateLoad(ByteAddr);

   Value *ByteAndMask = B.CreateAnd(Byte, BAI->Mask);
diff --git a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index b3925ee152f..dfa501da2da 100644
--- a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -1075,8 +1075,8 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
   }
   case 2: {
     IRBuilder<> IRB(Pos);
-    Value *ShadowAddr1 =
-        IRB.CreateGEP(ShadowAddr, ConstantInt::get(DFS.IntptrTy, 1));
+    Value *ShadowAddr1 = IRB.CreateGEP(DFS.ShadowTy, ShadowAddr,
+                                       ConstantInt::get(DFS.IntptrTy, 1));
     return combineShadows(IRB.CreateAlignedLoad(ShadowAddr, ShadowAlign),
                           IRB.CreateAlignedLoad(ShadowAddr1, ShadowAlign), Pos);
   }
@@ -1127,7 +1127,8 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
     BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
     DT.addNewBlock(NextBB, LastBr->getParent());
     IRBuilder<> NextIRB(NextBB);
-    WideAddr = NextIRB.CreateGEP(WideAddr, ConstantInt::get(DFS.IntptrTy, 1));
+    WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
+                                 ConstantInt::get(DFS.IntptrTy, 1));
     Value *NextWideShadow = NextIRB.CreateAlignedLoad(WideAddr, ShadowAlign);
     ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
     LastBr->setSuccessor(0, NextBB);
diff --git a/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 7d63d1a57ce..5cb0e84741f 100644
--- a/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -925,7 +925,7 @@ void GCOVProfiler::insertIndirectCounterIncrement() {
   Value *ZExtPred = Builder.CreateZExt(Pred, Builder.getInt64Ty());
   Arg = std::next(Fn->arg_begin());
   Arg->setName("counters");
-  Value *GEP = Builder.CreateGEP(Arg, ZExtPred);
+  Value *GEP = Builder.CreateGEP(Type::getInt64PtrTy(*Ctx), Arg, ZExtPred);
   Value *Counter = Builder.CreateLoad(GEP, "counter");
   Cond = Builder.CreateICmpEQ(Counter,
                               Constant::getNullValue(
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index 51e80416be1..ba44bec0793 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -1637,7 +1637,7 @@ static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
            && "unit stride pointer IV must be i8*");

     IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
-    return Builder.CreateGEP(GEPBase, GEPOffset, "lftr.limit");
+    return Builder.CreateGEP(nullptr, GEPBase, GEPOffset, "lftr.limit");
   } else {
     // In any other case, convert both IVInit and IVCount to integers before
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index acd85858906..72eada2d3d9 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -2135,7 +2135,7 @@ void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
     // split the alloca again later.
     unsigned AS = AI->getType()->getAddressSpace();
     Value *V = Builder.CreateBitCast(NewElts[Idx], Builder.getInt8PtrTy(AS));
-    V = Builder.CreateGEP(V, Builder.getInt64(NewOffset));
+    V = Builder.CreateGEP(Builder.getInt8Ty(), V, Builder.getInt64(NewOffset));

     IdxTy = NewElts[Idx]->getAllocatedType();
     uint64_t EltSize = DL.getTypeAllocSize(IdxTy) - NewOffset;
diff --git a/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
index 1a04d74ade1..8af47530dbc 100644
--- a/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -757,14 +757,16 @@ void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
         }
       }
       // Create an ugly GEP with a single index for each index.
-      ResultPtr = Builder.CreateGEP(ResultPtr, Idx, "uglygep");
+      ResultPtr =
+          Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Idx, "uglygep");
     }
   }

   // Create a GEP with the constant offset index.
   if (AccumulativeByteOffset != 0) {
     Value *Offset = ConstantInt::get(IntPtrTy, AccumulativeByteOffset);
-    ResultPtr = Builder.CreateGEP(ResultPtr, Offset, "uglygep");
+    ResultPtr =
+        Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Offset, "uglygep");
   }
   if (ResultPtr->getType() != Variadic->getType())
     ResultPtr = Builder.CreateBitCast(ResultPtr, Variadic->getType());
diff --git a/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp b/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
index 940ab58f158..3c1916f923e 100644
--- a/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
@@ -482,7 +482,7 @@ void StraightLineStrengthReduce::rewriteCandidateWithBasis(
       if (InBounds)
         Reduced = Builder.CreateInBoundsGEP(Reduced, Bump);
       else
-        Reduced = Builder.CreateGEP(Reduced, Bump);
+        Reduced = Builder.CreateGEP(Builder.getInt8Ty(), Reduced, Bump);
       Reduced = Builder.CreateBitCast(Reduced, C.Ins->getType());
     } else {
       // C = gep Basis, Bump
@@ -491,7 +491,7 @@ void StraightLineStrengthReduce::rewriteCandidateWithBasis(
       if (InBounds)
         Reduced = Builder.CreateInBoundsGEP(Basis.Ins, Bump);
       else
-        Reduced = Builder.CreateGEP(Basis.Ins, Bump);
+        Reduced = Builder.CreateGEP(nullptr, Basis.Ins, Bump);
     }
   }
   break;
diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index 6d260088934..21588568ede 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -686,7 +686,7 @@ public:
       Index = B.CreateNeg(Index);
     else if (!StepValue->isOne())
       Index = B.CreateMul(Index, StepValue);
-    return B.CreateGEP(StartValue, Index);
+    return B.CreateGEP(nullptr, StartValue, Index);

   case IK_NoInduction:
     return nullptr;
@@ -1839,7 +1839,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
     for (unsigned Part = 0; Part < UF; ++Part) {
       // Calculate the pointer for the specific unroll-part.
-      Value *PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(Part * VF));
+      Value *PartPtr =
+          Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));

       if (Reverse) {
         // If we store to reverse consecutive memory locations then we need
@@ -1847,8 +1848,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
         StoredVal[Part] = reverseVector(StoredVal[Part]);
         // If the address is consecutive but reversed, then the
         // wide store needs to start at the last vector element.
-        PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(-Part * VF));
-        PartPtr = Builder.CreateGEP(PartPtr, Builder.getInt32(1 - VF));
+        PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
+        PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
         Mask[Part] = reverseVector(Mask[Part]);
       }
@@ -1871,13 +1872,14 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
     setDebugLocFromInst(Builder, LI);
     for (unsigned Part = 0; Part < UF; ++Part) {
       // Calculate the pointer for the specific unroll-part.
-      Value *PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(Part * VF));
+      Value *PartPtr =
+          Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));

       if (Reverse) {
         // If the address is consecutive but reversed, then the
         // wide load needs to start at the last vector element.
-        PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(-Part * VF));
-        PartPtr = Builder.CreateGEP(PartPtr, Builder.getInt32(1 - VF));
+        PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
+        PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
         Mask[Part] = reverseVector(Mask[Part]);
       }
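
For callers outside this patch, the migration is mechanical: pass the pointee type as the new leading argument of CreateGEP/CreateInBoundsGEP, or pass nullptr to keep inferring it from the pointer operand's type for now, as the IndVarSimplify, StraightLineStrengthReduce, and LoopVectorize call sites above do. Below is a minimal sketch of an updated call site, not part of the patch itself; it assumes an IRBuilder<> already positioned at the insertion point, and the helper name emitByteGEP is illustrative.

#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Byte-addressed "uglygep" emission, in the style of the SCEVExpander and
// SeparateConstOffsetFromGEP call sites above: the i8 pointee type is now
// spelled out instead of being derived from BasePtr's pointer type.
static Value *emitByteGEP(IRBuilder<> &Builder, Value *BasePtr, Value *Offset) {
  // Old API:  Builder.CreateGEP(BasePtr, Offset, "uglygep");
  // New API:  the pointee type comes first. Passing nullptr here instead of
  // Builder.getInt8Ty() would preserve the old inference behavior.
  return Builder.CreateGEP(Builder.getInt8Ty(), BasePtr, Offset, "uglygep");
}

Spelling the type out at each call site is what eventually allows the pointee type to be dropped from the pointer type itself (the opaque-pointer direction); the nullptr call sites mark the places where that decoupling is still to be done.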