Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2024-12-27 13:30:05 +00:00)
[opaque pointer type] More GEP API migrations in IRBuilder uses
The plan here is to push the API changes out from the common components (like Constant::getGetElementPtr and the IRBuilder::CreateGEP-related functions) and just update callers to either pass the type if it's obvious, or pass null. Do this with LoadInst as well and anything else that comes up, then start porting specific uses so they no longer pass null; this may require some refactoring in each case.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@234042 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 675e22e7ad
commit cf57d81b6e
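For orientation, here is a minimal sketch (not part of the patch) of the caller-side pattern this series moves toward, assuming the IRBuilder API as it stands after this change; the helper name emitByteGEP is illustrative only:

#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Illustrative helper: build a byte-offset GEP with the element type spelled
// out explicitly instead of being derived from the pointer's pointee type.
static Value *emitByteGEP(IRBuilder<> &B, Value *BasePtr, Value *ByteOffset) {
  // Old style (element type inferred from BasePtr's pointee type):
  //   return B.CreateGEP(BasePtr, ByteOffset, "uglygep");
  // New style: pass the element type (i8 here) explicitly, or nullptr when
  // the call site cannot name the type yet.
  return B.CreateGEP(B.getInt8Ty(), BasePtr, ByteOffset, "uglygep");
}

Call sites that cannot name the element type yet simply pass nullptr for now, as several of the hunks below do; porting those to a real type is the follow-up work described in the message above.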
@@ -1055,12 +1055,12 @@ public:
       return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
     return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
   }
-  Value *CreateInBoundsGEP(Value *Ptr, Value *Idx, const Twine &Name = "") {
+  Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
+                           const Twine &Name = "") {
     if (Constant *PC = dyn_cast<Constant>(Ptr))
       if (Constant *IC = dyn_cast<Constant>(Idx))
-        return Insert(Folder.CreateInBoundsGetElementPtr(nullptr, PC, IC),
-                      Name);
-    return Insert(GetElementPtrInst::CreateInBounds(nullptr, Ptr, Idx), Name);
+        return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
+    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
   }
   Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") {
     Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
@@ -524,7 +524,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
   }

   // Emit a GEP.
-  Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
+  Value *GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
   rememberInstruction(GEP);

   return GEP;
@@ -84,9 +84,11 @@ static void convertTransferToLoop(
   ind->addIncoming(ConstantInt::get(indType, 0), origBB);

   // load from srcAddr+ind
-  Value *val = loop.CreateLoad(loop.CreateGEP(srcAddr, ind), srcVolatile);
+  Value *val = loop.CreateLoad(loop.CreateGEP(loop.getInt8Ty(), srcAddr, ind),
+                               srcVolatile);
   // store at dstAddr+ind
-  loop.CreateStore(val, loop.CreateGEP(dstAddr, ind), dstVolatile);
+  loop.CreateStore(val, loop.CreateGEP(loop.getInt8Ty(), dstAddr, ind),
+                   dstVolatile);

   // The value for ind coming from backedge is (ind + 1)
   Value *newind = loop.CreateAdd(ind, ConstantInt::get(indType, 1));
@@ -116,7 +118,7 @@ static void convertMemSetToLoop(Instruction *splitAt, Value *dstAddr,
   PHINode *ind = loop.CreatePHI(len->getType(), 0);
   ind->addIncoming(ConstantInt::get(len->getType(), 0), origBB);

-  loop.CreateStore(val, loop.CreateGEP(dstAddr, ind), false);
+  loop.CreateStore(val, loop.CreateGEP(val->getType(), dstAddr, ind), false);

   Value *newind = loop.CreateAdd(ind, ConstantInt::get(len->getType(), 1));
   ind->addIncoming(newind, loopBB);
@@ -396,16 +396,17 @@ Value *LowerBitSets::createBitSetTest(IRBuilder<> &B, BitSetInfo &BSI,
   }

   Constant *ByteArray = BAI->ByteArray;
+  Type *Ty = BAI->ByteArray->getValueType();
   if (!LinkerSubsectionsViaSymbols && AvoidReuse) {
     // Each use of the byte array uses a different alias. This makes the
     // backend less likely to reuse previously computed byte array addresses,
     // improving the security of the CFI mechanism based on this pass.
-    ByteArray = GlobalAlias::create(
-        BAI->ByteArray->getType()->getElementType(), 0,
-        GlobalValue::PrivateLinkage, "bits_use", ByteArray, M);
+    ByteArray = GlobalAlias::create(BAI->ByteArray->getValueType(), 0,
+                                    GlobalValue::PrivateLinkage, "bits_use",
+                                    ByteArray, M);
   }

-  Value *ByteAddr = B.CreateGEP(ByteArray, BitOffset);
+  Value *ByteAddr = B.CreateGEP(Ty, ByteArray, BitOffset);
   Value *Byte = B.CreateLoad(ByteAddr);

   Value *ByteAndMask = B.CreateAnd(Byte, BAI->Mask);
@@ -1075,8 +1075,8 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
   }
   case 2: {
     IRBuilder<> IRB(Pos);
-    Value *ShadowAddr1 =
-        IRB.CreateGEP(ShadowAddr, ConstantInt::get(DFS.IntptrTy, 1));
+    Value *ShadowAddr1 = IRB.CreateGEP(DFS.ShadowTy, ShadowAddr,
+                                       ConstantInt::get(DFS.IntptrTy, 1));
     return combineShadows(IRB.CreateAlignedLoad(ShadowAddr, ShadowAlign),
                           IRB.CreateAlignedLoad(ShadowAddr1, ShadowAlign), Pos);
   }
@@ -1127,7 +1127,8 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
     BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
     DT.addNewBlock(NextBB, LastBr->getParent());
     IRBuilder<> NextIRB(NextBB);
-    WideAddr = NextIRB.CreateGEP(WideAddr, ConstantInt::get(DFS.IntptrTy, 1));
+    WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
+                                 ConstantInt::get(DFS.IntptrTy, 1));
     Value *NextWideShadow = NextIRB.CreateAlignedLoad(WideAddr, ShadowAlign);
     ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
     LastBr->setSuccessor(0, NextBB);
@@ -925,7 +925,7 @@ void GCOVProfiler::insertIndirectCounterIncrement() {
   Value *ZExtPred = Builder.CreateZExt(Pred, Builder.getInt64Ty());
   Arg = std::next(Fn->arg_begin());
   Arg->setName("counters");
-  Value *GEP = Builder.CreateGEP(Arg, ZExtPred);
+  Value *GEP = Builder.CreateGEP(Type::getInt64PtrTy(*Ctx), Arg, ZExtPred);
   Value *Counter = Builder.CreateLoad(GEP, "counter");
   Cond = Builder.CreateICmpEQ(Counter,
                               Constant::getNullValue(
@@ -1637,7 +1637,7 @@ static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
            && "unit stride pointer IV must be i8*");

     IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
-    return Builder.CreateGEP(GEPBase, GEPOffset, "lftr.limit");
+    return Builder.CreateGEP(nullptr, GEPBase, GEPOffset, "lftr.limit");
   }
   else {
     // In any other case, convert both IVInit and IVCount to integers before
@@ -2135,7 +2135,7 @@ void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
       // split the alloca again later.
       unsigned AS = AI->getType()->getAddressSpace();
       Value *V = Builder.CreateBitCast(NewElts[Idx], Builder.getInt8PtrTy(AS));
-      V = Builder.CreateGEP(V, Builder.getInt64(NewOffset));
+      V = Builder.CreateGEP(Builder.getInt8Ty(), V, Builder.getInt64(NewOffset));

       IdxTy = NewElts[Idx]->getAllocatedType();
       uint64_t EltSize = DL.getTypeAllocSize(IdxTy) - NewOffset;
@@ -757,14 +757,16 @@ void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
         }
       }
       // Create an ugly GEP with a single index for each index.
-      ResultPtr = Builder.CreateGEP(ResultPtr, Idx, "uglygep");
+      ResultPtr =
+          Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Idx, "uglygep");
     }
   }

   // Create a GEP with the constant offset index.
   if (AccumulativeByteOffset != 0) {
     Value *Offset = ConstantInt::get(IntPtrTy, AccumulativeByteOffset);
-    ResultPtr = Builder.CreateGEP(ResultPtr, Offset, "uglygep");
+    ResultPtr =
+        Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Offset, "uglygep");
   }
   if (ResultPtr->getType() != Variadic->getType())
     ResultPtr = Builder.CreateBitCast(ResultPtr, Variadic->getType());
@@ -482,7 +482,7 @@ void StraightLineStrengthReduce::rewriteCandidateWithBasis(
         if (InBounds)
           Reduced = Builder.CreateInBoundsGEP(Reduced, Bump);
         else
-          Reduced = Builder.CreateGEP(Reduced, Bump);
+          Reduced = Builder.CreateGEP(Builder.getInt8Ty(), Reduced, Bump);
         Reduced = Builder.CreateBitCast(Reduced, C.Ins->getType());
       } else {
         // C = gep Basis, Bump
@@ -491,7 +491,7 @@ void StraightLineStrengthReduce::rewriteCandidateWithBasis(
         if (InBounds)
           Reduced = Builder.CreateInBoundsGEP(Basis.Ins, Bump);
         else
-          Reduced = Builder.CreateGEP(Basis.Ins, Bump);
+          Reduced = Builder.CreateGEP(nullptr, Basis.Ins, Bump);
       }
     }
     break;
@@ -686,7 +686,7 @@ public:
         Index = B.CreateNeg(Index);
       else if (!StepValue->isOne())
         Index = B.CreateMul(Index, StepValue);
-      return B.CreateGEP(StartValue, Index);
+      return B.CreateGEP(nullptr, StartValue, Index);

     case IK_NoInduction:
       return nullptr;
@@ -1839,7 +1839,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {

     for (unsigned Part = 0; Part < UF; ++Part) {
       // Calculate the pointer for the specific unroll-part.
-      Value *PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(Part * VF));
+      Value *PartPtr =
+          Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));

       if (Reverse) {
         // If we store to reverse consecutive memory locations then we need
@@ -1847,8 +1848,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
         StoredVal[Part] = reverseVector(StoredVal[Part]);
         // If the address is consecutive but reversed, then the
         // wide store needs to start at the last vector element.
-        PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(-Part * VF));
-        PartPtr = Builder.CreateGEP(PartPtr, Builder.getInt32(1 - VF));
+        PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
+        PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
         Mask[Part] = reverseVector(Mask[Part]);
       }

@@ -1871,13 +1872,14 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
     setDebugLocFromInst(Builder, LI);
     for (unsigned Part = 0; Part < UF; ++Part) {
       // Calculate the pointer for the specific unroll-part.
-      Value *PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(Part * VF));
+      Value *PartPtr =
+          Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));

       if (Reverse) {
         // If the address is consecutive but reversed, then the
         // wide load needs to start at the last vector element.
-        PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(-Part * VF));
-        PartPtr = Builder.CreateGEP(PartPtr, Builder.getInt32(1 - VF));
+        PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
+        PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
         Mask[Part] = reverseVector(Mask[Part]);
       }