Change GetPointerBaseWithConstantOffset's DataLayout argument from a
reference to a pointer, so that it can handle the case where DataLayout
is not available and behave conservatively.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@174024 91177308-0d34-0410-b5e6-96231b3b80d8
commit a070d2a035
parent 8ed701da9a
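At a typical call site the change plays out as in the sketch below, distilled from the isSafeToLoadUnconditionally hunk further down; the helper name is illustrative, and the pre-commit form is shown only as a comment since the two signatures cannot coexist in one tree.

    #include <cstdint>
    #include "llvm/Analysis/ValueTracking.h"

    // After r174024: TD may be null, and the callee is conservative on its
    // own, so the per-call-site "if (TD)" guard disappears.
    static llvm::Value *baseAfter(llvm::Value *V, const llvm::DataLayout *TD,
                                  int64_t &ByteOffset) {
      ByteOffset = 0;
      return llvm::GetPointerBaseWithConstantOffset(V, ByteOffset, TD);
    }

    // Before r174024 the same caller had to guard the call and dereference,
    // because the old overload took a reference:
    //   Value *Base = V;
    //   if (TD)
    //     Base = GetPointerBaseWithConstantOffset(V, ByteOffset, *TD);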
@@ -117,10 +117,10 @@ namespace llvm {
   /// it can be expressed as a base pointer plus a constant offset. Return the
   /// base and offset to the caller.
   Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
-                                          const DataLayout &TD);
+                                          const DataLayout *TD);
   static inline const Value *
   GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
-                                   const DataLayout &TD) {
+                                   const DataLayout *TD) {
     return GetPointerBaseWithConstantOffset(const_cast<Value*>(Ptr), Offset,TD);
   }
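With the declarations above taking a pointer, callers that hold a DataLayout reference (as in the GVN and MemoryDependenceAnalysis hunks below) simply pass its address, while callers that only have a possibly-null pointer forward it unchanged. A minimal sketch of the reference-holding case; the helper name is illustrative, not part of the commit:

    #include <cstdint>
    #include "llvm/Analysis/ValueTracking.h"

    static llvm::Value *baseOf(llvm::Value *Ptr, const llvm::DataLayout &TD,
                               int64_t &Offset) {
      Offset = 0;
      // The callee now wants a pointer, so a reference-holding caller writes &TD.
      return llvm::GetPointerBaseWithConstantOffset(Ptr, Offset, &TD);
    }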
@@ -412,51 +412,49 @@ void Lint::visitMemoryReference(Instruction &I,
   }

   // Check for buffer overflows and misalignment.
-  if (TD) {
-    // Only handles memory references that read/write something simple like an
-    // alloca instruction or a global variable.
-    int64_t Offset = 0;
-    if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, *TD)) {
-      // OK, so the access is to a constant offset from Ptr. Check that Ptr is
-      // something we can handle and if so extract the size of this base object
-      // along with its alignment.
-      uint64_t BaseSize = AliasAnalysis::UnknownSize;
-      unsigned BaseAlign = 0;
+  // Only handles memory references that read/write something simple like an
+  // alloca instruction or a global variable.
+  int64_t Offset = 0;
+  if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, TD)) {
+    // OK, so the access is to a constant offset from Ptr. Check that Ptr is
+    // something we can handle and if so extract the size of this base object
+    // along with its alignment.
+    uint64_t BaseSize = AliasAnalysis::UnknownSize;
+    unsigned BaseAlign = 0;

-      if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
-        Type *ATy = AI->getAllocatedType();
-        if (!AI->isArrayAllocation() && ATy->isSized())
-          BaseSize = TD->getTypeAllocSize(ATy);
-        BaseAlign = AI->getAlignment();
-        if (BaseAlign == 0 && ATy->isSized())
-          BaseAlign = TD->getABITypeAlignment(ATy);
-      } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
-        // If the global may be defined differently in another compilation unit
-        // then don't warn about funky memory accesses.
-        if (GV->hasDefinitiveInitializer()) {
-          Type *GTy = GV->getType()->getElementType();
-          if (GTy->isSized())
-            BaseSize = TD->getTypeAllocSize(GTy);
-          BaseAlign = GV->getAlignment();
-          if (BaseAlign == 0 && GTy->isSized())
-            BaseAlign = TD->getABITypeAlignment(GTy);
-        }
+    if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
+      Type *ATy = AI->getAllocatedType();
+      if (TD && !AI->isArrayAllocation() && ATy->isSized())
+        BaseSize = TD->getTypeAllocSize(ATy);
+      BaseAlign = AI->getAlignment();
+      if (TD && BaseAlign == 0 && ATy->isSized())
+        BaseAlign = TD->getABITypeAlignment(ATy);
+    } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
+      // If the global may be defined differently in another compilation unit
+      // then don't warn about funky memory accesses.
+      if (GV->hasDefinitiveInitializer()) {
+        Type *GTy = GV->getType()->getElementType();
+        if (TD && GTy->isSized())
+          BaseSize = TD->getTypeAllocSize(GTy);
+        BaseAlign = GV->getAlignment();
+        if (TD && BaseAlign == 0 && GTy->isSized())
+          BaseAlign = TD->getABITypeAlignment(GTy);
       }
+    }

-      // Accesses from before the start or after the end of the object are not
-      // defined.
-      Assert1(Size == AliasAnalysis::UnknownSize ||
-              BaseSize == AliasAnalysis::UnknownSize ||
-              (Offset >= 0 && Offset + Size <= BaseSize),
-              "Undefined behavior: Buffer overflow", &I);
+    // Accesses from before the start or after the end of the object are not
+    // defined.
+    Assert1(Size == AliasAnalysis::UnknownSize ||
+            BaseSize == AliasAnalysis::UnknownSize ||
+            (Offset >= 0 && Offset + Size <= BaseSize),
+            "Undefined behavior: Buffer overflow", &I);

-      // Accesses that say that the memory is more aligned than it is are not
-      // defined.
-      if (Align == 0 && Ty && Ty->isSized())
-        Align = TD->getABITypeAlignment(Ty);
-      Assert1(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
-              "Undefined behavior: Memory reference address is misaligned", &I);
-    }
-  }
+    // Accesses that say that the memory is more aligned than it is are not
+    // defined.
+    if (TD && Align == 0 && Ty && Ty->isSized())
+      Align = TD->getABITypeAlignment(Ty);
+    Assert1(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
+            "Undefined behavior: Memory reference address is misaligned", &I);
+  }
 }
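The conservative fallback in Lint works because, without a DataLayout, BaseSize is never refined away from AliasAnalysis::UnknownSize and no ABI alignment is ever substituted, so the two asserts can only fire on information that is explicit in the IR. A standalone model of the buffer-overflow condition, using plain integers instead of the LLVM types purely for illustration:

    #include <cstdint>

    // Stand-in for AliasAnalysis::UnknownSize.
    static const uint64_t UnknownSize = ~0ULL;

    // Models the Assert1 condition from the hunk above. When no DataLayout is
    // available, BaseSize stays UnknownSize, so the second disjunct keeps the
    // check from ever reporting a spurious overflow.
    static bool overflowCheckHolds(uint64_t Size, uint64_t BaseSize, int64_t Offset) {
      return Size == UnknownSize || BaseSize == UnknownSize ||
             (Offset >= 0 && uint64_t(Offset) + Size <= BaseSize);
    }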
@@ -57,8 +57,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
                                        unsigned Align, const DataLayout *TD) {
   int64_t ByteOffset = 0;
   Value *Base = V;
-  if (TD)
-    Base = GetPointerBaseWithConstantOffset(V, ByteOffset, *TD);
+  Base = GetPointerBaseWithConstantOffset(V, ByteOffset, TD);

   if (ByteOffset < 0) // out of bounds
     return false;
@@ -262,7 +262,7 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,

   // If we haven't already computed the base/offset of MemLoc, do so now.
   if (MemLocBase == 0)
-    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, *TD);
+    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, TD);

   unsigned Size = MemoryDependenceAnalysis::
     getLoadLoadClobberFullWidthSize(MemLocBase, MemLocOffs, MemLoc.Size,
@@ -287,7 +287,7 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
   // Get the base of this load.
   int64_t LIOffs = 0;
   const Value *LIBase =
-    GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, TD);
+    GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &TD);

   // If the two pointers are not based on the same pointer, we can't tell that
   // they are related.
@@ -1671,8 +1671,10 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
 /// it can be expressed as a base pointer plus a constant offset. Return the
 /// base and offset to the caller.
 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
-                                              const DataLayout &TD) {
-  unsigned BitWidth = TD.getPointerSizeInBits();
+                                              const DataLayout *TD) {
+  // Without DataLayout, conservatively assume 64-bit offsets, which is
+  // the widest we support.
+  unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
   APInt ByteOffset(BitWidth, 0);
   while (1) {
     if (Ptr->getType()->isVectorTy())
@@ -1680,7 +1682,7 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,

     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
       APInt GEPOffset(BitWidth, 0);
-      if (!GEP->accumulateConstantOffset(TD, GEPOffset))
+      if (TD && !GEP->accumulateConstantOffset(*TD, GEPOffset))
         break;
       ByteOffset += GEPOffset;
       Ptr = GEP->getPointerOperand();
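The one piece of DataLayout the rewritten function needs up front is the pointer width for its APInt accumulator, and the hunk above falls back to 64 bits, the widest width supported, when none is available. The same idiom in isolation; this is a hypothetical helper, not LLVM code, and the header path assumes a tree from after the January 2013 IR reorganization:

    #include "llvm/ADT/APInt.h"
    #include "llvm/IR/DataLayout.h"

    // Pick the offset accumulator width conservatively: the target's pointer
    // width when DataLayout is present, otherwise the widest supported
    // (64 bits), so accumulated byte offsets are never truncated.
    static llvm::APInt makeOffsetAccumulator(const llvm::DataLayout *TD) {
      unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
      return llvm::APInt(BitWidth, 0);
    }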
@@ -376,10 +376,10 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
   // Check to see if the later store is to the entire object (either a global,
   // an alloca, or a byval argument). If so, then it clearly overwrites any
   // other store to the same object.
-  const DataLayout &TD = *AA.getDataLayout();
+  const DataLayout *TD = AA.getDataLayout();

-  const Value *UO1 = GetUnderlyingObject(P1, &TD),
-              *UO2 = GetUnderlyingObject(P2, &TD);
+  const Value *UO1 = GetUnderlyingObject(P1, TD),
+              *UO2 = GetUnderlyingObject(P2, TD);

   // If we can't resolve the same pointers to the same object, then we can't
   // analyze them at all.
@@ -849,8 +849,8 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
     return -1;

   int64_t StoreOffset = 0, LoadOffset = 0;
-  Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr, StoreOffset,TD);
-  Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
+  Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr,StoreOffset,&TD);
+  Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, &TD);
   if (StoreBase != LoadBase)
     return -1;

@@ -945,7 +945,7 @@ static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
   // then we should widen it!
   int64_t LoadOffs = 0;
   const Value *LoadBase =
-    GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, TD);
+    GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, &TD);
   unsigned LoadSize = TD.getTypeStoreSize(LoadTy);

   unsigned Size = MemoryDependenceAnalysis::
@@ -192,10 +192,10 @@ TEST(InstructionsTest, VectorGep) {
                 "2:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80"
                 ":128:128-n8:16:32:64-S128");
   // Make sure we don't crash
-  GetPointerBaseWithConstantOffset(Gep0, Offset, TD);
-  GetPointerBaseWithConstantOffset(Gep1, Offset, TD);
-  GetPointerBaseWithConstantOffset(Gep2, Offset, TD);
-  GetPointerBaseWithConstantOffset(Gep3, Offset, TD);
+  GetPointerBaseWithConstantOffset(Gep0, Offset, &TD);
+  GetPointerBaseWithConstantOffset(Gep1, Offset, &TD);
+  GetPointerBaseWithConstantOffset(Gep2, Offset, &TD);
+  GetPointerBaseWithConstantOffset(Gep3, Offset, &TD);

   // Gep of Geps
   GetElementPtrInst *GepII0 = GetElementPtrInst::Create(Gep0, C2xi32b);
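Since the whole point of the new signature is to tolerate a missing DataLayout, a natural companion to the calls above, shown here only as a hypothetical sketch and not part of r174024, is to exercise the null pointer path in the same style:

      // Hypothetical addition (not in this commit): the conservative path
      // should also not crash when no DataLayout is supplied.
      GetPointerBaseWithConstantOffset(Gep0, Offset, 0);
      GetPointerBaseWithConstantOffset(Gep1, Offset, 0);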