Switch to a signed representation for the dynamic offsets while walking
across the uses of the alloca.

It's entirely possible for negative numbers to come up here, and in some
rare cases simply doing the 2's complement arithmetic isn't the correct
decision. Notably, we can't zext the index of the GEP. The definition of
GEP is that these offsets are sign extended or truncated to the size of
the pointer, and then wrapping 2's complement arithmetic is used.

This patch fixes an issue that, as far as I can tell, comes up with *no*
input from the buildbots or bootstrap. The only place where it manifested,
disturbingly, is Clang's own regression test suite. A reduced and targeted
collection of tests is added to cope with this. Note that I've tried to pin
down the potential cases of overflow, but may have missed some. I've tried
to add a few cases to test this, but it's hard because LLVM has quite
limited support for >64-bit constructs.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@164475 91177308-0d34-0410-b5e6-96231b3b80d8
parent 85aa4f6eee
commit 02e92a0b5d
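To make the zext-vs-sext point concrete, here is a minimal standalone sketch of the accumulation scheme the patch adopts in computeConstantGEPOffset. The free-standing helper, its name accumulateGEPIndex, and its parameters are invented for illustration; only the APInt calls mirror the patch. The index is sign extended (or truncated) to the pointer width, scaled and accumulated with wrapping 2's complement arithmetic, and rejected once it can no longer be modeled in a signed 64-bit offset. Zero extending a 32-bit index of -2 would instead yield 4294967294 and a wildly wrong byte offset.

// Sketch only: not the patch itself; names are hypothetical.
#include "llvm/ADT/APInt.h"
#include <cstdint>

using llvm::APInt;

static bool accumulateGEPIndex(int64_t &Offset, const APInt &IdxVal,
                               uint64_t ElemSize, unsigned PointerBits) {
  // GEP semantics: indices are sign extended or truncated to the pointer
  // width; a zext here would destroy negative indices.
  APInt Index = IdxVal.sextOrTrunc(PointerBits);
  // Scale by the element size and fold in the running offset using
  // wrapping 2's complement arithmetic at pointer width.
  Index *= APInt(PointerBits, ElemSize);
  Index += APInt(PointerBits, (uint64_t)Offset, /*isSigned*/true);
  // Give up if the cumulative offset no longer fits in an int64_t.
  if (!Index.isSignedIntN(64))
    return false;
  Offset = Index.getSExtValue();
  return true;
}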
@@ -403,15 +403,15 @@ protected:
   struct OffsetUse {
     Use *U;
-    uint64_t Offset;
+    int64_t Offset;
   };
   SmallVector<OffsetUse, 8> Queue;
 
   // The active offset and use while visiting.
   Use *U;
-  uint64_t Offset;
+  int64_t Offset;
 
-  void enqueueUsers(Instruction &I, uint64_t UserOffset) {
+  void enqueueUsers(Instruction &I, int64_t UserOffset) {
     SmallPtrSet<User *, 8> UserSet;
     for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
          UI != UE; ++UI) {
@@ -423,7 +423,7 @@ protected:
     }
   }
 
-  bool computeConstantGEPOffset(GetElementPtrInst &GEPI, uint64_t &GEPOffset) {
+  bool computeConstantGEPOffset(GetElementPtrInst &GEPI, int64_t &GEPOffset) {
     GEPOffset = Offset;
     for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI);
          GTI != GTE; ++GTI) {
@@ -437,12 +437,37 @@ protected:
       if (StructType *STy = dyn_cast<StructType>(*GTI)) {
         unsigned ElementIdx = OpC->getZExtValue();
         const StructLayout *SL = TD.getStructLayout(STy);
-        GEPOffset += SL->getElementOffset(ElementIdx);
+        uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
+        // Check that we can continue to model this GEP in a signed 64-bit offset.
+        if (ElementOffset > INT64_MAX ||
+            (GEPOffset >= 0 &&
+             ((uint64_t)GEPOffset + ElementOffset) > INT64_MAX)) {
+          DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding "
+                       << "what can be represented in an int64_t!\n"
+                       << " alloca: " << P.AI << "\n");
+          return false;
+        }
+        if (GEPOffset < 0)
+          GEPOffset = ElementOffset + (uint64_t)-GEPOffset;
+        else
+          GEPOffset += ElementOffset;
         continue;
       }
 
-      GEPOffset
-          += OpC->getZExtValue() * TD.getTypeAllocSize(GTI.getIndexedType());
+      APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits());
+      Index *= APInt(Index.getBitWidth(),
+                     TD.getTypeAllocSize(GTI.getIndexedType()));
+      Index += APInt(Index.getBitWidth(), (uint64_t)GEPOffset,
+                     /*isSigned*/true);
+      // Check if the result can be stored in our int64_t offset.
+      if (!Index.isSignedIntN(sizeof(GEPOffset) * 8)) {
+        DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding "
+                     << "what can be represented in an int64_t!\n"
+                     << " alloca: " << P.AI << "\n");
+        return false;
+      }
+
+      GEPOffset = Index.getSExtValue();
     }
     return true;
   }
@@ -495,12 +520,11 @@ private:
     return false;
   }
 
-  void insertUse(Instruction &I, uint64_t Offset, uint64_t Size,
+  void insertUse(Instruction &I, int64_t Offset, uint64_t Size,
                  bool IsSplittable = false) {
-    uint64_t BeginOffset = Offset, EndOffset = Offset + Size;
-
-    // Completely skip uses which start outside of the allocation.
-    if (BeginOffset >= AllocSize) {
+    // Completely skip uses which don't overlap the allocation.
+    if ((Offset >= 0 && (uint64_t)Offset >= AllocSize) ||
+        (Offset < 0 && (uint64_t)-Offset >= Size)) {
       DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
                    << " which starts past the end of the " << AllocSize
                    << " byte alloca:\n"
@@ -509,8 +533,22 @@ private:
       return;
     }
 
-    // Clamp the size to the allocation.
-    if (EndOffset > AllocSize) {
+    // Clamp the start to the beginning of the allocation.
+    if (Offset < 0) {
+      DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
+                   << " to start at the beginning of the alloca:\n"
+                   << " alloca: " << P.AI << "\n"
+                   << " use: " << I << "\n");
+      Size -= (uint64_t)-Offset;
+      Offset = 0;
+    }
+
+    uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size;
+
+    // Clamp the end offset to the end of the allocation. Note that this is
+    // formulated to handle even the case where "BeginOffset + Size" overflows.
+    assert(AllocSize >= BeginOffset); // Established above.
+    if (Size > AllocSize - BeginOffset) {
       DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
                    << " to remain within the " << AllocSize << " byte alloca:\n"
                    << " alloca: " << P.AI << "\n"
@@ -530,7 +568,7 @@ private:
     P.Partitions.push_back(New);
   }
 
-  bool handleLoadOrStore(Type *Ty, Instruction &I, uint64_t Offset) {
+  bool handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) {
     uint64_t Size = TD.getTypeStoreSize(Ty);
 
     // If this memory access can be shown to *statically* extend outside the
@@ -540,7 +578,8 @@ private:
     // risk of overflow.
     // FIXME: We should instead consider the pointer to have escaped if this
     // function is being instrumented for addressing bugs or race conditions.
-    if (Offset >= AllocSize || Size > AllocSize || Offset + Size > AllocSize) {
+    if (Offset < 0 || (uint64_t)Offset >= AllocSize ||
+        Size > (AllocSize - (uint64_t)Offset)) {
       DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte "
                    << (isa<LoadInst>(I) ? "load" : "store") << " @" << Offset
                    << " which extends past the end of the " << AllocSize
@@ -560,7 +599,7 @@ private:
   }
 
   bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
-    uint64_t GEPOffset;
+    int64_t GEPOffset;
     if (!computeConstantGEPOffset(GEPI, GEPOffset))
       return markAsEscaping(GEPI);
 
@@ -784,16 +823,25 @@ private:
     P.DeadUsers.push_back(&I);
   }
 
-  void insertUse(Instruction &User, uint64_t Offset, uint64_t Size) {
-    uint64_t BeginOffset = Offset, EndOffset = Offset + Size;
-
+  void insertUse(Instruction &User, int64_t Offset, uint64_t Size) {
     // If the use extends outside of the allocation, record it as a dead use
     // for elimination later.
-    if (BeginOffset >= AllocSize || Size == 0)
+    if ((uint64_t)Offset >= AllocSize ||
+        (Offset < 0 && (uint64_t)-Offset >= Size))
       return markAsDead(User);
 
-    // Bound the use by the size of the allocation.
-    if (EndOffset > AllocSize)
+    // Clamp the start to the beginning of the allocation.
+    if (Offset < 0) {
+      Size -= (uint64_t)-Offset;
+      Offset = 0;
+    }
+
+    uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size;
+
+    // Clamp the end offset to the end of the allocation. Note that this is
+    // formulated to handle even the case where "BeginOffset + Size" overflows.
+    assert(AllocSize >= BeginOffset); // Established above.
+    if (Size > AllocSize - BeginOffset)
       EndOffset = AllocSize;
 
     // NB: This only works if we have zero overlapping partitions.
@@ -812,14 +860,15 @@ private:
     }
   }
 
-  void handleLoadOrStore(Type *Ty, Instruction &I, uint64_t Offset) {
+  void handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) {
     uint64_t Size = TD.getTypeStoreSize(Ty);
 
     // If this memory access can be shown to *statically* extend outside the
     // bounds of of the allocation, it's behavior is undefined, so simply
     // ignore it. Note that this is more strict than the generic clamping
    // behavior of insertUse.
-    if (Offset >= AllocSize || Size > AllocSize || Offset + Size > AllocSize)
+    if (Offset < 0 || (uint64_t)Offset >= AllocSize ||
+        Size > (AllocSize - (uint64_t)Offset))
      return markAsDead(I);
 
    insertUse(I, Offset, Size);
@@ -836,7 +885,7 @@ private:
     if (GEPI.use_empty())
       return markAsDead(GEPI);
 
-    uint64_t GEPOffset;
+    int64_t GEPOffset;
     if (!computeConstantGEPOffset(GEPI, GEPOffset))
       llvm_unreachable("Unable to compute constant offset for use");
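Before the new regression tests below, here is a minimal standalone sketch of the clamping scheme the insertUse hunks above introduce. The helper clampUse and its parameters are invented for illustration and assume the caller has already rejected uses that do not overlap the allocation at all: a use starting before the alloca is clamped to begin at offset zero, and the end is clamped by comparing Size against AllocSize - BeginOffset so that BeginOffset + Size can never overflow a uint64_t.

// Sketch only: illustrative helper, not part of the patch.
#include <cassert>
#include <cstdint>

static void clampUse(int64_t Offset, uint64_t Size, uint64_t AllocSize,
                     uint64_t &BeginOffset, uint64_t &EndOffset) {
  // Precondition (checked by the caller in the patch): the use overlaps the
  // allocation, i.e. neither (Offset < 0 && (uint64_t)-Offset >= Size) nor
  // (Offset >= 0 && (uint64_t)Offset >= AllocSize) holds.
  if (Offset < 0) {
    // Drop the bytes that fall before the start of the alloca.
    Size -= (uint64_t)-Offset;
    Offset = 0;
  }
  BeginOffset = (uint64_t)Offset;
  assert(AllocSize >= BeginOffset && "use starts past the end of the alloca");
  // Comparing "Size > AllocSize - BeginOffset" avoids the overflow that a
  // naive "BeginOffset + Size > AllocSize" could hit for huge sizes.
  EndOffset = Size > AllocSize - BeginOffset ? AllocSize : BeginOffset + Size;
}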
@@ -774,3 +774,61 @@ entry:
   %val = load i64* %gep
   ret i32 undef
 }
+
+define i32 @test20() {
+; Ensure we can track negative offsets (before the beginning of the alloca) and
+; negative relative offsets from offsets starting past the end of the alloca.
+; CHECK: @test20
+; CHECK-NOT: alloca
+; CHECK: %[[sum1:.*]] = add i32 1, 2
+; CHECK: %[[sum2:.*]] = add i32 %[[sum1]], 3
+; CHECK: ret i32 %[[sum2]]
+
+entry:
+  %a = alloca [3 x i32]
+  %gep1 = getelementptr [3 x i32]* %a, i32 0, i32 0
+  store i32 1, i32* %gep1
+  %gep2.1 = getelementptr [3 x i32]* %a, i32 0, i32 -2
+  %gep2.2 = getelementptr i32* %gep2.1, i32 3
+  store i32 2, i32* %gep2.2
+  %gep3.1 = getelementptr [3 x i32]* %a, i32 0, i32 14
+  %gep3.2 = getelementptr i32* %gep3.1, i32 -12
+  store i32 3, i32* %gep3.2
+
+  %load1 = load i32* %gep1
+  %load2 = load i32* %gep2.2
+  %load3 = load i32* %gep3.2
+  %sum1 = add i32 %load1, %load2
+  %sum2 = add i32 %sum1, %load3
+  ret i32 %sum2
+}
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+
+define i8 @test21() {
+; Test allocations and offsets which border on overflow of the int64_t used
+; internally. This is really awkward to really test as LLVM doesn't really
+; support such extreme constructs cleanly.
+; CHECK: @test21
+; CHECK-NOT: alloca
+; CHECK: or i8 -1, -1
+
+entry:
+  %a = alloca [2305843009213693951 x i8]
+  %gep0 = getelementptr [2305843009213693951 x i8]* %a, i64 0, i64 2305843009213693949
+  store i8 255, i8* %gep0
+  %gep1 = getelementptr [2305843009213693951 x i8]* %a, i64 0, i64 -9223372036854775807
+  %gep2 = getelementptr i8* %gep1, i64 -1
+  call void @llvm.memset.p0i8.i64(i8* %gep2, i8 0, i64 18446744073709551615, i32 1, i1 false)
+  %gep3 = getelementptr i8* %gep1, i64 9223372036854775807
+  %gep4 = getelementptr i8* %gep3, i64 9223372036854775807
+  %gep5 = getelementptr i8* %gep4, i64 -6917529027641081857
+  store i8 255, i8* %gep5
+  %cast1 = bitcast i8* %gep4 to i32*
+  store i32 0, i32* %cast1
+  %load = load i8* %gep0
+  %gep6 = getelementptr i8* %gep0, i32 1
+  %load2 = load i8* %gep6
+  %result = or i8 %load, %load2
+  ret i8 %result
+}