Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2025-02-07 14:33:15 +00:00)
Fix memcheck interval ends for pointers with negative strides
Summary:
The checking pointer grouping algorithm assumes that the starts/ends of
the pointers are well formed (start <= end). The runtime memory checking
algorithm also assumes this by doing:

  start0 < end1 && start1 < end0

to detect conflicts. This check only works if start0 <= end0 and
start1 <= end1.

This change correctly orders the interval ends by either checking the
stride (if it is constant) or by using min/max SCEV expressions.

Reviewers: anemet, rengolin

Subscribers: rengolin, llvm-commits

Differential Revision: http://reviews.llvm.org/D11149

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@242400 91177308-0d34-0410-b5e6-96231b3b80d8
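To see why the ordering matters, here is a minimal standalone C++ sketch (illustrative only, not LLVM code; the Interval type and the conflict/normalize helpers are made-up names). It shows that the predicate start0 < end1 && start1 < end0 can miss a real overlap when one interval's endpoints arrive reversed, and that reordering the endpoints restores the check:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Illustrative only: a byte range covered by one pointer inside the loop.
struct Interval {
  uint64_t Start;
  uint64_t End;
};

// The runtime-check predicate assumed by the memcheck code:
// two ranges conflict iff Start0 < End1 && Start1 < End0.
// This is only meaningful when Start <= End for both intervals.
static bool conflict(const Interval &A, const Interval &B) {
  return A.Start < B.End && B.Start < A.End;
}

// Reorder the endpoints, mirroring what the patch does: swap for a known
// negative stride, or umin/umax when the stride is not constant.
static Interval normalize(Interval I) {
  return {std::min(I.Start, I.End), std::max(I.Start, I.End)};
}

int main() {
  // A pointer walking downwards produces "reversed" raw endpoints:
  // it starts at byte offset 60000 and ends at byte offset 20000.
  Interval Reversed{60000, 20000};
  // A second access covering offsets [30000, 40000) really does overlap it.
  Interval Other{30000, 40000};

  // With the reversed endpoints the predicate misses the conflict...
  assert(!conflict(Reversed, Other));
  // ...and reports it once the endpoints are ordered.
  assert(conflict(normalize(Reversed), Other));
  return 0;
}

The patch below restores exactly this ordering before the pointer's interval is recorded.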
This commit is contained in:
  parent 5649b37b0e
  commit e4877f25bd
@@ -127,9 +127,25 @@ void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
   assert(AR && "Invalid addrec expression");
   const SCEV *Ex = SE->getBackedgeTakenCount(Lp);
+
+  const SCEV *ScStart = AR->getStart();
   const SCEV *ScEnd = AR->evaluateAtIteration(Ex, *SE);
-  Pointers.emplace_back(Ptr, AR->getStart(), ScEnd, WritePtr, DepSetId, ASId,
-                        Sc);
+  const SCEV *Step = AR->getStepRecurrence(*SE);
+
+  // For expressions with negative step, the upper bound is ScStart and the
+  // lower bound is ScEnd.
+  if (const SCEVConstant *CStep = dyn_cast<const SCEVConstant>(Step)) {
+    if (CStep->getValue()->isNegative())
+      std::swap(ScStart, ScEnd);
+  } else {
+    // Fallback case: the step is not constant, but we can still
+    // get the upper and lower bounds of the interval by using min/max
+    // expressions.
+    ScStart = SE->getUMinExpr(ScStart, ScEnd);
+    ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
+  }
+
+  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
 }

 bool RuntimePointerChecking::needsChecking(
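A note on the fallback branch: when the stride is not a compile-time constant the patch cannot decide whether to swap, so it takes the unsigned min/max of the two endpoint expressions instead (the umax is taken against AR->getStart() because ScStart has already been overwritten by the umin at that point). A small integer analogue of that fallback, using made-up names and plain integers instead of SCEVs, shows why this yields an ordered interval for either sign of the step:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Integer analogue of the fallback: given the pointer value on the first
// iteration (ScStart) and on the last one (ScEnd), an ordered bound that is
// correct for either sign of the step is
//   Low  = min(ScStart, ScEnd)
//   High = max(ScStart, ScEnd).
static void boundedRange(uint64_t ScStart, uint64_t ScEnd,
                         uint64_t &Low, uint64_t &High) {
  Low = std::min(ScStart, ScEnd);
  High = std::max(ScStart, ScEnd);
}

int main() {
  uint64_t Low, High;
  // Positive step: endpoints already ordered, min/max leaves them unchanged.
  boundedRange(/*ScStart=*/1000, /*ScEnd=*/5000, Low, High);
  assert(Low == 1000 && High == 5000);
  // Negative step: endpoints arrive reversed, min/max reorders them.
  boundedRange(/*ScStart=*/5000, /*ScEnd=*/1000, Low, High);
  assert(Low == 1000 && High == 5000);
  return 0;
}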
test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll (new file, 89 lines)
@@ -0,0 +1,89 @@
; RUN: opt -loop-accesses -analyze < %s | FileCheck %s

; The runtime memory check code and the access grouping
; algorithm both assume that the start and end values
; for an access range are ordered (start <= stop).
; When generating checks for accesses with negative stride
; we need to take this into account and swap the interval
; ends.
;
; for (i = 0; i < 10000; i++) {
;   B[i] = A[15000 - i] * 3;
; }

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-gnueabi"

; CHECK: function 'f':
; CHECK: (Low: (20000 + %a) High: (60000 + %a))

@B = common global i32* null, align 8
@A = common global i32* null, align 8

define void @f() {
entry:
  %a = load i32*, i32** @A, align 8
  %b = load i32*, i32** @B, align 8
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %idx = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %negidx = sub i64 15000, %idx

  %arrayidxA0 = getelementptr inbounds i32, i32* %a, i64 %negidx
  %loadA0 = load i32, i32* %arrayidxA0, align 2

  %res = mul i32 %loadA0, 3

  %add = add nuw nsw i64 %idx, 1

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %idx
  store i32 %res, i32* %arrayidxB, align 2

  %exitcond = icmp eq i64 %idx, 10000
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; CHECK: function 'g':
; When the stride is not constant, we are forced to do umin/umax to get
; the interval limits.

; for (i = 0; i < 10000; i++) {
;   B[i] = A[15000 - step * i] * 3;
; }

; Here it is not obvious what the limits are, since 'step' could be negative.

; CHECK: Low: (-1 + (-1 * ((-60001 + (-1 * %a)) umax (-60001 + (40000 * %step) + (-1 * %a)))))
; CHECK: High: ((60000 + %a) umax (60000 + (-40000 * %step) + %a))

define void @g(i64 %step) {
entry:
  %a = load i32*, i32** @A, align 8
  %b = load i32*, i32** @B, align 8
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %idx = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %idx_mul = mul i64 %idx, %step
  %negidx = sub i64 15000, %idx_mul

  %arrayidxA0 = getelementptr inbounds i32, i32* %a, i64 %negidx
  %loadA0 = load i32, i32* %arrayidxA0, align 2

  %res = mul i32 %loadA0, 3

  %add = add nuw nsw i64 %idx, 1

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %idx
  store i32 %res, i32* %arrayidxB, align 2

  %exitcond = icmp eq i64 %idx, 10000
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}
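As a quick sanity check of the first CHECK line in 'f' (the arithmetic below is an editorial illustration, not part of the test): the load walks A[15000 - i] for i = 0 .. 10000 over i32 elements, so the pointer recurrence is {(60000 + %a),+,-4} and the backedge-taken count is 10000:

  ScStart = %a + 4 * 15000               = 60000 + %a
  ScEnd   = ScStart + (-4) * 10000       = 20000 + %a
  step -4 is a negative constant => swap => Low: (20000 + %a)  High: (60000 + %a)

which is exactly the interval the test expects.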