Refine memory dependence's notion of volatile semantics
According to my reading of the LangRef, volatiles are only ordered with respect to other volatiles. It is entirely legal and profitable to forward unrelated loads over a volatile load. This patch implements this for GVN by refining the transition rules MemoryDependenceAnalysis uses when encountering a volatile.

The added test cases show where the extra flexibility is profitable for local dependence optimizations. I have a related change (227110) which will extend this to non-local dependence (i.e. PRE), but that's essentially orthogonal to the semantic change in this patch. I have tested the two together and can confirm that PRE works over a volatile load with both changes. I will be submitting a PRE w/volatiles test case separately in the near future.

Differential Revision: http://reviews.llvm.org/D6901

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@227112 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 956d6f0cf5
commit cce3c83917
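In essence, the refined transition rule is: a volatile access encountered during the dependence scan acts as a clobber only when the query instruction is itself volatile (or unknown); otherwise it is subject to the ordinary aliasing checks, so unrelated loads can be forwarded across it. The condensed sketch below restates that rule as a standalone predicate. The helper name volatileActsAsClobber is illustrative only and does not appear in the patch, which inlines the equivalent checks into getPointerDependencyFrom (see the hunks below).

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Illustrative sketch only: this helper does not exist in the patch; the real
// checks are inlined into MemoryDependenceAnalysis::getPointerDependencyFrom.
static bool volatileActsAsClobber(const LoadInst *Encountered,
                                  const Instruction *QueryInst) {
  if (!Encountered->isVolatile())
    return false;               // not volatile: ordinary alias analysis applies
  if (!QueryInst)
    return true;                // unknown query instruction *may* be volatile
  // Volatile-volatile pairs must stay ordered; any other access may be
  // reordered (and thus forwarded) across the volatile load.
  if (auto *LI = dyn_cast<LoadInst>(QueryInst))
    return LI->isVolatile();
  if (auto *SI = dyn_cast<StoreInst>(QueryInst))
    return SI->isVolatile();
  if (auto *CX = dyn_cast<AtomicCmpXchgInst>(QueryInst))
    return CX->isVolatile();
  return false;                 // non-volatile query: no ordering implied
}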
lib/Analysis/MemoryDependenceAnalysis.cpp

@@ -362,6 +362,17 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
   }
 }
 
+static bool isVolatile(Instruction *Inst) {
+  if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
+    return LI->isVolatile();
+  else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
+    return SI->isVolatile();
+  else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
+    return AI->isVolatile();
+  return false;
+}
+
+
 /// getPointerDependencyFrom - Return the instruction on which a memory
 /// location depends. If isLoad is true, this routine ignores may-aliases with
 /// read-only operations. If isLoad is false, this routine ignores may-aliases

@@ -448,12 +459,26 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
     // does not alias with when this atomic load indicates that another thread may
     // be accessing the location.
     if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+
+      // While volatile access cannot be eliminated, they do not have to clobber
+      // non-aliasing locations, as normal accesses, for example, can be safely
+      // reordered with volatile accesses.
+      if (LI->isVolatile()) {
+        if (!QueryInst)
+          // Original QueryInst *may* be volatile
+          return MemDepResult::getClobber(LI);
+        if (isVolatile(QueryInst))
+          // Ordering required if QueryInst is itself volatile
+          return MemDepResult::getClobber(LI);
+        // Otherwise, volatile doesn't imply any special ordering
+      }
+
       // Atomic loads have complications involved.
       // A Monotonic (or higher) load is OK if the query inst is itself not atomic.
       // An Acquire (or higher) load sets the HasSeenAcquire flag, so that any
       // release store will know to return getClobber.
       // FIXME: This is overly conservative.
-      if (!LI->isUnordered()) {
+      if (LI->isAtomic() && LI->getOrdering() > Unordered) {
         if (!QueryInst)
           return MemDepResult::getClobber(LI);
         if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) {

@@ -470,13 +495,6 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
           HasSeenAcquire = true;
       }
 
-      // FIXME: this is overly conservative.
-      // While volatile access cannot be eliminated, they do not have to clobber
-      // non-aliasing locations, as normal accesses can for example be reordered
-      // with volatile accesses.
-      if (LI->isVolatile())
-        return MemDepResult::getClobber(LI);
-
       AliasAnalysis::Location LoadLoc = AA->getLocation(LI);
 
       // If we found a pointer, check if it could be the same as our pointer.

@@ -890,14 +908,7 @@ getNonLocalPointerDependency(Instruction *QueryInst,
   // Doing so would require piping through the QueryInst all the way through.
   // TODO: volatiles can't be elided, but they can be reordered with other
   // non-volatile accesses.
-  auto isVolatile = [](Instruction *Inst) {
-    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
-      return LI->isVolatile();
-    } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
-      return SI->isVolatile();
-    }
-    return false;
-  };
 
   // We currently give up on any instruction which is ordered, but we do handle
   // atomic instructions which are unordered.
   // TODO: Handle ordered instructions
test/Transforms/GVN/volatile.ll (new file, 75 lines)
@@ -0,0 +1,75 @@
; Tests that check our handling of volatile instructions encountered
; when scanning for dependencies
; RUN: opt -basicaa -gvn -S < %s | FileCheck %s

; Check that we can bypass a volatile load when searching
; for dependencies of a non-volatile load
define i32 @test1(i32* nocapture %p, i32* nocapture %q) {
; CHECK-LABEL: test1
; CHECK: %0 = load volatile i32* %q
; CHECK-NEXT: ret i32 0
entry:
  %x = load i32* %p
  load volatile i32* %q
  %y = load i32* %p
  %add = sub i32 %y, %x
  ret i32 %add
}

; We can not value forward if the query instruction is
; volatile, this would be (in effect) removing the volatile load
define i32 @test2(i32* nocapture %p, i32* nocapture %q) {
; CHECK-LABEL: test2
; CHECK: %x = load i32* %p
; CHECK-NEXT: %y = load volatile i32* %p
; CHECK-NEXT: %add = sub i32 %y, %x
entry:
  %x = load i32* %p
  %y = load volatile i32* %p
  %add = sub i32 %y, %x
  ret i32 %add
}

; If the query instruction is itself volatile, we *cannot*
; reorder it even if p and q are noalias
define i32 @test3(i32* noalias nocapture %p, i32* noalias nocapture %q) {
; CHECK-LABEL: test3
; CHECK: %x = load i32* %p
; CHECK-NEXT: %0 = load volatile i32* %q
; CHECK-NEXT: %y = load volatile i32* %p
entry:
  %x = load i32* %p
  load volatile i32* %q
  %y = load volatile i32* %p
  %add = sub i32 %y, %x
  ret i32 %add
}

; If an encountered instruction is both volatile and ordered,
; we need to use the strictest ordering of either. In this
; case, the ordering prevents forwarding.
define i32 @test4(i32* noalias nocapture %p, i32* noalias nocapture %q) {
; CHECK-LABEL: test4
; CHECK: %x = load i32* %p
; CHECK-NEXT: %0 = load atomic volatile i32* %q seq_cst
; CHECK-NEXT: %y = load atomic i32* %p seq_cst
entry:
  %x = load i32* %p
  load atomic volatile i32* %q seq_cst, align 4
  %y = load atomic i32* %p seq_cst, align 4
  %add = sub i32 %y, %x
  ret i32 %add
}

; Value forwarding from a volatile load is perfectly legal
define i32 @test5(i32* nocapture %p, i32* nocapture %q) {
; CHECK-LABEL: test5
; CHECK: %x = load volatile i32* %p
; CHECK-NEXT: ret i32 0
entry:
  %x = load volatile i32* %p
  %y = load i32* %p
  %add = sub i32 %y, %x
  ret i32 %add
}
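For reference, lit substitutes %s in the RUN line with the path of the test file itself, so the checks above amount to roughly the following invocation (assuming an LLVM build with opt and FileCheck on the PATH, which the commit itself does not specify):

  opt -basicaa -gvn -S < test/Transforms/GVN/volatile.ll | FileCheck test/Transforms/GVN/volatile.ll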