mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-01-31 10:34:17 +00:00
[PM/AA] Fix *numerous* serious bugs in GlobalsModRef found by
inspection. While we want to handle calls specially in this code because they should have been modeled by the call graph analysis that precedes it, we should *not* be re-implementing the predicates for whether an instruction reads or writes memory. Those are well defined already. Notably, at least the following issues seem to be clearly missed before: - Ordered atomic loads can "write" to memory by causing writes from other threads to become visible. Similarly for ordered atomic stores. - AtomicRMW instructions quite obviously both read and write to memory. - AtomicCmpXchg instructions also read and write to memory. - Fences read and write to memory. - Invokes of intrinsics or memory allocation functions. I don't have any test cases, and I suspect this has never really come up in the real world. But there is no reason why it wouldn't, and it makes the code simpler to do this the right way. While here, I've tried to make the loops significantly simpler as well and added helpful comments as to what is going on. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@242281 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
a018099669
commit
7d51923226
@ -439,31 +439,40 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {
|
||||
}
|
||||
|
||||
// Scan the function bodies for explicit loads or stores.
|
||||
for (unsigned i = 0, e = SCC.size(); i != e && FunctionEffect != ModRef;
|
||||
++i)
|
||||
for (inst_iterator II = inst_begin(SCC[i]->getFunction()),
|
||||
E = inst_end(SCC[i]->getFunction());
|
||||
II != E && FunctionEffect != ModRef; ++II)
|
||||
if (LoadInst *LI = dyn_cast<LoadInst>(&*II)) {
|
||||
FunctionEffect |= Ref;
|
||||
if (LI->isVolatile())
|
||||
// Volatile loads may have side-effects, so mark them as writing
|
||||
// memory (for example, a flag inside the processor).
|
||||
FunctionEffect |= Mod;
|
||||
} else if (StoreInst *SI = dyn_cast<StoreInst>(&*II)) {
|
||||
FunctionEffect |= Mod;
|
||||
if (SI->isVolatile())
|
||||
// Treat volatile stores as reading memory somewhere.
|
||||
FunctionEffect |= Ref;
|
||||
} else if (isAllocationFn(&*II, TLI) || isFreeCall(&*II, TLI)) {
|
||||
FunctionEffect |= ModRef;
|
||||
} else if (IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(&*II)) {
|
||||
// The callgraph doesn't include intrinsic calls.
|
||||
Function *Callee = Intrinsic->getCalledFunction();
|
||||
ModRefBehavior Behaviour = AliasAnalysis::getModRefBehavior(Callee);
|
||||
FunctionEffect |= (Behaviour & ModRef);
|
||||
for (auto *Node : SCC) {
|
||||
if (FunctionEffect == ModRef)
|
||||
break; // The mod/ref lattice saturates here.
|
||||
for (Instruction &I : inst_range(Node->getFunction())) {
|
||||
if (FunctionEffect == ModRef)
|
||||
break; // The mod/ref lattice saturates here.
|
||||
|
||||
// We handle calls specially because the graph-relevant aspects are
|
||||
// handled above.
|
||||
if (auto CS = CallSite(&I)) {
|
||||
if (isAllocationFn(&I, TLI) || isFreeCall(&I, TLI)) {
|
||||
// FIXME: It is completely unclear why this is necessary and not
|
||||
// handled by the above graph code.
|
||||
FunctionEffect |= ModRef;
|
||||
} else if (Function *Callee = CS.getCalledFunction()) {
|
||||
// The callgraph doesn't include intrinsic calls.
|
||||
if (Callee->isIntrinsic()) {
|
||||
ModRefBehavior Behaviour =
|
||||
AliasAnalysis::getModRefBehavior(Callee);
|
||||
FunctionEffect |= (Behaviour & ModRef);
|
||||
}
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// For all non-call instructions we use the primary predicates for whether
|
||||
// they read or write memory.
|
||||
if (I.mayReadFromMemory())
|
||||
FunctionEffect |= Ref;
|
||||
if (I.mayWriteToMemory())
|
||||
FunctionEffect |= Mod;
|
||||
}
|
||||
}
|
||||
|
||||
if ((FunctionEffect & Mod) == 0)
|
||||
++NumReadMemFunctions;
|
||||
if (FunctionEffect == 0)
|
||||
|
Loading…
x
Reference in New Issue
Block a user