diff --git a/compiler/test/TestCallgraph.kt b/compiler/test/TestCallgraph.kt
index b087a8280..fd1275c6e 100644
--- a/compiler/test/TestCallgraph.kt
+++ b/compiler/test/TestCallgraph.kt
@@ -197,10 +197,7 @@ class TestCallgraph: FunSpec({
         callgraph.checkRecursiveCalls(errors)
         errors.errors.size shouldBe 0
         errors.warnings.size shouldBe 4
-        errors.warnings[0] shouldContain "contains recursive subroutine calls"
-        errors.warnings[1] shouldContain "start at"
-        errors.warnings[2] shouldContain "recurse1 at"
-        errors.warnings[3] shouldContain "recurse2 at"
+        errors.warnings[0] shouldContain "contains recursive subroutines"
     }

     test("no recursion warning if reference isn't a call") {
diff --git a/compilerAst/src/prog8/compiler/CallGraph.kt b/compilerAst/src/prog8/compiler/CallGraph.kt
index 510e711dc..026adef34 100644
--- a/compilerAst/src/prog8/compiler/CallGraph.kt
+++ b/compilerAst/src/prog8/compiler/CallGraph.kt
@@ -129,54 +129,35 @@ class CallGraph(private val program: Program, private val allowMissingIdentifier
     }

     fun checkRecursiveCalls(errors: IErrorReporter) {
-        val cycles = recursionCycles()
-        if(cycles.any()) {
-            errors.warn("Program contains recursive subroutine calls. These only works in very specific limited scenarios!", cycles[0][0].position)
-            val printed = mutableSetOf<Subroutine>()
-            for(chain in cycles) {
-                if(chain[0] !in printed) {
-                    val chainStr = chain.joinToString(" <-- ") { "${it.name} at ${it.position}" }
-                    errors.warn("Cycle in (a subroutine call in) $chainStr", chain[0].position)
-                    printed.add(chain[0])
-                }
+        val recursiveSubroutines = recursionCycles()
+        if(recursiveSubroutines.any()) {
+            errors.warn("Program contains recursive subroutines. These only work in very specific limited scenarios!", recursiveSubroutines.first().position)
+            for(subroutine in recursiveSubroutines) {
+                errors.warn("recursive subroutine '${subroutine.name}'", subroutine.position)
             }
         }
     }

-    private fun recursionCycles(): List<List<Subroutine>> {
-        val chains = mutableListOf<MutableList<Subroutine>>()
+    private fun recursionCycles(): Set<Subroutine> {
+        val cycles = mutableSetOf<Subroutine>()
+
         for(caller in calls.keys) {
-            val visited = calls.keys.associateWith { false }.toMutableMap()
-            val recStack = calls.keys.associateWith { false }.toMutableMap()
-            val chain = mutableListOf<Subroutine>()
-            if(hasCycle(caller, visited, recStack, chain))
-                chains.add(chain)
+            if(hasRecursionCycle(caller))
+                cycles.add(caller)
         }
-        return chains
+        return cycles
     }

-    private fun hasCycle(sub: Subroutine, visited: MutableMap<Subroutine, Boolean>, recStack: MutableMap<Subroutine, Boolean>, chain: MutableList<Subroutine>): Boolean {
-        // mark current node as visited and add to recursion stack
-        if(recStack[sub]==true)
-            return true
-        if(visited[sub]==true)
-            return false
-
-        // mark visited and add to recursion stack
-        visited[sub] = true
-        recStack[sub] = true
-
-        // recurse for all neighbours
-        for(called in calls.getValue(sub)) {
-            if(hasCycle(called, visited, recStack, chain)) {
-                chain.add(called)
-                return true
+    private fun hasRecursionCycle(sub: Subroutine): Boolean {
+        val callCloud = calls.getValue(sub).toMutableSet()
+        var previousCloudSize = -1
+        while(callCloud.size > previousCloudSize && sub !in callCloud) {
+            previousCloudSize = callCloud.size
+            for(element in callCloud.toList()) {
+                callCloud.addAll(calls.getValue(element))
             }
         }
-
-        // pop from recursion stack
-        recStack[sub] = false
-        return false
+        return sub in callCloud
     }

     fun unused(module: Module) = module !in usedModules
diff --git a/docs/source/todo.rst b/docs/source/todo.rst
index a1108c451..94f87b343 100644
--- a/docs/source/todo.rst
+++ b/docs/source/todo.rst
@@ -3,8 +3,14 @@ TODO

 For next release
 ^^^^^^^^^^^^^^^^
-- can the recursive cycle detector print the actual LINES that do the call?
-
+- what to do with the rounding difference in signed divide by 2 / 4 (double ror)? it rounds towards minus infinity (so -5 / 2 = -3)
+  while the NON-optimized routine produces -2 . Virtual machine also produces -3?
+  What rounding do we want?
+- add item to XyzZeropage that enables an option that if zeropage=FULL or KERNALSAFE, moves the cx16 virtual registers to ZP, same location as on x16
+  (can be done on C64 only for now) Remove those addresses from the ZP free pool = allocate them in ZP like Cx16Zeropage does
+  Adapt the code in AstPreprocessor that relocates the registers as well.
+- for uword pointer variables: allow pointer[uword] array indexing >255 , rewrite it to @(pointer+index)
+  DO NOT allow this for regular array indexing because normal arrays can never exceed size 256

 ...

@@ -18,13 +24,6 @@ Need help with
 Future Things and Ideas
 ^^^^^^^^^^^^^^^^^^^^^^^
 Compiler:
-
-- add item to XZeropage that enables an option that if zeropage=FULL or KERNALSAFE, moves the cx16 virtual registers to ZP, same location as on x16
-  (can be done on C64 only for now) Remove those addresses from the ZP free pool = allocate them in ZP like Cx16Zeropage does
-  Adapt the code in AstPreprocessor that relocates the registers as well.
-- for uword pointer variables: allow pointer[uword] array indexing >255 , rewrite it to @(pointer+index)
-  DO NOT allow this for regular array indexing because normal arrays can never exceed size 256
-
 - vm Instruction needs to know what the read-registers/memory are, and what the write-register/memory is. this info is needed for more advanced optimizations and later code generation steps.
 - vm: implement remaining sin/cos functions in math.p8

diff --git a/examples/test.p8 b/examples/test.p8
index 1057397e3..86a43eb66 100644
--- a/examples/test.p8
+++ b/examples/test.p8
@@ -1,16 +1,30 @@
 %import textio
+%import string
 %zeropage basicsafe

 main {
     sub start() {
-        ubyte ci
-        ubyte from=10
-        ubyte end=1
-
-        for ci in from to end {
-            txt.print_ub(ci)
-            txt.spc()
-        }
+        word bb = -15
+        bb /= 4
+        txt.print_w(bb)
         txt.nl()
+        bb = 15
+        bb /= 4
+        txt.print_w(bb)
+        txt.nl()
+        uword ubb = 15
+        ubb /= 4
+        txt.print_uw(ubb)
+        txt.nl()
+
+        recurse1()
+    }
+
+    sub recurse1() {
+        recurse2()
+    }
+
+    sub recurse2() {
+        start()
     }
 }
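Note for reviewers, not part of the diff itself: the sketch below restates the reachability check that the new hasRecursionCycle() performs, but as a standalone Kotlin snippet so it can be compiled and run on its own. Subroutines are modeled as plain strings and the call graph is passed in as a Map parameter; in the diff the nodes are AST Subroutine objects and the graph is the calls field of CallGraph. The function signature here and the toy call map in main() are assumptions made for illustration only (the map merely mirrors examples/test.p8).

// Standalone illustration (simplified): "calls" maps each subroutine name to the
// set of subroutine names it calls directly.
fun hasRecursionCycle(sub: String, calls: Map<String, Set<String>>): Boolean {
    // start with the direct callees...
    val callCloud = calls.getValue(sub).toMutableSet()
    var previousCloudSize = -1
    // ...and keep adding callees-of-callees until the cloud stops growing,
    // or until the subroutine itself shows up in it (= it can reach itself again).
    while (callCloud.size > previousCloudSize && sub !in callCloud) {
        previousCloudSize = callCloud.size
        for (element in callCloud.toList())
            callCloud.addAll(calls.getValue(element))
    }
    return sub in callCloud
}

fun main() {
    // toy call graph mirroring examples/test.p8: start -> recurse1 -> recurse2 -> start
    val calls = mapOf(
        "start" to setOf("recurse1"),
        "recurse1" to setOf("recurse2"),
        "recurse2" to setOf("start"),
        "leaf" to emptySet<String>()
    )
    println(hasRecursionCycle("start", calls))  // prints: true
    println(hasRecursionCycle("leaf", calls))   // prints: false
}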