c64 zeropage: added a few more locations that should be safe to the Kernalsafe free list

this makes $02-$21 inclusive available for later use (the cx16 virtual registers are placed here on the x16...)
Irmen de Jong 2022-07-17 12:12:47 +02:00
parent a07c52e112
commit cfb31377fc
3 changed files with 16 additions and 17 deletions
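
(Annotation, not part of the commit: a quick Kotlin sanity check of why this particular range matters. $02-$21 inclusive is exactly 32 bytes, the footprint of the 16 word-sized virtual registers r0..r15 that the Commander X16 keeps at those same zeropage addresses.)

```kotlin
fun main() {
    // $02-$21 inclusive: the zeropage locations the commit adds to the free list.
    val newlyFreed = (0x02..0x21).toList()
    println(newlyFreed.size)          // 32 bytes

    // 32 bytes are exactly 16 word-sized registers: the X16 places
    // cx16.r0..cx16.r15 at these addresses, r0 at $02, r15 at $20/$21.
    val r15 = 0x02 + 15 * 2
    println(r15.toString(16))         // 20
}
```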


@@ -29,10 +29,9 @@ class C64Zeropage(options: CompilationOptions) : Zeropage(options) {
         } else {
             if (options.zeropage == ZeropageType.KERNALSAFE || options.zeropage == ZeropageType.FLOATSAFE) {
                 free.addAll(listOf(
-                    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
-                    0x16, 0x17, 0x18, 0x19, 0x1a,
-                    0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21,
-                    0x22, 0x23, 0x24, 0x25,
+                    0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+                    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+                    0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
                     0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46,
                     0x47, 0x48, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x51, 0x52, 0x53,
                     0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60,
@@ -48,8 +47,8 @@ class C64Zeropage(options: CompilationOptions) : Zeropage(options) {
             if (options.zeropage == ZeropageType.FLOATSAFE) {
                 // remove the zeropage locations used for floating point operations from the free list
                 free.removeAll(listOf(
-                    0x22, 0x23, 0x24, 0x25,
-                    0x10, 0x11, 0x12, 0x26, 0x27, 0x28, 0x29, 0x2a,
+                    0x03, 0x04, 0x10, 0x11, 0x12,
+                    0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a,
                     0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60,
                     0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
                     0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72,
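
(Annotation: the two hunks above follow an addAll/removeAll pattern: first build the largest Kernal-safe free list, then strip the locations the float routines need when FLOATSAFE is requested. Below is a minimal self-contained sketch of that idea; the class name, constructor, and counts are illustrative, not the real Zeropage API.)

```kotlin
// Simplified model of the free-list bookkeeping seen in the diff above.
class ZeropageSketch(floatSafe: Boolean) {
    private val free = mutableListOf<Int>()

    init {
        // Kernal-safe base set: $02-$25 (the real list adds further ranges too).
        free.addAll(0x02..0x25)
        if (floatSafe) {
            // FLOATSAFE keeps the float work areas out of the pool,
            // e.g. $03/$04, $10-$12 and $22-$2a as removed above.
            free.removeAll(listOf(0x03, 0x04, 0x10, 0x11, 0x12) + (0x22..0x2a))
        }
    }

    fun availableBytes() = free.size
}

fun main() {
    println(ZeropageSketch(floatSafe = false).availableBytes())  // 36
    println(ZeropageSketch(floatSafe = true).availableBytes())   // 27
}
```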


@@ -117,9 +117,9 @@ class TestC64Zeropage: FunSpec({
         val zp1 = C64Zeropage(CompilationOptions(OutputType.RAW, CbmPrgLauncherType.NONE, ZeropageType.BASICSAFE, emptyList(), true, false, c64target, 999u))
         zp1.availableBytes() shouldBe 18
         val zp2 = C64Zeropage(CompilationOptions(OutputType.RAW, CbmPrgLauncherType.NONE, ZeropageType.FLOATSAFE, emptyList(), false, false, c64target, 999u))
-        zp2.availableBytes() shouldBe 85
+        zp2.availableBytes() shouldBe 92
         val zp3 = C64Zeropage(CompilationOptions(OutputType.RAW, CbmPrgLauncherType.NONE, ZeropageType.KERNALSAFE, emptyList(), false, false, c64target, 999u))
-        zp3.availableBytes() shouldBe 125
+        zp3.availableBytes() shouldBe 134
         val zp4 = C64Zeropage(CompilationOptions(OutputType.RAW, CbmPrgLauncherType.NONE, ZeropageType.FULL, emptyList(), false, false, c64target, 999u))
         zp4.availableBytes() shouldBe 239
         zp4.allocate(listOf("test"), DataType.UBYTE, null, null, errors)


@@ -3,11 +3,11 @@ TODO
 For next release
 ^^^^^^^^^^^^^^^^
-- see if we can let for loops skip the loop if end<start, without adding a lot of code size/duplicating the loop condition
-  this is documented behavior (the loop wraps around) but it's too easy to forget about
-  Lots of work because of the many special cases in ForLoopsAsmgen.....
-  How is it for the vm target? -> just 2 special cases in CodeGen.
 - move the vm unit tests to codeGenVirtual module and remove virtualmachine dependency in the compiler module
+- add item to XZeropage that enables an option that, if zeropage=FULL or KERNALSAFE, moves the cx16 virtual registers to ZP, same location as on x16
+  (can be done on C64 only for now) Remove those addresses from the ZP free pool!!!
+  needs the dynamic base address for the symbols in syslib.p8
+  also needs a trick to allocate them in ZP like Cx16Zeropage already does
 ...
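
(Annotation on the new TODO item above: once the virtual registers move into $02-$21, those bytes must also be carved out of the ZP free pool, as the item notes. A hedged sketch of that bookkeeping; the helper name and map layout are hypothetical, not from syslib.p8 or the real allocator.)

```kotlin
// Hypothetical helper: pin the 16 cx16 virtual word registers at $02-$21
// (the X16 layout) and remove their bytes from the zeropage free pool so
// the variable allocator can never hand them out.
fun reserveVirtualRegisters(free: MutableList<Int>): Map<String, Int> {
    val registers = (0..15).associate { n -> "cx16.r$n" to (0x02 + n * 2) }
    for (addr in registers.values) {
        // each virtual register is a word: reserve both of its bytes
        free.removeAll(listOf(addr, addr + 1))
    }
    return registers
}
```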
@@ -22,9 +22,6 @@ Future Things and Ideas
 ^^^^^^^^^^^^^^^^^^^^^^^
 Compiler:
-- on non-cx16 targets: have an option that if zeropage=FULL, moves the cx16 virtual registers to ZP (same location as on x16?)
-  needs the dynamic base address for the symbols in syslib.p8
-  also needs a trick to allocate them in ZP like Cx16Zeropage already does
 - vm Instruction needs to know what the read-registers/memory are, and what the write-register/memory is.
   this info is needed for more advanced optimizations and later code generation steps.
 - vm: implement remaining sin/cos functions in math.p8
@@ -34,7 +31,10 @@ Compiler:
 - vm: how to remove all unused subroutines? (in the 6502 assembly codegen, we let 64tass solve this for us)
 - vm: rather than being able to jump to any 'address' (IPTR), use 'blocks' that have entry and exit points -> even better dead code elimination possible too
 - vm: add more optimizations in VmPeepholeOptimizer
-- move the vm unit tests to codeGenVirtual module and remove virtualmachine dependency in the compiler module
+- see if we can let for loops skip the loop if end<start, without adding a lot of code size/duplicating the loop condition
+  this is documented behavior (the loop wraps around) but it's too easy to forget about
+  Lots of work because of the many special cases in ForLoopsAsmgen.....
+  How is it for the vm target? -> just 2 special cases in CodeGen.
 - when the vm is stable and *if* its language can get promoted to prog8 IL, the variable allocation should be changed.
   It's now done before the vm code generation, but the IL should probably not depend on the allocations already performed.
   So the CodeGen doesn't do VariableAlloc *before* the codegen, but as a last step.
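
(Annotation on the for-loop item above: the surprise it describes is easy to show in plain Kotlin. A post-test loop, the cheap shape 8-bit codegens favor, runs its body once even when end < start, and an unsigned byte counter would then wrap around; the wanted fix is a pre-check that skips the loop without duplicating the condition in every ForLoopsAsmgen special case. Illustrative only, not the real codegen.)

```kotlin
// Post-test shape: the condition is evaluated only after the body, so a
// loop like 'for i in 10 to 5' still executes its body once.
fun postTestLoop(start: Int, end: Int, body: (Int) -> Unit) {
    var i = start
    do {
        body(i)
        i++
    } while (i <= end)
}

// Desired behavior: skip the loop entirely when end < start.
fun guardedLoop(start: Int, end: Int, body: (Int) -> Unit) {
    if (end < start) return
    postTestLoop(start, end, body)
}

fun main() {
    postTestLoop(10, 5) { println("runs once: $it") }  // prints 10
    guardedLoop(10, 5) { println("never runs") }       // prints nothing
}
```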