on C64, the cx16.r0...cx16.r15 virtual regs are now in zeropage as well when using kernalsafe or full

Irmen de Jong 2022-07-28 18:55:47 +02:00
parent 046dceb5c2
commit f531daa872
12 changed files with 130 additions and 81 deletions
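To summarize the behaviour this commit introduces before diving into the per-file diffs, here is a minimal Kotlin sketch of the decision. The helper function name is hypothetical; only ZeropageType comes from the code base, and the real check lives in C64Zeropage below.

import prog8.code.core.ZeropageType

// Hypothetical condensation of the new C64 policy: the cx16 virtual registers are
// only claimed in the zeropage when the chosen zeropage style leaves room for them.
fun c64VirtualRegsGoToZeropage(zpType: ZeropageType): Boolean =
    zpType == ZeropageType.FULL || zpType == ZeropageType.KERNALSAFE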


@@ -117,4 +117,6 @@ abstract class Zeropage(protected val options: CompilationOptions) {
require(size>0)
return free.containsAll((address until address+size.toUInt()).toList())
}
abstract fun allocateCx16VirtualRegisters()
}


@@ -12,7 +12,6 @@ class AtariZeropage(options: CompilationOptions) : Zeropage(options) {
override val SCRATCH_W1 = 0xcdu // temp storage 1 for a word $cd+$ce
override val SCRATCH_W2 = 0xcfu // temp storage 2 for a word $cf+$d0 TODO is $d0 okay to use?
init {
if (options.floats && options.zeropage !in arrayOf(
ZeropageType.FLOATSAFE,
@@ -42,4 +41,8 @@ class AtariZeropage(options: CompilationOptions) : Zeropage(options) {
removeReservedFromFreePool()
}
override fun allocateCx16VirtualRegisters() {
TODO("Not known if atari can put the virtual regs in ZP")
}
}


@@ -40,4 +40,8 @@ class C128Zeropage(options: CompilationOptions) : Zeropage(options) {
removeReservedFromFreePool()
}
override fun allocateCx16VirtualRegisters() {
TODO("Not known if C128 can put the virtual regs in ZP")
}
}


@@ -1,9 +1,6 @@
package prog8.code.target.c64
import prog8.code.core.CompilationOptions
import prog8.code.core.InternalCompilerException
import prog8.code.core.Zeropage
import prog8.code.core.ZeropageType
import prog8.code.core.*
class C64Zeropage(options: CompilationOptions) : Zeropage(options) {
@@ -69,5 +66,26 @@ class C64Zeropage(options: CompilationOptions) : Zeropage(options) {
}
removeReservedFromFreePool()
if(options.zeropage==ZeropageType.FULL || options.zeropage==ZeropageType.KERNALSAFE) {
// in these cases there is enough space left in the zero page to place the cx16 virtual registers there as well.
allocateCx16VirtualRegisters()
}
}
override fun allocateCx16VirtualRegisters() {
// Note: the 16 virtual registers R0-R15 are not regularly allocated variables; they are *memory mapped* elsewhere to fixed addresses.
// However, for the compiler to be able to "see" them as zero page variables, we have to register them here as well.
// This is important because the compiler can sometimes generate more efficient code for ZP variables (for example when one is used as a pointer).
for(reg in 0..15) {
allocatedVariables[listOf("cx16", "r${reg}")] = ZpAllocation((4+reg*2).toUInt(), DataType.UWORD, 2) // cx16.r0 .. cx16.r15
allocatedVariables[listOf("cx16", "r${reg}s")] = ZpAllocation((4+reg*2).toUInt(), DataType.WORD, 2) // cx16.r0s .. cx16.r15s
allocatedVariables[listOf("cx16", "r${reg}L")] = ZpAllocation((4+reg*2).toUInt(), DataType.UBYTE, 1) // cx16.r0L .. cx16.r15L
allocatedVariables[listOf("cx16", "r${reg}H")] = ZpAllocation((5+reg*2).toUInt(), DataType.UBYTE, 1) // cx16.r0H .. cx16.r15H
allocatedVariables[listOf("cx16", "r${reg}sL")] = ZpAllocation((4+reg*2).toUInt(), DataType.BYTE, 1) // cx16.r0sL .. cx16.r15sL
allocatedVariables[listOf("cx16", "r${reg}sH")] = ZpAllocation((5+reg*2).toUInt(), DataType.BYTE, 1) // cx16.r0sH .. cx16.r15sH
free.remove((4+reg*2).toUInt())
free.remove((5+reg*2).toUInt())
}
}
}
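To make the allocation above concrete: a standalone Kotlin sketch (not part of the commit) that prints the zeropage layout implied by the base address 4 used in the loop. cx16.r0 ends up at $04/$05, cx16.r15 at $22/$23, so 32 zeropage addresses in total are removed from the free pool.

// Standalone sketch of the layout implied by C64Zeropage.allocateCx16VirtualRegisters():
// word register rN starts at 4 + N*2; the L/H (and sL/sH) byte views overlap those same two bytes.
fun main() {
    for (reg in 0..15) {
        val lo = 4 + reg * 2
        val hi = lo + 1
        println("cx16.r$reg -> \$%02x (low) / \$%02x (high)".format(lo, hi))
    }
}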


@@ -45,17 +45,21 @@ class CX16Zeropage(options: CompilationOptions) : Zeropage(options) {
removeReservedFromFreePool()
// Note: the 16 virtual registers R0-R15 are not regular allocated variables, they're *memory mapped* elsewhere to fixed addresses.
// However, to be able for the compiler to "see" them as zero page variables, we have to register them here as well.
// This is important because the compiler sometimes treats ZP variables more efficiently (for example if it's a pointer)
for(reg in 0..15) {
allocatedVariables[listOf("cx16", "r${reg}")] = ZpAllocation((2+reg*2).toUInt(), DataType.UWORD, 2) // cx16.r0 .. cx16.r15
allocatedVariables[listOf("cx16", "r${reg}s")] = ZpAllocation((2+reg*2).toUInt(), DataType.WORD, 2) // cx16.r0s .. cx16.r15s
allocatedVariables[listOf("cx16", "r${reg}L")] = ZpAllocation((2+reg*2).toUInt(), DataType.UBYTE, 1) // cx16.r0L .. cx16.r15L
allocatedVariables[listOf("cx16", "r${reg}H")] = ZpAllocation((3+reg*2).toUInt(), DataType.UBYTE, 1) // cx16.r0H .. cx16.r15H
allocatedVariables[listOf("cx16", "r${reg}sL")] = ZpAllocation((2+reg*2).toUInt(), DataType.BYTE, 1) // cx16.r0sL .. cx16.r15sL
allocatedVariables[listOf("cx16", "r${reg}sH")] = ZpAllocation((3+reg*2).toUInt(), DataType.BYTE, 1) // cx16.r0sH .. cx16.r15sH
}
allocateCx16VirtualRegisters()
}
}
override fun allocateCx16VirtualRegisters() {
// Note: the 16 virtual registers R0-R15 are not regularly allocated variables; they are *memory mapped* elsewhere to fixed addresses.
// However, for the compiler to be able to "see" them as zero page variables, we have to register them here as well.
// This is important because the compiler can sometimes generate more efficient code for ZP variables (for example when one is used as a pointer).
for(reg in 0..15) {
allocatedVariables[listOf("cx16", "r${reg}")] = ZpAllocation((2+reg*2).toUInt(), DataType.UWORD, 2) // cx16.r0 .. cx16.r15
allocatedVariables[listOf("cx16", "r${reg}s")] = ZpAllocation((2+reg*2).toUInt(), DataType.WORD, 2) // cx16.r0s .. cx16.r15s
allocatedVariables[listOf("cx16", "r${reg}L")] = ZpAllocation((2+reg*2).toUInt(), DataType.UBYTE, 1) // cx16.r0L .. cx16.r15L
allocatedVariables[listOf("cx16", "r${reg}H")] = ZpAllocation((3+reg*2).toUInt(), DataType.UBYTE, 1) // cx16.r0H .. cx16.r15H
allocatedVariables[listOf("cx16", "r${reg}sL")] = ZpAllocation((2+reg*2).toUInt(), DataType.BYTE, 1) // cx16.r0sL .. cx16.r15sL
allocatedVariables[listOf("cx16", "r${reg}sH")] = ZpAllocation((3+reg*2).toUInt(), DataType.BYTE, 1) // cx16.r0sH .. cx16.r15sH
}
}
}


@@ -328,7 +328,7 @@ fun determineCompilationOptions(program: Program, compTarget: ICompilationTarget
private fun processAst(program: Program, errors: IErrorReporter, compilerOptions: CompilationOptions) {
println("Analyzing code...")
program.preprocessAst(errors, compilerOptions.compTarget)
program.preprocessAst(errors, compilerOptions)
program.checkIdentifiers(errors, compilerOptions)
errors.report()
program.charLiteralsToUByteLiterals(compilerOptions.compTarget, errors)


@@ -92,8 +92,8 @@ internal fun Program.verifyFunctionArgTypes(errors: IErrorReporter) {
fixer.visit(this)
}
internal fun Program.preprocessAst(errors: IErrorReporter, target: ICompilationTarget) {
val transforms = AstPreprocessor(this, errors, target)
internal fun Program.preprocessAst(errors: IErrorReporter, options: CompilationOptions) {
val transforms = AstPreprocessor(this, errors, options)
transforms.visit(this)
var mods = transforms.applyModifications()
while(mods>0)


@@ -8,39 +8,48 @@ import prog8.ast.statements.*
import prog8.ast.walk.AstWalker
import prog8.ast.walk.IAstModification
import prog8.code.core.*
import prog8.code.target.C64Target
import prog8.code.target.Cx16Target
class AstPreprocessor(val program: Program, val errors: IErrorReporter, val compTarget: ICompilationTarget) : AstWalker() {
class AstPreprocessor(val program: Program,
val errors: IErrorReporter,
val options: CompilationOptions) : AstWalker() {
override fun before(program: Program): Iterable<IAstModification> {
if(compTarget.name!=Cx16Target.NAME) {
// reset the address of the virtual registers to be inside the evaluation stack.
// (we don't do this on CommanderX16 itself as the registers have a fixed location in Zeropage there)
val cx16block = program.allBlocks.single { it.name=="cx16" }
val memVars = cx16block.statements
.filterIsInstance<VarDecl>()
.associateBy { it.name }
val estack = compTarget.machine.ESTACK_HI
for(regnum in 0u..15u) {
val rX = memVars.getValue("r$regnum")
val rXL = memVars.getValue("r${regnum}L")
val rXH = memVars.getValue("r${regnum}H")
val rXs = memVars.getValue("r${regnum}s")
val rXsL = memVars.getValue("r${regnum}sL")
val rXsH = memVars.getValue("r${regnum}sH")
setAddress(rX, estack + 2u*regnum)
setAddress(rXL, estack + 2u*regnum)
setAddress(rXH, estack + 2u*regnum +1u)
setAddress(rXs, estack + 2u*regnum)
setAddress(rXsL, estack + 2u*regnum)
setAddress(rXsH, estack + 2u*regnum + 1u)
}
if(options.compTarget.name==C64Target.NAME) {
relocateCx16VirtualRegisters(program, 0x0002u) // same address as CommanderX16
}
else if(options.compTarget.name!=Cx16Target.NAME) {
relocateCx16VirtualRegisters(program, options.compTarget.machine.ESTACK_HI)
}
return noModifications
}
private fun relocateCx16VirtualRegisters(program: Program, baseAddress: UInt) {
// Reset the addresses of the virtual registers to start at the given base address
// (the zeropage on the C64, the evaluation stack area on other targets).
// (we don't do this on the CommanderX16 itself, as the registers already have a fixed location in its zeropage)
val cx16block = program.allBlocks.single { it.name == "cx16" }
val memVars = cx16block.statements
.filterIsInstance<VarDecl>()
.associateBy { it.name }
for (regnum in 0u..15u) {
val rX = memVars.getValue("r$regnum")
val rXL = memVars.getValue("r${regnum}L")
val rXH = memVars.getValue("r${regnum}H")
val rXs = memVars.getValue("r${regnum}s")
val rXsL = memVars.getValue("r${regnum}sL")
val rXsH = memVars.getValue("r${regnum}sH")
setAddress(rX, baseAddress + 2u * regnum)
setAddress(rXL, baseAddress + 2u * regnum)
setAddress(rXH, baseAddress + 2u * regnum + 1u)
setAddress(rXs, baseAddress + 2u * regnum)
setAddress(rXsL, baseAddress + 2u * regnum)
setAddress(rXsH, baseAddress + 2u * regnum + 1u)
}
}
private fun setAddress(vardecl: VarDecl, address: UInt) {
val oldAddr = vardecl.value as NumericLiteral
vardecl.value = NumericLiteral(oldAddr.type, address.toDouble(), oldAddr.position)
@@ -48,13 +57,13 @@ class AstPreprocessor(val program: Program, val errors: IErrorReporter, val comp
override fun before(char: CharLiteral, parent: Node): Iterable<IAstModification> {
if(char.encoding== Encoding.DEFAULT)
char.encoding = compTarget.defaultEncoding
char.encoding = options.compTarget.defaultEncoding
return noModifications
}
override fun before(string: StringLiteral, parent: Node): Iterable<IAstModification> {
if(string.encoding==Encoding.DEFAULT)
string.encoding = compTarget.defaultEncoding
string.encoding = options.compTarget.defaultEncoding
return super.before(string, parent)
}
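For reference, a condensed standalone Kotlin sketch (hypothetical helper, not part of the commit) of the target dispatch now done in before(program) above: the Commander X16 keeps its fixed zeropage registers, the C64 now gets zeropage base 0x0002, and all other targets keep using the evaluation stack area.

import prog8.code.target.C64Target
import prog8.code.target.Cx16Target

// Hypothetical condensation of the dispatch in AstPreprocessor.before(program);
// a null result means "leave the registers where they already are".
fun virtualRegisterBase(targetName: String, estackHi: UInt): UInt? = when (targetName) {
    Cx16Target.NAME -> null        // X16: registers already have a fixed zeropage location
    C64Target.NAME -> 0x0002u      // C64: relocated into the zeropage by this commit
    else -> estackHi               // other targets: registers stay in the evaluation stack area
}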


@@ -34,6 +34,9 @@ class TestAbstractZeropage: FunSpec({
removeReservedFromFreePool()
}
override fun allocateCx16VirtualRegisters() {
}
}
@@ -119,18 +122,20 @@ class TestC64Zeropage: FunSpec({
val zp2 = C64Zeropage(CompilationOptions(OutputType.RAW, CbmPrgLauncherType.NONE, ZeropageType.FLOATSAFE, emptyList(), false, false, c64target, 999u))
zp2.availableBytes() shouldBe 92
val zp3 = C64Zeropage(CompilationOptions(OutputType.RAW, CbmPrgLauncherType.NONE, ZeropageType.KERNALSAFE, emptyList(), false, false, c64target, 999u))
zp3.availableBytes() shouldBe 134
zp3.availableBytes() shouldBe 102
val zp4 = C64Zeropage(CompilationOptions(OutputType.RAW, CbmPrgLauncherType.NONE, ZeropageType.FULL, emptyList(), false, false, c64target, 999u))
zp4.availableBytes() shouldBe 239
zp4.availableBytes() shouldBe 207
zp4.allocate(listOf("test"), DataType.UBYTE, null, null, errors)
zp4.availableBytes() shouldBe 238
zp4.availableBytes() shouldBe 206
zp4.allocate(listOf("test2"), DataType.UBYTE, null, null, errors)
zp4.availableBytes() shouldBe 237
zp4.availableBytes() shouldBe 205
}
test("testReservedSpace") {
val zp1 = C64Zeropage(CompilationOptions(OutputType.RAW, CbmPrgLauncherType.NONE, ZeropageType.FULL, emptyList(), false, false, c64target, 999u))
zp1.availableBytes() shouldBe 239
zp1.availableBytes() shouldBe 207
4u shouldNotBeIn zp1.free
35u shouldNotBeIn zp1.free
50u shouldBeIn zp1.free
100u shouldBeIn zp1.free
49u shouldBeIn zp1.free
@@ -139,7 +144,9 @@ class TestC64Zeropage: FunSpec({
255u shouldBeIn zp1.free
199u shouldBeIn zp1.free
val zp2 = C64Zeropage(CompilationOptions(OutputType.RAW, CbmPrgLauncherType.NONE, ZeropageType.FULL, listOf(50u .. 100u, 200u..255u), false, false, c64target, 999u))
zp2.availableBytes() shouldBe 139
zp2.availableBytes() shouldBe 107
4u shouldNotBeIn zp2.free
35u shouldNotBeIn zp2.free
50u shouldNotBeIn zp2.free
100u shouldNotBeIn zp2.free
49u shouldBeIn zp2.free
@@ -147,6 +154,10 @@ class TestC64Zeropage: FunSpec({
200u shouldNotBeIn zp2.free
255u shouldNotBeIn zp2.free
199u shouldBeIn zp2.free
val zp3 = C64Zeropage(CompilationOptions(OutputType.RAW, CbmPrgLauncherType.NONE, ZeropageType.FLOATSAFE, listOf(50u .. 100u, 200u..255u), false, false, c64target, 999u))
zp2.availableBytes() shouldBe 107
4u shouldBeIn zp3.free
35u shouldNotBeIn zp3.free
}
test("testBasicsafeAllocation") {
@@ -173,7 +184,7 @@ class TestC64Zeropage: FunSpec({
test("testFullAllocation") {
val zp = C64Zeropage(CompilationOptions(OutputType.RAW, CbmPrgLauncherType.NONE, ZeropageType.FULL, emptyList(), false, false, c64target, 999u))
zp.availableBytes() shouldBe 239
zp.availableBytes() shouldBe 207
zp.hasByteAvailable() shouldBe true
zp.hasWordAvailable() shouldBe true
var result = zp.allocate(emptyList(), DataType.UWORD, null, null, errors)
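The updated expectations above all drop by the same amount. A quick arithmetic sketch (not part of the test file) of where the new numbers come from, given the 16 two-byte registers claimed by C64Zeropage:

// 16 virtual word registers of 2 bytes each are now taken out of the C64 zeropage
// free pool when the zeropage type is KERNALSAFE or FULL.
fun main() {
    val virtualRegisterBytes = 16 * 2
    println(239 - virtualRegisterBytes)   // 207: FULL, previously 239
    println(134 - virtualRegisterBytes)   // 102: KERNALSAFE, previously 134
    println(139 - virtualRegisterBytes)   // 107: FULL with reserved ranges, previously 139
}

The FLOATSAFE expectation of 92 is unchanged because that zeropage type does not move the virtual registers into the zeropage.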


@@ -51,7 +51,6 @@ Directives
- type ``basic`` : add a tiny C64 BASIC program, with a SYS statement calling into the machine code
- type ``none`` : no launcher logic is added at all
.. data:: %zeropage <style>
Level: module.
@@ -80,6 +79,16 @@ Directives
Also read :ref:`zeropage`.
.. note::
``kernalsafe`` and ``full`` on the C64 leave enough room in the zeropage to relocate the
16 virtual registers cx16.r0...cx16.r15 from the Commander X16 into the zeropage as well
(although not at the same locations). The compiler relocates them automatically.
The other options need those locations for other purposes, so in those cases the virtual registers
have to be placed elsewhere in memory, outside of the zeropage. Using them as zero page
variables or pointers etc. will then be a lot slower!
On the Commander X16 the registers are always in the zeropage. On other targets, for now, they
are always outside of the zeropage.
.. data:: %zpreserved <fromaddress>,<toaddress>
Level: module.


@@ -3,12 +3,6 @@ TODO
For next release
^^^^^^^^^^^^^^^^
- add item to XyzZeropage that enables an option that if zeropage=FULL or KERNALSAFE, moves the cx16 virtual registers to ZP, same location as on x16
(can be done on C64 only for now) Remove those addresses from the ZP free pool = allocate them in ZP like Cx16Zeropage does
Adapt the code in AstPreprocessor that relocates the registers as well.
- for uword pointer variables: allow pointer[uword] array indexing >255 , rewrite it to @(pointer+index)
DO NOT allow this for regular array indexing because normal arrays can never exceed size 256
...


@@ -1,31 +1,26 @@
%import textio
%import string
%zeropage basicsafe
%zeropage kernalsafe
main {
sub derp(word num, ubyte a1, ubyte a2, ubyte a3, ubyte a4) {
txt.print_w(num)
txt.nl()
}
sub start() {
word qq = 1
word bb = -5051
derp((bb*qq)/-2, 1,2,3,4)
bb /= -2
txt.print_w(bb)
cx16.r0 = $ea31
cx16.r15 = $ff99
str name = "irmen"
txt.print_uwhex(cx16.r0, true)
txt.spc()
txt.print_uwhex(cx16.r15, true)
txt.nl()
bb = -5051
bb = -bb/2
txt.print_w(bb)
txt.nl()
bb = 5051
bb /= -2
txt.print_w(bb)
txt.nl()
uword ubb = 5051
ubb /= 2
txt.print_uw(ubb)
cx16.r7 = &name
txt.chrout(cx16.r7[0])
txt.chrout(cx16.r7[1])
txt.chrout(cx16.r7[2])
txt.chrout(cx16.r7[3])
txt.chrout(cx16.r7[4])
txt.nl()
repeat {
}
}
}