preparing version 4.3

Irmen de Jong 2020-09-22 21:50:56 +02:00
parent 25cf0d2b94
commit af6731c9c8
9 changed files with 37 additions and 39 deletions

View File

@@ -6,7 +6,9 @@
; assumes bitmap screen memory is $2000-$3fff
graphics {
const uword bitmap_address = $2000
const uword BITMAP_ADDRESS = $2000
const uword WIDTH = 320
const ubyte HEIGHT = 200
sub enable_bitmap_mode() {
; enable bitmap screen, erase it and set colors to black/white.
@@ -16,7 +18,7 @@ graphics {
}
sub clear_screen(ubyte pixelcolor, ubyte bgcolor) {
memset(bitmap_address, 320*200/8, 0)
memset(BITMAP_ADDRESS, 320*200/8, 0)
txt.fill_screen(pixelcolor << 4 | bgcolor, 0)
}
@@ -177,7 +179,7 @@ graphics {
; here is the non-asm code for the plot routine below:
; sub plot_nonasm(uword px, ubyte py) {
; ubyte[] ormask = [128, 64, 32, 16, 8, 4, 2, 1]
; uword addr = bitmap_address + 320*(py>>3) + (py & 7) + (px & %0000000111111000)
; uword addr = BITMAP_ADDRESS + 320*(py>>3) + (py & 7) + (px & %0000000111111000)
; @(addr) |= ormask[lsb(px) & 7]
; }
@@ -226,7 +228,7 @@ _ormask .byte 128, 64, 32, 16, 8, 4, 2, 1
; note: this can be even faster if we also have a 256 byte x-lookup table, but hey.
; see http://codebase64.org/doku.php?id=base:various_techniques_to_calculate_adresses_fast_common_screen_formats_for_pixel_graphics
; the y lookup table encodes this formula: bitmap_address + 320*(py>>3) + (py & 7) (y from 0..199)
; the y lookup table encodes this formula: BITMAP_ADDRESS + 320*(py>>3) + (py & 7) (y from 0..199)
; We use the 64tass syntax for range expressions to calculate this table at assembly time.
_plot_y_values := $2000 + 320*(range(200)>>3) + (range(200) & 7)
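To make the address arithmetic in the plot comments above concrete, here is a minimal Python sketch of the same hires-bitmap pixel addressing. BITMAP_ADDRESS, WIDTH and HEIGHT mirror the constants introduced in this diff, and the list at the end reproduces what the 64tass range() expression computes at assembly time.

BITMAP_ADDRESS = 0x2000          # start of the bitmap, as in the constant above
WIDTH, HEIGHT = 320, 200

def plot_address(px, py):
    # same formula as plot_nonasm: 8-line character row block, line within the
    # block, plus the byte column (px rounded down to a multiple of 8)
    return BITMAP_ADDRESS + 320 * (py >> 3) + (py & 7) + (px & 0b111111000)

def plot_bitmask(px):
    # equivalent of the _ormask table [128, 64, 32, ...]: leftmost pixel is bit 7
    return 128 >> (px & 7)

# the y lookup table that the 64tass range() expression builds at assembly time
plot_y_values = [BITMAP_ADDRESS + 320 * (y >> 3) + (y & 7) for y in range(HEIGHT)]
assert plot_y_values[0] == 0x2000 and plot_y_values[8] == 0x2140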

View File

@@ -6,6 +6,8 @@
; only black/white monochrome 320x200 for now.
graphics {
const uword WIDTH = 320
const ubyte HEIGHT = 200
sub enable_bitmap_mode() {
; enable bitmap screen, erase it and set colors to black/white.

View File

@@ -31,7 +31,7 @@ _enterloop lsr P8ZP_SCRATCH_REG
.pend
multiply_bytes_16 .proc
multiply_bytes_into_word .proc
; -- multiply 2 bytes A and Y, result as word in A/Y (unsigned)
sta P8ZP_SCRATCH_B1
sty P8ZP_SCRATCH_REG
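The renamed routine's contract (two unsigned bytes in, their 16-bit product out, split over A and Y) can be sanity-checked in a few lines of Python. The lo/hi register order shown is an assumption based on the usual prog8 convention of A = low byte, Y = high byte.

def multiply_bytes_into_word(a, y):
    # an unsigned 8x8-bit product always fits in 16 bits (max 255*255 = 65025)
    product = (a & 0xFF) * (y & 0xFF)
    return product & 0xFF, product >> 8   # (low byte "A", high byte "Y"), assumed order

assert multiply_bytes_into_word(255, 255) == (0x01, 0xFE)   # 0xFE01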

View File

@@ -1 +1 @@
4.3-SNAPSHOT
4.3

View File

@@ -191,7 +191,6 @@ internal class AugmentableAssignmentAsmGen(private val program: Program,
else -> {
println("warning: slow stack evaluation used (1): ${memory.addressExpression::class.simpleName} at ${memory.addressExpression.position}") // TODO optimize...
asmgen.translateExpression(memory.addressExpression)
// TODO buggy?:
asmgen.out(" jsr prog8_lib.read_byte_from_address_on_stack | sta P8ZP_SCRATCH_B1")
val zp = CompilationTarget.instance.machine.zeropage
when {
@@ -451,6 +450,7 @@ internal class AugmentableAssignmentAsmGen(private val program: Program,
"-" -> asmgen.out(" lda $name | sec | sbc P8ESTACK_LO+1,x | sta $name")
"*" -> {
TODO("var mul byte expr")
// check optimizedByteMultiplications
// asmgen.out(" jsr prog8_lib.mul_byte")
}
"/" -> {

View File

@@ -9,7 +9,6 @@ TODO
- implement @stack for asmsub parameters
- make it possible to use cpu opcodes such as 'nop' as variable names by prefixing all asm vars with something such as '_'
- option to load the built-in library files from a directory instead of the embedded ones (for easier library development/debugging)
- aliases for imported symbols for example perhaps '%alias print = c64scr.print' ?
- see if we can group some errors together, for instance the (now single) errors about unidentified symbols
@@ -20,14 +19,13 @@ Add more compiler optimizations to the existing ones.
- more targeted optimizations for assignment asm code, such as the following:
- subroutine calling convention? like: 1 byte arg -> pass in A, 2 bytes -> pass in A+Y, return value likewise.
- remove unreachable code after an exit(), return or goto
- can such parameter passing to subroutines be optimized to avoid copying?
- add a compiler option to not include variable initialization code (useful if the program is expected to run only once, such as a game)
the program will then rely solely on the values as they are in memory at the time of program startup.
- Also some library routines and code patterns could perhaps be optimized further
- can the parameter passing to subroutines be optimized to avoid copying?
- more optimizations on the language AST level
- more optimizations on the final assembly source level
- note: abandoned subroutine inlining because of problems referencing non-local stuff. Can't move everything around.
- note: subroutine inlining is abandoned because of problems referencing non-local stuff. Can't move everything around.
Eval stack redesign? (lot of work)

View File

@@ -86,10 +86,6 @@ main {
}
}
const uword screen_width = 320
const ubyte screen_height = 200
sub draw_lines() {
ubyte @zp i
for i in len(edgesFrom) -1 downto 0 {
@@ -97,10 +93,10 @@ main {
ubyte @zp vTo = edgesTo[i]
word @zp persp1 = 256 + rotatedz[vFrom]/256
word @zp persp2 = 256 + rotatedz[vTo]/256
graphics.line(rotatedx[vFrom] / persp1 + screen_width/2 as uword,
rotatedy[vFrom] / persp1 + screen_height/2 as ubyte,
rotatedx[vTo] / persp2 + screen_width/2 as uword,
rotatedy[vTo] / persp2 + screen_height/2 as ubyte)
graphics.line(rotatedx[vFrom] / persp1 + graphics.WIDTH/2 as uword,
rotatedy[vFrom] / persp1 + graphics.HEIGHT/2 as ubyte,
rotatedx[vTo] / persp2 + graphics.WIDTH/2 as uword,
rotatedy[vTo] / persp2 + graphics.HEIGHT/2 as ubyte)
}
}
}
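For reference, the fixed-point perspective divide that the loop above feeds into graphics.line() can be written out in Python. WIDTH and HEIGHT stand in for the new graphics.WIDTH/graphics.HEIGHT constants; the rotated coordinates are assumed to be signed word values as in the example, and Python's floor division is used as a close stand-in for prog8's signed word division.

WIDTH, HEIGHT = 320, 200          # graphics.WIDTH / graphics.HEIGHT

def project(rx, ry, rz):
    # persp grows with depth, so distant vertices are pulled toward the centre
    persp = 256 + rz // 256
    screen_x = rx // persp + WIDTH // 2
    screen_y = ry // persp + HEIGHT // 2
    return screen_x, screen_y

assert project(0, 0, 0) == (160, 100)   # a vertex at the origin maps to the bitmap centre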

View File

@@ -23,10 +23,10 @@ main {
sub draw_lines() {
ubyte i
for i in 0 to 255 step 4 {
uword x1 = ((320-256)/2 as uword) + sin8u(i)
uword y1 = (200-128)/2 + cos8u(i)/2
uword x2 = ((320-64)/2 as uword) + sin8u(i)/4
uword y2 = (200-64)/2 + cos8u(i)/4
uword x1 = ((graphics.WIDTH-256)/2 as uword) + sin8u(i)
uword y1 = (graphics.HEIGHT-128)/2 + cos8u(i)/2
uword x2 = ((graphics.WIDTH-64)/2 as uword) + sin8u(i)/4
uword y2 = (graphics.HEIGHT-64)/2 + cos8u(i)/4
graphics.line(x1, lsb(y1), x2, lsb(y2))
}
}
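As a cross-check of the centring arithmetic above, the same endpoint expressions can be mimicked in Python. sin8u/cos8u are assumed to return an unsigned 8-bit sine/cosine (0..255) over a 0..255 angle, which is how prog8's byte sine routines behave; with that, every endpoint stays inside the 320x200 bitmap.

import math

WIDTH, HEIGHT = 320, 200          # graphics.WIDTH / graphics.HEIGHT

def sin8u(i):                     # assumed: unsigned 8-bit sine, one full period over i = 0..255
    return int((math.sin(i * 2 * math.pi / 256) + 1) * 127.5)

def cos8u(i):                     # assumed: unsigned 8-bit cosine, same scaling
    return int((math.cos(i * 2 * math.pi / 256) + 1) * 127.5)

def endpoints(i):
    # same expressions as the loop above, with integer division
    x1 = (WIDTH - 256) // 2 + sin8u(i)            # 32..287
    y1 = (HEIGHT - 128) // 2 + cos8u(i) // 2      # 36..163
    x2 = (WIDTH - 64) // 2 + sin8u(i) // 4        # 128..191
    y2 = (HEIGHT - 64) // 2 + cos8u(i) // 4       # 68..131
    return x1, y1, x2, y2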

View File

@@ -8,25 +8,25 @@ main {
sub start() {
decisionOver2 += 2*yy+1 ; TODO why is the +1 not converted to decisionOver2++ separately?
ubyte v = 1
@($c000+v) = 10
; cx16.screen_set_mode(128)
txt.print_ub(@($c001))
txt.chrout('\n')
ubyte width = txt.width()
ubyte height = txt.height()
@($c000+v) ++
txt.print_ub(@($c001))
txt.chrout('\n')
ubyte x
@($c000+v) += 10
txt.print_ub(@($c001))
txt.chrout('\n')
@($c000+v) *= 10
txt.print_ub(@($c001))
txt.chrout('\n')
; @($c000) *= 99 ; TODO implement
repeat 999 {
ubyte xpos = rnd() % (width-1)
txt.setcc(xpos, 0, 81, 6)
ubyte ypos = rnd() % (height-1)+1
txt.setcc(width-1, ypos, 81, 2)
txt.scroll_left(true)
txt.scroll_down(true)
repeat 2000 {
x++
}
}
}
}
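The direct-memory operations exercised at the top of start() (@(addr) = / ++ / += / *=) all reduce to a read-modify-write of one byte with 8-bit wraparound. A tiny Python model of that sequence, assuming v = 1 so the target address is $c001, prints 10, 11, 21 and 210, matching what the txt.print_ub calls should show.

memory = bytearray(65536)            # flat 64K address space, initially zero
v = 1
addr = 0xC000 + v                    # $c001, the byte the test reads back

memory[addr] = 10                                  # @($c000+v) = 10
print(memory[addr])                                # 10
memory[addr] = (memory[addr] + 1) & 0xFF           # @($c000+v) ++
print(memory[addr])                                # 11
memory[addr] = (memory[addr] + 10) & 0xFF          # @($c000+v) += 10
print(memory[addr])                                # 21
memory[addr] = (memory[addr] * 10) & 0xFF          # @($c000+v) *= 10
print(memory[addr])                                # 210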