; 4cade/src/fx.lib.a

;license:MIT
;(c) 2024 by 4am
;
; common routines used by graphic effects
;
; Note: launcher code can call these routines directly. However,
; graphic effects are assembled as separate targets and must call
; these routines indirectly via the vectors defined in constants.a,
; e.g. iBuildHGRTables instead of BuildHGRTables.
;
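; For example (a sketch of the calling convention described above):
;     jsr   BuildHGRTables         ; from launcher code (same build target)
;     jsr   iBuildHGRTables        ; from a graphic effect (vector defined in constants.a)
;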
; Public functions:
; - WaitForKeyWithTimeout
; - BuildHGRTables
; - BuildHGRMirrorTables
; - BuildHGRMirrorCols
; - BuildHGRDitherMasks
; - BuildHGRSparseBitmasks1Bit
; - BuildDHGRSparseBitmasks1Bit
; - ReverseCoordinates1Bit
; - RippleCoordinates1Bit
; - RippleCoordinates1Bit2
; - RippleCoordinates1Bit3
; - RippleCoordinates1Bit4
; - BuildHGRSparseBitmasks2Bit
; - ReverseCoordinates2Bit
; - RippleCoordinates2Bit
; - SetupPrecomputed3Bit
; - ReverseCoordinates3Bit
; - RippleCoordinates3Bit
WaitForKeyWithTimeout
; in: A = timeout length (like standard $FCA8 wait routine)
; out: A clobbered (its exit value is meaningless: not reliably 0 when a key is pressed, and not the key itself)
; X/Y preserved
sec
@wait1 pha
@wait2 sbc #1
bne @wait2
pla
bit KBD
bmi @exit
sbc #1
bne @wait1
@exit rts
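; Usage sketch (the #$80 delay value is an arbitrary example, not a project constant):
;     lda   #$80                   ; same scale as the monitor WAIT routine at $FCA8
;     jsr   WaitForKeyWithTimeout
;     bit   KBD                    ; any keypress is still pending; caller reads/clears it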
; based on routine by John Brooks
; posted on comp.sys.apple2 on 2018-07-11
; https://groups.google.com/d/msg/comp.sys.apple2/v2HOfHOmeNQ/zD76fJg_BAAJ
BuildHGRTables
; out: populates tables at $0201 (hgrlo) and $0301 (hgrhi)
; A clobbered
; X=$C0 (important! some callers rely on this)
; Z=1
; Y preserved
ldx #0
- txa
and #$F8
bpl +
ora #5
+ asl
bpl +
ora #5
+ asl
asl
sta hgrlo, x
txa
and #7
rol
asl hgrlo, x
rol
ora #$20
sta hgrhi, x
inx
cpx #$C0
bne -
rts
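; Typical use of the tables (a sketch; 'row'/'col' and the $FE/$FF scratch bytes are
; placeholders chosen for illustration, not locations reserved by this library):
;     ldy   row                    ; row = $00..$BF
;     lda   hgrlo, y
;     sta   $FE
;     lda   hgrhi, y               ; high byte already includes the $20 page offset
;     sta   $FF
;     ldy   col                    ; col = $00..$27
;     lda   ($FE), y               ; byte at (col, row) on hi-res page 1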
BuildHGRMirrorTables
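; out: populates hgrlomirror and hgr1himirror such that entry i holds the base
;      address of HGR row ($BF - i), i.e. the vertically mirrored row
;      A/X/Y clobbered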
ldx #$C0
ldy #0
- tya
and #$F8
bpl +
ora #5
+ asl
bpl +
ora #5
+ asl
asl
sta hgrlomirror-1, x
tya
and #7
rol
asl hgrlomirror-1, x
rol
ora #$20
sta hgr1himirror-1, x
iny
dex
bne -
rts
BuildHGRMirrorCols
; in: none
; out: mirror_cols populated with a lookup table mapping y to $27-y for y in $00..$27
; all registers and flags clobbered
ldx #$27
ldy #$00
- tya
sta mirror_cols, x
iny
dex
bpl -
rts
BuildHGRDitherMasks
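; out: dithermasks ($00-$2B) filled with a repeating 4-byte dither pattern
;      no_masks ($00-$2B) filled with $FF (all bits set, i.e. no masking)
;      A/Y clobbered, X preserved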
ldy #40
- lda #%10110011
sta dithermasks, y
lda #%11100110
sta dithermasks+1, y
lda #%11001100
sta dithermasks+2, y
lda #%10011001
sta dithermasks+3, y
dey
dey
dey
dey
bpl -
ldy #43
lda #$FF
- sta no_masks, y
dey
bpl -
rts
BuildHGRSparseBitmasks1Bit
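; out: copymasks1bit and mirror_copymasks1bit populated at $20-byte intervals
;      with single-pixel HGR bitmasks (high/palette bit always set); the mirror
;      table holds the same masks in reverse order
;      A clobbered, X/Y preserved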
lda #%10000001
sta copymasks1bit
sta mirror_copymasks1bit+$C0
lda #%10000010
sta copymasks1bit+$20
sta mirror_copymasks1bit+$A0
lda #%10000100
sta copymasks1bit+$40
sta mirror_copymasks1bit+$80
lda #%10001000
sta copymasks1bit+$60
sta mirror_copymasks1bit+$60
lda #%10010000
sta copymasks1bit+$80
sta mirror_copymasks1bit+$40
lda #%10100000
sta copymasks1bit+$A0
sta mirror_copymasks1bit+$20
lda #%11000000
sta copymasks1bit+$C0
sta mirror_copymasks1bit
rts
BuildDHGRSparseBitmasks1Bit
; out: X=0
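;      clears the 256 bytes at copymasks1bit in both main and auxiliary memory
;      (writes toggled via RAMWRTON/RAMWRTOFF at $C005/$C004), then fills in the
;      1-bit DHGR copy masks, split between the two banks; main-memory writes
;      are re-enabled before returning
;      A clobbered, Y preserved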
ldx #$00
txa
- sta copymasks1bit, x
sta $C005
sta copymasks1bit, x
sta $C004
inx
bne -
; X=0
lda #%00000111
sta copymasks1bit+$80
sta mirror_copymasks1bit+$40
lda #%00011000
sta copymasks1bit+$A0
sta mirror_copymasks1bit+$20
lda #%01100000
sta copymasks1bit+$C0
sta mirror_copymasks1bit
sta $C005
lda #%10000011
sta copymasks1bit
sta mirror_copymasks1bit+$C0
lda #%10001100
sta copymasks1bit+$20
sta mirror_copymasks1bit+$A0
lda #%10110000
sta copymasks1bit+$40
sta mirror_copymasks1bit+$80
lda #%11000000
sta copymasks1bit+$60
sta mirror_copymasks1bit+$60
sta $C004
rts
ReverseCoordinates1Bit
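; reverses the list of 2-byte coordinate pairs at Coordinates1Bit in place
; (first pair swapped with last pair, and so on inward)
; A/Y and $F0-$F3 clobbered, X preserved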
ldy #0 ; <Coordinates1Bit
sty $f0
lda #>Coordinates1Bit
sta $f1
lda #<(EndCoordinates1Bit - 2)
sta $f2
lda #>(EndCoordinates1Bit - 2)
sta $f3
clc
!byte $24 ; BIT zp opcode: hides the following 1-byte sec, so the first pass enters the swap loop with carry clear
- sec
-- lda ($f0), y
pha
lda ($f2), y
sta ($f0), y
pla
sta ($f2), y
iny
bcc -
ldy #0
!byte $24
- clc
inc $f0
bne +
inc $f1
+ lda $f1
eor #>(Coordinates1Bit + $1A40)
bne +
lda $f0
eor #<(Coordinates1Bit + $1A40)
beq ++
+ lda $f2
bne +
dec $f3
+ dec $f2
bcs -
bcc -- ; always
++ rts
!zone {
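; RippleCoordinates1Bit and its 2/3/4 variants pseudo-randomly reorder the
; Coordinates1Bit list in place by repeatedly swapping entries; the numeric
; suffix selects how many times the doubling step (aslmod, below) is applied
; per swap, patched into the jsr at @jsr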
RippleCoordinates1Bit4
lda #<aslmod4
+HIDE_NEXT_2_BYTES
RippleCoordinates1Bit3
lda #<aslmod3
+HIDE_NEXT_2_BYTES
RippleCoordinates1Bit2
lda #<aslmod2
+HIDE_NEXT_2_BYTES
RippleCoordinates1Bit
lda #<aslmod
sta @jsr+1
lda #2 ; <(Coordinates1Bit + 2)
sta $f0
ldy #0
sty $f1
lda #$16 ; <(Coordinates1Bit + 22)
sta $f2
sty $f3
lda #$1f
sta $ee
lda #$0d
sta $ef
lda Coordinates1Bit + 2
sta $e0
lda Coordinates1Bit + 3
sta $e1
lda Coordinates1Bit + 22
sta $e2
lda Coordinates1Bit + 23
sta $e3
--- ldx #4
-- ldy $ee, x
lda $ef, x
@jsr jsr aslmod ; SMC on entry
sty $ee, x
sta $ef, x
sty $ec
clc
adc #>Coordinates1Bit
sta $ed
ldy #0
!byte $24
- sec
lda ($ec), y
pha
lda $de, x
sta ($ec), y
pla
sta $de, x
inx
iny
bcc -
dex
dex
dex
dex
bne --
dec $ee
bne ---
dec $ef
bpl ---
bmi exit ; always branches
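; aslmod advances the 16-bit byte offset passed in A (high) / Y (low) by one
; doubling-with-wrap step within the Coordinates1Bit list ($3480 bytes);
; aslmod2/3/4 apply the step two, three, or four times via the fall-through
; jsr chain below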
aslmod4 jsr aslmod
aslmod3 jsr aslmod
aslmod2 jsr aslmod
aslmod cmp #$1A
!if (>aslmod != >aslmod4) {
!serious "aslmod entry points are not on the same page"
}
bcc +
bne ++
cpy #$40
bcc +
++ iny
+ pha
tya
asl
tay
pla
rol
cmp #$34
bcc exit
bne ++
cpy #$80
bcc exit
++ pha
tya
sbc #$80
tay
pla
sbc #$34
exit rts
}
BuildHGRSparseBitmasks2Bit
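; out: copymasks2bit and mirror_copymasks2bit populated at $20-byte intervals
;      with the 2-bit-wide HGR copy masks (high/palette bit always set); the
;      mirror table holds the same masks in reverse order
;      A clobbered, X/Y preserved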
lda #%10000011
sta copymasks2bit
sta mirror_copymasks2bit+$E0
lda #%10001100
sta copymasks2bit+$20
sta mirror_copymasks2bit+$C0
lda #%10110000
sta copymasks2bit+$40
sta mirror_copymasks2bit+$A0
lda #%11000000
sta copymasks2bit+$60
sta mirror_copymasks2bit+$80
lda #%10000001
sta copymasks2bit+$80
sta mirror_copymasks2bit+$60
lda #%10000110
sta copymasks2bit+$A0
sta mirror_copymasks2bit+$40
lda #%10011000
sta copymasks2bit+$C0
sta mirror_copymasks2bit+$20
lda #%11100000
sta copymasks2bit+$E0
sta mirror_copymasks2bit
rts
ReverseCoordinates2Bit
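; same as ReverseCoordinates1Bit but for the Coordinates2Bit list (termination
; is by counting the forward pointer's page crossings in X rather than by
; comparing addresses)
; A/X/Y and $F0-$F3 clobbered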
ldy #0 ; <Coordinates2Bit
sty $f0
lda #>Coordinates2Bit
sta $f1
lda #<(EndCoordinates2Bit - 2)
sta $f2
lda #>(EndCoordinates2Bit - 2)
sta $f3
ldx #$1E ; #$3C/2
clc
!byte $24
- sec
-- lda ($f0), y
pha
lda ($f2), y
sta ($f0), y
pla
sta ($f2), y
iny
bcc -
ldy #0
!byte $24
- clc
inc $f0
bne +
inc $f1
dex
beq ++
+ lda $f2
bne +
dec $f3
+ dec $f2
bcs -
bcc -- ; always branches
++ rts
RippleCoordinates2Bit
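; pseudo-randomly reorders the Coordinates2Bit list in place using the same
; doubling-with-wrap scheme as the 1-bit ripple (here @aslmod applies the step
; twice and wraps the offset's high byte at $3C), tracking its working pointers
; in zero page via @ptrtbl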
ldy #0
ldx #$33
- lda @ptrtbl, x
sta $c0, x
dex
bpl -
lda #$9b
sta $fe
iny
sty $ff
ldx #6
- lda Coordinates2Bit + 1, x
sta $7f, x
lda Coordinates2Bit + 9, x
sta $85, x
lda Coordinates2Bit + 17, x
sta $8b, x
lda Coordinates2Bit + 65, x
sta $9b, x
dex
bne -
lda Coordinates2Bit + 28
sta $92
lda Coordinates2Bit + 29
sta $93
ldx #4
- lda Coordinates2Bit + 33, x
sta $93, x
lda Coordinates2Bit + 41, x
sta $97, x
lda Coordinates2Bit + 83, x
sta $a1, x
dex
bne -
ldx #2
- lda Coordinates2Bit + 125, x
sta $a5, x
lda Coordinates2Bit + 131, x
sta $a7, x
lda Coordinates2Bit + 139, x
sta $a9, x
lda Coordinates2Bit + 169, x
sta $ab, x
lda Coordinates2Bit + 237, x
sta $ad, x
lda Coordinates2Bit + 2193, x
sta $af, x
lda Coordinates2Bit + 6581, x
sta $b1, x
dex
bne -
--- ldx #$34
-- lda $be, x
tay
ora $bf, x
beq +
lda $bf, x
jsr @aslmod
sty $be, x
sta $bf, x
sty $fc
clc
adc #>Coordinates2Bit
sta $fd
ldy #0
!byte $24
- sec
lda ($fc), y
pha
lda $7e, x
sta ($fc), y
pla
sta $7e, x
inx
iny
bcc -
dex
dex
+ dex
dex
bne --
ldy #1
lda $fe
eor #<(411 - 2)
beq +
ldy #9
eor #<(411 - 2) xor <(411 - 136)
bne ++
+
- ldx @zerotbl, y
sta $0, x
sta $1, x
dey
bpl -
++ dec $fe
bne ---
dec $ff
bpl ---
bmi @exit ; always branches
@aslmod jsr +
+ cmp #$1E
bcc +
iny
+ pha
tya
asl
tay
pla
rol
cmp #$3C
bcc @exit
sbc #$3C
@exit rts
@ptrtbl !word 2, 4, 6, 10, 12, 14, 18, 20
!word 22, 28, 34, 36, 42, 44, 66, 68
!word 70, 84, 86, 126, 132, 140, 170, 238
!word 2194, 6582
@zerotbl !byte $f0, $f2, $ca, $d2, $d8, $e0, $e2, $e6, $ea, $ee
SetupPrecomputed3Bit
; build the regular HGR lookup tables, then split them into three interleaved tables
; (hgrlo3a/3b/3c, hgrhi3a/3b/3c), each entry duplicated at offset +$40
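; out: hgrlo/hgrhi (via BuildHGRTables), hgrlo3a/3b/3c, hgrhi3a/3b/3c,
;      extra_cols, and copymasks3bit all populated
;      A/X/Y clobbered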
jsr BuildHGRTables
ldx #$BF
ldy #$3F
- lda hgrlo, x
sta hgrlo3c, y
sta hgrlo3c+$40, y
lda hgrhi, x
sta hgrhi3c, y
sta hgrhi3c+$40, y
dex
lda hgrlo, x
sta hgrlo3b, y
sta hgrlo3b+$40, y
lda hgrhi, x
sta hgrhi3b, y
sta hgrhi3b+$40, y
dex
lda hgrlo, x
sta hgrlo3a, y
sta hgrlo3a+$40, y
lda hgrhi, x
sta hgrhi3a, y
sta hgrhi3a+$40, y
dex
dey
bpl -
; build lookup table to get $20+y for y in $00..$07
ldx #$07
ldy #$27
- tya
sta extra_cols-$20, y
dey
dex
bpl -
; build sparse lookup tables for bitmasks
lda #%10000011
sta copymasks3bit
lda #%10001100
sta copymasks3bit+$20
lda #%10110000
sta copymasks3bit+$40
lda #%11000000
sta copymasks3bit+$60
lda #%10000001
sta copymasks3bit+$80
lda #%10000110
sta copymasks3bit+$A0
lda #%10011000
sta copymasks3bit+$C0
lda #%11100000
sta copymasks3bit+$E0
rts
ReverseCoordinates3Bit
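; same as ReverseCoordinates2Bit but for the Coordinates3Bit list (X counts the
; $28 page crossings of the forward pointer, half the list's $50-page span)
; A/X/Y and $F0-$F3 clobbered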
ldy #0 ; <Coordinates3Bit
sty $f0
lda #>Coordinates3Bit
sta $f1
lda #<(EndCoordinates3Bit - 2)
sta $f2
lda #>(EndCoordinates3Bit - 2)
sta $f3
ldx #$28 ; #$50/2
clc
!byte $24
- sec
-- lda ($f0), y
pha
lda ($f2), y
sta ($f0), y
pla
sta ($f2), y
iny
bcc -
ldy #0
!byte $24
- clc
inc $f0
bne +
inc $f1
dex
beq ++
+ lda $f2
bne +
dec $f3
+ dec $f2
bcs -
bcc -- ; always branches
++ rts
RippleCoordinates3Bit
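; pseudo-randomly reorders the Coordinates3Bit list in place, following the
; same doubling-with-wrap scheme as RippleCoordinates1Bit (offsets here wrap
; at $50 pages in @aslmod below); working pointers are preloaded from @ripplezp
; into zero page starting at $E0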
ldx #$1B
- lda @ripplezp, x
sta $e0, x
dex
bpl -
--- ldx #$0c
-- ldy $ee, x
lda $ef, x
jsr @aslmod
sty $ee, x
sta $ef, x
sty $ec
clc
adc #>Coordinates3Bit
sta $ed
ldy #0
!byte $24
- sec
lda ($ec), y
pha
lda $de, x
sta ($ec), y
pla
sta $de, x
inx
iny
bcc -
dex
dex
dex
dex
bne --
dec $ee
bne ---
dec $ef
bpl ---
bmi @exit ; always branches
@aslmod jsr +
+ cmp #$28
bcc +
iny
+ pha
tya
asl
tay
pla
rol
cmp #$50
bcc @exit
sbc #$50
@exit rts
@ripplezp
!byte $1F,$F3,$20,$F3,$20,$14,$20,$D3
!byte $1E,$F3,$1F,$54,$00,$00,$AA,$06
!byte $02,$00,$04,$00,$06,$00,$0C,$00
!byte $16,$00,$1A,$00