Introduce a more general notion of update priority, used to increase the
weight of diffs that persist across multiple frames.

For each frame, zero out the update priority of bytes that no longer have
a pending diff, and add the edit distance of the remaining diffs.

Zero these out as opcodes are retired.
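
Concretely, the per-frame bookkeeping amounts to something like the
following sketch (a minimal illustration; the (32, 256) array shape
mirrors the diff below, and accumulate_frame/retire are made-up names):

    import numpy as np

    # One priority per screen byte: 32 pages x 256 offsets, as below.
    update_priority = np.zeros((32, 256), dtype=int)

    def accumulate_frame(diff_weights: np.ndarray) -> None:
        """diff_weights[page, offset] is the edit distance between the
        current and target byte; 0 means no pending diff."""
        # Bytes that have resolved themselves lose accumulated priority.
        update_priority[diff_weights == 0] = 0
        # Bytes that still differ gain this frame's edit distance, so
        # diffs persisting across frames grow steadily in weight.
        update_priority += diff_weights

    def retire(page: int, offset: int) -> None:
        # Once an opcode has stored the target byte, priority is settled.
        update_priority[page, offset] = 0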

Replace Hamming distance with the Damerau-Levenshtein distance between
the encoded pixel colours of the byte, e.g. 0x2A --> "GGG0", taking the
half-pixel into account (see the worked example below).

This has several benefits over Hamming distance of the raw bit patterns:
- transposed pixels are weighted less (edit distance 1, not 2+ for
  Hamming)
- coloured pixels are weighted equally to white pixels (not half as
  much)
- changes to the palette bit that flip multiple pixel colours are
  weighted accordingly
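
As a worked example of the new weighting (using the same
similarity.damerau library the diff below imports; the colour strings
are those produced by the byte_to_colour_string helper):

    from similarity.damerau import Damerau

    d = Damerau()

    # 0x2A = 0b00101010 at an even column: palette bit (0x80) clear, all
    # three pixel pairs are 0b10 = green, half-pixel bit (0x40) clear,
    # so the byte encodes as "GGG0".

    # Transposed pixels: 0x06 -> "GVK0" and 0x09 -> "VGK0" differ by one
    # adjacent swap, so the edit distance is 1 ...
    print(d.distance("GVK0", "VGK0"))  # 1
    # ... whereas Hamming weight of the bit patterns (0x06 ^ 0x09 = 0x0f)
    # is 4.

    # Flipping only the palette bit (0x2A -> 0xAA, Hamming weight 1)
    # recolours all three pixels: "GGG0" -> "BBB0" costs 3 edits.
    print(d.distance("GGG0", "BBB0"))  # 3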

While I'm here, the RLE opcode now emits run_length - 1 so that we can
encode runs of up to 256 bytes.
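
That is, the single operand byte stores run_length - 1, mapping runs of
1..256 onto 0x00..0xff. A sketch of the convention, with illustrative
names:

    def encode_run_length(run_length: int) -> int:
        # Runs of 1..256 bytes fit in one operand byte, 0x00..0xff.
        assert 1 <= run_length <= 256
        return run_length - 1

    def decode_run_length(operand: int) -> int:
        return operand + 1

    assert decode_run_length(encode_run_length(256)) == 256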
kris 2019-03-04 23:09:00 +00:00
parent d3522c817f
commit 6e2c83c1e5
3 changed files with 113 additions and 38 deletions

opcodes.py

@@ -1,4 +1,5 @@
 import enum
+import numpy as np
 from typing import Iterator, Tuple

 import screen
@@ -20,12 +21,13 @@ class State:
     """Represents virtual machine state."""

     def __init__(self, cycle_counter: CycleCounter,
-                 memmap: screen.MemoryMap):
+                 memmap: screen.MemoryMap, update_priority: np.array):
         self.page = 0x20
         self.content = 0x7f
         self.memmap = memmap
         self.cycle_counter = cycle_counter
+        self.update_priority = update_priority

     def emit(self, last_opcode: "Opcode", opcode: "Opcode") -> Iterator[int]:
         cmd = opcode.emit_command(last_opcode, opcode)
@@ -127,6 +129,8 @@ class Store(Opcode):
     def apply(self, state):
         state.memmap.write(state.page, self.offset, state.content)
+        # TODO: screen page
+        state.update_priority[state.page - 32, self.offset] = 0


 class SetContent(Opcode):
@@ -193,7 +197,7 @@ class RLE(Opcode):
     def emit_data(self):
         # print("  RLE @ %02x * %02x" % (self.start_offset, self.run_length))
         yield self.start_offset
-        yield self.run_length
+        yield self.run_length - 1

     @property
     def cycles(self):
@@ -201,10 +205,10 @@ class RLE(Opcode):
     def apply(self, state):
         for i in range(self.run_length):
-            state.memmap.write(
-                state.page, (self.start_offset + i) & 0xff,
-                state.content
-            )
+            offset = (self.start_offset + i) & 0xff
+            state.memmap.write(state.page, offset, state.content)
+            # TODO: screen page
+            state.update_priority[state.page - 32, offset] = 0


 class Tick(Opcode):

scheduler.py

@@ -31,12 +31,12 @@ class HeuristicPageFirstScheduler(OpcodeScheduler):
         page_weights = collections.defaultdict(int)
         page_content_weights = {}
         for ch in changes:
-            xor_weight, page, offset, content, run_length = ch
+            update_priority, page, offset, content, run_length = ch
             data.setdefault((page, content), list()).append(
-                (xor_weight, run_length, offset))
-            page_weights[page] += xor_weight
+                (update_priority, run_length, offset))
+            page_weights[page] += update_priority
             page_content_weights.setdefault(page, collections.defaultdict(
-                int))[content] += xor_weight
+                int))[content] += update_priority

         # Weight each page and content within page by total xor weight and
         # traverse in this order, with a random nonce so that we don't
@@ -87,12 +87,12 @@ class HeuristicContentFirstScheduler(OpcodeScheduler):
         content_weights = collections.defaultdict(int)
         content_page_weights = {}
         for ch in changes:
-            xor_weight, page, offset, content, run_length = ch
+            update_priority, page, offset, content, run_length = ch
             data.setdefault((page, content), list()).append(
-                (xor_weight, run_length, offset))
-            content_weights[content] += xor_weight
+                (update_priority, run_length, offset))
+            content_weights[content] += update_priority
             content_page_weights.setdefault(content, collections.defaultdict(
-                int))[page] += xor_weight
+                int))[page] += update_priority

         # Weight each page and content within page by total xor weight and
         # traverse in this order
@@ -129,7 +129,7 @@ class OldHeuristicPageFirstScheduler(OpcodeScheduler):
     """Group by page first then content byte.

    This uses a deterministic order of pages and content bytes, and ignores
-    xor_weight altogether
+    update_priority altogether
    """

    # Median similarity: 0.854613 ( @ 15 fps, 10M output)
@@ -141,7 +141,7 @@ class OldHeuristicPageFirstScheduler(OpcodeScheduler):
     def schedule(self, changes):
         data = {}
         for ch in changes:
-            xor_weight, page, offset, content, run_length = ch
+            update_priority, page, offset, content, run_length = ch
             data.setdefault(page, {}).setdefault(content, set()).add(
                 (run_length, offset))
@@ -234,7 +234,7 @@ class OldHeuristicPageFirstScheduler(OpcodeScheduler):
 # # Heuristic: group by content byte first then page
 # data = {}
 # for ch in changes:
-#     xor_weight, page, offset, content = ch
+#     update_priority, page, offset, content = ch
 #     data.setdefault(content, {}).setdefault(page, set()).add(offset)
 #
 # for content, page_offsets in data.items():

video.py

@@ -1,6 +1,9 @@
 import functools
 from typing import Iterator, Tuple, Iterable

+import numpy as np
+from similarity.damerau import Damerau
+
 import opcodes
 import scheduler
 import screen
@@ -14,6 +17,56 @@ def hamming_weight(n):
     return n


+@functools.lru_cache(None)
+def edit_weight(a: int, b: int, is_odd_offset: bool):
+    d = Damerau()
+
+    a_pixels = byte_to_colour_string(a, is_odd_offset)
+    b_pixels = byte_to_colour_string(b, is_odd_offset)
+
+    return d.distance(a_pixels, b_pixels)
+
+
+@functools.lru_cache(None)
+def byte_to_colour_string(b: int, is_odd_offset: bool) -> str:
+    pixels = []
+
+    idx = 0
+    if is_odd_offset:
+        pixels.append("01"[b & 0x01])
+        idx += 1
+
+    # K = black
+    # G = green
+    # V = violet
+    # W = white
+    # O = orange
+    # B = blue
+    palettes = (
+        (
+            "K",  # 0x00
+            "V",  # 0x01
+            "G",  # 0x10
+            "W"   # 0x11
+        ), (
+            "K",  # 0x00
+            "O",  # 0x01
+            "B",  # 0x10
+            "W"   # 0x11
+        )
+    )
+    palette = palettes[b & 0x80 != 0]
+
+    for _ in range(3):
+        pixel = palette[(b >> idx) & 0b11]
+        pixels.append(pixel)
+        idx += 2
+
+    if not is_odd_offset:
+        pixels.append("01"[b & 0x40 != 0])
+        idx += 1
+
+    return "".join(pixels)
+
+
 class Video:
     """Apple II screen memory map encoding a bitmapped frame."""
@@ -32,7 +85,11 @@ class Video:
         self.cycle_counter = opcodes.CycleCounter()

-        self.state = opcodes.State(self.cycle_counter, self.memory_map)
+        # Accumulates pending edit weights across frames
+        self.update_priority = np.zeros((32, 256), dtype=np.int)
+
+        self.state = opcodes.State(
+            self.cycle_counter, self.memory_map, self.update_priority)

         self.frame_rate = frame_rate
         self.stream_pos = 0
@@ -43,7 +100,8 @@ class Video:
         self._last_op = opcodes.Nop()

-    def encode_frame(self, frame: screen.MemoryMap) -> Iterator[opcodes.Opcode]:
+    def encode_frame(self, target: screen.MemoryMap) -> Iterator[
+            opcodes.Opcode]:
         """Update to match content of frame within provided budget.

         Emits encoded byte stream for rendering the image.
@@ -66,14 +124,8 @@ class Video:
         it optimizes the bytestream.
         """

-        # Target screen memory map for new frame
-        target = frame
-
-        # Sort by highest xor weight and take the estimated number of change
-        # operations
         # TODO: changes should be a class
-        changes = sorted(list(self._index_changes(self.memory_map, target)),
-                         reverse=True)
+        changes = self._index_changes(self.memory_map, target)

         yield from self.scheduler.schedule(changes)
@@ -92,7 +144,7 @@ class Video:
         num_changes_in_run = 0

         # Total weight of differences accumulated in run
-        total_xor_in_run = 0
+        total_update_priority_in_run = 0

         def end_run():
             # Decide if it's worth emitting as a run vs single stores
@@ -109,7 +161,9 @@ class Video:
                 # )
                 # print(run)
                 yield (
-                    total_xor_in_run, start_offset, cur_content, run_length)
+                    total_update_priority_in_run, start_offset, cur_content,
+                    run_length
+                )
             else:
                 for ch in run:
                     if ch[0]:
@@ -126,7 +180,7 @@ class Video:
                 run = []
                 run_length = 0
                 num_changes_in_run = 0
-                total_xor_in_run = 0
+                total_update_priority_in_run = 0
                 cur_content = tc

             if cur_content is None:
@@ -136,7 +190,7 @@ class Video:
             run.append((bd, offset, tc, 1))
             if bd:
                 num_changes_in_run += 1
-                total_xor_in_run += bd
+                total_update_priority_in_run += bd

         if run:
             # End of run
@@ -149,22 +203,39 @@ class Video:
     ) -> Iterator[Tuple[int, int, int, int, int]]:
         """Transform encoded screen to sequence of change tuples.

-        Change tuple is (xor_weight, page, offset, content, run_length)
+        Change tuple is (update_priority, page, offset, content, run_length)
         """
-        # TODO: don't use 256 bytes if XMAX is smaller, or we may compute RLE
-        # (with bit errors) over the full page!
-        diff_weights = hamming_weight(source.page_offset ^ target.page_offset)
+        diff_weights = np.zeros((32, 256), dtype=np.uint8)
+
+        it = np.nditer(
+            source.page_offset ^ target.page_offset,
+            flags=['multi_index'])
+        while not it.finished:
+            diff_weights[it.multi_index] = edit_weight(
+                source.page_offset[it.multi_index],
+                target.page_offset[it.multi_index],
+                it.multi_index[1] % 2 == 1
+            )
+            it.iternext()
+
+        # Clear any update priority entries that have resolved themselves
+        # with new frame
+        self.update_priority[diff_weights == 0] = 0
+        self.update_priority += diff_weights

         for page in range(32):
             for change in self._index_page(
-                    diff_weights[page], target.page_offset[page]):
-                total_xor_in_run, start_offset, target_content, run_length = \
-                    change
+                    self.update_priority[page], target.page_offset[page]):
+                (
+                    total_priority_in_run, start_offset, target_content,
+                    run_length
+                ) = change

                 # TODO: handle screen page
                 yield (
-                    total_xor_in_run, page + 32, start_offset,
+                    total_priority_in_run, page + 32, start_offset,
                     target_content, run_length
                 )