import functools
import heapq
import os
import queue
import random
import subprocess
import threading
from typing import Iterator, List, Tuple

import numpy as np
import skvideo.io
import weighted_levenshtein
from PIL import Image

import screen


def hamming_weight(n: int) -> int:
    """Compute the Hamming weight (number of set bits) of an 8-bit int."""
    n = (n & 0x55) + ((n & 0xAA) >> 1)
    n = (n & 0x33) + ((n & 0xCC) >> 2)
    n = (n & 0x0F) + ((n & 0xF0) >> 4)
    return n

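# Sanity check of the bit-twiddling above (pairwise sums, then nibble sums,
# then the whole byte), e.g.:
#
#   hamming_weight(0b11010010) == 4
#   hamming_weight(0xFF) == 8
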
# TODO: what about increasing transposition cost?  Might be better to have
# any pixel at the right place even if the wrong colour?

substitute_costs = np.ones((128, 128), dtype=np.float64)

# Penalty for turning on/off a black bit
for c in "01GVWOB":
    substitute_costs[(ord('K'), ord(c))] = 5
    substitute_costs[(ord(c), ord('K'))] = 5

# Penalty for changing colour (the np.ones default is already 1; this loop
# just makes the intent explicit)
for c in "01GVWOB":
    for d in "01GVWOB":
        substitute_costs[(ord(c), ord(d))] = 1
        substitute_costs[(ord(d), ord(c))] = 1

# Insertions/deletions would change the length of the pixel string, which
# never happens when comparing two screen bytes, so make them prohibitive.
insert_costs = np.ones(128, dtype=np.float64) * 1000
delete_costs = np.ones(128, dtype=np.float64) * 1000
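
# With the costs above:
#
#   substitute_costs[ord('K'), ord('G')] == 5.0   # black <-> colour
#   substitute_costs[ord('G'), ord('V')] == 1.0   # colour <-> colour
#   insert_costs[ord('G')] == 1000.0              # length changes forbidden
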

@functools.lru_cache(None)
def edit_weight(a: int, b: int, is_odd_offset: bool) -> np.int64:
    """Damerau-Levenshtein distance between the pixel strings of two bytes."""
    a_pixels = byte_to_colour_string(a, is_odd_offset)
    b_pixels = byte_to_colour_string(b, is_odd_offset)

    dist = weighted_levenshtein.dam_lev(
        a_pixels, b_pixels,
        insert_costs=insert_costs,
        delete_costs=delete_costs,
        substitute_costs=substitute_costs,
    )
    return np.int64(dist)

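# For example, edit_weight(b, b, False) == 0 for any byte b; two bytes whose
# pixel strings are "KGG0" and "KVG0" differ by one colour substitution and
# get weight 1; turning a black pixel into a colour (or back) costs 5.
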

@functools.lru_cache(None)
def byte_to_colour_string(b: int, is_odd_offset: bool) -> str:
    """Convert an HGR screen byte into a 4-character colour string."""
    pixels = []

    idx = 0
    if is_odd_offset:
        pixels.append("01"[b & 0x01])
        idx += 1

    # K = black
    # G = green
    # V = violet
    # W = white
    # O = orange
    # B = blue
    palettes = (
        (
            "K",  # 0b00
            "V",  # 0b01
            "G",  # 0b10
            "W"   # 0b11
        ), (
            "K",  # 0b00
            "B",  # 0b01
            "O",  # 0b10
            "W"   # 0b11
        )
    )
    # The palette (high) bit selects green/violet or orange/blue
    palette = palettes[(b & 0x80) != 0]

    for _ in range(3):
        pixel = palette[(b >> idx) & 0b11]
        pixels.append(pixel)
        idx += 2

    if not is_odd_offset:
        pixels.append("01"[(b & 0x40) != 0])
        idx += 1

    return "".join(pixels)

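# Worked example: 0x2A == 0b00101010 at an even offset selects the
# green/violet palette (high bit 0) and decodes as pixel pairs 0b10, 0b10,
# 0b10 plus the leftover half-pixel bit (0x40, here off), so
# byte_to_colour_string(0x2A, False) == "GGG0".
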

class Video:
    """Encodes an input video stream as a sequence of Apple II screen updates."""

    CLOCK_SPEED = 1024 * 1024  # CPU cycles per second, ~1MHz

    def __init__(
            self,
            filename: str):
        self.filename = filename  # type: str

        self._reader = skvideo.io.FFmpegReader(filename)

        # Compute frame rate from input video
        data = skvideo.io.ffprobe(self.filename)['video']
        rate_data = data['@r_frame_rate'].split("/")  # e.g. 12000/1001
        self._input_frame_rate = float(rate_data[0]) / float(rate_data[1])

        self.cycles_per_frame = self.CLOCK_SPEED / self._input_frame_rate
        self.frame_number = 0

        # Initialize empty screen memory
        self.memory_map = screen.MemoryMap(
            screen_page=1)  # type: screen.MemoryMap

        # Accumulates pending edit weights across frames, per (page, offset)
        self.update_priority = np.zeros((32, 256), dtype=np.int64)
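
        # Note on the pacing arithmetic: a 30 fps input, for example, gives
        # cycles_per_frame = 1048576 / 30 ~= 34,952 CPU cycles in which to
        # emit screen updates for each frame.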

    def tick(self, cycles: int) -> bool:
        """Advance the frame counter if enough CPU cycles have elapsed."""
        if cycles > (self.cycles_per_frame * self.frame_number):
            self.frame_number += 1
            return True
        return False

    def _frame_grabber(self) -> Iterator[Image.Image]:
        for frame_array in self._reader.nextFrame():
            yield Image.fromarray(frame_array)

    def frames(self) -> Iterator[screen.MemoryMap]:
        """Encode input frames to HGR memory images using bmp2dhr.

        We do the encoding in a background thread to parallelize with the
        main encoder loop.
        """

        frame_dir = self.filename.split(".")[0]
        try:
            os.mkdir(frame_dir)
        except FileExistsError:
            pass

        q = queue.Queue(maxsize=10)

        def worker():
            for _idx, _frame in enumerate(self._frame_grabber()):
                outfile = "%s/%08dC.BIN" % (frame_dir, _idx)
                bmpfile = "%s/%08d.bmp" % (frame_dir, _idx)

                # Skip encoding if we already have output from a previous run
                try:
                    os.stat(outfile)
                except FileNotFoundError:
                    _frame = _frame.resize((280, 192))
                    _frame.save(bmpfile)

                    subprocess.call(
                        ["/usr/local/bin/bmp2dhr", bmpfile, "hgr", "D9"])

                    os.remove(bmpfile)

                _frame = np.fromfile(outfile, dtype=np.uint8)
                q.put(_frame)

            q.put(None)

        t = threading.Thread(target=worker)
        t.start()

        while True:
            frame = q.get()
            if frame is None:
                break

            yield screen.FlatMemoryMap(
                screen_page=1, data=frame).to_memory_map()
            q.task_done()

        t.join()
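
    # The bmp2dhr converter turns each 280x192 BMP into the 8KB HGR memory
    # image that is read back above as <frame>C.BIN; "hgr" selects the output
    # format and "D9" appears to select a dither mode (see the bmp2dhr
    # documentation for exact flag semantics).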

    def encode_frame(
            self, target: screen.MemoryMap
    ) -> Iterator[Tuple[int, int, List[int]]]:
        """Generate change tuples that update the screen to match target."""

        print("Mean pending error: %f" % self.update_priority.mean())

        yield from self._index_changes(self.memory_map, target)

    def _diff_weights(
            self,
            source: screen.MemoryMap,
            target: screen.MemoryMap
    ) -> np.ndarray:
        """Compute edit weights for each (page, offset) whose content differs."""
        diff_weights = np.zeros((32, 256), dtype=np.int64)

        it = np.nditer(
            source.page_offset ^ target.page_offset, flags=['multi_index'])
        while not it.finished:
            # If no diff, don't need to bother
            if not it[0]:
                it.iternext()
                continue

            diff_weights[it.multi_index] = edit_weight(
                source.page_offset[it.multi_index],
                target.page_offset[it.multi_index],
                it.multi_index[1] % 2 == 1
            )
            it.iternext()
        return diff_weights
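
    # The XOR above is only a cheap "did this byte change?" mask; where it is
    # nonzero, the cost recorded is the colour-aware edit weight of the byte
    # pair, not the number of differing bits.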

    def _heapify_priorities(self) -> List:
        """Build a heap of all pending (page, offset) updates by priority."""
        priorities = []
        it = np.nditer(self.update_priority, flags=['multi_index'])
        while not it.finished:
            priority = it[0]
            if not priority:
                it.iternext()
                continue

            page, offset = it.multi_index
            # Don't use deterministic order for page, offset
            nonce = random.random()
            heapq.heappush(priorities, (-priority, nonce, page, offset))
            it.iternext()

        return priorities
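
    # heapq is a min-heap, so priorities are pushed negated in order to pop
    # the highest-priority diff first; the random nonce tie-breaks equal
    # priorities so updates don't always sweep the screen in page/offset
    # order.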

    @functools.lru_cache(None)
    def _compute_delta(self, content, target, is_odd, old):
        """Error from storing content here, relative to the old error."""
        return edit_weight(content, target, is_odd) - old

    def _compute_error(self, page, content, target, old_error):
        """Find up to 3 offsets on page where storing content improves error."""
        offsets = []

        old_error_page = old_error[page]
        tpo = target.page_offset[page]

        page_priorities = [(-p, random.random(), o) for o, p in enumerate(
            self.update_priority[page]) if p]
        heapq.heapify(page_priorities)

        # Iterate in descending priority order and take the first 3 offsets
        # with negative delta
        while page_priorities:
            _, _, o = heapq.heappop(page_priorities)

            # If we store content at this offset, what is the difference
            # between this edit distance and the ideal target edit distance?
            delta = self._compute_delta(
                content, tpo[o], o % 2 == 1, old_error_page[o])

            # Getting further away from goal, no thanks!
            if delta >= 0:
                continue

            offsets.append(o)
            if len(offsets) == 3:
                break

        return offsets
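
    # Reusing one content byte at up to three extra offsets on the same page
    # lets a single 4-offset store opcode retire several pending diffs at
    # once; offsets whose error would increase (delta >= 0) are skipped.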

    def _index_changes(
            self,
            source: screen.MemoryMap,
            target: screen.MemoryMap
    ) -> Iterator[Tuple[int, int, List[int]]]:
        """Transform encoded screen to a sequence of change tuples.

        Change tuple is (page, content, [offset1, ..., offset4]).
        """

        diff_weights = self._diff_weights(source, target)

        # Clear any update priority entries that have resolved themselves
        # with the new frame
        self.update_priority[diff_weights == 0] = 0

        self.update_priority += diff_weights

        priorities = self._heapify_priorities()
        while priorities:
            _, _, page, offset = heapq.heappop(priorities)
            # Check whether we've already cleared this diff while processing
            # an earlier opcode
            if self.update_priority[page, offset] == 0:
                continue

            offsets = [offset]
            content = target.page_offset[page, offset]

            # Clear priority for the offset we're emitting
            self.update_priority[page, offset] = 0
            self.memory_map.page_offset[page, offset] = content

            # Need to find 3 more offsets to fill out this opcode
            for o in self._compute_error(page, content, target, diff_weights):
                offsets.append(o)
                # Clear priority for the offsets we're also emitting
                self.update_priority[page, o] = 0
                self.memory_map.page_offset[page, o] = content

            # Pad to 4 if we didn't find enough
            for _ in range(len(offsets), 4):
                offsets.append(offsets[0])

            # Pages are offset by 32 because HGR screen page 1 lives at
            # $2000, i.e. memory page 32
            yield (page + 32, content, offsets)

        # If we run out of things to do, pad forever
        content = target.page_offset[(0, 0)]
        while True:
            yield (32, content, [0, 0, 0, 0])
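
# Sketch of a typical driver loop (hypothetical; the real entry point lives
# elsewhere in this repo):
#
#   video = Video("input.mp4")
#   for frame in video.frames():
#       for page, content, offsets in video.encode_frame(frame):
#           ...  # serialize the change tuple into the output opcode stream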