Mirror of https://github.com/KrisKennaway/ii-vision.git, synced 2025-01-20 14:31:35 +00:00.
Read video frame rate and encode a new frame when the cycle count has ticked past the appropriate time.

- Optimize the frame encoding a bit; use int64 consistently to avoid casting.
- Fix a bug: when retiring an offset, also update our memory map with the new content, oops.
- If we run out of changes to index, keep emitting stores for content at page=32, offset=0 forever.
- Switch to a weighted D-L implementation so we can weight e.g. different substitutions differently (e.g. weighting diffs to/from black pixels differently than colour errors).
This commit is contained in:
commit 7db5c1c444 (parent aed439c0b3)
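Note on the weighted D-L switch described above: the weighted_levenshtein package accepts per-character ASCII cost tables, which is what lets substitutions to or from black pixels be charged more heavily than colour-to-colour errors. A minimal standalone sketch of that idea (the cost values and pixel-class characters follow the video.py diff below; the example strings are made up, and this is illustrative rather than the canonical implementation):

    import numpy as np
    from weighted_levenshtein import dam_lev

    # Pixel classes: K = black; G/V/W/O/B = colours; 0/1 = raw bits.
    # Substitutions to or from black (K) cost 5; all other substitutions
    # keep the default cost of 1.
    substitute_costs = np.ones((128, 128), dtype=np.float64)
    for c in "01GVWOB":
        substitute_costs[ord('K'), ord(c)] = 5
        substitute_costs[ord(c), ord('K')] = 5

    # Insertions/deletions would shift the whole pixel string, so make
    # them effectively forbidden relative to substitutions.
    insert_costs = np.ones(128, dtype=np.float64) * 1000
    delete_costs = np.ones(128, dtype=np.float64) * 1000

    # Black pixel flipped to white: weight 5
    print(dam_lev("KGVW", "WGVW", insert_costs=insert_costs,
                  delete_costs=delete_costs,
                  substitute_costs=substitute_costs))  # 5.0
    # Green changed to violet: weight 1
    print(dam_lev("KGVW", "KVVW", insert_costs=insert_costs,
                  delete_costs=delete_costs,
                  substitute_costs=substitute_costs))  # 1.0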
audio.py (1 changed line):

@@ -25,7 +25,6 @@ class Audio:
     def audio_stream(self):
         with audioread.audio_open(self.filename) as f:
             for buf in f.read_data(128 * 1024):
-                print(f.channels, f.samplerate, f.duration)

                 data = np.frombuffer(buf, dtype='int16').astype(
                     'float32').reshape((f.channels, -1), order='F')
main.py (16 changed lines):

@@ -1,18 +1,20 @@
 import frame_grabber
 import movie
 import opcodes
 import screen
 import video

-MAX_OUT = 10 * 1024 * 1024
-VIDEO_FPS = 30
+MAX_OUT = 100 * 1024 * 1024
+APPLE_FPS = 30


 def main():
-    filename = "Computer Chronicles - 06x05 - The Apple II.mp4"
+    filename = "apple_ii_forever.m4v"
+    # filename = "Computer Chronicles - 06x05 - The Apple II.mp4"
+    # filename = (
+    #     "Rick Astley - Never Gonna Give You Up (Official "
+    #     "Music Video).mp4"
+    # )

-    m = movie.Movie(filename, audio_normalization=3.0)
+    m = movie.Movie(filename, audio_normalization=2.0)

     with open("out.bin", "wb") as out:
         for bytes_out, b in enumerate(m.emit_stream(m.encode())):
movie.py (28 changed lines):

@@ -3,22 +3,18 @@
 from typing import Iterable, Iterator

 import audio
-import frame_grabber
 import opcodes
 import video


 class Movie:
-    def __init__(self, filename: str, audio_normalization:float=1.0):
+    def __init__(self, filename: str, audio_normalization: float = 1.0):
         self.filename = filename  # type: str
         self.audio = audio.Audio(
             filename, normalization=audio_normalization)  # type: audio.Audio
-        # TODO: get from input file
-        self.video = video.Video()  # type: video.Video
+        self.video = video.Video(filename)  # type: video.Video

-        self.ticks_per_video_frame = (
-            self.audio.sample_rate / self.video.frame_rate)
+        self.cycles = 0

         self.stream_pos = 0  # type: int

@@ -32,26 +28,16 @@ class Movie:

         self._last_op = opcodes.Nop()

-    def frames(self):
-        yield from frame_grabber.bmp2dhr_frame_grabber(self.filename)
-
     def encode(self) -> Iterator[opcodes.Opcode]:
-        ticks = 0
-        frames = 0
+        video_frames = self.video.frames()
         video_seq = None

-        video_frames = self.frames()
-
         for au in self.audio.audio_stream():
-            if ticks % self.ticks_per_video_frame == 0:
-                frames += 1
-                video_seq = self.video.encode_frame(next(video_frames))
-
-                print("Starting frame %d" % frames)
-                # TODO: compute similarity
-
-            ticks += 1
+            self.cycles += self.audio.cycles_per_tick
+            if self.video.tick(self.cycles):
+                print("Starting frame %d" % self.video.frame_number)
+                video_frame = next(video_frames)
+                video_seq = self.video.encode_frame(video_frame)

             # au has range -15 .. 16 (step=1)
             # Tick cycles are units of 2
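Note on the pacing in the new Movie.encode() loop: the Apple II clock is modelled as 1024 * 1024 cycles per second (see video.py below), so e.g. a 29.97 fps source works out to roughly 35,000 cycles per frame, and Video.tick() fires each time the running cycle count crosses the next frame boundary. A rough standalone sketch of just that logic (the 29.97 fps figure is an assumed example, not taken from the commit):

    # Standalone sketch of the cycle-counting frame pacing.
    CLOCK_HZ = 1024. * 1024   # Apple II clock rate used in video.py
    INPUT_FPS = 30000 / 1001  # assumed example: a 29.97 fps source

    cycles_per_frame = CLOCK_HZ / INPUT_FPS  # ~34988 cycles
    frame_number = 0

    def tick(cycles: int) -> bool:
        # True when the cycle count has ticked past the next frame time.
        global frame_number
        if cycles > cycles_per_frame * frame_number:
            frame_number += 1
            return True
        return False

    cycles = 0
    for _ in range(100000):
        cycles += 2  # tick cycles are units of 2, per movie.py
        if tick(cycles):
            print("encode frame %d at cycle %d" % (frame_number, cycles))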
video.py (290 changed lines):

@@ -1,13 +1,19 @@
 import functools
 import heapq
 import random
-from typing import Iterator, Tuple, Iterable
+import os
+import threading
+import queue
+import subprocess
+
+from typing import List, Iterator, Tuple, Iterable

 from PIL import Image
 import numpy as np
-from similarity.damerau import Damerau
+import skvideo.io
+import weighted_levenshtein

 import opcodes
 import scheduler
 import screen
@@ -19,14 +25,40 @@ def hamming_weight(n):
     return n


-@functools.lru_cache(None)
-def edit_weight(a: int, b: int, is_odd_offset: bool):
-    d = Damerau()
+# K G V W
+# O B
+
+error_substitute_costs = np.ones((128, 128), dtype=np.float64)
+
+# Penalty for turning on/off a black bit
+for c in "01GVWOB":
+    error_substitute_costs[(ord('K'), ord(c))] = 5
+    error_substitute_costs[(ord(c), ord('K'))] = 5
+
+# Penalty for changing colour
+for c in "01GVWOB":
+    for d in "01GVWOB":
+        error_substitute_costs[(ord(c), ord(d))] = 1
+        error_substitute_costs[(ord(d), ord(c))] = 1
+
+insert_costs = np.ones(128, dtype=np.float64) * 1000
+delete_costs = np.ones(128, dtype=np.float64) * 1000
+
+
+@functools.lru_cache(None)
+def edit_weight(a: int, b: int, is_odd_offset: bool, error=False):
     a_pixels = byte_to_colour_string(a, is_odd_offset)
     b_pixels = byte_to_colour_string(b, is_odd_offset)

-    return d.distance(a_pixels, b_pixels)
+    substitute_costs = error_substitute_costs  # if error else None
+
+    dist = weighted_levenshtein.dam_lev(
+        a_pixels, b_pixels,
+        insert_costs=insert_costs,
+        delete_costs=delete_costs,
+        substitute_costs=substitute_costs,
+    )
+    return np.int64(dist)


 @functools.lru_cache(None)
@@ -76,42 +108,101 @@ class Video:

     def __init__(
             self,
-            frame_rate: int = 30,
-            screen_page: int = 1,
-            opcode_scheduler: scheduler.OpcodeScheduler = None):
-        self.screen_page = screen_page
-        self.frame_rate = frame_rate
+            filename: str):
+        self.filename = filename  # type: str
+
+        self._reader = skvideo.io.FFmpegReader(filename)
+
+        # Compute frame rate from input video
+        data = skvideo.io.ffprobe(self.filename)['video']
+        rate_data = data['@r_frame_rate'].split("/")  # e.g. 12000/1001
+        self._input_frame_rate = float(rate_data[0]) / float(rate_data[1])
+
+        self.cycles_per_frame = 1024. * 1024 / self._input_frame_rate
+        self.frame_number = 0

         # Initialize empty
         self.memory_map = screen.MemoryMap(
-            self.screen_page)  # type: screen.MemoryMap
-
-        self.scheduler = (
-            opcode_scheduler or scheduler.HeuristicPageFirstScheduler())
+            screen_page=1)  # type: screen.MemoryMap

         # Accumulates pending edit weights across frames
-        self.update_priority = np.zeros((32, 256), dtype=np.int)
+        self.update_priority = np.zeros((32, 256), dtype=np.int64)

+    def tick(self, cycles) -> bool:
+        # print(cycles, self.cycles_per_frame, self.cycles_per_frame *
+        #       self.frame_number)
+        if cycles > (self.cycles_per_frame * self.frame_number):
+            self.frame_number += 1
+            return True
+        return False
+
-    def encode_frame(self, target: screen.MemoryMap) -> Iterator[
-            opcodes.Opcode]:
-        """Update to match content of frame within provided budget.
+    def _frame_grabber(self):
+        for frame_array in self._reader.nextFrame():
+            yield Image.fromarray(frame_array)
+
+    def frames(self) -> Iterator[screen.MemoryMap]:
+        """Encode frame to HGR using bmp2dhr.

+        We do the encoding in a background thread to parallelize.
         """

+        frame_dir = self.filename.split(".")[0]
+        try:
+            os.mkdir(frame_dir)
+        except FileExistsError:
+            pass
+
+        q = queue.Queue(maxsize=10)
+
+        def worker():
+            for _idx, _frame in enumerate(self._frame_grabber()):
+                outfile = "%s/%08dC.BIN" % (frame_dir, _idx)
+                bmpfile = "%s/%08d.bmp" % (frame_dir, _idx)
+
+                try:
+                    os.stat(outfile)
+                except FileNotFoundError:
+                    _frame = _frame.resize((280, 192))
+                    _frame.save(bmpfile)
+
+                    subprocess.call(
+                        ["/usr/local/bin/bmp2dhr", bmpfile, "hgr", "D9"])
+
+                    os.remove(bmpfile)
+
+                _frame = np.fromfile(outfile, dtype=np.uint8)
+                q.put(_frame)
+
+            q.put(None)
+
+        t = threading.Thread(target=worker)
+        t.start()
+
+        while True:
+            frame = q.get()
+            if frame is None:
+                break
+
+            yield screen.FlatMemoryMap(
+                screen_page=1, data=frame).to_memory_map()
+            q.task_done()
+
+        t.join()
+
+    def encode_frame(
+            self, target: screen.MemoryMap
+    ) -> Iterator[opcodes.Opcode]:
+        """Update to match content of frame within provided budget."""

         print("Similarity %f" % (self.update_priority.mean()))
         yield from self._index_changes(self.memory_map, target)

-    def _index_changes(
+    def _diff_weights(
             self,
             source: screen.MemoryMap,
             target: screen.MemoryMap
-    ) -> Iterator[Tuple[int, int, int, int, int]]:
-        """Transform encoded screen to sequence of change tuples.
-
-        Change tuple is (update_priority, page, offset, content, run_length)
-        """
-
-        diff_weights = np.zeros((32, 256), dtype=np.uint8)
+    ):
+        diff_weights = np.zeros((32, 256), dtype=np.int64)

         it = np.nditer(
             source.page_offset ^ target.page_offset, flags=['multi_index'])
@@ -127,16 +218,9 @@ class Video:
                 it.multi_index[1] % 2 == 1
             )
             it.iternext()
+        return diff_weights

-        # Clear any update priority entries that have resolved themselves
-        # with new frame
-        self.update_priority[diff_weights == 0] = 0
-
-        self.update_priority += diff_weights
-
-        # Iterate in descending order of update priority and emit tuples
-        # encoding (page, content, [offsets])
-
+    def _heapify_priorities(self) -> List:
         priorities = []
         it = np.nditer(self.update_priority, flags=['multi_index'])
         while not it.finished:
@@ -147,69 +231,107 @@ class Video:

             page, offset = it.multi_index
             # Don't use deterministic order for page, offset
-            nonce = random.randint(0,255)
+            nonce = random.random()
             heapq.heappush(priorities, (-priority, nonce, page, offset))
             it.iternext()

-        while True:
-            priority, _, page, offset = heapq.heappop(priorities)
-            priority = -priority
-            if page > (56-32):
-                continue
+        return priorities
+
+    @functools.lru_cache(None)
+    def _compute_delta(self, content, target, old, is_odd):
+        return edit_weight(content, target, is_odd, error=True) - old
+
+    def _compute_error(self, page, content, target, old_error):
+        offsets = []
+
+        old_error_page = old_error[page]
+        tpo = target.page_offset[page]
+
+        page_priorities = [(-p, random.random(), o) for o, p in enumerate(
+            self.update_priority[page]) if p]
+        heapq.heapify(page_priorities)
+
+        # Iterate in descending priority order and take first 3 offsets with
+        # negative delta
+        while page_priorities:
+            _, _, o = heapq.heappop(page_priorities)
+
+            # If we store content at this offset, what is the difference
+            # between this edit distance and the ideal target edit distance?
+            delta = self._compute_delta(
+                content, tpo[o], o % 2 == 1, old_error_page[o])
+
+            # Getting further away from goal, no thanks!
+            if delta >= 0:
+                continue
+            #
+            # # print("Offset %d prio %d: %d -> %d = %d" % (
+            # #     o, p, content,
+            # #     target.page_offset[page, o],
+            # #     delta
+            # # ))
+            offsets.append(o)
+            if len(offsets) == 3:
+                break
+
+        return offsets
+
+    def _index_changes(
+            self,
+            source: screen.MemoryMap,
+            target: screen.MemoryMap
+    ) -> Iterator[Tuple[int, int, int, int, int]]:
+        """Transform encoded screen to sequence of change tuples.
+
+        Change tuple is (update_priority, page, offset, content, run_length)
+        """
+
+        diff_weights = self._diff_weights(source, target)
+
+        # Clear any update priority entries that have resolved themselves
+        # with new frame
+        self.update_priority[diff_weights == 0] = 0
+
+        self.update_priority += diff_weights
+
+        priorities = self._heapify_priorities()
+        while priorities:
+            _, _, page, offset = heapq.heappop(priorities)
+            # Check whether we've already cleared this diff while processing
+            # an earlier opcode
+            if self.update_priority[page, offset] == 0:
+                continue

             offsets = [offset]
             content = target.page_offset[page, offset]
-            #print("Priority %d: page %d offset %d content %d" % (
+            # print("Priority %d: page %d offset %d content %d" % (
             #     priority, page, offset, content))

             # Clear priority for the offset we're emitting
             self.update_priority[page, offset] = 0
+            self.memory_map.page_offset[page, offset] = content

             # Need to find 3 more offsets to fill this opcode

-            # Minimize the update_priority delta that would result from
-            # emitting this offset
-
-            # Find offsets that would have largest reduction in diff weight
-            # with this content byte, then order by update priority
-            deltas = {}
-            for o, p in enumerate(self.update_priority[page]):
-                if p == 0:
-                    continue
-
-                # If we store content at this offset, what is the new
-                # edit_weight from this content byte to the target
-                delta = edit_weight(
-                    content,
-                    target.page_offset[page, o],
-                    o % 2 == 1
-                )
-                #print("Offset %d prio %d: %d -> %d = %d" % (
-                #     o, p, content,
-                #     target.page_offset[page, o],
-                #     delta
-                #))
-                deltas.setdefault(delta, list()).append((p, o))
-
-            for d in sorted(deltas.keys()):
-                #print(d)
-                po = sorted(deltas[d], reverse=True)
-                #print(po)
-                for p, o in po:
-                    offsets.append(o)
-                    # Clear priority for the offset we're emitting
-                    self.update_priority[page, offset] = 0
-                    if len(offsets) == 4:
-                        break
-                if len(offsets) == 4:
-                    break
+            for o in self._compute_error(
+                    page,
+                    content,
+                    target,
+                    diff_weights
+            ):
+                offsets.append(o)
+                # Clear priority for the offset we're emitting
+                self.update_priority[page, o] = 0
+                self.memory_map.page_offset[page, o] = content

-            # Pad to 4 if we didn't find anything
+            # Pad to 4 if we didn't find enough
             for _ in range(len(offsets), 4):
                 offsets.append(offsets[0])

-            #print("Page %d, content %d: offsets %s" % (page+32, content,
+            # print("Page %d, content %d: offsets %s" % (page+32, content,
             #     offsets))
-            yield (page+32, content, offsets)
+            yield (page + 32, content, offsets)

+        # If we run out of things to do, pad forever
+        content = target.page_offset[(0, 0)]
+        while True:
+            yield (32, content, [0, 0, 0, 0])
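Note: a simplified, self-contained sketch of how the _diff_weights / _heapify_priorities split above fits together: XOR the source and target memory maps to find changed bytes, fold the weights into update_priority so that diffs which keep being deferred eventually win, then pop changes in descending priority with a random nonce as tie-breaker. Hamming weight stands in for the real edit_weight here to keep the sketch self-contained; the (32, 256) page/offset shape follows the diff:

    import heapq
    import random

    import numpy as np

    source = np.zeros((32, 256), dtype=np.uint8)
    target = np.random.randint(0, 256, (32, 256)).astype(np.uint8)
    update_priority = np.zeros((32, 256), dtype=np.int64)

    # Diff weights: popcount of changed bits stands in for edit_weight.
    xor = source ^ target
    diff_weights = np.unpackbits(
        xor[..., np.newaxis], axis=-1).sum(axis=-1).astype(np.int64)

    # Clear entries that resolved themselves with the new frame, then
    # accumulate the rest across frames.
    update_priority[diff_weights == 0] = 0
    update_priority += diff_weights

    priorities = []
    it = np.nditer(update_priority, flags=['multi_index'])
    while not it.finished:
        priority = int(it[0])
        if priority:
            page, offset = it.multi_index
            # Random nonce avoids a deterministic page/offset scan order.
            heapq.heappush(
                priorities, (-priority, random.random(), page, offset))
        it.iternext()

    neg_priority, _, page, offset = heapq.heappop(priorities)
    print("highest priority %d at page=%d offset=%d"
          % (-neg_priority, page, offset))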