Mirror of https://github.com/KrisKennaway/ii-vision.git
- have Bitmap.apply() update the memory representation instead of
  requiring callers to keep track of it
- stop trying to cache content_deltas; I think it results in losing
  deltas. Instead, just recompute the deltas for each page as we need
  them. This is fast enough in practice.
- track the average fill rate for the additional offsets we emit. This
  should be close to 3 if we're succeeding in finding enough collateral
  work.
- overhaul how we pass in the target memory maps. The previous way
  didn't make sense: we weren't actually encoding for the target video
  frame, but were using an inconsistent mix of old and new frames. I
  think this was causing image artifacting because we were aiming for
  the wrong thing.
- add some debugging assertions that were used to track this down.
parent 6b612ffb0a
commit 990e1c9d74
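A minimal sketch of the first bullet above, using hypothetical names and signatures rather than the actual ii-vision API: after this change, Bitmap.apply() is responsible for keeping the byte-level memory representation in sync, instead of every caller mirroring the write.

    class Bitmap:
        """Illustrative stand-in for the bitmap class; not the real screen.py code."""

        def __init__(self, memory_map):
            self.memory_map = memory_map  # byte-level memory representation

        def apply(self, page, offset, value):
            # Update the memory representation here, in one place, so
            # callers no longer have to keep track of it themselves.
            self.memory_map.page_offset[page, offset] = value
            self._update_packed(page, offset, value)

        def _update_packed(self, page, offset, value):
            # Packed-pixel bookkeeping elided; the point is that both
            # views are maintained by apply() itself.
            pass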
@@ -34,6 +34,9 @@ class Video:
         self.frame_number = 0  # type: int
         self.palette = palette  # type: Palette
 
+        self._opcodes = 0
+        self._offsets = 0
+
         # Initialize empty screen
         self.memory_map = screen.MemoryMap(
             screen_page=1)  # type: screen.MemoryMap
@@ -88,6 +91,8 @@ class Video:
             memory_map.page_offset[screen.SCREEN_HOLES]) == 0
 
         print("Similarity %f" % (update_priority.mean()))
+        if self._opcodes:
+            print("Opcode fill rate %f" % (self._offsets / self._opcodes))
 
         yield from self._index_changes(
             memory_map, target, update_priority, is_aux)
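The guard on self._opcodes above avoids dividing by zero before any opcodes have been emitted. As a purely hypothetical illustration of reading the new metric (the numbers are made up, not from a real encode):

    opcodes, offsets = 1000, 2850   # made-up counts, for illustration only
    if opcodes:
        # 2850 additional offsets over 1000 opcodes -> fill rate 2.85,
        # close to the target of ~3 items of collateral work per opcode.
        print("Opcode fill rate %f" % (offsets / opcodes))  # prints 2.850000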
@@ -181,6 +186,9 @@ class Video:
                 if len(offsets) == 3:
                     break
 
+            # Record how many additional offsets we were able to fill
+            self._opcodes += 1
+            self._offsets += len(offsets)
             # Pad to 4 if we didn't find enough
             for _ in range(len(offsets), 4):
                 offsets.append(offsets[0])
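Note that the two counters are bumped before the padding loop, so the fill rate only counts offsets that were genuinely found. The padding rule itself, restated as a standalone sketch (the helper name is made up for illustration):

    def pad_to_four(offsets):
        # Fill the remaining operand slots by repeating the first offset;
        # rewriting the same byte with the same content is harmless.
        for _ in range(len(offsets), 4):
            offsets.append(offsets[0])
        return offsets

    assert pad_to_four([0x17]) == [0x17, 0x17, 0x17, 0x17]
    assert pad_to_four([0x17, 0x28, 0x39]) == [0x17, 0x28, 0x39, 0x17]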
@@ -206,44 +214,44 @@ class Video:
         # deterministic point in time when we can assert that all diffs should
         # have been resolved.
         # TODO: add flag to enable debug assertions
-        # if not np.array_equal(source.page_offset, target.page_offset):
-        #     diffs = np.nonzero(source.page_offset != target.page_offset)
-        #     for i in range(len(diffs[0])):
-        #         diff_p = diffs[0][i]
-        #         diff_o = diffs[1][i]
-        #
-        #         # For HGR, 0x00 or 0x7f may be visually equivalent to the same
-        #         # bytes with high bit set (depending on neighbours), so skip
-        #         # them
-        #         if (source.page_offset[diff_p, diff_o] & 0x7f) == 0 and \
-        #                 (target.page_offset[diff_p, diff_o] & 0x7f) == 0:
-        #             continue
-        #
-        #         if (source.page_offset[diff_p, diff_o] & 0x7f) == 0x7f and \
-        #                 (target.page_offset[diff_p, diff_o] & 0x7f) == 0x7f:
-        #             continue
-        #
-        #         print("Diff at (%d, %d): %d != %d" % (
-        #             diff_p, diff_o, source.page_offset[diff_p, diff_o],
-        #             target.page_offset[diff_p, diff_o]
-        #         ))
-        #         assert False
-        #
-        # # If we've finished both main and aux pages, there should be no residual
-        # # diffs in packed representation
-        # all_done = self.out_of_work[True] and self.out_of_work[False]
-        # if all_done and not np.array_equal(self.pixelmap.packed,
-        #                                    target_pixelmap.packed):
-        #     diffs = np.nonzero(
-        #         self.pixelmap.packed != target_pixelmap.packed)
-        #     print("is_aux: %s" % is_aux)
-        #     for i in range(len(diffs[0])):
-        #         diff_p = diffs[0][i]
-        #         diff_o = diffs[1][i]
-        #         print("(%d, %d): got %d want %d" % (
-        #             diff_p, diff_o, self.pixelmap.packed[diff_p, diff_o],
-        #             target_pixelmap.packed[diff_p, diff_o]))
-        #     assert False
+        if not np.array_equal(source.page_offset, target.page_offset):
+            diffs = np.nonzero(source.page_offset != target.page_offset)
+            for i in range(len(diffs[0])):
+                diff_p = diffs[0][i]
+                diff_o = diffs[1][i]
+
+                # For HGR, 0x00 or 0x7f may be visually equivalent to the same
+                # bytes with high bit set (depending on neighbours), so skip
+                # them
+                if (source.page_offset[diff_p, diff_o] & 0x7f) == 0 and \
+                        (target.page_offset[diff_p, diff_o] & 0x7f) == 0:
+                    continue
+
+                if (source.page_offset[diff_p, diff_o] & 0x7f) == 0x7f and \
+                        (target.page_offset[diff_p, diff_o] & 0x7f) == 0x7f:
+                    continue
+
+                print("Diff at (%d, %d): %d != %d" % (
+                    diff_p, diff_o, source.page_offset[diff_p, diff_o],
+                    target.page_offset[diff_p, diff_o]
+                ))
+                assert False
+
+        # If we've finished both main and aux pages, there should be no residual
+        # diffs in packed representation
+        all_done = self.out_of_work[True] and self.out_of_work[False]
+        if all_done and not np.array_equal(self.pixelmap.packed,
+                                           target_pixelmap.packed):
+            diffs = np.nonzero(
+                self.pixelmap.packed != target_pixelmap.packed)
+            print("is_aux: %s" % is_aux)
+            for i in range(len(diffs[0])):
+                diff_p = diffs[0][i]
+                diff_o = diffs[1][i]
+                print("(%d, %d): got %d want %d" % (
+                    diff_p, diff_o, self.pixelmap.packed[diff_p, diff_o],
+                    target_pixelmap.packed[diff_p, diff_o]))
+            assert False
 
         # If we run out of things to do, pad forever
         content = target.page_offset[0, 0]
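The skip conditions in the first assertion block above follow the comment in the diff: 0x00 and 0x7f may be visually equivalent to the same bytes with the high bit set, depending on neighbouring bytes, so such pairs are not treated as residual diffs. Restated as a standalone sketch (the function name is made up for illustration):

    def possibly_equivalent(a, b):
        # 0x00 vs 0x80 and 0x7f vs 0xff may render identically in HGR,
        # so the debug assertions skip byte pairs of this form.
        return ((a & 0x7f) == 0x00 and (b & 0x7f) == 0x00) or \
               ((a & 0x7f) == 0x7f and (b & 0x7f) == 0x7f)

    assert possibly_equivalent(0x00, 0x80)
    assert possibly_equivalent(0x7f, 0xff)
    assert not possibly_equivalent(0x01, 0x81)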