Mirror of https://github.com/KrisKennaway/ii-pix.git, synced 2024-12-26 18:29:29 +00:00
- work with image as numpy.ndarray instead of Image
- use float32 representation instead of uint8
- Vectorize applying dither
- Improve quality of 560px images by looking ahead N pixels, evaluating all 2^N colour choices and minimizing the total error.
parent 82e5779a3a
commit 7f47aa33e7

dither.py: 201 changed lines
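The headline change is the lookahead dither for the 560px mode: instead of committing to the closest palette colour one pixel at a time, the new dither_lookahead() enumerates all 2^N bit patterns for the next N palette choices, scores each candidate sequence by accumulated colour distance, and keeps only the first pixel of the best sequence. A minimal standalone sketch of that search, leaving out the error diffusion the real loop also performs inside each candidate (the names and the two-options-per-position assumption are illustrative, not code from this diff):

from typing import Callable, Sequence


def best_first_choice(pixels: Sequence, palette_options: Callable,
                      distance: Callable, lookahead: int):
    """Try all 2^lookahead choice patterns; return the first choice of the best one."""
    best_error = float("inf")
    best_first = None
    for pattern in range(2 ** lookahead):
        prev, first, total = None, None, 0.0
        for j in range(min(lookahead, len(pixels))):
            # Two candidate palette colours per position; bit j of the pattern picks one.
            options = palette_options(prev, j)
            choice = options[(pattern >> j) & 1]
            if j == 0:
                first = choice
            total += distance(pixels[j], choice)
            prev = choice
        if total < best_error:
            best_error, best_first = total, first
    return best_first

In the diff itself the candidate at offset j is selected with palette_choices[(i & (1 << j)) >> j], and the quantisation error is diffused into a scratch copy of the image before the next offset is scored, so later choices are judged against already-dithered pixels.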
@@ -10,12 +10,11 @@ import numpy as np
# TODO:
# - switch to colours library
# - only lookahead for 560px
# - vectorize colour differences
# - palette class
# - compare to bmp2dhr and a2bestpix
# - deal with fringing
# - look ahead N pixels and compute all 2^N bit patterns, then minimize
#   average error
# - optimize Dither.apply() critical path


def srgb_to_linear_array(a: np.ndarray, gamma=2.4) -> np.ndarray:
    return np.where(a <= 0.04045, a / 12.92, ((a + 0.055) / 1.055) ** gamma)
@@ -26,17 +25,15 @@ def linear_to_srgb_array(a: np.ndarray, gamma=2.4) -> np.ndarray:
                    0.055)


def srgb_to_linear(im: Image) -> Image:
    a = np.array(im, dtype=np.float32) / 255.0
    rgb_linear = srgb_to_linear_array(a, gamma=2.4)
    return Image.fromarray(
        (np.clip(rgb_linear, 0.0, 1.0) * 255).astype(np.uint8))
# XXX work uniformly with 255 or 1.0 range
def srgb_to_linear(im: np.ndarray) -> np.ndarray:
    rgb_linear = srgb_to_linear_array(im / 255.0, gamma=2.4)
    return (np.clip(rgb_linear, 0.0, 1.0) * 255).astype(np.float32)


def linear_to_srgb(im: Image) -> Image:
    a = np.array(im, dtype=np.float32) / 255.0
    srgb = linear_to_srgb_array(a, gamma=2.4)
    return Image.fromarray((np.clip(srgb, 0.0, 1.0) * 255).astype(np.uint8))
def linear_to_srgb(im: np.ndarray) -> np.ndarray:
    srgb = linear_to_srgb_array(im / 255.0, gamma=2.4)
    return (np.clip(srgb, 0.0, 1.0) * 255).astype(np.float32)


# Default bmp2dhr palette
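As the XXX note above says, the converted code now keeps image data as float32 in the 0-255 range throughout, so both conversions divide by 255, apply the transfer function, and scale back up. A quick usage sketch of the new ndarray versions (the 1x1 test array is made up):

import numpy as np

rgb = np.array([[[255.0, 128.0, 0.0]]], dtype=np.float32)  # 1x1 image, one orange pixel
lin = srgb_to_linear(rgb)    # gamma-decode: mid-tones drop (128 -> roughly 55)
back = linear_to_srgb(lin)   # gamma-encode: round-trips back to about [255, 128, 0]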
@@ -112,7 +109,7 @@ for k, v in RGB.items():

class ColourDistance:
    @staticmethod
    def distance(rgb1: Tuple[int], rgb2: Tuple[int]) -> float:
    def distance(self, rgb1: np.ndarray, rgb2: np.ndarray) -> float:
        raise NotImplementedError
@@ -120,7 +117,7 @@ class RGBDistance(ColourDistance):
    """Euclidean squared distance in RGB colour space."""

    @staticmethod
    def distance(rgb1: Tuple[int], rgb2: Tuple[int]) -> float:
    def distance(self, rgb1: np.ndarray, rgb2: np.ndarray) -> float:
        return float(np.asscalar(np.sum(np.power(np.array(rgb1) - np.array(
            rgb2), 2))))
@@ -129,31 +126,33 @@ class CIE2000Distance(ColourDistance):
    """CIE2000 delta-E distance."""

    @staticmethod
    @functools.lru_cache(None)
    def _to_lab(rgb):
        srgb = np.clip(linear_to_srgb_array(np.array(rgb) / 255), 0.0,
                       1.0) * 255
        srgb = colormath.color_objects.sRGBColor(*tuple(srgb), is_upscaled=True)
    def _to_lab(rgb: Tuple[float]):
        srgb = np.clip(
            linear_to_srgb_array(np.array(rgb, dtype=np.float32) / 255), 0.0,
            1.0)
        srgb_color = colormath.color_objects.sRGBColor(*tuple(srgb),
                                                       is_upscaled=False)
        lab = colormath.color_conversions.convert_color(
            srgb, colormath.color_objects.LabColor)
            srgb_color, colormath.color_objects.LabColor)
        return lab

    def distance(self, rgb1: Tuple[int], rgb2: Tuple[int]) -> float:
        lab1 = self._to_lab(rgb1)
        lab2 = self._to_lab(rgb2)
    def distance(self, rgb1: np.ndarray, rgb2: np.ndarray) -> float:
        lab1 = self._to_lab(tuple(rgb1))
        lab2 = self._to_lab(tuple(rgb2))
        return colormath.color_diff.delta_e_cie2000(lab1, lab2)


class CCIR601Distance(ColourDistance):
    @staticmethod
    def _to_luma(rgb):
    def _to_luma(rgb: np.ndarray):
        return rgb[0] * 0.299 + rgb[1] * 0.587 + rgb[2] * 0.114

    def distance(self, rgb1: Tuple[int], rgb2: Tuple[int]) -> float:
    def distance(self, rgb1: np.ndarray, rgb2: np.ndarray) -> float:
        delta_rgb = ((rgb1[0] - rgb2[0]) / 255, (rgb1[1] - rgb2[1]) / 255,
                     (rgb1[2] - rgb2[2]) / 255)
        luma_diff = (self._to_luma(rgb1) - self._to_luma(rgb2)) / 255

        # TODO: this is the formula bmp2dhr uses but what motivates it?
        return (
            delta_rgb[0] * delta_rgb[0] * 0.299 +
            delta_rgb[1] * delta_rgb[1] * 0.587 +
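A side note on CIE2000Distance: _to_lab is memoised with functools.lru_cache, and cache keys must be hashable, which is why distance() now passes tuple(rgb1) rather than the numpy array itself. A minimal illustration of that constraint (the helper name is invented):

import functools

import numpy as np


@functools.lru_cache(None)
def cached_conversion(rgb: tuple) -> float:
    return sum(rgb)  # stand-in for the expensive sRGB -> Lab conversion

pixel = np.array([255.0, 0.0, 128.0])
cached_conversion(tuple(pixel))  # fine: a tuple is hashable, so it can be cached
# cached_conversion(pixel)       # TypeError: unhashable type: 'numpy.ndarray'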
@@ -180,10 +179,10 @@ class Screen:

        return 1024 * c + 128 * b + 40 * a

    def _image_to_bitmap(self, image: Image) -> np.ndarray:
    def _image_to_bitmap(self, image: np.ndarray) -> np.ndarray:
        raise NotImplementedError

    def pack(self, image: Image):
    def pack(self, image: np.ndarray):
        bitmap = self._image_to_bitmap(image)
        # The DHGR display encodes 7 pixels across interleaved 4-byte sequences
        # of AUX and MAIN memory, as follows:
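The 1024 * c + 128 * b + 40 * a expression in the context above is the standard Apple II hi-res row interleave. Assuming the usual field split of the row number, it can be checked with a small helper (illustrative, not part of the diff):

def row_offset(y: int) -> int:
    # Split row y (0-191) into the interleave fields of the HGR/DHGR memory layout:
    # a = 64-row band, b = 8-row group within the band, c = row within the group.
    a, b, c = y // 64, (y // 8) % 8, y % 8
    return 1024 * c + 128 * b + 40 * a

assert row_offset(0) == 0
assert row_offset(1) == 1024  # the next row down is 1KB away in screen memory
assert row_offset(8) == 128   # row 8 sits 128 bytes after row 0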
@@ -232,12 +231,12 @@ class DHGR140Screen(Screen):
    Y_RES = 192
    X_PIXEL_WIDTH = 4

    def _image_to_bitmap(self, image: Image) -> np.ndarray:
    def _image_to_bitmap(self, image: np.ndarray) -> np.ndarray:
        bitmap = np.zeros(
            (self.Y_RES, self.X_RES * self.X_PIXEL_WIDTH), dtype=np.bool)
        for y in range(self.Y_RES):
            for x in range(self.X_RES):
                pixel = image.getpixel((x, y))
                pixel = image[y, x]
                dots = DOTS[pixel]
                bitmap[y, x * self.X_PIXEL_WIDTH:(
                    (x + 1) * self.X_PIXEL_WIDTH)] = dots
@@ -254,12 +253,12 @@ class DHGR560Screen(Screen):
    Y_RES = 192
    X_PIXEL_WIDTH = 1

    def _image_to_bitmap(self, image: Image) -> np.ndarray:
    def _image_to_bitmap(self, image: np.ndarray) -> np.ndarray:
        bitmap = np.zeros((self.Y_RES, self.X_RES), dtype=np.bool)
        for y in range(self.Y_RES):
            for x in range(self.X_RES):
                pixel = image.getpixel((x, y))
                dots = DOTS[pixel]
                pixel = image[y, x]
                dots = DOTS[tuple(pixel)]
                phase = x % 4
                bitmap[y, x] = dots[phase]
        return bitmap
@@ -276,23 +275,31 @@ class Dither:
    PATTERN = None
    ORIGIN = None

    def apply(self, screen: Screen, image: Image, x: int, y: int,
              quant_error: float):
        for offset, error_fraction in np.ndenumerate(self.PATTERN / np.sum(
                self.PATTERN)):
            xx = x + offset[1] - self.ORIGIN[1]
            yy = y + offset[0] - self.ORIGIN[0]
            if xx < 0 or yy < 0 or xx > (screen.X_RES - 1) or (
                    yy > (screen.Y_RES - 1)):
                continue
            new_pixel = image.getpixel((xx, yy)) + error_fraction * quant_error
            image.putpixel((xx, yy), tuple(new_pixel.astype(int)))
    def apply(self, screen: Screen, image: np.ndarray, x: int, y: int,
              quant_error: np.ndarray):
        pshape = self.PATTERN.shape
        error = self.PATTERN.reshape(
            (pshape[0], pshape[1], 1)) * quant_error.reshape((1, 1, 3))
        # print(quant_error)
        et = max(self.ORIGIN[0] - y, 0)
        eb = min(pshape[0], screen.Y_RES - 1 - y)
        el = max(self.ORIGIN[1] - x, 0)
        er = min(pshape[1], screen.X_RES - 1 - x)
        # print(x, et, eb, el, er)

        yt = y - self.ORIGIN[0] + et
        yb = y - self.ORIGIN[0] + eb
        xl = x - self.ORIGIN[1] + el
        xr = x - self.ORIGIN[1] + er
        image[yt:yb, xl:xr, :] = np.clip(
            image[yt:yb, xl:xr, :] + error[et:eb, el:er, :], 0, 255)


class FloydSteinbergDither(Dither):
    # 0 * 7
    # 3 5 1
    PATTERN = np.array(((0, 0, 7), (3, 5, 1)))
    PATTERN = np.array(((0, 0, 7), (3, 5, 1))) / 16
    ORIGIN = (0, 1)
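The rewritten Dither.apply() above is the vectorisation called out in the commit message: instead of iterating over the kernel with np.ndenumerate and doing a getpixel/putpixel per dot, it multiplies the whole dither kernel by the per-channel quantisation error and adds the result to a window of the image in one clipped slice assignment. A standalone sketch of the same idea (the function name and the exact edge arithmetic are mine, not copied from the diff):

import numpy as np

def diffuse_error(image: np.ndarray, kernel: np.ndarray, origin, x, y,
                  quant_error: np.ndarray) -> None:
    """Add kernel * quant_error to the image window around (y, x), clipped to bounds."""
    rows, cols = kernel.shape
    error = kernel[:, :, np.newaxis] * quant_error[np.newaxis, np.newaxis, :]

    # Clip the kernel window against the image bounds.
    et = max(origin[0] - y, 0)
    el = max(origin[1] - x, 0)
    eb = min(rows, image.shape[0] - y + origin[0])
    er = min(cols, image.shape[1] - x + origin[1])

    yt, yb = y - origin[0] + et, y - origin[0] + eb
    xl, xr = x - origin[1] + el, x - origin[1] + er
    image[yt:yb, xl:xr, :] = np.clip(
        image[yt:yb, xl:xr, :] + error[et:eb, el:er, :], 0, 255)

# e.g. the Jarvis kernel, with error pushed from the top-left pixel of a 192x560 image
img = np.zeros((192, 560, 3), dtype=np.float32)
jarvis = np.array(((0, 0, 0, 7, 5), (3, 5, 7, 5, 3), (1, 3, 5, 3, 1))) / 48
diffuse_error(img, jarvis, origin=(0, 2), x=0, y=0,
              quant_error=np.array([12.0, -6.0, 3.0]))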
@@ -300,7 +307,7 @@ class BuckelsDither(Dither):
    # 0 * 2 1
    # 1 2 1 0
    # 0 1 0 0
    PATTERN = np.array(((0, 0, 2, 1), (1, 2, 1, 0), (0, 1, 0, 0)))
    PATTERN = np.array(((0, 0, 2, 1), (1, 2, 1, 0), (0, 1, 0, 0))) / 8
    ORIGIN = (0, 1)
@@ -308,12 +315,12 @@ class JarvisDither(Dither):
    # 0 0 X 7 5
    # 3 5 7 5 3
    # 1 3 5 3 1
    PATTERN = np.array(((0, 0, 0, 7, 5), (3, 5, 7, 5, 3), (1, 3, 5, 3, 1)))
    PATTERN = np.array(((0, 0, 0, 7, 5), (3, 5, 7, 5, 3), (1, 3, 5, 3, 1))) / 48
    ORIGIN = (0, 2)


# XXX needed?
def SRGBResize(im, size, filter):
def SRGBResize(im, size, filter) -> np.ndarray:
    # Convert to numpy array of float
    arr = np.array(im, dtype=np.float32) / 255.0
    # Convert sRGB -> linear
@@ -327,13 +334,11 @@ def SRGBResize(im, size, filter):
    # Convert linear -> sRGB
    arrOut = np.where(arrOut <= 0.0031308, 12.92 * arrOut,
                      1.055 * arrOut ** (1.0 / 2.4) - 0.055)
    # Convert to 8-bit
    arrOut = np.uint8(np.rint(arrOut * 255.0))
    # Convert back to PIL
    return Image.fromarray(arrOut)
    arrOut = np.rint(np.clip(arrOut, 0.0, 1.0) * 255.0)
    return arrOut


def open_image(screen: Screen, filename: str) -> Image:
def open_image(screen: Screen, filename: str) -> np.ndarray:
    im = Image.open(filename)
    # TODO: convert to sRGB colour profile explicitly, in case it has some other
    # profile already.
@@ -344,19 +349,64 @@ def open_image(screen: Screen, filename: str) -> Image:
        Image.LANCZOS))


def dither_image(
        screen: Screen, image: Image, dither: Dither, differ: ColourDistance
# XXX
def dither_one_pixel(screen: Screen, differ: ColourDistance,
                     input_pixel, last_pixel, x) -> Tuple[int]:
    palette_choices = screen.pixel_palette_options(last_pixel, x)
    return screen.find_closest_color(input_pixel, palette_choices, differ)


def dither_lookahead(
        screen: Screen, image: np.ndarray, dither: Dither, differ:
        ColourDistance,
        x, y, last_pixel, lookahead
) -> Image:
    best_error = 1e9
    best_pixel = None
    for i in range(2 ** lookahead):
        temp_image = np.empty_like(image)
        # XXX
        temp_image[y:y + 3, :, :] = image[y:y + 3, :, :]
        output_pixel = last_pixel
        total_error = 0.0
        choices = []
        inputs = []
        for j in range(min(lookahead, screen.X_RES - x)):
            xx = x + j
            input_pixel = temp_image[y, xx, :]
            palette_choices = screen.pixel_palette_options(output_pixel, xx)
            output_pixel = np.array(palette_choices[(i & (1 << j)) >> j])
            inputs.append(input_pixel)
            choices.append(output_pixel)
            # output_pixel = dither_one_pixel(screen, differ,
            #                                 input_pixel, output_pixel, xx)
            quant_error = input_pixel - output_pixel
            total_error += differ.distance(input_pixel, output_pixel)
            dither.apply(screen, temp_image, xx, y, quant_error)
        # print(bin(i), total_error, inputs, choices)
        if total_error < best_error:
            best_error = total_error
            best_pixel = choices[0]
    # print(best_error, best_pixel)
    return best_pixel


def dither_image(
        screen: Screen, image: np.ndarray, dither: Dither, differ:
        ColourDistance, lookahead) -> np.ndarray:
    for y in range(screen.Y_RES):
        print(y)
        new_pixel = (0, 0, 0)
        output_pixel = (0, 0, 0)
        for x in range(screen.X_RES):
            old_pixel = image.getpixel((x, y))
            palette_choices = screen.pixel_palette_options(new_pixel, x)
            new_pixel = screen.find_closest_color(
                old_pixel, palette_choices, differ)
            image.putpixel((x, y), tuple(new_pixel))
            quant_error = old_pixel - new_pixel
            # print(x)
            input_pixel = image[y, x, :]
            output_pixel = dither_lookahead(screen, image, dither, differ, x,
                                            y, output_pixel, lookahead)
            # output_pixel = dither_one_pixel(screen, differ, input_pixel,
            #                                 output_pixel, x)
            quant_error = input_pixel - output_pixel
            image[y, x, :] = output_pixel
            dither.apply(screen, image, x, y, quant_error)
    return image
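Two small things worth flagging in dither_lookahead() above: the scratch copy only duplicates image[y:y + 3] (hence the XXX comment), presumably because the Jarvis kernel used below is three rows tall, so error diffused from row y can reach no further than row y + 2; and the bit selection (i & (1 << j)) >> j simply extracts bit j of the candidate pattern, i.e. it is equivalent to (i >> j) & 1:

i, j = 0b101, 2
assert (i & (1 << j)) >> j == (i >> j) & 1 == 1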
@@ -365,26 +415,39 @@ def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("input", type=str, help="Input file to process")
    parser.add_argument("output", type=str, help="Output file for ")
    import traceback
    import warnings
    import sys

    def warn_with_traceback(message, category, filename, lineno, file=None,
                            line=None):
        log = file if hasattr(file, 'write') else sys.stderr
        traceback.print_stack(file=log)
        log.write(
            warnings.formatwarning(message, category, filename, lineno, line))

    warnings.showwarning = warn_with_traceback
    # screen = DHGR140Screen()
    screen = DHGR560Screen()

    args = parser.parse_args()
    image = open_image(screen, args.input)
    image.show()
    # image.show()

    # dither = FloydSteinbergDither()
    # dither = BuckelsDither()
    dither = JarvisDither()

    # differ = CIE2000Distance()
    differ = CCIR601Distance()
    differ = CIE2000Distance()
    # differ = CCIR601Distance()

    output = dither_image(screen, image, dither, differ)
    linear_to_srgb(output).show()
    # bitmap = Image.fromarray(screen.bitmap.astype('uint8') * 255)
    output = dither_image(screen, image, dither, differ, lookahead=1)
    screen.pack(output)

    out_image = Image.fromarray(linear_to_srgb(output).astype(np.uint8))
    out_image.show()
    # bitmap = Image.fromarray(screen.bitmap.astype('uint8') * 255)

    with open(args.output, "wb") as f:
        f.write(bytes(screen.main))
        f.write(bytes(screen.aux))