Mirror of https://github.com/dgelessus/python-rsrcfork.git (synced 2025-07-01 17:23:51 +00:00)

Compare commits (77 commits)
Commit SHA1s:
d342614f55, a5fb30e194, f3b3de496e, a71274d554, 6d69d0097d, 8db1b22bdc, 6559cbc337, 1e79dc3c50,
db48212ade, 3a72bd3406, cb868b8005, 2f2472cfe9, e0f73d3220, b77c85c295, e5875ffe67, 449bf4dd71,
0ac6e8a3c4, 29ddd21740, add22b704a, fdd04c944b, 97c459bca7, 9ef084de58, 6d03954784, 343259049c,
e75e88018e, 0f72e8eb1f, 84f09d0b83, c108af60ca, 0c942e26ec, 868a322b8e, a23cd0fcb2, 53e73be980,
9dbdf5b827, 87d4ae43d4, 716ac30a53, 20991154d3, 7207b1d32b, 1de940d597, d7255bc977, c6337bdfbd,
f4c2717720, 8ad0234633, 7612322c43, 51ae7c6a09, 194c886472, b2fa5f8b0f, 752ec9e828, fb5708e6b4,
5bcc3f02d7, d082f29238, fb827e4073, c373b9fe28, e6779b021a, c4fe09dbf0, acdbbc89b2, d7fb67fac1,
5ede8a351a, 7253c53d67, efd3848146, f798928270, ad7f9f5d6d, a8c09f19d1, af4c465613, 4a759027f4,
3e28fa7fe0, 8904f6e093, 4c32987cc3, acd056973e, 3d444bda10, 5bc2c0cc81, 360833f940, 67a16d34a6,
2fb1d02064, 9adb188624, e98166d0a6, ea2fcac692, 0d2a3f886b
.editorconfig (Normal file, 10 lines)
@@ -0,0 +1,10 @@
root = true

[*]
charset = utf-8
indent_style = tab
insert_final_newline = true

[*.rst]
indent_style = space
indent_size = 4
.gitignore (vendored, 43 lines)
@@ -1,40 +1,11 @@
# IntelliJ IDEA, PyCharm, etc.
.idea
*.iml
out
gen

# Byte-compiled / optimized / DLL files
# Python bytecode
*.py[co]
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
# setuptools
*.egg-info/
.installed.cfg
*.egg
build/
dist/

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# virtualenv
.venv/
venv/
ENV/
# mypy
.mypy_cache/
README.rst (245 lines)
@@ -1,7 +1,27 @@
``rsrcfork``
============

A pure Python library for reading Macintosh Toolbox or Carbon resource manager data, as found in resource forks or ``.rsrc`` files even on current Mac OS X/macOS systems.
A pure Python, cross-platform library/tool for reading Macintosh resource data, as stored in resource forks and ``.rsrc`` files.

Resource forks were an important part of the Classic Mac OS, where they provided a standard way to store structured file data, metadata and application resources. This usage continued into Mac OS X (now called macOS) for backward compatibility, but over time resource forks became less commonly used in favor of simple data fork-only formats, application bundles, and extended attributes.

As of OS X 10.8 and the deprecation of the Carbon API, macOS no longer provides any officially supported APIs for using and manipulating resource data. Despite this, parts of macOS still support and use resource forks, for example to store custom file and folder icons set by the user.

Features
--------

* Pure Python, cross-platform - no native Mac APIs are used.
* Provides both a Python API and a command-line tool.
* Resource data can be read from either the resource fork or the data fork.

  * On Mac systems, the correct fork is selected automatically when reading a file. This allows reading both regular resource forks and resource data stored in data forks (as with ``.rsrc`` and similar files).
  * On non-Mac systems, resource forks are not available, so the data fork is always used.

* Compressed resources (supported by System 7 through Mac OS 9) are automatically decompressed.

  * Only the standard System 7.0 resource compression methods are supported. Resources that use non-standard decompressors cannot be decompressed.

* Object ``repr``\s are REPL-friendly: all relevant information is displayed, and long data is truncated to avoid filling up the screen by accident.
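The Python API treats a resource file as a nested mapping, as the examples further below show. As a minimal sketch of walking a whole file (the file name here is a placeholder, and the ``length`` attribute only exists as of version 1.4.0):

.. code-block:: python

    import rsrcfork

    # "Example.rsrc" is a hypothetical input file.
    with rsrcfork.open("Example.rsrc") as rf:
        for restype, resources in rf.items():
            for resid, res in resources.items():
                print(res.type, res.id, res.length, "bytes")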
Requirements
------------

@@ -11,27 +31,17 @@ Python 3.6 or later. No other libraries are required.

Installation
------------

``rsrcfork`` is available `on PyPI`__ and can be installed using ``pip``:
``rsrcfork`` is available `on PyPI <https://pypi.org/project/rsrcfork/>`_ and can be installed using ``pip``:

.. code-block:: sh

    python3 -m pip install rsrcfork

Alternatively you can run the ``setup.py`` script manually:
Alternatively you can download the source code manually, and run this command in the source code directory to install it:

.. code-block:: sh

    python3 setup.py install

__ https://pypi.python.org/pypi/rsrcfork

Features
--------

* Reading resources from data or resource forks (the latter only work on macOS of course)
* Reading data lazily with seeking, or sequentially without seeking
* Accessing resource data and attributes by their type code and ID, using a mapping-like interface
* REPL-friendly ``repr``\s that truncate long resource data so it doesn't fill the entire screen
    python3 -m pip install .

Examples
--------
@@ -41,53 +51,50 @@ Simple example

.. code-block:: python

    >>> import rsrcfork
    >>> rf = rsrcfork.open("/Users/Shared/Test.textClipping")
    >>> rf
    <rsrcfork.ResourceFile at 0x1046e6048, attributes ResourceFileAttrs.0, containing 4 resource types: [b'utxt', b'utf8', b'TEXT', b'drag']>
    >>> rf[b"TEXT"]
    <rsrcfork.ResourceFile._LazyResourceMap at 0x10470ed30 containing one resource: rsrcfork.Resource(resource_type=b'TEXT', resource_id=256, name=None, attributes=ResourceAttrs.0, data=b'Here is some text')>
    <rsrcfork.ResourceFile._LazyResourceMap at 0x10470ed30 containing one resource: rsrcfork.Resource(type=b'TEXT', id=256, name=None, attributes=ResourceAttrs.0, data=b'Here is some text')>

Automatic selection of data/resource fork
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code-block:: python

    >>> import rsrcfork
    >>> datarf = rsrcfork.open("/System/Library/Fonts/Monaco.dfont") # Resources in data fork
    >>> datarf._stream
    <_io.BufferedReader name='/System/Library/Fonts/Monaco.dfont'>
    >>> resourcerf = rsrcfork.open("/Users/Shared/Test.textClipping") # Resources in resource fork
    >>> resourcerf._stream
    <_io.BufferedReader name='/Users/Shared/Test.textClipping/..namedfork/rsrc'>
Command-line interface
^^^^^^^^^^^^^^^^^^^^^^

.. code-block:: sh

    $ python3 -m rsrcfork /Users/Shared/Test.textClipping
    No header system data
    No header application data
    No file attributes
    4 resource types:
    'utxt': 1 resources:
    (256), unnamed, no attributes, 34 bytes

    'utf8': 1 resources:
    (256), unnamed, no attributes, 17 bytes

    'TEXT': 1 resources:
    (256), unnamed, no attributes, 17 bytes

    'drag': 1 resources:
    (128), unnamed, no attributes, 64 bytes

    $ python3 -m rsrcfork /Users/Shared/Test.textClipping "'TEXT' (256)"
    Resource 'TEXT' (256), unnamed, no attributes, 17 bytes:
    00000000 48 65 72 65 20 69 73 20 73 6f 6d 65 20 74 65 78 |Here is some tex|
    00000010 74 |t|
    00000011

    $ python3 -m rsrcfork /Users/Shared/Test.textClipping
    4 resource types:
    'utxt': 1 resources:
    (256): 34 bytes

    'utf8': 1 resources:
    (256): 17 bytes

    'TEXT': 1 resources:
    (256): 17 bytes

    'drag': 1 resources:
    (128): 64 bytes

    $ python3 -m rsrcfork /Users/Shared/Test.textClipping "'TEXT' (256)"
    Resource 'TEXT' (256): 17 bytes:
    00000000  48 65 72 65 20 69 73 20  73 6f 6d 65 20 74 65 78  |Here is some tex|
    00000010  74                                                 |t|
    00000011
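Judging from the filter-parsing code in this changeset, the filter argument also accepts a bare four-character type code, an ID range, and a quoted resource name, not just the exact type-and-ID form shown above. A hypothetical session (file name is a placeholder, output omitted):

.. code-block:: sh

    $ python3 -m rsrcfork Example.rsrc TEXT                      # all resources of one type
    $ python3 -m rsrcfork Example.rsrc "'TEXT' (128:256)"        # resources with IDs in a range
    $ python3 -m rsrcfork Example.rsrc "'TEXT' (\"some name\")"  # a resource selected by name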
Limitations
-----------

@@ -103,12 +110,62 @@ Further info on resource files

Sources of information about the resource fork data format, and the structure of common resource types:

* Inside Macintosh, Volume I, Chapter 5 "The Resource Manager". This book can probably be obtained in physical form somewhere, but the relevant chapter/book is also available in a few places online:

  - `Apple's legacy documentation <https://developer.apple.com/legacy/library/documentation/mac/pdf/MoreMacintoshToolbox.pdf>`_
  - pagetable.com, a site that happened to have a copy of the book: `info blog post <http://www.pagetable.com/?p=50>`_, `direct download <http://www.weihenstephan.org/~michaste/pagetable/mac/Inside_Macintosh.pdf>`_

* The Inside Macintosh book series, specifically the chapter "Resource Manager". These books are Apple's official reference material for the classic Macintosh platform. Over time, they have gone through many revisions and updates, and their structure has been changed multiple times. This is a (likely incomplete) list of the major revisions of Inside Macintosh and where they can be obtained online.

  * The earliest revisions consisted of two volumes, each a three-ring binder containing photocopied pages. The chapters were referred to as individual "manuals" and were essentially standalone - each one had its own title page, TOC, glossary, and page numbers. Various parts were still missing or not yet finalized, and updated pages were distributed regularly as part of the `Macintosh Software Supplement <https://macgui.com/news/article.php?t=447>`_.

    * bitsavers.org has scanned and OCRed PDFs of a late (November 1984) revision: `Volume I <http://bitsavers.org/pdf/apple/mac/Inside_Macintosh_Vol_1_1984.pdf>`_, `Volume II <http://bitsavers.org/pdf/apple/mac/Inside_Macintosh_Vol_2_1984.pdf>`_.

  * The Promotional Edition, released in early 1985, consisted of a single book (it was nicknamed the "phonebook" edition because of its paper quality). Although it was physically a single book, the contents were still structured into standalone "manuals" like in the ring binder version, and some parts were still missing or not finalized.

    * bitsavers.org has `a scanned and OCRed PDF <http://bitsavers.org/pdf/apple/mac/Inside_Macintosh_Promotional_Edition_1985.pdf>`_.

  * The published 1985 revision consisted of three volumes, available as three paperback books or a single hardcover book. They contained the finalized contents of the previous revisions, which documented the Macintosh 128k, Macintosh 512k, and Macintosh XL. Unlike the previous revisions, each volume had continuous page numbers and a full TOC and index, and volume III contained a complete glossary.

    * pagetable.com has a `blog post <http://www.pagetable.com/?p=50>`_ with `a scanned and OCRed PDF of the three paperback volumes <http://www.weihenstephan.org/~michaste/pagetable/mac/Inside_Macintosh.pdf>`_.

  * Additional volumes were published later to document newer Macintosh models. These served as incremental additions and did not fully supersede or replace any of the previous volumes.

    * Volume IV was published in 1986 and documented the Macintosh Plus and Macintosh 512k Enhanced.
    * Volume V was published in 1986 and documented the Macintosh II and Macintosh SE.
    * Volume VI was published in 1991 and documented System 7.0.
    * VintageApple.org has `scanned and OCRed PDFs of Volumes I through VI <https://vintageapple.org/inside_o/>`_.

  * After 1991, Inside Macintosh was restructured into over 20 volumes organized by topic, rather than chronologically by Macintosh model. These were published as books starting in 1992, and later also on CDs and online.

    * VintageApple.org has `rendered (not scanned) PDFs of 26 volumes and 7 X-Ref volumes <https://vintageapple.org/inside_r/>`_.

      * The Communications Toolbox and QuickDraw GX Programmers' Overview volumes appear to be missing.

    * Many volumes are still available in Apple's legacy developer documentation archive, in HTML and rendered (not scanned) PDF formats:

      * Two volumes appear on the website under Inside Macintosh, even though other sources don't consider them part of the Inside Macintosh series:

        * `Advanced Color Imaging on the Mac OS (HTML) <https://developer.apple.com/library/archive/documentation/mac/ACI/ACI-2.html>`_ (November 1996)
        * `Advanced Color Imaging Reference (HTML) <https://developer.apple.com/library/archive/documentation/mac/ACIReference/ACIReference-2.html>`_ (November 1996)

      * `Devices (HTML) <https://developer.apple.com/library/archive/documentation/mac/Devices/Devices-2.html>`_ (July 1996), `Devices (chapter PDFs) <https://developer.apple.com/library/archive/documentation/mac/pdf/Devices/pdf.html>`_ (1994)
      * `Files (HTML) <https://developer.apple.com/library/archive/documentation/mac/Files/Files-2.html>`_ (July 1996), `Files (chapter PDFs) <https://developer.apple.com/library/archive/documentation/mac/pdf/Files/pdf.html>`_ (1992)
      * `Imaging with QuickDraw (HTML) <https://developer.apple.com/library/archive/documentation/mac/QuickDraw/QuickDraw-2.html>`_ (July 1996), `Imaging with QuickDraw (single PDF) <https://developer.apple.com/library/archive/documentation/mac/pdf/ImagingWithQuickDraw.pdf>`_ (1994)
      * `Interapplication Communication (HTML) <https://developer.apple.com/library/archive/documentation/mac/IAC/IAC-2.html>`_ (July 1996), `Interapplication Communication (chapter PDFs) <https://developer.apple.com/library/archive/documentation/mac/pdf/Interapplication_Communication/pdf.html>`_ (1993)
      * `Macintosh Toolbox Essentials (HTML) <https://developer.apple.com/library/archive/documentation/mac/Toolbox/Toolbox-2.html>`_ (July 1996), `Macintosh Toolbox Essentials (single PDF) <https://developer.apple.com/library/archive/documentation/mac/pdf/MacintoshToolboxEssentials.pdf>`_ (1992)
      * `Memory (HTML) <https://developer.apple.com/library/archive/documentation/mac/Memory/Memory-2.html>`_ (July 1996), `Memory (chapter PDFs) <https://developer.apple.com/library/archive/documentation/mac/pdf/Memory/pdf.html>`_ (1992)
      * `More Macintosh Toolbox (HTML) <https://developer.apple.com/library/archive/documentation/mac/MoreToolbox/MoreToolbox-2.html>`_ (July 1996), `More Macintosh Toolbox (single PDF) <https://developer.apple.com/library/archive/documentation/mac/pdf/MoreMacintoshToolbox.pdf>`_ (1993)
      * `Networking (HTML) <https://developer.apple.com/library/archive/documentation/mac/Networking/Networking-2.html>`_ (July 1996), `Networking (chapter PDFs) <https://developer.apple.com/library/archive/documentation/mac/pdf/Networking/pdf.html>`_ (1994)
      * `Operating System Utilities (HTML) <https://developer.apple.com/library/archive/documentation/mac/OSUtilities/OSUtilities-2.html>`_ (July 1996), `Operating System Utilities (chapter PDFs) <https://developer.apple.com/library/archive/documentation/mac/pdf/Operating_System_Utilities/pdf.html>`_ (1994)
      * `PowerPC Numerics (HTML) <https://developer.apple.com/library/archive/documentation/mac/PPCNumerics/PPCNumerics-2.html>`_ (July 1996), `PowerPC Numerics (chapter PDFs) <https://developer.apple.com/library/archive/documentation/mac/pdf/PPC_Numerics.sit.hqx>`_ (1994)
      * `PowerPC System Software (HTML) <https://developer.apple.com/library/archive/documentation/mac/PPCSoftware/PPCSoftware-2.html>`_ (July 1996), `PowerPC System Software (chapter PDFs) <https://developer.apple.com/library/archive/documentation/mac/pdf/PPC_System_Software.sit.hqx>`_ (1994)
      * `Processes (HTML) <https://developer.apple.com/library/archive/documentation/mac/Processes/Processes-2.html>`_ (June 1996), `Processes (chapter PDFs) <https://developer.apple.com/library/archive/documentation/mac/pdf/Processes/pdf.html>`_ (1992)
      * `Sound (HTML) <https://developer.apple.com/library/archive/documentation/mac/Sound/Sound-2.html>`_ (July 1996), `Sound (chapter PDFs) <https://developer.apple.com/library/archive/documentation/mac/pdf/Sound/pdf.html>`_ (1994)
      * `Text (HTML) <https://developer.apple.com/library/archive/documentation/mac/Text/Text-2.html>`_ (July 1996), `Text (single PDF) <https://developer.apple.com/library/archive/documentation/mac/pdf/Text.pdf>`_ (1993)
      * The two AOCE volumes, Communications Toolbox, Human Interface Guidelines, Overview, seven QuickDraw GX volumes, two QuickTime volumes, and X-Ref are missing.

  * The Gryphel project (best known for the Mini vMac emulator) has `a list of physical book releases <https://www.gryphel.com/c/books/appledev.html>`_ of Inside Macintosh (and other Apple developer documentation), including ISBNs, publishers, dates, and Amazon links.

* `Wikipedia <https://en.wikipedia.org/wiki/Resource_fork>`_, of course
* The `Resource Fork <http://fileformats.archiveteam.org/wiki/Resource_Fork>`_ article on "Just Solve the File Format Problem" (despite the title, this is a decent site and not clickbait)
* The `KSFL <https://github.com/kreativekorp/ksfl>`_ library (and `its wiki <https://github.com/kreativekorp/ksfl/wiki/Macintosh-Resource-File-Format>`_), written in Java, which supports reading and writing resource files
* Alysis Software Corporation's article on resource compression (found on `the company's website <http://www.alysis.us/arctechnology.htm>`_ and in `MacTech Magazine's online archive <http://preserve.mactech.com/articles/mactech/Vol.09/09.01/ResCompression/index.html>`_) has some information on the structure of certain kinds of compressed resources.
* Apple's macOS SDK, which is distributed with Xcode. The latest version of Xcode is available for free from the Mac App Store. Current and previous versions can be downloaded from `the Apple Developer download page <https://developer.apple.com/download/more/>`_. Accessing these downloads requires an Apple ID with (at least) a free developer program membership.
* Apple's MPW (Macintosh Programmer's Workshop) and related developer tools. These were previously available from Apple's FTP server at ftp://ftp.apple.com/, which is no longer functional. Because of this, these downloads are only available on mirror sites, such as http://staticky.com/mirrors/ftp.apple.com/.
@@ -117,9 +174,89 @@ If these links are no longer functional, some are archived in the `Internet Arch

Changelog
---------

Version 1.5.0
^^^^^^^^^^^^^

* Added stream-based decompression methods to the ``rsrcfork.compress`` module.

  * The internal decompressor implementations have been refactored to use streams.
  * This allows for incremental decompression of compressed resource data. In practice this has no noticeable effect yet, because the main ``rsrcfork`` API doesn't support incremental reading of resource data.

* Fixed the command line tool always displaying an incorrect error "Cannot specify an explicit fork when reading from stdin" when using ``-`` (stdin) as the input file.

Version 1.4.0
^^^^^^^^^^^^^

* Added ``length`` and ``length_raw`` attributes to ``Resource``. These attributes are equivalent to the ``len`` of ``data`` and ``data_raw`` respectively, but may be faster to access.

  * Currently, the only optimized case is ``length`` for compressed resources, but more optimizations may be added in the future.

* Added a ``compressed_info`` attribute to ``Resource`` that provides access to the header information of compressed resources.
* Improved handling of compressed resources when listing resource files with the command line tool.

  * Metadata of compressed resources is now displayed even if no decompressor implementation is available (as long as the compressed data header can be parsed).
  * Performance has been improved - the data no longer needs to be fully decompressed to get its length, this information is now read from the header.
  * The ``'dcmp'`` ID used to decompress each resource is displayed.

* Fixed an incorrect ``options.packages`` in ``setup.cfg``, which made the library unusable except when installing from source using ``--editable``.
* Fixed ``ResourceFile.__enter__`` returning ``None``, which made it impossible to use ``ResourceFile`` properly in a ``with`` statement.
* Fixed various minor errors reported by type checking with ``mypy``.
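A rough sketch of how these 1.4.0 additions fit together, using the attribute names from the ``rsrcfork/api.py`` changes below (the file and resource are placeholders):

.. code-block:: python

    import rsrcfork

    with rsrcfork.open("Example.rsrc") as rf:  # hypothetical file
        res = rf[b"TEXT"][256]  # hypothetical resource
        print(res.length_raw)  # length as stored, possibly compressed
        print(res.length)  # decompressed length, read from the header for compressed resources
        if res.compressed_info is not None:
            print(res.compressed_info.dcmp_id)  # the 'dcmp' ID needed to decompress this resource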
Version 1.3.0.post1
^^^^^^^^^^^^^^^^^^^

* Fixed an incorrect ``options.packages`` in ``setup.cfg``, which made the library unusable except when installing from source using ``--editable``.

Version 1.2.0.post1
^^^^^^^^^^^^^^^^^^^

* Fixed an incorrect ``options.packages`` in ``setup.cfg``, which made the library unusable except when installing from source using ``--editable``.

Version 1.3.0
^^^^^^^^^^^^^

* Added a ``--group`` command line option to group resources in list format by type (the default), ID, or with no grouping.
* Added a ``dump-text`` output format to the command line tool. This format is identical to ``dump``, but instead of a hex dump, it outputs the resource data as text. The data is decoded as MacRoman and classic Mac newlines (``\r``) are translated. This is useful for examining resources that contain mostly plain text.
* Changed the command line tool to sort resources by type and ID, and added a ``--no-sort`` option to disable sorting and output resources in file order (which was the previous behavior).
* Renamed the ``rsrcfork.Resource`` attributes ``resource_type`` and ``resource_id`` to ``type`` and ``id``, respectively. The old names have been deprecated and will be removed in the future, but are still supported for now.
* Changed ``--format=dump`` output to match ``hexdump -C``'s format - spacing has been adjusted, and multiple subsequent identical lines are collapsed into a single ``*``.
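The new listing and dumping options from 1.3.0 can be combined freely; a hypothetical session (file name is a placeholder, output omitted):

.. code-block:: sh

    $ python3 -m rsrcfork --group=id Example.rsrc    # group the overview by ID instead of type
    $ python3 -m rsrcfork --no-sort Example.rsrc     # keep the order in which resources are stored in the file
    $ python3 -m rsrcfork --format=dump-text Example.rsrc "'TEXT' (256)"  # decoded MacRoman text instead of a hex dump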
Version 1.2.0
^^^^^^^^^^^^^

* Added support for compressed resources.

  * Compressed resource data is automatically decompressed, both in the Python API and on the command line.
  * This is technically a breaking change, since in previous versions the compressed resource data was returned directly. However, this change will not affect end users negatively, unless one has already implemented custom handling for compressed resources.
  * Currently, only the three standard System 7.0 compression formats (``'dcmp'`` IDs 0, 1, 2) are supported. Attempting to access a resource compressed in an unsupported format results in a ``DecompressError``.
  * To access the raw resource data as stored in the file, without automatic decompression, use the ``res.data_raw`` attribute (for the Python API), or the ``--no-decompress`` option (for the command-line interface). This can be used to read the resource data in its compressed form, even if the compression format is not supported.

* Improved automatic data/resource fork selection for files whose resource fork contains invalid data.

  * This fixes reading certain system files with resource data in their data fork (such as HIToolbox.rsrc in HIToolbox.framework, or .dfont fonts) on recent macOS versions (at least macOS 10.14, possibly earlier). Although these files have no resource fork, recent macOS versions will successfully open the resource fork and return garbage data for it. This behavior is now detected and handled by using the data fork instead.

* Replaced the ``rsrcfork`` parameter of ``rsrcfork.open``/``ResourceFork.open`` with a new ``fork`` parameter. ``fork`` accepts string values (like the command line ``--fork`` option) rather than ``rsrcfork``'s hard to understand ``None``/``True``/``False``.

  * The old ``rsrcfork`` parameter has been deprecated and will be removed in the future, but for now it still works as before.

* Added an explanatory message when a resource filter on the command line doesn't match any resources in the resource file. Previously there would either be no output or a confusing error, depending on the selected ``--format``.
* Changed resource type codes and names to be displayed in MacRoman instead of escaping all non-ASCII characters.
* Cleaned up the resource descriptions in listings and dumps to improve readability. Previously they included some redundant or unnecessary information - for example, each resource with no attributes set would be explicitly marked as "no attributes".
* Unified the formats of resource descriptions in listings and dumps, which were previously slightly different from each other.
* Improved error messages when attempting to read multiple resources using ``--format=hex`` or ``--format=raw``.
* Fixed reading from non-seekable streams not working for some resource files.
* Removed the ``allow_seek`` parameter of ``ResourceFork.__init__`` and the ``--read-mode`` command line option. They are no longer necessary, and were already practically useless before due to non-seekable stream reading being broken.
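A sketch of the decompression behavior described above, with the fallback to raw data for unsupported formats (the file and resource are placeholders; ``DecompressError`` is defined in ``rsrcfork.compress``):

.. code-block:: python

    import rsrcfork

    # fork accepts "auto", "data" or "rsrc", like the command line --fork option.
    with rsrcfork.open("Example.rsrc", fork="auto") as rf:
        res = rf[b"TEXT"][256]  # hypothetical resource
        raw = res.data_raw  # the data exactly as stored in the file
        try:
            data = res.data  # decompressed automatically if necessary
        except rsrcfork.compress.DecompressError:
            data = raw  # unsupported compression format - fall back to the raw bytes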
Version 1.1.3.post1
^^^^^^^^^^^^^^^^^^^

* Fixed a formatting error in the README.rst to allow upload to PyPI.

Version 1.1.3
^^^^^^^^^^^^^

**Note: This version is not available on PyPI, see version 1.1.3.post1 changelog for details.**

* Added a setuptools entry point for the command-line interface. This allows calling it using just ``rsrcfork`` instead of ``python3 -m rsrcfork``.
* Changed the default value of ``ResourceFork.__init__``'s ``close`` keyword argument from ``True`` to ``False``. This matches the behavior of classes like ``zipfile.ZipFile`` and ``tarfile.TarFile``.
* Fixed ``ResourceFork.open`` and ``ResourceFork.__init__`` not closing their streams in some cases.
@ -1,21 +1,37 @@
|
||||
"""A library for reading old Macintosh resource manager data, as found in resource forks or .rsrc files even on current Mac OS X/macOS systems.
|
||||
"""A pure Python, cross-platform library/tool for reading Macintosh resource data, as stored in resource forks and ``.rsrc`` files."""
|
||||
|
||||
This library only understands the resource file's general structure, i. e. the type codes, IDs, attributes, and data of the resources stored in the file. The data of individual resources is provided in raw bytes form and is not processed further - the format of this data is specific to each resource type.
|
||||
# To release a new version:
|
||||
# * Remove the .dev suffix from the version number in this file.
|
||||
# * Update the changelog in the README.rst (rename the "next version" section to the correct version number).
|
||||
# * Remove the ``dist`` directory (if it exists) to clean up any old release files.
|
||||
# * Run ``python3 setup.py sdist bdist_wheel`` to build the release files.
|
||||
# * Run ``python3 -m twine check dist/*`` to check the release files.
|
||||
# * Fix any errors reported by the build and/or check steps.
|
||||
# * Commit the changes to master.
|
||||
# * Tag the release commit with the version number, prefixed with a "v" (e. g. version 1.2.3 is tagged as v1.2.3).
|
||||
# * Fast-forward the release branch to the new release commit.
|
||||
# * Push the master and release branches.
|
||||
# * Upload the release files to PyPI using ``python3 -m twine upload dist/*``.
|
||||
# * On the GitHub repo's Releases page, edit the new release tag and add the relevant changelog section from the README.rst. (Note: The README is in reStructuredText format, but GitHub's release notes use Markdown, so it may be necessary to adjust the markup syntax.)
|
||||
|
||||
Writing resource data is not supported at all.
|
||||
"""
|
||||
# After releasing:
|
||||
# * (optional) Remove the build and dist directories from the previous release as they are no longer needed.
|
||||
# * Bump the version number in this file to the next version and add a .dev suffix.
|
||||
# * Add a new empty section for the next version to the README.rst changelog.
|
||||
# * Commit and push the changes to master.
|
||||
|
||||
__version__ = "1.1.3"
|
||||
__version__ = "1.5.0"
|
||||
|
||||
__all__ = [
|
||||
"Resource",
|
||||
"ResourceAttrs",
|
||||
"ResourceFile",
|
||||
"ResourceFileAttrs",
|
||||
"compress",
|
||||
"open",
|
||||
]
|
||||
|
||||
from . import api
|
||||
from . import api, compress
|
||||
from .api import Resource, ResourceAttrs, ResourceFile, ResourceFileAttrs
|
||||
|
||||
# noinspection PyShadowingBuiltins
|
||||
|
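Condensed into shell form, the release steps listed in these comments amount to roughly the following (the version number is illustrative, and the exact ``git`` invocations are assumptions, not spelled out in the comments):

.. code-block:: sh

    rm -r dist                             # clean up old release files
    python3 setup.py sdist bdist_wheel     # build the release files
    python3 -m twine check dist/*          # check the release files
    git tag v1.5.0                         # tag the release commit
    git push origin master release v1.5.0  # push branches and tag
    python3 -m twine upload dist/*         # upload to PyPI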
rsrcfork/__main__.py

@@ -1,11 +1,15 @@
import argparse
import collections
import enum
import itertools
import sys
import textwrap
import typing

from . import __version__, api
from . import __version__, api, compress

# The encoding to use when rendering bytes as text (in four-char codes, strings, hex dumps, etc.) or reading a quoted byte string (from the command line).
_TEXT_ENCODING = "MacRoman"

# Translation table to replace ASCII non-printable characters with periods.
_TRANSLATE_NONPRINTABLES = {k: "." for k in [*range(0x20), 0x7f]}
@@ -21,27 +25,34 @@ _REZ_ATTR_NAMES = {
	api.ResourceAttrs.resCompressed: None, # "Extended Header resource attribute"
}

F = typing.TypeVar("F", bound=enum.Flag, covariant=True)
def _decompose_flags(value: F) -> typing.Sequence[F]:
F = typing.TypeVar("F", bound=enum.Flag)
def decompose_flags(value: F) -> typing.Sequence[F]:
	"""Decompose an enum.Flags instance into separate enum constants."""
	
	return [bit for bit in type(value) if bit in value]

def _bytes_unescape(string: str) -> bytes:
	"""Convert a string containing ASCII characters and hex escapes to a bytestring.
def is_printable(char: str) -> bool:
	"""Determine whether a character is printable for our purposes.
	
	We mainly use Python's definition of printable (i. e. everything that Unicode does not consider a separator or "other" character). However, we also treat U+F8FF as printable, which is the private use codepoint used for the Apple logo character.
	"""
	
	return char.isprintable() or char == "\uf8ff"

def bytes_unescape(string: str) -> bytes:
	"""Convert a string containing text (in _TEXT_ENCODING) and hex escapes to a bytestring.
	
	(We implement our own unescaping mechanism here to not depend on any of Python's string/bytes escape syntax.)
	"""
	
	out = []
	out: typing.List[int] = []
	it = iter(string)
	n = 0
	for char in it:
		if char == "\\":
			try:
				esc = next(it)
				if esc in "\\\'\"":
					out.append(esc)
					out.extend(esc.encode(_TEXT_ENCODING))
				elif esc == "x":
					x1, x2 = next(it), next(it)
					out.append(int(x1+x2, 16))
@@ -50,31 +61,29 @@ def _bytes_unescape(string: str) -> bytes:
			except StopIteration:
				raise ValueError("End of string in escape sequence")
		else:
			out.append(ord(char))
			n += 1
			out.extend(char.encode(_TEXT_ENCODING))
	
	return bytes(out)

def _bytes_escape(bs: bytes, *, quote: str=None) -> str:
	"""Convert a bytestring to a string, with non-ASCII bytes hex-escaped.
def bytes_escape(bs: bytes, *, quote: typing.Optional[str]=None) -> str:
	"""Convert a bytestring to a string (using _TEXT_ENCODING), with non-printable characters hex-escaped.
	
	(We implement our own escaping mechanism here to not depend on Python's str or bytes repr.)
	"""
	
	out = []
	for byte in bs:
		c = chr(byte)
		if c in {quote, "\\"}:
			out.append(f"\\{c}")
		elif 0x20 <= byte < 0x7f:
			out.append(c)
	for byte, char in zip(bs, bs.decode(_TEXT_ENCODING)):
		if char in {quote, "\\"}:
			out.append(f"\\{char}")
		elif is_printable(char):
			out.append(char)
		else:
			out.append(f"\\x{byte:02x}")
	
	return "".join(out)

def _filter_resources(rf: api.ResourceFile, filters: typing.Sequence[str]) -> typing.Sequence[api.Resource]:
	matching = collections.OrderedDict()
def filter_resources(rf: api.ResourceFile, filters: typing.Sequence[str]) -> typing.List[api.Resource]:
	matching: typing.MutableMapping[typing.Tuple[bytes, int], api.Resource] = collections.OrderedDict()
	
	for filter in filters:
		if len(filter) == 4:
@@ -84,15 +93,15 @@ def _filter_resources(rf: api.ResourceFile, filters: typing.Sequence[str]) -> ty
				continue
			
			for res in resources.values():
				matching[res.resource_type, res.resource_id] = res
				matching[res.type, res.id] = res
		elif filter[0] == filter[-1] == "'":
			try:
				resources = rf[_bytes_unescape(filter[1:-1])]
				resources = rf[bytes_unescape(filter[1:-1])]
			except KeyError:
				continue
			
			for res in resources.values():
				matching[res.resource_type, res.resource_id] = res
				matching[res.type, res.id] = res
		else:
			pos = filter.find("'", 1)
			if pos == -1:
@@ -100,67 +109,118 @@ def _filter_resources(rf: api.ResourceFile, filters: typing.Sequence[str]) -> ty
			elif filter[pos + 1] != " ":
				raise ValueError(f"Invalid filter {filter!r}: Resource type and ID must be separated by a space")
			
			restype, resid = filter[:pos + 1], filter[pos + 2:]
			restype_str, resid_str = filter[:pos + 1], filter[pos + 2:]
			
			if not restype[0] == restype[-1] == "'":
			if not restype_str[0] == restype_str[-1] == "'":
				raise ValueError(
					f"Invalid filter {filter!r}: Resource type is not a single-quoted type identifier: {restype!r}")
			restype = _bytes_unescape(restype[1:-1])
					f"Invalid filter {filter!r}: Resource type is not a single-quoted type identifier: {restype_str!r}")
			restype = bytes_unescape(restype_str[1:-1])
			
			if len(restype) != 4:
				raise ValueError(
					f"Invalid filter {filter!r}: Type identifier must be 4 bytes after replacing escapes, got {len(restype)} bytes: {restype!r}")
			
			if resid[0] != "(" or resid[-1] != ")":
			if resid_str[0] != "(" or resid_str[-1] != ")":
				raise ValueError(f"Invalid filter {filter!r}: Resource ID must be parenthesized")
			resid = resid[1:-1]
			resid_str = resid_str[1:-1]
			
			try:
				resources = rf[restype]
			except KeyError:
				continue
			
			if resid[0] == resid[-1] == '"':
				name = _bytes_unescape(resid[1:-1])
			if resid_str[0] == resid_str[-1] == '"':
				name = bytes_unescape(resid_str[1:-1])
				
				for res in resources.values():
					if res.name == name:
						matching[res.resource_type, res.resource_id] = res
						matching[res.type, res.id] = res
						break
			elif ":" in resid:
				if resid.count(":") > 1:
					raise ValueError(f"Invalid filter {filter!r}: Too many colons in ID range expression: {resid!r}")
				start, end = resid.split(":")
				start, end = int(start), int(end)
			elif ":" in resid_str:
				if resid_str.count(":") > 1:
					raise ValueError(f"Invalid filter {filter!r}: Too many colons in ID range expression: {resid_str!r}")
				start_str, end_str = resid_str.split(":")
				start, end = int(start_str), int(end_str)
				
				for res in resources.values():
					if start <= res.resource_id <= end:
						matching[res.resource_type, res.resource_id] = res
					if start <= res.id <= end:
						matching[res.type, res.id] = res
			else:
				resid = int(resid)
				resid = int(resid_str)
				try:
					res = resources[resid]
				except KeyError:
					continue
				matching[res.resource_type, res.resource_id] = res
				matching[res.type, res.id] = res
	
	return list(matching.values())

def _hexdump(data: bytes):
def hexdump(data: bytes) -> None:
	last_line = None
	asterisk_shown = False
	for i in range(0, len(data), 16):
		line = data[i:i + 16]
		line_hex = " ".join(f"{byte:02x}" for byte in line)
		line_char = line.decode("MacRoman").translate(_TRANSLATE_NONPRINTABLES)
		print(f"{i:08x} {line_hex:<{16*2+15}} |{line_char}|")
		# If the same 16-byte lines appear multiple times, print only the first one, and replace all further lines with a single line with an asterisk.
		# This is unambiguous - to find out how many lines were collapsed this way, the user can compare the addresses of the lines before and after the asterisk.
		if line == last_line:
			if not asterisk_shown:
				print("*")
				asterisk_shown = True
		else:
			line_hex_left = " ".join(f"{byte:02x}" for byte in line[:8])
			line_hex_right = " ".join(f"{byte:02x}" for byte in line[8:])
			line_char = line.decode(_TEXT_ENCODING).translate(_TRANSLATE_NONPRINTABLES)
			print(f"{i:08x}  {line_hex_left:<{8*2+7}}  {line_hex_right:<{8*2+7}}  |{line_char}|")
			asterisk_shown = False
		last_line = line
	
	if data:
		print(f"{len(data):08x}")

def _raw_hexdump(data: bytes):
def raw_hexdump(data: bytes) -> None:
	for i in range(0, len(data), 16):
		print(" ".join(f"{byte:02x}" for byte in data[i:i + 16]))

def main():
def translate_text(data: bytes) -> str:
	return data.decode(_TEXT_ENCODING).replace("\r", "\n")

def describe_resource(res: api.Resource, *, include_type: bool, decompress: bool) -> str:
	id_desc_parts = [f"{res.id}"]
	
	if res.name is not None:
		name = bytes_escape(res.name, quote='"')
		id_desc_parts.append(f'"{name}"')
	
	id_desc = ", ".join(id_desc_parts)
	
	content_desc_parts = []
	
	if decompress and api.ResourceAttrs.resCompressed in res.attributes:
		try:
			res.compressed_info
		except compress.DecompressError:
			length_desc = f"unparseable compressed data header ({res.length_raw} bytes compressed)"
		else:
			assert res.compressed_info is not None
			length_desc = f"{res.length} bytes ({res.length_raw} bytes compressed, 'dcmp' ({res.compressed_info.dcmp_id}) format)"
	else:
		assert res.compressed_info is None
		length_desc = f"{res.length_raw} bytes"
	content_desc_parts.append(length_desc)
	
	attrs = decompose_flags(res.attributes)
	if attrs:
		content_desc_parts.append(" | ".join(attr.name for attr in attrs))
	
	content_desc = ", ".join(content_desc_parts)
	
	desc = f"({id_desc}): {content_desc}"
	if include_type:
		restype = bytes_escape(res.type, quote="'")
		desc = f"'{restype}' {desc}"
	return desc

def parse_args() -> argparse.Namespace:
	ap = argparse.ArgumentParser(
		add_help=False,
		fromfile_prefix_chars="@",
@@ -186,27 +246,190 @@ def main():
	ap.add_argument("--version", action="version", version=__version__, help="Display version information and exit")
	ap.add_argument("-a", "--all", action="store_true", help="When no filters are given, show all resources in full, instead of an overview")
	ap.add_argument("-f", "--fork", choices=["auto", "data", "rsrc"], default="auto", help="The fork from which to read the resource data, or auto to guess (default: %(default)s)")
	ap.add_argument("--format", choices=["dump", "hex", "raw", "derez"], default="dump", help="How to output the resources - human-readable info with hex dump (dump), data only as hex (hex), data only as raw bytes (raw), or like DeRez with no resource definitions (derez)")
	ap.add_argument("--no-decompress", action="store_false", dest="decompress", help="Do not decompress compressed resources, output compressed resource data as-is")
	ap.add_argument("--format", choices=["dump", "dump-text", "hex", "raw", "derez"], default="dump", help="How to output the resources - human-readable info with hex dump (dump) (default), human-readable info with newline-translated data (dump-text), data only as hex (hex), data only as raw bytes (raw), or like DeRez with no resource definitions (derez)")
	ap.add_argument("--group", action="store", choices=["none", "type", "id"], default="type", help="Group resources in list view by type or ID, or disable grouping (default: type)")
	ap.add_argument("--no-sort", action="store_false", dest="sort", help="Output resources in the order in which they are stored in the file, instead of sorting them by type and ID")
	ap.add_argument("--header-system", action="store_true", help="Output system-reserved header data and nothing else")
	ap.add_argument("--header-application", action="store_true", help="Output application-specific header data and nothing else")
	ap.add_argument("--read-mode", choices=["auto", "stream", "seek"], default="auto", help="Whether to read the data sequentially (stream) or on-demand (seek), or auto to use seeking when possible (default: %(default)s)")
	
	ap.add_argument("file", help="The file to read, or - for stdin")
	ap.add_argument("filter", nargs="*", help="One or more filters to select which resources to display, or omit to show an overview of all resources")
	
	ns = ap.parse_args()
	return ns

def show_header_data(data: bytes, *, format: str) -> None:
	if format == "dump":
		hexdump(data)
	elif format == "dump-text":
		print(translate_text(data))
	elif format == "hex":
		raw_hexdump(data)
	elif format == "raw":
		sys.stdout.buffer.write(data)
	elif format == "derez":
		print("Cannot output file header data in derez format", file=sys.stderr)
		sys.exit(1)
	else:
		raise ValueError(f"Unhandled output format: {format}")

def show_filtered_resources(resources: typing.Sequence[api.Resource], format: str, decompress: bool) -> None:
	if not resources:
		if format in ("dump", "dump-text"):
			print("No resources matched the filter")
		elif format in ("hex", "raw"):
			print("No resources matched the filter", file=sys.stderr)
			sys.exit(1)
		elif format == "derez":
			print("/* No resources matched the filter */")
		else:
			raise AssertionError(f"Unhandled output format: {format}")
	elif format in ("hex", "raw") and len(resources) != 1:
		print(f"Format {format} can only output a single resource, but the filter matched {len(resources)} resources", file=sys.stderr)
		sys.exit(1)
	
	ns.fork = {"auto": None, "data": False, "rsrc": True}[ns.fork]
	ns.read_mode = {"auto": None, "stream": False, "seek": True}[ns.read_mode]
	for res in resources:
		if decompress:
			data = res.data
		else:
			data = res.data_raw
		
		if format in ("dump", "dump-text"):
			# Human-readable info and hex or text dump
			desc = describe_resource(res, include_type=True, decompress=decompress)
			print(f"Resource {desc}:")
			if format == "dump":
				hexdump(data)
			elif format == "dump-text":
				print(translate_text(data))
			else:
				raise AssertionError(f"Unhandled format: {format!r}")
			print()
		elif format == "hex":
			# Data only as hex
			
			raw_hexdump(data)
		elif format == "raw":
			# Data only as raw bytes
			
			sys.stdout.buffer.write(data)
		elif format == "derez":
			# Like DeRez with no resource definitions
			
			attrs = list(decompose_flags(res.attributes))
			
			if decompress and api.ResourceAttrs.resCompressed in attrs:
				attrs.remove(api.ResourceAttrs.resCompressed)
				attrs_comment = " /* was compressed */"
			else:
				attrs_comment = ""
			
			attr_descs_with_none = [_REZ_ATTR_NAMES[attr] for attr in attrs]
			if None in attr_descs_with_none:
				attr_descs = [f"${res.attributes.value:02X}"]
			else:
				attr_descs = typing.cast(typing.List[str], attr_descs_with_none)
			
			parts = [str(res.id)]
			
			if res.name is not None:
				name = bytes_escape(res.name, quote='"')
				parts.append(f'"{name}"')
			
			parts += attr_descs
			
			restype = bytes_escape(res.type, quote="'")
			print(f"data '{restype}' ({', '.join(parts)}{attrs_comment}) {{")
			
			for i in range(0, len(data), 16):
				# Two-byte grouping is really annoying to implement.
				groups = []
				for j in range(0, 16, 2):
					if i+j >= len(data):
						break
					elif i+j+1 >= len(data):
						groups.append(f"{data[i+j]:02X}")
					else:
						groups.append(f"{data[i+j]:02X}{data[i+j+1]:02X}")
				
				s = f'$"{" ".join(groups)}"'
				comment = "/* " + data[i:i + 16].decode(_TEXT_ENCODING).translate(_TRANSLATE_NONPRINTABLES) + " */"
				print(f"\t{s:<54s}{comment}")
			
			print("};")
			print()
		else:
			raise ValueError(f"Unhandled output format: {format}")

def list_resource_file(rf: api.ResourceFile, *, sort: bool, group: str, decompress: bool) -> None:
	if rf.header_system_data != bytes(len(rf.header_system_data)):
		print("Header system data:")
		hexdump(rf.header_system_data)
	
	if rf.header_application_data != bytes(len(rf.header_application_data)):
		print("Header application data:")
		hexdump(rf.header_application_data)
	
	attrs = decompose_flags(rf.file_attributes)
	if attrs:
		print("File attributes: " + " | ".join(attr.name for attr in attrs))
	
	if len(rf) == 0:
		print("No resources (empty resource file)")
		return
	
	if group == "none":
		all_resources: typing.List[api.Resource] = []
		for reses in rf.values():
			all_resources.extend(reses.values())
		if sort:
			all_resources.sort(key=lambda res: (res.type, res.id))
		print(f"{len(all_resources)} resources:")
		for res in all_resources:
			print(describe_resource(res, include_type=True, decompress=decompress))
	elif group == "type":
		print(f"{len(rf)} resource types:")
		restype_items: typing.Collection[typing.Tuple[bytes, typing.Mapping[int, api.Resource]]] = rf.items()
		if sort:
			restype_items = sorted(restype_items, key=lambda item: item[0])
		for typecode, resources_map in restype_items:
			restype = bytes_escape(typecode, quote="'")
			print(f"'{restype}': {len(resources_map)} resources:")
			resources_items: typing.Collection[typing.Tuple[int, api.Resource]] = resources_map.items()
			if sort:
				resources_items = sorted(resources_items, key=lambda item: item[0])
			for resid, res in resources_items:
				print(describe_resource(res, include_type=False, decompress=decompress))
			print()
	elif group == "id":
		all_resources = []
		for reses in rf.values():
			all_resources.extend(reses.values())
		all_resources.sort(key=lambda res: res.id)
		resources_by_id = {resid: list(reses) for resid, reses in itertools.groupby(all_resources, key=lambda res: res.id)}
		print(f"{len(resources_by_id)} resource IDs:")
		for resid, resources in resources_by_id.items():
			print(f"({resid}): {len(resources)} resources:")
			if sort:
				resources.sort(key=lambda res: res.type)
			for res in resources:
				print(describe_resource(res, include_type=True, decompress=decompress))
			print()
	else:
		raise AssertionError(f"Unhandled group mode: {group!r}")

def main() -> typing.NoReturn:
	ns = parse_args()
	
	if ns.file == "-":
		if ns.fork is not None:
		if ns.fork != "auto":
			print("Cannot specify an explicit fork when reading from stdin", file=sys.stderr)
			sys.exit(1)
		
		rf = api.ResourceFile(sys.stdin.buffer, allow_seek=ns.read_mode)
		rf = api.ResourceFile(sys.stdin.buffer)
	else:
		rf = api.ResourceFile.open(ns.file, rsrcfork=ns.fork, allow_seek=ns.read_mode)
		rf = api.ResourceFile.open(ns.file, fork=ns.fork)
	
	with rf:
		if ns.header_system or ns.header_application:
@@ -215,135 +438,21 @@ def main():
			else:
				data = rf.header_application_data
			
			if ns.format == "dump":
				_hexdump(data)
			elif ns.format == "hex":
				_raw_hexdump(data)
			elif ns.format == "raw":
				sys.stdout.buffer.write(data)
			elif ns.format == "derez":
				print("Cannot output file header data in derez format", file=sys.stderr)
				sys.exit(1)
			else:
				raise ValueError(f"Unhandled output format: {ns.format}")
			show_header_data(data, format=ns.format)
		elif ns.filter or ns.all:
			if ns.filter:
				resources = _filter_resources(rf, ns.filter)
				resources = filter_resources(rf, ns.filter)
			else:
				resources = []
				for reses in rf.values():
					resources.extend(reses.values())
			
			if ns.format in ("hex", "raw") and len(resources) != 1:
				print(f"Format {ns.format} only supports exactly one resource, but found {len(resources)}", file=sys.stderr)
				sys.exit(1)
			if ns.sort:
				resources.sort(key=lambda res: (res.type, res.id))
			
			for res in resources:
				if ns.format == "dump":
					# Human-readable info and hex dump
					
					if res.name is None:
						name = "unnamed"
					else:
						name = _bytes_escape(res.name, quote='"')
						name = f'name "{name}"'
					
					attrs = _decompose_flags(res.attributes)
					if attrs:
						attrdesc = "attributes: " + " | ".join(attr.name for attr in attrs)
					else:
						attrdesc = "no attributes"
					
					restype = _bytes_escape(res.resource_type, quote="'")
					print(f"Resource '{restype}' ({res.resource_id}), {name}, {attrdesc}, {len(res.data)} bytes:")
					_hexdump(res.data)
					print()
				elif ns.format == "hex":
					# Data only as hex
					
					_raw_hexdump(res.data)
				elif ns.format == "raw":
					# Data only as raw bytes
					
					sys.stdout.buffer.write(res.data)
				elif ns.format == "derez":
					# Like DeRez with no resource definitions
					
					attrs = [_REZ_ATTR_NAMES[attr] for attr in _decompose_flags(res.attributes)]
					if None in attrs:
						attrs[:] = [f"${res.attributes.value:02X}"]
					
					parts = [str(res.resource_id)]
					
					if res.name is not None:
						name = _bytes_escape(res.name, quote='"')
						parts.append(f'"{name}"')
					
					parts += attrs
					
					restype = _bytes_escape(res.resource_type, quote="'")
					print(f"data '{restype}' ({', '.join(parts)}) {{")
					
					for i in range(0, len(res.data), 16):
						# Two-byte grouping is really annoying to implement.
						groups = []
						for j in range(0, 16, 2):
							if i+j >= len(res.data):
								break
							elif i+j+1 >= len(res.data):
								groups.append(f"{res.data[i+j]:02X}")
							else:
								groups.append(f"{res.data[i+j]:02X}{res.data[i+j+1]:02X}")
						
						s = f'$"{" ".join(groups)}"'
						comment = "/* " + res.data[i:i + 16].decode("MacRoman").translate(_TRANSLATE_NONPRINTABLES) + " */"
						print(f"\t{s:<54s}{comment}")
					
					print("};")
					print()
				else:
					raise ValueError(f"Unhandled output format: {ns.format}")
			show_filtered_resources(resources, format=ns.format, decompress=ns.decompress)
		else:
			if rf.header_system_data != bytes(len(rf.header_system_data)):
				print("Header system data:")
				_hexdump(rf.header_system_data)
			else:
				print("No header system data")
			
			if rf.header_application_data != bytes(len(rf.header_application_data)):
				print("Header application data:")
				_hexdump(rf.header_application_data)
			else:
				print("No header application data")
			
			attrs = _decompose_flags(rf.file_attributes)
			if attrs:
				print("File attributes: " + " | ".join(attr.name for attr in attrs))
			else:
				print("No file attributes")
			
			if len(rf) > 0:
				print(f"{len(rf)} resource types:")
				for typecode, resources in rf.items():
					restype = _bytes_escape(typecode, quote="'")
					print(f"'{restype}': {len(resources)} resources:")
					for resid, res in rf[typecode].items():
						if res.name is None:
							name = "unnamed"
						else:
							name = _bytes_escape(res.name, quote='"')
							name = f'name "{name}"'
						
						attrs = _decompose_flags(res.attributes)
						if attrs:
							attrdesc = " | ".join(attr.name for attr in attrs)
						else:
							attrdesc = "no attributes"
						
						print(f"({resid}), {name}, {attrdesc}, {len(res.data)} bytes")
					print()
			else:
				print("No resource types (empty resource file)")
			list_resource_file(rf, sort=ns.sort, group=ns.group, decompress=ns.decompress)
	
	sys.exit(0)
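Based on the argument definitions in ``parse_args`` above, the header data of a resource file can also be inspected directly, and ``-`` reads from standard input. A hypothetical session (file name is a placeholder, output omitted):

.. code-block:: sh

    $ python3 -m rsrcfork --header-system Example.rsrc       # only the system-reserved header data
    $ python3 -m rsrcfork --header-application Example.rsrc  # only the application-specific header data
    $ python3 -m rsrcfork - < Example.rsrc                   # read the resource data from stdin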
rsrcfork/api.py (393 lines)
@@ -1,9 +1,14 @@
import collections
import collections.abc
import enum
import io
import os
import struct
import types
import typing
import warnings

from . import compress

# The formats of all following structures is as described in the Inside Macintosh book (see module docstring).
# Signedness and byte order of the integers is never stated explicitly in IM.
@@ -54,6 +59,9 @@ STRUCT_RESOURCE_REFERENCE = struct.Struct(">hHI4x")
# 1 byte: Length of following resource name.
STRUCT_RESOURCE_NAME_HEADER = struct.Struct(">B")

class InvalidResourceFileError(Exception):
	pass

class ResourceFileAttrs(enum.Flag):
	"""Resource file attribute flags. The descriptions for these flags are taken from comments on the map*Bit and map* enum constants in <CarbonCore/Resources.h>."""
	
@@ -89,54 +97,138 @@ class ResourceAttrs(enum.Flag):
class Resource(object):
	"""A single resource from a resource file."""
	
	__slots__ = ("resource_type", "resource_id", "name", "attributes", "data")
	type: bytes
	id: int
	name: typing.Optional[bytes]
	attributes: ResourceAttrs
	data_raw: bytes
	_compressed_info: compress.common.CompressedHeaderInfo
	_data_decompressed: bytes
	
	def __init__(self, resource_type: bytes, resource_id: int, name: typing.Optional[bytes], attributes: ResourceAttrs, data: bytes):
	def __init__(self, resource_type: bytes, resource_id: int, name: typing.Optional[bytes], attributes: ResourceAttrs, data_raw: bytes) -> None:
		"""Create a new resource with the given type code, ID, name, attributes, and data."""
		
		super().__init__()
		
		self.resource_type: bytes = resource_type
		self.resource_id: int = resource_id
		self.name: typing.Optional[bytes] = name
		self.attributes: ResourceAttrs = attributes
		self.data: bytes = data
		self.type = resource_type
		self.id = resource_id
		self.name = name
		self.attributes = attributes
		self.data_raw = data_raw
	
	def __repr__(self):
		if len(self.data) > 32:
			data = f"<{len(self.data)} bytes: {self.data[:32]}...>"
	def __repr__(self) -> str:
		try:
			data = self.data
		except compress.DecompressError:
			decompress_ok = False
			data = self.data_raw
		else:
			data = repr(self.data)
			decompress_ok = True
		
		return f"{type(self).__module__}.{type(self).__qualname__}(resource_type={self.resource_type}, resource_id={self.resource_id}, name={self.name}, attributes={self.attributes}, data={data})"
		if len(data) > 32:
			data_repr = f"<{len(data)} bytes: {data[:32]}...>"
		else:
			data_repr = repr(data)
		
		if not decompress_ok:
			data_repr = f"<decompression failed - compressed data: {data_repr}>"
		
		return f"{type(self).__module__}.{type(self).__qualname__}(type={self.type}, id={self.id}, name={self.name}, attributes={self.attributes}, data={data_repr})"
	
	@property
	def resource_type(self) -> bytes:
		warnings.warn(DeprecationWarning("The resource_type attribute has been deprecated and will be removed in a future version. Please use the type attribute instead."))
		return self.type
	
	@property
	def resource_id(self) -> int:
		warnings.warn(DeprecationWarning("The resource_id attribute has been deprecated and will be removed in a future version. Please use the id attribute instead."))
		return self.id
	
	@property
	def compressed_info(self) -> typing.Optional[compress.common.CompressedHeaderInfo]:
		"""The compressed resource header information, or None if this resource is not compressed.
		
		Accessing this attribute may raise a DecompressError if the resource data is compressed and the header could not be parsed. To access the unparsed header data, use the data_raw attribute.
		"""
		
		if ResourceAttrs.resCompressed in self.attributes:
			try:
				return self._compressed_info
			except AttributeError:
				self._compressed_info = compress.common.CompressedHeaderInfo.parse(self.data_raw)
				return self._compressed_info
		else:
			return None
	
	@property
	def length_raw(self) -> int:
		"""The length of the raw resource data, which may be compressed.
		
		Accessing this attribute may be faster than computing len(self.data_raw) manually.
		"""
		
		return len(self.data_raw)
	
	@property
	def length(self) -> int:
		"""The length of the resource data. If the resource data is compressed, this is the length of the data after decompression.
		
		Accessing this attribute may be faster than computing len(self.data) manually.
		"""
		
		if self.compressed_info is not None:
			return self.compressed_info.decompressed_length
		else:
			return self.length_raw
	
	@property
	def data(self) -> bytes:
		"""The resource data, decompressed if necessary.
|
||||
|
||||
Accessing this attribute may raise a DecompressError if the resource data is compressed and could not be decompressed. To access the compressed resource data, use the data_raw attribute.
|
||||
"""
|
||||
|
||||
if self.compressed_info is not None:
|
||||
try:
|
||||
return self._data_decompressed
|
||||
except AttributeError:
|
||||
self._data_decompressed = compress.decompress_parsed(self.compressed_info, self.data_raw[self.compressed_info.header_length:])
|
||||
return self._data_decompressed
|
||||
else:
|
||||
return self.data_raw
|
||||
|
||||
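
Taken together, the data_raw/compressed_info/data properties above make decompression lazy and cached: the header is parsed at most once, and the decompressed bytes are computed at most once. A minimal sketch of the resulting behavior for an uncompressed resource (the constructor arguments here are made up for illustration):

    from rsrcfork.api import Resource, ResourceAttrs

    res = Resource(b"TEXT", 128, None, ResourceAttrs(0), b"hello")
    assert res.compressed_info is None   # resCompressed is not set...
    assert res.data == res.data_raw      # ...so data is simply the raw bytes
    assert res.length == res.length_raw == 5
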
class ResourceFile(collections.abc.Mapping):
class ResourceFile(typing.Mapping[bytes, typing.Mapping[int, Resource]], typing.ContextManager["ResourceFile"]):
"""A resource file reader operating on a byte stream."""

# noinspection PyProtectedMember
class _LazyResourceMap(collections.abc.Mapping):
class _LazyResourceMap(typing.Mapping[int, Resource]):
"""Internal class: Lazy mapping of resource IDs to resource objects, returned when subscripting a ResourceFile."""

def __init__(self, resfile: "ResourceFile", restype: bytes):
_resfile: "ResourceFile"
_restype: bytes
_submap: typing.Mapping[int, typing.Tuple[int, ResourceAttrs, int]]

def __init__(self, resfile: "ResourceFile", restype: bytes) -> None:
"""Create a new _LazyResourceMap "containing" all resources in resfile that have the type code restype."""

super().__init__()

self._resfile: "ResourceFile" = resfile
self._restype: bytes = restype
self._submap: typing.Mapping[int, typing.Tuple[int, ResourceAttrs, int]] = self._resfile._references[self._restype]
self._resfile = resfile
self._restype = restype
self._submap = self._resfile._references[self._restype]

def __len__(self):
def __len__(self) -> int:
"""Get the number of resources with this type code."""

return len(self._submap)

def __iter__(self):
def __iter__(self) -> typing.Iterator[int]:
"""Iterate over the IDs of all resources with this type code."""

return iter(self._submap)

def __contains__(self, key: int):
def __contains__(self, key: object) -> bool:
"""Check if a resource with the given ID exists for this type code."""

return key in self._submap
@@ -148,134 +240,151 @@ class ResourceFile(collections.abc.Mapping):

if name_offset == 0xffff:
name = None
elif self._resfile._allow_seek:
else:
self._resfile._stream.seek(self._resfile.map_offset + self._resfile.map_name_list_offset + name_offset)
(name_length,) = self._resfile._stream_unpack(STRUCT_RESOURCE_NAME_HEADER)
name = self._resfile._read(name_length)
else:
name = self._resfile._resource_names[name_offset]
name = self._resfile._read_exact(name_length)

if self._resfile._allow_seek:
self._resfile._stream.seek(self._resfile.data_offset + data_offset)
(data_length,) = self._resfile._stream_unpack(STRUCT_RESOURCE_DATA_HEADER)
data = self._resfile._read(data_length)
else:
data = self._resfile._resource_data[data_offset]
self._resfile._stream.seek(self._resfile.data_offset + data_offset)
(data_length,) = self._resfile._stream_unpack(STRUCT_RESOURCE_DATA_HEADER)
data = self._resfile._read_exact(data_length)

return Resource(self._restype, key, name, attributes, data)

def __repr__(self):
def __repr__(self) -> str:
if len(self) == 1:
return f"<{type(self).__module__}.{type(self).__qualname__} at {id(self):#x} containing one resource: {next(iter(self.values()))}>"
else:
return f"<{type(self).__module__}.{type(self).__qualname__} at {id(self):#x} containing {len(self)} resources with IDs: {list(self)}>"

_close_stream: bool
_stream: typing.BinaryIO

data_offset: int
map_offset: int
data_length: int
map_length: int
header_system_data: bytes
header_application_data: bytes

map_type_list_offset: int
map_name_list_offset: int
file_attributes: ResourceFileAttrs

_reference_counts: typing.MutableMapping[bytes, int]
_references: typing.MutableMapping[bytes, typing.MutableMapping[int, typing.Tuple[int, ResourceAttrs, int]]]

@classmethod
def open(cls, filename: typing.Union[str, bytes, os.PathLike], *, rsrcfork: typing.Optional[bool]=None, **kwargs) -> "ResourceFile":
def open(cls, filename: typing.Union[str, os.PathLike], *, fork: str="auto", **kwargs: typing.Any) -> "ResourceFile":
"""Open the file at the given path as a ResourceFile.

If rsrcfork is not None, it is treated as boolean and controls whether the data or resource fork of the file should be opened. (On systems other than macOS, opening resource forks will not work of course, since they don't exist.)
If rsrcfork is None, guess whether the data or resource fork should be opened. If the resource fork exists and is not empty, it is opened, otherwise the data fork is opened instead.
The fork parameter controls which fork of the file the resource data will be read from. It accepts the following values:

* "auto" (the default): Automatically select the correct fork. The resource fork will be used if the file has one and it contains valid resource data. Otherwise the data fork will be used.
* "rsrc": Force use of the resource fork and never fall back to the data fork. This will not work on systems other than macOS, because they do not support resource forks natively.
* "data": Force use of the data fork, even if a resource fork is present.

The rsrcfork parameter is deprecated and will be removed in the future. It has the same purpose as the fork parameter, but accepts different argument values: None stands for "auto", True stands for "rsrc", and False stands for "data". These argument values are less understandable than the string versions and are not easily extensible in the future, which is why the parameter has been deprecated.
"""

f: typing.io.BinaryIO
if rsrcfork is None:
if "close" in kwargs:
raise TypeError("ResourceFile.open does not support the 'close' keyword argument")

kwargs["close"] = True

if "rsrcfork" in kwargs:
if fork != "auto":
raise TypeError("The fork and rsrcfork parameters cannot be used together. Please use only the fork parameter; it replaces the deprecated rsrcfork parameter.")

if kwargs["rsrcfork"] is None:
fork = "auto"
elif kwargs["rsrcfork"]:
fork = "rsrc"
else:
fork = "data"
warnings.warn(DeprecationWarning(f"The rsrcfork parameter has been deprecated and will be removed in a future version. Please use fork={fork!r} instead of rsrcfork={kwargs['rsrcfork']!r}."))
del kwargs["rsrcfork"]

if fork == "auto":
# Determine whether the file has a usable resource fork.
try:
# Try to open the resource fork.
f = open(os.path.join(filename, "..namedfork", "rsrc"), "rb")
except (FileNotFoundError, NotADirectoryError):
# If the resource fork doesn't exist, fall back to the data fork.
f = open(filename, "rb")
return cls(open(filename, "rb"), **kwargs)
else:
# Resource fork exists, check if it actually contains valid resource data.
# This check is necessary because opening ..namedfork/rsrc on files that don't actually have a resource fork can sometimes succeed, but the resulting stream will either be empty, or (as of macOS 10.14, and possibly earlier) contain garbage data.
try:
# Resource fork exists, check if it actually contains anything.
if f.read(1):
# Resource fork contains data, seek back to start before using it.
f.seek(0)
else:
# Resource fork contains no data, fall back to the data fork.
f.close()
f = open(filename, "rb")
return cls(f, **kwargs)
except InvalidResourceFileError:
# Resource fork is empty or invalid, fall back to the data fork.
f.close()
return cls(open(filename, "rb"), **kwargs)
except BaseException:
f.close()
raise
elif rsrcfork:
elif fork == "rsrc":
# Force use of the resource fork.
f = open(os.path.join(filename, "..namedfork", "rsrc"), "rb")
else:
return cls(open(os.path.join(filename, "..namedfork", "rsrc"), "rb"), **kwargs)
elif fork == "data":
# Force use of the data fork.
f = open(filename, "rb")

# Use the selected fork to build a ResourceFile.
return cls(f, close=True, **kwargs)
return cls(open(filename, "rb"), **kwargs)
else:
raise ValueError(f"Unsupported value for the fork parameter: {fork!r}")
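
A short sketch of the three fork modes accepted by open, as documented above ("Example" is a hypothetical file path; fork="rsrc" only works on macOS, where the ..namedfork/rsrc path exists):

    from rsrcfork.api import ResourceFile

    rf_auto = ResourceFile.open("Example")               # prefer the resource fork, fall back to the data fork
    rf_data = ResourceFile.open("Example", fork="data")  # always read the data fork
    rf_rsrc = ResourceFile.open("Example", fork="rsrc")  # resource fork only; fails off macOS
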
def __init__(self, stream: typing.io.BinaryIO, *, allow_seek: typing.Optional[bool]=None, close: bool=False):
def __init__(self, stream: typing.BinaryIO, *, close: bool=False) -> None:
"""Create a ResourceFile wrapping the given byte stream.

To read resource file data from a bytes object, wrap it in an io.BytesIO.

allow_seek controls whether seeking should be used when reading the file. If allow_seek is None, stream.seekable() is called to determine whether seeking should be used.
If seeking is used, only the file header, map header, resource types, and resource references are read into memory. Resource data and names are loaded on-demand when the respective resource is accessed.
If seeking is not used, the entire stream is processed sequentially and read into memory, including all resource data and names. This may be necessary when the stream does not support seeking at all. Memory is usually not a concern, most resource files are not even a megabyte in size.
If the stream is seekable, only the file header and resource map are read initially. Resource data and names are loaded on-demand when the respective resource is accessed. If the stream is not seekable, the entire stream data is read into memory (this is necessary because the resource map is stored at the end of the resource file).

In practice, memory usage is usually not a concern when reading resource files. Even large resource files are only a few megabytes in size, and due to limitations in the format, resource files cannot be much larger than 16 MiB (except for special cases that are unlikely to occur in practice).

close controls whether the stream should be closed when the ResourceFile's close method is called. By default this is False.
"""

super().__init__()

self._close_stream: bool = close
self._stream: typing.io.BinaryIO = stream
self._close_stream = close
if stream.seekable():
self._stream = stream
else:
self._stream = io.BytesIO(stream.read())

try:
self._allow_seek: bool
if allow_seek is None:
self._allow_seek = self._stream.seekable()
else:
self._allow_seek = allow_seek

if self._allow_seek:
self._pos = None
self._init_seeking()
else:
self._pos: int = 0
self._init_streaming()
self._read_header()
self._stream.seek(self.map_offset)
self._read_map_header()
self._read_all_resource_types()
self._read_all_references()
except BaseException:
self.close()
raise

def _tell(self) -> int:
"""Get the current position in the stream. This uses the stream's tell method if seeking is enabled, and an internal counter otherwise."""
def _read_exact(self, byte_count: int) -> bytes:
"""Read byte_count bytes from the stream and raise an exception if too few bytes are read (i. e. if EOF was hit prematurely)."""

if self._allow_seek:
return self._stream.tell()
else:
return self._pos
data = self._stream.read(byte_count)
if len(data) != byte_count:
raise InvalidResourceFileError(f"Attempted to read {byte_count} bytes of data, but only got {len(data)} bytes")
return data

def _read(self, count: int) -> bytes:
"""Read count bytes from the stream. If seeking is disabled, this also increments the internal seek counter accordingly."""

ret = self._stream.read(count)
if not self._allow_seek:
self._pos += len(ret)
return ret

def _stream_unpack(self, st: struct.Struct) -> typing.Tuple:
def _stream_unpack(self, st: struct.Struct) -> tuple:
"""Unpack data from the stream according to the struct st. The number of bytes to read is determined using st.size, so variable-sized structs cannot be used with this method."""

return st.unpack(self._read(st.size))
try:
return st.unpack(self._read_exact(st.size))
except struct.error as e:
raise InvalidResourceFileError(str(e))

def _read_header(self):
def _read_header(self) -> None:
"""Read the resource file header, starting at the current stream position."""

assert self._tell() == 0
assert self._stream.tell() == 0

self.data_offset: int
self.map_offset: int
self.data_length: int
self.map_length: int
self.header_system_data: bytes
self.header_application_data: bytes
(
self.data_offset,
self.map_offset,
@@ -285,42 +394,26 @@ class ResourceFile(collections.abc.Mapping):
self.header_application_data,
) = self._stream_unpack(STRUCT_RESOURCE_HEADER)

assert self._tell() == self.data_offset
if self._stream.tell() != self.data_offset:
raise InvalidResourceFileError(f"The data offset ({self.data_offset}) should point exactly to the end of the file header ({self._stream.tell()})")

def _read_all_resource_data(self):
"""Read all resource data blocks, starting at the current stream position, until self.map_offset is reached."""

assert self._tell() == self.data_offset

self._resource_data: typing.MutableMapping[int, bytes] = collections.OrderedDict()

while self._tell() < self.map_offset:
initial_pos = self._tell()
(length,) = self._stream_unpack(STRUCT_RESOURCE_DATA_HEADER)
assert self._tell() + length <= self.map_offset
self._resource_data[initial_pos] = self._read(length)

assert self._tell() == self.map_offset

def _read_map_header(self):
def _read_map_header(self) -> None:
"""Read the map header, starting at the current stream position."""

assert self._tell() == self.map_offset
assert self._stream.tell() == self.map_offset

self.map_type_list_offset: int
self.map_name_list_offset: int
(
_file_attributes,
self.map_type_list_offset,
self.map_name_list_offset,
) = self._stream_unpack(STRUCT_RESOURCE_MAP_HEADER)

self.file_attributes: ResourceFileAttrs = ResourceFileAttrs(_file_attributes)
self.file_attributes = ResourceFileAttrs(_file_attributes)

def _read_all_resource_types(self):
def _read_all_resource_types(self) -> None:
"""Read all resource types, starting at the current stream position."""

self._reference_counts: typing.MutableMapping[bytes, int] = collections.OrderedDict()
self._reference_counts = collections.OrderedDict()

(type_list_length_m1,) = self._stream_unpack(STRUCT_RESOURCE_TYPE_LIST_HEADER)
type_list_length = (type_list_length_m1 + 1) % 0x10000
@@ -334,10 +427,10 @@ class ResourceFile(collections.abc.Mapping):
count = (count_m1 + 1) % 0x10000
self._reference_counts[resource_type] = count

def _read_all_references(self):
def _read_all_references(self) -> None:
"""Read all resource references, starting at the current stream position."""

self._references: typing.MutableMapping[bytes, typing.MutableMapping[int, typing.Tuple[int, ResourceAttrs, int]]] = collections.OrderedDict()
self._references = collections.OrderedDict()

for resource_type, count in self._reference_counts.items():
resmap: typing.MutableMapping[int, typing.Tuple[int, ResourceAttrs, int]] = collections.OrderedDict()
@@ -354,45 +447,7 @@ class ResourceFile(collections.abc.Mapping):

resmap[resource_id] = (name_offset, ResourceAttrs(attributes), data_offset)

def _read_all_resource_names(self):
"""Read all resource names, starting at the current stream position, until the end of the map is reached."""

self._resource_names: typing.MutableMapping[int, bytes] = collections.OrderedDict()

while self._tell() < self.map_offset + self.map_length:
initial_pos = self._tell()
(length,) = self._stream_unpack(STRUCT_RESOURCE_NAME_HEADER)
self._resource_names[initial_pos] = self._read(length)

def _init_seeking(self):
"""Initialize self with seeking enabled, by reading the header, map header, resource types, and references."""

self._read_header()
self._stream.seek(self.map_offset)
self._read_map_header()
self._read_all_resource_types()
self._read_all_references()

def _init_streaming(self):
"""Initialize self with seeking disabled, by reading the entire file sequentially."""

self._read_header()
self._read_all_resource_data()

assert self._tell() == self.map_offset

self._read_map_header()

assert self._tell() == self.map_offset + self.map_type_list_offset

self._read_all_resource_types()
self._read_all_references()

assert self._tell() == self.map_offset + self.map_name_list_offset

self._read_all_resource_names()

def close(self):
def close(self) -> None:
"""Close this ResourceFile.

If close=True was passed when this ResourceFile was created, the underlying stream's close method is called as well.
@@ -401,23 +456,29 @@ class ResourceFile(collections.abc.Mapping):
if self._close_stream:
self._stream.close()

def __enter__(self):
pass
def __enter__(self) -> "ResourceFile":
return self

def __exit__(self, exc_type, exc_val, exc_tb):
def __exit__(
self,
exc_type: typing.Optional[typing.Type[BaseException]],
exc_val: typing.Optional[BaseException],
exc_tb: typing.Optional[types.TracebackType]
) -> typing.Optional[bool]:
self.close()
return None

def __len__(self):
def __len__(self) -> int:
"""Get the number of resource types in this ResourceFile."""

return len(self._references)

def __iter__(self):
def __iter__(self) -> typing.Iterator[bytes]:
"""Iterate over all resource types in this ResourceFile."""

return iter(self._references)

def __contains__(self, key: bytes):
def __contains__(self, key: object) -> bool:
"""Check whether this ResourceFile contains any resources of the given type."""

return key in self._references
@@ -427,5 +488,5 @@ class ResourceFile(collections.abc.Mapping):

return ResourceFile._LazyResourceMap(self, key)

def __repr__(self):
def __repr__(self) -> str:
return f"<{type(self).__module__}.{type(self).__qualname__} at {id(self):#x}, attributes {self.file_attributes}, containing {len(self)} resource types: {list(self)}>"
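
Per the new base classes, a ResourceFile behaves as a nested mapping (type code to ID to Resource) and as a context manager whose __exit__ closes the file. A minimal usage sketch ("Example.rsrc" is a hypothetical path):

    from rsrcfork.api import ResourceFile

    with ResourceFile.open("Example.rsrc") as rf:
        if b"STR " in rf:
            for resid, res in rf[b"STR "].items():
                print(resid, res.length, res.data)
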
63
rsrcfork/compress/__init__.py
Normal file
@@ -0,0 +1,63 @@
import io
import typing

from . import dcmp0
from . import dcmp1
from . import dcmp2

from .common import DecompressError, CompressedHeaderInfo

__all__ = [
"CompressedHeaderInfo",
"DecompressError",
"decompress",
"decompress_parsed",
"decompress_stream",
"decompress_stream_parsed",
]


# Maps 'dcmp' IDs to their corresponding Python implementations.
# Each decompressor has the signature (header_info: CompressedHeaderInfo, stream: typing.BinaryIO, *, debug: bool=False) -> typing.Iterator[bytes].
DECOMPRESSORS = {
0: dcmp0.decompress_stream,
1: dcmp1.decompress_stream,
2: dcmp2.decompress_stream,
}


def decompress_stream_parsed(header_info: CompressedHeaderInfo, stream: typing.BinaryIO, *, debug: bool=False) -> typing.Iterator[bytes]:
"""Decompress compressed resource data from a stream, whose header has already been read and parsed into a CompressedHeaderInfo object."""

try:
decompress_func = DECOMPRESSORS[header_info.dcmp_id]
except KeyError:
raise DecompressError(f"Unsupported 'dcmp' ID: {header_info.dcmp_id}")

decompressed_length = 0
for chunk in decompress_func(header_info, stream, debug=debug):
decompressed_length += len(chunk)
yield chunk

if decompressed_length != header_info.decompressed_length:
raise DecompressError(f"Actual length of decompressed data ({decompressed_length}) does not match length stored in resource ({header_info.decompressed_length})")

def decompress_parsed(header_info: CompressedHeaderInfo, data: bytes, *, debug: bool=False) -> bytes:
"""Decompress the given compressed resource data, whose header has already been removed and parsed into a CompressedHeaderInfo object."""

return b"".join(decompress_stream_parsed(header_info, io.BytesIO(data), debug=debug))

def decompress_stream(stream: typing.BinaryIO, *, debug: bool=False) -> typing.Iterator[bytes]:
"""Decompress compressed resource data from a stream."""

header_info = CompressedHeaderInfo.parse_stream(stream)

if debug:
print(f"Compressed resource data header: {header_info}")

yield from decompress_stream_parsed(header_info, stream, debug=debug)

def decompress(data: bytes, *, debug: bool=False) -> bytes:
"""Decompress the given compressed resource data."""

return b"".join(decompress_stream(io.BytesIO(data), debug=debug))
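
The four functions above form two pairs: the plain variants parse the header themselves, while the *_parsed variants accept an already-parsed CompressedHeaderInfo. A hedged sketch, assuming compressed_bytes holds a complete compressed resource including the 0xa89f6572 header:

    from rsrcfork import compress

    decompressed = compress.decompress(compressed_bytes)

    # Equivalent, with the header parsed separately:
    info = compress.CompressedHeaderInfo.parse(compressed_bytes)
    assert compress.decompress_parsed(info, compressed_bytes[info.header_length:]) == decompressed
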
198
rsrcfork/compress/common.py
Normal file
@@ -0,0 +1,198 @@
import io
import struct
import typing


class DecompressError(Exception):
"""Raised when resource data decompression fails, because the data is invalid or the compression type is not supported."""


# The signature of all compressed resource data, 0xa89f6572 in hex, or "®üer" in MacRoman.
COMPRESSED_SIGNATURE = b"\xa8\x9fer"
# The number of the "type 8" compression type. This type is used in the Finder, ResEdit, and some other system files.
COMPRESSED_TYPE_8 = 0x0801
# The number of the "type 9" compression type. This type is used in the System file and System 7.5's Installer.
COMPRESSED_TYPE_9 = 0x0901

# Common header for compressed resources of all types.
# 4 bytes: Signature (see above).
# 2 bytes: Length of the complete header (this common part and the type-specific part that follows it). (This meaning is just a guess - the field's value is always 0x0012, so there's no way to know for certain what it means.)
# 2 bytes: Compression type. Known so far: 0x0801 ("type 8") and 0x0901 ("type 9").
# 4 bytes: Length of the data after decompression.
# 6 bytes: Remainder of the header. The exact format varies depending on the compression type.
STRUCT_COMPRESSED_HEADER = struct.Struct(">4sHHI6s")

# Remainder of header for a "type 8" compressed resource.
# 1 byte: "Working buffer fractional size" - the ratio of the compressed data size to the uncompressed data size, times 256.
# 1 byte: "Expansion buffer size" - the maximum number of bytes that the data might grow during decompression.
# 2 bytes: The ID of the 'dcmp' resource that can decompress this resource. Currently only ID 0 is supported.
# 2 bytes: Reserved (always zero).
STRUCT_COMPRESSED_TYPE_8_HEADER = struct.Struct(">BBhH")

# Remainder of header for a "type 9" compressed resource.
# 2 bytes: The ID of the 'dcmp' resource that can decompress this resource. Currently only ID 2 is supported.
# 4 bytes: Decompressor-specific parameters.
STRUCT_COMPRESSED_TYPE_9_HEADER = struct.Struct(">h4s")


class CompressedHeaderInfo(object):
@classmethod
def parse_stream(cls, stream: typing.BinaryIO) -> "CompressedHeaderInfo":
try:
signature, header_length, compression_type, decompressed_length, remainder = STRUCT_COMPRESSED_HEADER.unpack(stream.read(STRUCT_COMPRESSED_HEADER.size))
except struct.error:
raise DecompressError(f"Invalid header")
if signature != COMPRESSED_SIGNATURE:
raise DecompressError(f"Invalid signature: {signature!r}, expected {COMPRESSED_SIGNATURE}")
if header_length != 0x12:
raise DecompressError(f"Unsupported header length: 0x{header_length:>04x}, expected 0x12")

if compression_type == COMPRESSED_TYPE_8:
working_buffer_fractional_size, expansion_buffer_size, dcmp_id, reserved = STRUCT_COMPRESSED_TYPE_8_HEADER.unpack(remainder)

if reserved != 0:
raise DecompressError(f"Reserved field should be 0, not 0x{reserved:>04x}")

return CompressedType8HeaderInfo(header_length, compression_type, decompressed_length, dcmp_id, working_buffer_fractional_size, expansion_buffer_size)
elif compression_type == COMPRESSED_TYPE_9:
dcmp_id, parameters = STRUCT_COMPRESSED_TYPE_9_HEADER.unpack(remainder)

return CompressedType9HeaderInfo(header_length, compression_type, decompressed_length, dcmp_id, parameters)
else:
raise DecompressError(f"Unsupported compression type: 0x{compression_type:>04x}")

@classmethod
def parse(cls, data: bytes) -> "CompressedHeaderInfo":
return cls.parse_stream(io.BytesIO(data))

header_length: int
compression_type: int
decompressed_length: int
dcmp_id: int

def __init__(self, header_length: int, compression_type: int, decompressed_length: int, dcmp_id: int) -> None:
super().__init__()

self.header_length = header_length
self.compression_type = compression_type
self.decompressed_length = decompressed_length
self.dcmp_id = dcmp_id


class CompressedType8HeaderInfo(CompressedHeaderInfo):
working_buffer_fractional_size: int
expansion_buffer_size: int

def __init__(self, header_length: int, compression_type: int, decompressed_length: int, dcmp_id: int, working_buffer_fractional_size: int, expansion_buffer_size: int) -> None:
super().__init__(header_length, compression_type, decompressed_length, dcmp_id)

self.working_buffer_fractional_size = working_buffer_fractional_size
self.expansion_buffer_size = expansion_buffer_size

def __repr__(self) -> str:
return f"{type(self).__qualname__}(header_length={self.header_length}, compression_type=0x{self.compression_type:>04x}, decompressed_length={self.decompressed_length}, dcmp_id={self.dcmp_id}, working_buffer_fractional_size={self.working_buffer_fractional_size}, expansion_buffer_size={self.expansion_buffer_size})"


class CompressedType9HeaderInfo(CompressedHeaderInfo):
parameters: bytes

def __init__(self, header_length: int, compression_type: int, decompressed_length: int, dcmp_id: int, parameters: bytes) -> None:
super().__init__(header_length, compression_type, decompressed_length, dcmp_id)

self.parameters = parameters

def __repr__(self) -> str:
return f"{type(self).__qualname__}(header_length={self.header_length}, compression_type=0x{self.compression_type:>04x}, decompressed_length={self.decompressed_length}, dcmp_id={self.dcmp_id}, parameters={self.parameters!r})"


if typing.TYPE_CHECKING:
class PeekableIO(typing.Protocol):
"""Minimal protocol for binary IO streams that support the peek method.

The peek method is supported by various standard Python binary IO streams, such as io.BufferedReader. If a stream does not natively support the peek method, it may be wrapped using the custom helper function make_peekable.
"""

def readable(self) -> bool: ...
def read(self, size: typing.Optional[int] = ...) -> bytes: ...
def peek(self, size: int = ...) -> bytes: ...


class _PeekableIOWrapper(object):
"""Wrapper class to add peek support to an existing stream. Do not instantiate this class directly, use the make_peekable function instead.

Python provides a standard io.BufferedReader class, which supports the peek method. However, according to its documentation, it only supports wrapping io.RawIOBase subclasses, and not streams which are already otherwise buffered.

Warning: this class does not perform any buffering of its own, outside of what is required to make peek work. It is strongly recommended to only wrap streams that are already buffered or otherwise fast to read from. In particular, raw streams (io.RawIOBase subclasses) should be wrapped using io.BufferedReader instead.
"""

_wrapped: typing.BinaryIO
_readahead: bytes

def __init__(self, wrapped: typing.BinaryIO) -> None:
super().__init__()

self._wrapped = wrapped
self._readahead = b""

def readable(self) -> bool:
return self._wrapped.readable()

def read(self, size: typing.Optional[int] = None) -> bytes:
if size is None or size < 0:
ret = self._readahead + self._wrapped.read()
self._readahead = b""
elif size <= len(self._readahead):
ret = self._readahead[:size]
self._readahead = self._readahead[size:]
else:
ret = self._readahead + self._wrapped.read(size - len(self._readahead))
self._readahead = b""

return ret

def peek(self, size: int = -1) -> bytes:
if not self._readahead:
self._readahead = self._wrapped.read(io.DEFAULT_BUFFER_SIZE if size < 0 else size)
return self._readahead


def make_peekable(stream: typing.BinaryIO) -> "PeekableIO":
"""Wrap an arbitrary binary IO stream so that it supports the peek method.

The stream is wrapped as efficiently as possible (or not at all if it already supports the peek method). However, in the worst case a custom wrapper class needs to be used, which may not be particularly efficient and only supports a very minimal interface. The only methods that are guaranteed to exist on the returned stream are readable, read, and peek.
"""

if hasattr(stream, "peek"):
# Stream is already peekable, nothing to be done.
return typing.cast("PeekableIO", stream)
elif isinstance(stream, io.RawIOBase):
# Raw IO streams can be wrapped efficiently using BufferedReader.
return io.BufferedReader(stream)
else:
# Other streams need to be wrapped using our custom wrapper class.
return _PeekableIOWrapper(stream)
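
A small sketch of make_peekable in action. io.BytesIO has no native peek method and is not a RawIOBase subclass, so it takes the _PeekableIOWrapper path (note that peek may return more bytes than requested, hence the slice):

    import io
    from rsrcfork.compress.common import make_peekable

    stream = make_peekable(io.BytesIO(b"\x12\x34\x56"))
    assert stream.peek(1)[:1] == b"\x12"  # nothing is consumed yet
    assert stream.read(2) == b"\x12\x34"
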
def read_exact(stream: typing.BinaryIO, byte_count: int) -> bytes:
"""Read byte_count bytes from the stream and raise an exception if too few bytes are read (i. e. if EOF was hit prematurely)."""

data = stream.read(byte_count)
if len(data) != byte_count:
raise DecompressError(f"Attempted to read {byte_count} bytes of data, but only got {len(data)} bytes")
return data

def read_variable_length_integer(stream: typing.BinaryIO) -> int:
"""Read a variable-length integer from the stream.

This variable-length integer format is used by the 0xfe codes in the compression formats used by 'dcmp' (0) and 'dcmp' (1).
"""

head = read_exact(stream, 1)

if head[0] == 0xff:
return int.from_bytes(read_exact(stream, 4), "big", signed=True)
elif head[0] >= 0x80:
data_modified = bytes([(head[0] - 0xc0) & 0xff]) + read_exact(stream, 1)
return int.from_bytes(data_modified, "big", signed=True)
else:
return int.from_bytes(head, "big", signed=True)
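
Worked examples of the three encodings handled by read_variable_length_integer (the byte sequences here are chosen for illustration): a head byte below 0x80 is a 1-byte signed value, a head byte from 0x80 to 0xfe selects a 2-byte form with the head byte adjusted by 0xc0, and a head byte of 0xff is followed by a 4-byte signed value.

    import io
    from rsrcfork.compress.common import read_variable_length_integer

    assert read_variable_length_integer(io.BytesIO(b"\x40")) == 0x40
    assert read_variable_length_integer(io.BytesIO(b"\xc1\x00")) == 0x100    # (0xc1 - 0xc0) << 8 | 0x00
    assert read_variable_length_integer(io.BytesIO(b"\x80\x00")) == -0x4000  # the 2-byte form can be negative
    assert read_variable_length_integer(io.BytesIO(b"\xff\x00\x01\x00\x00")) == 0x10000
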
269
rsrcfork/compress/dcmp0.py
Normal file
@@ -0,0 +1,269 @@
import io
import typing

from . import common

# Lookup table for codes in range(0x4b, 0xfe).
# This table was obtained by decompressing a manually created compressed resource with the following contents:
# b'\xa8\x9fer\x00\x12\x08\x01\x00\x00\x01f\x80\x03\x00\x00\x00\x00' + bytes(range(0x4b, 0xfe)) + b'\xff'
TABLE_DATA = (
# First line corresponds to codes in range(0x4b, 0x50).
b"\x00\x00N\xba\x00\x08Nu\x00\x0c"
# All following lines correspond to 8 codes each.
b"N\xad S/\x0ba\x00\x00\x10p\x00/\x00Hn"
b" P n/.\xff\xfcH\xe7?<\x00\x04\xff\xf8"
b"/\x0c \x06N\xedNV hN^\x00\x01X\x8f"
b"O\xef\x00\x02\x00\x18`\x00\xff\xffP\x8fN\x90\x00\x06"
b"&n\x00\x14\xff\xf4L\xee\x00\n\x00\x0eA\xeeL\xdf"
b"H\xc0\xff\xf0-@\x00\x120.p\x01/( T"
b"g\x00\x00 \x00\x1c _\x18\x00&oHx\x00\x16"
b"A\xfa0<(@r\x00(n \x0cf\x00 k"
b"/\x07U\x8f\x00(\xff\xfe\xff\xec\"\xd8 \x0b\x00\x0f"
b"Y\x8f/<\xff\x00\x01\x18\x81\xe1J\x00N\xb0\xff\xe8"
b"H\xc7\x00\x03\x00\"\x00\x07\x00\x1ag\x06g\x08N\xf9"
b"\x00$ x\x08\x00f\x04\x00*N\xd00(&_"
b"g\x04\x000C\xee?\x00 \x1f\x00\x1e\xff\xf6 ."
b"B\xa7 \x07\xff\xfa`\x02=@\x0c@f\x06\x00&"
b"-H/\x01p\xff`\x04\x18\x80J@\x00@\x00,"
b"/\x08\x00\x11\xff\xe4!@&@\xff\xf2BnN\xb9"
b"=|\x008\x00\r`\x06B. <g\x0c-h"
b"f\x08J.J\xae\x00.H@\"_\"\x00g\n"
b"0\x07Bg\x002 (\x00\tHz\x02\x00/+"
b"\x00\x05\"nf\x02\xe5\x80g\x0ef\n\x00P>\x00"
b"f\x0c.\x00\xff\xee m @\xff\xe0S@`\x08"
# Last line corresponds to codes in range(0xf8, 0xfe).
b"\x04\x80\x00h\x0b|D\x00A\xe8HA"
)
# Note: index 0 in this table corresponds to code 0x4b, index 1 to 0x4c, etc.
TABLE = [TABLE_DATA[i:i + 2] for i in range(0, len(TABLE_DATA), 2)]
assert len(TABLE) == len(range(0x4b, 0xfe))


def decompress_stream_inner(header_info: common.CompressedHeaderInfo, stream: typing.BinaryIO, *, debug: bool=False) -> typing.Iterator[bytes]:
"""Internal helper function, implements the main decompression algorithm. Only called from decompress_stream, which performs some extra checks and debug logging."""

if not isinstance(header_info, common.CompressedType8HeaderInfo):
raise common.DecompressError(f"Incorrect header type: {type(header_info).__qualname__}")

prev_literals: typing.List[bytes] = []

while True: # Loop is terminated when the EOF marker (0xff) is encountered
(byte,) = common.read_exact(stream, 1)
if debug:
print(f"Tag byte 0x{byte:>02x}")

if byte in range(0x00, 0x20):
# Literal byte sequence.
if byte in (0x00, 0x10):
# The length of the literal data is stored in the next byte.
(count_div2,) = common.read_exact(stream, 1)
else:
# The length of the literal data is stored in the low nibble of the tag byte.
count_div2 = byte >> 0 & 0xf
count = 2 * count_div2
# Controls whether or not the literal is stored so that it can be referenced again later.
do_store = byte >= 0x10
literal = common.read_exact(stream, count)
if debug:
print(f"Literal (storing: {do_store})")
if do_store:
if debug:
print(f"\t-> storing as literal number 0x{len(prev_literals):x}")
prev_literals.append(literal)
yield literal
elif byte in (0x20, 0x21):
# Backreference to a previous literal, 2-byte form.
# This can reference literals with index in range(0x28, 0x228).
(next_byte,) = common.read_exact(stream, 1)
table_index = 0x28 + ((byte - 0x20) << 8 | next_byte)
if debug:
print(f"Backreference (2-byte form) to 0x{table_index:>02x}")
yield prev_literals[table_index]
elif byte == 0x22:
# Backreference to a previous literal, 3-byte form.
# This can reference any literal with index 0x28 and higher, but is only necessary for literals with index 0x228 and higher.
table_index = 0x28 + int.from_bytes(common.read_exact(stream, 2), "big", signed=False)
if debug:
print(f"Backreference (3-byte form) to 0x{table_index:>02x}")
yield prev_literals[table_index]
elif byte in range(0x23, 0x4b):
# Backreference to a previous literal, 1-byte form.
# This can reference literals with indices in range(0x28).
table_index = byte - 0x23
if debug:
print(f"Backreference (1-byte form) to 0x{table_index:>02x}")
yield prev_literals[table_index]
elif byte in range(0x4b, 0xfe):
# Reference into a fixed table of two-byte literals.
# All compressed resources use the same table.
table_index = byte - 0x4b
if debug:
print(f"Fixed table reference to 0x{table_index:>02x}")
yield TABLE[table_index]
elif byte == 0xfe:
# Extended code, whose meaning is controlled by the following byte.

(kind,) = common.read_exact(stream, 1)
if debug:
print(f"Extended code: 0x{kind:>02x}")

if kind == 0x00:
# Compact representation of (part of) a segment loader jump table, as used in 'CODE' (0) resources.

if debug:
print(f"Segment loader jump table entries")

# All generated jump table entries have the same segment number.
segment_number_int = common.read_variable_length_integer(stream)
if debug:
print(f"\t-> segment number: {segment_number_int:#x}")

# The tail part of all jump table entries (i. e. everything except for the address).
entry_tail = b"?<" + segment_number_int.to_bytes(2, "big", signed=True) + b"\xa9\xf0"
# The tail is output once *without* an address in front, i. e. the first entry's address must be generated manually by a previous code.
yield entry_tail

count = common.read_variable_length_integer(stream)
if count <= 0:
raise common.DecompressError(f"Jump table entry count must be greater than 0, not {count}")

# The second entry's address is stored explicitly.
current_int = common.read_variable_length_integer(stream)
if debug:
print(f"\t-> address of second entry: {current_int:#x}")
yield current_int.to_bytes(2, "big", signed=False) + entry_tail

for _ in range(1, count):
# All further entries' addresses are stored as differences relative to the previous entry's address.
diff = common.read_variable_length_integer(stream)
# For some reason, each difference is 6 higher than it should be.
diff -= 6

# Simulate 16-bit integer wraparound.
current_int = (current_int + diff) & 0xffff
if debug:
print(f"\t-> difference {diff:#x}: {current_int:#x}")
yield current_int.to_bytes(2, "big", signed=False) + entry_tail
elif kind in (0x02, 0x03):
# Repeat 1 or 2 bytes a certain number of times.

if kind == 0x02:
byte_count = 1
elif kind == 0x03:
byte_count = 2
else:
raise AssertionError()

if debug:
print(f"Repeat {byte_count}-byte value")

# The byte(s) to repeat, stored as a variable-length integer. The value is treated as unsigned, i. e. the integer is never negative.
to_repeat_int = common.read_variable_length_integer(stream)
try:
to_repeat = to_repeat_int.to_bytes(byte_count, "big", signed=False)
except OverflowError:
raise common.DecompressError(f"Value to repeat out of range for {byte_count}-byte repeat: {to_repeat_int:#x}")

count = common.read_variable_length_integer(stream) + 1
if count <= 0:
raise common.DecompressError(f"Repeat count must be positive: {count}")

if debug:
print(f"\t-> {to_repeat} * {count}")
yield to_repeat * count
elif kind == 0x04:
# A sequence of 16-bit signed integers, with each integer encoded as a difference relative to the previous integer. The first integer is stored explicitly.

if debug:
print(f"Difference-encoded 16-bit integers")

# The first integer is stored explicitly, as a signed value.
initial_int = common.read_variable_length_integer(stream)
try:
initial = initial_int.to_bytes(2, "big", signed=True)
except OverflowError:
raise common.DecompressError(f"Initial value out of range for 16-bit integer difference encoding: {initial_int:#x}")
if debug:
print(f"\t-> initial: 0x{initial_int:>04x}")
yield initial

count = common.read_variable_length_integer(stream)
if count < 0:
raise common.DecompressError(f"Count cannot be negative: {count}")

# To make the following calculations simpler, the signed initial_int value is converted to unsigned.
current_int = initial_int & 0xffff
for _ in range(count):
# The difference to the previous integer is stored as an 8-bit signed integer.
# The usual variable-length integer format is *not* used here.
diff = int.from_bytes(common.read_exact(stream, 1), "big", signed=True)

# Simulate 16-bit integer wraparound.
current_int = (current_int + diff) & 0xffff
if debug:
print(f"\t-> difference {diff:#x}: 0x{current_int:>04x}")
yield current_int.to_bytes(2, "big", signed=False)
elif kind == 0x06:
# A sequence of 32-bit signed integers, with each integer encoded as a difference relative to the previous integer. The first integer is stored explicitly.

if debug:
print(f"Difference-encoded 32-bit integers")

# The first integer is stored explicitly, as a signed value.
initial_int = common.read_variable_length_integer(stream)
try:
initial = initial_int.to_bytes(4, "big", signed=True)
except OverflowError:
raise common.DecompressError(f"Initial value out of range for 32-bit integer difference encoding: {initial_int:#x}")
if debug:
print(f"\t-> initial: 0x{initial_int:>08x}")
yield initial

count = common.read_variable_length_integer(stream)
assert count >= 0

# To make the following calculations simpler, the signed initial_int value is converted to unsigned.
current_int = initial_int & 0xffffffff
for _ in range(count):
# The difference to the previous integer is stored as a variable-length integer, whose value may be negative.
diff = common.read_variable_length_integer(stream)

# Simulate 32-bit integer wraparound.
current_int = (current_int + diff) & 0xffffffff
if debug:
print(f"\t-> difference {diff:#x}: 0x{current_int:>08x}")
yield current_int.to_bytes(4, "big", signed=False)
else:
raise common.DecompressError(f"Unknown extended code: 0x{kind:>02x}")
elif byte == 0xff:
# End of data marker, always occurs exactly once as the last byte of the compressed data.
if debug:
print("End marker")

# Check that there really is no more data left.
extra = stream.read(1)
if extra:
raise common.DecompressError(f"Extra data encountered after end of data marker (first extra byte: {extra})")
break
else:
raise common.DecompressError(f"Unknown tag byte: 0x{byte:>02x}")

def decompress_stream(header_info: common.CompressedHeaderInfo, stream: typing.BinaryIO, *, debug: bool=False) -> typing.Iterator[bytes]:
"""Decompress compressed data in the format used by 'dcmp' (0)."""

decompressed_length = 0
for chunk in decompress_stream_inner(header_info, stream, debug=debug):
if debug:
print(f"\t-> {chunk}")

if header_info.decompressed_length % 2 != 0 and decompressed_length + len(chunk) == header_info.decompressed_length + 1:
# Special case: if the decompressed data length stored in the header is odd and one less than the length of the actual decompressed data, drop the last byte.
# This is necessary because nearly all codes generate data in groups of 2 or 4 bytes, so it is basically impossible to represent data with an odd length using this compression format.
decompressed_length += len(chunk) - 1
yield chunk[:-1]
else:
decompressed_length += len(chunk)
yield chunk

if debug:
print(f"Decompressed {decompressed_length:#x} bytes so far")
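
The comment at the top of this file doubles as a test vector: the hand-made resource it describes consists of one fixed-table reference for every code from 0x4b to 0xfd, so decompressing it must reproduce TABLE_DATA exactly. A sketch of that round trip using the public API:

    from rsrcfork import compress
    from rsrcfork.compress.dcmp0 import TABLE_DATA

    data = (b"\xa8\x9fer\x00\x12\x08\x01\x00\x00\x01f\x80\x03\x00\x00\x00\x00"
            + bytes(range(0x4b, 0xfe)) + b"\xff")
    assert compress.decompress(data) == TABLE_DATA
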
144
rsrcfork/compress/dcmp1.py
Normal file
@@ -0,0 +1,144 @@
import io
import typing

from . import common

# Lookup table for codes in range(0xd5, 0xfe).
# This table was obtained by decompressing a manually created compressed resource with the following contents:
# b'\xa8\x9fer\x00\x12\x08\x01\x00\x00\x00R\x80\x03\x00\x01\x00\x00' + bytes(range(0xd5, 0xfe)) + b'\xff'
TABLE_DATA = (
# First line corresponds to codes in range(0xd5, 0xd8).
b"\x00\x00\x00\x01\x00\x02"
# All following lines correspond to 8 codes each.
b"\x00\x03.\x01>\x01\x01\x01\x1e\x01\xff\xff\x0e\x011\x00"
b"\x11\x12\x01\x0732\x129\xed\x10\x01'#\"\x017"
b"\x07\x06\x01\x17\x01#\x00\xff\x00/\x07\x0e\xfd<\x015"
b"\x01\x15\x01\x02\x00\x07\x00>\x05\xd5\x02\x01\x06\x07\x07\x08"
# Last line corresponds to codes in range(0xf8, 0xfe).
b"0\x01\x013\x00\x10\x17\x167>67"
)
# Note: index 0 in this table corresponds to code 0xd5, index 1 to 0xd6, etc.
TABLE = [TABLE_DATA[i:i + 2] for i in range(0, len(TABLE_DATA), 2)]
assert len(TABLE) == len(range(0xd5, 0xfe))


def decompress_stream_inner(header_info: common.CompressedHeaderInfo, stream: typing.BinaryIO, *, debug: bool=False) -> typing.Iterator[bytes]:
"""Internal helper function, implements the main decompression algorithm. Only called from decompress_stream, which performs some extra checks and debug logging."""

if not isinstance(header_info, common.CompressedType8HeaderInfo):
raise common.DecompressError(f"Incorrect header type: {type(header_info).__qualname__}")

prev_literals: typing.List[bytes] = []

while True: # Loop is terminated when the EOF marker (0xff) is encountered
(byte,) = common.read_exact(stream, 1)
if debug:
print(f"Tag byte 0x{byte:>02x}")

if byte in range(0x00, 0x20):
# Literal byte sequence, 1-byte header.
# The length of the literal data is stored in the low nibble of the tag byte.
count = (byte >> 0 & 0xf) + 1
# Controls whether or not the literal is stored so that it can be referenced again later.
do_store = byte >= 0x10
literal = common.read_exact(stream, count)
if debug:
print(f"Literal (1-byte header, storing: {do_store})")
if do_store:
if debug:
print(f"\t-> storing as literal number 0x{len(prev_literals):x}")
prev_literals.append(literal)
yield literal
elif byte in range(0x20, 0xd0):
# Backreference to a previous literal, 1-byte form.
# This can reference literals with indices in range(0xb0).
table_index = byte - 0x20
if debug:
print(f"Backreference (1-byte form) to 0x{table_index:>02x}")
yield prev_literals[table_index]
elif byte in (0xd0, 0xd1):
# Literal byte sequence, 2-byte header.
# The length of the literal data is stored in the following byte.
(count,) = common.read_exact(stream, 1)
# Controls whether or not the literal is stored so that it can be referenced again later.
do_store = byte == 0xd1
literal = common.read_exact(stream, count)
if debug:
print(f"Literal (2-byte header, storing: {do_store})")
if do_store:
if debug:
print(f"\t-> storing as literal number 0x{len(prev_literals):x}")
prev_literals.append(literal)
yield literal
elif byte == 0xd2:
# Backreference to a previous literal, 2-byte form.
# This can reference literals with indices in range(0xb0, 0x1b0).
(next_byte,) = common.read_exact(stream, 1)
table_index = next_byte + 0xb0
if debug:
print(f"Backreference (2-byte form) to 0x{table_index:>02x}")
yield prev_literals[table_index]
elif byte in range(0xd5, 0xfe):
# Reference into a fixed table of two-byte literals.
# All compressed resources use the same table.
table_index = byte - 0xd5
if debug:
print(f"Fixed table reference to 0x{table_index:>02x}")
yield TABLE[table_index]
elif byte == 0xfe:
# Extended code, whose meaning is controlled by the following byte.

(kind,) = common.read_exact(stream, 1)
if debug:
print(f"Extended code: 0x{kind:>02x}")

if kind == 0x02:
# Repeat 1 byte a certain number of times.

byte_count = 1 # Unlike with 'dcmp' (0) compression, there doesn't appear to be a 2-byte repeat (or if there is, it's never used in practice).

if debug:
print(f"Repeat {byte_count}-byte value")

# The byte(s) to repeat, stored as a variable-length integer. The value is treated as unsigned, i. e. the integer is never negative.
to_repeat_int = common.read_variable_length_integer(stream)
try:
to_repeat = to_repeat_int.to_bytes(byte_count, "big", signed=False)
except OverflowError:
raise common.DecompressError(f"Value to repeat out of range for {byte_count}-byte repeat: {to_repeat_int:#x}")

count = common.read_variable_length_integer(stream) + 1
if count <= 0:
raise common.DecompressError(f"Repeat count must be positive: {count}")

if debug:
print(f"\t-> {to_repeat} * {count}")
yield to_repeat * count
else:
raise common.DecompressError(f"Unknown extended code: 0x{kind:>02x}")
elif byte == 0xff:
# End of data marker, always occurs exactly once as the last byte of the compressed data.
if debug:
print("End marker")

# Check that there really is no more data left.
extra = stream.read(1)
if extra:
raise common.DecompressError(f"Extra data encountered after end of data marker (first extra byte: {extra})")
break
else:
raise common.DecompressError(f"Unknown tag byte: 0x{byte:>02x}")

def decompress_stream(header_info: common.CompressedHeaderInfo, stream: typing.BinaryIO, *, debug: bool=False) -> typing.Iterator[bytes]:
"""Decompress compressed data in the format used by 'dcmp' (1)."""

decompressed_length = 0
for chunk in decompress_stream_inner(header_info, stream, debug=debug):
if debug:
print(f"\t-> {chunk}")

decompressed_length += len(chunk)
yield chunk

if debug:
print(f"Decompressed {decompressed_length:#x} bytes so far")
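
A hand-built 'dcmp' (1) stream exercising the literal and backreference codes above (this input is constructed for illustration, not taken from a real resource): tag 0x11 stores and emits the two-byte literal b"AB", tag 0x20 references stored literal number 0, and 0xff ends the data. The type-8 header declares dcmp ID 1 and a decompressed length of 4.

    from rsrcfork import compress

    data = (b"\xa8\x9fer\x00\x12\x08\x01\x00\x00\x00\x04\x80\x03\x00\x01\x00\x00"
            + b"\x11AB" + b"\x20" + b"\xff")
    assert compress.decompress(data) == b"ABAB"
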
177
rsrcfork/compress/dcmp2.py
Normal file
177
rsrcfork/compress/dcmp2.py
Normal file
@ -0,0 +1,177 @@
|
||||
import enum
|
||||
import io
|
||||
import struct
|
||||
import typing
|
||||
|
||||
from . import common
|
||||
|
||||
# Parameters for a 'dcmp' (2)-compressed resource.
# 2 bytes: Unknown meaning, doesn't appear to have any effect on the decompression algorithm. Usually zero, sometimes set to a small integer (< 10). On 'lpch' resources, the value is always nonzero, and sometimes larger than usual.
# 1 byte: Number of entries in the custom lookup table minus one. Set to zero if the default lookup table is used.
# 1 byte: Flags. See the ParameterFlags enum below for details.
STRUCT_PARAMETERS = struct.Struct(">HBB")

# Default lookup table.
# If the custom table flag is set, a custom table (usually with fewer than 256 entries) is used instead of this one.
# This table was obtained by decompressing a manually created compressed resource with the following contents:
# b'\xa8\x9fer\x00\x12\t\x01\x00\x00\x02\x00\x00\x02\x00\x00\x00\x00' + bytes(range(256))
DEFAULT_TABLE_DATA = (
	b"\x00\x00\x00\x08N\xba nNu\x00\x0c\x00\x04p\x00"
	b"\x00\x10\x00\x02Hn\xff\xfc`\x00\x00\x01H\xe7/."
	b"NV\x00\x06N^/\x00a\x00\xff\xf8/\x0b\xff\xff"
	b"\x00\x14\x00\n\x00\x18 _\x00\x0e P?<\xff\xf4"
	b"L\xee0.g\x00L\xdf&n\x00\x12\x00\x1cBg"
	b"\xff\xf00</\x0c\x00\x03N\xd0\x00 p\x01\x00\x16"
	b"-@H\xc0 xr\x00X\x8ff\x00O\xefB\xa7"
	b"g\x06\xff\xfaU\x8f(n?\x00\xff\xfe/<g\x04"
	b"Y\x8f k\x00$ \x1fA\xfa\x81\xe1f\x04g\x08"
	b"\x00\x1aN\xb9P\x8f .\x00\x07N\xb0\xff\xf2=@"
	b"\x00\x1e hf\x06\xff\xf6N\xf9\x08\x00\x0c@=|"
	b"\xff\xec\x00\x05 <\xff\xe8\xde\xfcJ.\x000\x00("
	b"/\x08 \x0b`\x02Bn-H S @\x18\x00"
	b"`\x04A\xee/(/\x01g\nH@ \x07f\x08"
	b"\x01\x18/\x070(?.0+\"n/+\x00,"
	b"g\x0c\"_`\x06\x00\xff0\x07\xff\xeeS@\x00@"
	b"\xff\xe4J@f\n\x00\x0fN\xadp\xff\"\xd8Hk"
	b"\x00\" Kg\x0eJ\xaeN\x90\xff\xe0\xff\xc0\x00*"
	b"'@g\x02Q\xc8\x02\xb6Hz\"x\xb0n\xff\xe6"
	b"\x00\t2.>\x00HA\xff\xeaC\xeeNqt\x00"
	b"/, l\x00<\x00&\x00P\x18\x800\x1f\"\x00"
	b"f\x0c\xff\xda\x008f\x020, \x0c-nB@"
	b"\xff\xe2\xa9\xf0\xff\x007|\xe5\x80\xff\xdcHhYO"
	b"\x004>\x1f`\x08/\x06\xff\xde`\np\x02\x002"
	b"\xff\xcc\x00\x80\"Q\x10\x1f1|\xa0)\xff\xd8R@"
	b"\x01\x00g\x10\xa0#\xff\xce\xff\xd4 \x06Hx\x00."
	b"POC\xfag\x12v\x00A\xe8Jn \xd9\x00Z"
	b"\x7f\xffQ\xca\x00\\.\x00\x02@H\xc7g\x14\x0c\x80"
	b".\x9f\xff\xd6\x80\x00\x10\x00HBJk\xff\xd2\x00H"
	b"JGN\xd1 o\x00A`\x0c*xB.2\x00"
	b"etg\x16\x00DHm \x08Hl\x0b|&@"
	b"\x04\x00\x00h m\x00\r*@\x00\x0b\x00>\x02 "
)
DEFAULT_TABLE = [DEFAULT_TABLE_DATA[i:i + 2] for i in range(0, len(DEFAULT_TABLE_DATA), 2)]
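For orientation, a small sanity-check sketch of what this table decodes to (an aside, not part of the committed file; every value below follows directly from the data above):

# Illustrative only:
assert len(DEFAULT_TABLE_DATA) == 512
assert len(DEFAULT_TABLE) == 256  # one two-byte entry per possible index byte
assert DEFAULT_TABLE[0] == b"\x00\x00"
assert DEFAULT_TABLE[2] == b"N\xba"  # 0x4e 0xba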


class ParameterFlags(enum.Flag):
	TAGGED = 1 << 1 # The compressed data is tagged, meaning that it consists of "blocks" of a tag byte followed by 8 table references and/or literals. See comments in the decompress function for details.
	CUSTOM_TABLE = 1 << 0 # A custom lookup table is included before the compressed data, which is used instead of the default table.


def _split_bits(i: int) -> typing.Tuple[bool, bool, bool, bool, bool, bool, bool, bool]:
	"""Split a byte (an int) into its 8 bits (a tuple of 8 bools)."""
	
	assert i in range(256)
	return (
		bool(i & (1 << 7)),
		bool(i & (1 << 6)),
		bool(i & (1 << 5)),
		bool(i & (1 << 4)),
		bool(i & (1 << 3)),
		bool(i & (1 << 2)),
		bool(i & (1 << 1)),
		bool(i & (1 << 0)),
	)
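A quick illustration of the bit order (an aside, not part of the committed file): the most significant bit comes first.

# Illustrative only:
assert _split_bits(0b10010110) == (True, False, False, True, False, True, True, False)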


def _decompress_untagged(stream: "common.PeekableIO", decompressed_length: int, table: typing.Sequence[bytes], *, debug: bool=False) -> typing.Iterator[bytes]:
	while True: # Loop is terminated when EOF is reached.
		table_index_data = stream.read(1)
		if not table_index_data:
			# End of compressed data.
			break
		elif not stream.peek(1) and decompressed_length % 2 != 0:
			# Special case: if we are at the last byte of the compressed data, and the decompressed data has an odd length, the last byte is a single literal byte, and not a table reference.
			if debug:
				print(f"Last byte: {table_index_data}")
			yield table_index_data
			break
		
		# Compressed data is untagged, every byte is a table reference.
		(table_index,) = table_index_data
		if debug:
			print(f"Reference: {table_index} -> {table[table_index]}")
		yield table[table_index]
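A minimal sketch of the untagged path (an aside, not part of the committed file), assuming the default table and an even decompressed length so the odd-length special case stays inactive:

# Illustrative only: three untagged bytes, each an index into DEFAULT_TABLE.
chunks = list(_decompress_untagged(common.make_peekable(io.BytesIO(b"\x00\x01\x02")), 6, DEFAULT_TABLE))
assert chunks == [b"\x00\x00", b"\x00\x08", b"N\xba"]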

def _decompress_tagged(stream: "common.PeekableIO", decompressed_length: int, table: typing.Sequence[bytes], *, debug: bool=False) -> typing.Iterator[bytes]:
	while True: # Loop is terminated when EOF is reached.
		tag_data = stream.read(1)
		if not tag_data:
			# End of compressed data.
			break
		elif not stream.peek(1) and decompressed_length % 2 != 0:
			# Special case: if we are at the last byte of the compressed data, and the decompressed data has an odd length, the last byte is a single literal byte, and not a tag or a table reference.
			if debug:
				print(f"Last byte: {tag_data}")
			yield tag_data
			break
		
		# Compressed data is tagged, each tag byte is followed by 8 table references and/or literals.
		(tag,) = tag_data
		if debug:
			print(f"Tag: 0b{tag:>08b}")
		for is_ref in _split_bits(tag):
			if is_ref:
				# This is a table reference (a single byte that is an index into the table).
				table_index_data = stream.read(1)
				if not table_index_data:
					# End of compressed data.
					break
				(table_index,) = table_index_data
				if debug:
					print(f"Reference: {table_index} -> {table[table_index]}")
				yield table[table_index]
			else:
				# This is a literal (two uncompressed bytes that are literally copied into the output).
				literal = stream.read(2)
				if not literal:
					# End of compressed data.
					break
				# Note: the literal may be only a single byte long if it is located exactly at EOF. This is intended and expected - the 1-byte literal is yielded normally, and on the next iteration, decompression is terminated as EOF is detected.
				if debug:
					print(f"Literal: {literal}")
				yield literal
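To make the tagged format concrete, a hypothetical hand-decoded block (an aside, not part of the committed file), again using the default table:

# Illustrative only: tag 0b11000000 means the first two items are one-byte
# table references (here indices 0 and 1) and the remaining six are two-byte
# literals copied verbatim.
data = b"\xc0\x00\x01" + b"ABCDEFGHIJKL"
chunks = list(_decompress_tagged(common.make_peekable(io.BytesIO(data)), 16, DEFAULT_TABLE))
assert chunks == [b"\x00\x00", b"\x00\x08", b"AB", b"CD", b"EF", b"GH", b"IJ", b"KL"]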


def decompress_stream(header_info: common.CompressedHeaderInfo, stream: typing.BinaryIO, *, debug: bool=False) -> typing.Iterator[bytes]:
	"""Decompress compressed data in the format used by 'dcmp' (2)."""
	
	if not isinstance(header_info, common.CompressedType9HeaderInfo):
		raise common.DecompressError(f"Incorrect header type: {type(header_info).__qualname__}")
	
	unknown, table_count_m1, flags_raw = STRUCT_PARAMETERS.unpack(header_info.parameters)
	
	if debug:
		print(f"Value of unknown parameter field: 0x{unknown:>04x}")
	
	table_count = table_count_m1 + 1
	if debug:
		print(f"Table has {table_count} entries")
	
	try:
		flags = ParameterFlags(flags_raw)
	except ValueError:
		raise common.DecompressError(f"Unsupported flags set: 0b{flags_raw:>08b}, currently only bits 0 and 1 are supported")
	
	if debug:
		print(f"Flags: {flags}")
	
	if ParameterFlags.CUSTOM_TABLE in flags:
		table = []
		for _ in range(table_count):
			table.append(common.read_exact(stream, 2))
		if debug:
			print(f"Using custom table: {table}")
	else:
		if table_count_m1 != 0:
			raise common.DecompressError(f"table_count_m1 field is {table_count_m1}, but must be zero when the default table is used")
		table = DEFAULT_TABLE
		if debug:
			print("Using default table")
	
	if ParameterFlags.TAGGED in flags:
		decompress_func = _decompress_tagged
	else:
		decompress_func = _decompress_untagged
	
	yield from decompress_func(common.make_peekable(stream), header_info.decompressed_length, table, debug=debug)
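As a worked illustration of the parameter handling above (an aside, not part of the committed file; the parameter bytes are hypothetical, not from a real resource):

# Illustrative only: parameters declaring a two-entry custom table and untagged data.
params = b"\x00\x00\x01\x01"  # unknown = 0, table_count_m1 = 1, flags = CUSTOM_TABLE
unknown, table_count_m1, flags_raw = STRUCT_PARAMETERS.unpack(params)
flags = ParameterFlags(flags_raw)
assert ParameterFlags.CUSTOM_TABLE in flags and ParameterFlags.TAGGED not in flags
assert table_count_m1 + 1 == 2  # two 2-byte table entries precede the compressed data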
29
setup.cfg
@@ -6,17 +6,21 @@ author = dgelessus
classifiers =
	Development Status :: 4 - Beta
	Intended Audience :: Developers
	Topic :: Software Development :: Libraries :: Python Modules
	Topic :: Software Development :: Disassemblers
	Topic :: System
	Topic :: Utilities
	License :: OSI Approved :: MIT License
	Operating System :: MacOS :: MacOS 9
	Operating System :: MacOS :: MacOS X
	Operating System :: OS Independent
	Programming Language :: Python
	Programming Language :: Python :: 3
	Programming Language :: Python :: 3 :: Only
	Programming Language :: Python :: 3.6
	Programming Language :: Python :: 3.7
license = MIT
license_file = LICENSE
description = A pure Python library for reading old Macintosh resource manager data
description = A pure Python, cross-platform library/tool for reading Macintosh resource data, as stored in resource forks and ``.rsrc`` files
long_description = file: README.rst
long_description_content_type = text/x-rst
keywords =
@@ -32,9 +36,28 @@ keywords =
setup_requires =
	setuptools>=39.2.0
python_requires = >=3.6
packages =
packages = find:

[options.packages.find]
include =
	rsrcfork
	rsrcfork.*

[options.entry_points]
console_scripts =
	rsrcfork = rsrcfork.__main__:main
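Since the console script targets rsrcfork.__main__:main, the installed rsrcfork command is equivalent to running the package as a module (a sketch; the input path is hypothetical):

# Illustrative only; same as invoking the installed "rsrcfork" script:
#   python3 -m rsrcfork path/to/file.rsrc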

[mypy]
files=rsrcfork/**/*.py
python_version = 3.6

disallow_untyped_calls = True
disallow_untyped_defs = True
disallow_untyped_decorators = True

no_implicit_optional = True

warn_unused_ignores = True
warn_unreachable = True

warn_redundant_casts = True