mirror of https://github.com/AppleCommander/AppleCommander.git (synced 2025-01-22 00:32:08 +00:00)
Integrate ShrinkItArchive into AppleCommander
Add ability to decompress and use SDK disk images; tighten up image ordering detection to rely less on file names.
This commit is contained in:
parent
a01a54a522
commit
9b985c140e
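A minimal usage sketch of what this commit enables (not part of the commit itself; the .sdk file name is assumed). The Disk constructor now detects .sdk images by extension, unpacks them via the new shrinkit classes, and then determines the image order from the content rather than the file name:

import com.webcodepro.applecommander.storage.Disk;

public class SdkDemo {
    public static void main(String[] args) throws Exception {
        // ".sdk" is detected by Disk.isSDK() and unpacked with
        // com.webcodepro.shrinkit.Utilities.unpackSDKFile() before order detection.
        Disk disk = new Disk("games.sdk");        // hypothetical file name
        System.out.println(disk.getOrderName());  // order determined from content, not name
    }
}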
@@ -1,12 +0,0 @@
-#Thu Jun 05 04:48:15 EDT 2008
-eclipse.preferences.version=1
-org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
-org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.2
-org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve
-org.eclipse.jdt.core.compiler.compliance=1.4
-org.eclipse.jdt.core.compiler.debug.lineNumber=generate
-org.eclipse.jdt.core.compiler.debug.localVariable=generate
-org.eclipse.jdt.core.compiler.debug.sourceFile=generate
-org.eclipse.jdt.core.compiler.problem.assertIdentifier=warning
-org.eclipse.jdt.core.compiler.problem.enumIdentifier=warning
-org.eclipse.jdt.core.compiler.source=1.3
@@ -84,7 +84,7 @@
	</target>

	<target name="executableGuiJar" depends="init" description="Build GUI executable JAR">
-		<javac srcdir="${source}" destdir="${classes}" target="1.3" source="1.3" classpath="${swtjar}">
+		<javac srcdir="${source}" destdir="${classes}" target="1.5" source="1.5" classpath="${swtjar}">
		<include name="**/*.java"/>
		<exclude name="**/*Test.java"/>
	</javac>
@ -96,7 +96,7 @@ public class Disk {
|
||||
private boolean newImage = false;
|
||||
private ByteArrayImageLayout diskImageManager;
|
||||
private ImageOrder imageOrder;
|
||||
|
||||
|
||||
/**
|
||||
* Get the supported file filters supported by the Disk interface.
|
||||
* This is due to the fact that FilenameFilter is an innerclass of Disk -
|
||||
@ -108,7 +108,7 @@ public class Disk {
|
||||
}
|
||||
return filenameFilters;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Get the supported file extensions supported by the Disk interface.
|
||||
* This is used by the Swing UI to populate the open file dialog box.
|
||||
@@ -138,7 +138,7 @@ public class Disk {
	new FilenameFilter(textBundle.get("Disk.ApplePcImages"), //$NON-NLS-1$
		"*.hdv"), //$NON-NLS-1$
	new FilenameFilter(textBundle.get("Disk.CompressedImages"), //$NON-NLS-1$
-		"*.do.gz; *.dsk.gz; *.po.gz; *.2mg.gz; *.2img.gz"), //$NON-NLS-1$
+		".sdk; *.do.gz; *.dsk.gz; *.po.gz; *.2mg.gz; *.2img.gz"), //$NON-NLS-1$
	new FilenameFilter(textBundle.get("Disk.AllFiles"), //$NON-NLS-1$
		"*.*") //$NON-NLS-1$
};
@ -147,6 +147,7 @@ public class Disk {
|
||||
".dsk", //$NON-NLS-1$
|
||||
".po", //$NON-NLS-1$
|
||||
".nib", //$NON-NLS-1$
|
||||
".sdk", //$NON-NLS-1$
|
||||
".2mg", //$NON-NLS-1$
|
||||
".2img", //$NON-NLS-1$
|
||||
".hdv", //$NON-NLS-1$
|
||||
@ -158,7 +159,7 @@ public class Disk {
|
||||
".2img.gz" //$NON-NLS-1$
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Construct a Disk with the given byte array.
|
||||
*/
|
||||
@ -167,7 +168,7 @@ public class Disk {
|
||||
this.filename = filename;
|
||||
this.newImage = true;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Construct a Disk and load the specified file.
|
||||
* Read in the entire contents of the file.
|
||||
@ -175,30 +176,95 @@ public class Disk {
|
||||
public Disk(String filename) throws IOException {
|
||||
this.filename = filename;
|
||||
File file = new File(filename);
|
||||
InputStream input = new FileInputStream(file);
|
||||
if (isCompressed()) {
|
||||
input = new GZIPInputStream(input);
|
||||
int diskSize = 0;
|
||||
byte[] diskImage = null;
|
||||
if (isSDK()) {
|
||||
// If we have an SDK, unpack it and branch around all this nonsense
|
||||
diskImage = com.webcodepro.shrinkit.Utilities.unpackSDKFile(filename);
|
||||
diskSize = diskImage.length;
|
||||
} else {
|
||||
diskSize = (int) file.length();
|
||||
InputStream input = new FileInputStream(file);
|
||||
if (isCompressed()) {
|
||||
input = new GZIPInputStream(input);
|
||||
}
|
||||
ByteArrayOutputStream diskImageByteArray = new ByteArrayOutputStream(diskSize);
|
||||
StreamUtil.copy(input, diskImageByteArray);
|
||||
diskImage = diskImageByteArray.toByteArray();
|
||||
}
|
||||
int diskSize = (int) file.length();
|
||||
ByteArrayOutputStream diskImageByteArray =
|
||||
new ByteArrayOutputStream(diskSize);
|
||||
StreamUtil.copy(input, diskImageByteArray);
|
||||
byte[] diskImage = diskImageByteArray.toByteArray();
|
||||
if ((diskImage[00] == '2') && (diskImage[01] == 'I') &&
|
||||
(diskImage[02] == 'M') && (diskImage[03] == 'G')) {
|
||||
boolean is2img = false;
|
||||
/* Does it have the 2IMG header? */
|
||||
if ((diskImage[00] == 0x32) && (diskImage[01] == 0x49) && (diskImage[02] == 0x4D) && (diskImage[03]) == 0x47)
|
||||
is2img = true;
|
||||
int offset = UniversalDiskImageLayout.OFFSET;
|
||||
if (is2img == true || diskImage.length == APPLE_800KB_DISK + offset || diskImage.length == APPLE_5MB_HARDDISK + offset || diskImage.length == APPLE_10MB_HARDDISK + offset || diskImage.length == APPLE_20MB_HARDDISK + offset || diskImage.length == APPLE_32MB_HARDDISK + offset) {
|
||||
diskImageManager = new UniversalDiskImageLayout(diskImage);
|
||||
} else {
|
||||
diskImageManager = new ByteArrayImageLayout(diskImage);
|
||||
}
|
||||
if (isProdosOrder()) {
|
||||
imageOrder = new ProdosOrder(diskImageManager);
|
||||
} else if (isDosOrder()) {
|
||||
imageOrder = new DosOrder(diskImageManager);
|
||||
} else if (isNibbleOrder()) {
|
||||
imageOrder = new NibbleOrder(diskImageManager);
|
||||
|
||||
ImageOrder dosOrder = new DosOrder(diskImageManager);
|
||||
ImageOrder proDosOrder = new ProdosOrder(diskImageManager);
|
||||
|
||||
/*
|
||||
* First step: test physical disk orders for viable file systems.
|
||||
*/
|
||||
int rc = -1;
|
||||
if (diskSize == APPLE_140KB_DISK) {
|
||||
// First, test the really-really likely orders/formats for
|
||||
// 5-1/4" disks.
|
||||
imageOrder = dosOrder;
|
||||
if (isProdosFormat() || isDosFormat()) {
|
||||
rc = 0;
|
||||
} else {
|
||||
imageOrder = proDosOrder;
|
||||
if (isProdosFormat() || isDosFormat()) {
|
||||
rc = 0;
|
||||
}
|
||||
}
|
||||
if (rc == -1) {
|
||||
/*
|
||||
* Ok, it's not one of those. Now, let's go back to DOS
|
||||
* order, and see if we recognize other things. If not,
|
||||
* we'll fall through to other processing later.
|
||||
*/
|
||||
imageOrder = dosOrder;
|
||||
rc = testImageOrder();
|
||||
}
|
||||
}
|
||||
if (rc == -1) {
|
||||
imageOrder = proDosOrder;
|
||||
rc = testImageOrder();
|
||||
if (rc == -1) {
|
||||
/*
|
||||
* Couldn't find anything recognizable. Final step:
|
||||
* just punt and start testing filenames.
|
||||
*/
|
||||
if (isProdosOrder() || is2ImgOrder()) {
|
||||
imageOrder = proDosOrder;
|
||||
} else if (isDosOrder()) {
|
||||
imageOrder = dosOrder;
|
||||
} else if (isNibbleOrder()) {
|
||||
imageOrder = new NibbleOrder(diskImageManager);
|
||||
} else {
|
||||
imageOrder = proDosOrder;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Test the image order to see if we can recognize a file system.
* Returns a positive bit mask of the recognized formats, or -1 if nothing
* is recognized.
|
||||
*/
|
||||
public int testImageOrder()
|
||||
{
|
||||
int rc = (true == isProdosFormat() ? 1 : 0) + (true == isDosFormat() ? 2 : 0) + (true == isCpmFormat() ? 4 : 0) + (true == isUniDosFormat() ? 8 : 0) + (true == isPascalFormat() ? 16 : 0) + (true == isOzDosFormat() ? 32 : 0);
|
||||
if (rc == 0)
|
||||
rc = -1;
|
||||
return rc;
|
||||
}
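For reference, a sketch (not in the commit) of how the bit mask built by testImageOrder() could be decoded, following the weights in the sum above:

class ImageOrderReport {
    // bit 0=ProDOS, 1=DOS 3.3, 2=CP/M, 3=UniDOS, 4=Pascal, 5=OzDOS
    static void printRecognizedFormats(com.webcodepro.applecommander.storage.Disk disk) {
        int rc = disk.testImageOrder();
        if (rc == -1) {
            System.out.println("no file system recognized in this order");
            return;
        }
        String[] names = { "ProDOS", "DOS 3.3", "CP/M", "UniDOS", "Pascal", "OzDOS" };
        for (int bit = 0; bit < names.length; bit++) {
            if ((rc & (1 << bit)) != 0) System.out.println(names[bit]);
        }
    }
}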
|
||||
|
||||
/**
|
||||
* Save a Disk image to its file.
|
||||
*/
|
||||
@ -294,6 +360,14 @@ public class Disk {
|
||||
this.filename = filename;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the name of the underlying image order.
|
||||
* @return String
|
||||
*/
|
||||
public String getOrderName() {
|
||||
return (imageOrder == null) ? textBundle.get("FormattedDisk.Unknown") : imageOrder.getName();
|
||||
}
|
||||
|
||||
/**
|
||||
* Indicate if this disk is GZIP compressed.
|
||||
*/
|
||||
@ -301,6 +375,14 @@ public class Disk {
|
||||
return filename.toLowerCase().endsWith(".gz"); //$NON-NLS-1$
|
||||
}
|
||||
|
||||
/**
|
||||
* Indicate if this disk is a ShrinkIt-compressed disk image.
|
||||
*/
|
||||
public boolean isSDK()
|
||||
{
|
||||
return filename.toLowerCase().endsWith(".sdk"); //$NON-NLS-1$
|
||||
}
|
||||
|
||||
/**
|
||||
* Indicate if this disk is ProDOS ordered (beginning with block 0).
|
||||
*/
|
||||
@ -634,8 +716,8 @@ public class Disk {
|
||||
}
|
||||
|
||||
/**
|
||||
* Change to a different ImageOrder. Remains in DOS 3.3 format but the
|
||||
* underlying order can chage.
|
||||
* Change underlying image order to DOS ImageOrder.
|
||||
* Assumes this is a 140k disk image.
|
||||
*
|
||||
* @see ImageOrder
|
||||
*/
|
||||
|
@ -195,10 +195,7 @@ public abstract class FormattedDisk extends Disk implements DirectoryEntry {
|
||||
list.add(new DiskInformation(textBundle.get("FormattedDisk.PhysicalSizeInKb"), getPhysicalSize() / 1024)); //$NON-NLS-1$
|
||||
list.add(new DiskInformation(textBundle.get("FormattedDisk.FreeSpaceInKb"), getFreeSpace() / 1024)); //$NON-NLS-1$
|
||||
list.add(new DiskInformation(textBundle.get("FormattedDisk.UsedSpaceInKb"), getUsedSpace() / 1024)); //$NON-NLS-1$
|
||||
list.add(new DiskInformation(textBundle.get("FormattedDisk.ArchiveOrder"), //$NON-NLS-1$
|
||||
is2ImgOrder() ? textBundle.get("FormattedDisk.2Img") : //$NON-NLS-1$
|
||||
isDosOrder() ? textBundle.get("Dos33") : //$NON-NLS-1$
|
||||
isProdosOrder() ? textBundle.get("Prodos") : textBundle.get("FormattedDisk.Unknown"))); //$NON-NLS-1$ //$NON-NLS-2$
|
||||
list.add(new DiskInformation(textBundle.get("FormattedDisk.ArchiveOrder"), getOrderName())); //$NON-NLS-1$
|
||||
list.add(new DiskInformation(textBundle.get("FormattedDisk.DiskFormat"), getFormat())); //$NON-NLS-1$
|
||||
return list;
|
||||
}
|
||||
|
@ -23,7 +23,7 @@ import com.webcodepro.applecommander.util.TextBundle;
|
||||
|
||||
/**
|
||||
* Manage the domain-specific ResourceBundle. Inheritance is used to
|
||||
* allow logical groupings of ResourceBundles and to reuse the common metods.
|
||||
* allow logical groupings of ResourceBundles and to reuse the common methods.
|
||||
*
|
||||
* @author Rob Greene
|
||||
*/
|
||||
|
@ -25,7 +25,7 @@ NakedOS=NakedOS
|
||||
LockedQ=Locked?
|
||||
DirectoryCreationNotSupported=Unable to create directories.
|
||||
Gutenberg=Gutenberg
|
||||
|
||||
NotAFile='{1}' is not a file.
|
||||
##### FIX #####
|
||||
###############
|
||||
|
||||
@ -56,9 +56,13 @@ FormattedDisk.Unknown=Unknown
|
||||
FormattedDisk.DiskFormat=Disk Format
|
||||
|
||||
# DosOrder
|
||||
DosOrder.OrderName=DOS
|
||||
DosOrder.UnrecognizedFormatError=Unrecognized DOS format\!
|
||||
DosOrder.InvalidSizeError=The track ({0}) and sector ({1}) do not match the disk image size.
|
||||
|
||||
# ProdosOrder
|
||||
ProdosOrder.OrderName=ProDOS
|
||||
|
||||
# NibbleOrder
|
||||
NibbleOrder.InvalidPhysicalSectorError=Unable to locate physical sector {0} on track {1} (\#{2})
|
||||
|
||||
|
@ -122,4 +122,11 @@ public class DosOrder extends ImageOrder {
|
||||
System.arraycopy(data, Disk.SECTOR_SIZE, sectorData, 0, Disk.SECTOR_SIZE);
|
||||
writeSector(track, sector2, sectorData);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the name of this image order.
|
||||
*/
|
||||
public String getName() {
|
||||
return textBundle.get("DosOrder.OrderName"); //$NON-NLS-1$
|
||||
}
|
||||
}
|
||||
|
@ -108,6 +108,11 @@ public abstract class ImageOrder {
|
||||
return getPhysicalSize() / Disk.BLOCK_SIZE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the name of this image order.
|
||||
*/
|
||||
public abstract String getName();
|
||||
|
||||
/**
|
||||
* Read the block from the disk image.
|
||||
*/
|
||||
|
@ -20,6 +20,8 @@
|
||||
package com.webcodepro.applecommander.storage.physical;
|
||||
|
||||
import com.webcodepro.applecommander.storage.Disk;
|
||||
import com.webcodepro.applecommander.storage.StorageBundle;
|
||||
import com.webcodepro.applecommander.util.TextBundle;
|
||||
|
||||
/**
|
||||
* Supports disk images stored in ProDOS physical order.
|
||||
@ -27,6 +29,7 @@ import com.webcodepro.applecommander.storage.Disk;
|
||||
* @author Rob Greene (RobGreene@users.sourceforge.net)
|
||||
*/
|
||||
public class ProdosOrder extends ImageOrder {
|
||||
private TextBundle textBundle = StorageBundle.getInstance();
|
||||
/**
|
||||
* This table contains the block offset for a particular DOS sector.
|
||||
*/
|
||||
@ -101,4 +104,11 @@ public class ProdosOrder extends ImageOrder {
|
||||
System.arraycopy(bytes, 0, blockData, offset * Disk.SECTOR_SIZE, bytes.length);
|
||||
writeBlock(block, blockData);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the name of this image order.
|
||||
*/
|
||||
public String getName() {
|
||||
return textBundle.get("ProdosOrder.OrderName"); //$NON-NLS-1$
|
||||
}
|
||||
}
|
||||
|
@ -40,7 +40,7 @@ import com.webcodepro.applecommander.util.TextBundle;
|
||||
* @author Rob Greene
|
||||
*/
|
||||
public class AppleCommander {
|
||||
public static final String VERSION = "1.3.5.11"; //$NON-NLS-1$
|
||||
public static final String VERSION = "1.3.5.12"; //$NON-NLS-1$
|
||||
private static TextBundle textBundle = UiBundle.getInstance();
|
||||
/**
|
||||
* Launch AppleCommander.
|
||||
|
91
src/com/webcodepro/shrinkit/CRC16.java
Normal file
@ -0,0 +1,91 @@
|
||||
package com.webcodepro.shrinkit;
|
||||
|
||||
import java.util.zip.Checksum;
|
||||
|
||||
/**
|
||||
* Crc16: Calculate 16-bit Cyclic Redundancy Check.
|
||||
* License: GPL, incorporated by reference.
|
||||
*
|
||||
* @author John B. Matthews
|
||||
*/
|
||||
public class CRC16 implements Checksum {
|
||||
|
||||
/** CCITT polynomial: x^16 + x^12 + x^5 + 1 -> 0x1021 (1000000100001) */
|
||||
private static final int poly = 0x1021;
|
||||
private static final int[] table = new int[256];
|
||||
private int value = 0;
|
||||
|
||||
static { // initialize static lookup table
|
||||
for (int i = 0; i < 256; i++) {
|
||||
int crc = i << 8;
|
||||
for (int j = 0; j < 8; j++) {
|
||||
if ((crc & 0x8000) == 0x8000) {
|
||||
crc = (crc << 1) ^ poly;
|
||||
} else {
|
||||
crc = (crc << 1);
|
||||
}
|
||||
}
|
||||
table[i] = crc & 0xffff;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Update 16-bit CRC.
|
||||
*
|
||||
* @param crc starting CRC value
|
||||
* @param bytes input byte array
|
||||
* @param off start offset to data
|
||||
* @param len number of bytes to process
|
||||
* @return 16-bit unsigned CRC
|
||||
*/
|
||||
private int update(int crc, byte[] bytes, int off, int len) {
|
||||
for (int i = off; i < (off + len); i++) {
|
||||
int b = (bytes[i] & 0xff);
|
||||
crc = (table[((crc >> 8) & 0xff) ^ b] ^ (crc << 8)) & 0xffff;
|
||||
}
|
||||
return crc;
|
||||
}
|
||||
|
||||
public static int[] getTable() {
|
||||
return table;
|
||||
}
|
||||
|
||||
public long getValue() {
|
||||
return value;
|
||||
}
|
||||
|
||||
public void reset() {
|
||||
value = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Update 16-bit CRC.
|
||||
*
|
||||
* @param b input byte
|
||||
*/
|
||||
public void update(int b) {
|
||||
byte[] ba = { (byte) (b & 0xff) };
|
||||
value = update(value, ba, 0, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* Update 16-bit CRC.
|
||||
*
|
||||
* @param b input byte array
|
||||
*/
|
||||
public void update(byte[] b) {
|
||||
value = update(value, b, 0, b.length);
|
||||
}
|
||||
|
||||
/**
|
||||
* Update 16-bit CRC.
|
||||
*
|
||||
* @param b input byte array
|
||||
* @param off starting offset to data
|
||||
* @param len number of bytes to process
|
||||
*/
|
||||
public void update(byte[] b, int off, int len) {
|
||||
value = update(value, b, off, len);
|
||||
}
|
||||
|
||||
}
|
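A small usage sketch (not part of the commit) for the new CRC16 class, which implements java.util.zip.Checksum and is what LittleEndianByteInputStream uses for header validation:

import com.webcodepro.shrinkit.CRC16;

public class Crc16Demo {
    public static void main(String[] args) {
        CRC16 crc = new CRC16();
        crc.update("NuFX".getBytes());                         // arbitrary sample bytes
        System.out.printf("CRC-16/CCITT: 0x%04X%n", crc.getValue());
        crc.reset();                                           // back to 0x0000
    }
}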
243
src/com/webcodepro/shrinkit/HeaderBlock.java
Normal file
@ -0,0 +1,243 @@
|
||||
package com.webcodepro.shrinkit;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
|
||||
import com.webcodepro.shrinkit.io.LittleEndianByteInputStream;
|
||||
|
||||
/**
|
||||
* The Header Block contains information and content
|
||||
* about a single entry (be it a file or disk image).
|
||||
* <p>
|
||||
* Note that we need to support multiple versions of the NuFX
|
||||
* archive format. Some details may be invalid, depending on
|
||||
* version, and those are documented in the getter methods.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
* @see http://www.nulib.com/library/FTN.e08002.htm
|
||||
*/
|
||||
public class HeaderBlock {
|
||||
private int headerCrc;
|
||||
private int attribCount;
|
||||
private int versionNumber;
|
||||
private long totalThreads;
|
||||
private int fileSysId;
|
||||
private int fileSysInfo;
|
||||
private long access;
|
||||
private long fileType;
|
||||
private long extraType;
|
||||
private int storageType;
|
||||
private Date createWhen;
|
||||
private Date modWhen;
|
||||
private Date archiveWhen;
|
||||
private int optionSize;
|
||||
private byte[] optionListBytes;
|
||||
private byte[] attribBytes;
|
||||
private String filename;
|
||||
private String rawFilename;
|
||||
private List<ThreadRecord> threads = new ArrayList<ThreadRecord>();
|
||||
|
||||
/**
|
||||
* Create the Header Block. This is done dynamically since
|
||||
* the Header Block size varies significantly.
|
||||
*/
|
||||
public HeaderBlock(LittleEndianByteInputStream bs) throws IOException {
|
||||
bs.checkNuFxId();
|
||||
headerCrc = bs.readWord();
|
||||
attribCount = bs.readWord();
|
||||
versionNumber = bs.readWord();
|
||||
totalThreads = bs.readLong();
|
||||
fileSysId = bs.readWord();
|
||||
fileSysInfo = bs.readWord();
|
||||
access = bs.readLong();
|
||||
fileType = bs.readLong();
|
||||
extraType = bs.readLong();
|
||||
storageType = bs.readWord();
|
||||
createWhen = bs.readDate();
|
||||
modWhen = bs.readDate();
|
||||
archiveWhen = bs.readDate();
|
||||
// Read the mysterious option_list
|
||||
if (versionNumber >= 1) {
|
||||
optionSize = bs.readWord();
|
||||
if (optionSize > 0) {
|
||||
optionListBytes = bs.readBytes(optionSize-2);
|
||||
}
|
||||
}
|
||||
// Compute attribute bytes that exist and read (if needed)
|
||||
int sizeofAttrib = attribCount - 58;
|
||||
if (versionNumber >= 1) {
|
||||
if (optionSize == 0) sizeofAttrib -= 2;
|
||||
else sizeofAttrib -= optionSize;
|
||||
}
|
||||
if (sizeofAttrib > 0) {
|
||||
attribBytes = bs.readBytes(sizeofAttrib);
|
||||
}
|
||||
// Read the (defunct) filename
|
||||
int length = bs.readWord();
|
||||
if (length > 0) {
|
||||
rawFilename = new String(bs.readBytes(length));
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Read in all data threads. All ThreadRecords are read and then
|
||||
* each thread's data is read (per NuFX spec).
|
||||
*/
|
||||
public void readThreads(LittleEndianByteInputStream bs) throws IOException {
|
||||
for (long l=0; l<totalThreads; l++) threads.add(new ThreadRecord(this, bs));
|
||||
for (ThreadRecord r : threads) r.readThreadData(bs);
|
||||
}
|
||||
|
||||
/**
|
||||
* Locate the filename and return it. It may have been given in the old
|
||||
* location, in which case, it is in the String filename. Otherwise it will
|
||||
* be in the filename thread. If it is in the thread, we shove it in the
|
||||
* filename variable just so we don't need to search for it later. This
|
||||
* should not be a problem, because if we write the file, we'll write the
|
||||
* more current version anyway.
|
||||
*/
|
||||
public String getFilename() {
|
||||
if (filename == null) {
|
||||
ThreadRecord r = findThreadRecord(ThreadKind.FILENAME);
|
||||
if (r != null) filename = r.getText();
|
||||
if (filename == null) filename = rawFilename;
|
||||
}
|
||||
return filename;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the data fork.
|
||||
*/
|
||||
public ThreadRecord getDataForkInputStream() throws IOException {
|
||||
return findThreadRecord(ThreadKind.DATA_FORK);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the resource fork.
|
||||
*/
|
||||
public ThreadRecord getResourceForkInputStream() throws IOException {
|
||||
return findThreadRecord(ThreadKind.RESOURCE_FORK);
|
||||
}
|
||||
|
||||
/**
|
||||
* Locate a ThreadRecord by its ThreadKind.
|
||||
*/
|
||||
protected ThreadRecord findThreadRecord(ThreadKind tk) {
|
||||
for (ThreadRecord r : threads) {
|
||||
if (r.getThreadKind() == tk) return r;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
// GENERATED CODE
|
||||
|
||||
public int getHeaderCrc() {
|
||||
return headerCrc;
|
||||
}
|
||||
public void setHeaderCrc(int headerCrc) {
|
||||
this.headerCrc = headerCrc;
|
||||
}
|
||||
public int getAttribCount() {
|
||||
return attribCount;
|
||||
}
|
||||
public void setAttribCount(int attribCount) {
|
||||
this.attribCount = attribCount;
|
||||
}
|
||||
public int getVersionNumber() {
|
||||
return versionNumber;
|
||||
}
|
||||
public void setVersionNumber(int versionNumber) {
|
||||
this.versionNumber = versionNumber;
|
||||
}
|
||||
public long getTotalThreads() {
|
||||
return totalThreads;
|
||||
}
|
||||
public void setTotalThreads(long totalThreads) {
|
||||
this.totalThreads = totalThreads;
|
||||
}
|
||||
public int getFileSysId() {
|
||||
return fileSysId;
|
||||
}
|
||||
public void setFileSysId(int fileSysId) {
|
||||
this.fileSysId = fileSysId;
|
||||
}
|
||||
public int getFileSysInfo() {
|
||||
return fileSysInfo;
|
||||
}
|
||||
public void setFileSysInfo(int fileSysInfo) {
|
||||
this.fileSysInfo = fileSysInfo;
|
||||
}
|
||||
public long getAccess() {
|
||||
return access;
|
||||
}
|
||||
public void setAccess(long access) {
|
||||
this.access = access;
|
||||
}
|
||||
public long getFileType() {
|
||||
return fileType;
|
||||
}
|
||||
public void setFileType(long fileType) {
|
||||
this.fileType = fileType;
|
||||
}
|
||||
public long getExtraType() {
|
||||
return extraType;
|
||||
}
|
||||
public void setExtraType(long extraType) {
|
||||
this.extraType = extraType;
|
||||
}
|
||||
public int getStorageType() {
|
||||
return storageType;
|
||||
}
|
||||
public void setStorageType(int storageType) {
|
||||
this.storageType = storageType;
|
||||
}
|
||||
public Date getCreateWhen() {
|
||||
return createWhen;
|
||||
}
|
||||
public void setCreateWhen(Date createWhen) {
|
||||
this.createWhen = createWhen;
|
||||
}
|
||||
public Date getModWhen() {
|
||||
return modWhen;
|
||||
}
|
||||
public void setModWhen(Date modWhen) {
|
||||
this.modWhen = modWhen;
|
||||
}
|
||||
public Date getArchiveWhen() {
|
||||
return archiveWhen;
|
||||
}
|
||||
public void setArchiveWhen(Date archiveWhen) {
|
||||
this.archiveWhen = archiveWhen;
|
||||
}
|
||||
public int getOptionSize() {
|
||||
return optionSize;
|
||||
}
|
||||
public void setOptionSize(int optionSize) {
|
||||
this.optionSize = optionSize;
|
||||
}
|
||||
public byte[] getOptionListBytes() {
|
||||
return optionListBytes;
|
||||
}
|
||||
public void setOptionListBytes(byte[] optionListBytes) {
|
||||
this.optionListBytes = optionListBytes;
|
||||
}
|
||||
public byte[] getAttribBytes() {
|
||||
return attribBytes;
|
||||
}
|
||||
public void setAttribBytes(byte[] attribBytes) {
|
||||
this.attribBytes = attribBytes;
|
||||
}
|
||||
public void setFilename(String filename) {
|
||||
this.filename = filename;
|
||||
}
|
||||
public String getRawFilename() {
|
||||
return rawFilename;
|
||||
}
|
||||
public List<ThreadRecord> getThreadRecords() {
|
||||
return threads;
|
||||
}
|
||||
public void setThreadRecords(List<ThreadRecord> threads) {
|
||||
this.threads = threads;
|
||||
}
|
||||
}
|
126
src/com/webcodepro/shrinkit/MasterHeaderBlock.java
Normal file
@ -0,0 +1,126 @@
|
||||
package com.webcodepro.shrinkit;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Date;
|
||||
|
||||
import com.webcodepro.shrinkit.io.LittleEndianByteInputStream;
|
||||
|
||||
/**
|
||||
* The Master Header Block contains information about the entire
|
||||
* ShrinkIt archive.
|
||||
* <p>
|
||||
* Note that we need to support multiple versions of the NuFX
|
||||
* archive format. Some details may be invalid, depending on
|
||||
* version, and those are documented in the getter methods.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
* @see http://www.nulib.com/library/FTN.e08002.htm
|
||||
*/
|
||||
public class MasterHeaderBlock {
|
||||
private static final int MASTER_HEADER_LENGTH = 48;
|
||||
private int masterCrc;
|
||||
private boolean validCrc;
|
||||
private long totalRecords;
|
||||
private Date archiveCreateWhen;
|
||||
private Date archiveModWhen;
|
||||
private int masterVersion;
|
||||
private long masterEof;
|
||||
private byte[] nuFileId = {0,0,0,0,0,0};
|
||||
|
||||
/**
|
||||
* Create the Master Header Block, based on the LittleEndianByteInputStream.
|
||||
*/
|
||||
public MasterHeaderBlock(LittleEndianByteInputStream bs) throws IOException {
|
||||
int headerOffset = 0;
|
||||
nuFileId = bs.readBytes(6);
|
||||
|
||||
if (checkId(nuFileId,BXY_ID)) {
|
||||
bs.readBytes(127 - NUFILE_ID.length);
|
||||
headerOffset = 128;
|
||||
int count = bs.read();
|
||||
if (count != 0)
|
||||
throw new IOException("This is actually a Binary II archive with multiple files in it.");
|
||||
nuFileId = bs.readBytes(6);
|
||||
}
|
||||
if (!checkId(nuFileId,NUFILE_ID)) {
|
||||
throw new IOException("Unable to decode this archive.");
|
||||
}
|
||||
masterCrc = bs.readWord();
|
||||
bs.resetCrc(); // CRC is computed from this point to the end of the header
|
||||
totalRecords = bs.readLong();
|
||||
archiveCreateWhen = bs.readDate();
|
||||
archiveModWhen = bs.readDate();
|
||||
masterVersion = bs.readWord();
|
||||
if (masterVersion > 0) {
|
||||
bs.readBytes(8); // documented to be null, but we don't care
|
||||
masterEof = bs.readLong();
|
||||
} else {
|
||||
masterEof = -1;
|
||||
}
|
||||
// Read whatever remains of the fixed size header
|
||||
while (bs.getTotalBytesRead() < MASTER_HEADER_LENGTH + headerOffset) {
|
||||
bs.readByte();
|
||||
}
|
||||
validCrc = (masterCrc == bs.getCrcValue());
|
||||
}
|
||||
|
||||
// GENERATED CODE
|
||||
|
||||
public int getMasterCrc() {
|
||||
return masterCrc;
|
||||
}
|
||||
public void setMasterCrc(int masterCrc) {
|
||||
this.masterCrc = masterCrc;
|
||||
}
|
||||
public long getTotalRecords() {
|
||||
return totalRecords;
|
||||
}
|
||||
public void setTotalRecords(long totalRecords) {
|
||||
this.totalRecords = totalRecords;
|
||||
}
|
||||
public Date getArchiveCreateWhen() {
|
||||
return archiveCreateWhen;
|
||||
}
|
||||
public void setArchiveCreateWhen(Date archiveCreateWhen) {
|
||||
this.archiveCreateWhen = archiveCreateWhen;
|
||||
}
|
||||
public Date getArchiveModWhen() {
|
||||
return archiveModWhen;
|
||||
}
|
||||
public void setArchiveModWhen(Date archiveModWhen) {
|
||||
this.archiveModWhen = archiveModWhen;
|
||||
}
|
||||
public int getMasterVersion() {
|
||||
return masterVersion;
|
||||
}
|
||||
public void setMasterVersion(int masterVersion) {
|
||||
this.masterVersion = masterVersion;
|
||||
}
|
||||
public long getMasterEof() {
|
||||
return masterEof;
|
||||
}
|
||||
public void setMasterEof(long masterEof) {
|
||||
this.masterEof = masterEof;
|
||||
}
|
||||
public boolean isValidCrc() {
|
||||
return validCrc;
|
||||
}
|
||||
/**
|
||||
* Test that the requested constant is present.
|
||||
*/
|
||||
private boolean checkId(byte[] data, byte[] constant) {
|
||||
for (int i = 0; i < constant.length; i++){
|
||||
if (data[i] != constant[i])
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/** Master Header Block identifier "magic" bytes. */
|
||||
public static final byte[] NUFILE_ID = { 0x4e, (byte)0xf5, 0x46, (byte)0xe9, 0x6c, (byte)0xe5 };
|
||||
/** Header Block identifier "magic" bytes. */
|
||||
public static final byte[] NUFX_ID = { 0x4e, (byte)0xf5, 0x46, (byte)0xd8 };
|
||||
* Binary II identifier "magic" bytes. */
|
||||
public static final byte[] BXY_ID = { 0x0a, 0x47, 0x4c };
|
||||
|
||||
}
|
39
src/com/webcodepro/shrinkit/NuFileArchive.java
Normal file
@ -0,0 +1,39 @@
|
||||
package com.webcodepro.shrinkit;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import com.webcodepro.shrinkit.io.LittleEndianByteInputStream;
|
||||
|
||||
/**
|
||||
* Basic reading of a NuFX archive.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public class NuFileArchive {
|
||||
private MasterHeaderBlock master;
|
||||
private List<HeaderBlock> headers;
|
||||
|
||||
/**
|
||||
* Read in the NuFile/NuFX/Shrinkit archive.
|
||||
*/
|
||||
public NuFileArchive(InputStream inputStream) throws IOException {
|
||||
LittleEndianByteInputStream bs = new LittleEndianByteInputStream(inputStream);
|
||||
master = new MasterHeaderBlock(bs);
|
||||
headers = new ArrayList<HeaderBlock>();
|
||||
for (int i=0; i<master.getTotalRecords(); i++) {
|
||||
HeaderBlock header = new HeaderBlock(bs);
|
||||
header.readThreads(bs);
|
||||
headers.add(header);
|
||||
}
|
||||
}
|
||||
|
||||
public MasterHeaderBlock getMasterHeaderBlock() {
|
||||
return master;
|
||||
}
|
||||
public List<HeaderBlock> getHeaderBlocks() {
|
||||
return headers;
|
||||
}
|
||||
}
|
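Usage sketch (not part of the commit; the archive path is assumed) showing how NuFileArchive exposes the parsed master header and record list:

import java.io.FileInputStream;
import com.webcodepro.shrinkit.HeaderBlock;
import com.webcodepro.shrinkit.NuFileArchive;

public class ListArchive {
    public static void main(String[] args) throws Exception {
        NuFileArchive archive = new NuFileArchive(new FileInputStream("archive.shk")); // hypothetical path
        System.out.println("records: " + archive.getMasterHeaderBlock().getTotalRecords());
        for (HeaderBlock block : archive.getHeaderBlocks()) {
            System.out.println(block.getFilename() + " (" + block.getTotalThreads() + " threads)");
        }
    }
}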
24
src/com/webcodepro/shrinkit/ThreadClass.java
Normal file
@ -0,0 +1,24 @@
|
||||
package com.webcodepro.shrinkit;
|
||||
|
||||
/**
|
||||
* Define and decode the thread_class field.
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public enum ThreadClass {
|
||||
MESSAGE, CONTROL, DATA, FILENAME;
|
||||
|
||||
/**
|
||||
* Find the given ThreadClass.
|
||||
* @throws IllegalArgumentException if the thread_class is unknown
|
||||
*/
|
||||
public static ThreadClass find(int threadClass) {
|
||||
switch (threadClass) {
|
||||
case 0x0000: return MESSAGE;
|
||||
case 0x0001: return CONTROL;
|
||||
case 0x0002: return DATA;
|
||||
case 0x0003: return FILENAME;
|
||||
default:
|
||||
throw new IllegalArgumentException("Unknown thread_class of " + threadClass);
|
||||
}
|
||||
}
|
||||
}
|
32
src/com/webcodepro/shrinkit/ThreadFormat.java
Normal file
@ -0,0 +1,32 @@
|
||||
package com.webcodepro.shrinkit;
|
||||
|
||||
/**
|
||||
* Define and decode the thread_format field.
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public enum ThreadFormat {
|
||||
UNCOMPRESSED(0x0000), HUFFMAN_SQUEEZE(0x0001), DYNAMIC_LZW1(0x0002), DYNAMIC_LZW2(0x0003),
|
||||
UNIX_12BIT_COMPRESS(0x0004), UNIX_16BIT_COMPRESS(0x0005);
|
||||
|
||||
/** Associate the hex codes with the enum */
|
||||
private int threadFormat;
|
||||
|
||||
private ThreadFormat(int threadFormat) {
|
||||
this.threadFormat = threadFormat;
|
||||
}
|
||||
|
||||
public int getThreadFormat() {
|
||||
return threadFormat;
|
||||
}
|
||||
|
||||
/**
|
||||
* Find the ThreadFormat.
|
||||
* @throws IllegalArgumentException if the thread_format is unknown
|
||||
*/
|
||||
public static ThreadFormat find(int threadFormat) {
|
||||
for (ThreadFormat f : values()) {
|
||||
if (threadFormat == f.getThreadFormat()) return f;
|
||||
}
|
||||
throw new IllegalArgumentException("Unknown thread_format of " + threadFormat);
|
||||
}
|
||||
}
|
41
src/com/webcodepro/shrinkit/ThreadKind.java
Normal file
@ -0,0 +1,41 @@
|
||||
package com.webcodepro.shrinkit;
|
||||
|
||||
/**
|
||||
* Define and decode the thread_kind field.
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public enum ThreadKind {
|
||||
ASCII_TEXT, ALLOCATED_SPACE, APPLE_IIGS_ICON, CREATE_DIRECTORY, DATA_FORK, DISK_IMAGE, RESOURCE_FORK,
|
||||
FILENAME;
|
||||
|
||||
/**
|
||||
* Find the specific ThreadKind.
|
||||
* @throws IllegalArgumentException when the thread_kind cannot be determined
|
||||
*/
|
||||
public static ThreadKind find(int threadKind, ThreadClass threadClass) {
|
||||
switch (threadClass) {
|
||||
case MESSAGE:
|
||||
switch (threadKind) {
|
||||
case 0x0000: return ASCII_TEXT;
|
||||
case 0x0001: return ALLOCATED_SPACE;
|
||||
case 0x0002: return APPLE_IIGS_ICON;
|
||||
}
|
||||
throw new IllegalArgumentException("Unknown thread_kind "+threadKind+" for message thread_class of " + threadClass);
|
||||
case CONTROL:
|
||||
if (threadKind == 0x0000) return CREATE_DIRECTORY;
|
||||
throw new IllegalArgumentException("Unknown thread_kind "+threadKind+" for control thread_class of " + threadClass);
|
||||
case DATA:
|
||||
switch (threadKind) {
|
||||
case 0x0000: return DATA_FORK;
|
||||
case 0x0001: return DISK_IMAGE;
|
||||
case 0x0002: return RESOURCE_FORK;
|
||||
}
|
||||
throw new IllegalArgumentException("Unknown thread_kind "+threadKind+" for data thread_class of " + threadClass);
|
||||
case FILENAME:
|
||||
if (threadKind == 0x0000) return FILENAME;
|
||||
throw new IllegalArgumentException("Unknown thread_kind "+threadKind+" for filename thread_class of " + threadClass);
|
||||
default:
|
||||
throw new IllegalArgumentException("Unknown thread_class of " + threadClass);
|
||||
}
|
||||
}
|
||||
}
|
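A short sketch (not part of the commit) of the two-step decoding these enums perform on the thread header fields:

import com.webcodepro.shrinkit.ThreadClass;
import com.webcodepro.shrinkit.ThreadKind;

public class KindDemo {
    public static void main(String[] args) {
        ThreadClass tc = ThreadClass.find(0x0002);     // DATA
        ThreadKind tk = ThreadKind.find(0x0001, tc);   // DISK_IMAGE within the DATA class
        System.out.println(tc + " / " + tk);
    }
}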
163
src/com/webcodepro/shrinkit/ThreadRecord.java
Normal file
@ -0,0 +1,163 @@
|
||||
package com.webcodepro.shrinkit;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
|
||||
import com.webcodepro.shrinkit.io.LittleEndianByteInputStream;
|
||||
import com.webcodepro.shrinkit.io.NufxLzw1InputStream;
|
||||
import com.webcodepro.shrinkit.io.NufxLzw2InputStream;
|
||||
|
||||
/**
|
||||
* This represents a single thread from the Shrinkit archive.
|
||||
* As it is constructed, the thread "header" is read. Once all
|
||||
* threads have been constructed, use <code>readThreadData</code>
|
||||
* to load up the data.
|
||||
* <p>
|
||||
* Depending on the type of thread, the data may be text. If so,
|
||||
* <code>isText</code> will return true and <code>getText</code>
|
||||
* will return the string. Otherwise the data should be read through
|
||||
* one of the <code>InputStream</code> options.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public class ThreadRecord {
|
||||
private ThreadClass threadClass;
|
||||
private ThreadFormat threadFormat;
|
||||
private ThreadKind threadKind;
|
||||
private int threadCrc;
|
||||
private long threadEof;
|
||||
private long compThreadEof;
|
||||
private byte[] threadData;
|
||||
|
||||
/**
|
||||
* Construct the ThreadRecord and read the header details with no hints
|
||||
* from the Header Block.
|
||||
*/
|
||||
public ThreadRecord(LittleEndianByteInputStream bs) throws IOException {
|
||||
this(null, bs);
|
||||
}
|
||||
|
||||
/**
|
||||
* Construct the ThreadRecord and read the header details.
|
||||
*/
|
||||
public ThreadRecord(HeaderBlock hb, LittleEndianByteInputStream bs) throws IOException {
|
||||
threadClass = ThreadClass.find(bs.readWord());
|
||||
threadFormat = ThreadFormat.find(bs.readWord());
|
||||
threadKind = ThreadKind.find(bs.readWord(), threadClass);
|
||||
threadCrc = bs.readWord();
|
||||
threadEof = bs.readLong();
|
||||
compThreadEof = bs.readLong();
|
||||
if ((threadKind == ThreadKind.DISK_IMAGE) && (hb != null)) {
|
||||
/* If we have hints from the header block, repair some disk image related bugs. */
|
||||
if (hb.getStorageType() <= 13 ) {
|
||||
/* supposed to be block size, but SHK v3.0.1 stored it wrong */
|
||||
threadEof = hb.getExtraType() * 512;
|
||||
// System.out.println("Found erroneous storage type... fixing.");
|
||||
} else if (hb.getStorageType() == 256 &&
|
||||
hb.getExtraType() == 280 &&
|
||||
hb.getFileSysId() == 2 ) { // FileSysDOS33
|
||||
/*
|
||||
* Fix for less-common ShrinkIt problem: looks like an old
|
||||
* version of GS/ShrinkIt used 256 as the block size when
|
||||
* compressing DOS 3.3 images from 5.25" disks. If that
|
||||
* appears to be the case here, crank up the block size.
|
||||
*/
|
||||
threadEof = hb.getExtraType() * 512;
|
||||
} else {
|
||||
threadEof = hb.getExtraType() * hb.getStorageType();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Read the raw thread data. This must be called.
|
||||
*/
|
||||
public void readThreadData(LittleEndianByteInputStream bs) throws IOException {
|
||||
threadData = bs.readBytes((int)compThreadEof);
|
||||
}
|
||||
/**
|
||||
* Determine if this is a text-type field.
|
||||
*/
|
||||
public boolean isText() {
|
||||
return threadKind == ThreadKind.ASCII_TEXT || threadKind == ThreadKind.FILENAME;
|
||||
}
|
||||
/**
|
||||
* Return the text data.
|
||||
*/
|
||||
public String getText() {
|
||||
return isText() ? new String(threadData, 0, (int)threadEof) : null;
|
||||
}
|
||||
/**
|
||||
* Get raw data bytes (compressed).
|
||||
*/
|
||||
public byte[] getBytes() {
|
||||
return threadData;
|
||||
}
|
||||
/**
|
||||
* Get the raw data input stream.
|
||||
*/
|
||||
public InputStream getRawInputStream() {
|
||||
return new ByteArrayInputStream(threadData);
|
||||
}
|
||||
/**
|
||||
* Get the appropriate input data stream for this thread to decompress the contents.
|
||||
*/
|
||||
public InputStream getInputStream() throws IOException {
|
||||
switch (threadFormat) {
|
||||
case UNCOMPRESSED:
|
||||
return getRawInputStream();
|
||||
case DYNAMIC_LZW1:
|
||||
return new NufxLzw1InputStream(new LittleEndianByteInputStream(getRawInputStream()));
|
||||
case DYNAMIC_LZW2:
|
||||
return new NufxLzw2InputStream(new LittleEndianByteInputStream(getRawInputStream()));
|
||||
default:
|
||||
throw new IOException("The thread format " + threadFormat + " does not have an InputStream associated with it!");
|
||||
}
|
||||
}
|
||||
|
||||
// GENERATED CODE
|
||||
|
||||
public ThreadClass getThreadClass() {
|
||||
return threadClass;
|
||||
}
|
||||
public void setThreadClass(ThreadClass threadClass) {
|
||||
this.threadClass = threadClass;
|
||||
}
|
||||
public ThreadFormat getThreadFormat() {
|
||||
return threadFormat;
|
||||
}
|
||||
public void setThreadFormat(ThreadFormat threadFormat) {
|
||||
this.threadFormat = threadFormat;
|
||||
}
|
||||
public ThreadKind getThreadKind() {
|
||||
return threadKind;
|
||||
}
|
||||
public void setThreadKind(ThreadKind threadKind) {
|
||||
this.threadKind = threadKind;
|
||||
}
|
||||
public int getThreadCrc() {
|
||||
return threadCrc;
|
||||
}
|
||||
public void setThreadCrc(int threadCrc) {
|
||||
this.threadCrc = threadCrc;
|
||||
}
|
||||
public long getThreadEof() {
|
||||
return threadEof;
|
||||
}
|
||||
public void setThreadEof(long threadEof) {
|
||||
this.threadEof = threadEof;
|
||||
}
|
||||
public long getCompThreadEof() {
|
||||
return compThreadEof;
|
||||
}
|
||||
public void setCompThreadEof(long compThreadEof) {
|
||||
this.compThreadEof = compThreadEof;
|
||||
}
|
||||
public byte[] getThreadData() {
|
||||
return threadData;
|
||||
}
|
||||
public void setThreadData(byte[] threadData) {
|
||||
this.threadData = threadData;
|
||||
}
|
||||
}
|
91
src/com/webcodepro/shrinkit/TimeRec.java
Normal file
@ -0,0 +1,91 @@
|
||||
package com.webcodepro.shrinkit;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.Calendar;
|
||||
import java.util.Date;
|
||||
import java.util.GregorianCalendar;
|
||||
|
||||
/**
|
||||
* Apple IIgs Toolbox TimeRec object.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public class TimeRec {
|
||||
private static final int SECOND = 0;
|
||||
private static final int MINUTE = 1;
|
||||
private static final int HOUR = 2;
|
||||
private static final int YEAR = 3;
|
||||
private static final int DAY = 4;
|
||||
private static final int MONTH = 5;
|
||||
private static final int WEEKDAY = 7;
|
||||
private static final int LENGTH = 8;
|
||||
private byte[] data = null;
|
||||
|
||||
/**
|
||||
* Construct a TimeRec with the current date.
|
||||
*/
|
||||
public TimeRec() {
|
||||
this(new Date());
|
||||
}
|
||||
/**
|
||||
* Construct a TimeRec with the specified date. You may pass in a null for a null date (all 0x00's).
|
||||
*/
|
||||
public TimeRec(Date date) {
|
||||
setDate(date);
|
||||
}
|
||||
/**
|
||||
* Construct a TimeRec from the given LENGTH byte array.
|
||||
*/
|
||||
public TimeRec(byte[] bytes, int offset) {
|
||||
if (bytes == null || bytes.length - offset < LENGTH) {
|
||||
throw new IllegalArgumentException("TimeRec requires a " + LENGTH + " byte array.");
|
||||
}
|
||||
//data = Arrays.copyOfRange(bytes, offset, LENGTH);
|
||||
data = new byte[LENGTH];
|
||||
System.arraycopy(bytes, offset, data, 0, LENGTH);
|
||||
}
|
||||
/**
|
||||
* Construct a TimeRec from the InputStream.
|
||||
*/
|
||||
public TimeRec(InputStream inputStream) throws IOException {
|
||||
data = new byte[LENGTH];
|
||||
for (int i=0; i<LENGTH; i++) {
|
||||
data[i] = (byte)inputStream.read();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the date.
|
||||
*/
|
||||
public void setDate(Date date) {
|
||||
data = new byte[LENGTH];
|
||||
if (date != null) {
|
||||
GregorianCalendar gc = new GregorianCalendar();
|
||||
gc.setTime(date);
|
||||
data[SECOND] = (byte)gc.get(Calendar.SECOND);
|
||||
data[MINUTE] = (byte)gc.get(Calendar.MINUTE);
|
||||
data[HOUR] = (byte)gc.get(Calendar.HOUR_OF_DAY);
|
||||
data[YEAR] = (byte)(gc.get(Calendar.YEAR) - 1900);
|
||||
data[DAY] = (byte)(gc.get(Calendar.DAY_OF_MONTH) - 1);
|
||||
data[MONTH] = (byte)gc.get(Calendar.MONTH);
|
||||
data[WEEKDAY] = (byte)gc.get(Calendar.DAY_OF_WEEK);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert the TimeRec into a Java Date object.
|
||||
* Note that years 1900-1939 are assumed to be 2000-2039 per the NuFX addendum
|
||||
* at http://www.nulib.com/library/nufx-addendum.htm.
|
||||
* @see http://www.nulib.com/library/nufx-addendum.htm
|
||||
*/
|
||||
public Date getDate() {
|
||||
int year = data[YEAR]+1900;
|
||||
if (year < 1940) year+= 100;
|
||||
GregorianCalendar gc = new GregorianCalendar(year, data[MONTH]+1, data[DAY], data[HOUR], data[MINUTE], data[SECOND]);
|
||||
return gc.getTime();
|
||||
}
|
||||
public byte[] getBytes() {
|
||||
return data;
|
||||
}
|
||||
}
|
78
src/com/webcodepro/shrinkit/Utilities.java
Normal file
@ -0,0 +1,78 @@
|
||||
package com.webcodepro.shrinkit;
|
||||
|
||||
/*
|
||||
* Copyright (C) 2012 by David Schmidt
|
||||
* david__schmidt at users.sourceforge.net
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
|
||||
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
|
||||
import com.webcodepro.applecommander.storage.StorageBundle;
|
||||
import com.webcodepro.applecommander.util.TextBundle;
|
||||
import com.webcodepro.shrinkit.io.LittleEndianByteInputStream;
|
||||
|
||||
/**
|
||||
* Some higher-level utilities for dealing with a NuFX archive.
|
||||
*
|
||||
* @author david__schmidt at users.sourceforge.net
|
||||
*/
|
||||
public class Utilities
|
||||
{
|
||||
/**
|
||||
* Interpret a SDK NuFile/NuFX/Shrinkit archive as a full disk image.
|
||||
*
|
||||
* @return byte[] buffer containing full disk of data; null if unable to read
|
||||
* @throws IllegalArgumentException if the filename is not able to be read
|
||||
* @throws IOException if the file is malformed
|
||||
*/
|
||||
public static byte[] unpackSDKFile(String fileName) throws IOException {
|
||||
TextBundle textBundle = StorageBundle.getInstance();
|
||||
byte buffer[] = null;
|
||||
ThreadRecord dataThread = null;
|
||||
File file = new File(fileName);
|
||||
if (file.isDirectory() || !file.canRead()) {
|
||||
throw new IllegalArgumentException(textBundle.format("NotAFile", fileName, 1)); //$NON-NLS-1$
|
||||
}
|
||||
InputStream is = new FileInputStream(file);
|
||||
NuFileArchive a = new NuFileArchive(is);
|
||||
for (HeaderBlock b : a.getHeaderBlocks()) {
|
||||
for (ThreadRecord r : b.getThreadRecords()) {
|
||||
try
|
||||
{
|
||||
if (r.getThreadKind() == ThreadKind.DISK_IMAGE)
|
||||
{
|
||||
dataThread = r;
|
||||
}
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
System.out.println(ex);
|
||||
}
|
||||
}
|
||||
dataThread.readThreadData(new LittleEndianByteInputStream(dataThread.getRawInputStream()));
|
||||
InputStream fis = dataThread.getInputStream();
|
||||
int dmgLen = (int)(dataThread.getThreadEof());
|
||||
buffer = new byte[dmgLen];
|
||||
fis.read(buffer,0,dmgLen);
|
||||
fis.close();
|
||||
}
|
||||
return buffer;
|
||||
}
|
||||
}
|
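This is the entry point AppleCommander's Disk constructor now calls; a sketch with an assumed file name:

public class UnpackDemo {
    public static void main(String[] args) throws java.io.IOException {
        byte[] image = com.webcodepro.shrinkit.Utilities.unpackSDKFile("games.sdk"); // hypothetical path
        System.out.println("unpacked " + image.length + " bytes");
    }
}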
19
src/com/webcodepro/shrinkit/io/BitConstants.java
Normal file
@ -0,0 +1,19 @@
|
||||
package com.webcodepro.shrinkit.io;
|
||||
|
||||
/**
|
||||
* This interface allows bit-related constants to be shared among
|
||||
* classes.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public interface BitConstants {
|
||||
/**
|
||||
* The low-tech way to compute a bit mask. Allowing up to 16 bits at this time.
|
||||
*/
|
||||
public static final int[] BIT_MASKS = new int[] {
|
||||
0x0000, 0x0001, 0x0003, 0x0007, 0x000f,
|
||||
0x001f, 0x003f, 0x007f, 0x00ff, 0x01ff,
|
||||
0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff,
|
||||
0x7fff, 0xffff
|
||||
};
|
||||
}
|
90
src/com/webcodepro/shrinkit/io/BitInputStream.java
Normal file
@ -0,0 +1,90 @@
|
||||
package com.webcodepro.shrinkit.io;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
|
||||
/**
|
||||
* The BitInputStream allows varying bit sizes to be pulled out of the
|
||||
* wrapped InputStream. This is useful for LZW type compression algorithms
|
||||
* where 9-12 bit codes are used instead of the 8-bit byte.
|
||||
* <p>
|
||||
* Warning: The <code>read(byte[])</code> and <code>read(byte[], int, int)</code>
|
||||
* methods of <code>InputStream</code> will not work appropriately with any
|
||||
* bit size > 8 bits.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public class BitInputStream extends InputStream implements BitConstants {
|
||||
/** Our source of data. */
|
||||
private InputStream is;
|
||||
/** The number of bits to read for a request. This can be adjusted dynamically. */
|
||||
private int requestedNumberOfBits;
|
||||
/** The current bit mask to use when returning a <code>read()</code> request. */
|
||||
private int bitMask;
|
||||
/** The buffer containing our bits. An int allows 32 bits which should cover up to a 24 bit read if my math is correct. :-) */
|
||||
private int data = 0;
|
||||
/** Number of bits remaining in our buffer */
|
||||
private int bitsOfData = 0;
|
||||
|
||||
/**
|
||||
* Create a BitInputStream wrapping the given <code>InputStream</code>
|
||||
* and reading the number of bits specified.
|
||||
*/
|
||||
public BitInputStream(InputStream is, int startingNumberOfBits) {
|
||||
this.is = is;
|
||||
setRequestedNumberOfBits(startingNumberOfBits);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the number of bits to be read with each call to <code>read()</code>.
|
||||
*/
|
||||
public void setRequestedNumberOfBits(int numberOfBits) {
|
||||
this.requestedNumberOfBits = numberOfBits;
|
||||
this.bitMask = BIT_MASKS[numberOfBits];
|
||||
}
|
||||
|
||||
/**
|
||||
* Increase the requested number of bits by one.
|
||||
* This is the general usage and prevents client from needing to track
|
||||
* the requested number of bits or from making various method calls.
|
||||
*/
|
||||
public void increaseRequestedNumberOfBits() {
|
||||
setRequestedNumberOfBits(requestedNumberOfBits + 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* Answer with the current bit mask for the current bit size.
|
||||
*/
|
||||
public int getBitMask() {
|
||||
return bitMask;
|
||||
}
|
||||
|
||||
/**
|
||||
* Read a number of bits off of the wrapped InputStream.
|
||||
*/
|
||||
public int read() throws IOException {
|
||||
while (bitsOfData < requestedNumberOfBits) {
|
||||
int b = is.read();
|
||||
if (b == -1) return b;
|
||||
if (bitsOfData > 0) {
|
||||
b <<= bitsOfData; // We're placing b on the high-bit side
|
||||
}
|
||||
data|= b;
|
||||
bitsOfData+= 8;
|
||||
}
|
||||
int b = data & bitMask;
|
||||
data >>= requestedNumberOfBits;
|
||||
bitsOfData-= requestedNumberOfBits;
|
||||
return b;
|
||||
}
|
||||
|
||||
/**
|
||||
* When shifting from buffer to buffer, the input stream also should be reset.
|
||||
* This allows the "left over" bits to be cleared.
|
||||
*/
|
||||
public void clearRemainingBitsOfData() {
|
||||
this.bitsOfData = 0;
|
||||
this.data = 0;
|
||||
}
|
||||
}
|
||||
|
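Sketch (not part of the commit) of pulling variable-width LZW codes out of a byte stream with BitInputStream; the sample bytes are arbitrary:

import java.io.ByteArrayInputStream;
import com.webcodepro.shrinkit.io.BitInputStream;

public class BitReadDemo {
    public static void main(String[] args) throws Exception {
        byte[] raw = { (byte)0xff, 0x01, 0x00 };
        BitInputStream bits = new BitInputStream(new ByteArrayInputStream(raw), 9);
        System.out.println(bits.read());       // prints 511 (0x1FF, the low 9 bits)
        bits.increaseRequestedNumberOfBits();   // widen to 10 bits, as the LZW table grows
        System.out.println(bits.read());        // prints 0 (remaining bits)
    }
}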
98
src/com/webcodepro/shrinkit/io/BitOutputStream.java
Normal file
@ -0,0 +1,98 @@
|
||||
package com.webcodepro.shrinkit.io;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
|
||||
/**
|
||||
* The BitOutputStream allows varying bit sizes to be written to the wrapped
|
||||
* OutputStream. This is useful for LZW type compression algorithms
|
||||
* where 9-12 bit codes are used instead of the 8-bit byte.
|
||||
* <p>
|
||||
* Warning: The <code>write(byte[])</code> and <code>write(byte[], int, int)</code>
|
||||
* methods of <code>OutputStream</code> will not work appropriately with any
|
||||
* bit size > 8 bits.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public class BitOutputStream extends OutputStream implements BitConstants {
|
||||
/** Our data target. */
|
||||
private OutputStream os;
|
||||
/** The number of bits to write for a request. This can be adjusted dynamically. */
|
||||
private int requestedNumberOfBits;
|
||||
/** The current bit mask to use for a <code>write(int)</code> request. */
|
||||
private int bitMask;
|
||||
/** The buffer containing our bits. */
|
||||
private int data = 0;
|
||||
/** Number of bits remaining in our buffer */
|
||||
private int bitsOfData = 0;
|
||||
|
||||
/**
|
||||
* Create a BitOutputStream wrapping the given <code>OutputStream</code>
|
||||
* and writing the number of bits specified.
|
||||
*/
|
||||
public BitOutputStream(OutputStream os, int startingNumberOfBits) {
|
||||
this.os = os;
|
||||
setRequestedNumberOfBits(startingNumberOfBits);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the number of bits to write with each call to <code>write(int)</code>.
|
||||
*/
|
||||
public void setRequestedNumberOfBits(int numberOfBits) {
|
||||
this.requestedNumberOfBits = numberOfBits;
|
||||
this.bitMask = BIT_MASKS[numberOfBits];
|
||||
}
|
||||
|
||||
/**
|
||||
* Increase the requested number of bits by one.
|
||||
* This is the general usage and prevents client from needing to track
|
||||
* the requested number of bits or from making various method calls.
|
||||
*/
|
||||
public void increaseRequestedNumberOfBits() {
|
||||
setRequestedNumberOfBits(requestedNumberOfBits + 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* Answer with the current bit mask for the current bit size.
|
||||
*/
|
||||
public int getBitMask() {
|
||||
return bitMask;
|
||||
}
|
||||
|
||||
/**
|
||||
* Write the number of bits to the wrapped OutputStream.
|
||||
*/
|
||||
public void write(int b) throws IOException {
|
||||
b &= bitMask; // Ensure we don't have extra baggage
|
||||
b <<= bitsOfData; // Move beyond existing bits of data
|
||||
data|= b; // Add in the additional data
|
||||
bitsOfData+= requestedNumberOfBits;
|
||||
while (bitsOfData >= 8) {
|
||||
os.write(data & 0xff);
|
||||
data >>= 8;
|
||||
bitsOfData-= 8;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* When shifting from buffer to buffer, this OutputStream also should be reset.
|
||||
* This allows the "left over" bits to be cleared.
|
||||
*/
|
||||
public void clearRemainingBitsOfData() {
|
||||
this.bitsOfData = 0;
|
||||
this.data = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Close the output stream and write any remaining byte to the output.
|
||||
* Note that we may very well end up with extra bits if there are < 8
|
||||
* bits remaining.
|
||||
*/
|
||||
public void close() throws IOException {
|
||||
if (bitsOfData > 0) {
|
||||
write(0x00); // forces a flush of the remaining bits in the proper order
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
34
src/com/webcodepro/shrinkit/io/ByteConstants.java
Normal file
@ -0,0 +1,34 @@
|
||||
package com.webcodepro.shrinkit.io;
|
||||
|
||||
|
||||
/**
|
||||
* Provides constants for the LittleEndianByteInputStream and ByteTarget classes.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
* @see LittleEndianByteInputStream
|
||||
* @see ByteTarget
|
||||
*/
|
||||
public interface ByteConstants {
|
||||
/** Master Header Block identifier "magic" bytes. */
|
||||
public static final byte[] NUFILE_ID = { 0x4e, (byte)0xf5, 0x46, (byte)0xe9, 0x6c, (byte)0xe5 };
|
||||
/** Header Block identifier "magic" bytes. */
|
||||
public static final byte[] NUFX_ID = { 0x4e, (byte)0xf5, 0x46, (byte)0xd8 };
|
||||
/** Apple IIgs Toolbox TimeRec seconds byte position. */
|
||||
public static final int TIMEREC_SECOND = 0;
|
||||
/** Apple IIgs Toolbox TimeRec seconds byte position. */
|
||||
public static final int TIMEREC_MINUTE = 1;
|
||||
/** Apple IIgs Toolbox TimeRec minutes byte position. */
|
||||
public static final int TIMEREC_HOUR = 2;
|
||||
/** Apple IIgs Toolbox TimeRec hours byte position. */
|
||||
public static final int TIMEREC_YEAR = 3;
|
||||
/** Apple IIgs Toolbox TimeRec year byte position. */
|
||||
public static final int TIMEREC_DAY = 4;
|
||||
/** Apple IIgs Toolbox TimeRec day byte position. */
|
||||
public static final int TIMEREC_MONTH = 5;
|
||||
/** Apple IIgs Toolbox TimeRec weekday (Mon, Tue, etc) byte position. */
|
||||
public static final int TIMEREC_WEEKDAY = 7;
|
||||
/** Apple IIgs Toolbox TimeRec length. */
|
||||
public static final int TIMEREC_LENGTH = 8;
|
||||
/** A null TimeRec */
|
||||
public static final byte[] TIMEREC_NULL = new byte[TIMEREC_LENGTH];
|
||||
}
|
136
src/com/webcodepro/shrinkit/io/LittleEndianByteInputStream.java
Normal file
@ -0,0 +1,136 @@
|
||||
package com.webcodepro.shrinkit.io;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.Arrays;
|
||||
import java.util.Date;
|
||||
import java.util.GregorianCalendar;
|
||||
|
||||
import com.webcodepro.shrinkit.CRC16;
|
||||
|
||||
/**
|
||||
* A simple class to hide the source of byte data.
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public class LittleEndianByteInputStream extends InputStream implements ByteConstants {
|
||||
private InputStream inputStream;
|
||||
private long bytesRead = 0;
|
||||
private CRC16 crc = new CRC16();
|
||||
|
||||
/**
|
||||
* Construct a LittleEndianByteInputStream from an InputStream.
|
||||
*/
|
||||
public LittleEndianByteInputStream(InputStream inputStream) {
|
||||
this.inputStream = inputStream;
|
||||
}
|
||||
/**
|
||||
* Construct a LittleEndianByteInputStream from a byte array.
|
||||
*/
|
||||
public LittleEndianByteInputStream(byte[] data) {
|
||||
this.inputStream = new ByteArrayInputStream(data);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the next byte.
|
||||
* Returns -1 if at end of input.
|
||||
* Note that an unsigned byte needs to be returned in a larger container (ie, a short or int or long).
|
||||
*/
|
||||
public int read() throws IOException {
|
||||
int b = inputStream.read();
|
||||
if (b != -1) {
|
||||
crc.update(b);
|
||||
bytesRead++;
|
||||
}
|
||||
return b;
|
||||
}
|
||||
/**
|
||||
* Get the next byte and fail if we are at EOF.
|
||||
* Note that an unsigned byte needs to be returned in a larger container (ie, a short or int or long).
|
||||
*/
|
||||
public int readByte() throws IOException {
|
||||
int i = read();
|
||||
if (i == -1) throw new IOException("Expecting a byte but at EOF");
|
||||
return i;
|
||||
}
|
||||
/**
|
||||
* Get the next set of bytes as an array.
|
||||
* If EOF encountered, an IOException is thrown.
|
||||
*/
|
||||
public byte[] readBytes(int bytes) throws IOException {
|
||||
byte[] data = new byte[bytes];
|
||||
int read = inputStream.read(data);
|
||||
bytesRead+= read;
|
||||
if (read < bytes) {
|
||||
throw new IOException("Requested " + bytes + " bytes, but " + read + " read");
|
||||
}
|
||||
crc.update(data);
|
||||
return data;
|
||||
}
|
||||
|
||||
/**
|
||||
* Test that the NuFile id is embedded in the LittleEndianByteInputStream.
|
||||
*/
|
||||
public boolean checkNuFileId() throws IOException {
|
||||
byte[] data = readBytes(6);
|
||||
return Arrays.equals(data, NUFILE_ID);
|
||||
}
|
||||
/**
|
||||
* Test that the NuFx id is embedded in the LittleEndianByteInputStream.
|
||||
*/
|
||||
public boolean checkNuFxId() throws IOException {
|
||||
byte[] data = readBytes(4);
|
||||
return Arrays.equals(data, NUFX_ID);
|
||||
}
|
||||
/**
|
||||
* Read the two bytes in as a "Word" which needs to be stored as a Java int.
|
||||
*/
|
||||
public int readWord() throws IOException {
|
||||
return (readByte() | readByte() << 8) & 0xffff;
|
||||
}
|
||||
/**
|
||||
* Read the two bytes in as a "Long" which needs to be stored as a Java long.
|
||||
*/
|
||||
public long readLong() throws IOException {
|
||||
long a = readByte();
|
||||
long b = readByte();
|
||||
long c = readByte();
|
||||
long d = readByte();
|
||||
return (long)(a | b<<8 | c<<16 | d<<24);
|
||||
}
|
||||
/**
|
||||
* Read the TimeRec into a Java Date object.
|
||||
* Note that years 00-39 are assumed to be 2000-2039 per the NuFX addendum
|
||||
* at http://www.nulib.com/library/nufx-addendum.htm.
|
||||
* @see http://www.nulib.com/library/nufx-addendum.htm
|
||||
*/
|
||||
public Date readDate() throws IOException {
|
||||
byte[] data = readBytes(TIMEREC_LENGTH);
|
||||
if (Arrays.equals(TIMEREC_NULL, data)) return null;
|
||||
int year = data[TIMEREC_YEAR]+1900;
|
||||
if (year < 1940) year+= 100;
|
||||
GregorianCalendar gc = new GregorianCalendar(year, data[TIMEREC_MONTH]-1, data[TIMEREC_DAY],
|
||||
data[TIMEREC_HOUR], data[TIMEREC_MINUTE], data[TIMEREC_SECOND]);
|
||||
return gc.getTime();
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset the CRC-16 to $0000.
|
||||
*/
|
||||
public void resetCrc() {
|
||||
crc.reset();
|
||||
}
|
||||
/**
|
||||
* Get the current CRC-16 value.
|
||||
*/
|
||||
public long getCrcValue() {
|
||||
return crc.getValue();
|
||||
}
|
||||
|
||||
/**
|
||||
* Answer with the total number of bytes read.
|
||||
*/
|
||||
public long getTotalBytesRead() {
|
||||
return bytesRead;
|
||||
}
|
||||
}
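As a usage sketch (not part of the patch), the helpers compose naturally when walking the front of a NuFX/SDK archive. The field order below follows the NuFX master header layout, and "archive.sdk" plus the class name are placeholders.

import java.io.FileInputStream;
import java.io.IOException;
import java.util.Date;

import com.webcodepro.shrinkit.io.LittleEndianByteInputStream;

// Sketch: read the first few master-header fields of a NuFX/SDK archive.
public class MasterHeaderSketch {
	public static void main(String[] args) throws IOException {
		FileInputStream file = new FileInputStream("archive.sdk");	// placeholder path
		try {
			LittleEndianByteInputStream in = new LittleEndianByteInputStream(file);
			if (!in.checkNuFileId()) throw new IOException("Not a NuFile archive");
			int masterCrc = in.readWord();		// CRC-16 of the rest of the master header
			long totalRecords = in.readLong();	// number of records in the archive
			Date created = in.readDate();		// null if the TimeRec is all zeroes
			Date modified = in.readDate();
			int masterVersion = in.readWord();
			System.out.println(totalRecords + " record(s), master version " + masterVersion
				+ ", created " + created + ", modified " + modified);
		} finally {
			file.close();
		}
	}
}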
|
124
src/com/webcodepro/shrinkit/io/LittleEndianByteOutputStream.java
Normal file
@ -0,0 +1,124 @@
|
||||
package com.webcodepro.shrinkit.io;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.util.Calendar;
|
||||
import java.util.Date;
|
||||
import java.util.GregorianCalendar;
|
||||
|
||||
import com.webcodepro.shrinkit.CRC16;
|
||||
|
||||
/**
|
||||
* An OutputStream with helper methods to write little endian numbers
|
||||
* and other Apple-specific tidbits.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public class LittleEndianByteOutputStream extends OutputStream implements ByteConstants {
|
||||
private OutputStream outputStream;
|
||||
private long bytesWritten = 0;
|
||||
private CRC16 crc = new CRC16();
|
||||
|
||||
/**
|
||||
* Construct a LittleEndianByteOutputStream from an OutputStream.
|
||||
*/
|
||||
public LittleEndianByteOutputStream(OutputStream outputStream) {
|
||||
this.outputStream = outputStream;
|
||||
}
|
||||
|
||||
/**
 * Write the next byte, keeping the CRC-16 and the byte count up to date.
 */
public void write(int b) throws IOException {
outputStream.write(b);
crc.update(b);
bytesWritten++; // without this, getTotalBytesWritten() would always report 0
}
|
||||
|
||||
/**
|
||||
* Write the NuFile id to the LittleEndianByteOutputStream.
|
||||
*/
|
||||
public void writeNuFileId() throws IOException {
|
||||
write(NUFILE_ID);
|
||||
}
|
||||
/**
|
||||
* Write the NuFX id to the LittleEndianByteOutputStream.
|
||||
*/
|
||||
public void writeNuFxId() throws IOException {
|
||||
write(NUFX_ID);
|
||||
}
|
||||
/**
|
||||
* Write a "Word".
|
||||
*/
|
||||
public void writeWord(int w) throws IOException {
|
||||
write(w & 0xff);
|
||||
write(w >> 8);
|
||||
}
|
||||
/**
|
||||
* Write a "Long".
|
||||
*/
|
||||
public void writeLong(long l) throws IOException {
|
||||
write((int)(l & 0xff));
|
||||
write((int)((l >> 8) & 0xff));
|
||||
write((int)((l >> 16) & 0xff));
|
||||
write((int)((l >> 24) & 0xff));
|
||||
}
|
||||
/**
|
||||
* Write the Java Date object as a TimeRec.
|
||||
* Note that years 2000-2039 are written as 00-39 per the NuFX addendum
|
||||
* at http://www.nulib.com/library/nufx-addendum.htm.
|
||||
* @see http://www.nulib.com/library/nufx-addendum.htm
|
||||
*/
|
||||
public void writeDate(Date date) throws IOException {
|
||||
byte[] data = null;
|
||||
if (date == null) {
|
||||
data = TIMEREC_NULL;
|
||||
} else {
|
||||
data = new byte[TIMEREC_LENGTH];
|
||||
GregorianCalendar gc = new GregorianCalendar();
|
||||
gc.setTime(date);
|
||||
int year = gc.get(Calendar.YEAR);
|
||||
year -= (year < 2000) ? 1900 : 2000;
|
||||
data[TIMEREC_YEAR] = (byte)(year & 0xff);
|
||||
data[TIMEREC_MONTH] = (byte)(gc.get(Calendar.MONTH) + 1);
|
||||
data[TIMEREC_DAY] = (byte)gc.get(Calendar.DAY_OF_MONTH);
|
||||
data[TIMEREC_HOUR] = (byte)gc.get(Calendar.HOUR_OF_DAY);
|
||||
data[TIMEREC_MINUTE] = (byte)gc.get(Calendar.MINUTE);
|
||||
data[TIMEREC_SECOND] = (byte)gc.get(Calendar.SECOND);
|
||||
data[TIMEREC_WEEKDAY] = (byte)gc.get(Calendar.DAY_OF_WEEK);
|
||||
}
|
||||
write(data);
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset the CRC-16 to $0000.
|
||||
*/
|
||||
public void resetCrc() {
|
||||
crc.reset();
|
||||
}
|
||||
/**
|
||||
* Get the current CRC-16 value.
|
||||
*/
|
||||
public long getCrcValue() {
|
||||
return crc.getValue();
|
||||
}
|
||||
|
||||
/**
|
||||
* Answer with the total number of bytes written.
|
||||
*/
|
||||
public long getTotalBytesWritten() {
|
||||
return bytesWritten;
|
||||
}
|
||||
|
||||
/**
|
||||
* Pass the flush request to the wrapped stream.
|
||||
*/
|
||||
public void flush() throws IOException {
|
||||
outputStream.flush();
|
||||
}
|
||||
/**
|
||||
* Pass the close request to the wrapped stream.
|
||||
*/
|
||||
public void close() throws IOException {
|
||||
outputStream.close();
|
||||
}
|
||||
}
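A small round-trip sketch (not part of the patch) shows the output helpers mirroring the input side; the class name is a placeholder.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Date;

import com.webcodepro.shrinkit.io.LittleEndianByteInputStream;
import com.webcodepro.shrinkit.io.LittleEndianByteOutputStream;

// Sketch: write a word, a long and a date, then read them back.
public class LittleEndianRoundTrip {
	public static void main(String[] args) throws IOException {
		ByteArrayOutputStream buffer = new ByteArrayOutputStream();
		LittleEndianByteOutputStream out = new LittleEndianByteOutputStream(buffer);
		out.writeWord(0x1234);			// emitted as $34 $12
		out.writeLong(0x01020304L);		// emitted as $04 $03 $02 $01
		out.writeDate(new Date());		// emitted as an 8-byte TimeRec
		out.flush();

		LittleEndianByteInputStream in = new LittleEndianByteInputStream(buffer.toByteArray());
		System.out.println(Integer.toHexString(in.readWord()));	// 1234
		System.out.println(Long.toHexString(in.readLong()));		// 1020304
		System.out.println(in.readDate());				// today, to one-second precision
	}
}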
|
136
src/com/webcodepro/shrinkit/io/LzwInputStream.java
Normal file
@ -0,0 +1,136 @@
|
||||
package com.webcodepro.shrinkit.io;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Queue;
|
||||
import java.util.concurrent.ConcurrentLinkedQueue;
|
||||
|
||||
/**
|
||||
* This is the generic Shrinkit LZW decompression algorithm.
|
||||
* It does not deal with the vagaries of the LZW/1 and LZW/2 data streams.
|
||||
* It does, however, deal with dictionary clears (0x100) and the
|
||||
* <code>BitInputStream</code> bit sizes.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public class LzwInputStream extends InputStream {
|
||||
private BitInputStream is;
|
||||
private List<int[]> dictionary;
|
||||
private Queue<Integer> outputBuffer = new ConcurrentLinkedQueue<Integer>();
|
||||
private boolean newBuffer = true;
|
||||
// See Wikipedia entry on LZW for variable naming
|
||||
private int k;
|
||||
private int[] w;
|
||||
private int[] entry;
|
||||
|
||||
/**
|
||||
* Create the <code>LzwInputStream</code> based on the given
|
||||
* <code>BitInputStream</code>.
|
||||
* @see BitInputStream
|
||||
*/
|
||||
public LzwInputStream(BitInputStream is) {
|
||||
this.is = is;
|
||||
}
|
||||
|
||||
/**
|
||||
* Answer with the next byte from the (now) decompressed input stream.
|
||||
*/
|
||||
public int read() throws IOException {
|
||||
if (outputBuffer.isEmpty()) {
|
||||
fillBuffer();
|
||||
}
|
||||
return outputBuffer.remove();
|
||||
}
|
||||
|
||||
/**
|
||||
* Fill the buffer up with some decompressed data.
|
||||
* This may range from one byte to many bytes, depending on what is in the
|
||||
* dictionary.
|
||||
* @see http://en.wikipedia.org/wiki/Lzw for the general algorithm
|
||||
*/
|
||||
public void fillBuffer() throws IOException {
|
||||
if (dictionary == null) {
|
||||
is.setRequestedNumberOfBits(9);
|
||||
// Setup default dictionary for all bytes
|
||||
dictionary = new ArrayList<int[]>();
|
||||
for (short i=0; i<256; i++) dictionary.add(new int[] { i });
|
||||
dictionary.add(new int[] { 0x100 }); // 0x100 not used by NuFX
|
||||
}
|
||||
if (newBuffer) {
|
||||
// Setup for decompression;
|
||||
k = is.read();
|
||||
outputBuffer.add(k);
|
||||
if (k == -1) return;
|
||||
w = new int[] { k };
|
||||
newBuffer = false;
|
||||
}
|
||||
// LZW decompression
|
||||
k = is.read();
|
||||
if (k == -1) {
|
||||
outputBuffer.add(k);
|
||||
return;
|
||||
}
|
||||
if (k == 0x100) {
|
||||
dictionary = null;
|
||||
is.setRequestedNumberOfBits(9);
|
||||
k = 0;
|
||||
w = null;
|
||||
entry = null;
|
||||
newBuffer = true;
|
||||
fillBuffer(); // Warning: recursive call
|
||||
return;
|
||||
}
|
||||
if (k < dictionary.size()) {
|
||||
entry = dictionary.get(k);
|
||||
} else if (k == dictionary.size()) {
|
||||
//entry = Arrays.copyOf(w, w.length+1);
|
||||
entry = new int[w.length+1];
|
||||
System.arraycopy(w, 0, entry, 0, w.length);
|
||||
entry[w.length] = w[0];
|
||||
} else {
|
||||
throw new IOException("Invalid code of <" + k + "> encountered");
|
||||
}
|
||||
for (int i : entry) outputBuffer.add(i);
|
||||
//int[] newEntry = Arrays.copyOf(w, w.length+1);
|
||||
int[] newEntry = new int[w.length+1];
|
||||
System.arraycopy(w, 0, newEntry, 0, w.length);
|
||||
newEntry[w.length] = entry[0];
|
||||
dictionary.add(newEntry);
|
||||
w = entry;
|
||||
// Exclusive-OR the current bitmask against the new dictionary size -- if all bits are
|
||||
// on, we'll get 0. (That is, all 9 bits on is 0x01ff exclusive or bit mask of 0x01ff
|
||||
// yields 0x0000.) This tells us we need to increase the number of bits we're pulling
|
||||
// from the bit stream.
|
||||
if ((dictionary.size() ^ is.getBitMask()) == 0) {
|
||||
is.increaseRequestedNumberOfBits();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear out the dictionary. It will be rebuilt on the next call to
|
||||
* <code>fillBuffer</code>.
|
||||
*/
|
||||
public void clearDictionary() {
|
||||
dictionary = null;
|
||||
is.setRequestedNumberOfBits(9);
|
||||
is.clearRemainingBitsOfData();
|
||||
outputBuffer.clear();
|
||||
k = 0;
|
||||
w = null;
|
||||
entry = null;
|
||||
newBuffer = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Provide necessary housekeeping to reset LZW stream between NuFX buffer changes.
|
||||
* The dictionary is the only item that is not cleared -- that needs to be done
|
||||
* explicitly since behavior between LZW/1 and LZW/2 differ.
|
||||
*/
|
||||
public void clearData() {
|
||||
is.clearRemainingBitsOfData();
|
||||
outputBuffer.clear();
|
||||
}
|
||||
}
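The exclusive-OR test in fillBuffer() is compact enough to deserve a worked example; the values below are hypothetical and simply restate the check.

// Illustration: the bit-width growth check used in fillBuffer() above.
public class BitWidthCheckSketch {
	public static void main(String[] args) {
		int bitMask = 0x01ff;		// BitInputStream.getBitMask() while 9-bit codes are being read
		int dictionarySize = 0x01ff;	// 511 entries: the 9-bit code space is about to run out
		if ((dictionarySize ^ bitMask) == 0) {
			// the real stream calls is.increaseRequestedNumberOfBits() at this point
			System.out.println("switch to 10-bit codes");
		}
	}
}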
|
96
src/com/webcodepro/shrinkit/io/LzwOutputStream.java
Normal file
@ -0,0 +1,96 @@
|
||||
package com.webcodepro.shrinkit.io;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import com.webcodepro.shrinkit.CRC16;
|
||||
|
||||
/**
|
||||
* This is the generic Shrinkit LZW compression algorithm.
|
||||
* It does not deal with the vagaries of the LZW/1 and LZW/2 data streams.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public class LzwOutputStream extends OutputStream {
|
||||
private BitOutputStream os;
|
||||
private Map<ByteArray,Integer> dictionary = new HashMap<ByteArray,Integer>();
|
||||
private int[] w = new int[0];
|
||||
private int nextCode = 0x101;
|
||||
|
||||
/**
|
||||
* This simple class can be used as a key into a Map.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
private class ByteArray {
|
||||
/** Data being managed. */
|
||||
private int[] data;
|
||||
/** The computed hash code -- CRC-16 for lack of imagination. */
|
||||
private int hashCode;
|
||||
|
||||
public ByteArray(int d) {
|
||||
this(new int[] { d });
|
||||
}
|
||||
public ByteArray(int[] data) {
|
||||
this.data = data;
|
||||
CRC16 crc = new CRC16();
|
||||
for (int b : data) crc.update(b);
|
||||
hashCode = (int)crc.getValue();
|
||||
}
|
||||
public boolean equals(Object obj) {
|
||||
ByteArray ba = (ByteArray)obj;
|
||||
if (data.length != ba.data.length) return false;
|
||||
for (int i=0; i<data.length; i++) {
|
||||
if (data[i] != ba.data[i]) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
public int hashCode() {
|
||||
return hashCode;
|
||||
}
|
||||
}
|
||||
|
||||
public LzwOutputStream(BitOutputStream os) {
|
||||
this.os = os;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(int c) throws IOException {
|
||||
if (dictionary.isEmpty()) {
|
||||
for (int i=0; i<256; i++) dictionary.put(new ByteArray(i), i);
|
||||
dictionary.put(new ByteArray(0x100), null); // just to mark its spot
|
||||
}
|
||||
c &= 0xff;
|
||||
int[] wc = new int[w.length + 1];
|
||||
if (w.length > 0) System.arraycopy(w, 0, wc, 0, w.length);
|
||||
wc[wc.length-1]= c;
|
||||
if (dictionary.containsKey(new ByteArray(wc))) {
|
||||
w = wc;
|
||||
} else {
|
||||
dictionary.put(new ByteArray(wc), nextCode++);
|
||||
os.write(dictionary.get(new ByteArray(w)));
|
||||
w = new int[] { c };
|
||||
}
|
||||
// Exclusive-OR the current bitmask against the new dictionary size -- if all bits are
|
||||
// on, we'll get 0. (That is, all 9 bits on is 0x01ff exclusive or bit mask of 0x01ff
|
||||
// yields 0x0000.) This tells us we need to increase the number of bits we're writing
|
||||
// to the bit stream.
|
||||
if ((dictionary.size() ^ os.getBitMask()) == 0) {
|
||||
os.increaseRequestedNumberOfBits();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void flush() throws IOException {
|
||||
os.write(dictionary.get(new ByteArray(w)));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
flush();
|
||||
os.flush();
|
||||
os.close();
|
||||
}
|
||||
}
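A round-trip sketch (not part of the patch) pairs this class with LzwInputStream. The class name is a placeholder, and the BitOutputStream constructor is assumed to mirror BitInputStream's (stream, starting bit size) signature.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import com.webcodepro.shrinkit.io.BitInputStream;
import com.webcodepro.shrinkit.io.BitOutputStream;
import com.webcodepro.shrinkit.io.LzwInputStream;
import com.webcodepro.shrinkit.io.LzwOutputStream;

// Sketch: compress a small byte string and expand it again.
public class LzwRoundTripSketch {
	public static void main(String[] args) throws IOException {
		byte[] original = "ABABABABAB, a string with a little repetition".getBytes();

		ByteArrayOutputStream compressed = new ByteArrayOutputStream();
		// Assumption: BitOutputStream(OutputStream, int startingBitSize), mirroring BitInputStream.
		LzwOutputStream lzwOut = new LzwOutputStream(new BitOutputStream(compressed, 9));
		for (int i = 0; i < original.length; i++) lzwOut.write(original[i]);
		lzwOut.close();			// emits the final code and flushes any pending bits

		LzwInputStream lzwIn = new LzwInputStream(
			new BitInputStream(new ByteArrayInputStream(compressed.toByteArray()), 9));
		byte[] expanded = new byte[original.length];
		for (int i = 0; i < expanded.length; i++) expanded[i] = (byte) lzwIn.read();
		System.out.println(new String(expanded));
	}
}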
|
144
src/com/webcodepro/shrinkit/io/NufxLzw1InputStream.java
Normal file
@ -0,0 +1,144 @@
|
||||
package com.webcodepro.shrinkit.io;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
|
||||
import com.webcodepro.shrinkit.CRC16;
|
||||
|
||||
/**
|
||||
* The <code>NufxLzw1InputStream</code> reads a data fork or
|
||||
* resource fork written in the NuFX LZW/1 format.
|
||||
* <p>
|
||||
* The layout of the LZW/1 data is as follows:
|
||||
* <table border="0">
|
||||
* <tr>
|
||||
* <th colspan="3">"Fork" Header</th>
|
||||
* </tr><tr>
|
||||
* <td>+0</td>
|
||||
* <td>Word</td>
|
||||
* <td>CRC-16 of the uncompressed data within the thread</td>
|
||||
* </tr><tr>
|
||||
* <td>+2</td>
|
||||
* <td>Byte</td>
|
||||
* <td>Low-level volume number use to format 5.25" disks</td>
|
||||
* </tr><tr>
|
||||
* <td>+3</td>
|
||||
* <td>Byte</td>
|
||||
* <td>RLE character used to decode this thread</td>
|
||||
* </tr><tr>
|
||||
* <th colspan="3">Each subsequent 4K chunk of data</th>
|
||||
* </tr><tr>
|
||||
* <td>+0</td>
|
||||
* <td>Word</td>
|
||||
* <td>Length after RLE compression (if RLE is not used, length
|
||||
* will be 4096)</td>
|
||||
* </tr><tr>
|
||||
* <td>+2</td>
|
||||
* <td>Byte</td>
|
||||
* <td>A $01 indicates LZW applied to this chunk; $00 that LZW
|
||||
* <b>was not</b> applied to this chunk</td>
|
||||
* </tr>
|
||||
* </table>
|
||||
* <p>
|
||||
* Note that the LZW string table is <em>cleared</em> after
|
||||
* every chunk.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public class NufxLzw1InputStream extends InputStream {
|
||||
/** This is the raw data stream with all markers and compressed data. */
|
||||
private LittleEndianByteInputStream dataStream;
|
||||
/** Used for an LZW-only <code>InputStream</code>. */
|
||||
private LzwInputStream lzwStream;
|
||||
/** Used for an RLE-only <code>InputStream</code>. */
|
||||
private RleInputStream rleStream;
|
||||
/** Used for an LZW+RLE <code>InputStream</code>. */
|
||||
private InputStream lzwRleStream;
|
||||
/** This is the generic decompression stream from which we read. */
|
||||
private InputStream decompressionStream;
|
||||
/** Number of bytes remaining in the current 4096 byte chunk. */
|
||||
private int bytesLeftInChunk;
|
||||
/** This is the CRC-16 for the uncompressed fork. */
|
||||
private int givenCrc = -1;
|
||||
/** This is the volume number for 5.25" disks. */
|
||||
private int volumeNumber;
|
||||
/** This is the RLE character to use. */
|
||||
private int rleCharacter;
|
||||
/** Used to track the CRC of data we've extracted */
|
||||
private CRC16 dataCrc = new CRC16();
|
||||
|
||||
/**
|
||||
* Create the LZW/1 input stream.
|
||||
*/
|
||||
public NufxLzw1InputStream(LittleEndianByteInputStream dataStream) {
|
||||
this.dataStream = dataStream;
|
||||
}
|
||||
|
||||
/**
|
||||
* Read the next byte in the decompressed data stream.
|
||||
*/
|
||||
public int read() throws IOException {
|
||||
if (givenCrc == -1) { // read the data or resource fork header
|
||||
givenCrc = dataStream.readWord();
|
||||
volumeNumber = dataStream.readByte();
|
||||
rleCharacter = dataStream.readByte();
|
||||
lzwStream = new LzwInputStream(new BitInputStream(dataStream, 9));
|
||||
rleStream = new RleInputStream(dataStream, rleCharacter);
|
||||
lzwRleStream = new RleInputStream(lzwStream);
|
||||
}
|
||||
if (bytesLeftInChunk == 0) { // read the chunk header
|
||||
bytesLeftInChunk = 4096; // NuFX always reads 4096 bytes
|
||||
lzwStream.clearDictionary(); // Always clear dictionary
|
||||
int length = dataStream.readWord();
|
||||
int lzwFlag = dataStream.readByte();
|
||||
int flag = lzwFlag + (length == 4096 ? 0 : 2);
|
||||
switch (flag) {
|
||||
case 0: decompressionStream = dataStream;
|
||||
break;
|
||||
case 1: decompressionStream = lzwStream;
|
||||
break;
|
||||
case 2: decompressionStream = rleStream;
|
||||
break;
|
||||
case 3: decompressionStream = lzwRleStream;
|
||||
break;
|
||||
default: throw new IOException("Unknown type of decompression, flag = " + flag);
|
||||
}
|
||||
}
|
||||
// Now we can read a data byte
|
||||
int b = decompressionStream.read();
|
||||
bytesLeftInChunk--;
|
||||
dataCrc.update(b);
|
||||
return b;
|
||||
}
|
||||
|
||||
/**
|
||||
* Indicates if the computed CRC matches the CRC given in the data stream.
|
||||
*/
|
||||
public boolean isCrcValid() {
|
||||
return givenCrc == dataCrc.getValue();
|
||||
}
|
||||
|
||||
// GENERATED CODE
|
||||
|
||||
public int getGivenCrc() {
|
||||
return givenCrc;
|
||||
}
|
||||
public void setGivenCrc(int givenCrc) {
|
||||
this.givenCrc = givenCrc;
|
||||
}
|
||||
public int getVolumeNumber() {
|
||||
return volumeNumber;
|
||||
}
|
||||
public void setVolumeNumber(int volumeNumber) {
|
||||
this.volumeNumber = volumeNumber;
|
||||
}
|
||||
public int getRleCharacter() {
|
||||
return rleCharacter;
|
||||
}
|
||||
public void setRleCharacter(int rleCharacter) {
|
||||
this.rleCharacter = rleCharacter;
|
||||
}
|
||||
public long getDataCrc() {
|
||||
return dataCrc.getValue();
|
||||
}
|
||||
}
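A usage sketch (not part of the patch): expanding one LZW/1 fork once the raw thread bytes and the uncompressed length have been pulled out of the archive's records elsewhere. The class, method and parameter names are placeholders.

import java.io.IOException;

import com.webcodepro.shrinkit.io.LittleEndianByteInputStream;
import com.webcodepro.shrinkit.io.NufxLzw1InputStream;

// Sketch: expand an LZW/1 compressed data or resource fork and verify its CRC.
public class Lzw1ExpandSketch {
	public static byte[] expand(byte[] compressedThreadData, int uncompressedLength) throws IOException {
		NufxLzw1InputStream in = new NufxLzw1InputStream(
				new LittleEndianByteInputStream(compressedThreadData));
		byte[] fork = new byte[uncompressedLength];
		for (int i = 0; i < fork.length; i++) {
			fork[i] = (byte) in.read();
		}
		if (!in.isCrcValid()) {
			throw new IOException("CRC mismatch while expanding LZW/1 fork");
		}
		return fork;
	}
}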
|
131
src/com/webcodepro/shrinkit/io/NufxLzw2InputStream.java
Normal file
@ -0,0 +1,131 @@
|
||||
package com.webcodepro.shrinkit.io;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
|
||||
import com.webcodepro.shrinkit.CRC16;
|
||||
|
||||
/**
|
||||
* The <code>NufxLzw2InputStream</code> reads a data fork or
|
||||
* resource fork written in the NuFX LZW/2 format.
|
||||
* <p>
|
||||
* The layout of the LZW/2 data is as follows:
|
||||
* <table border="0">
|
||||
* <tr>
|
||||
* <th colspan="3">"Fork" Header</th>
|
||||
* </tr><tr>
|
||||
* <td>+0</td>
|
||||
* <td>Byte</td>
|
||||
* <td>Low-level volume number used to format 5.25" disks</td>
|
||||
* </tr><tr>
|
||||
* <td>+1</td>
|
||||
* <td>Byte</td>
|
||||
* <td>RLE character used to decode this thread</td>
|
||||
* </tr><tr>
|
||||
* <th colspan="3">Each subsequent 4K chunk of data</th>
|
||||
* </tr><tr>
|
||||
* <td>+0</td>
|
||||
* <td>Word</td>
|
||||
* <td>Bits 0-12: Length after RLE compression<br/>
|
||||
* Bit 15: LZW flag (set to 1 if LZW used)</td>
|
||||
* </tr><tr>
|
||||
* <td>+2</td>
|
||||
* <td>Word</td>
|
||||
* <td>If LZW flag = 1, total bytes in chunk<br/>
|
||||
* Else (flag = 0) start of data</td>
|
||||
* </tr>
|
||||
* </table>
|
||||
* <p>
|
||||
* The LZW/2 dictionary is only cleared when the table becomes full, which is indicated
|
||||
* in the input stream by 0x100. It is also cleared whenever a chunk that is not
|
||||
* LZW encoded is encountered.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public class NufxLzw2InputStream extends InputStream {
|
||||
/** This is the raw data stream with all markers and compressed data. */
|
||||
private LittleEndianByteInputStream dataStream;
|
||||
/** Used for an LZW-only <code>InputStream</code>. */
|
||||
private LzwInputStream lzwStream;
|
||||
/** Used for an RLE-only <code>InputStream</code>. */
|
||||
private RleInputStream rleStream;
|
||||
/** Used for an LZW+RLE <code>InputStream</code>. */
|
||||
private InputStream lzwRleStream;
|
||||
/** This is the generic decompression stream from which we read. */
|
||||
private InputStream decompressionStream;
|
||||
/** Number of bytes remaining in the current 4096 byte chunk. */
|
||||
private int bytesLeftInChunk;
|
||||
/** This is the volume number for 5.25" disks. */
|
||||
private int volumeNumber = -1;
|
||||
/** This is the RLE character to use. */
|
||||
private int rleCharacter;
|
||||
/** Used to track the CRC of data we've extracted */
|
||||
private CRC16 dataCrc = new CRC16();
|
||||
|
||||
/**
|
||||
* Create the LZW/2 input stream.
|
||||
*/
|
||||
public NufxLzw2InputStream(LittleEndianByteInputStream dataStream) {
|
||||
this.dataStream = dataStream;
|
||||
}
|
||||
|
||||
/**
|
||||
* Read the next byte in the decompressed data stream.
|
||||
*/
|
||||
public int read() throws IOException {
|
||||
if (volumeNumber == -1) { // read the data or resource fork header
|
||||
volumeNumber = dataStream.readByte();
|
||||
rleCharacter = dataStream.readByte();
|
||||
lzwStream = new LzwInputStream(new BitInputStream(dataStream, 9));
|
||||
rleStream = new RleInputStream(dataStream, rleCharacter);
|
||||
lzwRleStream = new RleInputStream(lzwStream);
|
||||
}
|
||||
if (bytesLeftInChunk == 0) { // read the chunk header
|
||||
bytesLeftInChunk = 4096; // NuFX always reads 4096 bytes
|
||||
lzwStream.clearData(); // Allow the LZW stream to do a little housekeeping
|
||||
int word = dataStream.readWord();
|
||||
int length = word & 0x7fff;
|
||||
int lzwFlag = word & 0x8000;
|
||||
if (lzwFlag == 0) { // We clear dictionary whenever a non-LZW chunk is encountered
|
||||
lzwStream.clearDictionary();
|
||||
} else {
|
||||
dataStream.readWord(); // At this time, I just throw away the total bytes in this chunk...
|
||||
}
|
||||
int flag = (lzwFlag == 0 ? 0 : 1) + (length == 4096 ? 0 : 2);
|
||||
switch (flag) {
|
||||
case 0: decompressionStream = dataStream;
|
||||
break;
|
||||
case 1: decompressionStream = lzwStream;
|
||||
break;
|
||||
case 2: decompressionStream = rleStream;
|
||||
break;
|
||||
case 3: decompressionStream = lzwRleStream;
|
||||
break;
|
||||
default: throw new IOException("Unknown type of decompression, flag = " + flag);
|
||||
}
|
||||
}
|
||||
// Now we can read a data byte
|
||||
int b = decompressionStream.read();
|
||||
bytesLeftInChunk--;
|
||||
dataCrc.update(b);
|
||||
return b;
|
||||
}
|
||||
|
||||
// GENERATED CODE
|
||||
|
||||
public int getVolumeNumber() {
|
||||
return volumeNumber;
|
||||
}
|
||||
public void setVolumeNumber(int volumeNumber) {
|
||||
this.volumeNumber = volumeNumber;
|
||||
}
|
||||
public int getRleCharacter() {
|
||||
return rleCharacter;
|
||||
}
|
||||
public void setRleCharacter(int rleCharacter) {
|
||||
this.rleCharacter = rleCharacter;
|
||||
}
|
||||
public long getDataCrc() {
|
||||
return dataCrc.getValue();
|
||||
}
|
||||
}
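For clarity, the chunk-header word handling in read() above unpacks like this; the word value is hypothetical and the class name is a placeholder.

// Illustration: unpacking an LZW/2 chunk-header word the same way read() does.
public class Lzw2ChunkHeaderSketch {
	public static void main(String[] args) {
		int word = 0x8ffe;				// hypothetical header word
		int length = word & 0x7fff;			// length after RLE (4096 means RLE was not used)
		boolean lzwUsed = (word & 0x8000) != 0;		// high bit set: the chunk is LZW compressed
		System.out.println("length=" + length + " lzw=" + lzwUsed);
	}
}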
|
53
src/com/webcodepro/shrinkit/io/RleInputStream.java
Normal file
@ -0,0 +1,53 @@
|
||||
package com.webcodepro.shrinkit.io;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
|
||||
|
||||
/**
|
||||
* The RleInputStream handles the NuFX RLE data stream.
|
||||
* This data stream is byte oriented. If a repeat occurs,
|
||||
* the data stream will contain the marker byte, byte to
|
||||
* repeat, and the number of repeats (zero based; ie, $00=1,
|
||||
* $01=2, ... $ff=256). The default marker is $DB.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public class RleInputStream extends InputStream {
|
||||
private InputStream bs;
|
||||
private int escapeChar;
|
||||
private int repeatedByte;
|
||||
private int numBytes = -1;
|
||||
|
||||
/**
|
||||
* Create an RLE input stream with the default marker byte.
|
||||
*/
|
||||
public RleInputStream(InputStream bs) {
|
||||
this(bs, 0xdb);
|
||||
}
|
||||
/**
|
||||
* Create an RLE input stream with the specified marker byte.
|
||||
*/
|
||||
public RleInputStream(InputStream bs, int escapeChar) {
|
||||
this.bs = bs;
|
||||
this.escapeChar = escapeChar;
|
||||
}
|
||||
|
||||
/**
|
||||
* Read the next byte from the input stream.
|
||||
*/
|
||||
public int read() throws IOException {
|
||||
if (numBytes == -1) {
|
||||
int b = bs.read();
|
||||
if (b == escapeChar) {
|
||||
repeatedByte = bs.read();
|
||||
numBytes = bs.read();
|
||||
} else {
|
||||
return b;
|
||||
}
|
||||
}
|
||||
numBytes--;
|
||||
return repeatedByte;
|
||||
}
|
||||
|
||||
}
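A small decoding sketch (not part of the patch) using the default $DB marker: $DB $00 $03 expands to four $00 bytes because the repeat count is zero based. The class name is a placeholder.

import java.io.ByteArrayInputStream;
import java.io.IOException;

import com.webcodepro.shrinkit.io.RleInputStream;

// Sketch: decode a tiny RLE sequence.
public class RleDecodeSketch {
	public static void main(String[] args) throws IOException {
		byte[] encoded = { 0x01, (byte) 0xdb, 0x00, 0x03, 0x02 };
		RleInputStream in = new RleInputStream(new ByteArrayInputStream(encoded));
		for (int i = 0; i < 6; i++) {			// expands to 01 00 00 00 00 02
			System.out.printf("%02x ", in.read());
		}
		System.out.println();
	}
}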
|
82
src/com/webcodepro/shrinkit/io/RleOutputStream.java
Normal file
@ -0,0 +1,82 @@
|
||||
package com.webcodepro.shrinkit.io;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
|
||||
/**
|
||||
* The RleOutputStream handles the NuFX RLE data stream.
|
||||
* This data stream is byte oriented. If a repeat occurs,
|
||||
* the data stream will contain the marker byte, byte to
|
||||
* repeat, and the number of repeats (zero based; ie, $00=1,
|
||||
* $01=2, ... $ff=256). The default marker is $DB.
|
||||
*
|
||||
* @author robgreene@users.sourceforge.net
|
||||
*/
|
||||
public class RleOutputStream extends OutputStream {
|
||||
private OutputStream os;
|
||||
private int escapeChar;
|
||||
private int repeatedByte;
|
||||
private int numBytes = -1;
|
||||
|
||||
/**
|
||||
* Create an RLE output stream with the default marker byte.
|
||||
*/
|
||||
public RleOutputStream(OutputStream bs) {
|
||||
this(bs, 0xdb);
|
||||
}
|
||||
/**
|
||||
* Create an RLE output stream with the specified marker byte.
|
||||
*/
|
||||
public RleOutputStream(OutputStream os, int escapeChar) {
|
||||
this.os = os;
|
||||
this.escapeChar = escapeChar;
|
||||
}
|
||||
|
||||
/**
|
||||
* Write the next byte to the output stream.
|
||||
*/
|
||||
public void write(int b) throws IOException {
|
||||
if (numBytes == -1) {
|
||||
repeatedByte = b;
|
||||
numBytes++;
|
||||
} else if (repeatedByte == b) {
|
||||
numBytes++;
|
||||
if (numBytes > 255) {
|
||||
flush();
|
||||
}
|
||||
} else {
|
||||
flush();
|
||||
repeatedByte = b;
|
||||
numBytes++;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Flush out any remaining data.
|
||||
* If we only have 1 byte and it is <em>not</em> the repeated
|
||||
* byte, we can just dump that byte. Otherwise, we need to
|
||||
* write out the escape character, the repeated byte, and
|
||||
* the number of bytes.
|
||||
*/
|
||||
public void flush() throws IOException {
|
||||
if (numBytes != -1) {
|
||||
if (numBytes == 0 && escapeChar != repeatedByte) {
|
||||
os.write(repeatedByte);
|
||||
} else {
|
||||
os.write(escapeChar);
|
||||
os.write(repeatedByte);
|
||||
os.write(numBytes);
|
||||
}
|
||||
numBytes = -1;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Close out the data stream. Makes sure the repeat buffer
|
||||
* is flushed.
|
||||
*/
|
||||
public void close() throws IOException {
|
||||
flush();
|
||||
os.close();
|
||||
}
|
||||
}
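And the matching encoding sketch (not part of the patch): a run of four $00 bytes collapses to $DB $00 $03, while isolated bytes pass through untouched. The class name is a placeholder.

import java.io.ByteArrayOutputStream;
import java.io.IOException;

import com.webcodepro.shrinkit.io.RleOutputStream;

// Sketch: encode the six bytes from the decoding example above.
public class RleEncodeSketch {
	public static void main(String[] args) throws IOException {
		byte[] raw = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x02 };
		ByteArrayOutputStream encoded = new ByteArrayOutputStream();
		RleOutputStream out = new RleOutputStream(encoded);
		for (int i = 0; i < raw.length; i++) out.write(raw[i]);
		out.close();					// flushes the pending run
		byte[] bytes = encoded.toByteArray();
		for (int i = 0; i < bytes.length; i++) {
			System.out.printf("%02x ", bytes[i]);	// prints: 01 db 00 03 02
		}
		System.out.println();
	}
}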
|