From b5117c3dfe091581d52f4b0a9e8a580d098b4714 Mon Sep 17 00:00:00 2001 From: Emmanuel Marty Date: Fri, 11 Oct 2019 00:25:46 +0200 Subject: [PATCH 01/17] Fixes for -stats --- src/lzsa.c | 1 + src/shrink_context.c | 2 ++ 2 files changed, 3 insertions(+) diff --git a/src/lzsa.c b/src/lzsa.c index 2574562..f9bcfcb 100755 --- a/src/lzsa.c +++ b/src/lzsa.c @@ -1073,6 +1073,7 @@ int main(int argc, char **argv) { fprintf(stderr, " -cbench: benchmark in-memory compression\n"); fprintf(stderr, " -dbench: benchmark in-memory decompression\n"); fprintf(stderr, " -test: run automated self-tests\n"); + fprintf(stderr, " -stats: show compressed data stats\n"); fprintf(stderr, " -v: be verbose\n"); fprintf(stderr, " -f : LZSA compression format (1-2)\n"); fprintf(stderr, " -r: raw block format (max. 64 Kb files)\n"); diff --git a/src/shrink_context.c b/src/shrink_context.c index e1cc9c6..f3ec444 100644 --- a/src/shrink_context.c +++ b/src/shrink_context.c @@ -74,6 +74,8 @@ int lzsa_compressor_init(lzsa_compressor *pCompressor, const int nMaxWindowSize, pCompressor->stats.min_literals = -1; pCompressor->stats.min_match_len = -1; pCompressor->stats.min_offset = -1; + pCompressor->stats.min_rle1_len = -1; + pCompressor->stats.min_rle2_len = -1; if (!nResult) { pCompressor->intervals = (unsigned int *)malloc(nMaxWindowSize * sizeof(unsigned int)); From baa53f68898358ae1cf3f59e3c5c11b4b3aac3ec Mon Sep 17 00:00:00 2001 From: Emmanuel Marty Date: Fri, 11 Oct 2019 09:05:58 +0200 Subject: [PATCH 02/17] Newly compressed LZSA2 files depack 0.7% faster --- src/shrink_block_v2.c | 147 +++++++++++++++++++++++++++++++++++++----- src/shrink_context.c | 25 ++++++- src/shrink_context.h | 3 + 3 files changed, 159 insertions(+), 16 deletions(-) diff --git a/src/shrink_block_v2.c b/src/shrink_block_v2.c index 96452c4..4d2ff7d 100644 --- a/src/shrink_block_v2.c +++ b/src/shrink_block_v2.c @@ -183,12 +183,12 @@ static inline int lzsa_write_match_varlen_v2(unsigned char *pOutData, int nOutOf * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes */ -static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, const int nStartOffset, const int nEndOffset) { +static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset, const int nReduce) { lzsa_arrival *arrival = pCompressor->arrival; const int nFavorRatio = (pCompressor->flags & LZSA_FLAG_FAVOR_RATIO) ? 1 : 0; const int nMinMatchSize = pCompressor->min_match_size; + const int nDisableScore = nReduce ? 
0 : (2 * BLOCK_SIZE); int i, j, n; - lzsa_match match[32]; memset(arrival + (nStartOffset << MATCHES_PER_OFFSET_SHIFT), 0, sizeof(lzsa_arrival) * ((nEndOffset - nStartOffset) << MATCHES_PER_OFFSET_SHIFT)); @@ -199,7 +199,7 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne arrival[nStartOffset << MATCHES_PER_OFFSET_SHIFT].from_slot = -1; for (i = nStartOffset; i != (nEndOffset - 1); i++) { - int m, nMatches; + int m; for (j = 0; j < NMATCHES_PER_OFFSET && arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].from_slot; j++) { const int nPrevCost = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].cost & 0x3fffffff; @@ -232,9 +232,11 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne } if (!exists) { + int nScore = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].score + 1; for (n = 0; n < NMATCHES_PER_OFFSET; n++) { lzsa_arrival *pDestArrival = &pDestSlots[n]; - if (nCodingChoiceCost <= pDestArrival->cost) { + if (nCodingChoiceCost < pDestArrival->cost || + (nCodingChoiceCost == pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { if (pDestArrival->from_slot) { memmove(&pDestSlots[n + 1], @@ -248,6 +250,7 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne pDestArrival->match_offset = 0; pDestArrival->match_len = 0; pDestArrival->num_literals = nNumLiterals; + pDestArrival->score = nScore; pDestArrival->rep_offset = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_offset; break; } @@ -256,9 +259,9 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne } } - nMatches = lzsa_find_matches_at(pCompressor, i, match, 32); + lzsa_match *match = pCompressor->match + (i << 5); - for (m = 0; m < nMatches; m++) { + for (m = 0; m < 32 && match[m].length; m++) { int nMatchLen = match[m].length; int nMatchOffset = match[m].offset; int nNoRepmatchOffsetCost = (nMatchOffset <= 32) ? 4 : ((nMatchOffset <= 512) ? 8 : ((nMatchOffset <= (8192 + 512)) ? 12 : 16)); @@ -319,10 +322,13 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne } if (!exists) { + int nScore = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].score + ((nMatchOffset == nRepOffset) ? 
2 : 3); + for (n = 0; n < NMATCHES_PER_OFFSET; n++) { lzsa_arrival *pDestArrival = &pDestSlots[n]; - if (nCodingChoiceCost <= pDestArrival->cost) { + if (nCodingChoiceCost < pDestArrival->cost || + (nCodingChoiceCost == pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { if (pDestArrival->from_slot) { memmove(&pDestSlots[n + 1], &pDestSlots[n], @@ -335,6 +341,7 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne pDestArrival->match_offset = nMatchOffset; pDestArrival->match_len = k; pDestArrival->num_literals = 0; + pDestArrival->score = nScore; pDestArrival->rep_offset = nMatchOffset; break; } @@ -361,10 +368,13 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne } if (!exists) { + int nScore = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].score + 2; + for (n = 0; n < NMATCHES_PER_OFFSET; n++) { lzsa_arrival *pDestArrival = &pDestSlots[n]; - if (nRepCodingChoiceCost <= pDestArrival->cost) { + if (nRepCodingChoiceCost < pDestArrival->cost || + (nRepCodingChoiceCost == pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { if (pDestArrival->from_slot) { memmove(&pDestSlots[n + 1], &pDestSlots[n], @@ -377,6 +387,7 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne pDestArrival->match_offset = nRepOffset; pDestArrival->match_len = k; pDestArrival->num_literals = 0; + pDestArrival->score = nScore; pDestArrival->rep_offset = nRepOffset; break; } @@ -390,12 +401,12 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne } lzsa_arrival *end_arrival = &arrival[(i << MATCHES_PER_OFFSET_SHIFT) + 0]; - pCompressor->best_match[i].length = 0; - pCompressor->best_match[i].offset = 0; + pBestMatch[i].length = 0; + pBestMatch[i].offset = 0; while (end_arrival->from_slot > 0 && end_arrival->from_pos >= 0) { - pCompressor->best_match[end_arrival->from_pos].length = end_arrival->match_len; - pCompressor->best_match[end_arrival->from_pos].offset = end_arrival->match_offset; + pBestMatch[end_arrival->from_pos].length = end_arrival->match_len; + pBestMatch[end_arrival->from_pos].offset = end_arrival->match_offset; end_arrival = &arrival[(end_arrival->from_pos << MATCHES_PER_OFFSET_SHIFT) + (end_arrival->from_slot - 1)]; } } @@ -568,6 +579,78 @@ static int lzsa_optimize_command_count_v2(lzsa_compressor *pCompressor, const un return nDidReduce; } +/** + * Get compressed data block size + * + * @param pCompressor compression context + * @param pBestMatch optimal matches to emit + * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) + * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes + * + * @return size of compressed data that will be written to output buffer + */ +static int lzsa_get_compressed_size_v2(lzsa_compressor *pCompressor, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset) { + int i; + int nNumLiterals = 0; + int nOutOffset = 0; + int nRepMatchOffset = 0; + int nCompressedSize = 0; + + for (i = nStartOffset; i < nEndOffset; ) { + const lzsa_match *pMatch = pBestMatch + i; + + if (pMatch->length >= MIN_MATCH_SIZE_V2) { + int nMatchOffset = pMatch->offset; + int nMatchLen = pMatch->length; + int nEncodedMatchLen = nMatchLen - MIN_MATCH_SIZE_V2; + int nOffsetSize; + + if (nMatchOffset == nRepMatchOffset) { + nOffsetSize = 0; + } + else { + if (nMatchOffset <= 32) { + nOffsetSize = 4; + } + else if (nMatchOffset <= 512) 
{ + nOffsetSize = 8; + } + else if (nMatchOffset <= (8192 + 512)) { + nOffsetSize = 12; + } + else { + nOffsetSize = 16; + } + } + + int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + (nNumLiterals << 3) + nOffsetSize /* match offset */ + lzsa_get_match_varlen_size_v2(nEncodedMatchLen); + nCompressedSize += nCommandSize; + + nNumLiterals = 0; + nRepMatchOffset = nMatchOffset; + i += nMatchLen; + } + else { + nNumLiterals++; + i++; + } + } + + { + int nTokenLiteralsLen = (nNumLiterals >= LITERALS_RUN_LEN_V2) ? LITERALS_RUN_LEN_V2 : nNumLiterals; + int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + (nNumLiterals << 3); + + nCompressedSize += nCommandSize; + nNumLiterals = 0; + } + + if (pCompressor->flags & LZSA_FLAG_RAW_BLOCK) { + nCompressedSize += (8 + 4 + 8); + } + + return nCompressedSize; +} + /** * Emit block of compressed data * @@ -852,9 +935,21 @@ static int lzsa_write_raw_uncompressed_block_v2(lzsa_compressor *pCompressor, co * @return size of compressed data in output buffer, or -1 if the data is uncompressible */ int lzsa_optimize_and_write_block_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, const int nPreviousBlockSize, const int nInDataSize, unsigned char *pOutData, const int nMaxOutDataSize) { - int nResult; + int i, nResult, nBaseCompressedSize; - lzsa_optimize_forward_v2(pCompressor, pInWindow, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); + /* Find matches */ + for (i = nPreviousBlockSize; i != (nPreviousBlockSize + nInDataSize - 1); i++) { + int nMatches = lzsa_find_matches_at(pCompressor, i, pCompressor->match + (i << 5), 32); + while (nMatches < 32) { + pCompressor->match[(i << 5) + nMatches].offset = 0; + pCompressor->match[(i << 5) + nMatches].length = 0; + nMatches++; + } + } + + /* Compress optimally without breaking ties in favor of less tokens */ + + lzsa_optimize_forward_v2(pCompressor, pInWindow, pCompressor->best_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 0 /* reduce */); int nDidReduce; int nPasses = 0; @@ -863,7 +958,29 @@ int lzsa_optimize_and_write_block_v2(lzsa_compressor *pCompressor, const unsigne nPasses++; } while (nDidReduce && nPasses < 20); - nResult = lzsa_write_block_v2(pCompressor, pCompressor->best_match, pInWindow, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, pOutData, nMaxOutDataSize); + nBaseCompressedSize = lzsa_get_compressed_size_v2(pCompressor, pCompressor->best_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); + lzsa_match *pBestMatch = pCompressor->best_match; + + if (nBaseCompressedSize > 0 && nInDataSize < 65536) { + int nReducedCompressedSize; + + /* Compress optimally and do break ties in favor of less tokens */ + lzsa_optimize_forward_v2(pCompressor, pInWindow, pCompressor->improved_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 1 /* reduce */); + + nPasses = 0; + do { + nDidReduce = lzsa_optimize_command_count_v2(pCompressor, pInWindow, pCompressor->improved_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); + nPasses++; + } while (nDidReduce && nPasses < 20); + + nReducedCompressedSize = lzsa_get_compressed_size_v2(pCompressor, pCompressor->improved_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); + if (nReducedCompressedSize > 0 && nReducedCompressedSize <= nBaseCompressedSize) { + /* Pick the parse with the reduced number of tokens as it didn't negatively affect the size */ + pBestMatch = pCompressor->improved_match; + } + } + + nResult = 
lzsa_write_block_v2(pCompressor, pBestMatch, pInWindow, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, pOutData, nMaxOutDataSize); if (nResult < 0 && pCompressor->flags & LZSA_FLAG_RAW_BLOCK) { nResult = lzsa_write_raw_uncompressed_block_v2(pCompressor, pInWindow, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, pOutData, nMaxOutDataSize); } diff --git a/src/shrink_context.c b/src/shrink_context.c index f3ec444..74a7a16 100644 --- a/src/shrink_context.c +++ b/src/shrink_context.c @@ -58,7 +58,9 @@ int lzsa_compressor_init(lzsa_compressor *pCompressor, const int nMaxWindowSize, pCompressor->intervals = NULL; pCompressor->pos_data = NULL; pCompressor->open_intervals = NULL; + pCompressor->match = NULL; pCompressor->best_match = NULL; + pCompressor->improved_match = NULL; pCompressor->arrival = NULL; pCompressor->min_match_size = nMinMatchSize; if (pCompressor->min_match_size < nMinMatchSizeForFormat) @@ -93,7 +95,18 @@ int lzsa_compressor_init(lzsa_compressor *pCompressor, const int nMaxWindowSize, pCompressor->best_match = (lzsa_match *)malloc(nMaxWindowSize * sizeof(lzsa_match)); if (pCompressor->best_match) { - return 0; + if (pCompressor->format_version == 2) { + pCompressor->improved_match = (lzsa_match *)malloc(nMaxWindowSize * sizeof(lzsa_match)); + + if (pCompressor->improved_match) { + pCompressor->match = (lzsa_match *)malloc(nMaxWindowSize * 32 * sizeof(lzsa_match)); + if (pCompressor->match) + return 0; + } + } + else { + return 0; + } } } } @@ -113,6 +126,16 @@ int lzsa_compressor_init(lzsa_compressor *pCompressor, const int nMaxWindowSize, void lzsa_compressor_destroy(lzsa_compressor *pCompressor) { divsufsort_destroy(&pCompressor->divsufsort_context); + if (pCompressor->match) { + free(pCompressor->match); + pCompressor->match = NULL; + } + + if (pCompressor->improved_match) { + free(pCompressor->improved_match); + pCompressor->improved_match = NULL; + } + if (pCompressor->arrival) { free(pCompressor->arrival); pCompressor->arrival = NULL; diff --git a/src/shrink_context.h b/src/shrink_context.h index fd8176b..8e51822 100644 --- a/src/shrink_context.h +++ b/src/shrink_context.h @@ -74,6 +74,7 @@ typedef struct { unsigned short rep_offset; int num_literals; + int score; unsigned short match_offset; unsigned short match_len; @@ -114,7 +115,9 @@ typedef struct _lzsa_compressor { unsigned int *intervals; unsigned int *pos_data; unsigned int *open_intervals; + lzsa_match *match; lzsa_match *best_match; + lzsa_match *improved_match; lzsa_arrival *arrival; int min_match_size; int format_version; From 4436f216ced3c3e5304e76bf55ef650f3b89c334 Mon Sep 17 00:00:00 2001 From: Emmanuel Marty Date: Fri, 11 Oct 2019 09:06:50 +0200 Subject: [PATCH 03/17] Bump version --- src/lzsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lzsa.c b/src/lzsa.c index f9bcfcb..68eb5f5 100755 --- a/src/lzsa.c +++ b/src/lzsa.c @@ -48,7 +48,7 @@ #define OPT_RAW_BACKWARD 8 #define OPT_STATS 16 -#define TOOL_VERSION "1.1.0" +#define TOOL_VERSION "1.1.1" /*---------------------------------------------------------------------------*/ From 115a81cb718ed9d2765469aa315d068d9c631bc8 Mon Sep 17 00:00:00 2001 From: Emmanuel Marty Date: Fri, 11 Oct 2019 09:09:42 +0200 Subject: [PATCH 04/17] Remove unused code --- VS2017/lzsa.vcxproj | 2 -- VS2017/lzsa.vcxproj.filters | 6 ------ 2 files changed, 8 deletions(-) diff --git a/VS2017/lzsa.vcxproj b/VS2017/lzsa.vcxproj index 66d68ba..f9275dc 100755 --- a/VS2017/lzsa.vcxproj +++ b/VS2017/lzsa.vcxproj @@ -185,7 +185,6 @@ - @@ -207,7 +206,6 @@ - 
diff --git a/VS2017/lzsa.vcxproj.filters b/VS2017/lzsa.vcxproj.filters index 24c8840..415b4dd 100755 --- a/VS2017/lzsa.vcxproj.filters +++ b/VS2017/lzsa.vcxproj.filters @@ -84,9 +84,6 @@ Fichiers sources\libdivsufsort\include - - Fichiers sources - @@ -146,8 +143,5 @@ Fichiers sources\libdivsufsort\lib - - Fichiers sources - \ No newline at end of file From c77c6665683bdb73fed792a99180f61dcf15c678 Mon Sep 17 00:00:00 2001 From: Emmanuel Marty Date: Fri, 11 Oct 2019 09:10:07 +0200 Subject: [PATCH 05/17] Remove unused code --- Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/Makefile b/Makefile index cf84d1b..9e98565 100755 --- a/Makefile +++ b/Makefile @@ -18,7 +18,6 @@ OBJS += $(OBJDIR)/src/expand_context.o OBJS += $(OBJDIR)/src/expand_inmem.o OBJS += $(OBJDIR)/src/expand_streaming.o OBJS += $(OBJDIR)/src/frame.o -OBJS += $(OBJDIR)/src/hashmap.o OBJS += $(OBJDIR)/src/matchfinder.o OBJS += $(OBJDIR)/src/shrink_block_v1.o OBJS += $(OBJDIR)/src/shrink_block_v2.o From 5141ed7c598336e4a7a6aff635eca4bdcd367873 Mon Sep 17 00:00:00 2001 From: Emmanuel Marty Date: Fri, 11 Oct 2019 09:11:41 +0200 Subject: [PATCH 06/17] Remove unused code --- src/hashmap.c | 138 -------------------------------------------------- 1 file changed, 138 deletions(-) delete mode 100644 src/hashmap.c diff --git a/src/hashmap.c b/src/hashmap.c deleted file mode 100644 index 71bf01b..0000000 --- a/src/hashmap.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * hashmap.c - integer hashmap implementation - * - * Copyright (C) 2019 Emmanuel Marty - * - * This software is provided 'as-is', without any express or implied - * warranty. In no event will the authors be held liable for any damages - * arising from the use of this software. - * - * Permission is granted to anyone to use this software for any purpose, - * including commercial applications, and to alter it and redistribute it - * freely, subject to the following restrictions: - * - * 1. The origin of this software must not be misrepresented; you must not - * claim that you wrote the original software. If you use this software - * in a product, an acknowledgment in the product documentation would be - * appreciated but is not required. - * 2. Altered source versions must be plainly marked as such, and must not be - * misrepresented as being the original software. - * 3. This notice may not be removed or altered from any source distribution. - */ - -/* - * Uses the libdivsufsort library Copyright (c) 2003-2008 Yuta Mori - * - * Inspired by LZ4 by Yann Collet. https://github.com/lz4/lz4 - * With help, ideas, optimizations and speed measurements by spke - * With ideas from Lizard by Przemyslaw Skibinski and Yann Collet. https://github.com/inikep/lizard - * Also with ideas from smallz4 by Stephan Brumme. 
https://create.stephan-brumme.com/smallz4/ - * - */ - -#include -#include -#include "hashmap.h" - -/** - * Generate key hash by mixing - * - * @param key key to get hash for - * - * @return hash - */ -static unsigned int lzsa_hashmap_get_hash(unsigned long long key) { - key = (~key) + (key << 21); - key = key ^ (key >> 24); - key = (key + (key << 3)) + (key << 8); - key = key ^ (key >> 14); - key = (key + (key << 2)) + (key << 4); - key = key ^ (key >> 28); - key = key + (key << 31); - return key & (LZSA_HASH_NBUCKETS - 1); -} - -/** - * Initialize hashmap - * - * @param pHashMap hashmap - */ -void lzsa_hashmap_init(lzsa_hashmap_t *pHashMap) { - pHashMap->pBuffer = NULL; - memset(pHashMap->pBucket, 0, sizeof(lzsa_hashvalue_t *) * LZSA_HASH_NBUCKETS); -} - -/** - * Set value for key - * - * @param pHashMap hashmap - * @param key key to set value for - * @param value new value - */ -void lzsa_hashmap_insert(lzsa_hashmap_t *pHashMap, unsigned long long key, unsigned int value) { - unsigned int hash = lzsa_hashmap_get_hash(key); - lzsa_hashvalue_t **pBucket = &pHashMap->pBucket[hash]; - while (*pBucket) { - if ((*pBucket)->key == key) { - (*pBucket)->value = value; - return; - } - - pBucket = &((*pBucket)->pNext); - } - - if (!pHashMap->pBuffer || pHashMap->pBuffer->nFreeEntryIdx >= 255) { - lzsa_hashbuffer_t *pNewBuffer = (lzsa_hashbuffer_t *)malloc(sizeof(lzsa_hashbuffer_t)); - if (!pNewBuffer) return; - - pNewBuffer->pNext = pHashMap->pBuffer; - pNewBuffer->nFreeEntryIdx = 0; - pHashMap->pBuffer = pNewBuffer; - } - - *pBucket = &pHashMap->pBuffer->value[pHashMap->pBuffer->nFreeEntryIdx++]; - (*pBucket)->pNext = NULL; - (*pBucket)->key = key; - (*pBucket)->value = value; -} - -/** - * Get value for key - * - * @param pHashMap hashmap - * @param key key to get value for - * @param pValue pointer to where to store value if found - * - * @return 0 if found, nonzero if not found - */ -int lzsa_hashmap_find(lzsa_hashmap_t *pHashMap, unsigned long long key, unsigned int *pValue) { - unsigned int hash = lzsa_hashmap_get_hash(key); - lzsa_hashvalue_t **pBucket = &pHashMap->pBucket[hash]; - while (*pBucket) { - if ((*pBucket)->key == key) { - *pValue = (*pBucket)->value; - return 0; - } - - pBucket = &((*pBucket)->pNext); - } - - return -1; -} - -/** - * Clear hashmap - * - * @param pHashMap hashmap - */ -void lzsa_hashmap_clear(lzsa_hashmap_t *pHashMap) { - while (pHashMap->pBuffer) { - lzsa_hashbuffer_t *pCurBuffer = pHashMap->pBuffer; - pHashMap->pBuffer = pCurBuffer->pNext; - free(pCurBuffer); - pCurBuffer = NULL; - } - - memset(pHashMap->pBucket, 0, sizeof(lzsa_hashvalue_t *) * LZSA_HASH_NBUCKETS); -} - From c363ecf527e39bdf065e5297e7819aa26d463a2f Mon Sep 17 00:00:00 2001 From: Emmanuel Marty Date: Fri, 11 Oct 2019 09:11:49 +0200 Subject: [PATCH 07/17] Remove unused code --- src/hashmap.h | 99 --------------------------------------------------- 1 file changed, 99 deletions(-) delete mode 100644 src/hashmap.h diff --git a/src/hashmap.h b/src/hashmap.h deleted file mode 100644 index e16cb91..0000000 --- a/src/hashmap.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * hashmap.h - integer hashmap definitions - * - * Copyright (C) 2019 Emmanuel Marty - * - * This software is provided 'as-is', without any express or implied - * warranty. In no event will the authors be held liable for any damages - * arising from the use of this software. 
- * - * Permission is granted to anyone to use this software for any purpose, - * including commercial applications, and to alter it and redistribute it - * freely, subject to the following restrictions: - * - * 1. The origin of this software must not be misrepresented; you must not - * claim that you wrote the original software. If you use this software - * in a product, an acknowledgment in the product documentation would be - * appreciated but is not required. - * 2. Altered source versions must be plainly marked as such, and must not be - * misrepresented as being the original software. - * 3. This notice may not be removed or altered from any source distribution. - */ - -/* - * Uses the libdivsufsort library Copyright (c) 2003-2008 Yuta Mori - * - * Inspired by LZ4 by Yann Collet. https://github.com/lz4/lz4 - * With help, ideas, optimizations and speed measurements by spke - * With ideas from Lizard by Przemyslaw Skibinski and Yann Collet. https://github.com/inikep/lizard - * Also with ideas from smallz4 by Stephan Brumme. https://create.stephan-brumme.com/smallz4/ - * - */ - -#ifndef _HASHMAP_H -#define _HASHMAP_H - -#include - -/** Number of hashmap buckets */ -#define LZSA_HASH_NBUCKETS 256 - -/* Forward definitions */ -typedef struct _lzsa_hashvalue_t lzsa_hashvalue_t; -typedef struct _lzsa_hashbuffer_t lzsa_hashbuffer_t; - -/** One hashmap bucket entry */ -typedef struct _lzsa_hashvalue_t { - lzsa_hashvalue_t *pNext; - unsigned long long key; - unsigned int value; -} lzsa_hashvalue_t; - -/** One buffer storing hashmap bucket entries */ -typedef struct _lzsa_hashbuffer_t { - lzsa_hashbuffer_t *pNext; - int nFreeEntryIdx; - lzsa_hashvalue_t value[255]; -} lzsa_hashbuffer_t; - -/** Hashmap */ -typedef struct { - lzsa_hashbuffer_t *pBuffer; - lzsa_hashvalue_t *pBucket[LZSA_HASH_NBUCKETS]; -} lzsa_hashmap_t; - -/** - * Initialize hashmap - * - * @param pHashMap hashmap - */ -void lzsa_hashmap_init(lzsa_hashmap_t *pHashMap); - -/** - * Set value for key - * - * @param pHashMap hashmap - * @param key key to set value for - * @param value new value - */ -void lzsa_hashmap_insert(lzsa_hashmap_t *pHashMap, unsigned long long key, unsigned int value); - -/** - * Get value for key - * - * @param pHashMap hashmap - * @param key key to get value for - * @param pValue pointer to where to store value if found - * - * @return 0 if found, nonzero if not found - */ -int lzsa_hashmap_find(lzsa_hashmap_t *pHashMap, unsigned long long key, unsigned int *pValue); - -/** - * Clear hashmap - * - * @param pHashMap hashmap - */ -void lzsa_hashmap_clear(lzsa_hashmap_t *pHashMap); - -#endif /* _HASHMAP_H */ From 89f1664ae62646154ac49003028eda65c01270f6 Mon Sep 17 00:00:00 2001 From: Emmanuel Marty Date: Fri, 11 Oct 2019 09:14:19 +0200 Subject: [PATCH 08/17] Remove unused code --- src/shrink_block_v2.c | 1 - src/shrink_context.h | 2 -- 2 files changed, 3 deletions(-) diff --git a/src/shrink_block_v2.c b/src/shrink_block_v2.c index 4d2ff7d..18f8657 100644 --- a/src/shrink_block_v2.c +++ b/src/shrink_block_v2.c @@ -35,7 +35,6 @@ #include "lib.h" #include "shrink_block_v2.h" #include "format.h" -#include "hashmap.h" #include "matchfinder.h" /** diff --git a/src/shrink_context.h b/src/shrink_context.h index 8e51822..c5cc501 100644 --- a/src/shrink_context.h +++ b/src/shrink_context.h @@ -34,7 +34,6 @@ #define _SHRINK_CONTEXT_H #include "divsufsort.h" -#include "hashmap.h" #ifdef __cplusplus extern "C" { @@ -124,7 +123,6 @@ typedef struct _lzsa_compressor { int flags; int safe_dist; int num_commands; - 
lzsa_hashmap_t cost_map; lzsa_stats stats; } lzsa_compressor; From 96df02c532af2edf8ed30731865402c00c1bcffd Mon Sep 17 00:00:00 2001 From: Emmanuel Marty Date: Fri, 11 Oct 2019 09:20:36 +0200 Subject: [PATCH 09/17] Remove unused code --- Xcode/lzsa.xcodeproj/project.pbxproj | 6 ------ 1 file changed, 6 deletions(-) diff --git a/Xcode/lzsa.xcodeproj/project.pbxproj b/Xcode/lzsa.xcodeproj/project.pbxproj index e5ec4a9..608a7d6 100644 --- a/Xcode/lzsa.xcodeproj/project.pbxproj +++ b/Xcode/lzsa.xcodeproj/project.pbxproj @@ -26,7 +26,6 @@ 0CADC64722AAD8EB003E9821 /* expand_context.c in Sources */ = {isa = PBXBuildFile; fileRef = 0CADC62F22AAD8EB003E9821 /* expand_context.c */; }; 0CADC64822AAD8EB003E9821 /* shrink_block_v2.c in Sources */ = {isa = PBXBuildFile; fileRef = 0CADC63022AAD8EB003E9821 /* shrink_block_v2.c */; }; 0CADC64A22AB8DAD003E9821 /* divsufsort_utils.c in Sources */ = {isa = PBXBuildFile; fileRef = 0CADC64922AB8DAD003E9821 /* divsufsort_utils.c */; }; - 0CADC69622C8A420003E9821 /* hashmap.c in Sources */ = {isa = PBXBuildFile; fileRef = 0CADC69522C8A41F003E9821 /* hashmap.c */; }; /* End PBXBuildFile section */ /* Begin PBXCopyFilesBuildPhase section */ @@ -81,8 +80,6 @@ 0CADC63022AAD8EB003E9821 /* shrink_block_v2.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = shrink_block_v2.c; path = ../../src/shrink_block_v2.c; sourceTree = ""; }; 0CADC64922AB8DAD003E9821 /* divsufsort_utils.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = divsufsort_utils.c; sourceTree = ""; }; 0CADC64B22AB8DC3003E9821 /* divsufsort_config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = divsufsort_config.h; sourceTree = ""; }; - 0CADC69422C8A41F003E9821 /* hashmap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = hashmap.h; path = ../../src/hashmap.h; sourceTree = ""; }; - 0CADC69522C8A41F003E9821 /* hashmap.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = hashmap.c; path = ../../src/hashmap.c; sourceTree = ""; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ @@ -130,8 +127,6 @@ 0CADC62422AAD8EB003E9821 /* format.h */, 0CADC5F322AAD8EB003E9821 /* frame.c */, 0CADC62C22AAD8EB003E9821 /* frame.h */, - 0CADC69522C8A41F003E9821 /* hashmap.c */, - 0CADC69422C8A41F003E9821 /* hashmap.h */, 0CADC5F222AAD8EB003E9821 /* lib.h */, 0CADC5FC22AAD8EB003E9821 /* libdivsufsort */, 0CADC62222AAD8EB003E9821 /* lzsa.c */, @@ -240,7 +235,6 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( - 0CADC69622C8A420003E9821 /* hashmap.c in Sources */, 0CADC64822AAD8EB003E9821 /* shrink_block_v2.c in Sources */, 0CADC63D22AAD8EB003E9821 /* sssort.c in Sources */, 0CADC64322AAD8EB003E9821 /* expand_block_v2.c in Sources */, From d9156d3d2bc7bf7c022271bac65c27ee63546011 Mon Sep 17 00:00:00 2001 From: Emmanuel Marty Date: Sat, 19 Oct 2019 13:10:41 +0200 Subject: [PATCH 10/17] Reduce LZSA1 token count by 2.5% on average --- src/matchfinder.c | 25 +++++++ src/matchfinder.h | 10 +++ src/shrink_block_v1.c | 167 ++++++++++++++++++++++++++++++++---------- src/shrink_block_v2.c | 12 +-- src/shrink_context.c | 17 ++--- 5 files changed, 171 insertions(+), 60 deletions(-) diff --git a/src/matchfinder.c b/src/matchfinder.c index 6862143..d545bec 100644 --- a/src/matchfinder.c +++ b/src/matchfinder.c @@ -285,3 +285,28 @@ void lzsa_skip_matches(lzsa_compressor *pCompressor, const 
int nStartOffset, con lzsa_find_matches_at(pCompressor, i, &match, 0); } } + +/** + * Find all matches for the data to be compressed + * + * @param pCompressor compression context + * @param nMatchesPerOffset maximum number of matches to store for each offset + * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) + * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes + */ +void lzsa_find_all_matches(lzsa_compressor *pCompressor, const int nMatchesPerOffset, const int nStartOffset, const int nEndOffset) { + lzsa_match *pMatch = pCompressor->match + (nStartOffset * nMatchesPerOffset); + int i; + + for (i = nStartOffset; i < nEndOffset; i++) { + int nMatches = lzsa_find_matches_at(pCompressor, i, pMatch, nMatchesPerOffset); + + while (nMatches < nMatchesPerOffset) { + pMatch[nMatches].length = 0; + pMatch[nMatches].offset = 0; + nMatches++; + } + + pMatch += nMatchesPerOffset; + } +} diff --git a/src/matchfinder.h b/src/matchfinder.h index b61a6af..3526948 100644 --- a/src/matchfinder.h +++ b/src/matchfinder.h @@ -73,6 +73,16 @@ int lzsa_find_matches_at(lzsa_compressor *pCompressor, const int nOffset, lzsa_m */ void lzsa_skip_matches(lzsa_compressor *pCompressor, const int nStartOffset, const int nEndOffset); +/** + * Find all matches for the data to be compressed + * + * @param pCompressor compression context + * @param nMatchesPerOffset maximum number of matches to store for each offset + * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) + * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes + */ +void lzsa_find_all_matches(lzsa_compressor *pCompressor, const int nMatchesPerOffset, const int nStartOffset, const int nEndOffset); + #ifdef __cplusplus } #endif diff --git a/src/shrink_block_v1.c b/src/shrink_block_v1.c index 1182c56..7a011ec 100644 --- a/src/shrink_block_v1.c +++ b/src/shrink_block_v1.c @@ -157,23 +157,24 @@ static inline int lzsa_get_offset_cost_v1(const unsigned int nMatchOffset) { * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes */ -static void lzsa_optimize_forward_v1(lzsa_compressor *pCompressor, const int nStartOffset, const int nEndOffset) { +static void lzsa_optimize_forward_v1(lzsa_compressor *pCompressor, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset, const int nReduce) { lzsa_arrival *arrival = pCompressor->arrival; const int nMinMatchSize = pCompressor->min_match_size; const int nFavorRatio = (pCompressor->flags & LZSA_FLAG_FAVOR_RATIO) ? 1 : 0; + const int nDisableScore = nReduce ? 
0 : (2 * BLOCK_SIZE); int i, j, n; - lzsa_match match[8]; memset(arrival + (nStartOffset << MATCHES_PER_OFFSET_SHIFT), 0, sizeof(lzsa_arrival) * ((nEndOffset - nStartOffset) << MATCHES_PER_OFFSET_SHIFT)); arrival[nStartOffset << MATCHES_PER_OFFSET_SHIFT].from_slot = -1; for (i = nStartOffset; i != (nEndOffset - 1); i++) { - int m, nMatches; + int m; for (j = 0; j < NMATCHES_PER_OFFSET && arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].from_slot; j++) { int nPrevCost = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].cost; int nCodingChoiceCost = nPrevCost + 8 /* literal */; + int nScore = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].score + 1; int nNumLiterals = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].num_literals + 1; if (nNumLiterals == LITERALS_RUN_LEN_V1 || nNumLiterals == 256 || nNumLiterals == 512) { @@ -183,27 +184,32 @@ static void lzsa_optimize_forward_v1(lzsa_compressor *pCompressor, const int nSt if (!nFavorRatio && nNumLiterals == 1) nCodingChoiceCost += MODESWITCH_PENALTY; - lzsa_arrival *pDestArrival = &arrival[((i + 1) << MATCHES_PER_OFFSET_SHIFT)]; - if (pDestArrival->from_slot == 0 || - nCodingChoiceCost <= pDestArrival->cost) { + for (n = 0; n < NMATCHES_PER_OFFSET /* we only need the literals + short match cost + long match cost cases */; n++) { + lzsa_arrival *pDestArrival = &arrival[((i + 1) << MATCHES_PER_OFFSET_SHIFT) + n]; - memmove(&arrival[((i + 1) << MATCHES_PER_OFFSET_SHIFT) + 1], - &arrival[((i + 1) << MATCHES_PER_OFFSET_SHIFT)], - sizeof(lzsa_arrival) * (NMATCHES_PER_OFFSET - 1)); + if (pDestArrival->from_slot == 0 || + nCodingChoiceCost < pDestArrival->cost || + (nCodingChoiceCost == pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { + memmove(&arrival[((i + 1) << MATCHES_PER_OFFSET_SHIFT) + n + 1], + &arrival[((i + 1) << MATCHES_PER_OFFSET_SHIFT) + n], + sizeof(lzsa_arrival) * (NMATCHES_PER_OFFSET - n - 1)); - pDestArrival->cost = nCodingChoiceCost; - pDestArrival->from_pos = i; - pDestArrival->from_slot = j + 1; - pDestArrival->match_offset = 0; - pDestArrival->match_len = 0; - pDestArrival->num_literals = nNumLiterals; - pDestArrival->rep_offset = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_offset; + pDestArrival->cost = nCodingChoiceCost; + pDestArrival->from_pos = i; + pDestArrival->from_slot = j + 1; + pDestArrival->match_offset = 0; + pDestArrival->match_len = 0; + pDestArrival->num_literals = nNumLiterals; + pDestArrival->score = nScore; + pDestArrival->rep_offset = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_offset; + break; + } } } - nMatches = lzsa_find_matches_at(pCompressor, i, match, 8); + const lzsa_match *match = pCompressor->match + (i << 3); - for (m = 0; m < nMatches; m++) { + for (m = 0; m < 8 && match[m].length; m++) { int nMatchLen = match[m].length; int nMatchOffsetCost = lzsa_get_offset_cost_v1(match[m].offset); int nStartingMatchLen, k; @@ -221,13 +227,14 @@ static void lzsa_optimize_forward_v1(lzsa_compressor *pCompressor, const int nSt for (j = 0; j < NMATCHES_PER_OFFSET && arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].from_slot; j++) { int nPrevCost = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].cost; int nCodingChoiceCost = nPrevCost + 8 /* token */ /* the actual cost of the literals themselves accumulates up the chain */ + nMatchOffsetCost + nMatchLenCost; + int nScore = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].score + 5; int exists = 0; if (!nFavorRatio && !arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].num_literals) nCodingChoiceCost += MODESWITCH_PENALTY; for (n = 0; - n < 3 && arrival[((i + k) 
<< MATCHES_PER_OFFSET_SHIFT) + n].from_slot && arrival[((i + k) << MATCHES_PER_OFFSET_SHIFT) + n].cost <= nCodingChoiceCost; + n < NMATCHES_PER_OFFSET && arrival[((i + k) << MATCHES_PER_OFFSET_SHIFT) + n].from_slot && arrival[((i + k) << MATCHES_PER_OFFSET_SHIFT) + n].cost <= nCodingChoiceCost; n++) { if (lzsa_get_offset_cost_v1(arrival[((i + k) << MATCHES_PER_OFFSET_SHIFT) + n].rep_offset) == lzsa_get_offset_cost_v1(match[m].offset)) { exists = 1; @@ -235,11 +242,12 @@ static void lzsa_optimize_forward_v1(lzsa_compressor *pCompressor, const int nSt } } - for (n = 0; !exists && n < 3 /* we only need the literals + short match cost + long match cost cases */; n++) { + for (n = 0; !exists && n < NMATCHES_PER_OFFSET /* we only need the literals + short match cost + long match cost cases */; n++) { lzsa_arrival *pDestArrival = &arrival[((i + k) << MATCHES_PER_OFFSET_SHIFT) + n]; if (pDestArrival->from_slot == 0 || - nCodingChoiceCost <= pDestArrival->cost) { + nCodingChoiceCost < pDestArrival->cost || + (nCodingChoiceCost == pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { memmove(&arrival[((i + k) << MATCHES_PER_OFFSET_SHIFT) + n + 1], &arrival[((i + k) << MATCHES_PER_OFFSET_SHIFT) + n], sizeof(lzsa_arrival) * (NMATCHES_PER_OFFSET - n - 1)); @@ -250,6 +258,7 @@ static void lzsa_optimize_forward_v1(lzsa_compressor *pCompressor, const int nSt pDestArrival->match_offset = match[m].offset; pDestArrival->match_len = k; pDestArrival->num_literals = 0; + pDestArrival->score = nScore; pDestArrival->rep_offset = match[m].offset; break; } @@ -260,12 +269,12 @@ static void lzsa_optimize_forward_v1(lzsa_compressor *pCompressor, const int nSt } lzsa_arrival *end_arrival = &arrival[(i << MATCHES_PER_OFFSET_SHIFT) + 0]; - pCompressor->best_match[i].length = 0; - pCompressor->best_match[i].offset = 0; + pBestMatch[i].length = 0; + pBestMatch[i].offset = 0; while (end_arrival->from_slot > 0 && end_arrival->from_pos >= 0) { - pCompressor->best_match[end_arrival->from_pos].length = end_arrival->match_len; - pCompressor->best_match[end_arrival->from_pos].offset = end_arrival->match_offset; + pBestMatch[end_arrival->from_pos].length = end_arrival->match_len; + pBestMatch[end_arrival->from_pos].offset = end_arrival->match_offset; end_arrival = &arrival[(end_arrival->from_pos << MATCHES_PER_OFFSET_SHIFT) + (end_arrival->from_slot - 1)]; } @@ -276,18 +285,19 @@ static void lzsa_optimize_forward_v1(lzsa_compressor *pCompressor, const int nSt * impacting the compression ratio * * @param pCompressor compression context + * @param pBestMatch optimal matches to emit * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes * * @return non-zero if the number of tokens was reduced, 0 if it wasn't */ -static int lzsa_optimize_command_count_v1(lzsa_compressor *pCompressor, const int nStartOffset, const int nEndOffset) { +static int lzsa_optimize_command_count_v1(lzsa_compressor *pCompressor, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset) { int i; int nNumLiterals = 0; int nDidReduce = 0; for (i = nStartOffset; i < nEndOffset; ) { - lzsa_match *pMatch = pCompressor->best_match + i; + lzsa_match *pMatch = pBestMatch + i; if (pMatch->length >= MIN_MATCH_SIZE_V1) { if (pMatch->length <= 9 /* Don't waste time considering large matches, they will always win over literals */ && @@ -295,7 +305,7 @@ static int 
lzsa_optimize_command_count_v1(lzsa_compressor *pCompressor, const in int nNextIndex = i + pMatch->length; int nNextLiterals = 0; - while (nNextIndex < nEndOffset && pCompressor->best_match[nNextIndex].length < MIN_MATCH_SIZE_V1) { + while (nNextIndex < nEndOffset && pBestMatch[nNextIndex].length < MIN_MATCH_SIZE_V1) { nNextLiterals++; nNextIndex++; } @@ -309,7 +319,7 @@ static int lzsa_optimize_command_count_v1(lzsa_compressor *pCompressor, const in int j; for (j = 0; j < nMatchLen; j++) { - pCompressor->best_match[i + j].length = 0; + pBestMatch[i + j].length = 0; } nDidReduce = 1; @@ -318,15 +328,15 @@ static int lzsa_optimize_command_count_v1(lzsa_compressor *pCompressor, const in } if ((i + pMatch->length) < nEndOffset && pMatch->length >= LCP_MAX && - pMatch->offset && pMatch->offset <= 32 && pCompressor->best_match[i + pMatch->length].offset == pMatch->offset && (pMatch->length % pMatch->offset) == 0 && - (pMatch->length + pCompressor->best_match[i + pMatch->length].length) <= MAX_VARLEN) { + pMatch->offset && pMatch->offset <= 32 && pBestMatch[i + pMatch->length].offset == pMatch->offset && (pMatch->length % pMatch->offset) == 0 && + (pMatch->length + pBestMatch[i + pMatch->length].length) <= MAX_VARLEN) { int nMatchLen = pMatch->length; /* Join */ - pMatch->length += pCompressor->best_match[i + nMatchLen].length; - pCompressor->best_match[i + nMatchLen].offset = 0; - pCompressor->best_match[i + nMatchLen].length = -1; + pMatch->length += pBestMatch[i + nMatchLen].length; + pBestMatch[i + nMatchLen].offset = 0; + pBestMatch[i + nMatchLen].length = -1; continue; } @@ -342,10 +352,63 @@ static int lzsa_optimize_command_count_v1(lzsa_compressor *pCompressor, const in return nDidReduce; } +/** + * Get compressed data block size + * + * @param pCompressor compression context + * @param pBestMatch optimal matches to emit + * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) + * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes + * + * @return size of compressed data that will be written to output buffer + */ +static int lzsa_get_compressed_size_v1(lzsa_compressor *pCompressor, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset) { + int i; + int nNumLiterals = 0; + int nCompressedSize = 0; + + for (i = nStartOffset; i < nEndOffset; ) { + const lzsa_match *pMatch = pBestMatch + i; + + if (pMatch->length >= MIN_MATCH_SIZE_V1) { + int nMatchOffset = pMatch->offset; + int nMatchLen = pMatch->length; + int nEncodedMatchLen = nMatchLen - MIN_MATCH_SIZE_V1; + int nTokenLiteralsLen = (nNumLiterals >= LITERALS_RUN_LEN_V1) ? LITERALS_RUN_LEN_V1 : nNumLiterals; + int nTokenMatchLen = (nEncodedMatchLen >= MATCH_RUN_LEN_V1) ? MATCH_RUN_LEN_V1 : nEncodedMatchLen; + int nTokenLongOffset = (nMatchOffset <= 256) ? 0x00 : 0x80; + int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v1(nNumLiterals) + (nNumLiterals << 3) + (nTokenLongOffset ? 16 : 8) /* match offset */ + lzsa_get_match_varlen_size_v1(nEncodedMatchLen); + + nCompressedSize += nCommandSize; + nNumLiterals = 0; + i += nMatchLen; + } + else { + nNumLiterals++; + i++; + } + } + + { + int nTokenLiteralsLen = (nNumLiterals >= LITERALS_RUN_LEN_V1) ? 
LITERALS_RUN_LEN_V1 : nNumLiterals; + int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v1(nNumLiterals) + (nNumLiterals << 3); + + nCompressedSize += nCommandSize; + nNumLiterals = 0; + } + + if (pCompressor->flags & LZSA_FLAG_RAW_BLOCK) { + nCompressedSize += 8 * 4; + } + + return nCompressedSize; +} + /** * Emit block of compressed data * * @param pCompressor compression context + * @param pBestMatch optimal matches to emit * @param pInWindow pointer to input data window (previously compressed bytes + bytes to compress) * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes @@ -354,14 +417,14 @@ static int lzsa_optimize_command_count_v1(lzsa_compressor *pCompressor, const in * * @return size of compressed data in output buffer, or -1 if the data is uncompressible */ -static int lzsa_write_block_v1(lzsa_compressor *pCompressor, const unsigned char *pInWindow, const int nStartOffset, const int nEndOffset, unsigned char *pOutData, const int nMaxOutDataSize) { +static int lzsa_write_block_v1(lzsa_compressor *pCompressor, lzsa_match *pBestMatch, const unsigned char *pInWindow, const int nStartOffset, const int nEndOffset, unsigned char *pOutData, const int nMaxOutDataSize) { int i; int nNumLiterals = 0; int nInFirstLiteralOffset = 0; int nOutOffset = 0; for (i = nStartOffset; i < nEndOffset; ) { - lzsa_match *pMatch = pCompressor->best_match + i; + const lzsa_match *pMatch = pBestMatch + i; if (pMatch->length >= MIN_MATCH_SIZE_V1) { int nMatchOffset = pMatch->offset; @@ -554,18 +617,42 @@ static int lzsa_write_raw_uncompressed_block_v1(lzsa_compressor *pCompressor, co * @return size of compressed data in output buffer, or -1 if the data is uncompressible */ int lzsa_optimize_and_write_block_v1(lzsa_compressor *pCompressor, const unsigned char *pInWindow, const int nPreviousBlockSize, const int nInDataSize, unsigned char *pOutData, const int nMaxOutDataSize) { - int nResult; + int nResult, nBaseCompressedSize; - lzsa_optimize_forward_v1(pCompressor, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); + /* Compress optimally without breaking ties in favor of less tokens */ + + lzsa_optimize_forward_v1(pCompressor, pCompressor->best_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 0 /* reduce */); int nDidReduce; int nPasses = 0; do { - nDidReduce = lzsa_optimize_command_count_v1(pCompressor, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); + nDidReduce = lzsa_optimize_command_count_v1(pCompressor, pCompressor->best_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); nPasses++; } while (nDidReduce && nPasses < 20); - nResult = lzsa_write_block_v1(pCompressor, pInWindow, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, pOutData, nMaxOutDataSize); + nBaseCompressedSize = lzsa_get_compressed_size_v1(pCompressor, pCompressor->best_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); + lzsa_match *pBestMatch = pCompressor->best_match; + + if (nBaseCompressedSize > 0 && nInDataSize < 65536) { + int nReducedCompressedSize; + + /* Compress optimally and do break ties in favor of less tokens */ + lzsa_optimize_forward_v1(pCompressor, pCompressor->improved_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 1 /* reduce */); + + nPasses = 0; + do { + nDidReduce = lzsa_optimize_command_count_v1(pCompressor, pCompressor->improved_match, nPreviousBlockSize, nPreviousBlockSize + 
nInDataSize); + nPasses++; + } while (nDidReduce && nPasses < 20); + + nReducedCompressedSize = lzsa_get_compressed_size_v1(pCompressor, pCompressor->improved_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); + if (nReducedCompressedSize > 0 && nReducedCompressedSize <= nBaseCompressedSize) { + /* Pick the parse with the reduced number of tokens as it didn't negatively affect the size */ + pBestMatch = pCompressor->improved_match; + } + } + + nResult = lzsa_write_block_v1(pCompressor, pBestMatch, pInWindow, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, pOutData, nMaxOutDataSize); if (nResult < 0 && pCompressor->flags & LZSA_FLAG_RAW_BLOCK) { nResult = lzsa_write_raw_uncompressed_block_v1(pCompressor, pInWindow, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, pOutData, nMaxOutDataSize); } diff --git a/src/shrink_block_v2.c b/src/shrink_block_v2.c index 18f8657..987b7d5 100644 --- a/src/shrink_block_v2.c +++ b/src/shrink_block_v2.c @@ -934,17 +934,7 @@ static int lzsa_write_raw_uncompressed_block_v2(lzsa_compressor *pCompressor, co * @return size of compressed data in output buffer, or -1 if the data is uncompressible */ int lzsa_optimize_and_write_block_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, const int nPreviousBlockSize, const int nInDataSize, unsigned char *pOutData, const int nMaxOutDataSize) { - int i, nResult, nBaseCompressedSize; - - /* Find matches */ - for (i = nPreviousBlockSize; i != (nPreviousBlockSize + nInDataSize - 1); i++) { - int nMatches = lzsa_find_matches_at(pCompressor, i, pCompressor->match + (i << 5), 32); - while (nMatches < 32) { - pCompressor->match[(i << 5) + nMatches].offset = 0; - pCompressor->match[(i << 5) + nMatches].length = 0; - nMatches++; - } - } + int nResult, nBaseCompressedSize; /* Compress optimally without breaking ties in favor of less tokens */ diff --git a/src/shrink_context.c b/src/shrink_context.c index 74a7a16..a9bcc1e 100644 --- a/src/shrink_context.c +++ b/src/shrink_context.c @@ -95,17 +95,15 @@ int lzsa_compressor_init(lzsa_compressor *pCompressor, const int nMaxWindowSize, pCompressor->best_match = (lzsa_match *)malloc(nMaxWindowSize * sizeof(lzsa_match)); if (pCompressor->best_match) { - if (pCompressor->format_version == 2) { - pCompressor->improved_match = (lzsa_match *)malloc(nMaxWindowSize * sizeof(lzsa_match)); + pCompressor->improved_match = (lzsa_match *)malloc(nMaxWindowSize * sizeof(lzsa_match)); - if (pCompressor->improved_match) { + if (pCompressor->improved_match) { + if (pCompressor->format_version == 2) pCompressor->match = (lzsa_match *)malloc(nMaxWindowSize * 32 * sizeof(lzsa_match)); - if (pCompressor->match) - return 0; - } - } - else { - return 0; + else + pCompressor->match = (lzsa_match *)malloc(nMaxWindowSize * 8 * sizeof(lzsa_match)); + if (pCompressor->match) + return 0; } } } @@ -187,6 +185,7 @@ int lzsa_compressor_shrink_block(lzsa_compressor *pCompressor, unsigned char *pI if (nPreviousBlockSize) { lzsa_skip_matches(pCompressor, 0, nPreviousBlockSize); } + lzsa_find_all_matches(pCompressor, (pCompressor->format_version == 2) ? 
32 : 8, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); if (pCompressor->format_version == 1) { nCompressedSize = lzsa_optimize_and_write_block_v1(pCompressor, pInWindow, nPreviousBlockSize, nInDataSize, pOutData, nMaxOutDataSize); From 2926ad843601a6b4a918017bbe9b65ef2ad9a3a6 Mon Sep 17 00:00:00 2001 From: Emmanuel Marty Date: Mon, 21 Oct 2019 12:29:38 +0200 Subject: [PATCH 11/17] Remove unused #includes --- src/shrink_block_v1.c | 1 - src/shrink_block_v2.c | 1 - 2 files changed, 2 deletions(-) diff --git a/src/shrink_block_v1.c b/src/shrink_block_v1.c index 7a011ec..bafb6d9 100644 --- a/src/shrink_block_v1.c +++ b/src/shrink_block_v1.c @@ -35,7 +35,6 @@ #include "lib.h" #include "shrink_block_v1.h" #include "format.h" -#include "matchfinder.h" /** * Get the number of extra bits required to represent a literals length diff --git a/src/shrink_block_v2.c b/src/shrink_block_v2.c index 987b7d5..db7785d 100644 --- a/src/shrink_block_v2.c +++ b/src/shrink_block_v2.c @@ -35,7 +35,6 @@ #include "lib.h" #include "shrink_block_v2.h" #include "format.h" -#include "matchfinder.h" /** * Write 4-bit nibble to output (compressed) buffer From dbaa3fa921b0c02dc0ddf882ab72a1de6d38cba5 Mon Sep 17 00:00:00 2001 From: Emmanuel Marty Date: Tue, 22 Oct 2019 12:36:41 +0200 Subject: [PATCH 12/17] Further increase LZSA2 ratio by ~0.1% on average --- shrink_block_v2.c | 1014 +++++++++++++++++++++++++++++++++++++++++++++ shrink_context.h | 175 ++++++++ 2 files changed, 1189 insertions(+) create mode 100644 shrink_block_v2.c create mode 100644 shrink_context.h diff --git a/shrink_block_v2.c b/shrink_block_v2.c new file mode 100644 index 0000000..4d23314 --- /dev/null +++ b/shrink_block_v2.c @@ -0,0 +1,1014 @@ +/* + * shrink_block_v2.c - LZSA2 block compressor implementation + * + * Copyright (C) 2019 Emmanuel Marty + * + * This software is provided 'as-is', without any express or implied + * warranty. In no event will the authors be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, subject to the following restrictions: + * + * 1. The origin of this software must not be misrepresented; you must not + * claim that you wrote the original software. If you use this software + * in a product, an acknowledgment in the product documentation would be + * appreciated but is not required. + * 2. Altered source versions must be plainly marked as such, and must not be + * misrepresented as being the original software. + * 3. This notice may not be removed or altered from any source distribution. + */ + +/* + * Uses the libdivsufsort library Copyright (c) 2003-2008 Yuta Mori + * + * Inspired by LZ4 by Yann Collet. https://github.com/lz4/lz4 + * With help, ideas, optimizations and speed measurements by spke + * With ideas from Lizard by Przemyslaw Skibinski and Yann Collet. https://github.com/inikep/lizard + * Also with ideas from smallz4 by Stephan Brumme. 
https://create.stephan-brumme.com/smallz4/ + * + */ + +#include <stdlib.h> +#include <string.h> +#include "lib.h" +#include "shrink_block_v2.h" +#include "format.h" + +/** + * Write 4-bit nibble to output (compressed) buffer + * + * @param pOutData pointer to output buffer + * @param nOutOffset current write index into output buffer + * @param nMaxOutDataSize maximum size of output buffer, in bytes + * @param nCurNibbleOffset write index into output buffer, of current byte being filled with nibbles + * @param nCurFreeNibbles current number of free nibbles in byte + * @param nNibbleValue value to write (0..15) + */ +static int lzsa_write_nibble_v2(unsigned char *pOutData, int nOutOffset, const int nMaxOutDataSize, int *nCurNibbleOffset, int *nCurFreeNibbles, int nNibbleValue) { + if (nOutOffset < 0) return -1; + + if ((*nCurNibbleOffset) == -1) { + if (nOutOffset >= nMaxOutDataSize) return -1; + (*nCurNibbleOffset) = nOutOffset; + (*nCurFreeNibbles) = 2; + pOutData[nOutOffset++] = 0; + } + + pOutData[*nCurNibbleOffset] = (pOutData[*nCurNibbleOffset] << 4) | (nNibbleValue & 0x0f); + (*nCurFreeNibbles)--; + if ((*nCurFreeNibbles) == 0) { + (*nCurNibbleOffset) = -1; + } + + return nOutOffset; +} + +/** + * Get the number of extra bits required to represent a literals length + * + * @param nLength literals length + * + * @return number of extra bits required + */ +static inline int lzsa_get_literals_varlen_size_v2(const int nLength) { + if (nLength < LITERALS_RUN_LEN_V2) { + return 0; + } + else { + if (nLength < (LITERALS_RUN_LEN_V2 + 15)) { + return 4; + } + else { + if (nLength < 256) + return 4+8; + else { + return 4+24; + } + } + } +} + +/** + * Write extra literals length bytes to output (compressed) buffer. The caller must first check that there is enough + * room to write the bytes. + * + * @param pOutData pointer to output buffer + * @param nOutOffset current write index into output buffer + * @param nLength literals length + */ +static inline int lzsa_write_literals_varlen_v2(unsigned char *pOutData, int nOutOffset, const int nMaxOutDataSize, int *nCurNibbleOffset, int *nCurFreeNibbles, int nLength) { + if (nLength >= LITERALS_RUN_LEN_V2) { + if (nLength < (LITERALS_RUN_LEN_V2 + 15)) { + nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, nCurNibbleOffset, nCurFreeNibbles, nLength - LITERALS_RUN_LEN_V2); + } + else { + nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, nCurNibbleOffset, nCurFreeNibbles, 15); + if (nOutOffset < 0) return -1; + + if (nLength < 256) + pOutData[nOutOffset++] = nLength - 18; + else { + pOutData[nOutOffset++] = 239; + pOutData[nOutOffset++] = nLength & 0xff; + pOutData[nOutOffset++] = (nLength >> 8) & 0xff; + } + } + } + + return nOutOffset; +} + +/** + * Get the number of extra bits required to represent an encoded match length + * + * @param nLength encoded match length (actual match length - MIN_MATCH_SIZE_V2) + * + * @return number of extra bits required + */ +static inline int lzsa_get_match_varlen_size_v2(const int nLength) { + if (nLength < MATCH_RUN_LEN_V2) { + return 0; + } + else { + if (nLength < (MATCH_RUN_LEN_V2 + 15)) + return 4; + else { + if ((nLength + MIN_MATCH_SIZE_V2) < 256) + return 4+8; + else { + return 4 + 24; + } + } + } +} + +/** + * Write extra encoded match length bytes to output (compressed) buffer. The caller must first check that there is enough + * room to write the bytes. 
+ * + * @param pOutData pointer to output buffer + * @param nOutOffset current write index into output buffer + * @param nLength encoded match length (actual match length - MIN_MATCH_SIZE_V2) + */ +static inline int lzsa_write_match_varlen_v2(unsigned char *pOutData, int nOutOffset, const int nMaxOutDataSize, int *nCurNibbleOffset, int *nCurFreeNibbles, int nLength) { + if (nLength >= MATCH_RUN_LEN_V2) { + if (nLength < (MATCH_RUN_LEN_V2 + 15)) { + nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, nCurNibbleOffset, nCurFreeNibbles, nLength - MATCH_RUN_LEN_V2); + } + else { + nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, nCurNibbleOffset, nCurFreeNibbles, 15); + if (nOutOffset < 0) return -1; + + if ((nLength + MIN_MATCH_SIZE_V2) < 256) + pOutData[nOutOffset++] = nLength + MIN_MATCH_SIZE_V2 - 24; + else { + pOutData[nOutOffset++] = 233; + pOutData[nOutOffset++] = (nLength + MIN_MATCH_SIZE_V2) & 0xff; + pOutData[nOutOffset++] = ((nLength + MIN_MATCH_SIZE_V2) >> 8) & 0xff; + } + } + } + + return nOutOffset; +} + +/** + * Attempt to pick optimal matches using a forward arrivals parser, so as to produce the smallest possible output that decompresses to the same input + * + * @param pCompressor compression context + * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) + * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes + * @param nInsertForwardReps non-zero to insert forward repmatch candidates, zero to use the previously inserted candidates + */ +static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset, const int nReduce, const int nInsertForwardReps) { + lzsa_arrival *arrival = pCompressor->arrival; + const int nFavorRatio = (pCompressor->flags & LZSA_FLAG_FAVOR_RATIO) ? 1 : 0; + const int nMinMatchSize = pCompressor->min_match_size; + const int nDisableScore = nReduce ? 
0 : (2 * BLOCK_SIZE); + int i, j, n; + + memset(arrival + (nStartOffset << MATCHES_PER_OFFSET_SHIFT), 0, sizeof(lzsa_arrival) * ((nEndOffset - nStartOffset) << MATCHES_PER_OFFSET_SHIFT)); + + for (i = (nStartOffset << MATCHES_PER_OFFSET_SHIFT); i != (nEndOffset << MATCHES_PER_OFFSET_SHIFT); i++) { + arrival[i].cost = 0x40000000; + } + + arrival[nStartOffset << MATCHES_PER_OFFSET_SHIFT].from_slot = -1; + + for (i = nStartOffset; i != (nEndOffset - 1); i++) { + int m; + + for (j = 0; j < NMATCHES_PER_OFFSET && arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].from_slot; j++) { + const int nPrevCost = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].cost & 0x3fffffff; + int nCodingChoiceCost = nPrevCost + 8 /* literal */; + int nNumLiterals = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].num_literals + 1; + + if (nNumLiterals == LITERALS_RUN_LEN_V2) { + nCodingChoiceCost += 4; + } + else if (nNumLiterals == (LITERALS_RUN_LEN_V2 + 15)) { + nCodingChoiceCost += 8; + } + else if (nNumLiterals == 256) { + nCodingChoiceCost += 16; + } + + if (!nFavorRatio && nNumLiterals == 1) + nCodingChoiceCost += MODESWITCH_PENALTY; + + lzsa_arrival *pDestSlots = &arrival[(i + 1) << MATCHES_PER_OFFSET_SHIFT]; + if (nCodingChoiceCost <= pDestSlots[NMATCHES_PER_OFFSET - 1].cost) { + int exists = 0; + for (n = 0; + n < NMATCHES_PER_OFFSET && pDestSlots[n].cost <= nCodingChoiceCost; + n++) { + if (pDestSlots[n].rep_offset == arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_offset) { + exists = 1; + break; + } + } + + if (!exists) { + int nScore = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].score + 1; + for (n = 0; n < NMATCHES_PER_OFFSET; n++) { + lzsa_arrival *pDestArrival = &pDestSlots[n]; + if (nCodingChoiceCost < pDestArrival->cost || + (nCodingChoiceCost == pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { + + if (pDestArrival->from_slot) { + memmove(&pDestSlots[n + 1], + &pDestSlots[n], + sizeof(lzsa_arrival) * (NMATCHES_PER_OFFSET - n - 1)); + } + + pDestArrival->cost = nCodingChoiceCost; + pDestArrival->from_pos = i; + pDestArrival->from_slot = j + 1; + pDestArrival->match_offset = 0; + pDestArrival->match_len = 0; + pDestArrival->num_literals = nNumLiterals; + pDestArrival->score = nScore; + pDestArrival->rep_offset = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_offset; + pDestArrival->rep_pos = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_pos; + pDestArrival->rep_len = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_len; + break; + } + } + } + } + } + + lzsa_match *match = pCompressor->match + (i << 5); + + for (m = 0; m < 32 && match[m].length; m++) { + int nMatchLen = match[m].length; + int nMatchOffset = match[m].offset; + int nNoRepmatchOffsetCost = (nMatchOffset <= 32) ? 4 : ((nMatchOffset <= 512) ? 8 : ((nMatchOffset <= (8192 + 512)) ? 
12 : 16)); + int nStartingMatchLen, k; + int nMaxRepLen[NMATCHES_PER_OFFSET]; + + if ((i + nMatchLen) > (nEndOffset - LAST_LITERALS)) + nMatchLen = nEndOffset - LAST_LITERALS - i; + + for (j = 0; j < NMATCHES_PER_OFFSET && arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].from_slot; j++) { + int nRepOffset = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_offset; + int nCurMaxRepLen = 0; + + if (nMatchOffset != nRepOffset && + nRepOffset && + i > nRepOffset && + (i - nRepOffset + nMatchLen) <= (nEndOffset - LAST_LITERALS)) { + while (nCurMaxRepLen < nMatchLen && pInWindow[i - nRepOffset + nCurMaxRepLen] == pInWindow[i - nMatchOffset + nCurMaxRepLen]) + nCurMaxRepLen++; + } + + nMaxRepLen[j] = nCurMaxRepLen; + } + while (j < NMATCHES_PER_OFFSET) + nMaxRepLen[j++] = 0; + + for (j = 0; j < NMATCHES_PER_OFFSET && arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].from_slot; j++) { + int nRepOffset = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_offset; + + if (nMatchOffset != nRepOffset && nRepOffset && nInsertForwardReps && arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_len >= MIN_MATCH_SIZE_V2) { + int nRepPos = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_pos; + int nRepLen = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_len; + + if (nRepPos > nMatchOffset && + (nRepPos - nMatchOffset + nRepLen) <= (nEndOffset - LAST_LITERALS) && + !memcmp(pInWindow + nRepPos - nRepOffset, pInWindow + nRepPos - nMatchOffset, nRepLen)) { + + lzsa_match *fwd_match = pCompressor->match + (nRepPos << 5); + int exists = 0; + int r; + + for (r = 0; r < 32 && fwd_match[r].length >= MIN_MATCH_SIZE_V2; r++) { + if (fwd_match[r].offset == nMatchOffset) { + exists = 1; + break; + } + } + + if (!exists && r < 32) { + fwd_match[r].offset = nMatchOffset; + fwd_match[r].length = nRepLen; + } + } + } + } + + if (nMatchLen >= LEAVE_ALONE_MATCH_SIZE) + nStartingMatchLen = nMatchLen; + else + nStartingMatchLen = nMinMatchSize; + + for (k = nStartingMatchLen; k <= nMatchLen; k++) { + int nMatchLenCost = lzsa_get_match_varlen_size_v2(k - MIN_MATCH_SIZE_V2); + lzsa_arrival *pDestSlots = &arrival[(i + k) << MATCHES_PER_OFFSET_SHIFT]; + + for (j = 0; j < NMATCHES_PER_OFFSET && arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].from_slot; j++) { + const int nPrevCost = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].cost & 0x3fffffff; + int nRepOffset = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_offset; + + int nMatchOffsetCost = (nMatchOffset == nRepOffset) ? 0 : nNoRepmatchOffsetCost; + int nRepCodingChoiceCost = nPrevCost + 8 /* token */ /* the actual cost of the literals themselves accumulates up the chain */ + nMatchLenCost; + int nCodingChoiceCost = nRepCodingChoiceCost + nMatchOffsetCost; + + if (!nFavorRatio && !arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].num_literals) + nCodingChoiceCost += MODESWITCH_PENALTY; + + if (nRepCodingChoiceCost <= pDestSlots[NMATCHES_PER_OFFSET - 1].cost) { + if (nCodingChoiceCost <= pDestSlots[NMATCHES_PER_OFFSET - 1].cost) { + int exists = 0; + + for (n = 0; + n < NMATCHES_PER_OFFSET && pDestSlots[n].cost <= nCodingChoiceCost; + n++) { + if (pDestSlots[n].rep_offset == nMatchOffset) { + exists = 1; + break; + } + } + + if (!exists) { + int nScore = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].score + ((nMatchOffset == nRepOffset) ? 
2 : 3); + + for (n = 0; n < NMATCHES_PER_OFFSET; n++) { + lzsa_arrival *pDestArrival = &pDestSlots[n]; + + if (nCodingChoiceCost < pDestArrival->cost || + (nCodingChoiceCost == pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { + if (pDestArrival->from_slot) { + memmove(&pDestSlots[n + 1], + &pDestSlots[n], + sizeof(lzsa_arrival) * (NMATCHES_PER_OFFSET - n - 1)); + } + + pDestArrival->cost = nCodingChoiceCost; + pDestArrival->from_pos = i; + pDestArrival->from_slot = j + 1; + pDestArrival->match_offset = nMatchOffset; + pDestArrival->match_len = k; + pDestArrival->num_literals = 0; + pDestArrival->score = nScore; + pDestArrival->rep_offset = nMatchOffset; + pDestArrival->rep_pos = i; + pDestArrival->rep_len = k; + break; + } + } + } + } + + /* If this coding choice doesn't rep-match, see if we still get a match by using the current repmatch offset for this arrival. This can occur (and not have the + * matchfinder offer the offset in the first place, or have too many choices with the same cost to retain the repmatchable offset) when compressing regions + * of identical bytes, for instance. Checking for this provides a big compression win on some files. */ + + if (nMaxRepLen[j] >= k) { + int exists = 0; + + /* A match is possible at the rep offset; insert the extra coding choice. */ + + for (n = 0; + n < NMATCHES_PER_OFFSET && pDestSlots[n].cost <= nRepCodingChoiceCost; + n++) { + if (pDestSlots[n].rep_offset == nRepOffset) { + exists = 1; + break; + } + } + + if (!exists) { + int nScore = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].score + 2; + + for (n = 0; n < NMATCHES_PER_OFFSET; n++) { + lzsa_arrival *pDestArrival = &pDestSlots[n]; + + if (nRepCodingChoiceCost < pDestArrival->cost || + (nRepCodingChoiceCost == pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { + if (pDestArrival->from_slot) { + memmove(&pDestSlots[n + 1], + &pDestSlots[n], + sizeof(lzsa_arrival) * (NMATCHES_PER_OFFSET - n - 1)); + } + + pDestArrival->cost = nRepCodingChoiceCost; + pDestArrival->from_pos = i; + pDestArrival->from_slot = j + 1; + pDestArrival->match_offset = nRepOffset; + pDestArrival->match_len = k; + pDestArrival->num_literals = 0; + pDestArrival->score = nScore; + pDestArrival->rep_offset = nRepOffset; + pDestArrival->rep_pos = i; + pDestArrival->rep_len = k; + break; + } + } + } + } + } + } + } + } + } + + lzsa_arrival *end_arrival = &arrival[(i << MATCHES_PER_OFFSET_SHIFT) + 0]; + pBestMatch[i].length = 0; + pBestMatch[i].offset = 0; + + while (end_arrival->from_slot > 0 && end_arrival->from_pos >= 0) { + pBestMatch[end_arrival->from_pos].length = end_arrival->match_len; + pBestMatch[end_arrival->from_pos].offset = end_arrival->match_offset; + end_arrival = &arrival[(end_arrival->from_pos << MATCHES_PER_OFFSET_SHIFT) + (end_arrival->from_slot - 1)]; + } +} + +/** + * Attempt to minimize the number of commands issued in the compressed data block, in order to speed up decompression without + * impacting the compression ratio + * + * @param pCompressor compression context + * @param pInWindow pointer to input data window (previously compressed bytes + bytes to compress) + * @param pBestMatch optimal matches to evaluate and update + * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) + * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes + * + * @return non-zero if the number of tokens was reduced, 0 if it wasn't + */ +static int 
lzsa_optimize_command_count_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset) { + int i; + int nNumLiterals = 0; + int nPrevRepMatchOffset = 0; + int nRepMatchOffset = 0; + int nRepMatchLen = 0; + int nRepIndex = 0; + int nDidReduce = 0; + + for (i = nStartOffset; i < nEndOffset; ) { + lzsa_match *pMatch = pBestMatch + i; + + if (pMatch->length >= MIN_MATCH_SIZE_V2) { + if ((i + pMatch->length) < nEndOffset /* Don't consider the last match in the block, we can only reduce a match inbetween other tokens */) { + int nNextIndex = i + pMatch->length; + int nNextLiterals = 0; + + while (nNextIndex < nEndOffset && pBestMatch[nNextIndex].length < MIN_MATCH_SIZE_V2) { + nNextLiterals++; + nNextIndex++; + } + + if (nNextIndex < nEndOffset && pBestMatch[nNextIndex].length >= MIN_MATCH_SIZE_V2) { + /* This command is a match, is followed by 'nNextLiterals' literals and then by another match */ + + if (nRepMatchOffset && pMatch->offset != nRepMatchOffset && (pBestMatch[nNextIndex].offset != pMatch->offset || pBestMatch[nNextIndex].offset == nRepMatchOffset || + ((pMatch->offset <= 32) ? 4 : ((pMatch->offset <= 512) ? 8 : ((pMatch->offset <= (8192 + 512)) ? 12 : 16))) > + ((pBestMatch[nNextIndex].offset <= 32) ? 4 : ((pBestMatch[nNextIndex].offset <= 512) ? 8 : ((pBestMatch[nNextIndex].offset <= (8192 + 512)) ? 12 : 16))))) { + /* Check if we can change the current match's offset to be the same as the previous match's offset, and get an extra repmatch. This will occur when + * matching large regions of identical bytes for instance, where there are too many offsets to be considered by the parser, and when not compressing to favor the + * ratio (the forward arrivals parser already has this covered). */ + if (i > nRepMatchOffset && + (i - nRepMatchOffset + pMatch->length) <= (nEndOffset - LAST_LITERALS) && + !memcmp(pInWindow + i - nRepMatchOffset, pInWindow + i - pMatch->offset, pMatch->length)) { + pMatch->offset = nRepMatchOffset; + nDidReduce = 1; + } + } + + if (pBestMatch[nNextIndex].offset && pMatch->offset != pBestMatch[nNextIndex].offset && nRepMatchOffset != pBestMatch[nNextIndex].offset) { + /* Otherwise, try to gain a match forward as well */ + if (i > pBestMatch[nNextIndex].offset && (i - pBestMatch[nNextIndex].offset + pMatch->length) <= (nEndOffset - LAST_LITERALS)) { + int nMaxLen = 0; + while (nMaxLen < pMatch->length && pInWindow[i - pBestMatch[nNextIndex].offset + nMaxLen] == pInWindow[i - pMatch->offset + nMaxLen]) + nMaxLen++; + if (nMaxLen >= pMatch->length) { + /* Replace */ + pMatch->offset = pBestMatch[nNextIndex].offset; + nDidReduce = 1; + } + else if (nMaxLen >= 2 && pMatch->offset != nRepMatchOffset) { + int nPartialSizeBefore, nPartialSizeAfter; + + nPartialSizeBefore = lzsa_get_match_varlen_size_v2(pMatch->length - MIN_MATCH_SIZE_V2); + nPartialSizeBefore += (pMatch->offset <= 32) ? 4 : ((pMatch->offset <= 512) ? 8 : ((pMatch->offset <= (8192 + 512)) ? 
12 : 16)); + nPartialSizeBefore += lzsa_get_literals_varlen_size_v2(nNextLiterals); + + nPartialSizeAfter = lzsa_get_match_varlen_size_v2(nMaxLen - MIN_MATCH_SIZE_V2); + nPartialSizeAfter += lzsa_get_literals_varlen_size_v2(nNextLiterals + (pMatch->length - nMaxLen)) + ((pMatch->length - nMaxLen) << 3); + + if (nPartialSizeAfter < nPartialSizeBefore) { + int j; + + /* We gain a repmatch that is shorter than the original match as this is the best we can do, so it is followed by extra literals, but + * we have calculated that this is shorter */ + pMatch->offset = pBestMatch[nNextIndex].offset; + for (j = nMaxLen; j < pMatch->length; j++) { + pBestMatch[i + j].length = 0; + } + pMatch->length = nMaxLen; + nDidReduce = 1; + } + } + } + } + + if (pMatch->length < 9 /* Don't waste time considering large matches, they will always win over literals */) { + /* Calculate this command's current cost (excluding 'nNumLiterals' bytes) */ + + int nCurCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + lzsa_get_match_varlen_size_v2(pMatch->length - MIN_MATCH_SIZE_V2); + if (pMatch->offset != nRepMatchOffset) + nCurCommandSize += (pMatch->offset <= 32) ? 4 : ((pMatch->offset <= 512) ? 8 : ((pMatch->offset <= (8192 + 512)) ? 12 : 16)); + + /* Calculate the next command's current cost */ + int nNextCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNextLiterals) + (nNextLiterals << 3) + lzsa_get_match_varlen_size_v2(pBestMatch[nNextIndex].length - MIN_MATCH_SIZE_V2); + if (pBestMatch[nNextIndex].offset != pMatch->offset) + nNextCommandSize += (pBestMatch[nNextIndex].offset <= 32) ? 4 : ((pBestMatch[nNextIndex].offset <= 512) ? 8 : ((pBestMatch[nNextIndex].offset <= (8192 + 512)) ? 12 : 16)); + + int nOriginalCombinedCommandSize = nCurCommandSize + nNextCommandSize; + + /* Calculate the cost of replacing this match command by literals + the next command with the cost of encoding these literals (excluding 'nNumLiterals' bytes) */ + int nReducedCommandSize = (pMatch->length << 3) + 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals + pMatch->length + nNextLiterals) + (nNextLiterals << 3) + lzsa_get_match_varlen_size_v2(pBestMatch[nNextIndex].length - MIN_MATCH_SIZE_V2); + if (pBestMatch[nNextIndex].offset != nRepMatchOffset) + nReducedCommandSize += (pBestMatch[nNextIndex].offset <= 32) ? 4 : ((pBestMatch[nNextIndex].offset <= 512) ? 8 : ((pBestMatch[nNextIndex].offset <= (8192 + 512)) ? 12 : 16)); + + int nReplaceRepOffset = 0; + if (nRepMatchOffset && nRepMatchOffset != nPrevRepMatchOffset && nRepMatchLen >= MIN_MATCH_SIZE_V2 && nRepMatchOffset != pBestMatch[nNextIndex].offset && nRepIndex > pBestMatch[nNextIndex].offset && + (nRepIndex - pBestMatch[nNextIndex].offset + nRepMatchLen) <= (nEndOffset - LAST_LITERALS) && + !memcmp(pInWindow + nRepIndex - nRepMatchOffset, pInWindow + nRepIndex - pBestMatch[nNextIndex].offset, nRepMatchLen)) { + /* Replacing this match command by literals would let us create a repmatch */ + nReplaceRepOffset = 1; + nReducedCommandSize -= (nRepMatchOffset <= 32) ? 4 : ((nRepMatchOffset <= 512) ? 8 : ((nRepMatchOffset <= (8192 + 512)) ? 
12 : 16)); + } + + if (nOriginalCombinedCommandSize >= nReducedCommandSize) { + /* Reduce */ + int nMatchLen = pMatch->length; + int j; + + for (j = 0; j < nMatchLen; j++) { + pBestMatch[i + j].length = 0; + } + + nDidReduce = 1; + + if (nReplaceRepOffset) { + pBestMatch[nRepIndex].offset = pBestMatch[nNextIndex].offset; + nRepMatchOffset = pBestMatch[nNextIndex].offset; + } + continue; + } + } + } + } + + if ((i + pMatch->length) < nEndOffset && pMatch->length >= LCP_MAX && + pMatch->offset && pMatch->offset <= 32 && pBestMatch[i + pMatch->length].offset == pMatch->offset && (pMatch->length % pMatch->offset) == 0 && + (pMatch->length + pBestMatch[i + pMatch->length].length) <= MAX_VARLEN) { + int nMatchLen = pMatch->length; + + /* Join */ + + pMatch->length += pBestMatch[i + nMatchLen].length; + pBestMatch[i + nMatchLen].offset = 0; + pBestMatch[i + nMatchLen].length = -1; + nDidReduce = 1; + continue; + } + + nPrevRepMatchOffset = nRepMatchOffset; + nRepMatchOffset = pMatch->offset; + nRepMatchLen = pMatch->length; + nRepIndex = i; + + i += pMatch->length; + nNumLiterals = 0; + } + else { + nNumLiterals++; + i++; + } + } + + return nDidReduce; +} + +/** + * Get compressed data block size + * + * @param pCompressor compression context + * @param pBestMatch optimal matches to emit + * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) + * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes + * + * @return size of compressed data that will be written to output buffer + */ +static int lzsa_get_compressed_size_v2(lzsa_compressor *pCompressor, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset) { + int i; + int nNumLiterals = 0; + int nOutOffset = 0; + int nRepMatchOffset = 0; + int nCompressedSize = 0; + + for (i = nStartOffset; i < nEndOffset; ) { + const lzsa_match *pMatch = pBestMatch + i; + + if (pMatch->length >= MIN_MATCH_SIZE_V2) { + int nMatchOffset = pMatch->offset; + int nMatchLen = pMatch->length; + int nEncodedMatchLen = nMatchLen - MIN_MATCH_SIZE_V2; + int nOffsetSize; + + if (nMatchOffset == nRepMatchOffset) { + nOffsetSize = 0; + } + else { + if (nMatchOffset <= 32) { + nOffsetSize = 4; + } + else if (nMatchOffset <= 512) { + nOffsetSize = 8; + } + else if (nMatchOffset <= (8192 + 512)) { + nOffsetSize = 12; + } + else { + nOffsetSize = 16; + } + } + + int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + (nNumLiterals << 3) + nOffsetSize /* match offset */ + lzsa_get_match_varlen_size_v2(nEncodedMatchLen); + nCompressedSize += nCommandSize; + + nNumLiterals = 0; + nRepMatchOffset = nMatchOffset; + i += nMatchLen; + } + else { + nNumLiterals++; + i++; + } + } + + { + int nTokenLiteralsLen = (nNumLiterals >= LITERALS_RUN_LEN_V2) ? 
LITERALS_RUN_LEN_V2 : nNumLiterals; + int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + (nNumLiterals << 3); + + nCompressedSize += nCommandSize; + nNumLiterals = 0; + } + + if (pCompressor->flags & LZSA_FLAG_RAW_BLOCK) { + nCompressedSize += (8 + 4 + 8); + } + + return nCompressedSize; +} + +/** + * Emit block of compressed data + * + * @param pCompressor compression context + * @param pBestMatch optimal matches to emit + * @param pInWindow pointer to input data window (previously compressed bytes + bytes to compress) + * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) + * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes + * @param pOutData pointer to output buffer + * @param nMaxOutDataSize maximum size of output buffer, in bytes + * + * @return size of compressed data in output buffer, or -1 if the data is uncompressible + */ +static int lzsa_write_block_v2(lzsa_compressor *pCompressor, lzsa_match *pBestMatch, const unsigned char *pInWindow, const int nStartOffset, const int nEndOffset, unsigned char *pOutData, const int nMaxOutDataSize) { + int i; + int nNumLiterals = 0; + int nInFirstLiteralOffset = 0; + int nOutOffset = 0; + int nCurNibbleOffset = -1, nCurFreeNibbles = 0; + int nRepMatchOffset = 0; + + for (i = nStartOffset; i < nEndOffset; ) { + const lzsa_match *pMatch = pBestMatch + i; + + if (pMatch->length >= MIN_MATCH_SIZE_V2) { + int nMatchOffset = pMatch->offset; + int nMatchLen = pMatch->length; + int nEncodedMatchLen = nMatchLen - MIN_MATCH_SIZE_V2; + int nTokenLiteralsLen = (nNumLiterals >= LITERALS_RUN_LEN_V2) ? LITERALS_RUN_LEN_V2 : nNumLiterals; + int nTokenMatchLen = (nEncodedMatchLen >= MATCH_RUN_LEN_V2) ? 
MATCH_RUN_LEN_V2 : nEncodedMatchLen; + int nTokenOffsetMode; + int nOffsetSize; + + if (nMatchOffset == nRepMatchOffset) { + nTokenOffsetMode = 0xe0; + nOffsetSize = 0; + } + else { + if (nMatchOffset <= 32) { + nTokenOffsetMode = 0x00 | ((((-nMatchOffset) & 0x01) << 5) ^ 0x20); + nOffsetSize = 4; + } + else if (nMatchOffset <= 512) { + nTokenOffsetMode = 0x40 | ((((-nMatchOffset) & 0x100) >> 3) ^ 0x20); + nOffsetSize = 8; + } + else if (nMatchOffset <= (8192 + 512)) { + nTokenOffsetMode = 0x80 | ((((-(nMatchOffset - 512)) & 0x0100) >> 3) ^ 0x20); + nOffsetSize = 12; + } + else { + nTokenOffsetMode = 0xc0; + nOffsetSize = 16; + } + } + + int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + (nNumLiterals << 3) + nOffsetSize /* match offset */ + lzsa_get_match_varlen_size_v2(nEncodedMatchLen); + + if ((nOutOffset + ((nCommandSize + 7) >> 3)) > nMaxOutDataSize) + return -1; + if (nMatchOffset < MIN_OFFSET || nMatchOffset > MAX_OFFSET) + return -1; + + pOutData[nOutOffset++] = nTokenOffsetMode | (nTokenLiteralsLen << 3) | nTokenMatchLen; + nOutOffset = lzsa_write_literals_varlen_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, nNumLiterals); + if (nOutOffset < 0) return -1; + + if (nNumLiterals < pCompressor->stats.min_literals || pCompressor->stats.min_literals == -1) + pCompressor->stats.min_literals = nNumLiterals; + if (nNumLiterals > pCompressor->stats.max_literals) + pCompressor->stats.max_literals = nNumLiterals; + pCompressor->stats.total_literals += nNumLiterals; + pCompressor->stats.literals_divisor++; + + if (nNumLiterals != 0) { + memcpy(pOutData + nOutOffset, pInWindow + nInFirstLiteralOffset, nNumLiterals); + nOutOffset += nNumLiterals; + nNumLiterals = 0; + } + + if (nTokenOffsetMode == 0x00 || nTokenOffsetMode == 0x20) { + nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, ((-nMatchOffset) & 0x1e) >> 1); + if (nOutOffset < 0) return -1; + } + else if (nTokenOffsetMode == 0x40 || nTokenOffsetMode == 0x60) { + pOutData[nOutOffset++] = (-nMatchOffset) & 0xff; + } + else if (nTokenOffsetMode == 0x80 || nTokenOffsetMode == 0xa0) { + nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, ((-(nMatchOffset - 512)) >> 9) & 0x0f); + if (nOutOffset < 0) return -1; + pOutData[nOutOffset++] = (-(nMatchOffset - 512)) & 0xff; + } + else if (nTokenOffsetMode == 0xc0) { + pOutData[nOutOffset++] = (-nMatchOffset) >> 8; + pOutData[nOutOffset++] = (-nMatchOffset) & 0xff; + } + + if (nMatchOffset == nRepMatchOffset) + pCompressor->stats.num_rep_offsets++; + + nRepMatchOffset = nMatchOffset; + + nOutOffset = lzsa_write_match_varlen_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, nEncodedMatchLen); + if (nOutOffset < 0) return -1; + + if (nMatchOffset < pCompressor->stats.min_offset || pCompressor->stats.min_offset == -1) + pCompressor->stats.min_offset = nMatchOffset; + if (nMatchOffset > pCompressor->stats.max_offset) + pCompressor->stats.max_offset = nMatchOffset; + pCompressor->stats.total_offsets += nMatchOffset; + + if (nMatchLen < pCompressor->stats.min_match_len || pCompressor->stats.min_match_len == -1) + pCompressor->stats.min_match_len = nMatchLen; + if (nMatchLen > pCompressor->stats.max_match_len) + pCompressor->stats.max_match_len = nMatchLen; + pCompressor->stats.total_match_lens += nMatchLen; + pCompressor->stats.match_divisor++; + + if (nMatchOffset == 1) { + if (nMatchLen < 
pCompressor->stats.min_rle1_len || pCompressor->stats.min_rle1_len == -1) + pCompressor->stats.min_rle1_len = nMatchLen; + if (nMatchLen > pCompressor->stats.max_rle1_len) + pCompressor->stats.max_rle1_len = nMatchLen; + pCompressor->stats.total_rle1_lens += nMatchLen; + pCompressor->stats.rle1_divisor++; + } + else if (nMatchOffset == 2) { + if (nMatchLen < pCompressor->stats.min_rle2_len || pCompressor->stats.min_rle2_len == -1) + pCompressor->stats.min_rle2_len = nMatchLen; + if (nMatchLen > pCompressor->stats.max_rle2_len) + pCompressor->stats.max_rle2_len = nMatchLen; + pCompressor->stats.total_rle2_lens += nMatchLen; + pCompressor->stats.rle2_divisor++; + } + + i += nMatchLen; + + if (pCompressor->flags & LZSA_FLAG_RAW_BLOCK) { + int nCurSafeDist = (i - nStartOffset) - nOutOffset; + if (nCurSafeDist >= 0 && pCompressor->safe_dist < nCurSafeDist) + pCompressor->safe_dist = nCurSafeDist; + } + + pCompressor->num_commands++; + } + else { + if (nNumLiterals == 0) + nInFirstLiteralOffset = i; + nNumLiterals++; + i++; + } + } + + { + int nTokenLiteralsLen = (nNumLiterals >= LITERALS_RUN_LEN_V2) ? LITERALS_RUN_LEN_V2 : nNumLiterals; + int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + (nNumLiterals << 3); + + if ((nOutOffset + ((nCommandSize + 7) >> 3)) > nMaxOutDataSize) + return -1; + + if (pCompressor->flags & LZSA_FLAG_RAW_BLOCK) + pOutData[nOutOffset++] = (nTokenLiteralsLen << 3) | 0x47; + else + pOutData[nOutOffset++] = (nTokenLiteralsLen << 3) | 0x00; + nOutOffset = lzsa_write_literals_varlen_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, nNumLiterals); + if (nOutOffset < 0) return -1; + + if (nNumLiterals < pCompressor->stats.min_literals || pCompressor->stats.min_literals == -1) + pCompressor->stats.min_literals = nNumLiterals; + if (nNumLiterals > pCompressor->stats.max_literals) + pCompressor->stats.max_literals = nNumLiterals; + pCompressor->stats.total_literals += nNumLiterals; + pCompressor->stats.literals_divisor++; + + if (nNumLiterals != 0) { + memcpy(pOutData + nOutOffset, pInWindow + nInFirstLiteralOffset, nNumLiterals); + nOutOffset += nNumLiterals; + nNumLiterals = 0; + } + + if (pCompressor->flags & LZSA_FLAG_RAW_BLOCK) { + int nCurSafeDist = (i - nStartOffset) - nOutOffset; + if (nCurSafeDist >= 0 && pCompressor->safe_dist < nCurSafeDist) + pCompressor->safe_dist = nCurSafeDist; + } + + pCompressor->num_commands++; + } + + if (pCompressor->flags & LZSA_FLAG_RAW_BLOCK) { + /* Emit EOD marker for raw block */ + + if (nOutOffset >= nMaxOutDataSize) + return -1; + pOutData[nOutOffset++] = 0; /* Match offset */ + + nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, 15); /* Extended match length nibble */ + if (nOutOffset < 0) return -1; + + if ((nOutOffset + 1) > nMaxOutDataSize) + return -1; + + pOutData[nOutOffset++] = 232; /* EOD match length byte */ + } + + if (nCurNibbleOffset != -1) { + nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, 0); + if (nOutOffset < 0 || nCurNibbleOffset != -1) + return -1; + } + + return nOutOffset; +} + +/** + * Emit raw block of uncompressible data + * + * @param pCompressor compression context + * @param pInWindow pointer to input data window (previously compressed bytes + bytes to compress) + * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) + * @param nEndOffset offset to end finding matches at 
(typically the size of the total input window in bytes + * @param pOutData pointer to output buffer + * @param nMaxOutDataSize maximum size of output buffer, in bytes + * + * @return size of compressed data in output buffer, or -1 if the data is uncompressible + */ +static int lzsa_write_raw_uncompressed_block_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, const int nStartOffset, const int nEndOffset, unsigned char *pOutData, const int nMaxOutDataSize) { + int nCurNibbleOffset = -1, nCurFreeNibbles = 0; + int nNumLiterals = nEndOffset - nStartOffset; + int nTokenLiteralsLen = (nNumLiterals >= LITERALS_RUN_LEN_V2) ? LITERALS_RUN_LEN_V2 : nNumLiterals; + int nOutOffset = 0; + + int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + (nNumLiterals << 3) + 8 + 4 + 8; + if ((nOutOffset + ((nCommandSize + 7) >> 3)) > nMaxOutDataSize) + return -1; + + pCompressor->num_commands = 0; + pOutData[nOutOffset++] = (nTokenLiteralsLen << 3) | 0x47; + + nOutOffset = lzsa_write_literals_varlen_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, nNumLiterals); + if (nOutOffset < 0) return -1; + + if (nNumLiterals != 0) { + memcpy(pOutData + nOutOffset, pInWindow + nStartOffset, nNumLiterals); + nOutOffset += nNumLiterals; + nNumLiterals = 0; + } + + /* Emit EOD marker for raw block */ + + pOutData[nOutOffset++] = 0; /* Match offset */ + + nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, 15); /* Extended match length nibble */ + if (nOutOffset < 0) return -1; + + if ((nOutOffset + 1) > nMaxOutDataSize) + return -1; + + pOutData[nOutOffset++] = 232; /* EOD match length byte */ + + pCompressor->num_commands++; + + if (nCurNibbleOffset != -1) { + nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, 0); + if (nOutOffset < 0 || nCurNibbleOffset != -1) + return -1; + } + + return nOutOffset; +} + +/** + * Select the most optimal matches, reduce the token count if possible, and then emit a block of compressed LZSA2 data + * + * @param pCompressor compression context + * @param pInWindow pointer to input data window (previously compressed bytes + bytes to compress) + * @param nPreviousBlockSize number of previously compressed bytes (or 0 for none) + * @param nInDataSize number of input bytes to compress + * @param pOutData pointer to output buffer + * @param nMaxOutDataSize maximum size of output buffer, in bytes + * + * @return size of compressed data in output buffer, or -1 if the data is uncompressible + */ +int lzsa_optimize_and_write_block_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, const int nPreviousBlockSize, const int nInDataSize, unsigned char *pOutData, const int nMaxOutDataSize) { + int nResult, nBaseCompressedSize; + + /* Compress optimally without breaking ties in favor of less tokens */ + + lzsa_optimize_forward_v2(pCompressor, pInWindow, pCompressor->best_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 0 /* reduce */, (nInDataSize < 65536) ? 
1 : 0 /* insert forward reps */); + + int nDidReduce; + int nPasses = 0; + do { + nDidReduce = lzsa_optimize_command_count_v2(pCompressor, pInWindow, pCompressor->best_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); + nPasses++; + } while (nDidReduce && nPasses < 20); + + nBaseCompressedSize = lzsa_get_compressed_size_v2(pCompressor, pCompressor->best_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); + lzsa_match *pBestMatch = pCompressor->best_match; + + if (nBaseCompressedSize > 0 && nInDataSize < 65536) { + int nReducedCompressedSize; + + /* Compress optimally and do break ties in favor of less tokens */ + lzsa_optimize_forward_v2(pCompressor, pInWindow, pCompressor->improved_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 1 /* reduce */, 0 /* use forward reps */); + + nPasses = 0; + do { + nDidReduce = lzsa_optimize_command_count_v2(pCompressor, pInWindow, pCompressor->improved_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); + nPasses++; + } while (nDidReduce && nPasses < 20); + + nReducedCompressedSize = lzsa_get_compressed_size_v2(pCompressor, pCompressor->improved_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); + if (nReducedCompressedSize > 0 && nReducedCompressedSize <= nBaseCompressedSize) { + /* Pick the parse with the reduced number of tokens as it didn't negatively affect the size */ + pBestMatch = pCompressor->improved_match; + } + } + + nResult = lzsa_write_block_v2(pCompressor, pBestMatch, pInWindow, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, pOutData, nMaxOutDataSize); + if (nResult < 0 && pCompressor->flags & LZSA_FLAG_RAW_BLOCK) { + nResult = lzsa_write_raw_uncompressed_block_v2(pCompressor, pInWindow, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, pOutData, nMaxOutDataSize); + } + + return nResult; +} diff --git a/shrink_context.h b/shrink_context.h new file mode 100644 index 0000000..5872b59 --- /dev/null +++ b/shrink_context.h @@ -0,0 +1,175 @@ +/* + * shrink_context.h - compression context definitions + * + * Copyright (C) 2019 Emmanuel Marty + * + * This software is provided 'as-is', without any express or implied + * warranty. In no event will the authors be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, subject to the following restrictions: + * + * 1. The origin of this software must not be misrepresented; you must not + * claim that you wrote the original software. If you use this software + * in a product, an acknowledgment in the product documentation would be + * appreciated but is not required. + * 2. Altered source versions must be plainly marked as such, and must not be + * misrepresented as being the original software. + * 3. This notice may not be removed or altered from any source distribution. + */ + +/* + * Uses the libdivsufsort library Copyright (c) 2003-2008 Yuta Mori + * + * Inspired by LZ4 by Yann Collet. https://github.com/lz4/lz4 + * With help, ideas, optimizations and speed measurements by spke + * With ideas from Lizard by Przemyslaw Skibinski and Yann Collet. https://github.com/inikep/lizard + * Also with ideas from smallz4 by Stephan Brumme. 
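
Stepping back, lzsa_optimize_and_write_block_v2 above runs the arrivals parser twice: a first pass that optimizes purely for size (no tie-breaking, with forward rep-match candidates inserted for blocks under 64 Kb), followed by up to 20 command-count reduction passes, and then, for sub-64 Kb blocks, a second parse that does break cost ties in favor of fewer tokens. The reduced parse is kept only when it does not enlarge the block. The sketch below restates that selection rule; parse_result_t is a hypothetical summary type used for illustration only, the real code compares the values returned by lzsa_get_compressed_size_v2() directly.

/* Hypothetical per-parse summary, for illustration only. */
typedef struct {
   int compressed_size;   /* estimated block size, in bits */
   int token_count;       /* number of commands the parse would emit */
} parse_result_t;

/* Selection rule used above: adopt the token-reduced parse only when it is
 * no larger than the ratio-optimal one, so decompression speed is gained
 * without giving up compressed size. */
static const parse_result_t *pick_parse(const parse_result_t *base, const parse_result_t *reduced) {
   if (reduced->compressed_size > 0 && reduced->compressed_size <= base->compressed_size)
      return reduced;
   return base;
}
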
https://create.stephan-brumme.com/smallz4/ + * + */ + +#ifndef _SHRINK_CONTEXT_H +#define _SHRINK_CONTEXT_H + +#include "divsufsort.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define LCP_BITS 14 +#define TAG_BITS 3 +#define LCP_MAX (1U<<(LCP_BITS - TAG_BITS - 1)) +#define LCP_AND_TAG_MAX (1U<<(LCP_BITS - 1)) +#define LCP_SHIFT (31-LCP_BITS) +#define LCP_MASK (((1U< Date: Tue, 22 Oct 2019 12:37:04 +0200 Subject: [PATCH 13/17] Delete shrink_block_v2.c --- shrink_block_v2.c | 1014 --------------------------------------------- 1 file changed, 1014 deletions(-) delete mode 100644 shrink_block_v2.c diff --git a/shrink_block_v2.c b/shrink_block_v2.c deleted file mode 100644 index 4d23314..0000000 --- a/shrink_block_v2.c +++ /dev/null @@ -1,1014 +0,0 @@ -/* - * shrink_block_v2.c - LZSA2 block compressor implementation - * - * Copyright (C) 2019 Emmanuel Marty - * - * This software is provided 'as-is', without any express or implied - * warranty. In no event will the authors be held liable for any damages - * arising from the use of this software. - * - * Permission is granted to anyone to use this software for any purpose, - * including commercial applications, and to alter it and redistribute it - * freely, subject to the following restrictions: - * - * 1. The origin of this software must not be misrepresented; you must not - * claim that you wrote the original software. If you use this software - * in a product, an acknowledgment in the product documentation would be - * appreciated but is not required. - * 2. Altered source versions must be plainly marked as such, and must not be - * misrepresented as being the original software. - * 3. This notice may not be removed or altered from any source distribution. - */ - -/* - * Uses the libdivsufsort library Copyright (c) 2003-2008 Yuta Mori - * - * Inspired by LZ4 by Yann Collet. https://github.com/lz4/lz4 - * With help, ideas, optimizations and speed measurements by spke - * With ideas from Lizard by Przemyslaw Skibinski and Yann Collet. https://github.com/inikep/lizard - * Also with ideas from smallz4 by Stephan Brumme. 
https://create.stephan-brumme.com/smallz4/ - * - */ - -#include -#include -#include "lib.h" -#include "shrink_block_v2.h" -#include "format.h" - -/** - * Write 4-bit nibble to output (compressed) buffer - * - * @param pOutData pointer to output buffer - * @param nOutOffset current write index into output buffer - * @param nMaxOutDataSize maximum size of output buffer, in bytes - * @param nCurNibbleOffset write index into output buffer, of current byte being filled with nibbles - * @param nCurFreeNibbles current number of free nibbles in byte - * @param nNibbleValue value to write (0..15) - */ -static int lzsa_write_nibble_v2(unsigned char *pOutData, int nOutOffset, const int nMaxOutDataSize, int *nCurNibbleOffset, int *nCurFreeNibbles, int nNibbleValue) { - if (nOutOffset < 0) return -1; - - if ((*nCurNibbleOffset) == -1) { - if (nOutOffset >= nMaxOutDataSize) return -1; - (*nCurNibbleOffset) = nOutOffset; - (*nCurFreeNibbles) = 2; - pOutData[nOutOffset++] = 0; - } - - pOutData[*nCurNibbleOffset] = (pOutData[*nCurNibbleOffset] << 4) | (nNibbleValue & 0x0f); - (*nCurFreeNibbles)--; - if ((*nCurFreeNibbles) == 0) { - (*nCurNibbleOffset) = -1; - } - - return nOutOffset; -} - -/** - * Get the number of extra bits required to represent a literals length - * - * @param nLength literals length - * - * @return number of extra bits required - */ -static inline int lzsa_get_literals_varlen_size_v2(const int nLength) { - if (nLength < LITERALS_RUN_LEN_V2) { - return 0; - } - else { - if (nLength < (LITERALS_RUN_LEN_V2 + 15)) { - return 4; - } - else { - if (nLength < 256) - return 4+8; - else { - return 4+24; - } - } - } -} - -/** - * Write extra literals length bytes to output (compressed) buffer. The caller must first check that there is enough - * room to write the bytes. - * - * @param pOutData pointer to output buffer - * @param nOutOffset current write index into output buffer - * @param nLength literals length - */ -static inline int lzsa_write_literals_varlen_v2(unsigned char *pOutData, int nOutOffset, const int nMaxOutDataSize, int *nCurNibbleOffset, int *nCurFreeNibbles, int nLength) { - if (nLength >= LITERALS_RUN_LEN_V2) { - if (nLength < (LITERALS_RUN_LEN_V2 + 15)) { - nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, nCurNibbleOffset, nCurFreeNibbles, nLength - LITERALS_RUN_LEN_V2); - } - else { - nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, nCurNibbleOffset, nCurFreeNibbles, 15); - if (nOutOffset < 0) return -1; - - if (nLength < 256) - pOutData[nOutOffset++] = nLength - 18; - else { - pOutData[nOutOffset++] = 239; - pOutData[nOutOffset++] = nLength & 0xff; - pOutData[nOutOffset++] = (nLength >> 8) & 0xff; - } - } - } - - return nOutOffset; -} - -/** - * Get the number of extra bits required to represent an encoded match length - * - * @param nLength encoded match length (actual match length - MIN_MATCH_SIZE_V2) - * - * @return number of extra bits required - */ -static inline int lzsa_get_match_varlen_size_v2(const int nLength) { - if (nLength < MATCH_RUN_LEN_V2) { - return 0; - } - else { - if (nLength < (MATCH_RUN_LEN_V2 + 15)) - return 4; - else { - if ((nLength + MIN_MATCH_SIZE_V2) < 256) - return 4+8; - else { - return 4 + 24; - } - } - } -} - -/** - * Write extra encoded match length bytes to output (compressed) buffer. The caller must first check that there is enough - * room to write the bytes. 
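
One expression that recurs throughout this file is the offset cost ternary, (nMatchOffset <= 32) ? 4 : ((nMatchOffset <= 512) ? 8 : ...). Read as a helper, it is simply the number of extra bits LZSA2 spends on a non-repeated match offset, with the nibble/byte breakdown taken from the token offset modes written by lzsa_write_block_v2. The sketch below is for clarity only and is not code from the patch.

/* Extra bits spent encoding a match offset, mirroring the ternary used
 * throughout shrink_block_v2.c:
 *   repeated offset       -> 0 bits  (token mode 0xE0)
 *   offset <= 32          -> 4 bits  (one nibble)
 *   offset <= 512         -> 8 bits  (one byte)
 *   offset <= 8192 + 512  -> 12 bits (one nibble + one byte)
 *   larger offsets        -> 16 bits (two bytes)
 */
static int lzsa2_offset_extra_bits(int nMatchOffset, int nRepMatchOffset) {
   if (nMatchOffset == nRepMatchOffset)
      return 0;
   if (nMatchOffset <= 32)
      return 4;
   if (nMatchOffset <= 512)
      return 8;
   if (nMatchOffset <= 8192 + 512)
      return 12;
   return 16;
}

This is the quantity the forward arrivals parser trades against a free rep-match when scoring coding choices, and what lzsa_optimize_command_count_v2 compares when deciding whether switching a match to a nearby offset pays for itself.
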
- * - * @param pOutData pointer to output buffer - * @param nOutOffset current write index into output buffer - * @param nLength encoded match length (actual match length - MIN_MATCH_SIZE_V2) - */ -static inline int lzsa_write_match_varlen_v2(unsigned char *pOutData, int nOutOffset, const int nMaxOutDataSize, int *nCurNibbleOffset, int *nCurFreeNibbles, int nLength) { - if (nLength >= MATCH_RUN_LEN_V2) { - if (nLength < (MATCH_RUN_LEN_V2 + 15)) { - nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, nCurNibbleOffset, nCurFreeNibbles, nLength - MATCH_RUN_LEN_V2); - } - else { - nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, nCurNibbleOffset, nCurFreeNibbles, 15); - if (nOutOffset < 0) return -1; - - if ((nLength + MIN_MATCH_SIZE_V2) < 256) - pOutData[nOutOffset++] = nLength + MIN_MATCH_SIZE_V2 - 24; - else { - pOutData[nOutOffset++] = 233; - pOutData[nOutOffset++] = (nLength + MIN_MATCH_SIZE_V2) & 0xff; - pOutData[nOutOffset++] = ((nLength + MIN_MATCH_SIZE_V2) >> 8) & 0xff; - } - } - } - - return nOutOffset; -} - -/** - * Attempt to pick optimal matches using a forward arrivals parser, so as to produce the smallest possible output that decompresses to the same input - * - * @param pCompressor compression context - * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) - * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes - * @param nInsertForwardReps non-zero to insert forward repmatch candidates, zero to use the previously inserted candidates - */ -static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset, const int nReduce, const int nInsertForwardReps) { - lzsa_arrival *arrival = pCompressor->arrival; - const int nFavorRatio = (pCompressor->flags & LZSA_FLAG_FAVOR_RATIO) ? 1 : 0; - const int nMinMatchSize = pCompressor->min_match_size; - const int nDisableScore = nReduce ? 
0 : (2 * BLOCK_SIZE); - int i, j, n; - - memset(arrival + (nStartOffset << MATCHES_PER_OFFSET_SHIFT), 0, sizeof(lzsa_arrival) * ((nEndOffset - nStartOffset) << MATCHES_PER_OFFSET_SHIFT)); - - for (i = (nStartOffset << MATCHES_PER_OFFSET_SHIFT); i != (nEndOffset << MATCHES_PER_OFFSET_SHIFT); i++) { - arrival[i].cost = 0x40000000; - } - - arrival[nStartOffset << MATCHES_PER_OFFSET_SHIFT].from_slot = -1; - - for (i = nStartOffset; i != (nEndOffset - 1); i++) { - int m; - - for (j = 0; j < NMATCHES_PER_OFFSET && arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].from_slot; j++) { - const int nPrevCost = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].cost & 0x3fffffff; - int nCodingChoiceCost = nPrevCost + 8 /* literal */; - int nNumLiterals = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].num_literals + 1; - - if (nNumLiterals == LITERALS_RUN_LEN_V2) { - nCodingChoiceCost += 4; - } - else if (nNumLiterals == (LITERALS_RUN_LEN_V2 + 15)) { - nCodingChoiceCost += 8; - } - else if (nNumLiterals == 256) { - nCodingChoiceCost += 16; - } - - if (!nFavorRatio && nNumLiterals == 1) - nCodingChoiceCost += MODESWITCH_PENALTY; - - lzsa_arrival *pDestSlots = &arrival[(i + 1) << MATCHES_PER_OFFSET_SHIFT]; - if (nCodingChoiceCost <= pDestSlots[NMATCHES_PER_OFFSET - 1].cost) { - int exists = 0; - for (n = 0; - n < NMATCHES_PER_OFFSET && pDestSlots[n].cost <= nCodingChoiceCost; - n++) { - if (pDestSlots[n].rep_offset == arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_offset) { - exists = 1; - break; - } - } - - if (!exists) { - int nScore = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].score + 1; - for (n = 0; n < NMATCHES_PER_OFFSET; n++) { - lzsa_arrival *pDestArrival = &pDestSlots[n]; - if (nCodingChoiceCost < pDestArrival->cost || - (nCodingChoiceCost == pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { - - if (pDestArrival->from_slot) { - memmove(&pDestSlots[n + 1], - &pDestSlots[n], - sizeof(lzsa_arrival) * (NMATCHES_PER_OFFSET - n - 1)); - } - - pDestArrival->cost = nCodingChoiceCost; - pDestArrival->from_pos = i; - pDestArrival->from_slot = j + 1; - pDestArrival->match_offset = 0; - pDestArrival->match_len = 0; - pDestArrival->num_literals = nNumLiterals; - pDestArrival->score = nScore; - pDestArrival->rep_offset = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_offset; - pDestArrival->rep_pos = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_pos; - pDestArrival->rep_len = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_len; - break; - } - } - } - } - } - - lzsa_match *match = pCompressor->match + (i << 5); - - for (m = 0; m < 32 && match[m].length; m++) { - int nMatchLen = match[m].length; - int nMatchOffset = match[m].offset; - int nNoRepmatchOffsetCost = (nMatchOffset <= 32) ? 4 : ((nMatchOffset <= 512) ? 8 : ((nMatchOffset <= (8192 + 512)) ? 
12 : 16)); - int nStartingMatchLen, k; - int nMaxRepLen[NMATCHES_PER_OFFSET]; - - if ((i + nMatchLen) > (nEndOffset - LAST_LITERALS)) - nMatchLen = nEndOffset - LAST_LITERALS - i; - - for (j = 0; j < NMATCHES_PER_OFFSET && arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].from_slot; j++) { - int nRepOffset = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_offset; - int nCurMaxRepLen = 0; - - if (nMatchOffset != nRepOffset && - nRepOffset && - i > nRepOffset && - (i - nRepOffset + nMatchLen) <= (nEndOffset - LAST_LITERALS)) { - while (nCurMaxRepLen < nMatchLen && pInWindow[i - nRepOffset + nCurMaxRepLen] == pInWindow[i - nMatchOffset + nCurMaxRepLen]) - nCurMaxRepLen++; - } - - nMaxRepLen[j] = nCurMaxRepLen; - } - while (j < NMATCHES_PER_OFFSET) - nMaxRepLen[j++] = 0; - - for (j = 0; j < NMATCHES_PER_OFFSET && arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].from_slot; j++) { - int nRepOffset = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_offset; - - if (nMatchOffset != nRepOffset && nRepOffset && nInsertForwardReps && arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_len >= MIN_MATCH_SIZE_V2) { - int nRepPos = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_pos; - int nRepLen = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_len; - - if (nRepPos > nMatchOffset && - (nRepPos - nMatchOffset + nRepLen) <= (nEndOffset - LAST_LITERALS) && - !memcmp(pInWindow + nRepPos - nRepOffset, pInWindow + nRepPos - nMatchOffset, nRepLen)) { - - lzsa_match *fwd_match = pCompressor->match + (nRepPos << 5); - int exists = 0; - int r; - - for (r = 0; r < 32 && fwd_match[r].length >= MIN_MATCH_SIZE_V2; r++) { - if (fwd_match[r].offset == nMatchOffset) { - exists = 1; - break; - } - } - - if (!exists && r < 32) { - fwd_match[r].offset = nMatchOffset; - fwd_match[r].length = nRepLen; - } - } - } - } - - if (nMatchLen >= LEAVE_ALONE_MATCH_SIZE) - nStartingMatchLen = nMatchLen; - else - nStartingMatchLen = nMinMatchSize; - - for (k = nStartingMatchLen; k <= nMatchLen; k++) { - int nMatchLenCost = lzsa_get_match_varlen_size_v2(k - MIN_MATCH_SIZE_V2); - lzsa_arrival *pDestSlots = &arrival[(i + k) << MATCHES_PER_OFFSET_SHIFT]; - - for (j = 0; j < NMATCHES_PER_OFFSET && arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].from_slot; j++) { - const int nPrevCost = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].cost & 0x3fffffff; - int nRepOffset = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_offset; - - int nMatchOffsetCost = (nMatchOffset == nRepOffset) ? 0 : nNoRepmatchOffsetCost; - int nRepCodingChoiceCost = nPrevCost + 8 /* token */ /* the actual cost of the literals themselves accumulates up the chain */ + nMatchLenCost; - int nCodingChoiceCost = nRepCodingChoiceCost + nMatchOffsetCost; - - if (!nFavorRatio && !arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].num_literals) - nCodingChoiceCost += MODESWITCH_PENALTY; - - if (nRepCodingChoiceCost <= pDestSlots[NMATCHES_PER_OFFSET - 1].cost) { - if (nCodingChoiceCost <= pDestSlots[NMATCHES_PER_OFFSET - 1].cost) { - int exists = 0; - - for (n = 0; - n < NMATCHES_PER_OFFSET && pDestSlots[n].cost <= nCodingChoiceCost; - n++) { - if (pDestSlots[n].rep_offset == nMatchOffset) { - exists = 1; - break; - } - } - - if (!exists) { - int nScore = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].score + ((nMatchOffset == nRepOffset) ? 
2 : 3); - - for (n = 0; n < NMATCHES_PER_OFFSET; n++) { - lzsa_arrival *pDestArrival = &pDestSlots[n]; - - if (nCodingChoiceCost < pDestArrival->cost || - (nCodingChoiceCost == pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { - if (pDestArrival->from_slot) { - memmove(&pDestSlots[n + 1], - &pDestSlots[n], - sizeof(lzsa_arrival) * (NMATCHES_PER_OFFSET - n - 1)); - } - - pDestArrival->cost = nCodingChoiceCost; - pDestArrival->from_pos = i; - pDestArrival->from_slot = j + 1; - pDestArrival->match_offset = nMatchOffset; - pDestArrival->match_len = k; - pDestArrival->num_literals = 0; - pDestArrival->score = nScore; - pDestArrival->rep_offset = nMatchOffset; - pDestArrival->rep_pos = i; - pDestArrival->rep_len = k; - break; - } - } - } - } - - /* If this coding choice doesn't rep-match, see if we still get a match by using the current repmatch offset for this arrival. This can occur (and not have the - * matchfinder offer the offset in the first place, or have too many choices with the same cost to retain the repmatchable offset) when compressing regions - * of identical bytes, for instance. Checking for this provides a big compression win on some files. */ - - if (nMaxRepLen[j] >= k) { - int exists = 0; - - /* A match is possible at the rep offset; insert the extra coding choice. */ - - for (n = 0; - n < NMATCHES_PER_OFFSET && pDestSlots[n].cost <= nRepCodingChoiceCost; - n++) { - if (pDestSlots[n].rep_offset == nRepOffset) { - exists = 1; - break; - } - } - - if (!exists) { - int nScore = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].score + 2; - - for (n = 0; n < NMATCHES_PER_OFFSET; n++) { - lzsa_arrival *pDestArrival = &pDestSlots[n]; - - if (nRepCodingChoiceCost < pDestArrival->cost || - (nRepCodingChoiceCost == pDestArrival->cost && nScore < (pDestArrival->score + nDisableScore))) { - if (pDestArrival->from_slot) { - memmove(&pDestSlots[n + 1], - &pDestSlots[n], - sizeof(lzsa_arrival) * (NMATCHES_PER_OFFSET - n - 1)); - } - - pDestArrival->cost = nRepCodingChoiceCost; - pDestArrival->from_pos = i; - pDestArrival->from_slot = j + 1; - pDestArrival->match_offset = nRepOffset; - pDestArrival->match_len = k; - pDestArrival->num_literals = 0; - pDestArrival->score = nScore; - pDestArrival->rep_offset = nRepOffset; - pDestArrival->rep_pos = i; - pDestArrival->rep_len = k; - break; - } - } - } - } - } - } - } - } - } - - lzsa_arrival *end_arrival = &arrival[(i << MATCHES_PER_OFFSET_SHIFT) + 0]; - pBestMatch[i].length = 0; - pBestMatch[i].offset = 0; - - while (end_arrival->from_slot > 0 && end_arrival->from_pos >= 0) { - pBestMatch[end_arrival->from_pos].length = end_arrival->match_len; - pBestMatch[end_arrival->from_pos].offset = end_arrival->match_offset; - end_arrival = &arrival[(end_arrival->from_pos << MATCHES_PER_OFFSET_SHIFT) + (end_arrival->from_slot - 1)]; - } -} - -/** - * Attempt to minimize the number of commands issued in the compressed data block, in order to speed up decompression without - * impacting the compression ratio - * - * @param pCompressor compression context - * @param pInWindow pointer to input data window (previously compressed bytes + bytes to compress) - * @param pBestMatch optimal matches to evaluate and update - * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) - * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes - * - * @return non-zero if the number of tokens was reduced, 0 if it wasn't - */ -static int 
lzsa_optimize_command_count_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset) { - int i; - int nNumLiterals = 0; - int nPrevRepMatchOffset = 0; - int nRepMatchOffset = 0; - int nRepMatchLen = 0; - int nRepIndex = 0; - int nDidReduce = 0; - - for (i = nStartOffset; i < nEndOffset; ) { - lzsa_match *pMatch = pBestMatch + i; - - if (pMatch->length >= MIN_MATCH_SIZE_V2) { - if ((i + pMatch->length) < nEndOffset /* Don't consider the last match in the block, we can only reduce a match inbetween other tokens */) { - int nNextIndex = i + pMatch->length; - int nNextLiterals = 0; - - while (nNextIndex < nEndOffset && pBestMatch[nNextIndex].length < MIN_MATCH_SIZE_V2) { - nNextLiterals++; - nNextIndex++; - } - - if (nNextIndex < nEndOffset && pBestMatch[nNextIndex].length >= MIN_MATCH_SIZE_V2) { - /* This command is a match, is followed by 'nNextLiterals' literals and then by another match */ - - if (nRepMatchOffset && pMatch->offset != nRepMatchOffset && (pBestMatch[nNextIndex].offset != pMatch->offset || pBestMatch[nNextIndex].offset == nRepMatchOffset || - ((pMatch->offset <= 32) ? 4 : ((pMatch->offset <= 512) ? 8 : ((pMatch->offset <= (8192 + 512)) ? 12 : 16))) > - ((pBestMatch[nNextIndex].offset <= 32) ? 4 : ((pBestMatch[nNextIndex].offset <= 512) ? 8 : ((pBestMatch[nNextIndex].offset <= (8192 + 512)) ? 12 : 16))))) { - /* Check if we can change the current match's offset to be the same as the previous match's offset, and get an extra repmatch. This will occur when - * matching large regions of identical bytes for instance, where there are too many offsets to be considered by the parser, and when not compressing to favor the - * ratio (the forward arrivals parser already has this covered). */ - if (i > nRepMatchOffset && - (i - nRepMatchOffset + pMatch->length) <= (nEndOffset - LAST_LITERALS) && - !memcmp(pInWindow + i - nRepMatchOffset, pInWindow + i - pMatch->offset, pMatch->length)) { - pMatch->offset = nRepMatchOffset; - nDidReduce = 1; - } - } - - if (pBestMatch[nNextIndex].offset && pMatch->offset != pBestMatch[nNextIndex].offset && nRepMatchOffset != pBestMatch[nNextIndex].offset) { - /* Otherwise, try to gain a match forward as well */ - if (i > pBestMatch[nNextIndex].offset && (i - pBestMatch[nNextIndex].offset + pMatch->length) <= (nEndOffset - LAST_LITERALS)) { - int nMaxLen = 0; - while (nMaxLen < pMatch->length && pInWindow[i - pBestMatch[nNextIndex].offset + nMaxLen] == pInWindow[i - pMatch->offset + nMaxLen]) - nMaxLen++; - if (nMaxLen >= pMatch->length) { - /* Replace */ - pMatch->offset = pBestMatch[nNextIndex].offset; - nDidReduce = 1; - } - else if (nMaxLen >= 2 && pMatch->offset != nRepMatchOffset) { - int nPartialSizeBefore, nPartialSizeAfter; - - nPartialSizeBefore = lzsa_get_match_varlen_size_v2(pMatch->length - MIN_MATCH_SIZE_V2); - nPartialSizeBefore += (pMatch->offset <= 32) ? 4 : ((pMatch->offset <= 512) ? 8 : ((pMatch->offset <= (8192 + 512)) ? 
12 : 16)); - nPartialSizeBefore += lzsa_get_literals_varlen_size_v2(nNextLiterals); - - nPartialSizeAfter = lzsa_get_match_varlen_size_v2(nMaxLen - MIN_MATCH_SIZE_V2); - nPartialSizeAfter += lzsa_get_literals_varlen_size_v2(nNextLiterals + (pMatch->length - nMaxLen)) + ((pMatch->length - nMaxLen) << 3); - - if (nPartialSizeAfter < nPartialSizeBefore) { - int j; - - /* We gain a repmatch that is shorter than the original match as this is the best we can do, so it is followed by extra literals, but - * we have calculated that this is shorter */ - pMatch->offset = pBestMatch[nNextIndex].offset; - for (j = nMaxLen; j < pMatch->length; j++) { - pBestMatch[i + j].length = 0; - } - pMatch->length = nMaxLen; - nDidReduce = 1; - } - } - } - } - - if (pMatch->length < 9 /* Don't waste time considering large matches, they will always win over literals */) { - /* Calculate this command's current cost (excluding 'nNumLiterals' bytes) */ - - int nCurCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + lzsa_get_match_varlen_size_v2(pMatch->length - MIN_MATCH_SIZE_V2); - if (pMatch->offset != nRepMatchOffset) - nCurCommandSize += (pMatch->offset <= 32) ? 4 : ((pMatch->offset <= 512) ? 8 : ((pMatch->offset <= (8192 + 512)) ? 12 : 16)); - - /* Calculate the next command's current cost */ - int nNextCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNextLiterals) + (nNextLiterals << 3) + lzsa_get_match_varlen_size_v2(pBestMatch[nNextIndex].length - MIN_MATCH_SIZE_V2); - if (pBestMatch[nNextIndex].offset != pMatch->offset) - nNextCommandSize += (pBestMatch[nNextIndex].offset <= 32) ? 4 : ((pBestMatch[nNextIndex].offset <= 512) ? 8 : ((pBestMatch[nNextIndex].offset <= (8192 + 512)) ? 12 : 16)); - - int nOriginalCombinedCommandSize = nCurCommandSize + nNextCommandSize; - - /* Calculate the cost of replacing this match command by literals + the next command with the cost of encoding these literals (excluding 'nNumLiterals' bytes) */ - int nReducedCommandSize = (pMatch->length << 3) + 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals + pMatch->length + nNextLiterals) + (nNextLiterals << 3) + lzsa_get_match_varlen_size_v2(pBestMatch[nNextIndex].length - MIN_MATCH_SIZE_V2); - if (pBestMatch[nNextIndex].offset != nRepMatchOffset) - nReducedCommandSize += (pBestMatch[nNextIndex].offset <= 32) ? 4 : ((pBestMatch[nNextIndex].offset <= 512) ? 8 : ((pBestMatch[nNextIndex].offset <= (8192 + 512)) ? 12 : 16)); - - int nReplaceRepOffset = 0; - if (nRepMatchOffset && nRepMatchOffset != nPrevRepMatchOffset && nRepMatchLen >= MIN_MATCH_SIZE_V2 && nRepMatchOffset != pBestMatch[nNextIndex].offset && nRepIndex > pBestMatch[nNextIndex].offset && - (nRepIndex - pBestMatch[nNextIndex].offset + nRepMatchLen) <= (nEndOffset - LAST_LITERALS) && - !memcmp(pInWindow + nRepIndex - nRepMatchOffset, pInWindow + nRepIndex - pBestMatch[nNextIndex].offset, nRepMatchLen)) { - /* Replacing this match command by literals would let us create a repmatch */ - nReplaceRepOffset = 1; - nReducedCommandSize -= (nRepMatchOffset <= 32) ? 4 : ((nRepMatchOffset <= 512) ? 8 : ((nRepMatchOffset <= (8192 + 512)) ? 
12 : 16)); - } - - if (nOriginalCombinedCommandSize >= nReducedCommandSize) { - /* Reduce */ - int nMatchLen = pMatch->length; - int j; - - for (j = 0; j < nMatchLen; j++) { - pBestMatch[i + j].length = 0; - } - - nDidReduce = 1; - - if (nReplaceRepOffset) { - pBestMatch[nRepIndex].offset = pBestMatch[nNextIndex].offset; - nRepMatchOffset = pBestMatch[nNextIndex].offset; - } - continue; - } - } - } - } - - if ((i + pMatch->length) < nEndOffset && pMatch->length >= LCP_MAX && - pMatch->offset && pMatch->offset <= 32 && pBestMatch[i + pMatch->length].offset == pMatch->offset && (pMatch->length % pMatch->offset) == 0 && - (pMatch->length + pBestMatch[i + pMatch->length].length) <= MAX_VARLEN) { - int nMatchLen = pMatch->length; - - /* Join */ - - pMatch->length += pBestMatch[i + nMatchLen].length; - pBestMatch[i + nMatchLen].offset = 0; - pBestMatch[i + nMatchLen].length = -1; - nDidReduce = 1; - continue; - } - - nPrevRepMatchOffset = nRepMatchOffset; - nRepMatchOffset = pMatch->offset; - nRepMatchLen = pMatch->length; - nRepIndex = i; - - i += pMatch->length; - nNumLiterals = 0; - } - else { - nNumLiterals++; - i++; - } - } - - return nDidReduce; -} - -/** - * Get compressed data block size - * - * @param pCompressor compression context - * @param pBestMatch optimal matches to emit - * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) - * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes - * - * @return size of compressed data that will be written to output buffer - */ -static int lzsa_get_compressed_size_v2(lzsa_compressor *pCompressor, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset) { - int i; - int nNumLiterals = 0; - int nOutOffset = 0; - int nRepMatchOffset = 0; - int nCompressedSize = 0; - - for (i = nStartOffset; i < nEndOffset; ) { - const lzsa_match *pMatch = pBestMatch + i; - - if (pMatch->length >= MIN_MATCH_SIZE_V2) { - int nMatchOffset = pMatch->offset; - int nMatchLen = pMatch->length; - int nEncodedMatchLen = nMatchLen - MIN_MATCH_SIZE_V2; - int nOffsetSize; - - if (nMatchOffset == nRepMatchOffset) { - nOffsetSize = 0; - } - else { - if (nMatchOffset <= 32) { - nOffsetSize = 4; - } - else if (nMatchOffset <= 512) { - nOffsetSize = 8; - } - else if (nMatchOffset <= (8192 + 512)) { - nOffsetSize = 12; - } - else { - nOffsetSize = 16; - } - } - - int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + (nNumLiterals << 3) + nOffsetSize /* match offset */ + lzsa_get_match_varlen_size_v2(nEncodedMatchLen); - nCompressedSize += nCommandSize; - - nNumLiterals = 0; - nRepMatchOffset = nMatchOffset; - i += nMatchLen; - } - else { - nNumLiterals++; - i++; - } - } - - { - int nTokenLiteralsLen = (nNumLiterals >= LITERALS_RUN_LEN_V2) ? 
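The 'Join' rule above fuses two adjacent commands that replay the same short period. A sketch of the core precondition follows; the real check additionally requires the first match to be at least LCP_MAX bytes long, the following command to use the same offset, and the joined length to stay within MAX_VARLEN (can_join_v2 is an invented name):

/* A match with offset <= 32 whose length is a whole number of periods can absorb
 * the following match at the same offset, e.g. a 2048-byte offset-4 match over
 * repeating "abcd" data followed by more offset-4 data. */
static int can_join_v2(const int nOffset, const int nFirstLen) {
   return nOffset > 0 && nOffset <= 32 && (nFirstLen % nOffset) == 0;
}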
LITERALS_RUN_LEN_V2 : nNumLiterals; - int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + (nNumLiterals << 3); - - nCompressedSize += nCommandSize; - nNumLiterals = 0; - } - - if (pCompressor->flags & LZSA_FLAG_RAW_BLOCK) { - nCompressedSize += (8 + 4 + 8); - } - - return nCompressedSize; -} - -/** - * Emit block of compressed data - * - * @param pCompressor compression context - * @param pBestMatch optimal matches to emit - * @param pInWindow pointer to input data window (previously compressed bytes + bytes to compress) - * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) - * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes - * @param pOutData pointer to output buffer - * @param nMaxOutDataSize maximum size of output buffer, in bytes - * - * @return size of compressed data in output buffer, or -1 if the data is uncompressible - */ -static int lzsa_write_block_v2(lzsa_compressor *pCompressor, lzsa_match *pBestMatch, const unsigned char *pInWindow, const int nStartOffset, const int nEndOffset, unsigned char *pOutData, const int nMaxOutDataSize) { - int i; - int nNumLiterals = 0; - int nInFirstLiteralOffset = 0; - int nOutOffset = 0; - int nCurNibbleOffset = -1, nCurFreeNibbles = 0; - int nRepMatchOffset = 0; - - for (i = nStartOffset; i < nEndOffset; ) { - const lzsa_match *pMatch = pBestMatch + i; - - if (pMatch->length >= MIN_MATCH_SIZE_V2) { - int nMatchOffset = pMatch->offset; - int nMatchLen = pMatch->length; - int nEncodedMatchLen = nMatchLen - MIN_MATCH_SIZE_V2; - int nTokenLiteralsLen = (nNumLiterals >= LITERALS_RUN_LEN_V2) ? LITERALS_RUN_LEN_V2 : nNumLiterals; - int nTokenMatchLen = (nEncodedMatchLen >= MATCH_RUN_LEN_V2) ? 
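The (8 + 4 + 8) allowance added for raw blocks above corresponds to the end-of-data marker that the block writer emits later: a zero offset byte, a 15 nibble for the extended match length, and the 232 terminator byte. Spelled out as a named constant (the name is invented; the source keeps the literal expression):

/* Raw-block end-of-data marker size, in bits: 8-bit zero offset byte,
 * 4-bit extended match length nibble, 8-bit EOD match length byte. */
enum { LZSA2_RAW_EOD_BITS = 8 + 4 + 8 };   /* 20 bits */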
MATCH_RUN_LEN_V2 : nEncodedMatchLen; - int nTokenOffsetMode; - int nOffsetSize; - - if (nMatchOffset == nRepMatchOffset) { - nTokenOffsetMode = 0xe0; - nOffsetSize = 0; - } - else { - if (nMatchOffset <= 32) { - nTokenOffsetMode = 0x00 | ((((-nMatchOffset) & 0x01) << 5) ^ 0x20); - nOffsetSize = 4; - } - else if (nMatchOffset <= 512) { - nTokenOffsetMode = 0x40 | ((((-nMatchOffset) & 0x100) >> 3) ^ 0x20); - nOffsetSize = 8; - } - else if (nMatchOffset <= (8192 + 512)) { - nTokenOffsetMode = 0x80 | ((((-(nMatchOffset - 512)) & 0x0100) >> 3) ^ 0x20); - nOffsetSize = 12; - } - else { - nTokenOffsetMode = 0xc0; - nOffsetSize = 16; - } - } - - int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + (nNumLiterals << 3) + nOffsetSize /* match offset */ + lzsa_get_match_varlen_size_v2(nEncodedMatchLen); - - if ((nOutOffset + ((nCommandSize + 7) >> 3)) > nMaxOutDataSize) - return -1; - if (nMatchOffset < MIN_OFFSET || nMatchOffset > MAX_OFFSET) - return -1; - - pOutData[nOutOffset++] = nTokenOffsetMode | (nTokenLiteralsLen << 3) | nTokenMatchLen; - nOutOffset = lzsa_write_literals_varlen_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, nNumLiterals); - if (nOutOffset < 0) return -1; - - if (nNumLiterals < pCompressor->stats.min_literals || pCompressor->stats.min_literals == -1) - pCompressor->stats.min_literals = nNumLiterals; - if (nNumLiterals > pCompressor->stats.max_literals) - pCompressor->stats.max_literals = nNumLiterals; - pCompressor->stats.total_literals += nNumLiterals; - pCompressor->stats.literals_divisor++; - - if (nNumLiterals != 0) { - memcpy(pOutData + nOutOffset, pInWindow + nInFirstLiteralOffset, nNumLiterals); - nOutOffset += nNumLiterals; - nNumLiterals = 0; - } - - if (nTokenOffsetMode == 0x00 || nTokenOffsetMode == 0x20) { - nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, ((-nMatchOffset) & 0x1e) >> 1); - if (nOutOffset < 0) return -1; - } - else if (nTokenOffsetMode == 0x40 || nTokenOffsetMode == 0x60) { - pOutData[nOutOffset++] = (-nMatchOffset) & 0xff; - } - else if (nTokenOffsetMode == 0x80 || nTokenOffsetMode == 0xa0) { - nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, ((-(nMatchOffset - 512)) >> 9) & 0x0f); - if (nOutOffset < 0) return -1; - pOutData[nOutOffset++] = (-(nMatchOffset - 512)) & 0xff; - } - else if (nTokenOffsetMode == 0xc0) { - pOutData[nOutOffset++] = (-nMatchOffset) >> 8; - pOutData[nOutOffset++] = (-nMatchOffset) & 0xff; - } - - if (nMatchOffset == nRepMatchOffset) - pCompressor->stats.num_rep_offsets++; - - nRepMatchOffset = nMatchOffset; - - nOutOffset = lzsa_write_match_varlen_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, nEncodedMatchLen); - if (nOutOffset < 0) return -1; - - if (nMatchOffset < pCompressor->stats.min_offset || pCompressor->stats.min_offset == -1) - pCompressor->stats.min_offset = nMatchOffset; - if (nMatchOffset > pCompressor->stats.max_offset) - pCompressor->stats.max_offset = nMatchOffset; - pCompressor->stats.total_offsets += nMatchOffset; - - if (nMatchLen < pCompressor->stats.min_match_len || pCompressor->stats.min_match_len == -1) - pCompressor->stats.min_match_len = nMatchLen; - if (nMatchLen > pCompressor->stats.max_match_len) - pCompressor->stats.max_match_len = nMatchLen; - pCompressor->stats.total_match_lens += nMatchLen; - pCompressor->stats.match_divisor++; - - if (nMatchOffset == 1) { - if (nMatchLen < 
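The token byte assembled above packs three fields; reading the shifts and masks back gives the layout sketched below. This is a reading aid inferred from the writer code, not a reference decoder, and it assumes LITERALS_RUN_LEN_V2 and MATCH_RUN_LEN_V2 are the 2-bit and 3-bit field maxima that the shifts imply:

/* Bits 7..5: offset mode (0x00/0x20 5-bit, 0x40/0x60 9-bit, 0x80/0xa0 13-bit,
 *            0xc0 16-bit, 0xe0 reuse the previous offset); for the short modes
 *            the 0x20 bit doubles as one bit of the encoded offset.
 * Bits 4..3: literal run length, extended separately when saturated.
 * Bits 2..0: encoded match length, extended separately when saturated. */
static void split_token_v2(const unsigned char nToken, int *pnMode, int *pnLiterals, int *pnMatchLen) {
   *pnMode     = nToken & 0xe0;
   *pnLiterals = (nToken >> 3) & 0x03;
   *pnMatchLen = nToken & 0x07;
}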
pCompressor->stats.min_rle1_len || pCompressor->stats.min_rle1_len == -1) - pCompressor->stats.min_rle1_len = nMatchLen; - if (nMatchLen > pCompressor->stats.max_rle1_len) - pCompressor->stats.max_rle1_len = nMatchLen; - pCompressor->stats.total_rle1_lens += nMatchLen; - pCompressor->stats.rle1_divisor++; - } - else if (nMatchOffset == 2) { - if (nMatchLen < pCompressor->stats.min_rle2_len || pCompressor->stats.min_rle2_len == -1) - pCompressor->stats.min_rle2_len = nMatchLen; - if (nMatchLen > pCompressor->stats.max_rle2_len) - pCompressor->stats.max_rle2_len = nMatchLen; - pCompressor->stats.total_rle2_lens += nMatchLen; - pCompressor->stats.rle2_divisor++; - } - - i += nMatchLen; - - if (pCompressor->flags & LZSA_FLAG_RAW_BLOCK) { - int nCurSafeDist = (i - nStartOffset) - nOutOffset; - if (nCurSafeDist >= 0 && pCompressor->safe_dist < nCurSafeDist) - pCompressor->safe_dist = nCurSafeDist; - } - - pCompressor->num_commands++; - } - else { - if (nNumLiterals == 0) - nInFirstLiteralOffset = i; - nNumLiterals++; - i++; - } - } - - { - int nTokenLiteralsLen = (nNumLiterals >= LITERALS_RUN_LEN_V2) ? LITERALS_RUN_LEN_V2 : nNumLiterals; - int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + (nNumLiterals << 3); - - if ((nOutOffset + ((nCommandSize + 7) >> 3)) > nMaxOutDataSize) - return -1; - - if (pCompressor->flags & LZSA_FLAG_RAW_BLOCK) - pOutData[nOutOffset++] = (nTokenLiteralsLen << 3) | 0x47; - else - pOutData[nOutOffset++] = (nTokenLiteralsLen << 3) | 0x00; - nOutOffset = lzsa_write_literals_varlen_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, nNumLiterals); - if (nOutOffset < 0) return -1; - - if (nNumLiterals < pCompressor->stats.min_literals || pCompressor->stats.min_literals == -1) - pCompressor->stats.min_literals = nNumLiterals; - if (nNumLiterals > pCompressor->stats.max_literals) - pCompressor->stats.max_literals = nNumLiterals; - pCompressor->stats.total_literals += nNumLiterals; - pCompressor->stats.literals_divisor++; - - if (nNumLiterals != 0) { - memcpy(pOutData + nOutOffset, pInWindow + nInFirstLiteralOffset, nNumLiterals); - nOutOffset += nNumLiterals; - nNumLiterals = 0; - } - - if (pCompressor->flags & LZSA_FLAG_RAW_BLOCK) { - int nCurSafeDist = (i - nStartOffset) - nOutOffset; - if (nCurSafeDist >= 0 && pCompressor->safe_dist < nCurSafeDist) - pCompressor->safe_dist = nCurSafeDist; - } - - pCompressor->num_commands++; - } - - if (pCompressor->flags & LZSA_FLAG_RAW_BLOCK) { - /* Emit EOD marker for raw block */ - - if (nOutOffset >= nMaxOutDataSize) - return -1; - pOutData[nOutOffset++] = 0; /* Match offset */ - - nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, 15); /* Extended match length nibble */ - if (nOutOffset < 0) return -1; - - if ((nOutOffset + 1) > nMaxOutDataSize) - return -1; - - pOutData[nOutOffset++] = 232; /* EOD match length byte */ - } - - if (nCurNibbleOffset != -1) { - nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, 0); - if (nOutOffset < 0 || nCurNibbleOffset != -1) - return -1; - } - - return nOutOffset; -} - -/** - * Emit raw block of uncompressible data - * - * @param pCompressor compression context - * @param pInWindow pointer to input data window (previously compressed bytes + bytes to compress) - * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) - * @param nEndOffset offset to end finding matches at 
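The safe_dist bookkeeping above records, at each command boundary, how far the decompressed data has run ahead of the compressed bytes emitted so far; the largest such lead over the block is the figure an in-place depacking setup for raw blocks needs. A small numeric illustration with invented figures:

/* Illustration only: 1000 input bytes covered (i - nStartOffset) against 400
 * compressed bytes written (nOutOffset) gives a lead of 600 bytes at this point;
 * pCompressor->safe_dist keeps the maximum of these leads across the block. */
static int example_safe_dist(void) {
   const int nInputCovered = 1000, nCompressedWritten = 400;
   return nInputCovered - nCompressedWritten;   /* 600 */
}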
(typically the size of the total input window in bytes - * @param pOutData pointer to output buffer - * @param nMaxOutDataSize maximum size of output buffer, in bytes - * - * @return size of compressed data in output buffer, or -1 if the data is uncompressible - */ -static int lzsa_write_raw_uncompressed_block_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, const int nStartOffset, const int nEndOffset, unsigned char *pOutData, const int nMaxOutDataSize) { - int nCurNibbleOffset = -1, nCurFreeNibbles = 0; - int nNumLiterals = nEndOffset - nStartOffset; - int nTokenLiteralsLen = (nNumLiterals >= LITERALS_RUN_LEN_V2) ? LITERALS_RUN_LEN_V2 : nNumLiterals; - int nOutOffset = 0; - - int nCommandSize = 8 /* token */ + lzsa_get_literals_varlen_size_v2(nNumLiterals) + (nNumLiterals << 3) + 8 + 4 + 8; - if ((nOutOffset + ((nCommandSize + 7) >> 3)) > nMaxOutDataSize) - return -1; - - pCompressor->num_commands = 0; - pOutData[nOutOffset++] = (nTokenLiteralsLen << 3) | 0x47; - - nOutOffset = lzsa_write_literals_varlen_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, nNumLiterals); - if (nOutOffset < 0) return -1; - - if (nNumLiterals != 0) { - memcpy(pOutData + nOutOffset, pInWindow + nStartOffset, nNumLiterals); - nOutOffset += nNumLiterals; - nNumLiterals = 0; - } - - /* Emit EOD marker for raw block */ - - pOutData[nOutOffset++] = 0; /* Match offset */ - - nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, 15); /* Extended match length nibble */ - if (nOutOffset < 0) return -1; - - if ((nOutOffset + 1) > nMaxOutDataSize) - return -1; - - pOutData[nOutOffset++] = 232; /* EOD match length byte */ - - pCompressor->num_commands++; - - if (nCurNibbleOffset != -1) { - nOutOffset = lzsa_write_nibble_v2(pOutData, nOutOffset, nMaxOutDataSize, &nCurNibbleOffset, &nCurFreeNibbles, 0); - if (nOutOffset < 0 || nCurNibbleOffset != -1) - return -1; - } - - return nOutOffset; -} - -/** - * Select the most optimal matches, reduce the token count if possible, and then emit a block of compressed LZSA2 data - * - * @param pCompressor compression context - * @param pInWindow pointer to input data window (previously compressed bytes + bytes to compress) - * @param nPreviousBlockSize number of previously compressed bytes (or 0 for none) - * @param nInDataSize number of input bytes to compress - * @param pOutData pointer to output buffer - * @param nMaxOutDataSize maximum size of output buffer, in bytes - * - * @return size of compressed data in output buffer, or -1 if the data is uncompressible - */ -int lzsa_optimize_and_write_block_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, const int nPreviousBlockSize, const int nInDataSize, unsigned char *pOutData, const int nMaxOutDataSize) { - int nResult, nBaseCompressedSize; - - /* Compress optimally without breaking ties in favor of less tokens */ - - lzsa_optimize_forward_v2(pCompressor, pInWindow, pCompressor->best_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 0 /* reduce */, (nInDataSize < 65536) ? 
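A caller-side sketch of how this block driver is used; the streaming code that normally calls it is not part of this patch, so the wrapper and buffer names below are invented, and the snippet assumes the lzsa shrink headers declaring lzsa_compressor and lzsa_optimize_and_write_block_v2 are included:

/* Compress one block of nBlockInputSize bytes that follows nPrevSize bytes of
 * already-processed history at the start of pWindow. A negative return means the
 * block did not fit in nMaxOut; with LZSA_FLAG_RAW_BLOCK set, the driver has
 * already tried the raw uncompressed fallback before giving up. */
static int compress_one_block(lzsa_compressor *pCompressor, const unsigned char *pWindow,
                              const int nPrevSize, const int nBlockInputSize,
                              unsigned char *pOutBuf, const int nMaxOut) {
   return lzsa_optimize_and_write_block_v2(pCompressor, pWindow, nPrevSize,
                                           nBlockInputSize, pOutBuf, nMaxOut);
}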
1 : 0 /* insert forward reps */); - - int nDidReduce; - int nPasses = 0; - do { - nDidReduce = lzsa_optimize_command_count_v2(pCompressor, pInWindow, pCompressor->best_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); - nPasses++; - } while (nDidReduce && nPasses < 20); - - nBaseCompressedSize = lzsa_get_compressed_size_v2(pCompressor, pCompressor->best_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); - lzsa_match *pBestMatch = pCompressor->best_match; - - if (nBaseCompressedSize > 0 && nInDataSize < 65536) { - int nReducedCompressedSize; - - /* Compress optimally and do break ties in favor of less tokens */ - lzsa_optimize_forward_v2(pCompressor, pInWindow, pCompressor->improved_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 1 /* reduce */, 0 /* use forward reps */); - - nPasses = 0; - do { - nDidReduce = lzsa_optimize_command_count_v2(pCompressor, pInWindow, pCompressor->improved_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); - nPasses++; - } while (nDidReduce && nPasses < 20); - - nReducedCompressedSize = lzsa_get_compressed_size_v2(pCompressor, pCompressor->improved_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize); - if (nReducedCompressedSize > 0 && nReducedCompressedSize <= nBaseCompressedSize) { - /* Pick the parse with the reduced number of tokens as it didn't negatively affect the size */ - pBestMatch = pCompressor->improved_match; - } - } - - nResult = lzsa_write_block_v2(pCompressor, pBestMatch, pInWindow, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, pOutData, nMaxOutDataSize); - if (nResult < 0 && pCompressor->flags & LZSA_FLAG_RAW_BLOCK) { - nResult = lzsa_write_raw_uncompressed_block_v2(pCompressor, pInWindow, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, pOutData, nMaxOutDataSize); - } - - return nResult; -} From 7dd039a152a361aa2f7da5cd66f351a16f8f51ea Mon Sep 17 00:00:00 2001 From: Emmanuel Marty Date: Tue, 22 Oct 2019 12:37:16 +0200 Subject: [PATCH 14/17] Delete shrink_context.h --- shrink_context.h | 175 ----------------------------------------------- 1 file changed, 175 deletions(-) delete mode 100644 shrink_context.h diff --git a/shrink_context.h b/shrink_context.h deleted file mode 100644 index 5872b59..0000000 --- a/shrink_context.h +++ /dev/null @@ -1,175 +0,0 @@ -/* - * shrink_context.h - compression context definitions - * - * Copyright (C) 2019 Emmanuel Marty - * - * This software is provided 'as-is', without any express or implied - * warranty. In no event will the authors be held liable for any damages - * arising from the use of this software. - * - * Permission is granted to anyone to use this software for any purpose, - * including commercial applications, and to alter it and redistribute it - * freely, subject to the following restrictions: - * - * 1. The origin of this software must not be misrepresented; you must not - * claim that you wrote the original software. If you use this software - * in a product, an acknowledgment in the product documentation would be - * appreciated but is not required. - * 2. Altered source versions must be plainly marked as such, and must not be - * misrepresented as being the original software. - * 3. This notice may not be removed or altered from any source distribution. - */ - -/* - * Uses the libdivsufsort library Copyright (c) 2003-2008 Yuta Mori - * - * Inspired by LZ4 by Yann Collet. 
https://github.com/lz4/lz4 - * With help, ideas, optimizations and speed measurements by spke - * With ideas from Lizard by Przemyslaw Skibinski and Yann Collet. https://github.com/inikep/lizard - * Also with ideas from smallz4 by Stephan Brumme. https://create.stephan-brumme.com/smallz4/ - * - */ - -#ifndef _SHRINK_CONTEXT_H -#define _SHRINK_CONTEXT_H - -#include "divsufsort.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define LCP_BITS 14 -#define TAG_BITS 3 -#define LCP_MAX (1U<<(LCP_BITS - TAG_BITS - 1)) -#define LCP_AND_TAG_MAX (1U<<(LCP_BITS - 1)) -#define LCP_SHIFT (31-LCP_BITS) -#define LCP_MASK (((1U< Date: Tue, 22 Oct 2019 12:37:46 +0200 Subject: [PATCH 15/17] Further increase LZSA2 ratio by ~0.1% on average --- src/shrink_block_v2.c | 51 +++++++++++++++++++++++++++++++++++++------ src/shrink_context.h | 2 ++ 2 files changed, 46 insertions(+), 7 deletions(-) diff --git a/src/shrink_block_v2.c b/src/shrink_block_v2.c index db7785d..4d23314 100644 --- a/src/shrink_block_v2.c +++ b/src/shrink_block_v2.c @@ -180,8 +180,9 @@ static inline int lzsa_write_match_varlen_v2(unsigned char *pOutData, int nOutOf * @param pCompressor compression context * @param nStartOffset current offset in input window (typically the number of previously compressed bytes) * @param nEndOffset offset to end finding matches at (typically the size of the total input window in bytes + * @param nInsertForwardReps non-zero to insert forward repmatch candidates, zero to use the previously inserted candidates */ -static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset, const int nReduce) { +static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigned char *pInWindow, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset, const int nReduce, const int nInsertForwardReps) { lzsa_arrival *arrival = pCompressor->arrival; const int nFavorRatio = (pCompressor->flags & LZSA_FLAG_FAVOR_RATIO) ? 
1 : 0; const int nMinMatchSize = pCompressor->min_match_size; @@ -250,6 +251,8 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne pDestArrival->num_literals = nNumLiterals; pDestArrival->score = nScore; pDestArrival->rep_offset = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_offset; + pDestArrival->rep_pos = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_pos; + pDestArrival->rep_len = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_len; break; } } @@ -275,7 +278,7 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne if (nMatchOffset != nRepOffset && nRepOffset && - i >= nRepOffset && + i > nRepOffset && (i - nRepOffset + nMatchLen) <= (nEndOffset - LAST_LITERALS)) { while (nCurMaxRepLen < nMatchLen && pInWindow[i - nRepOffset + nCurMaxRepLen] == pInWindow[i - nMatchOffset + nCurMaxRepLen]) nCurMaxRepLen++; @@ -286,6 +289,36 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne while (j < NMATCHES_PER_OFFSET) nMaxRepLen[j++] = 0; + for (j = 0; j < NMATCHES_PER_OFFSET && arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].from_slot; j++) { + int nRepOffset = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_offset; + + if (nMatchOffset != nRepOffset && nRepOffset && nInsertForwardReps && arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_len >= MIN_MATCH_SIZE_V2) { + int nRepPos = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_pos; + int nRepLen = arrival[(i << MATCHES_PER_OFFSET_SHIFT) + j].rep_len; + + if (nRepPos > nMatchOffset && + (nRepPos - nMatchOffset + nRepLen) <= (nEndOffset - LAST_LITERALS) && + !memcmp(pInWindow + nRepPos - nRepOffset, pInWindow + nRepPos - nMatchOffset, nRepLen)) { + + lzsa_match *fwd_match = pCompressor->match + (nRepPos << 5); + int exists = 0; + int r; + + for (r = 0; r < 32 && fwd_match[r].length >= MIN_MATCH_SIZE_V2; r++) { + if (fwd_match[r].offset == nMatchOffset) { + exists = 1; + break; + } + } + + if (!exists && r < 32) { + fwd_match[r].offset = nMatchOffset; + fwd_match[r].length = nRepLen; + } + } + } + } + if (nMatchLen >= LEAVE_ALONE_MATCH_SIZE) nStartingMatchLen = nMatchLen; else @@ -341,6 +374,8 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne pDestArrival->num_literals = 0; pDestArrival->score = nScore; pDestArrival->rep_offset = nMatchOffset; + pDestArrival->rep_pos = i; + pDestArrival->rep_len = k; break; } } @@ -387,6 +422,8 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne pDestArrival->num_literals = 0; pDestArrival->score = nScore; pDestArrival->rep_offset = nRepOffset; + pDestArrival->rep_pos = i; + pDestArrival->rep_len = k; break; } } @@ -452,7 +489,7 @@ static int lzsa_optimize_command_count_v2(lzsa_compressor *pCompressor, const un /* Check if we can change the current match's offset to be the same as the previous match's offset, and get an extra repmatch. This will occur when * matching large regions of identical bytes for instance, where there are too many offsets to be considered by the parser, and when not compressing to favor the * ratio (the forward arrivals parser already has this covered). 
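The new loop above seeds the match table at a future position: when the current arrival proves that the bytes at rep_pos are also reachable at the current match offset, that offset is recorded there as an extra candidate so a later pass can pick it up as a repmatch. A self-contained sketch of the 32-slot insertion pattern; the struct and names are simplified stand-ins, and the real code only treats slots with length >= MIN_MATCH_SIZE_V2 as occupied:

typedef struct { int offset; int length; } fwd_candidate_t;   /* illustrative, not the real lzsa_match */

/* Append a forward repmatch candidate to a position's fixed list of 32 slots,
 * unless the same offset is already recorded there or the list is full. */
static void insert_fwd_candidate(fwd_candidate_t *pSlots /* 32 entries */, const int nOffset, const int nLen) {
   int r;
   for (r = 0; r < 32 && pSlots[r].length != 0; r++) {
      if (pSlots[r].offset == nOffset)
         return;   /* already known at this position */
   }
   if (r < 32) {
      pSlots[r].offset = nOffset;
      pSlots[r].length = nLen;
   }
}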
*/ - if (i >= nRepMatchOffset && + if (i > nRepMatchOffset && (i - nRepMatchOffset + pMatch->length) <= (nEndOffset - LAST_LITERALS) && !memcmp(pInWindow + i - nRepMatchOffset, pInWindow + i - pMatch->offset, pMatch->length)) { pMatch->offset = nRepMatchOffset; @@ -462,7 +499,7 @@ static int lzsa_optimize_command_count_v2(lzsa_compressor *pCompressor, const un if (pBestMatch[nNextIndex].offset && pMatch->offset != pBestMatch[nNextIndex].offset && nRepMatchOffset != pBestMatch[nNextIndex].offset) { /* Otherwise, try to gain a match forward as well */ - if (i >= pBestMatch[nNextIndex].offset && (i - pBestMatch[nNextIndex].offset + pMatch->length) <= (nEndOffset - LAST_LITERALS)) { + if (i > pBestMatch[nNextIndex].offset && (i - pBestMatch[nNextIndex].offset + pMatch->length) <= (nEndOffset - LAST_LITERALS)) { int nMaxLen = 0; while (nMaxLen < pMatch->length && pInWindow[i - pBestMatch[nNextIndex].offset + nMaxLen] == pInWindow[i - pMatch->offset + nMaxLen]) nMaxLen++; @@ -517,7 +554,7 @@ static int lzsa_optimize_command_count_v2(lzsa_compressor *pCompressor, const un nReducedCommandSize += (pBestMatch[nNextIndex].offset <= 32) ? 4 : ((pBestMatch[nNextIndex].offset <= 512) ? 8 : ((pBestMatch[nNextIndex].offset <= (8192 + 512)) ? 12 : 16)); int nReplaceRepOffset = 0; - if (nRepMatchOffset && nRepMatchOffset != nPrevRepMatchOffset && nRepMatchLen >= MIN_MATCH_SIZE_V2 && nRepMatchOffset != pBestMatch[nNextIndex].offset && nRepIndex >= pBestMatch[nNextIndex].offset && + if (nRepMatchOffset && nRepMatchOffset != nPrevRepMatchOffset && nRepMatchLen >= MIN_MATCH_SIZE_V2 && nRepMatchOffset != pBestMatch[nNextIndex].offset && nRepIndex > pBestMatch[nNextIndex].offset && (nRepIndex - pBestMatch[nNextIndex].offset + nRepMatchLen) <= (nEndOffset - LAST_LITERALS) && !memcmp(pInWindow + nRepIndex - nRepMatchOffset, pInWindow + nRepIndex - pBestMatch[nNextIndex].offset, nRepMatchLen)) { /* Replacing this match command by literals would let us create a repmatch */ @@ -937,7 +974,7 @@ int lzsa_optimize_and_write_block_v2(lzsa_compressor *pCompressor, const unsigne /* Compress optimally without breaking ties in favor of less tokens */ - lzsa_optimize_forward_v2(pCompressor, pInWindow, pCompressor->best_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 0 /* reduce */); + lzsa_optimize_forward_v2(pCompressor, pInWindow, pCompressor->best_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 0 /* reduce */, (nInDataSize < 65536) ? 
1 : 0 /* insert forward reps */);
 
    int nDidReduce;
    int nPasses = 0;
@@ -953,7 +990,7 @@ int lzsa_optimize_and_write_block_v2(lzsa_compressor *pCompressor, const unsigne
       int nReducedCompressedSize;
 
       /* Compress optimally and do break ties in favor of less tokens */
-      lzsa_optimize_forward_v2(pCompressor, pInWindow, pCompressor->improved_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 1 /* reduce */);
+      lzsa_optimize_forward_v2(pCompressor, pInWindow, pCompressor->improved_match, nPreviousBlockSize, nPreviousBlockSize + nInDataSize, 1 /* reduce */, 0 /* use forward reps */);
 
       nPasses = 0;
       do {
diff --git a/src/shrink_context.h b/src/shrink_context.h
index c5cc501..5872b59 100644
--- a/src/shrink_context.h
+++ b/src/shrink_context.h
@@ -72,6 +72,8 @@ typedef struct {
    short from_slot;
 
    unsigned short rep_offset;
+   unsigned short rep_len;
+   int rep_pos;
    int num_literals;
    int score;
 
From 05d77095ca5784f7002b7e94f43a778a61ab1e04 Mon Sep 17 00:00:00 2001
From: Emmanuel Marty
Date: Tue, 22 Oct 2019 12:39:27 +0200
Subject: [PATCH 16/17] Bump version

---
 src/lzsa.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lzsa.c b/src/lzsa.c
index 68eb5f5..3b5e88f 100755
--- a/src/lzsa.c
+++ b/src/lzsa.c
@@ -48,7 +48,7 @@
 #define OPT_RAW_BACKWARD 8
 #define OPT_STATS 16
 
-#define TOOL_VERSION "1.1.1"
+#define TOOL_VERSION "1.1.2"
 
 /*---------------------------------------------------------------------------*/
 
From 16ac8c75af3f862c4a5e3f7b4fdd74880635a363 Mon Sep 17 00:00:00 2001
From: Emmanuel Marty
Date: Tue, 22 Oct 2019 17:13:05 +0200
Subject: [PATCH 17/17] Add link to PDP-11 depackers by Ivan Gorodetsky

---
 README.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 57d5df8..9b15dc4 100755
--- a/README.md
+++ b/README.md
@@ -69,7 +69,8 @@ License:
 
 External links:
 
-* [i8080 decompressors](https://gitlab.com/ivagor) by Ivan Gorodetsky
+* [i8080 decompressors](https://gitlab.com/ivagor/lzsa8080/tree/master) by Ivan Gorodetsky
+* [PDP-11 decompressors](https://gitlab.com/ivagor/lzsa8080/tree/master/PDP11) also by Ivan Gorodetsky
 * LZSA's page on [Pouet](https://www.pouet.net/prod.php?which=81573)
 # Compressed format