Mirror of https://github.com/emmanuel-marty/lzsa.git (synced 2024-11-28 14:50:44 +00:00)
Add more missing constants; more minor cleanup
This commit is contained in:
parent 34ed06abfb
commit 5484395465
@@ -289,7 +289,7 @@ static void lzsa_optimize_forward_v1(lzsa_compressor *pCompressor, lzsa_match *p
       }
    }
 
-   const lzsa_arrival *end_arrival = &arrival[(i << ARRIVALS_PER_POSITION_SHIFT_V1) + 0];
+   const lzsa_arrival *end_arrival = &arrival[i << ARRIVALS_PER_POSITION_SHIFT_V1];
 
    while (end_arrival->from_slot > 0 && end_arrival->from_pos >= 0 && (end_arrival->from_pos + nStartOffset) < nEndOffset) {
       pBestMatch[end_arrival->from_pos + nStartOffset].length = end_arrival->match_len;
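In lzsa_optimize_forward_v1(), slot 0 of position i already lives at arrival[i << ARRIVALS_PER_POSITION_SHIFT_V1], so the redundant "+ 0" term is dropped. As a rough illustration of the backtracking loop that starts here, a minimal sketch follows; the struct layout, the shift value and the chain-advance step are simplified assumptions, only the field names come from the diff.

/* Sketch only: simplified stand-in for lzsa_arrival, not the real definition. */
#define ARRIVALS_PER_POSITION_SHIFT_V1 3   /* assumed: 8 arrival slots per position */

typedef struct {
   int from_slot;   /* 0 terminates the chain */
   int from_pos;    /* position the parser arrived from */
   int match_len;   /* match length recorded for that step */
} arrival_sketch;

/* Walk the arrival chain backwards from position i and record each match length. */
void backtrack_sketch(const arrival_sketch *arrival, int *match_len_out, int i) {
   const arrival_sketch *end_arrival = &arrival[i << ARRIVALS_PER_POSITION_SHIFT_V1];

   while (end_arrival->from_slot > 0 && end_arrival->from_pos >= 0) {
      match_len_out[end_arrival->from_pos] = end_arrival->match_len;
      /* assumed chain step: slot (from_slot - 1) of the originating position */
      end_arrival = &arrival[(end_arrival->from_pos << ARRIVALS_PER_POSITION_SHIFT_V1) + (end_arrival->from_slot - 1)];
   }
}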
@@ -419,7 +419,7 @@ static int lzsa_optimize_command_count_v1(lzsa_compressor *pCompressor, const un
  *
  * @return size of compressed data that will be written to output buffer
  */
-static int lzsa_get_compressed_size_v1(lzsa_compressor *pCompressor, lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset) {
+static int lzsa_get_compressed_size_v1(lzsa_compressor *pCompressor, const lzsa_match *pBestMatch, const int nStartOffset, const int nEndOffset) {
    int i;
    int nNumLiterals = 0;
    int nCompressedSize = 0;
@@ -470,7 +470,7 @@ static int lzsa_get_compressed_size_v1(lzsa_compressor *pCompressor, lzsa_match
  *
  * @return size of compressed data in output buffer, or -1 if the data is uncompressible
  */
-static int lzsa_write_block_v1(lzsa_compressor *pCompressor, lzsa_match *pBestMatch, const unsigned char *pInWindow, const int nStartOffset, const int nEndOffset, unsigned char *pOutData, const int nMaxOutDataSize) {
+static int lzsa_write_block_v1(lzsa_compressor *pCompressor, const lzsa_match *pBestMatch, const unsigned char *pInWindow, const int nStartOffset, const int nEndOffset, unsigned char *pOutData, const int nMaxOutDataSize) {
    int i;
    int nNumLiterals = 0;
    int nInFirstLiteralOffset = 0;
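Both hunks above only add const to the pBestMatch parameter: lzsa_get_compressed_size_v1() and lzsa_write_block_v1() read the chosen matches but never modify them. A hypothetical caller sketch shows how the two functions consume the same read-only match array; the helper name is made up and the real call sites are not part of this diff.

/* Hypothetical helper, assuming the lzsa internal headers are included:
 * the parser fills pBestMatch once, then both functions below only read it. */
static int emit_block_v1_sketch(lzsa_compressor *pCompressor, const lzsa_match *pBestMatch,
                                const unsigned char *pInWindow, const int nStartOffset, const int nEndOffset,
                                unsigned char *pOutData, const int nMaxOutDataSize) {
   /* Estimate first; with the const-qualified parameters no cast is needed. */
   const int nEstimatedSize = lzsa_get_compressed_size_v1(pCompressor, pBestMatch, nStartOffset, nEndOffset);
   (void)nEstimatedSize;   /* a real caller would act on the estimate */

   return lzsa_write_block_v1(pCompressor, pBestMatch, pInWindow, nStartOffset, nEndOffset, pOutData, nMaxOutDataSize);
}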
@@ -482,9 +482,7 @@ static void lzsa_optimize_forward_v2(lzsa_compressor *pCompressor, const unsigne
 
             int nNonRepMatchArrivalIdx = -1;
             for (j = 0; j < nNumArrivalsForThisPos; j++) {
-               const int nRepOffset = cur_arrival[j].rep_offset;
-
-               if (nMatchOffset != nRepOffset) {
+               if (nMatchOffset != cur_arrival[j].rep_offset) {
                   const int nPrevCost = cur_arrival[j].cost;
                   const int nScorePenalty = 3 + (match[m].length >> 15);
 
@@ -768,15 +766,15 @@ static int lzsa_optimize_command_count_v2(lzsa_compressor *pCompressor, const un
          if (nNextIndex < nEndOffset && pBestMatch[nNextIndex].length >= MIN_MATCH_SIZE_V2) {
             /* This command is a match, is followed by 'nNextLiterals' literals and then by another match */
 
-            if (nRepMatchOffset && pMatch->offset != nRepMatchOffset && (pBestMatch[nNextIndex].offset != pMatch->offset || pBestMatch[nNextIndex].offset == nRepMatchOffset ||
+            if (nRepMatchOffset && pMatch->offset != nRepMatchOffset && (pBestMatch[nNextIndex].offset != pMatch->offset ||
                ((pMatch->offset <= 32) ? 4 : ((pMatch->offset <= 512) ? 8 : ((pMatch->offset <= (8192 + 512)) ? 12 : 16))) >
                ((pBestMatch[nNextIndex].offset <= 32) ? 4 : ((pBestMatch[nNextIndex].offset <= 512) ? 8 : ((pBestMatch[nNextIndex].offset <= (8192 + 512)) ? 12 : 16))))) {
                /* Check if we can change the current match's offset to be the same as the previous match's offset, and get an extra repmatch. This will occur when
                 * matching large regions of identical bytes for instance, where there are too many offsets to be considered by the parser, and when not compressing to favor the
                 * ratio (the forward arrivals parser already has this covered). */
                if (i >= nRepMatchOffset &&
-                  (i - nRepMatchOffset + pMatch->length) <= nEndOffset &&
-                  !memcmp(pInWindow + i - nRepMatchOffset, pInWindow + i - pMatch->offset, pMatch->length)) {
+                  (i + pMatch->length) <= nEndOffset &&
+                  !memcmp(pInWindow + i - nRepMatchOffset, pInWindow + i, pMatch->length)) {
                   pMatch->offset = nRepMatchOffset;
                   nDidReduce = 1;
                }
@@ -784,14 +782,12 @@ static int lzsa_optimize_command_count_v2(lzsa_compressor *pCompressor, const un
 
             if (pBestMatch[nNextIndex].offset && pMatch->offset != pBestMatch[nNextIndex].offset && nRepMatchOffset != pBestMatch[nNextIndex].offset) {
                /* Otherwise, try to gain a match forward as well */
-               if (i >= pBestMatch[nNextIndex].offset && (i - pBestMatch[nNextIndex].offset + pMatch->length) <= nEndOffset) {
+               if (i >= pBestMatch[nNextIndex].offset && (i + pMatch->length) <= nEndOffset) {
                   int nMaxLen = 0;
                   const unsigned char *pInWindowAtPos = pInWindow + i;
-                  while ((nMaxLen + 8) < pMatch->length && !memcmp(pInWindowAtPos + nMaxLen - pBestMatch[nNextIndex].offset, pInWindowAtPos + nMaxLen, 8))
-                     nMaxLen += 8;
                   while ((nMaxLen + 4) < pMatch->length && !memcmp(pInWindowAtPos + nMaxLen - pBestMatch[nNextIndex].offset, pInWindowAtPos + nMaxLen, 4))
                      nMaxLen += 4;
-                  while (nMaxLen < pMatch->length && pInWindowAtPos[nMaxLen - pBestMatch[nNextIndex].offset] == pInWindowAtPos[nMaxLen - pMatch->offset])
+                  while (nMaxLen < pMatch->length && pInWindowAtPos[nMaxLen - pBestMatch[nNextIndex].offset] == pInWindowAtPos[nMaxLen])
                      nMaxLen++;
                   if (nMaxLen >= pMatch->length) {
                      /* Replace */
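The forward-gain scan keeps the 4-byte memcmp() stride and the byte-by-byte tail but drops the 8-byte stride, and it now compares directly against pInWindowAtPos + nMaxLen, which is valid because the current match makes that range identical to pInWindowAtPos + nMaxLen - pMatch->offset. A self-contained sketch of the same idiom, with illustrative names that are not from the lzsa sources:

#include <string.h>

/* Return non-zero if the data nAltOffset bytes back reproduces the nLength
 * bytes starting at pData. The caller must guarantee that both pData and
 * pData - nAltOffset have at least nLength readable bytes. */
static int offset_covers_match_sketch(const unsigned char *pData, const int nAltOffset, const int nLength) {
   int nMaxLen = 0;

   /* Coarse pass: compare 4 bytes at a time while they keep matching. */
   while ((nMaxLen + 4) < nLength && !memcmp(pData + nMaxLen - nAltOffset, pData + nMaxLen, 4))
      nMaxLen += 4;
   /* Fine pass: finish byte by byte. */
   while (nMaxLen < nLength && pData[nMaxLen - nAltOffset] == pData[nMaxLen])
      nMaxLen++;

   return nMaxLen >= nLength;
}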