From 201454a9829d25081e6d002245a4868cfbbaa097 Mon Sep 17 00:00:00 2001 From: Vilnis Termanis Date: Sat, 3 Feb 2018 21:22:26 +0000 Subject: [PATCH] Changes for 0.11.0 --- CHANGELOG | 4 + README.md | 2 +- lz4/NEWS | 13 + lz4/README.md | 56 ++--- lz4/lz4.c | 192 +++++++++++---- lz4/lz4.h | 156 ++++++------ lz4/lz4frame.c | 161 +++++++------ lz4/lz4frame.h | 65 ++--- lz4/lz4frame_static.h | 43 ++-- lz4/lz4hc.c | 330 +++++++++++++++---------- lz4/lz4hc.h | 52 ++-- lz4/lz4opt.h | 504 +++++++++++++++++++-------------------- lz4/xxhash.c | 32 +-- lz4framed/__init__.py | 4 +- lz4framed/py-lz4framed.c | 13 +- setup.py | 2 +- 16 files changed, 920 insertions(+), 709 deletions(-) diff --git a/CHANGELOG b/CHANGELOG index ac0ef3c..68c09b7 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,3 +1,7 @@ +0.11.0 +- Updated lz4 to v1.8.1 (faster/stronger ultra modes, levels 10+) +- Minor documentation clarification about compression levels. + 0.10.0 - Updated lz4 to v1.8.0 - Support for fast acceleration via negative compression levels diff --git a/README.md b/README.md index 1ab7dd1..98fa14f 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ python3 setup.py install **Notes** - The above as well as all other python3-using commands should also run with v2.7+ -- This module is also available via [Anaconda (conda-forge)](https://anaconda.org/conda-forge/py-lz4framed) +- This module is also available via [Anaconda (conda-forge)](https://anaconda.org/conda-forge/py-lz4framed) (with binaries for Linux, OSX and Windows) - PyPI releases are signed with the [Iotic Labs Software release signing key](https://iotic-labs.com/iotic-labs.com.asc) diff --git a/lz4/NEWS b/lz4/NEWS index e63a82f..f9c503f 100644 --- a/lz4/NEWS +++ b/lz4/NEWS @@ -1,3 +1,16 @@ +v1.8.1 +perf : faster and stronger ultra modes (levels 10+) +perf : slightly faster compression and decompression speed +perf : fix bad degenerative case, reported by @c-morgenstern +fix : decompression failed when using a combination of extDict + low memory address (#397), reported and fixed by Julian Scheid (@jscheid) +cli : support for dictionary compression (`-D`), by Felix Handte @felixhandte +cli : fix : `lz4 -d --rm` preserves timestamp (#441) +cli : fix : do not modify /dev/null permission as root, by @aliceatlas +api : `_destSize()` variant supported for all compression levels +build : `make` and `make test` compatible with `-jX`, reported by @mwgamera +build : can control LZ4LIB_VISIBILITY macro, by @mikir +install: fix man page directory (#387), reported by Stuart Cardall (@itoffshore) + v1.8.0 cli : fix : do not modify /dev/null permissions, reported by @Maokaman1 cli : added GNU separator -- specifying that all following arguments are files diff --git a/lz4/README.md b/lz4/README.md index b40442c..fc5d4e9 100644 --- a/lz4/README.md +++ b/lz4/README.md @@ -1,44 +1,43 @@ LZ4 - Library Files ================================ -The directory contains many files, but depending on project's objectives, +The `/lib` directory contains many files, but depending on project's objectives, not all of them are necessary. #### Minimal LZ4 build The minimum required is **`lz4.c`** and **`lz4.h`**, -which will provide the fast compression and decompression algorithm. +which provides the fast compression and decompression algorithm. +They generate and decode data using [LZ4 block format]. -#### The High Compression variant of LZ4 +#### High Compression variant -For more compression at the cost of compression speed, -the High Compression variant **lz4hc** is available. 
-It's necessary to add **`lz4hc.c`** and **`lz4hc.h`**.
-The variant still depends on regular `lz4` source files.
-In particular, the decompression is still provided by `lz4.c`.
+For more compression ratio at the cost of compression speed,
+the High Compression variant called **lz4hc** is available.
+Add files **`lz4hc.c`**, **`lz4hc.h`** and **`lz4opt.h`**.
+The variant still depends on regular `lib/lz4.*` source files.
 
-#### Compatibility issues
+#### Frame variant, for interoperability
 
-In order to produce files or streams compatible with `lz4` command line utility,
+In order to produce compressed data compatible with `lz4` command line utility,
 it's necessary to encode lz4-compressed blocks using the [official interoperable frame format].
 This format is generated and decoded automatically by the **lz4frame** library.
-In order to work properly, lz4frame needs lz4 and lz4hc, and also **xxhash**,
-which provides error detection.
-(_Advanced stuff_ : It's possible to hide xxhash symbols into a local namespace.
-This is what `liblz4` does, to avoid symbol duplication
-in case a user program would link to several libraries containing xxhash symbols.)
+Its public API is described in `lib/lz4frame.h`.
+In order to work properly, lz4frame needs all other modules present in `/lib`,
+including, lz4 and lz4hc, and also **xxhash**.
+So it's necessary to include all `*.c` and `*.h` files present in `/lib`.
 
-#### Advanced API
+#### Advanced / Experimental API
 
-A more complex `lz4frame_static.h` is also provided.
-It contains definitions which are not guaranteed to remain stable within future versions.
-It must be used with static linking ***only***.
+A complex API defined in `lz4frame_static.h` contains definitions
+which are not guaranteed to remain stable in future versions.
+As a consequence, it must be used with static linking ***only***.
 
-#### Using MinGW+MSYS to create DLL
+#### Windows : using MinGW+MSYS to create DLL
 
 DLL can be created using MinGW+MSYS with the `make liblz4` command.
 This command creates `dll\liblz4.dll` and the import library `dll\liblz4.lib`.
@@ -51,23 +50,24 @@ file it should be linked with `dll\liblz4.dll`. For example:
 ```
 gcc $(CFLAGS) -Iinclude/ test-dll.c -o test-dll dll\liblz4.dll
 ```
-The compiled executable will require LZ4 DLL which is available at `dll\liblz4.dll`. 
+The compiled executable will require LZ4 DLL which is available at `dll\liblz4.dll`.
 
-#### Miscellaneous 
+#### Miscellaneous
 
 Other files present in the directory are not source code. There are :
 
- - LICENSE : contains the BSD license text
- - Makefile : script to compile or install lz4 library (static or dynamic)
- - liblz4.pc.in : for pkg-config (make install)
- - README.md : this file
+ - `LICENSE` : contains the BSD license text
+ - `Makefile` : `make` script to compile and install lz4 library (static and dynamic)
+ - `liblz4.pc.in` : for `pkg-config` (used in `make install`)
+ - `README.md` : this file
 
 [official interoperable frame format]: ../doc/lz4_Frame_format.md
+[LZ4 block format]: ../doc/lz4_Block_format.md
 
-#### License 
+#### License
 
 All source material within __lib__ directory are BSD 2-Clause licensed.
 See [LICENSE](LICENSE) for details.
-The license is also repeated at the top of each source file.
+The license is also reminded at the top of each source file.
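Side note, not part of the patch: the "Minimal LZ4 build" described in the README above (just `lz4.c` + `lz4.h`) is enough for a complete block-mode round trip. A minimal sketch, using only functions and macros declared in the `lz4.h` that appears later in this diff:

```
#include <stdio.h>
#include <string.h>
#include "lz4.h"

int main(void)
{
    const char src[] = "LZ4 block round-trip : compress, then decompress.";
    int const srcSize = (int)sizeof(src);               /* keep the terminating NUL for simplicity */
    char compressed[LZ4_COMPRESSBOUND(sizeof(src))];    /* worst-case capacity, so compression cannot fail */
    char restored[sizeof(src)];

    /* returns the compressed size, or 0 if the data cannot fit into 'compressed' */
    int const cSize = LZ4_compress_default(src, compressed, srcSize, (int)sizeof(compressed));
    if (cSize <= 0) return 1;

    /* returns the decompressed size, or a negative value on malformed input */
    int const dSize = LZ4_decompress_safe(compressed, restored, cSize, (int)sizeof(restored));
    if (dSize != srcSize) return 1;

    printf("ok : %d -> %d -> %d bytes\n", srcSize, cSize, dSize);
    return memcmp(src, restored, (size_t)srcSize) ? 1 : 0;
}
```

Compiled as e.g. `cc example.c lz4.c`, this exercises exactly the two files named in the minimal build; switching to the high-compression variant means adding `lz4hc.c`/`lz4hc.h`/`lz4opt.h` and calling `LZ4_compress_HC()` instead, while the decompression path stays unchanged.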
diff --git a/lz4/lz4.c b/lz4/lz4.c index 41c0a28..213b085 100644 --- a/lz4/lz4.c +++ b/lz4/lz4.c @@ -85,6 +85,7 @@ #endif + /*-************************************ * Dependency **************************************/ @@ -101,21 +102,43 @@ # pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) */ #endif /* _MSC_VER */ -#ifndef FORCE_INLINE +#ifndef LZ4_FORCE_INLINE # ifdef _MSC_VER /* Visual Studio */ -# define FORCE_INLINE static __forceinline +# define LZ4_FORCE_INLINE static __forceinline # else # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # ifdef __GNUC__ -# define FORCE_INLINE static inline __attribute__((always_inline)) +# define LZ4_FORCE_INLINE static inline __attribute__((always_inline)) # else -# define FORCE_INLINE static inline +# define LZ4_FORCE_INLINE static inline # endif # else -# define FORCE_INLINE static +# define LZ4_FORCE_INLINE static # endif /* __STDC_VERSION__ */ # endif /* _MSC_VER */ -#endif /* FORCE_INLINE */ +#endif /* LZ4_FORCE_INLINE */ + +/* LZ4_FORCE_O2_GCC_PPC64LE and LZ4_FORCE_O2_INLINE_GCC_PPC64LE + * Gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy, + * together with a simple 8-byte copy loop as a fall-back path. + * However, this optimization hurts the decompression speed by >30%, + * because the execution does not go to the optimized loop + * for typical compressible data, and all of the preamble checks + * before going to the fall-back path become useless overhead. + * This optimization happens only with the -O3 flag, and -O2 generates + * a simple 8-byte copy loop. + * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy + * functions are annotated with __attribute__((optimize("O2"))), + * and also LZ4_wildCopy is forcibly inlined, so that the O2 attribute + * of LZ4_wildCopy does not affect the compression speed. + */ +#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) +# define LZ4_FORCE_O2_GCC_PPC64LE __attribute__((optimize("O2"))) +# define LZ4_FORCE_O2_INLINE_GCC_PPC64LE __attribute__((optimize("O2"))) LZ4_FORCE_INLINE +#else +# define LZ4_FORCE_O2_GCC_PPC64LE +# define LZ4_FORCE_O2_INLINE_GCC_PPC64LE static +#endif #if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__) # define expect(expr,value) (__builtin_expect ((expr),(value)) ) @@ -253,7 +276,8 @@ static void LZ4_copy8(void* dst, const void* src) } /* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */ -static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd) +LZ4_FORCE_O2_INLINE_GCC_PPC64LE +void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd) { BYTE* d = (BYTE*)dstPtr; const BYTE* s = (const BYTE*)srcPtr; @@ -289,15 +313,24 @@ static const int LZ4_minLength = (MFLIMIT+1); /*-************************************ * Error detection **************************************/ -#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ +#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1) +# include +#else +# ifndef assert +# define assert(condition) ((void)0) +# endif +#endif + +#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) # include -# define DEBUGLOG(l, ...) 
{ \ - if (l<=LZ4_DEBUG) { \ - fprintf(stderr, __FILE__ ": "); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, " \n"); \ +static int g_debuglog_enable = 1; +# define DEBUGLOG(l, ...) { \ + if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \ + fprintf(stderr, __FILE__ ": "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, " \n"); \ } } #else # define DEBUGLOG(l, ...) {} /* disabled */ @@ -307,7 +340,7 @@ static const int LZ4_minLength = (MFLIMIT+1); /*-************************************ * Common functions **************************************/ -static unsigned LZ4_NbCommonBytes (register reg_t val) +static unsigned LZ4_NbCommonBytes (reg_t val) { if (LZ4_isLittleEndian()) { if (sizeof(val)==8) { @@ -318,7 +351,14 @@ static unsigned LZ4_NbCommonBytes (register reg_t val) # elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT) return (__builtin_ctzll((U64)val) >> 3); # else - static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; + static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, + 0, 3, 1, 3, 1, 4, 2, 7, + 0, 2, 3, 6, 1, 5, 3, 5, + 1, 3, 4, 4, 2, 5, 6, 7, + 7, 0, 1, 2, 3, 3, 4, 6, + 2, 6, 5, 5, 3, 4, 5, 6, + 7, 1, 2, 4, 6, 4, 4, 5, + 7, 2, 6, 5, 7, 6, 7, 7 }; return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; # endif } else /* 32 bits */ { @@ -329,12 +369,15 @@ static unsigned LZ4_NbCommonBytes (register reg_t val) # elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT) return (__builtin_ctz((U32)val) >> 3); # else - static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; + static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, + 3, 2, 2, 1, 3, 2, 0, 1, + 3, 3, 1, 2, 2, 2, 2, 0, + 3, 1, 2, 0, 1, 0, 1, 1 }; return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; # endif } } else /* Big Endian CPU */ { - if (sizeof(val)==8) { + if (sizeof(val)==8) { /* 64-bits */ # if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) unsigned long r = 0; _BitScanReverse64( &r, val ); @@ -342,8 +385,11 @@ static unsigned LZ4_NbCommonBytes (register reg_t val) # elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT) return (__builtin_clzll((U64)val) >> 3); # else + static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits. + Just to avoid some static analyzer complaining about shift by 32 on 32-bits target. + Note that this code path is never triggered in 32-bits mode. 
*/ unsigned r; - if (!(val>>32)) { r=4; } else { r=0; val>>=32; } + if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; } if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } r += (!val); return r; @@ -366,11 +412,20 @@ static unsigned LZ4_NbCommonBytes (register reg_t val) } #define STEPSIZE sizeof(reg_t) -static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit) +LZ4_FORCE_INLINE +unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit) { const BYTE* const pStart = pIn; - while (likely(pIn> 24) * prime8bytes) >> (64 - hashLog)); } -FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType) +LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType) { if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType); return LZ4_hash4(LZ4_read32(p), tableType); @@ -452,7 +507,7 @@ static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableTy } } -FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase) +LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase) { U32 const h = LZ4_hashPosition(p, tableType); LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase); @@ -465,7 +520,7 @@ static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tab { const U16* const hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */ } -FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase) +LZ4_FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase) { U32 const h = LZ4_hashPosition(p, tableType); return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase); @@ -474,7 +529,7 @@ FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableTy /** LZ4_compress_generic() : inlined, to ensure branches are decided at compilation time */ -FORCE_INLINE int LZ4_compress_generic( +LZ4_FORCE_INLINE int LZ4_compress_generic( LZ4_stream_t_internal* const cctx, const char* const source, char* const dest, @@ -616,7 +671,11 @@ FORCE_INLINE int LZ4_compress_generic( *token += ML_MASK; matchCode -= ML_MASK; LZ4_write32(op, 0xFFFFFFFF); - while (matchCode >= 4*255) op+=4, LZ4_write32(op, 0xFFFFFFFF), matchCode -= 4*255; + while (matchCode >= 4*255) { + op+=4; + LZ4_write32(op, 0xFFFFFFFF); + matchCode -= 4*255; + } op += matchCode / 255; *op++ = (BYTE)(matchCode % 255); } else @@ -940,6 +999,7 @@ LZ4_stream_t* LZ4_createStream(void) void LZ4_resetStream (LZ4_stream_t* LZ4_stream) { + DEBUGLOG(4, "LZ4_resetStream"); MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t)); } @@ -1100,47 +1160,46 @@ int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize) * Decompression functions *******************************/ /*! LZ4_decompress_generic() : - * This generic decompression function cover all use cases. - * It shall be instantiated several times, using different sets of directives - * Note that it is important this generic function is really inlined, + * This generic decompression function covers all use cases. + * It shall be instantiated several times, using different sets of directives. + * Note that it is important for performance that this function really get inlined, * in order to remove useless branches during compilation optimization. 
*/ -FORCE_INLINE int LZ4_decompress_generic( - const char* const source, - char* const dest, - int inputSize, - int outputSize, /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */ +LZ4_FORCE_O2_GCC_PPC64LE +LZ4_FORCE_INLINE int LZ4_decompress_generic( + const char* const src, + char* const dst, + int srcSize, + int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */ int endOnInput, /* endOnOutputSize, endOnInputSize */ int partialDecoding, /* full, partial */ int targetOutputSize, /* only used if partialDecoding==partial */ int dict, /* noDict, withPrefix64k, usingExtDict */ - const BYTE* const lowPrefix, /* == dest when no prefix */ + const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */ const BYTE* const dictStart, /* only if dict==usingExtDict */ const size_t dictSize /* note : = 0 if noDict */ ) { - /* Local Variables */ - const BYTE* ip = (const BYTE*) source; - const BYTE* const iend = ip + inputSize; + const BYTE* ip = (const BYTE*) src; + const BYTE* const iend = ip + srcSize; - BYTE* op = (BYTE*) dest; + BYTE* op = (BYTE*) dst; BYTE* const oend = op + outputSize; BYTE* cpy; BYTE* oexit = op + targetOutputSize; - const BYTE* const lowLimit = lowPrefix - dictSize; const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize; - const unsigned dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; - const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3}; + const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4}; + const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3}; const int safeDecode = (endOnInput==endOnInputSize); const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB))); /* Special cases */ - if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT; /* targetOutputSize too high => decode everything */ - if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1; /* Empty output buffer */ + if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT; /* targetOutputSize too high => just decode everything */ + if ((endOnInput) && (unlikely(outputSize==0))) return ((srcSize==1) && (*ip==0)) ? 0 : -1; /* Empty output buffer */ if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1); /* Main Loop : decode sequences */ @@ -1149,8 +1208,27 @@ FORCE_INLINE int LZ4_decompress_generic( const BYTE* match; size_t offset; - /* get literal length */ unsigned const token = *ip++; + + /* shortcut for common case : + * in most circumstances, we expect to decode small matches (<= 18 bytes) separated by few literals (<= 14 bytes). + * this shortcut was tested on x86 and x64, where it improves decoding speed. + * it has not yet been benchmarked on ARM, Power, mips, etc. */ + if (((ip + 14 /*maxLL*/ + 2 /*offset*/ <= iend) + & (op + 14 /*maxLL*/ + 18 /*maxML*/ <= oend)) + & ((token < (15<> ML_BITS; + size_t const off = LZ4_readLE16(ip+ll); + const BYTE* const matchPtr = op + ll - off; /* pointer underflow risk ? 
*/ + if ((off >= 18) /* do not deal with overlapping matches */ & (matchPtr >= lowPrefix)) { + size_t const ml = (token & ML_MASK) + MINMATCH; + memcpy(op, ip, 16); op += ll; ip += ll + 2 /*offset*/; + memcpy(op, matchPtr, 18); op += ml; + continue; + } + } + + /* decode literal length */ if ((length=(token>>ML_BITS)) == RUN_MASK) { unsigned s; do { @@ -1184,7 +1262,7 @@ FORCE_INLINE int LZ4_decompress_generic( /* get offset */ offset = LZ4_readLE16(ip); ip+=2; match = op - offset; - if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error; /* Error : offset outside buffers */ + if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */ LZ4_write32(op, (U32)offset); /* costs ~1%; silence an msan warning when offset==0 */ /* get matchlength */ @@ -1228,14 +1306,13 @@ FORCE_INLINE int LZ4_decompress_generic( /* copy match within block */ cpy = op + length; if (unlikely(offset<8)) { - const int dec64 = dec64table[offset]; op[0] = match[0]; op[1] = match[1]; op[2] = match[2]; op[3] = match[3]; - match += dec32table[offset]; + match += inc32table[offset]; memcpy(op+4, match, 4); - match -= dec64; + match -= dec64table[offset]; } else { LZ4_copy8(op, match); match+=8; } op += 8; @@ -1252,31 +1329,34 @@ FORCE_INLINE int LZ4_decompress_generic( LZ4_copy8(op, match); if (length>16) LZ4_wildCopy(op+8, match+8, cpy); } - op=cpy; /* correction */ + op = cpy; /* correction */ } /* end of decoding */ if (endOnInput) - return (int) (((char*)op)-dest); /* Nb of output bytes decoded */ + return (int) (((char*)op)-dst); /* Nb of output bytes decoded */ else - return (int) (((const char*)ip)-source); /* Nb of input bytes read */ + return (int) (((const char*)ip)-src); /* Nb of input bytes read */ /* Overflow error detected */ _output_error: - return (int) (-(((const char*)ip)-source))-1; + return (int) (-(((const char*)ip)-src))-1; } +LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize) { return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0); } +LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize) { return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0); } +LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_fast(const char* source, char* dest, int originalSize) { return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB); @@ -1322,6 +1402,7 @@ int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dicti If it's not possible, save the relevant part of decoded data into a safe buffer, and indicate where it stands using LZ4_setStreamDecode() */ +LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize) { LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; @@ -1348,6 +1429,7 @@ int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const ch return result; } +LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize) { LZ4_streamDecode_t_internal* lz4sd = 
&LZ4_streamDecode->internal_donotuse; @@ -1382,7 +1464,8 @@ Advanced decoding functions : the dictionary must be explicitly provided within parameters */ -FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize) +LZ4_FORCE_O2_GCC_PPC64LE +LZ4_FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize) { if (dictSize==0) return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0); @@ -1394,17 +1477,20 @@ FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize); } +LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) { return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize); } +LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize) { return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize); } /* debug function */ +LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) { return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize); diff --git a/lz4/lz4.h b/lz4/lz4.h index 86ca0d5..a06b8a4 100644 --- a/lz4/lz4.h +++ b/lz4/lz4.h @@ -72,24 +72,28 @@ extern "C" { /* * LZ4_DLL_EXPORT : * Enable exporting of functions when building a Windows DLL -* LZ4LIB_API : +* LZ4LIB_VISIBILITY : * Control library symbols visibility. 
*/ +#ifndef LZ4LIB_VISIBILITY +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define LZ4LIB_VISIBILITY __attribute__ ((visibility ("default"))) +# else +# define LZ4LIB_VISIBILITY +# endif +#endif #if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1) -# define LZ4LIB_API __declspec(dllexport) +# define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY #elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1) -# define LZ4LIB_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ -#elif defined(__GNUC__) && (__GNUC__ >= 4) -# define LZ4LIB_API __attribute__ ((__visibility__ ("default"))) +# define LZ4LIB_API __declspec(dllimport) LZ4LIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else -# define LZ4LIB_API +# define LZ4LIB_API LZ4LIB_VISIBILITY #endif - /*------ Version ------*/ #define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */ #define LZ4_VERSION_MINOR 8 /* for new (non-breaking) interface capabilities */ -#define LZ4_VERSION_RELEASE 0 /* for tweaks, bug-fixes, or development */ +#define LZ4_VERSION_RELEASE 1 /* for tweaks, bug-fixes, or development */ #define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE) @@ -120,30 +124,30 @@ LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; * Simple Functions **************************************/ /*! LZ4_compress_default() : - Compresses 'sourceSize' bytes from buffer 'source' - into already allocated 'dest' buffer of size 'maxDestSize'. - Compression is guaranteed to succeed if 'maxDestSize' >= LZ4_compressBound(sourceSize). + Compresses 'srcSize' bytes from buffer 'src' + into already allocated 'dst' buffer of size 'dstCapacity'. + Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize). It also runs faster, so it's a recommended setting. - If the function cannot compress 'source' into a more limited 'dest' budget, + If the function cannot compress 'src' into a limited 'dst' budget, compression stops *immediately*, and the function result is zero. - As a consequence, 'dest' content is not valid. - This function never writes outside 'dest' buffer, nor read outside 'source' buffer. - sourceSize : Max supported value is LZ4_MAX_INPUT_VALUE - maxDestSize : full or partial size of buffer 'dest' (which must be already allocated) - return : the number of bytes written into buffer 'dest' (necessarily <= maxOutputSize) - or 0 if compression fails */ -LZ4LIB_API int LZ4_compress_default(const char* source, char* dest, int sourceSize, int maxDestSize); + As a consequence, 'dst' content is not valid. + This function never writes outside 'dst' buffer, nor read outside 'source' buffer. + srcSize : supported max value is LZ4_MAX_INPUT_VALUE + dstCapacity : full or partial size of buffer 'dst' (which must be already allocated) + return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity) + or 0 if compression fails */ +LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity); /*! LZ4_decompress_safe() : - compressedSize : is the precise full size of the compressed block. - maxDecompressedSize : is the size of destination buffer, which must be already allocated. 
- return : the number of bytes decompressed into destination buffer (necessarily <= maxDecompressedSize) - If destination buffer is not large enough, decoding will stop and output an error code (<0). + compressedSize : is the exact complete size of the compressed block. + dstCapacity : is the size of destination buffer, which must be already allocated. + return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity) + If destination buffer is not large enough, decoding will stop and output an error code (negative value). If the source stream is detected malformed, the function will stop decoding and return a negative result. This function is protected against buffer overflow exploits, including malicious data packets. It never writes outside output buffer, nor reads outside input buffer. */ -LZ4LIB_API int LZ4_decompress_safe (const char* source, char* dest, int compressedSize, int maxDecompressedSize); +LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity); /*-************************************ @@ -172,7 +176,7 @@ LZ4_compress_fast() : An acceleration value of "1" is the same as regular LZ4_compress_default() Values <= 0 will be replaced by ACCELERATION_DEFAULT (see lz4.c), which is 1. */ -LZ4LIB_API int LZ4_compress_fast (const char* source, char* dest, int sourceSize, int maxDestSize, int acceleration); +LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); /*! @@ -183,49 +187,49 @@ LZ4_compress_fast_extState() : Then, provide it as 'void* state' to compression function. */ LZ4LIB_API int LZ4_sizeofState(void); -LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* source, char* dest, int inputSize, int maxDestSize, int acceleration); +LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); /*! LZ4_compress_destSize() : - Reverse the logic, by compressing as much data as possible from 'source' buffer - into already allocated buffer 'dest' of size 'targetDestSize'. - This function either compresses the entire 'source' content into 'dest' if it's large enough, - or fill 'dest' buffer completely with as much data as possible from 'source'. - *sourceSizePtr : will be modified to indicate how many bytes where read from 'source' to fill 'dest'. - New value is necessarily <= old value. - return : Nb bytes written into 'dest' (necessarily <= targetDestSize) - or 0 if compression fails + Reverse the logic : compresses as much data as possible from 'src' buffer + into already allocated buffer 'dst' of size 'targetDestSize'. + This function either compresses the entire 'src' content into 'dst' if it's large enough, + or fill 'dst' buffer completely with as much data as possible from 'src'. + *srcSizePtr : will be modified to indicate how many bytes where read from 'src' to fill 'dst'. + New value is necessarily <= old value. + return : Nb bytes written into 'dst' (necessarily <= targetDestSize) + or 0 if compression fails */ -LZ4LIB_API int LZ4_compress_destSize (const char* source, char* dest, int* sourceSizePtr, int targetDestSize); +LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize); /*! -LZ4_decompress_fast() : - originalSize : is the original and therefore uncompressed size +LZ4_decompress_fast() : (unsafe!!) 
+ originalSize : is the original uncompressed size return : the number of bytes read from the source buffer (in other words, the compressed size) If the source stream is detected malformed, the function will stop decoding and return a negative result. - Destination buffer must be already allocated. Its size must be a minimum of 'originalSize' bytes. - note : This function fully respect memory boundaries for properly formed compressed data. + Destination buffer must be already allocated. Its size must be >= 'originalSize' bytes. + note : This function respects memory boundaries for *properly formed* compressed data. It is a bit faster than LZ4_decompress_safe(). However, it does not provide any protection against intentionally modified data stream (malicious input). Use this function in trusted environment only (data to decode comes from a trusted source). */ -LZ4LIB_API int LZ4_decompress_fast (const char* source, char* dest, int originalSize); +LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize); /*! LZ4_decompress_safe_partial() : - This function decompress a compressed block of size 'compressedSize' at position 'source' - into destination buffer 'dest' of size 'maxDecompressedSize'. - The function tries to stop decompressing operation as soon as 'targetOutputSize' has been reached, - reducing decompression time. - return : the number of bytes decoded in the destination buffer (necessarily <= maxDecompressedSize) - Note : this number can be < 'targetOutputSize' should the compressed block to decode be smaller. + This function decompress a compressed block of size 'srcSize' at position 'src' + into destination buffer 'dst' of size 'dstCapacity'. + The function will decompress a minimum of 'targetOutputSize' bytes, and stop after that. + However, it's not accurate, and may write more than 'targetOutputSize' (but <= dstCapacity). + @return : the number of bytes decoded in the destination buffer (necessarily <= dstCapacity) + Note : this number can be < 'targetOutputSize' should the compressed block contain less data. Always control how many bytes were decoded. If the source stream is detected malformed, the function will stop decoding and return a negative result. - This function never writes outside of output buffer, and never reads outside of input buffer. It is therefore protected against malicious data packets + This function never writes outside of output buffer, and never reads outside of input buffer. It is therefore protected against malicious data packets. */ -LZ4LIB_API int LZ4_decompress_safe_partial (const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize); +LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity); /*-********************************************* @@ -242,24 +246,29 @@ LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr); /*! LZ4_resetStream() : * An LZ4_stream_t structure can be allocated once and re-used multiple times. - * Use this function to init an allocated `LZ4_stream_t` structure and start a new compression. + * Use this function to start compressing a new stream. */ LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr); /*! LZ4_loadDict() : - * Use this function to load a static dictionary into LZ4_stream. + * Use this function to load a static dictionary into LZ4_stream_t. * Any previous data will be forgotten, only 'dictionary' will remain in memory. - * Loading a size of 0 is allowed. 
- * Return : dictionary size, in bytes (necessarily <= 64 KB) + * Loading a size of 0 is allowed, and is the same as reset. + * @return : dictionary size, in bytes (necessarily <= 64 KB) */ LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize); /*! LZ4_compress_fast_continue() : - * Compress buffer content 'src', using data from previously compressed blocks as dictionary to improve compression ratio. - * Important : Previous data blocks are assumed to remain present and unmodified ! + * Compress content into 'src' using data from previously compressed blocks, improving compression ratio. * 'dst' buffer must be already allocated. * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster. - * If not, and if compressed data cannot fit into 'dst' buffer size, compression stops, and function @return==0. + * + * Important : Up to 64KB of previously compressed data is assumed to remain present and unmodified in memory ! + * Special 1 : If input buffer is a double-buffer, it can have any size, including < 64 KB. + * Special 2 : If input buffer is a ring-buffer, it can have any size, including < 64 KB. + * + * @return : size of compressed block + * or 0 if there is an error (typically, compressed data cannot fit into 'dst') * After an error, the stream status is invalid, it can only be reset or freed. */ LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); @@ -280,35 +289,40 @@ LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int dict typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* incomplete type (defined later) */ /*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() : - * creation / destruction of streaming decompression tracking structure */ + * creation / destruction of streaming decompression tracking structure. + * A tracking structure can be re-used multiple times sequentially. */ LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void); LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream); /*! LZ4_setStreamDecode() : - * Use this function to instruct where to find the dictionary. - * Setting a size of 0 is allowed (same effect as reset). - * @return : 1 if OK, 0 if error + * An LZ4_streamDecode_t structure can be allocated once and re-used multiple times. + * Use this function to start decompression of a new stream of blocks. + * A dictionary can optionnally be set. Use NULL or size 0 for a simple reset order. + * @return : 1 if OK, 0 if error */ LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize); /*! LZ4_decompress_*_continue() : - * These decoding functions allow decompression of multiple blocks in "streaming" mode. - * Previously decoded blocks *must* remain available at the memory position where they were decoded (up to 64 KB) - * In the case of a ring buffers, decoding buffer must be either : + * These decoding functions allow decompression of consecutive blocks in "streaming" mode. + * A block is an unsplittable entity, it must be presented entirely to a decompression function. + * Decompression functions only accept one block at a time. + * Previously decoded blocks *must* remain available at the memory position where they were decoded (up to 64 KB). 
+ * + * Special : if application sets a ring buffer for decompression, it must respect one of the following conditions : * - Exactly same size as encoding buffer, with same update rule (block boundaries at same positions) * In which case, the decoding & encoding ring buffer can have any size, including very small ones ( < 64 KB). * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes. - * maxBlockSize is implementation dependent. It's the maximum size you intend to compress into a single block. + * maxBlockSize is implementation dependent. It's the maximum size of any single block. * In which case, encoding and decoding buffers do not need to be synchronized, * and encoding ring buffer can have any size, including small ones ( < 64 KB). * - _At least_ 64 KB + 8 bytes + maxBlockSize. * In which case, encoding and decoding buffers do not need to be synchronized, * and encoding ring buffer can have any size, including larger than decoding buffer. * Whenever these conditions are not possible, save the last 64KB of decoded data into a safe buffer, - * and indicate where it is saved using LZ4_setStreamDecode() + * and indicate where it is saved using LZ4_setStreamDecode() before decompressing next block. */ -LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxDecompressedSize); -LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize); +LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int srcSize, int dstCapacity); +LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize); /*! LZ4_decompress_*_usingDict() : @@ -316,8 +330,8 @@ LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecod * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue() * They are stand-alone, and don't need an LZ4_streamDecode_t structure. 
*/ -LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* source, char* dest, int compressedSize, int maxDecompressedSize, const char* dictStart, int dictSize); -LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* source, char* dest, int originalSize, const char* dictStart, int dictSize); +LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* src, char* dst, int srcSize, int dstCapcity, const char* dictStart, int dictSize); +LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize); /*^********************************************** @@ -419,9 +433,11 @@ union LZ4_streamDecode_u { # define LZ4_DEPRECATED(message) /* disable deprecation warnings */ #else # define LZ4_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) -# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ +# if defined(__clang__) /* clang doesn't handle mixed C++11 and CNU attributes */ +# define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) +# elif defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ # define LZ4_DEPRECATED(message) [[deprecated(message)]] -# elif (LZ4_GCC_VERSION >= 405) || defined(__clang__) +# elif (LZ4_GCC_VERSION >= 405) # define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) # elif (LZ4_GCC_VERSION >= 301) # define LZ4_DEPRECATED(message) __attribute__((deprecated)) diff --git a/lz4/lz4frame.c b/lz4/lz4frame.c index 3408708..0b26f75 100644 --- a/lz4/lz4frame.c +++ b/lz4/lz4frame.c @@ -70,6 +70,14 @@ You can contact the author at : /*-************************************ * Debug **************************************/ +#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1) +# include +#else +# ifndef assert +# define assert(condition) ((void)0) +# endif +#endif + #define LZ4F_STATIC_ASSERT(c) { enum { LZ4F_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ @@ -281,7 +289,7 @@ static size_t LZ4F_compressBound_internal(size_t srcSize, size_t const bufferedSize = MIN(alreadyBuffered, maxBuffered); size_t const maxSrcSize = srcSize + bufferedSize; unsigned const nbFullBlocks = (unsigned)(maxSrcSize / blockSize); - size_t const partialBlockSize = (srcSize - (srcSize==0)) & (blockSize-1); /* 0 => -1 == MAX => blockSize-1 */ + size_t const partialBlockSize = maxSrcSize & (blockSize-1); size_t const lastBlockSize = flush ? partialBlockSize : 0; unsigned const nbBlocks = nbFullBlocks + (lastBlockSize>0); @@ -322,7 +330,7 @@ size_t LZ4F_compressFrame_usingCDict(void* dstBuffer, size_t dstCapacity, const LZ4F_preferences_t* preferencesPtr) { LZ4F_cctx_t cctxI; - LZ4_stream_t lz4ctx; + LZ4_stream_t lz4ctx; /* pretty large on stack */ LZ4F_preferences_t prefs; LZ4F_compressOptions_t options; BYTE* const dstStart = (BYTE*) dstBuffer; @@ -504,15 +512,15 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr, cctxPtr->prefs = *preferencesPtr; /* Ctx Management */ - { U32 const tableID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2; /* 0:nothing ; 1:LZ4 table ; 2:HC tables */ - if (cctxPtr->lz4CtxLevel < tableID) { + { U32 const ctxTypeID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 
1 : 2; /* 0:nothing ; 1:LZ4 table ; 2:HC tables */ + if (cctxPtr->lz4CtxLevel < ctxTypeID) { FREEMEM(cctxPtr->lz4CtxPtr); if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) cctxPtr->lz4CtxPtr = (void*)LZ4_createStream(); else cctxPtr->lz4CtxPtr = (void*)LZ4_createStreamHC(); if (cctxPtr->lz4CtxPtr == NULL) return err0r(LZ4F_ERROR_allocation_failed); - cctxPtr->lz4CtxLevel = tableID; + cctxPtr->lz4CtxLevel = ctxTypeID; } } /* Buffer Management */ @@ -604,10 +612,10 @@ size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr, } -/* LZ4F_compressBound() : - * @ return size of Dst buffer given a srcSize to handle worst case situations. - * The LZ4F_frameInfo_t structure is optional : if NULL, preferences will be set to cover worst case situations. - * This function cannot fail. +/* LZ4F_compressBound() : + * @return minimum capacity of dstBuffer for a given srcSize to handle worst case scenario. + * LZ4F_preferences_t structure is optional : if NULL, preferences will be set to cover worst case scenario. + * This function cannot fail. */ size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr) { @@ -887,8 +895,7 @@ typedef enum { dstage_getBlockHeader, dstage_storeBlockHeader, dstage_copyDirect, dstage_getBlockChecksum, dstage_getCBlock, dstage_storeCBlock, - dstage_decodeCBlock, dstage_decodeCBlock_intoDst, - dstage_decodeCBlock_intoTmp, dstage_flushOut, + dstage_flushOut, dstage_getSuffix, dstage_storeSuffix, dstage_getSFrameSize, dstage_storeSFrameSize, dstage_skipSkippable @@ -1253,29 +1260,32 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); dctx->tmpInSize += sizeToCopy; srcPtr += sizeToCopy; - if (dctx->tmpInSize < dctx->tmpInTarget) { - nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + BHSize; /* rest of header + nextBlockHeader */ - doAnotherStage = 0; /* not enough src data, ask for some more */ - break; - } - { size_t const hSize = LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget); /* will update dStage appropriately */ - if (LZ4F_isError(hSize)) return hSize; - } + } + if (dctx->tmpInSize < dctx->tmpInTarget) { + nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + BHSize; /* rest of header + nextBlockHeader */ + doAnotherStage = 0; /* not enough src data, ask for some more */ break; } + { size_t const hSize = LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget); /* will update dStage appropriately */ + if (LZ4F_isError(hSize)) return hSize; + } + break; case dstage_init: if (dctx->frameInfo.contentChecksumFlag) XXH32_reset(&(dctx->xxh), 0); /* internal buffers allocation */ - { size_t const bufferNeeded = dctx->maxBlockSize + ((dctx->frameInfo.blockMode==LZ4F_blockLinked) * 128 KB) + 4 /* block checksum */; + { size_t const bufferNeeded = dctx->maxBlockSize + + ((dctx->frameInfo.blockMode==LZ4F_blockLinked) * 128 KB); if (bufferNeeded > dctx->maxBufferSize) { /* tmp buffers too small */ dctx->maxBufferSize = 0; /* ensure allocation will be re-attempted on next entry*/ FREEMEM(dctx->tmpIn); - dctx->tmpIn = (BYTE*)ALLOCATOR(dctx->maxBlockSize); - if (dctx->tmpIn == NULL) return err0r(LZ4F_ERROR_allocation_failed); + dctx->tmpIn = (BYTE*)ALLOCATOR(dctx->maxBlockSize + 4 /* block checksum */); + if (dctx->tmpIn == NULL) + return err0r(LZ4F_ERROR_allocation_failed); FREEMEM(dctx->tmpOutBuffer); dctx->tmpOutBuffer= (BYTE*)ALLOCATOR(bufferNeeded); - if (dctx->tmpOutBuffer== NULL) return err0r(LZ4F_ERROR_allocation_failed); + if (dctx->tmpOutBuffer== NULL) + return 
err0r(LZ4F_ERROR_allocation_failed); dctx->maxBufferSize = bufferNeeded; } } dctx->tmpInSize = 0; @@ -1299,18 +1309,20 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, if (dctx->dStage == dstage_storeBlockHeader) /* can be skipped */ case dstage_storeBlockHeader: - { size_t sizeToCopy = BHSize - dctx->tmpInSize; - if (sizeToCopy > (size_t)(srcEnd - srcPtr)) sizeToCopy = srcEnd - srcPtr; + { size_t const remainingInput = (size_t)(srcEnd - srcPtr); + size_t const wantedData = BHSize - dctx->tmpInSize; + size_t const sizeToCopy = MIN(wantedData, remainingInput); memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); srcPtr += sizeToCopy; dctx->tmpInSize += sizeToCopy; + if (dctx->tmpInSize < BHSize) { /* not enough input for cBlockSize */ nextSrcSizeHint = BHSize - dctx->tmpInSize; doAnotherStage = 0; break; } selectedIn = dctx->tmpIn; - } + } /* if (dctx->dStage == dstage_storeBlockHeader) */ /* decode block header */ { size_t const nextCBlockSize = LZ4F_readLE32(selectedIn) & 0x7FFFFFFFU; @@ -1401,7 +1413,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, dctx->dStage = dstage_getBlockHeader; /* new block */ break; - case dstage_getCBlock: /* entry from dstage_decodeCBlockSize */ + case dstage_getCBlock: if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) { dctx->tmpInSize = 0; dctx->dStage = dstage_storeCBlock; @@ -1410,11 +1422,12 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, /* input large enough to read full block directly */ selectedIn = srcPtr; srcPtr += dctx->tmpInTarget; - dctx->dStage = dstage_decodeCBlock; - break; + if (0) /* jump over next block */ case dstage_storeCBlock: - { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd-srcPtr)); + { size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize; + size_t const inputLeft = (size_t)(srcEnd-srcPtr); + size_t const sizeToCopy = MIN(wantedData, inputLeft); memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); dctx->tmpInSize += sizeToCopy; srcPtr += sizeToCopy; @@ -1424,27 +1437,21 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, break; } selectedIn = dctx->tmpIn; - dctx->dStage = dstage_decodeCBlock; } - /* fall-through */ - /* At this stage, input is large enough to decode a block */ - case dstage_decodeCBlock: + /* At this stage, input is large enough to decode a block */ if (dctx->frameInfo.blockChecksumFlag) { dctx->tmpInTarget -= 4; + assert(selectedIn != NULL); /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */ { U32 const readBlockCrc = LZ4F_readLE32(selectedIn + dctx->tmpInTarget); U32 const calcBlockCrc = XXH32(selectedIn, dctx->tmpInTarget, 0); if (readBlockCrc != calcBlockCrc) return err0r(LZ4F_ERROR_blockChecksum_invalid); } } - if ((size_t)(dstEnd-dstPtr) < dctx->maxBlockSize) /* not enough place into dst : decode into tmpOut */ - dctx->dStage = dstage_decodeCBlock_intoTmp; - else - dctx->dStage = dstage_decodeCBlock_intoDst; - break; - case dstage_decodeCBlock_intoDst: - { int const decodedSize = LZ4_decompress_safe_usingDict( + if ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize) { + /* enough capacity in `dst` to decompress directly there */ + int const decodedSize = LZ4_decompress_safe_usingDict( (const char*)selectedIn, (char*)dstPtr, (int)dctx->tmpInTarget, (int)dctx->maxBlockSize, (const char*)dctx->dict, (int)dctx->dictSize); @@ -1463,9 +1470,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, break; } - case dstage_decodeCBlock_intoTmp: /* not enough place into dst : decode into tmpOut */ - /* ensure enough place for tmpOut */ if (dctx->frameInfo.blockMode == 
LZ4F_blockLinked) { if (dctx->dict == dctx->tmpOutBuffer) { @@ -1518,29 +1523,27 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, } case dstage_getSuffix: - { size_t const suffixSize = dctx->frameInfo.contentChecksumFlag * 4; - if (dctx->frameRemainingSize) - return err0r(LZ4F_ERROR_frameSize_wrong); /* incorrect frame size decoded */ - if (suffixSize == 0) { /* frame completed */ - nextSrcSizeHint = 0; - LZ4F_resetDecompressionContext(dctx); - doAnotherStage = 0; - break; - } - if ((srcEnd - srcPtr) < 4) { /* not enough size for entire CRC */ - dctx->tmpInSize = 0; - dctx->dStage = dstage_storeSuffix; - } else { - selectedIn = srcPtr; - srcPtr += 4; - } + if (dctx->frameRemainingSize) + return err0r(LZ4F_ERROR_frameSize_wrong); /* incorrect frame size decoded */ + if (!dctx->frameInfo.contentChecksumFlag) { /* no checksum, frame is completed */ + nextSrcSizeHint = 0; + LZ4F_resetDecompressionContext(dctx); + doAnotherStage = 0; + break; + } + if ((srcEnd - srcPtr) < 4) { /* not enough size for entire CRC */ + dctx->tmpInSize = 0; + dctx->dStage = dstage_storeSuffix; + } else { + selectedIn = srcPtr; + srcPtr += 4; } if (dctx->dStage == dstage_storeSuffix) /* can be skipped */ case dstage_storeSuffix: - { - size_t sizeToCopy = 4 - dctx->tmpInSize; - if (sizeToCopy > (size_t)(srcEnd - srcPtr)) sizeToCopy = srcEnd - srcPtr; + { size_t const remainingInput = (size_t)(srcEnd - srcPtr); + size_t const wantedData = 4 - dctx->tmpInSize; + size_t const sizeToCopy = MIN(wantedData, remainingInput); memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); srcPtr += sizeToCopy; dctx->tmpInSize += sizeToCopy; @@ -1550,7 +1553,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, break; } selectedIn = dctx->tmpIn; - } + } /* if (dctx->dStage == dstage_storeSuffix) */ /* case dstage_checkSuffix: */ /* no direct call, avoid scan-build warning */ { U32 const readCRC = LZ4F_readLE32(selectedIn); @@ -1589,7 +1592,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, break; } selectedIn = dctx->header + 4; - } + } /* if (dctx->dStage == dstage_storeSFrameSize) */ /* case dstage_decodeSFrameSize: */ /* no direct access */ { size_t const SFrameSize = LZ4F_readLE32(selectedIn); @@ -1606,36 +1609,38 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, doAnotherStage = 0; nextSrcSizeHint = dctx->tmpInTarget; if (nextSrcSizeHint) break; /* still more to skip */ + /* frame fully skipped : prepare context for a new frame */ LZ4F_resetDecompressionContext(dctx); break; } } - } - - /* preserve history within tmp if necessary */ - if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked) - && (dctx->dict != dctx->tmpOutBuffer) - && (dctx->dStage != dstage_getFrameHeader) - && (!decompressOptionsPtr->stableDst) - && ((unsigned)(dctx->dStage-1) < (unsigned)(dstage_getSuffix-1)) ) + } /* while (doAnotherStage) */ + + /* preserve history within tmp whenever necessary */ + LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2); + if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked) /* next block will use up to 64KB from previous ones */ + && (dctx->dict != dctx->tmpOutBuffer) /* dictionary is not already within tmp */ + && (!decompressOptionsPtr->stableDst) /* cannot rely on dst data to remain there for next call */ + && ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) ) /* valid stages : [init ... 
getSuffix[ */ { if (dctx->dStage == dstage_flushOut) { - size_t preserveSize = dctx->tmpOut - dctx->tmpOutBuffer; + size_t const preserveSize = dctx->tmpOut - dctx->tmpOutBuffer; size_t copySize = 64 KB - dctx->tmpOutSize; const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart; if (dctx->tmpOutSize > 64 KB) copySize = 0; if (copySize > preserveSize) copySize = preserveSize; - memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize); + if (copySize > 0) + memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize); dctx->dict = dctx->tmpOutBuffer; dctx->dictSize = preserveSize + dctx->tmpOutStart; } else { - size_t newDictSize = dctx->dictSize; - const BYTE* oldDictEnd = dctx->dict + dctx->dictSize; - if ((newDictSize) > 64 KB) newDictSize = 64 KB; + const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize; + size_t const newDictSize = MIN(dctx->dictSize, 64 KB); - memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize); + if (newDictSize > 0) + memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize); dctx->dict = dctx->tmpOutBuffer; dctx->dictSize = newDictSize; diff --git a/lz4/lz4frame.h b/lz4/lz4frame.h index 88a6513..eb55e45 100644 --- a/lz4/lz4frame.h +++ b/lz4/lz4frame.h @@ -248,19 +248,19 @@ LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx, const LZ4F_preferences_t* prefsPtr); /*! LZ4F_compressBound() : - * Provides dstCapacity given a srcSize to guarantee operation success in worst case situations. - * prefsPtr is optional : you can provide NULL as argument, preferences will be set to cover worst case scenario. - * Result is always the same for a srcSize and prefsPtr, so it can be trusted to size reusable buffers. - * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations. + * Provides minimum dstCapacity for a given srcSize to guarantee operation success in worst case situations. + * prefsPtr is optional : when NULL is provided, preferences will be set to cover worst case scenario. + * Result is always the same for a srcSize and prefsPtr, so it can be trusted to size reusable buffers. + * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations. */ LZ4FLIB_API size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* prefsPtr); /*! LZ4F_compressUpdate() : - * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. - * An important rule is that dstCapacity MUST be large enough to ensure operation success even in worst case situations. - * This value is provided by LZ4F_compressBound(). - * If this condition is not respected, LZ4F_compress() will fail (result is an errorCode). - * LZ4F_compressUpdate() doesn't guarantee error recovery. When an error occurs, compression context must be freed or resized. + * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. + * An important rule is that dstCapacity MUST be large enough to ensure operation success even in worst case situations. + * This value is provided by LZ4F_compressBound(). + * If this condition is not respected, LZ4F_compress() will fail (result is an errorCode). + * LZ4F_compressUpdate() doesn't guarantee error recovery. When an error occurs, compression context must be freed or resized. * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default. 
* @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered). * or an error code if it fails (which can be tested using LZ4F_isError()) @@ -268,8 +268,8 @@ LZ4FLIB_API size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* LZ4FLIB_API size_t LZ4F_compressUpdate(LZ4F_cctx* cctx, void* dstBuffer, size_t dstCapacity, const void* srcBuffer, size_t srcSize, const LZ4F_compressOptions_t* cOptPtr); /*! LZ4F_flush() : - * When data must be generated and sent immediately, without waiting for a block to be completely filled, - * it's possible to call LZ4_flush(). It will immediately compress any data buffered within cctx. + * When data must be generated and sent immediately, without waiting for a block to be completely filled, + * it's possible to call LZ4_flush(). It will immediately compress any data buffered within cctx. * `dstCapacity` must be large enough to ensure the operation will be successful. * `cOptPtr` is optional : it's possible to provide NULL, all options will be set to default. * @return : number of bytes written into dstBuffer (it can be zero, which means there was no data stored within cctx) @@ -303,14 +303,14 @@ typedef struct { /* Resource management */ -/*!LZ4F_createDecompressionContext() : - * Create an LZ4F_dctx object, to track all decompression operations. - * The version provided MUST be LZ4F_VERSION. - * The function provides a pointer to an allocated and initialized LZ4F_dctx object. - * The result is an errorCode, which can be tested using LZ4F_isError(). - * dctx memory can be released using LZ4F_freeDecompressionContext(); - * The result of LZ4F_freeDecompressionContext() is indicative of the current state of decompressionContext when being released. - * That is, it should be == 0 if decompression has been completed fully and correctly. +/*! LZ4F_createDecompressionContext() : + * Create an LZ4F_dctx object, to track all decompression operations. + * The version provided MUST be LZ4F_VERSION. + * The function provides a pointer to an allocated and initialized LZ4F_dctx object. + * The result is an errorCode, which can be tested using LZ4F_isError(). + * dctx memory can be released using LZ4F_freeDecompressionContext(); + * The result of LZ4F_freeDecompressionContext() is indicative of the current state of decompressionContext when being released. + * That is, it should be == 0 if decompression has been completed fully and correctly. */ LZ4FLIB_API LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** dctxPtr, unsigned version); LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx); @@ -347,27 +347,32 @@ LZ4FLIB_API size_t LZ4F_getFrameInfo(LZ4F_dctx* dctx, /*! LZ4F_decompress() : * Call this function repetitively to regenerate compressed data from `srcBuffer`. - * The function will attempt to decode up to *srcSizePtr bytes from srcBuffer, into dstBuffer of capacity *dstSizePtr. + * The function will read up to *srcSizePtr bytes from srcBuffer, + * and decompress data into dstBuffer, of capacity *dstSizePtr. * - * The number of bytes regenerated into dstBuffer is provided within *dstSizePtr (necessarily <= original value). + * The number of bytes consumed from srcBuffer will be written into *srcSizePtr (necessarily <= original value). + * The number of bytes decompressed into dstBuffer will be written into *dstSizePtr (necessarily <= original value). * - * The number of bytes consumed from srcBuffer is provided within *srcSizePtr (necessarily <= original value). 
- * Number of bytes consumed can be < number of bytes provided.
- * It typically happens when dstBuffer is not large enough to contain all decoded data.
+ * The function does not necessarily read all input bytes, so always check value in *srcSizePtr.
  * Unconsumed source data must be presented again in subsequent invocations.
  *
- * `dstBuffer` content is expected to be flushed between each invocation, as its content will be overwritten.
- * `dstBuffer` itself can be changed at will between each consecutive function invocation.
+ * `dstBuffer` can freely change between each consecutive function invocation.
+ * `dstBuffer` content will be overwritten.
  *
  * @return : a hint of how many `srcSize` bytes LZ4F_decompress() expects for next call.
  *           Schematically, it's the size of the current (or remaining) compressed block + header of next block.
  *           Respecting the hint provides some small speed benefit, because it skips intermediate buffers.
  *           This is just a hint though, it's always possible to provide any srcSize.
+ *
  *           When a frame is fully decoded, @return will be 0 (no more data expected).
+ *           When provided with more bytes than necessary to decode a frame,
+ *           LZ4F_decompress() will stop reading exactly at the end of the current frame, and @return 0.
+ *
  *           If decompression failed, @return is an error code, which can be tested using LZ4F_isError().
+ *           After a decompression error, the `dctx` context is not resumable.
+ *           Use LZ4F_resetDecompressionContext() to return to a clean state.
  *
  *           After a frame is fully decoded, dctx can be used again to decompress another frame.
- *           After a decompression error, use LZ4F_resetDecompressionContext() before re-using dctx, to return to clean state.
  */
 LZ4FLIB_API size_t LZ4F_decompress(LZ4F_dctx* dctx,
                                    void* dstBuffer, size_t* dstSizePtr,
@@ -375,11 +380,11 @@ LZ4FLIB_API size_t LZ4F_decompress(LZ4F_dctx* dctx,
                                    const LZ4F_decompressOptions_t* dOptPtr);
 
 
-/*! LZ4F_resetDecompressionContext() : v1.8.0
+/*! LZ4F_resetDecompressionContext() : added in v1.8.0
  * In case of an error, the context is left in "undefined" state.
  * In which case, it's necessary to reset it, before re-using it.
- * This method can also be used to abruptly stop an unfinished decompression,
- * and start a new one using the same context. */
+ * This method can also be used to abruptly stop any unfinished decompression,
+ * and start a new one using the same context resources. */
 LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx);   /* always successful */
 
 
diff --git a/lz4/lz4frame_static.h b/lz4/lz4frame_static.h
index 1899f8e..a59b94b 100644
--- a/lz4/lz4frame_static.h
+++ b/lz4/lz4frame_static.h
@@ -43,7 +43,15 @@ extern "C" {
 /* lz4frame_static.h should be used solely in the context of static linking.
  * It contains definitions which are not stable and may change in the future.
  * Never use it in the context of DLL linking.
+ *
+ * Defining LZ4F_PUBLISH_STATIC_FUNCTIONS allows one to override this. Use at
+ * your own risk.
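To make the in/out pointer convention just documented concrete, a minimal sketch, assuming a dctx obtained from LZ4F_createDecompressionContext(); the helper name `decompress_all` and the flush placeholder are illustrative only.
```
#include "lz4frame.h"

/* Sketch: feed a buffer through LZ4F_decompress(), honouring the rule that
 * *srcSizePtr / *dstSizePtr are in/out and unconsumed input is re-presented. */
static int decompress_all(LZ4F_dctx* dctx,
                          const char* src, size_t srcSize,
                          char* dst, size_t dstCapacity)
{
    size_t consumed = 0;
    while (consumed < srcSize) {
        size_t srcLen = srcSize - consumed;  /* in: available ; out: consumed  */
        size_t dstLen = dstCapacity;         /* in: capacity  ; out: produced  */
        size_t const hint = LZ4F_decompress(dctx, dst, &dstLen,
                                            src + consumed, &srcLen, NULL);
        if (LZ4F_isError(hint)) return -1;   /* dctx not resumable : reset before reuse */
        consumed += srcLen;
        /* ... write out the dstLen bytes now sitting in dst ... */
        if (hint == 0) break;                /* frame fully decoded */
    }
    return 0;
}
```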
*/ +#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS +#define LZ4FLIB_STATIC_API LZ4FLIB_API +#else +#define LZ4FLIB_STATIC_API +#endif /* --- Dependency --- */ @@ -79,7 +87,7 @@ extern "C" { /* enum list is exposed, to handle specific errors */ typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM) } LZ4F_errorCodes; -LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult); +LZ4FLIB_STATIC_API LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult); @@ -93,8 +101,8 @@ typedef struct LZ4F_CDict_s LZ4F_CDict; * LZ4_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay. * LZ4_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. * `dictBuffer` can be released after LZ4_CDict creation, since its content is copied within CDict */ -LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize); -void LZ4F_freeCDict(LZ4F_CDict* CDict); +LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize); +LZ4FLIB_STATIC_API void LZ4F_freeCDict(LZ4F_CDict* CDict); /*! LZ4_compressFrame_usingCDict() : @@ -106,10 +114,11 @@ void LZ4F_freeCDict(LZ4F_CDict* CDict); * but it's not recommended, as it's the only way to provide dictID in the frame header. * @return : number of bytes written into dstBuffer. * or an error code if it fails (can be tested using LZ4F_isError()) */ -size_t LZ4F_compressFrame_usingCDict(void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const LZ4F_CDict* cdict, - const LZ4F_preferences_t* preferencesPtr); +LZ4FLIB_STATIC_API size_t LZ4F_compressFrame_usingCDict( + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const LZ4F_CDict* cdict, + const LZ4F_preferences_t* preferencesPtr); /*! LZ4F_compressBegin_usingCDict() : @@ -119,21 +128,23 @@ size_t LZ4F_compressFrame_usingCDict(void* dst, size_t dstCapacity, * however, it's the only way to provide dictID in the frame header. * @return : number of bytes written into dstBuffer for the header, * or an error code (which can be tested using LZ4F_isError()) */ -size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const LZ4F_CDict* cdict, - const LZ4F_preferences_t* prefsPtr); +LZ4FLIB_STATIC_API size_t LZ4F_compressBegin_usingCDict( + LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const LZ4F_CDict* cdict, + const LZ4F_preferences_t* prefsPtr); /*! LZ4F_decompress_usingDict() : * Same as LZ4F_decompress(), using a predefined dictionary. * Dictionary is used "in place", without any preprocessing. * It must remain accessible throughout the entire frame decoding. 
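Since the whole CDict API is re-tagged with LZ4FLIB_STATIC_API here, a minimal usage sketch may help; static linking is assumed, per the header's warning, and the helper name `compress_with_dict` is illustrative only.
```
#include "lz4frame_static.h"

/* Sketch: one-shot frame compression with a digested dictionary.
 * dictBuf may be freed right after LZ4F_createCDict(); its content is copied. */
static size_t compress_with_dict(void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
                                 const void* dictBuf, size_t dictSize)
{
    LZ4F_CDict* const cdict = LZ4F_createCDict(dictBuf, dictSize);
    size_t const r = LZ4F_compressFrame_usingCDict(dst, dstCapacity,
                                                   src, srcSize,
                                                   cdict, NULL /* default prefs */);
    LZ4F_freeCDict(cdict);
    return r;   /* bytes written, or an error code (test with LZ4F_isError()) */
}
```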
*/ -size_t LZ4F_decompress_usingDict(LZ4F_dctx* dctxPtr, - void* dstBuffer, size_t* dstSizePtr, - const void* srcBuffer, size_t* srcSizePtr, - const void* dict, size_t dictSize, - const LZ4F_decompressOptions_t* decompressOptionsPtr); +LZ4FLIB_STATIC_API size_t LZ4F_decompress_usingDict( + LZ4F_dctx* dctxPtr, + void* dstBuffer, size_t* dstSizePtr, + const void* srcBuffer, size_t* srcSizePtr, + const void* dict, size_t dictSize, + const LZ4F_decompressOptions_t* decompressOptionsPtr); #if defined (__cplusplus) diff --git a/lz4/lz4hc.c b/lz4/lz4hc.c index 22eb071..f2c2566 100644 --- a/lz4/lz4hc.c +++ b/lz4/lz4hc.c @@ -49,6 +49,7 @@ /*=== Dependency ===*/ +#define LZ4_HC_STATIC_LINKING_ONLY #include "lz4hc.h" @@ -96,7 +97,7 @@ static void LZ4HC_init (LZ4HC_CCtx_internal* hc4, const BYTE* start) /* Update chains up to ip (excluded) */ -FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip) +LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip) { U16* const chainTable = hc4->chainTable; U32* const hashTable = hc4->hashTable; @@ -116,56 +117,73 @@ FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip) hc4->nextToUpdate = target; } - -FORCE_INLINE int LZ4HC_InsertAndFindBestMatch (LZ4HC_CCtx_internal* const hc4, /* Index table will be updated */ - const BYTE* const ip, const BYTE* const iLimit, - const BYTE** matchpos, - const int maxNbAttempts) +/** LZ4HC_countBack() : + * @return : negative value, nb of common bytes before ip/match */ +LZ4_FORCE_INLINE +int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match, + const BYTE* const iMin, const BYTE* const mMin) { - U16* const chainTable = hc4->chainTable; - U32* const HashTable = hc4->hashTable; - const BYTE* const base = hc4->base; - const BYTE* const dictBase = hc4->dictBase; - const U32 dictLimit = hc4->dictLimit; - const U32 lowLimit = (hc4->lowLimit + 64 KB > (U32)(ip-base)) ? hc4->lowLimit : (U32)(ip - base) - (64 KB - 1); - U32 matchIndex; - int nbAttempts = maxNbAttempts; - size_t ml = 0; + int back=0; + while ( (ip+back > iMin) + && (match+back > mMin) + && (ip[back-1] == match[back-1])) + back--; + return back; +} - /* HC4 match finder */ - LZ4HC_Insert(hc4, ip); - matchIndex = HashTable[LZ4HC_hashPtr(ip)]; +/* LZ4HC_countPattern() : + * pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */ +static unsigned LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32) +{ + const BYTE* const iStart = ip; + reg_t const pattern = (sizeof(pattern)==8) ? 
(reg_t)pattern32 + (((reg_t)pattern32) << 32) : pattern32; + + while (likely(ip < iEnd-(sizeof(pattern)-1))) { + reg_t const diff = LZ4_read_ARCH(ip) ^ pattern; + if (!diff) { ip+=sizeof(pattern); continue; } + ip += LZ4_NbCommonBytes(diff); + return (unsigned)(ip - iStart); + } - while ((matchIndex>=lowLimit) && (nbAttempts)) { - nbAttempts--; - if (matchIndex >= dictLimit) { - const BYTE* const match = base + matchIndex; - if ( (*(match+ml) == *(ip+ml)) /* can be longer */ - && (LZ4_read32(match) == LZ4_read32(ip)) ) - { - size_t const mlt = LZ4_count(ip+MINMATCH, match+MINMATCH, iLimit) + MINMATCH; - if (mlt > ml) { ml = mlt; *matchpos = match; } - } - } else { - const BYTE* const match = dictBase + matchIndex; - if (LZ4_read32(match) == LZ4_read32(ip)) { - size_t mlt; - const BYTE* vLimit = ip + (dictLimit - matchIndex); - if (vLimit > iLimit) vLimit = iLimit; - mlt = LZ4_count(ip+MINMATCH, match+MINMATCH, vLimit) + MINMATCH; - if ((ip+mlt == vLimit) && (vLimit < iLimit)) - mlt += LZ4_count(ip+mlt, base+dictLimit, iLimit); - if (mlt > ml) { ml = mlt; *matchpos = base + matchIndex; } /* virtual matchpos */ - } + if (LZ4_isLittleEndian()) { + reg_t patternByte = pattern; + while ((ip>= 8; + } + } else { /* big endian */ + U32 bitOffset = (sizeof(pattern)*8) - 8; + while (ip < iEnd) { + BYTE const byte = (BYTE)(pattern >> bitOffset); + if (*ip != byte) break; + ip ++; bitOffset -= 8; } - matchIndex -= DELTANEXTU16(chainTable, matchIndex); } - return (int)ml; + return (unsigned)(ip - iStart); +} + +/* LZ4HC_reverseCountPattern() : + * pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) + * read using natural platform endianess */ +static unsigned LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern) +{ + const BYTE* const iStart = ip; + + while (likely(ip >= iLow+4)) { + if (LZ4_read32(ip-4) != pattern) break; + ip -= 4; + } + { const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianess */ + while (likely(ip>iLow)) { + if (ip[-1] != *bytePtr) break; + ip--; bytePtr--; + } } + return (unsigned)(iStart - ip); } +typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e; -FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch ( +LZ4_FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch ( LZ4HC_CCtx_internal* hc4, const BYTE* const ip, const BYTE* const iLowLimit, @@ -173,67 +191,126 @@ FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch ( int longest, const BYTE** matchpos, const BYTE** startpos, - const int maxNbAttempts) + const int maxNbAttempts, + const int patternAnalysis) { U16* const chainTable = hc4->chainTable; U32* const HashTable = hc4->hashTable; const BYTE* const base = hc4->base; const U32 dictLimit = hc4->dictLimit; const BYTE* const lowPrefixPtr = base + dictLimit; - const U32 lowLimit = (hc4->lowLimit + 64 KB > (U32)(ip-base)) ? hc4->lowLimit : (U32)(ip - base) - (64 KB - 1); + const U32 lowLimit = (hc4->lowLimit + 64 KB > (U32)(ip-base)) ? 
hc4->lowLimit : (U32)(ip - base) - MAX_DISTANCE; const BYTE* const dictBase = hc4->dictBase; int const delta = (int)(ip-iLowLimit); int nbAttempts = maxNbAttempts; + U32 const pattern = LZ4_read32(ip); U32 matchIndex; + repeat_state_e repeat = rep_untested; + size_t srcPatternLength = 0; - + DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch"); /* First Match */ LZ4HC_Insert(hc4, ip); matchIndex = HashTable[LZ4HC_hashPtr(ip)]; + DEBUGLOG(7, "First match at index %u / %u (lowLimit)", + matchIndex, lowLimit); while ((matchIndex>=lowLimit) && (nbAttempts)) { + DEBUGLOG(7, "remaining attempts : %i", nbAttempts); nbAttempts--; if (matchIndex >= dictLimit) { const BYTE* const matchPtr = base + matchIndex; if (*(iLowLimit + longest) == *(matchPtr - delta + longest)) { - if (LZ4_read32(matchPtr) == LZ4_read32(ip)) { + if (LZ4_read32(matchPtr) == pattern) { int mlt = MINMATCH + LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit); + #if 0 + /* more generic but unfortunately slower on clang */ + int const back = LZ4HC_countBack(ip, matchPtr, iLowLimit, lowPrefixPtr); + #else int back = 0; - while ( (ip+back > iLowLimit) && (matchPtr+back > lowPrefixPtr) && (ip[back-1] == matchPtr[back-1])) { back--; } - + #endif mlt -= back; if (mlt > longest) { longest = mlt; *matchpos = matchPtr+back; *startpos = ip+back; - } } } - } else { + } } + } + } else { /* matchIndex < dictLimit */ const BYTE* const matchPtr = dictBase + matchIndex; - if (LZ4_read32(matchPtr) == LZ4_read32(ip)) { + if (LZ4_read32(matchPtr) == pattern) { int mlt; - int back=0; + int back = 0; const BYTE* vLimit = ip + (dictLimit - matchIndex); if (vLimit > iHighLimit) vLimit = iHighLimit; mlt = LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH; if ((ip+mlt == vLimit) && (vLimit < iHighLimit)) mlt += LZ4_count(ip+mlt, base+dictLimit, iHighLimit); - while ((ip+back > iLowLimit) && (matchIndex+back > lowLimit) && (ip[back-1] == matchPtr[back-1])) back--; + while ( (ip+back > iLowLimit) + && (matchIndex+back > lowLimit) + && (ip[back-1] == matchPtr[back-1])) + back--; mlt -= back; - if (mlt > longest) { longest = mlt; *matchpos = base + matchIndex + back; *startpos = ip+back; } - } - } - matchIndex -= DELTANEXTU16(chainTable, matchIndex); - } + if (mlt > longest) { + longest = mlt; + *matchpos = base + matchIndex + back; + *startpos = ip + back; + } } } + + { U32 const nextOffset = DELTANEXTU16(chainTable, matchIndex); + matchIndex -= nextOffset; + if (patternAnalysis && nextOffset==1) { + /* may be a repeated pattern */ + if (repeat == rep_untested) { + if ( ((pattern & 0xFFFF) == (pattern >> 16)) + & ((pattern & 0xFF) == (pattern >> 24)) ) { + repeat = rep_confirmed; + srcPatternLength = LZ4HC_countPattern(ip+4, iHighLimit, pattern) + 4; + } else { + repeat = rep_not; + } } + if ( (repeat == rep_confirmed) + && (matchIndex >= dictLimit) ) { /* same segment only */ + const BYTE* const matchPtr = base + matchIndex; + if (LZ4_read32(matchPtr) == pattern) { /* good candidate */ + size_t const forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern); + const BYTE* const maxLowPtr = (lowPrefixPtr + MAX_DISTANCE >= ip) ? 
lowPrefixPtr : ip - MAX_DISTANCE; + size_t const backLength = LZ4HC_reverseCountPattern(matchPtr, maxLowPtr, pattern); + size_t const currentSegmentLength = backLength + forwardPatternLength; + + if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */ + && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */ + matchIndex += (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */ + } else { + matchIndex -= (U32)backLength; /* let's go to farthest segment position, will find a match of length currentSegmentLength + maybe some back */ + } + } } } } + } /* while ((matchIndex>=lowLimit) && (nbAttempts)) */ return longest; } +LZ4_FORCE_INLINE +int LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index table will be updated */ + const BYTE* const ip, const BYTE* const iLimit, + const BYTE** matchpos, + const int maxNbAttempts, + const int patternAnalysis) +{ + const BYTE* uselessPtr = ip; + /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos), + * but this won't be the case here, as we define iLowLimit==ip, + * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */ + return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, matchpos, &uselessPtr, maxNbAttempts, patternAnalysis); +} + + typedef enum { noLimit = 0, @@ -241,14 +318,10 @@ typedef enum { limitedDestSize = 2, } limitedOutput_directive; -#ifndef LZ4HC_DEBUG -# define LZ4HC_DEBUG 0 -#endif - /* LZ4HC_encodeSequence() : * @return : 0 if ok, * 1 if buffer issue detected */ -FORCE_INLINE int LZ4HC_encodeSequence ( +LZ4_FORCE_INLINE int LZ4HC_encodeSequence ( const BYTE** ip, BYTE** op, const BYTE** anchor, @@ -260,9 +333,21 @@ FORCE_INLINE int LZ4HC_encodeSequence ( size_t length; BYTE* const token = (*op)++; -#if LZ4HC_DEBUG - printf("literal : %u -- match : %u -- offset : %u\n", - (U32)(*ip - *anchor), (U32)matchLength, (U32)(*ip-match)); +#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 2) + static const BYTE* start = NULL; + static U32 totalCost = 0; + U32 const pos = (start==NULL) ? 0 : (U32)(*anchor - start); + U32 const ll = (U32)(*ip - *anchor); + U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0; + U32 const mlAdd = (matchLength>=19) ? 
((matchLength-19) / 255) + 1 : 0; + U32 const cost = 1 + llAdd + ll + 2 + mlAdd; + if (start==NULL) start = *anchor; /* only works for single segment */ + //g_debuglog_enable = (pos >= 2228) & (pos <= 2262); + DEBUGLOG(2, "pos:%7u -- literals:%3u, match:%4i, offset:%5u, cost:%3u + %u", + pos, + (U32)(*ip - *anchor), matchLength, (U32)(*ip-match), + cost, totalCost); + totalCost += cost; #endif /* Encode Literal length */ @@ -285,6 +370,7 @@ FORCE_INLINE int LZ4HC_encodeSequence ( LZ4_writeLE16(*op, (U16)(*ip-match)); *op += 2; /* Encode MatchLength */ + assert(matchLength >= MINMATCH); length = (size_t)(matchLength - MINMATCH); if ((limit) && (*op + (length >> 8) + (1 + LASTLITERALS) > oend)) return 1; /* Check output limit */ if (length >= ML_MASK) { @@ -319,6 +405,7 @@ static int LZ4HC_compress_hashChain ( ) { const int inputSize = *srcSizePtr; + const int patternAnalysis = (maxNbAttempts > 64); /* levels 8+ */ const BYTE* ip = (const BYTE*) source; const BYTE* anchor = ip; @@ -341,19 +428,13 @@ static int LZ4HC_compress_hashChain ( /* init */ *srcSizePtr = 0; - if (limit == limitedDestSize && maxOutputSize < 1) return 0; /* Impossible to store anything */ - if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */ - - ctx->end += inputSize; - if (limit == limitedDestSize) oend -= LASTLITERALS; /* Hack for support limitations LZ4 decompressor */ + if (limit == limitedDestSize) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */ if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */ - ip++; - /* Main Loop */ while (ip < mflimit) { - ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, (&ref), maxNbAttempts); - if (!ml) { ip++; continue; } + ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, &ref, maxNbAttempts, patternAnalysis); + if (ml 9) { - if (limit == limitedDestSize) cLevel = 10; - switch (cLevel) { - case 10: - return LZ4HC_compress_hashChain(ctx, src, dst, srcSizePtr, dstCapacity, 1 << 12, limit); - case 11: - ctx->searchNum = LZ4HC_getSearchNum(cLevel); - return LZ4HC_compress_optimal(ctx, src, dst, *srcSizePtr, dstCapacity, limit, 128, 0); - default: - cLevel = 12; - /* fall-through */ - case 12: - ctx->searchNum = LZ4HC_getSearchNum(cLevel); - return LZ4HC_compress_optimal(ctx, src, dst, *srcSizePtr, dstCapacity, limit, LZ4_OPT_NUM, 1); - } + typedef enum { lz4hc, lz4opt } lz4hc_strat_e; + typedef struct { + lz4hc_strat_e strat; + U32 nbSearches; + U32 targetLength; + } cParams_t; + static const cParams_t clTable[LZ4HC_CLEVEL_MAX+1] = { + { lz4hc, 2, 16 }, /* 0, unused */ + { lz4hc, 2, 16 }, /* 1, unused */ + { lz4hc, 2, 16 }, /* 2, unused */ + { lz4hc, 4, 16 }, /* 3 */ + { lz4hc, 8, 16 }, /* 4 */ + { lz4hc, 16, 16 }, /* 5 */ + { lz4hc, 32, 16 }, /* 6 */ + { lz4hc, 64, 16 }, /* 7 */ + { lz4hc, 128, 16 }, /* 8 */ + { lz4hc, 256, 16 }, /* 9 */ + { lz4opt, 96, 64 }, /*10==LZ4HC_CLEVEL_OPT_MIN*/ + { lz4opt, 512,128 }, /*11 */ + { lz4opt,8192, LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */ + }; + + if (limit == limitedDestSize && dstCapacity < 1) return 0; /* Impossible to store anything */ + if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */ + + ctx->end += *srcSizePtr; + if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT; /* note : convention is different from lz4frame, maybe something to review */ + cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel); + assert(cLevel >= 0); + assert(cLevel <= LZ4HC_CLEVEL_MAX); + { 
cParams_t const cParam = clTable[cLevel]; + if (cParam.strat == lz4hc) + return LZ4HC_compress_hashChain(ctx, + src, dst, srcSizePtr, dstCapacity, + cParam.nbSearches, limit); + assert(cParam.strat == lz4opt); + return LZ4HC_compress_optimal(ctx, + src, dst, srcSizePtr, dstCapacity, + cParam.nbSearches, cParam.targetLength, limit, + cLevel == LZ4HC_CLEVEL_MAX); /* ultra mode */ } - return LZ4HC_compress_hashChain(ctx, src, dst, srcSizePtr, dstCapacity, 1 << (cLevel-1), limit); /* levels 1-9 */ } @@ -596,8 +696,7 @@ int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, in } /* LZ4_compress_HC_destSize() : - * currently, only compatible with Hash Chain implementation, - * hence limit compression level to LZ4HC_CLEVEL_OPT_MIN-1*/ + * only compatible with regular HC parser */ int LZ4_compress_HC_destSize(void* LZ4HC_Data, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel) { LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse; @@ -624,18 +723,13 @@ void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) { LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= sizeof(size_t) * LZ4_STREAMHCSIZE_SIZET); /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */ LZ4_streamHCPtr->internal_donotuse.base = NULL; - if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX; /* cap compression level */ - LZ4_streamHCPtr->internal_donotuse.compressionLevel = compressionLevel; - LZ4_streamHCPtr->internal_donotuse.searchNum = LZ4HC_getSearchNum(compressionLevel); + LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel); } void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) { - int const currentCLevel = LZ4_streamHCPtr->internal_donotuse.compressionLevel; - int const minCLevel = currentCLevel < LZ4HC_CLEVEL_OPT_MIN ? 1 : LZ4HC_CLEVEL_OPT_MIN; - int const maxCLevel = currentCLevel < LZ4HC_CLEVEL_OPT_MIN ? 
LZ4HC_CLEVEL_OPT_MIN-1 : LZ4HC_CLEVEL_MAX; - compressionLevel = MIN(compressionLevel, minCLevel); - compressionLevel = MAX(compressionLevel, maxCLevel); + if (compressionLevel < 1) compressionLevel = 1; + if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX; LZ4_streamHCPtr->internal_donotuse.compressionLevel = compressionLevel; } @@ -648,10 +742,7 @@ int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, const char* dictionary, int } LZ4HC_init (ctxPtr, (const BYTE*)dictionary); ctxPtr->end = (const BYTE*)dictionary + dictSize; - if (ctxPtr->compressionLevel >= LZ4HC_CLEVEL_OPT_MIN) - LZ4HC_updateBinTree(ctxPtr, ctxPtr->end - MFLIMIT, ctxPtr->end - LASTLITERALS); - else - if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3); + if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3); return dictSize; } @@ -660,10 +751,7 @@ int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, const char* dictionary, int static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock) { - if (ctxPtr->compressionLevel >= LZ4HC_CLEVEL_OPT_MIN) - LZ4HC_updateBinTree(ctxPtr, ctxPtr->end - MFLIMIT, ctxPtr->end - LASTLITERALS); - else - if (ctxPtr->end >= ctxPtr->base + 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */ + if (ctxPtr->end >= ctxPtr->base + 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */ /* Only one memory segment for extDict, so any previous extDict is lost at this stage */ ctxPtr->lowLimit = ctxPtr->dictLimit; @@ -717,8 +805,6 @@ int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize) { - LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse; - if (ctxPtr->compressionLevel >= LZ4HC_CLEVEL_OPT_MIN) LZ4HC_init(ctxPtr, (const BYTE*)src); /* not compatible with btopt implementation */ return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, limitedDestSize); } diff --git a/lz4/lz4hc.h b/lz4/lz4hc.h index 66d5636..d41bf42 100644 --- a/lz4/lz4hc.h +++ b/lz4/lz4hc.h @@ -39,14 +39,14 @@ extern "C" { #endif /* --- Dependency --- */ -/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */ +/* note : lz4hc requires lz4.h/lz4.c for compilation */ #include "lz4.h" /* stddef, LZ4LIB_API, LZ4_DEPRECATED */ /* --- Useful constants --- */ #define LZ4HC_CLEVEL_MIN 3 #define LZ4HC_CLEVEL_DEFAULT 9 -#define LZ4HC_CLEVEL_OPT_MIN 11 +#define LZ4HC_CLEVEL_OPT_MIN 10 #define LZ4HC_CLEVEL_MAX 12 @@ -54,12 +54,12 @@ extern "C" { * Block Compression **************************************/ /*! LZ4_compress_HC() : - * Compress data from `src` into `dst`, using the more powerful but slower "HC" algorithm. + * Compress data from `src` into `dst`, using the more powerful but slower "HC" algorithm. * `dst` must be already allocated. - * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h") - * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h") - * `compressionLevel` : Recommended values are between 4 and 9, although any value between 1 and LZ4HC_CLEVEL_MAX will work. - * Values >LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX. 
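For reference while reading the reworked doc comment, a minimal one-shot HC call; this is a sketch, and the helper name `hc_compressed_size` is illustrative only.
```
#include <stdlib.h>
#include "lz4.h"     /* LZ4_compressBound */
#include "lz4hc.h"

/* Sketch: LZ4_compressBound() guarantees enough room, so a 0 return here
 * would indicate a real failure rather than a too-small buffer. */
static int hc_compressed_size(const char* src, int srcSize, int level)
{
    int const cap = LZ4_compressBound(srcSize);
    char* const dst = malloc((size_t)cap);
    int const csize = LZ4_compress_HC(src, dst, srcSize, cap, level);
    free(dst);
    return csize;   /* 0 means compression failed */
}
```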
+ * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h") + * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h") + * `compressionLevel` : any value between 1 and LZ4HC_CLEVEL_MAX will work. + * Values > LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX. * @return : the number of bytes written into 'dst' * or 0 if compression fails. */ @@ -72,12 +72,12 @@ LZ4LIB_API int LZ4_compress_HC (const char* src, char* dst, int srcSize, int dst /*! LZ4_compress_HC_extStateHC() : - * Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`. + * Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`. * `state` size is provided by LZ4_sizeofStateHC(). - * Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() will do properly). + * Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() should do properly). */ -LZ4LIB_API int LZ4_compress_HC_extStateHC(void* state, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel); LZ4LIB_API int LZ4_sizeofStateHC(void); +LZ4LIB_API int LZ4_compress_HC_extStateHC(void* state, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel); /*-************************************ @@ -87,10 +87,10 @@ LZ4LIB_API int LZ4_sizeofStateHC(void); typedef union LZ4_streamHC_u LZ4_streamHC_t; /* incomplete type (defined later) */ /*! LZ4_createStreamHC() and LZ4_freeStreamHC() : - * These functions create and release memory for LZ4 HC streaming state. - * Newly created states are automatically initialized. - * Existing states can be re-used several times, using LZ4_resetStreamHC(). - * These methods are API and ABI stable, they can be used in combination with a DLL. + * These functions create and release memory for LZ4 HC streaming state. + * Newly created states are automatically initialized. + * Existing states can be re-used several times, using LZ4_resetStreamHC(). + * These methods are API and ABI stable, they can be used in combination with a DLL. */ LZ4LIB_API LZ4_streamHC_t* LZ4_createStreamHC(void); LZ4LIB_API int LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr); @@ -123,13 +123,13 @@ LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, in */ - /*-************************************* +/*-************************************************************** * PRIVATE DEFINITIONS : * Do not use these definitions. * They are exposed to allow static allocation of `LZ4_streamHC_t`. 
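A minimal sketch of the streaming-state round trip these declarations describe, with the level clamped to [1, LZ4HC_CLEVEL_MAX] by the new LZ4_setCompressionLevel() behaviour in lz4hc.c above; the names `b1`, `b2` and `compress_two_blocks` are illustrative only.
```
#include "lz4hc.h"

/* Sketch: two dependent blocks through one re-usable HC stream state.
 * LZ4_resetStreamHC() re-initialises the state and sets the level. */
static int compress_two_blocks(const char* b1, int s1, const char* b2, int s2,
                               char* dst, int cap)
{
    LZ4_streamHC_t* const st = LZ4_createStreamHC();
    int n1, n2;
    LZ4_resetStreamHC(st, LZ4HC_CLEVEL_DEFAULT);
    n1 = LZ4_compress_HC_continue(st, b1, dst, s1, cap);
    n2 = LZ4_compress_HC_continue(st, b2, dst + n1, s2, cap - n1);
    LZ4_freeStreamHC(st);
    return (n1 && n2) ? n1 + n2 : 0;   /* 0 on failure */
}
```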
* Using these definitions makes the code vulnerable to potential API break when upgrading LZ4 - **************************************/ -#define LZ4HC_DICTIONARY_LOGSIZE 17 /* because of btopt, hc would only need 16 */ + ****************************************************************/ +#define LZ4HC_DICTIONARY_LOGSIZE 16 #define LZ4HC_MAXD (1<= (size_t)RUN_MASK) + int price = litlen; + if (litlen >= (int)RUN_MASK) price += 1 + (litlen-RUN_MASK)/255; return price; } /* requires mlen >= MINMATCH */ -FORCE_INLINE size_t LZ4HC_sequencePrice(size_t litlen, size_t mlen) +LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen) { - size_t price = 2 + 1; /* 16-bit offset + token */ + int price = 1 + 2 ; /* token + 16-bit offset */ price += LZ4HC_literalsPrice(litlen); - if (mlen >= (size_t)(ML_MASK+MINMATCH)) - price+= 1 + (mlen-(ML_MASK+MINMATCH))/255; + if (mlen >= (int)(ML_MASK+MINMATCH)) + price += 1 + (mlen-(ML_MASK+MINMATCH))/255; return price; } /*-************************************* -* Binary Tree search +* Match finder ***************************************/ -FORCE_INLINE int LZ4HC_BinTree_InsertAndGetAllMatches ( - LZ4HC_CCtx_internal* ctx, - const BYTE* const ip, - const BYTE* const iHighLimit, - size_t best_mlen, - LZ4HC_match_t* matches, - int* matchNum) -{ - U16* const chainTable = ctx->chainTable; - U32* const HashTable = ctx->hashTable; - const BYTE* const base = ctx->base; - const U32 dictLimit = ctx->dictLimit; - const U32 current = (U32)(ip - base); - const U32 lowLimit = (ctx->lowLimit + MAX_DISTANCE > current) ? ctx->lowLimit : current - (MAX_DISTANCE - 1); - const BYTE* const dictBase = ctx->dictBase; - const BYTE* match; - int nbAttempts = ctx->searchNum; - int mnum = 0; - U16 *ptr0, *ptr1, delta0, delta1; - U32 matchIndex; - size_t matchLength = 0; - U32* HashPos; - - if (ip + MINMATCH > iHighLimit) return 1; - - /* HC4 match finder */ - HashPos = &HashTable[LZ4HC_hashPtr(ip)]; - matchIndex = *HashPos; - *HashPos = current; - - ptr0 = &DELTANEXTMAXD(current*2+1); - ptr1 = &DELTANEXTMAXD(current*2); - delta0 = delta1 = (U16)(current - matchIndex); - - while ((matchIndex < current) && (matchIndex>=lowLimit) && (nbAttempts)) { - nbAttempts--; - if (matchIndex >= dictLimit) { - match = base + matchIndex; - matchLength = LZ4_count(ip, match, iHighLimit); - } else { - const BYTE* vLimit = ip + (dictLimit - matchIndex); - match = dictBase + matchIndex; - if (vLimit > iHighLimit) vLimit = iHighLimit; - matchLength = LZ4_count(ip, match, vLimit); - if ((ip+matchLength == vLimit) && (vLimit < iHighLimit)) - matchLength += LZ4_count(ip+matchLength, base+dictLimit, iHighLimit); - if (matchIndex+matchLength >= dictLimit) - match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ - } - - if (matchLength > best_mlen) { - best_mlen = matchLength; - if (matches) { - if (matchIndex >= dictLimit) - matches[mnum].off = (int)(ip - match); - else - matches[mnum].off = (int)(ip - (base + matchIndex)); /* virtual matchpos */ - matches[mnum].len = (int)matchLength; - mnum++; - } - if (best_mlen > LZ4_OPT_NUM) break; - } - - if (ip+matchLength >= iHighLimit) /* equal : no way to know if inf or sup */ - break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt the tree */ - - DEBUGLOG(6, "ip :%016llX", (U64)ip); - DEBUGLOG(6, "match:%016llX", (U64)match); - if (*(ip+matchLength) < *(match+matchLength)) { - *ptr0 = delta0; - ptr0 = &DELTANEXTMAXD(matchIndex*2); - if (*ptr0 == (U16)-1) break; - delta0 = *ptr0; - delta1 += 
delta0; - matchIndex -= delta0; - } else { - *ptr1 = delta1; - ptr1 = &DELTANEXTMAXD(matchIndex*2+1); - if (*ptr1 == (U16)-1) break; - delta1 = *ptr1; - delta0 += delta1; - matchIndex -= delta1; - } - } - - *ptr0 = (U16)-1; - *ptr1 = (U16)-1; - if (matchNum) *matchNum = mnum; - /* if (best_mlen > 8) return best_mlen-8; */ - if (!matchNum) return 1; - return 1; -} - - -FORCE_INLINE void LZ4HC_updateBinTree(LZ4HC_CCtx_internal* ctx, const BYTE* const ip, const BYTE* const iHighLimit) -{ - const BYTE* const base = ctx->base; - const U32 target = (U32)(ip - base); - U32 idx = ctx->nextToUpdate; - while(idx < target) - idx += LZ4HC_BinTree_InsertAndGetAllMatches(ctx, base+idx, iHighLimit, 8, NULL, NULL); -} - +typedef struct { + int off; + int len; +} LZ4HC_match_t; -/** Tree updater, providing best match */ -FORCE_INLINE int LZ4HC_BinTree_GetAllMatches ( - LZ4HC_CCtx_internal* ctx, - const BYTE* const ip, const BYTE* const iHighLimit, - size_t best_mlen, LZ4HC_match_t* matches, const int fullUpdate) +LZ4_FORCE_INLINE +LZ4HC_match_t LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx, + const BYTE* ip, const BYTE* const iHighLimit, + int minLen, int nbSearches) { - int mnum = 0; - if (ip < ctx->base + ctx->nextToUpdate) return 0; /* skipped area */ - if (fullUpdate) LZ4HC_updateBinTree(ctx, ip, iHighLimit); - best_mlen = LZ4HC_BinTree_InsertAndGetAllMatches(ctx, ip, iHighLimit, best_mlen, matches, &mnum); - ctx->nextToUpdate = (U32)(ip - ctx->base + best_mlen); - return mnum; -} - - -#define SET_PRICE(pos, ml, offset, ll, cost) \ -{ \ - while (last_pos < pos) { opt[last_pos+1].price = 1<<30; last_pos++; } \ - opt[pos].mlen = (int)ml; \ - opt[pos].off = (int)offset; \ - opt[pos].litlen = (int)ll; \ - opt[pos].price = (int)cost; \ + LZ4HC_match_t match = { 0 , 0 }; + const BYTE* matchPtr = NULL; + /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos), + * but this won't be the case here, as we define iLowLimit==ip, + * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */ + int const matchLength = LZ4HC_InsertAndGetWiderMatch(ctx, + ip, ip, iHighLimit, minLen, &matchPtr, &ip, + nbSearches, 1 /* patternAnalysis */); + if (matchLength <= minLen) return match; + match.len = matchLength; + match.off = (int)(ip-matchPtr); + return match; } static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx, const char* const source, - char* dest, - int inputSize, - int maxOutputSize, - limitedOutput_directive limit, + char* dst, + int* srcSizePtr, + int dstCapacity, + int const nbSearches, size_t sufficient_len, - const int fullUpdate + limitedOutput_directive limit, + int const fullUpdate ) { - LZ4HC_optimal_t opt[LZ4_OPT_NUM + 1]; /* this uses a bit too much stack memory to my taste ... */ - LZ4HC_match_t matches[LZ4_OPT_NUM + 1]; +#define TRAILING_LITERALS 3 + LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* this uses a bit too much stack memory to my taste ... 
*/ const BYTE* ip = (const BYTE*) source; const BYTE* anchor = ip; - const BYTE* const iend = ip + inputSize; + const BYTE* const iend = ip + *srcSizePtr; const BYTE* const mflimit = iend - MFLIMIT; - const BYTE* const matchlimit = (iend - LASTLITERALS); - BYTE* op = (BYTE*) dest; - BYTE* const oend = op + maxOutputSize; + const BYTE* const matchlimit = iend - LASTLITERALS; + BYTE* op = (BYTE*) dst; + BYTE* opSaved = (BYTE*) dst; + BYTE* oend = op + dstCapacity; /* init */ DEBUGLOG(5, "LZ4HC_compress_optimal"); + *srcSizePtr = 0; + if (limit == limitedDestSize) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */ if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1; - ctx->end += inputSize; - ip++; /* Main Loop */ + assert(ip - anchor < LZ4_MAX_INPUT_SIZE); while (ip < mflimit) { - size_t const llen = ip - anchor; - size_t last_pos = 0; - size_t match_num, cur, best_mlen, best_off; - memset(opt, 0, sizeof(LZ4HC_optimal_t)); /* memset only the first one */ + int const llen = (int)(ip - anchor); + int best_mlen, best_off; + int cur, last_match_pos = 0; - match_num = LZ4HC_BinTree_GetAllMatches(ctx, ip, matchlimit, MINMATCH-1, matches, fullUpdate); - if (!match_num) { ip++; continue; } + LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches); + if (firstMatch.len==0) { ip++; continue; } - if ((size_t)matches[match_num-1].len > sufficient_len) { + if ((size_t)firstMatch.len > sufficient_len) { /* good enough solution : immediate encoding */ - best_mlen = matches[match_num-1].len; - best_off = matches[match_num-1].off; - cur = 0; - last_pos = 1; - goto encode; + int const firstML = firstMatch.len; + const BYTE* const matchPos = ip - firstMatch.off; + opSaved = op; + if ( LZ4HC_encodeSequence(&ip, &op, &anchor, firstML, matchPos, limit, oend) ) /* updates ip, op and anchor */ + goto _dest_overflow; + continue; } - /* set prices using matches at position = 0 */ - { size_t matchNb; - for (matchNb = 0; matchNb < match_num; matchNb++) { - size_t mlen = (matchNb>0) ? 
(size_t)matches[matchNb-1].len+1 : MINMATCH; - best_mlen = matches[matchNb].len; /* necessarily < sufficient_len < LZ4_OPT_NUM */ - for ( ; mlen <= best_mlen ; mlen++) { - size_t const cost = LZ4HC_sequencePrice(llen, mlen) - LZ4HC_literalsPrice(llen); - SET_PRICE(mlen, mlen, matches[matchNb].off, 0, cost); /* updates last_pos and opt[pos] */ - } } } - - if (last_pos < MINMATCH) { ip++; continue; } /* note : on clang at least, this test improves performance */ + /* set prices for first positions (literals) */ + { int rPos; + for (rPos = 0 ; rPos < MINMATCH ; rPos++) { + int const cost = LZ4HC_literalsPrice(llen + rPos); + opt[rPos].mlen = 1; + opt[rPos].off = 0; + opt[rPos].litlen = llen + rPos; + opt[rPos].price = cost; + DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup", + rPos, cost, opt[rPos].litlen); + } } + /* set prices using initial match */ + { int mlen = MINMATCH; + int const matchML = firstMatch.len; /* necessarily < sufficient_len < LZ4_OPT_NUM */ + int const offset = firstMatch.off; + assert(matchML < LZ4_OPT_NUM); + for ( ; mlen <= matchML ; mlen++) { + int const cost = LZ4HC_sequencePrice(llen, mlen); + opt[mlen].mlen = mlen; + opt[mlen].off = offset; + opt[mlen].litlen = llen; + opt[mlen].price = cost; + DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup", + mlen, cost, mlen); + } } + last_match_pos = firstMatch.len; + { int addLit; + for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) { + opt[last_match_pos+addLit].mlen = 1; /* literal */ + opt[last_match_pos+addLit].off = 0; + opt[last_match_pos+addLit].litlen = addLit; + opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit); + DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup", + last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit); + } } /* check further positions */ - opt[0].mlen = opt[1].mlen = 1; - for (cur = 1; cur <= last_pos; cur++) { + for (cur = 1; cur < last_match_pos; cur++) { const BYTE* const curPtr = ip + cur; - - /* establish baseline price if cur is literal */ - { size_t price, litlen; - if (opt[cur-1].mlen == 1) { - /* no match at previous position */ - litlen = opt[cur-1].litlen + 1; - if (cur > litlen) { - price = opt[cur - litlen].price + LZ4HC_literalsPrice(litlen); - } else { - price = LZ4HC_literalsPrice(llen + litlen) - LZ4HC_literalsPrice(llen); - } - } else { - litlen = 1; - price = opt[cur - 1].price + LZ4HC_literalsPrice(1); - } - - if (price < (size_t)opt[cur].price) - SET_PRICE(cur, 1 /*mlen*/, 0 /*off*/, litlen, price); /* note : increases last_pos */ + LZ4HC_match_t newMatch; + + if (curPtr >= mflimit) break; + DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u", + cur, opt[cur].price, opt[cur+1].price, cur+1); + if (fullUpdate) { + /* not useful to search here if next position has same (or lower) cost */ + if ( (opt[cur+1].price <= opt[cur].price) + /* in some cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */ + && (opt[cur+MINMATCH].price < opt[cur].price + 3/*min seq price*/) ) + continue; + } else { + /* not useful to search here if next position has same (or lower) cost */ + if (opt[cur+1].price <= opt[cur].price) continue; } - if (cur == last_pos || curPtr >= mflimit) break; + DEBUGLOG(7, "search at rPos:%u", cur); + if (fullUpdate) + newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches); + else + /* only test matches of minimum length; slightly faster, but misses a few bytes */ + newMatch = 
LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches); + if (!newMatch.len) continue; - match_num = LZ4HC_BinTree_GetAllMatches(ctx, curPtr, matchlimit, MINMATCH-1, matches, fullUpdate); - if ((match_num > 0) && (size_t)matches[match_num-1].len > sufficient_len) { + if ( ((size_t)newMatch.len > sufficient_len) + || (newMatch.len + cur >= LZ4_OPT_NUM) ) { /* immediate encoding */ - best_mlen = matches[match_num-1].len; - best_off = matches[match_num-1].off; - last_pos = cur + 1; + best_mlen = newMatch.len; + best_off = newMatch.off; + last_match_pos = cur + 1; goto encode; } - /* set prices using matches at position = cur */ - { size_t matchNb; - for (matchNb = 0; matchNb < match_num; matchNb++) { - size_t ml = (matchNb>0) ? (size_t)matches[matchNb-1].len+1 : MINMATCH; - best_mlen = (cur + matches[matchNb].len < LZ4_OPT_NUM) ? - (size_t)matches[matchNb].len : LZ4_OPT_NUM - cur; - - for ( ; ml <= best_mlen ; ml++) { - size_t ll, price; - if (opt[cur].mlen == 1) { - ll = opt[cur].litlen; - if (cur > ll) - price = opt[cur - ll].price + LZ4HC_sequencePrice(ll, ml); - else - price = LZ4HC_sequencePrice(llen + ll, ml) - LZ4HC_literalsPrice(llen); - } else { - ll = 0; - price = opt[cur].price + LZ4HC_sequencePrice(0, ml); - } - - if (cur + ml > last_pos || price < (size_t)opt[cur + ml].price) { - SET_PRICE(cur + ml, ml, matches[matchNb].off, ll, price); - } } } } - } /* for (cur = 1; cur <= last_pos; cur++) */ - - best_mlen = opt[last_pos].mlen; - best_off = opt[last_pos].off; - cur = last_pos - best_mlen; - -encode: /* cur, last_pos, best_mlen, best_off must be set */ - opt[0].mlen = 1; - while (1) { /* from end to beginning */ - size_t const ml = opt[cur].mlen; - int const offset = opt[cur].off; - opt[cur].mlen = (int)best_mlen; - opt[cur].off = (int)best_off; - best_mlen = ml; - best_off = offset; - if (ml > cur) break; /* can this happen ? */ - cur -= ml; - } + /* before match : set price with literals at beginning */ + { int const baseLitlen = opt[cur].litlen; + int litlen; + for (litlen = 1; litlen < MINMATCH; litlen++) { + int const price = opt[cur].price - LZ4HC_literalsPrice(baseLitlen) + LZ4HC_literalsPrice(baseLitlen+litlen); + int const pos = cur + litlen; + if (price < opt[pos].price) { + opt[pos].mlen = 1; /* literal */ + opt[pos].off = 0; + opt[pos].litlen = baseLitlen+litlen; + opt[pos].price = price; + DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", + pos, price, opt[pos].litlen); + } } } + + /* set prices using match at position = cur */ + { int const matchML = newMatch.len; + int ml = MINMATCH; + + assert(cur + newMatch.len < LZ4_OPT_NUM); + for ( ; ml <= matchML ; ml++) { + int const pos = cur + ml; + int const offset = newMatch.off; + int price; + int ll; + DEBUGLOG(7, "testing price rPos %i (last_match_pos=%i)", + pos, last_match_pos); + if (opt[cur].mlen == 1) { + ll = opt[cur].litlen; + price = ((cur > ll) ? 
opt[cur - ll].price : 0) + + LZ4HC_sequencePrice(ll, ml); + } else { + ll = 0; + price = opt[cur].price + LZ4HC_sequencePrice(0, ml); + } - /* encode all recorded sequences */ - cur = 0; - while (cur < last_pos) { - int const ml = opt[cur].mlen; - int const offset = opt[cur].off; - if (ml == 1) { ip++; cur++; continue; } - cur += ml; - if ( LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ip - offset, limit, oend) ) return 0; - } + if (pos > last_match_pos+TRAILING_LITERALS || price <= opt[pos].price) { + DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i)", + pos, price, ml); + assert(pos < LZ4_OPT_NUM); + if ( (ml == matchML) /* last pos of last match */ + && (last_match_pos < pos) ) + last_match_pos = pos; + opt[pos].mlen = ml; + opt[pos].off = offset; + opt[pos].litlen = ll; + opt[pos].price = price; + } } } + /* complete following positions with literals */ + { int addLit; + for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) { + opt[last_match_pos+addLit].mlen = 1; /* literal */ + opt[last_match_pos+addLit].off = 0; + opt[last_match_pos+addLit].litlen = addLit; + opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit); + DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit); + } } + } /* for (cur = 1; cur <= last_match_pos; cur++) */ + + best_mlen = opt[last_match_pos].mlen; + best_off = opt[last_match_pos].off; + cur = last_match_pos - best_mlen; + +encode: /* cur, last_match_pos, best_mlen, best_off must be set */ + assert(cur < LZ4_OPT_NUM); + assert(last_match_pos >= 1); /* == 1 when only one candidate */ + DEBUGLOG(6, "reverse traversal, looking for shortest path") + DEBUGLOG(6, "last_match_pos = %i", last_match_pos); + { int candidate_pos = cur; + int selected_matchLength = best_mlen; + int selected_offset = best_off; + while (1) { /* from end to beginning */ + int const next_matchLength = opt[candidate_pos].mlen; /* can be 1, means literal */ + int const next_offset = opt[candidate_pos].off; + DEBUGLOG(6, "pos %i: sequence length %i", candidate_pos, selected_matchLength); + opt[candidate_pos].mlen = selected_matchLength; + opt[candidate_pos].off = selected_offset; + selected_matchLength = next_matchLength; + selected_offset = next_offset; + if (next_matchLength > candidate_pos) break; /* last match elected, first match to encode */ + assert(next_matchLength > 0); /* can be 1, means literal */ + candidate_pos -= next_matchLength; + } } + + /* encode all recorded sequences in order */ + { int rPos = 0; /* relative position (to ip) */ + while (rPos < last_match_pos) { + int const ml = opt[rPos].mlen; + int const offset = opt[rPos].off; + if (ml == 1) { ip++; rPos++; continue; } /* literal; note: can end up with several literals, in which case, skip them */ + rPos += ml; + assert(ml >= MINMATCH); + assert((offset >= 1) && (offset <= MAX_DISTANCE)); + opSaved = op; + if ( LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ip - offset, limit, oend) ) /* updates ip, op and anchor */ + goto _dest_overflow; + } } } /* while (ip < mflimit) */ +_last_literals: /* Encode Last Literals */ - { int lastRun = (int)(iend - anchor); - if ((limit) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize)) return 0; /* Check output limit */ - if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK< 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } - else *op++ = (BYTE)(lastRun< oend)) { + if (limit == limitedOutput) return 0; /* Check output limit */ + /* adapt lastRunSize to fill 
'dst' */ + lastRunSize = (size_t)(oend - op) - 1; + litLength = (lastRunSize + 255 - RUN_MASK) / 255; + lastRunSize -= litLength; + } + ip = anchor + lastRunSize; + + if (lastRunSize >= RUN_MASK) { + size_t accumulator = lastRunSize - RUN_MASK; + *op++ = (RUN_MASK << ML_BITS); + for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255; + *op++ = (BYTE) accumulator; + } else { + *op++ = (BYTE)(lastRunSize << ML_BITS); + } + memcpy(op, anchor, lastRunSize); + op += lastRunSize; } /* End */ - return (int) ((char*)op-dest); + *srcSizePtr = (int) (((const char*)ip) - source); + return (int) ((char*)op-dst); + +_dest_overflow: + if (limit == limitedDestSize) { + op = opSaved; /* restore correct out pointer */ + goto _last_literals; + } + return 0; } diff --git a/lz4/xxhash.c b/lz4/xxhash.c index a532358..bcf1f1d 100644 --- a/lz4/xxhash.c +++ b/lz4/xxhash.c @@ -115,21 +115,21 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ #endif -#ifndef FORCE_INLINE +#ifndef XXH_FORCE_INLINE # ifdef _MSC_VER /* Visual Studio */ -# define FORCE_INLINE static __forceinline +# define XXH_FORCE_INLINE static __forceinline # else # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # ifdef __GNUC__ -# define FORCE_INLINE static inline __attribute__((always_inline)) +# define XXH_FORCE_INLINE static inline __attribute__((always_inline)) # else -# define FORCE_INLINE static inline +# define XXH_FORCE_INLINE static inline # endif # else -# define FORCE_INLINE static +# define XXH_FORCE_INLINE static # endif /* __STDC_VERSION__ */ # endif /* _MSC_VER */ -#endif /* FORCE_INLINE */ +#endif /* XXH_FORCE_INLINE */ /* ************************************* @@ -223,7 +223,7 @@ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; *****************************/ typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; -FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +XXH_FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) { if (align==XXH_unaligned) return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); @@ -231,7 +231,7 @@ FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_a return endian==XXH_littleEndian ? 
*(const U32*)ptr : XXH_swap32(*(const U32*)ptr); } -FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) +XXH_FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) { return XXH_readLE32_align(ptr, endian, XXH_unaligned); } @@ -266,7 +266,7 @@ static U32 XXH32_round(U32 seed, U32 input) return seed; } -FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align) +XXH_FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align) { const BYTE* p = (const BYTE*)input; const BYTE* bEnd = p + len; @@ -381,7 +381,7 @@ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int s } -FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) +XXH_FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) { const BYTE* p = (const BYTE*)input; const BYTE* const bEnd = p + len; @@ -451,7 +451,7 @@ XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* -FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) +XXH_FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) { const BYTE * p = (const BYTE*)state->mem32; const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize; @@ -583,7 +583,7 @@ static U64 XXH_swap64 (U64 x) } #endif -FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +XXH_FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) { if (align==XXH_unaligned) return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); @@ -591,7 +591,7 @@ FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_a return endian==XXH_littleEndian ? 
*(const U64*)ptr : XXH_swap64(*(const U64*)ptr); } -FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) +XXH_FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) { return XXH_readLE64_align(ptr, endian, XXH_unaligned); } @@ -626,7 +626,7 @@ static U64 XXH64_mergeRound(U64 acc, U64 val) return acc; } -FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) +XXH_FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) { const BYTE* p = (const BYTE*)input; const BYTE* const bEnd = p + len; @@ -750,7 +750,7 @@ XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long return XXH_OK; } -FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) +XXH_FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) { const BYTE* p = (const BYTE*)input; const BYTE* const bEnd = p + len; @@ -815,7 +815,7 @@ XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* return XXH64_update_endian(state_in, input, len, XXH_bigEndian); } -FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) +XXH_FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) { const BYTE * p = (const BYTE*)state->mem64; const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize; diff --git a/lz4framed/__init__.py b/lz4framed/__init__.py index c3c4690..1913a82 100644 --- a/lz4framed/__init__.py +++ b/lz4framed/__init__.py @@ -85,8 +85,8 @@ def __init__(self, fp=None, block_size_id=LZ4F_BLOCKSIZE_DEFAULT, block_mode_lin checksum (bool): Whether to produce frame checksum autoflush (bool): Whether to return (or write to fp) compressed data on each update() call rather than waiting for internal buffer to be filled. (This reduces internal buffer size.) - level (int): Compression level. Values lower than 3 use fast compression. Recommended - range for hc compression is between 4 and 9, with a maximum of LZ4_COMPRESSION_MAX. + level (int): Compression level. Values lower than 3 (including negative ones) use fast compression. + Recommended range for hc compression is between 4 and 9, with a maximum of LZ4_COMPRESSION_MAX. block_checksum (bool): Whether to produce checksum after each block """ self.__ctx = create_compression_context() diff --git a/lz4framed/py-lz4framed.c b/lz4framed/py-lz4framed.c index 7db946e..de597d3 100644 --- a/lz4framed/py-lz4framed.c +++ b/lz4framed/py-lz4framed.c @@ -31,6 +31,7 @@ #define MAX(x, y) (x) >= (y) ? (x) : (y) #define KB *(1<<10) #define MB *(1<<20) +// Due to negative levels now being supported, this no longer is particularly meaningful. #define LZ4_COMPRESSION_MIN 0 #define LZ4_COMPRESSION_MIN_HC LZ4HC_CLEVEL_MIN #define LZ4_COMPRESSION_MAX LZ4HC_CLEVEL_MAX @@ -210,9 +211,9 @@ PyDoc_STRVAR(_lz4framed_compress__doc__, " block_mode_linked (bool): Whether compression blocks are linked. Better compression\n" " is achieved in linked mode.\n" " checksum (bool): Whether to produce frame checksum\n" -" level (int): Compression level. Values lower than LZ4F_COMPRESSION_MIN_HC use fast\n" -" compression. Recommended range for hc compression is between 4 and 9,\n" -" with a maximum of LZ4F_COMPRESSION_MAX.\n" +" level (int): Compression level. 
Values lower than LZ4F_COMPRESSION_MIN_HC (including\n" +" negative ones) use fast compression. Recommended range for hc compression\n" +" is between 4 and 9, with a maximum of LZ4F_COMPRESSION_MAX.\n" " block_checksum (bool): Whether to produce checksum after each block.\n" "\n" "Raises:\n" @@ -571,9 +572,9 @@ PyDoc_STRVAR(_lz4framed_compress_begin__doc__, " checksum (bool): Whether to produce frame checksum\n" " autoflush (bool): Whether to flush output on update() calls rather than buffering\n" " incomplete blocks internally.\n" -" level (int): Compression level. Values lower than LZ4F_COMPRESSION_MIN_HC use fast\n" -" compression. Recommended range for hc compression is between 4 and 9,\n" -" with a maximum of LZ4F_COMPRESSION_MAX.\n" +" level (int): Compression level. Values lower than LZ4F_COMPRESSION_MIN_HC (including\n" +" negative ones) use fast compression. Recommended range for hc compression\n" +" is between 4 and 9, with a maximum of LZ4F_COMPRESSION_MAX.\n" " block_checksum (bool): Whether to produce checksum after each block.\n" "\n" "Raises:\n" diff --git a/setup.py b/setup.py index e26962a..9725eb3 100644 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ READ_MD = lambda f: convert(f, 'rst') # noqa: E731 -VERSION = '0.10.0' +VERSION = '0.11.0' setup( name='py-lz4framed',