1 /* miniz.c v1.14 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing
2 See "unlicense" statement at the end of this file.
3 Rich Geldreich <richgel99@gmail.com>, last updated May 20, 2012
4 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt
6 Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define
7 MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros).
10 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect).
11 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
12 Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files.
13 Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
14 Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly
15 "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning).
16 Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64.
17 Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test.
18 Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives.
19 Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.)
20 Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself).
21 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's.
22 level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <bruced@valvesoftware.com> for the feedback/bug report.
23 5/28/11 v1.11 - Added statement from unlicense.org
24 5/27/11 v1.10 - Substantial compressor optimizations:
25 Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a
26 Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86).
27 Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types.
28 Refactored the compression code for better readability and maintainability.
29 Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large
30 drop in throughput on some files).
31 5/15/11 v1.09 - Initial stable release.
33 * Low-level Deflate/Inflate implementation notes:
35 Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or
36 greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses
37 approximately as well as zlib.
39 Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function
40 coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory
41 block large enough to hold the entire file.
43 The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation.
45 * Important: For best perf. be sure to customize the below macros for your target platform:
46 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
47 #define MINIZ_LITTLE_ENDIAN 1
48 #define MINIZ_HAS_64BIT_REGISTERS 1
51 #ifndef MINIZ_HEADER_INCLUDED
52 #define MINIZ_HEADER_INCLUDED
56 // Defines to completely disable specific portions of miniz.c:
57 // If all macros here are defined the only functionality remaining will be CRC-32, adler-32, tinfl, and tdefl.
59 // Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
60 // Note if MINIZ_NO_MALLOC is defined then the user must always provide custom user alloc/free/realloc
61 // callbacks to the zlib and archive API's, and a few stand-alone helper API's which don't provide custom user
62 // functions (such as tdefl_compress_mem_to_heap() and tinfl_decompress_mem_to_heap()) won't work.
63 //#define MINIZ_NO_MALLOC
65 #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__i386) || defined(__i486__) || defined(__i486) || defined(i386) || defined(__ia64__) || defined(__x86_64__)
66 // MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
67 #define MINIZ_X86_OR_X64_CPU 1
70 #if (__BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
71 // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
72 #define MINIZ_LITTLE_ENDIAN 1
75 #if MINIZ_X86_OR_X64_CPU
76 // Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient integer loads and stores from unaligned addresses.
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
80 #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || defined(_LP64) || defined(__LP64__) || defined(__ia64__) || defined(__x86_64__)
81 // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are reasonably fast (and don't involve compiler generated calls to helper functions).
82 #define MINIZ_HAS_64BIT_REGISTERS 1
89 // ------------------- zlib-style API Definitions.
91 // For more compatibility with zlib, miniz.c uses unsigned long for some parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
92 typedef unsigned long mz_ulong;
94 // mz_free() internally uses the MZ_FREE() macro (which by default calls free() unless you've modified the MZ_MALLOC macro) to release a block allocated from the heap.
95 void mz_free(void *p);
97 #define MZ_ADLER32_INIT (1)
98 // mz_adler32() returns the initial adler-32 value to use when called with ptr==NULL.
99 mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
101 #define MZ_CRC32_INIT (0)
102 // mz_crc32() returns the initial CRC-32 value to use when called with ptr==NULL.
103 mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
105 // Compression strategies.
106 enum { MZ_DEFAULT_STRATEGY = 0, MZ_FILTERED = 1, MZ_HUFFMAN_ONLY = 2, MZ_RLE = 3, MZ_FIXED = 4 };
109 #define MZ_DEFLATED 8
111 // ------------------- Types and macros
113 typedef unsigned char mz_uint8;
114 typedef signed short mz_int16;
115 typedef unsigned short mz_uint16;
116 typedef unsigned int mz_uint32;
117 typedef unsigned int mz_uint;
118 typedef long long mz_int64;
119 typedef unsigned long long mz_uint64;
// Works around MSVC's spammy "warning C4127: conditional expression is constant" message.
// The comma-operator form is only needed (and only meaningful) under MSVC; everywhere
// else a plain do{...}while(0) terminator suffices. The two defines must be mutually
// exclusive -- defining both unconditionally is a macro redefinition.
#ifdef _MSC_VER
  #define MZ_MACRO_END while (0, 0)
#else
  #define MZ_MACRO_END while (0)
#endif
132 // ------------------- Low-level Decompression API Definitions
134 // Decompression flags used by tinfl_decompress().
135 // TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the input is a raw deflate stream.
136 // TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available beyond the end of the supplied input buffer. If clear, the input buffer contains all remaining input.
137 // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large enough to hold the entire decompressed stream. If clear, the output buffer is at least the size of the dictionary (typically 32KB).
138 // TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the decompressed bytes.
141 TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
142 TINFL_FLAG_HAS_MORE_INPUT = 2,
143 TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
144 TINFL_FLAG_COMPUTE_ADLER32 = 8
147 // High level decompression functions:
148 // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block allocated via malloc().
150 // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data to decompress.
152 // Function returns a pointer to the decompressed data, or NULL on failure.
153 // *pOut_len will be set to the decompressed data's size, which could be larger than src_buf_len on uncompressible data.
154 // The caller must call mz_free() on the returned block when it's no longer needed.
155 void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags);
157 // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block in memory.
158 // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes written on success.
159 #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
160 size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags);
162 // tinfl_decompress_mem_to_callback() decompresses a block in memory to an internal 32KB buffer, and a user provided callback function will be called to flush the buffer.
163 // Returns 1 on success or 0 on failure.
164 typedef int (*tinfl_put_buf_func_ptr)(const void* pBuf, int len, void *pUser);
165 int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags);
167 struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor;
169 // Max size of LZ dictionary.
170 #define TINFL_LZ_DICT_SIZE 32768
175 TINFL_STATUS_BAD_PARAM = -3,
176 TINFL_STATUS_ADLER32_MISMATCH = -2,
177 TINFL_STATUS_FAILED = -1,
178 TINFL_STATUS_DONE = 0,
179 TINFL_STATUS_NEEDS_MORE_INPUT = 1,
180 TINFL_STATUS_HAS_MORE_OUTPUT = 2
183 // Initializes the decompressor to its initial state.
184 #define tinfl_init(r) do { (r)->m_state = 0; } MZ_MACRO_END
185 #define tinfl_get_adler32(r) (r)->m_check_adler32
187 // Main low-level decompressor coroutine function. This is the only function actually needed for decompression. All the other functions are just high-level helpers for improved usability.
188 // This is a universal API, i.e. it can be used as a building block to build any desired higher level decompression API. In the limit case, it can be called once per every byte input or output.
189 tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags);
191 // Internal/private bits follow.
194 TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19,
195 TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
200 mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
201 mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
204 #if MINIZ_HAS_64BIT_REGISTERS
205 #define TINFL_USE_64BIT_BITBUF 1
208 #if TINFL_USE_64BIT_BITBUF
209 typedef mz_uint64 tinfl_bit_buf_t;
210 #define TINFL_BITBUF_SIZE (64)
212 typedef mz_uint32 tinfl_bit_buf_t;
213 #define TINFL_BITBUF_SIZE (32)
216 struct tinfl_decompressor_tag
218 mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES];
219 tinfl_bit_buf_t m_bit_buf;
220 size_t m_dist_from_out_buf_start;
221 tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
222 mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
225 // ------------------- Low-level Compression API Definitions
227 // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly slower, and raw/dynamic blocks will be output more frequently).
228 #define TDEFL_LESS_MEMORY 0
230 // tdefl_init() compression flags logically OR'd together (low 12 bits contain the max. number of probes per dictionary search):
231 // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap compression), 4095=Huffman+LZ (slowest/best compression).
234 TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF
237 // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before the deflate data, and the Adler-32 of the source data at the end. Otherwise, you'll get raw deflate data.
238 // TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even when not writing zlib headers).
239 // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more efficient lazy parsing.
240 // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's initialization time to the minimum, but the output may vary from run to run given the same input (depending on the contents of memory).
241 // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
242 // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
243 // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
244 // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
247 TDEFL_WRITE_ZLIB_HEADER = 0x01000,
248 TDEFL_COMPUTE_ADLER32 = 0x02000,
249 TDEFL_GREEDY_PARSING_FLAG = 0x04000,
250 TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
251 TDEFL_RLE_MATCHES = 0x10000,
252 TDEFL_FILTER_MATCHES = 0x20000,
253 TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
254 TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
257 // High level compression functions:
258 // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block allocated via malloc().
260 // pSrc_buf, src_buf_len: Pointer and size of source block to compress.
261 // flags: The max match finder probes (default is 128) logically OR'd against the above flags. Higher probes are slower but improve compression.
263 // Function returns a pointer to the compressed data, or NULL on failure.
264 // *pOut_len will be set to the compressed data's size, which could be larger than src_buf_len on uncompressible data.
265 // The caller must free() the returned block when it's no longer needed.
266 void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags);
268 // tdefl_compress_mem_to_mem() compresses a block in memory to another block in memory.
269 // Returns 0 on failure.
270 size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags);
272 // Output stream interface. The compressor uses this interface to write compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time.
273 typedef mz_bool (*tdefl_put_buf_func_ptr)(const void* pBuf, int len, void *pUser);
275 // tdefl_compress_mem_to_output() compresses a block to an output stream. The above helpers use this function internally.
276 mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags);
278 enum { TDEFL_MAX_HUFF_TABLES = 3, TDEFL_MAX_HUFF_SYMBOLS_0 = 288, TDEFL_MAX_HUFF_SYMBOLS_1 = 32, TDEFL_MAX_HUFF_SYMBOLS_2 = 19, TDEFL_LZ_DICT_SIZE = 32768, TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, TDEFL_MIN_MATCH_LEN = 3, TDEFL_MAX_MATCH_LEN = 258 };
280 // TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed output block (using static/fixed Huffman codes).
281 #if TDEFL_LESS_MEMORY
282 enum { TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13 ) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 12, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS };
284 enum { TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13 ) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 15, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS };
287 // The low-level tdefl functions below may be used directly if the above helper functions aren't flexible enough. The low-level functions don't make any heap allocations, unlike the above helper functions.
290 TDEFL_STATUS_BAD_PARAM = -2,
291 TDEFL_STATUS_PUT_BUF_FAILED = -1,
292 TDEFL_STATUS_OKAY = 0,
293 TDEFL_STATUS_DONE = 1,
296 // Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
300 TDEFL_SYNC_FLUSH = 2,
301 TDEFL_FULL_FLUSH = 3,
305 // tdefl's compression state structure.
308 tdefl_put_buf_func_ptr m_pPut_buf_func;
309 void *m_pPut_buf_user;
310 mz_uint m_flags, m_max_probes[2];
311 int m_greedy_parsing;
312 mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
313 mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
314 mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in, m_bit_buffer;
315 mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit, m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index, m_wants_to_finish;
316 tdefl_status m_prev_return_status;
317 const void *m_pIn_buf;
319 size_t *m_pIn_buf_size, *m_pOut_buf_size;
321 const mz_uint8 *m_pSrc;
322 size_t m_src_buf_left, m_out_buf_ofs;
323 mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
324 mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
325 mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
326 mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
327 mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
328 mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
329 mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
330 mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
333 // Initializes the compressor.
334 // There is no corresponding deinit() function because the tdefl API's do not dynamically allocate memory.
335 // pPut_buf_func: If non-NULL, output data will be supplied to the specified callback. In this case, the user should call the tdefl_compress_buffer() API for compression.
336 // If pPut_buf_func is NULL the user should always call the tdefl_compress() API.
337 // flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER, etc.)
338 tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags);
340 // Compresses a block of data, consuming as much of the specified input buffer as possible, and writing as much compressed data to the specified output buffer as possible.
341 tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush);
343 // tdefl_compress_buffer() is only usable when the tdefl_init() is called with a non-NULL tdefl_put_buf_func_ptr.
344 // tdefl_compress_buffer() always consumes the entire input buffer.
345 tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush);
347 tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
348 mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
354 #endif // MINIZ_HEADER_INCLUDED
356 // ------------------- End of Header: Implementation follows. (If you only want the header, define MINIZ_HEADER_FILE_ONLY.)
358 #ifndef MINIZ_HEADER_FILE_ONLY
360 typedef unsigned char mz_validate_uint16[sizeof(mz_uint16)==2 ? 1 : -1];
361 typedef unsigned char mz_validate_uint32[sizeof(mz_uint32)==4 ? 1 : -1];
362 typedef unsigned char mz_validate_uint64[sizeof(mz_uint64)==8 ? 1 : -1];
367 #define MZ_ASSERT(x) assert(x)
369 #ifdef MINIZ_NO_MALLOC
370 #define MZ_MALLOC(x) NULL
371 #define MZ_FREE(x) (void)x, ((void)0)
372 #define MZ_REALLOC(p, x) NULL
374 #define MZ_MALLOC(x) malloc(x)
375 #define MZ_FREE(x) free(x)
376 #define MZ_REALLOC(p, x) realloc(p, x)
379 #define MZ_MAX(a,b) (((a)>(b))?(a):(b))
380 #define MZ_MIN(a,b) (((a)<(b))?(a):(b))
381 #define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
383 #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
384 #define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
385 #define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
387 #define MZ_READ_LE16(p) ((mz_uint32)(((const mz_uint8 *)(p))[0]) | ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
388 #define MZ_READ_LE32(p) ((mz_uint32)(((const mz_uint8 *)(p))[0]) | ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
392 #define MZ_FORCEINLINE __forceinline
393 #elif defined(__GNUC__)
394 #define MZ_FORCEINLINE inline __attribute__((__always_inline__))
396 #define MZ_FORCEINLINE
403 // ------------------- zlib-style API's
405 mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len)
407 mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); size_t block_len = buf_len % 5552;
408 if (!ptr) return MZ_ADLER32_INIT;
410 for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
411 s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1;
412 s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1;
414 for ( ; i < block_len; ++i) s1 += *ptr++, s2 += s1;
415 s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552;
417 return (s2 << 16) + s1;
420 // Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C implementation that balances processor cache usage against speed": http://www.geocities.com/malbrain/
421 mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len)
423 static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
424 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c };
425 mz_uint32 crcu32 = (mz_uint32)crc;
426 if (!ptr) return MZ_CRC32_INIT;
427 crcu32 = ~crcu32; while (buf_len--) { mz_uint8 b = *ptr++; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; }
// mz_free(): releases a heap block returned by miniz helpers such as
// tinfl_decompress_mem_to_heap(). Routes through the MZ_FREE() macro so a
// custom allocator (or MINIZ_NO_MALLOC) is honored; safe to call with NULL.
void mz_free(void *p)
{
  MZ_FREE(p);
}
436 // ------------------- Low-level Decompression (completely independent from all compression API's)
438 #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
439 #define TINFL_MEMSET(p, c, l) memset(p, c, l)
441 #define TINFL_CR_BEGIN switch(r->m_state) { case 0:
442 #define TINFL_CR_RETURN(state_index, result) do { status = result; r->m_state = state_index; goto common_exit; case state_index:; } MZ_MACRO_END
443 #define TINFL_CR_RETURN_FOREVER(state_index, result) do { for ( ; ; ) { TINFL_CR_RETURN(state_index, result); } } MZ_MACRO_END
444 #define TINFL_CR_FINISH }
446 // TODO: If the caller has indicated that there's no more input, and we attempt to read beyond the input buf, then something is wrong with the input because the inflator never
447 // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of the stream with 0's in this scenario.
448 #define TINFL_GET_BYTE(state_index, c) do { \
449 if (pIn_buf_cur >= pIn_buf_end) { \
451 if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
452 TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
453 if (pIn_buf_cur < pIn_buf_end) { \
454 c = *pIn_buf_cur++; \
462 } else c = *pIn_buf_cur++; } MZ_MACRO_END
464 #define TINFL_NEED_BITS(state_index, n) do { mz_uint c; TINFL_GET_BYTE(state_index, c); bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); num_bits += 8; } while (num_bits < (mz_uint)(n))
465 #define TINFL_SKIP_BITS(state_index, n) do { if (num_bits < (mz_uint)(n)) { TINFL_NEED_BITS(state_index, n); } bit_buf >>= (n); num_bits -= (n); } MZ_MACRO_END
466 #define TINFL_GET_BITS(state_index, b, n) do { if (num_bits < (mz_uint)(n)) { TINFL_NEED_BITS(state_index, n); } b = bit_buf & ((1 << (n)) - 1); bit_buf >>= (n); num_bits -= (n); } MZ_MACRO_END
468 // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes remaining in the input buffer falls below 2.
469 // It reads just enough bytes from the input stream that are needed to decode the next Huffman code (and absolutely no more). It works by trying to fully decode a
470 // Huffman code by using whatever bits are currently present in the bit buffer. If this fails, it reads another byte, and tries again until it succeeds or until the
471 // bit buffer contains >=15 bits (deflate's max. Huffman code size).
472 #define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
474 temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
476 code_len = temp >> 9; \
477 if ((code_len) && (num_bits >= code_len)) \
479 } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
480 code_len = TINFL_FAST_LOOKUP_BITS; \
482 temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
483 } while ((temp < 0) && (num_bits >= (code_len + 1))); if (temp >= 0) break; \
484 } TINFL_GET_BYTE(state_index, c); bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); num_bits += 8; \
485 } while (num_bits < 15);
487 // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex than you would initially expect because the zlib API expects the decompressor to never read
488 // beyond the final byte of the deflate stream. (In other words, when this macro wants to read another byte from the input, it REALLY needs another byte in order to fully
489 // decode the next Huffman code.) Handling this properly is particularly important on raw deflate (non-zlib) streams, which aren't followed by a byte aligned adler-32.
490 // The slow path is only executed at the very end of the input buffer.
491 #define TINFL_HUFF_DECODE(state_index, sym, pHuff) do { \
492 int temp; mz_uint code_len, c; \
493 if (num_bits < 15) { \
494 if ((pIn_buf_end - pIn_buf_cur) < 2) { \
495 TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
497 bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); pIn_buf_cur += 2; num_bits += 16; \
500 if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) \
501 code_len = temp >> 9, temp &= 511; \
503 code_len = TINFL_FAST_LOOKUP_BITS; do { temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; } while (temp < 0); \
504 } sym = temp; bit_buf >>= code_len; num_bits -= code_len; } MZ_MACRO_END
506 tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags)
508 static const int s_length_base[31] = { 3,4,5,6,7,8,9,10,11,13, 15,17,19,23,27,31,35,43,51,59, 67,83,99,115,131,163,195,227,258,0,0 };
509 static const int s_length_extra[31]= { 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 };
510 static const int s_dist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, 257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0};
511 static const int s_dist_extra[32] = { 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
512 static const mz_uint8 s_length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 };
513 static const int s_min_table_sizes[3] = { 257, 1, 4 };
515 tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf;
516 const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
517 mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
518 size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? (size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start;
520 // Ensure the output buffer's size is a power of 2, unless the output buffer is large enough to hold the entire output file (in which case it doesn't matter).
521 if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; }
523 num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start;
526 bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1;
527 if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER)
529 TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1);
530 counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
531 if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1U << (8U + (r->m_zhdr0 >> 4)))));
532 if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); }
537 TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1;
540 TINFL_SKIP_BITS(5, num_bits & 7);
541 for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); }
542 if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); }
543 while ((counter) && (num_bits))
545 TINFL_GET_BITS(51, dist, 8);
546 while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); }
547 *pOut_buf_cur++ = (mz_uint8)dist;
552 size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); }
553 while (pIn_buf_cur >= pIn_buf_end)
555 if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT)
557 TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
561 TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
564 n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter);
565 TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n;
568 else if (r->m_type == 3)
570 TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
576 mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i;
577 r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
578 for ( i = 0; i <= 143; ++i) *p++ = 8;
579 for ( ; i <= 255; ++i) *p++ = 9;
580 for ( ; i <= 279; ++i) *p++ = 7;
581 for ( ; i <= 287; ++i) *p++ = 8;
585 for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; }
586 MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; }
587 r->m_table_sizes[2] = 19;
589 for ( ; (int)r->m_type >= 0; r->m_type--)
591 int tree_next, tree_cur; tinfl_huff_table *pTable;
592 mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree);
593 for (i = 0; i < r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++;
594 used_syms = 0, total = 0; next_code[0] = next_code[1] = 0;
595 for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); }
596 if ((65536 != total) && (used_syms > 1))
598 TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
600 for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index)
602 mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue;
603 cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1);
604 if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; }
605 if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; }
606 rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
607 for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--)
609 tree_cur -= ((rev_code >>= 1) & 1);
610 if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1];
612 tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
616 for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]); )
618 mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; }
619 if ((dist == 16) && (!counter))
621 TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
623 num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16];
624 TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? r->m_len_codes[counter - 1] : 0, s); counter += s;
626 if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter)
628 TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
630 TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]);
638 if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2))
640 TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
643 while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); }
644 *pOut_buf_cur++ = (mz_uint8)counter;
648 int sym2; mz_uint code_len;
649 #if TINFL_USE_64BIT_BITBUF
650 if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; }
652 if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; }
654 if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
655 code_len = sym2 >> 9;
658 code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0);
660 counter = sym2; bit_buf >>= code_len; num_bits -= code_len;
664 #if !TINFL_USE_64BIT_BITBUF
665 if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; }
667 if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
668 code_len = sym2 >> 9;
671 code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0);
673 bit_buf >>= code_len; num_bits -= code_len;
675 pOut_buf_cur[0] = (mz_uint8)counter;
682 pOut_buf_cur[1] = (mz_uint8)sym2;
686 if ((counter &= 511) == 256) break;
688 num_extra = s_length_extra[counter - 257]; counter = s_length_base[counter - 257];
689 if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; }
691 TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
692 num_extra = s_dist_extra[dist]; dist = s_dist_base[dist];
693 if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; }
695 dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
696 if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
698 TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
701 pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask);
703 if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end)
707 while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); }
708 *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask];
712 #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
713 else if ((counter >= 9) && (counter <= dist))
715 const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
718 ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
719 ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
721 } while ((pSrc += 8) < pSrc_end);
722 if ((counter &= 7) < 3)
726 pOut_buf_cur[0] = pSrc[0];
728 pOut_buf_cur[1] = pSrc[1];
729 pOut_buf_cur += counter;
737 pOut_buf_cur[0] = pSrc[0];
738 pOut_buf_cur[1] = pSrc[1];
739 pOut_buf_cur[2] = pSrc[2];
740 pOut_buf_cur += 3; pSrc += 3;
741 } while ((int)(counter -= 3) > 2);
742 if ((int)counter > 0)
744 pOut_buf_cur[0] = pSrc[0];
745 if ((int)counter > 1)
746 pOut_buf_cur[1] = pSrc[1];
747 pOut_buf_cur += counter;
751 } while (!(r->m_final & 1));
752 if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER)
754 TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; }
756 TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
760 r->m_num_bits = num_bits; r->m_bit_buf = bit_buf; r->m_dist = dist; r->m_counter = counter; r->m_num_extra = num_extra; r->m_dist_from_out_buf_start = dist_from_out_buf_start;
761 *pIn_buf_size = pIn_buf_cur - pIn_buf_next; *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
762 if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0))
764 const mz_uint8 *ptr = pOut_buf_next; size_t buf_len = *pOut_buf_size;
765 mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; size_t block_len = buf_len % 5552;
768 for (i = 0; i + 7 < block_len; i += 8, ptr += 8)
770 s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1;
771 s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1;
773 for ( ; i < block_len; ++i) s1 += *ptr++, s2 += s1;
774 s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552;
776 r->m_check_adler32 = (s2 << 16) + s1; if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32)) status = TINFL_STATUS_ADLER32_MISMATCH;
781 // Higher level helper functions.
// Decompresses an in-memory deflate stream into a heap buffer that is grown
// geometrically (doubling, minimum 128 bytes) until decompression finishes.
// On success returns the buffer (caller releases it with MZ_FREE) and stores
// the decompressed size in *pOut_len; on failure returns NULL with *pOut_len = 0.
// NOTE(review): init/loop-header/brace lines appear to be elided from this
// extraction -- confirm against upstream miniz.c before editing.
782 void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags)
784 tinfl_decompressor decomp; void *pBuf = NULL, *pNew_buf; size_t src_buf_ofs = 0, out_buf_capacity = 0;
789 size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
// The whole input is already present, so TINFL_FLAG_HAS_MORE_INPUT is cleared;
// the output buffer is linear, hence TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF.
790 tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8*)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8*)pBuf, pBuf ? (mz_uint8*)pBuf + *pOut_len : NULL, &dst_buf_size,
791 (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
// Any error -- or a request for more input when none exists -- is fatal.
792 if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT))
794 MZ_FREE(pBuf); *pOut_len = 0; return NULL;
796 src_buf_ofs += src_buf_size;
797 *pOut_len += dst_buf_size;
798 if (status == TINFL_STATUS_DONE) break;
// Not done yet: grow the output buffer and decompress the remainder.
799 new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
800 pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
// Realloc failure path: free the old buffer (still valid) and bail out.
803 MZ_FREE(pBuf); *pOut_len = 0; return NULL;
805 pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity;
// One-shot decompression between two caller-supplied memory buffers
// (non-wrapping output). Returns the number of bytes written to pOut_buf,
// or TINFL_DECOMPRESS_MEM_TO_MEM_FAILED if the stream did not finish cleanly.
810 size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags)
812 tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp);
813 status = tinfl_decompress(&decomp, (const mz_uint8*)pSrc_buf, &src_buf_len, (mz_uint8*)pOut_buf, (mz_uint8*)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
814 return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len;
// Decompresses an in-memory stream by repeatedly filling a heap-allocated
// TINFL_LZ_DICT_SIZE circular dictionary and handing each produced chunk to
// pPut_buf_func. Returns TINFL_STATUS_FAILED if the dictionary allocation
// fails; otherwise *pIn_buf_size is updated to the number of input bytes
// consumed. NOTE(review): the tail of this function (dictionary free /
// final return) is not visible in this extraction -- confirm upstream.
817 int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
820 tinfl_decompressor decomp;
821 mz_uint8 *pDict = (mz_uint8*)MZ_MALLOC(TINFL_LZ_DICT_SIZE); size_t in_buf_ofs = 0, dict_ofs = 0;
823 return TINFL_STATUS_FAILED;
// Wrapping dictionary output: both HAS_MORE_INPUT and NON_WRAPPING flags are
// forced off for this mode.
827 size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
828 tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8*)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
829 (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
830 in_buf_ofs += in_buf_size;
// Deliver the newly decompressed bytes; a zero return from the callback aborts.
831 if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
833 if (status != TINFL_STATUS_HAS_MORE_OUTPUT)
835 result = (status == TINFL_STATUS_DONE);
// Advance the dictionary write position, wrapping at TINFL_LZ_DICT_SIZE.
838 dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
841 *pIn_buf_size = in_buf_ofs;
845 // ------------------- Low-level Compression (independent from all decompression API's)
847 // Purposely making these tables static for faster init and thread safety.
// Lookup: biased match length (0..255) -> DEFLATE length symbol (257..285).
// Presumably indexed by (match_len - minimum match length) -- confirm against
// the caller's biasing.
848 static const mz_uint16 s_tdefl_len_sym[256] = {
849 257,258,259,260,261,262,263,264,265,265,266,266,267,267,268,268,269,269,269,269,270,270,270,270,271,271,271,271,272,272,272,272,
850 273,273,273,273,273,273,273,273,274,274,274,274,274,274,274,274,275,275,275,275,275,275,275,275,276,276,276,276,276,276,276,276,
851 277,277,277,277,277,277,277,277,277,277,277,277,277,277,277,277,278,278,278,278,278,278,278,278,278,278,278,278,278,278,278,278,
852 279,279,279,279,279,279,279,279,279,279,279,279,279,279,279,279,280,280,280,280,280,280,280,280,280,280,280,280,280,280,280,280,
853 281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,
854 282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,
855 283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,
856 284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,285 };
// Lookup: biased match length (0..255) -> number of extra bits that accompany
// the corresponding length symbol (companion table to s_tdefl_len_sym).
858 static const mz_uint8 s_tdefl_len_extra[256] = {
859 0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
860 4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
861 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
862 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,0 };
// Lookup: small match distance (0..511) -> DEFLATE distance symbol (0..17).
// Used for match_dist < 512; larger distances go through s_tdefl_large_dist_sym.
864 static const mz_uint8 s_tdefl_small_dist_sym[512] = {
865 0,1,2,3,4,4,5,5,6,6,6,6,7,7,7,7,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,11,11,11,11,11,11,
866 11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,
867 13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,14,14,14,
868 14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,
869 14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
870 15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,16,16,16,16,16,16,16,16,16,16,16,16,16,
871 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,
872 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,
873 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,17,17,17,17,17,17,17,17,17,17,17,17,17,17,
874 17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,
875 17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,
876 17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17 };
// Lookup: small match distance (0..511) -> extra-bit count for the distance
// symbol (companion table to s_tdefl_small_dist_sym).
// NOTE(review): the table's final row and closing "};" are not visible in this
// extraction -- confirm against upstream miniz.c.
878 static const mz_uint8 s_tdefl_small_dist_extra[512] = {
879 0,0,0,0,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,
880 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
881 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
882 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
883 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
884 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
885 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
// Lookup: (large match distance >> 8) -> DEFLATE distance symbol (18..29).
// Used when match_dist >= 512; indexed by match_dist >> 8 at the call sites.
888 static const mz_uint8 s_tdefl_large_dist_sym[128] = {
889 0,0,18,19,20,20,21,21,22,22,22,22,23,23,23,23,24,24,24,24,24,24,24,24,25,25,25,25,25,25,25,25,26,26,26,26,26,26,26,26,26,26,26,26,
890 26,26,26,26,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,
891 28,28,28,28,28,28,28,28,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29 };
// Lookup: (large match distance >> 8) -> extra-bit count for the distance
// symbol (companion table to s_tdefl_large_dist_sym).
893 static const mz_uint8 s_tdefl_large_dist_extra[128] = {
894 0,0,8,8,9,9,9,9,10,10,10,10,10,10,10,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,
895 12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,
896 13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13 };
898 // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted values.
// Symbol/frequency pair: m_key is the 16-bit sort key (frequency, later reused
// for code lengths), m_sym_index identifies the symbol.
899 typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq;
// LSD radix sort of pSyms0 by the 16-bit m_key, using two 8-bit passes that
// ping-pong between pSyms0 and pSyms1. NOTE(review): the final "return" line
// is not visible in this extraction -- upstream returns the buffer holding the
// sorted result (pCur_syms); confirm.
900 static tdefl_sym_freq* tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq* pSyms0, tdefl_sym_freq* pSyms1)
902 mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq* pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist);
// Build both byte histograms (low byte in hist[0..255], high byte in hist[256..511]).
903 for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; }
// If every key has a zero high byte, the second pass is a no-op -- skip it.
904 while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--;
905 for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8)
907 const mz_uint32* pHist = &hist[pass << 8];
// Prefix-sum the histogram into per-bucket starting offsets.
908 mz_uint offsets[256], cur_ofs = 0;
909 for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; }
910 for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i];
// Swap source/destination buffers for the next pass.
911 { tdefl_sym_freq* t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; }
916 // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
// In-place minimum-redundancy (Huffman) code-length computation, the
// Moffat/Katajainen algorithm credited in the comment above. On entry A[0..n-1]
// holds frequencies in m_key (sorted ascending by the radix sort at the call
// site); on return m_key holds each symbol's code length in bits.
917 static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n)
919 int root, leaf, next, avbl, used, dpth;
// Trivial cases: no symbols, or a single symbol which gets a 1-bit code.
920 if (n==0) return; else if (n==1) { A[0].m_key = 1; return; }
// Phase 1: build the Huffman tree in place; m_key entries become parent indices.
921 A[0].m_key += A[1].m_key; root = 0; leaf = 2;
922 for (next=1; next < n-1; next++)
// Pick the two smallest weights from the merged internal-node/leaf streams.
924 if (leaf>=n || A[root].m_key<A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key;
925 if (leaf>=n || (root<next && A[root].m_key<A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
// Phase 2: convert parent indices to node depths (walk from the root down).
927 A[n-2].m_key = 0; for (next=n-3; next>=0; next--) A[next].m_key = A[A[next].m_key].m_key+1;
// Phase 3: convert internal-node depths to leaf code lengths, level by level.
928 avbl = 1; used = dpth = 0; root = n-2; next = n-1;
931 while (root>=0 && (int)A[root].m_key==dpth) { used++; root--; }
932 while (avbl>used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; }
933 avbl = 2*used; dpth++; used = 0;
937 // Limits canonical Huffman code table's max code size.
938 enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
// Clamps a code-length histogram (pNum_codes[i] = count of codes of length i)
// so no code exceeds max_code_size, then repairs the Kraft sum until it equals
// exactly 1 << max_code_size (i.e. the code is complete again).
939 static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size)
941 int i; mz_uint32 total = 0; if (code_list_len <= 1) return;
// Fold all over-long codes into the max_code_size bucket.
942 for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i];
// Compute the Kraft sum scaled by 1 << max_code_size.
943 for (i = max_code_size; i > 0; i--) total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
// While the code is over-subscribed: shorten-one/lengthen-two adjustment.
944 while (total != (1UL << max_code_size))
946 pNum_codes[max_code_size]--;
947 for (i = max_code_size - 1; i > 0; i--) if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; }
// Builds code sizes and canonical Huffman codes for compressor table
// table_num (lengths in d->m_huff_code_sizes, codes in d->m_huff_codes).
// With static_table set, the pre-assigned code sizes are kept; otherwise
// optimal sizes are derived from the symbol frequencies in d->m_huff_count,
// limited to code_size_limit bits. NOTE(review): the branch structure between
// the static and dynamic paths is partially elided in this extraction --
// confirm against upstream miniz.c.
952 static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table)
954 int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE]; mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1]; MZ_CLEAR_OBJ(num_codes);
// Static path: histogram the already-fixed code sizes.
957 for (i = 0; i < table_len; i++) num_codes[d->m_huff_code_sizes[table_num][i]]++;
// Dynamic path: collect used symbols, sort by frequency, compute optimal
// code lengths, then clamp them to code_size_limit.
961 tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms;
962 int num_used_syms = 0;
963 const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
964 for (i = 0; i < table_len; i++) if (pSym_count[i]) { syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i]; syms0[num_used_syms++].m_sym_index = (mz_uint16)i; }
966 pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1); tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
968 for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
970 tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit);
// Re-assign clamped code sizes: longest codes go to the least frequent
// symbols (pSyms is sorted ascending by frequency, walked from the back).
972 MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]); MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
973 for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
974 for (l = num_codes[i]; l > 0; l--) d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
// Canonical code assignment: first code of each length from the length histogram.
977 next_code[1] = 0; for (j = 0, i = 2; i <= code_size_limit; i++) next_code[i] = j = ((j + num_codes[i - 1]) << 1);
979 for (i = 0; i < table_len; i++)
981 mz_uint rev_code = 0, code, code_size; if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
// DEFLATE transmits Huffman codes LSB-first, so bit-reverse each code.
982 code = next_code[code_size]++; for (l = code_size; l > 0; l--, code >>= 1) rev_code = (rev_code << 1) | (code & 1);
983 d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
// Appends the low l bits of b to the compressor's bit buffer, then flushes
// complete bytes to the output buffer (writes are dropped once the buffer is
// full; the caller detects overflow via m_pOutput_buf vs m_pOutput_buf_end).
// NOTE(review): the macro's closing lines (m_bits_in -= 8 and the do/while
// terminator) are not visible in this extraction -- confirm upstream.
987 #define TDEFL_PUT_BITS(b, l) do { \
988 mz_uint bits = b; mz_uint len = l; MZ_ASSERT(bits <= ((1U << len) - 1U)); \
989 d->m_bit_buffer |= (bits << d->m_bits_in); d->m_bits_in += len; \
990 while (d->m_bits_in >= 8) { \
991 if (d->m_pOutput_buf < d->m_pOutput_buf_end) \
992 *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
993 d->m_bit_buffer >>= 8; \
// Flushes a pending run of repeats of prev_code_size into packed_code_sizes:
// short runs (< 3) are emitted literally; longer runs use code-length symbol
// 16 ("copy previous") with a 2-bit repeat count biased by 3 (RFC 1951 3.2.7).
// Also updates the table-2 symbol frequencies used to build the code-length code.
998 #define TDEFL_RLE_PREV_CODE_SIZE() { if (rle_repeat_count) { \
999 if (rle_repeat_count < 3) { \
1000 d->m_huff_count[2][prev_code_size] = (mz_uint16)(d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
1001 while (rle_repeat_count--) packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
1003 d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); packed_code_sizes[num_packed_code_sizes++] = 16; packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_repeat_count - 3); \
1004 } rle_repeat_count = 0; } }
// Flushes a pending run of zero code sizes: runs < 3 are emitted literally,
// runs of 3..10 use symbol 17 (3-bit count, bias 3), longer runs use symbol 18
// (7-bit count, bias 11) -- the zero-run encodings of RFC 1951 section 3.2.7.
1006 #define TDEFL_RLE_ZERO_CODE_SIZE() { if (rle_z_count) { \
1007 if (rle_z_count < 3) { \
1008 d->m_huff_count[2][0] = (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
1009 } else if (rle_z_count <= 10) { \
1010 d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); packed_code_sizes[num_packed_code_sizes++] = 17; packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 3); \
1012 d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); packed_code_sizes[num_packed_code_sizes++] = 18; packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 11); \
1013 } rle_z_count = 0; } }
// Transmission order of the code-length-code lengths in a dynamic block
// header -- the fixed permutation defined by RFC 1951, section 3.2.7.
1015 static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
// Emits the header of a dynamic-Huffman deflate block (BTYPE = 2): optimizes
// the literal/length and distance tables, RLE-packs their code lengths,
// builds the code-length code (table 2), and writes HLIT/HDIST/HCLEN plus the
// packed code sizes to the output bit stream (RFC 1951, section 3.2.7).
// NOTE(review): some brace/else lines appear to be elided from this
// extraction -- confirm the RLE loop's branch structure against upstream.
1017 static void tdefl_start_dynamic_block(tdefl_compressor *d)
1019 int num_lit_codes, num_dist_codes, num_bit_lengths; mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index;
1020 mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF;
// The end-of-block symbol (256) is always coded, so force a nonzero count.
1022 d->m_huff_count[0][256] = 1;
1024 tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
1025 tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
// Trim trailing unused literal/length and distance codes (minimums 257 and 1).
1027 for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
1028 for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
// Concatenate both code-size arrays; they are RLE-packed as one sequence.
1030 memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
1031 memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes);
1032 total_code_sizes_to_pack = num_lit_codes + num_dist_codes; num_packed_code_sizes = 0; rle_z_count = 0; rle_repeat_count = 0;
1034 memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
// RLE pass: accumulate zero runs and repeat runs, flushing via the macros.
1035 for (i = 0; i < total_code_sizes_to_pack; i++)
1037 mz_uint8 code_size = code_sizes_to_pack[i];
1040 TDEFL_RLE_PREV_CODE_SIZE();
// Symbol 18 encodes at most 138 zeros, so flush when the run hits that cap.
1041 if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); }
1045 TDEFL_RLE_ZERO_CODE_SIZE();
1046 if (code_size != prev_code_size)
1048 TDEFL_RLE_PREV_CODE_SIZE();
1049 d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size;
// Symbol 16 encodes at most 6 repeats, so flush when the run hits that cap.
1051 else if (++rle_repeat_count == 6)
1053 TDEFL_RLE_PREV_CODE_SIZE();
1056 prev_code_size = code_size;
1058 if (rle_repeat_count) { TDEFL_RLE_PREV_CODE_SIZE(); } else { TDEFL_RLE_ZERO_CODE_SIZE(); }
// Build the code-length code (max 7-bit codes per the deflate spec).
1060 tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
// Block type 2 = dynamic Huffman.
1062 TDEFL_PUT_BITS(2, 2);
// HLIT (5 bits, bias 257) and HDIST (5 bits, bias 1).
1064 TDEFL_PUT_BITS(num_lit_codes - 257, 5);
1065 TDEFL_PUT_BITS(num_dist_codes - 1, 5);
// HCLEN: number of code-length-code lengths actually transmitted (min 4),
// in the fixed swizzle order.
1067 for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if (d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break;
1068 num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
1069 for (i = 0; (int)i < num_bit_lengths; i++) TDEFL_PUT_BITS(d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
// Emit the RLE-packed code sizes, with the extra repeat-count bits for 16/17/18.
1071 for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes; )
1073 mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
1074 TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
1075 if (code >= 16) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]);
// Emits the header of a static-Huffman deflate block (BTYPE = 1): installs the
// fixed code lengths of RFC 1951 section 3.2.6 (literals 0-143: 8 bits,
// 144-255: 9, 256-279: 7, 280-287: 8; all 32 distance codes: 5 bits) and
// derives the canonical codes from them.
1079 static void tdefl_start_static_block(tdefl_compressor *d)
1082 mz_uint8 *p = &d->m_huff_code_sizes[0][0];
1084 for (i = 0; i <= 143; ++i) *p++ = 8;
1085 for ( ; i <= 255; ++i) *p++ = 9;
1086 for ( ; i <= 279; ++i) *p++ = 7;
1087 for ( ; i <= 287; ++i) *p++ = 8;
1089 memset(d->m_huff_code_sizes[1], 5, 32);
// static_table = MZ_TRUE: keep the fixed sizes, just build the canonical codes.
1091 tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
1092 tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
// Block type 1 = fixed Huffman.
1094 TDEFL_PUT_BITS(1, 2);
// mz_bitmasks[n] == (1 << n) - 1: masks selecting the low n bits, n in [0,16].
1097 static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF };
1099 #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS
// Fast-path LZ-code emitter (selected by the #if above: unaligned 64-bit
// stores on little-endian targets). Walks the LZ code buffer -- a flag byte
// covers the following 8 codes; the flag bit presumably distinguishes match
// (len byte + 16-bit dist) from single literal byte, per the generic version
// below -- and writes the corresponding Huffman codes through a local 64-bit
// bit buffer that is flushed 8 bytes at a time with an unaligned store.
// Returns MZ_TRUE while the output buffer has room.
// NOTE(review): several branch/brace lines are elided from this extraction --
// confirm the flag-dispatch structure against upstream miniz.c.
1100 static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d)
1103 mz_uint8 *pLZ_codes;
1104 mz_uint8 *pOutput_buf = d->m_pOutput_buf;
1105 mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
1106 mz_uint64 bit_buffer = d->m_bit_buffer;
1107 mz_uint bits_in = d->m_bits_in;
// Local bit writer: no flushing here; bytes are drained in bulk further down.
1109 #define TDEFL_PUT_BITS_FAST(b, l) { bit_buffer |= (((mz_uint64)(b)) << bits_in); bits_in += (l); }
// The 0x100 sentinel bit marks when all 8 flag bits have been consumed.
1112 for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1)
1115 flags = *pLZ_codes++ | 0x100;
// Match: 1 length byte + 2 distance bytes (read with an unaligned 16-bit load).
1119 mz_uint s0, s1, n0, n1, sym, num_extra_bits;
1120 mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1); pLZ_codes += 3;
1122 MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
1123 TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
1124 TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]);
1126 // This sequence coaxes MSVC into using cmov's vs. jmp's.
1127 s0 = s_tdefl_small_dist_sym[match_dist & 511];
1128 n0 = s_tdefl_small_dist_extra[match_dist & 511];
1129 s1 = s_tdefl_large_dist_sym[match_dist >> 8];
1130 n1 = s_tdefl_large_dist_extra[match_dist >> 8];
1131 sym = (match_dist < 512) ? s0 : s1;
1132 num_extra_bits = (match_dist < 512) ? n0 : n1;
1134 MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
1135 TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
1136 TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
// Literal path: up to three consecutive literals are emitted per iteration
// (each guarded by the next flag bit) to amortize loop overhead.
1140 mz_uint lit = *pLZ_codes++;
1141 MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
1142 TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
1144 if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end))
1148 MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
1149 TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
1151 if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end))
1155 MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
1156 TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
// Drain whole bytes of the 64-bit bit buffer with one unaligned store.
1161 if (pOutput_buf >= d->m_pOutput_buf_end)
1164 *(mz_uint64*)pOutput_buf = bit_buffer;
1165 pOutput_buf += (bits_in >> 3);
1166 bit_buffer >>= (bits_in & ~7);
1170 #undef TDEFL_PUT_BITS_FAST
// Hand remaining bits back to the compressor state via the slow-path macro.
1172 d->m_pOutput_buf = pOutput_buf;
1174 d->m_bit_buffer = 0;
1178 mz_uint32 n = MZ_MIN(bits_in, 16);
1179 TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
// Terminate the block with the end-of-block symbol (256).
1184 TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
1186 return (d->m_pOutput_buf < d->m_pOutput_buf_end);
// Portable LZ-code emitter (the #else path of the unaligned/64-bit fast
// version above). Walks the LZ code buffer -- one flag byte per 8 codes, a set
// bit meaning a match record (1 length byte + 2 distance bytes, assembled
// byte-wise here), a clear bit meaning one literal byte -- and writes the
// Huffman codes via TDEFL_PUT_BITS. Returns MZ_TRUE while output has room.
// NOTE(review): the flag-dispatch/brace lines are partially elided from this
// extraction -- confirm against upstream miniz.c.
1189 static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d)
1192 mz_uint8 *pLZ_codes;
// The 0x100 sentinel bit marks when all 8 flag bits have been consumed.
1195 for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1)
1198 flags = *pLZ_codes++ | 0x100;
// Match: emit length symbol + extra bits, then distance symbol + extra bits.
1201 mz_uint sym, num_extra_bits;
1202 mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); pLZ_codes += 3;
1204 MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
1205 TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
1206 TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]);
// Distances below 512 use the fine-grained table; larger ones index by dist >> 8.
1208 if (match_dist < 512)
1210 sym = s_tdefl_small_dist_sym[match_dist]; num_extra_bits = s_tdefl_small_dist_extra[match_dist];
1214 sym = s_tdefl_large_dist_sym[match_dist >> 8]; num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
1216 MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
1217 TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
1218 TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
// Literal: one byte coded through the literal/length table.
1222 mz_uint lit = *pLZ_codes++;
1223 MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
1224 TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
// Terminate the block with the end-of-block symbol (256).
1228 TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
1230 return (d->m_pOutput_buf < d->m_pOutput_buf_end);
1232 #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS
// Emits one complete deflate block: writes the static or dynamic Huffman
// header, then the buffered LZ codes. Returns the LZ emitter's success flag.
// NOTE(review): the branch selecting between the two header writers (on
// static_block) is not visible in this extraction -- confirm upstream.
1234 static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block)
1237 tdefl_start_static_block(d);
1239 tdefl_start_dynamic_block(d);
1240 return tdefl_compress_lz_codes(d);
// Flushes the buffered LZ codes as one finished deflate block, choosing the
// cheapest representation (compressed block, forced-static retry, or a stored
// raw block when compression expanded the data), optionally framed with the
// zlib header/adler32 trailer, then resets the per-block state and delivers
// the output. Returns the number of output bytes still pending
// (d->m_output_flush_remaining). NOTE(review): several brace/blank lines
// appear to be elided from this extraction -- confirm against upstream
// miniz.c before editing control flow.
1243 static int tdefl_flush_block(tdefl_compressor *d, int flush)
1245 mz_uint saved_bit_buf, saved_bits_in;
1246 mz_uint8 *pSaved_output_buf;
1247 mz_bool comp_block_succeeded = MZ_FALSE;
// A raw block is only legal if the block's source bytes are still in the dictionary.
1248 int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
// Write straight into the caller's buffer when it has TDEFL_OUT_BUF_SIZE of
// room; otherwise stage through the compressor's internal output buffer.
1249 mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf;
1251 d->m_pOutput_buf = pOutput_buf_start;
1252 d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
1254 MZ_ASSERT(!d->m_output_flush_remaining);
1255 d->m_output_flush_ofs = 0;
1256 d->m_output_flush_remaining = 0;
// Finalize the partially-filled flag byte and drop it if it covers no codes.
1258 *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
1259 d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
// zlib framing: 0x78 0x01 header before the very first block.
1261 if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index))
1263 TDEFL_PUT_BITS(0x78, 8); TDEFL_PUT_BITS(0x01, 8);
// BFINAL bit: set only when this flush finishes the stream.
1266 TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
// Snapshot the bit-writer state so a failed/expanded block can be rolled back.
1268 pSaved_output_buf = d->m_pOutput_buf; saved_bit_buf = d->m_bit_buffer; saved_bits_in = d->m_bits_in;
// Tiny blocks (< 48 LZ bytes) use static codes; the header would dominate.
1271 comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48));
1273 // If the block gets expanded, forget the current contents of the output buffer and send a raw block instead.
1274 if ( ((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) &&
1275 ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size) )
// Stored block: BTYPE 00, byte-align, then LEN and ~LEN (the XOR loop below
// emits d->m_total_lz_bytes followed by its complement), then the raw bytes
// replayed from the dictionary.
1277 mz_uint i; d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
1278 TDEFL_PUT_BITS(0, 2);
1279 if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); }
1280 for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF)
1282 TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
1284 for (i = 0; i < d->m_total_lz_bytes; ++i)
1286 TDEFL_PUT_BITS(d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8);
1289 // Check for the extremely unlikely (if not impossible) case of the compressed block not fitting into the output buffer when using dynamic codes.
1290 else if (!comp_block_succeeded)
1292 d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
1293 tdefl_compress_block(d, MZ_TRUE);
1298 if (flush == TDEFL_FINISH)
// End of stream: byte-align, then the big-endian adler32 for zlib framing.
1300 if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); }
1301 if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) { mz_uint i, a = d->m_adler32; for (i = 0; i < 4; i++) { TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); a <<= 8; } }
// Non-final flush: emit an empty stored block to force byte alignment.
1305 mz_uint i, z = 0; TDEFL_PUT_BITS(0, 3); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, z ^= 0xFFFF) { TDEFL_PUT_BITS(z & 0xFFFF, 16); }
1309 MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
// Reset per-block state for the next block.
1311 memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
1312 memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
1314 d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes; d->m_total_lz_bytes = 0; d->m_block_index++;
// Deliver the produced bytes: via callback, or copy from the staging buffer
// into the caller's buffer (remembering any overflow for a later flush).
1316 if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0)
1318 if (d->m_pPut_buf_func)
1320 *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
1321 if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
1322 return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
1324 else if (pOutput_buf_start == d->m_output_buf)
1326 int bytes_to_copy = (int)MZ_MIN((size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
1327 memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy);
1328 d->m_out_buf_ofs += bytes_to_copy;
1329 if ((n -= bytes_to_copy) != 0)
// Caller's buffer is full: stash the leftover for tdefl_flush_output_buffer.
1331 d->m_output_flush_ofs = bytes_to_copy;
1332 d->m_output_flush_remaining = n;
// Direct-to-caller path: just advance the output offset.
1337 d->m_out_buf_ofs += n;
1341 return d->m_output_flush_remaining;
1344 #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
1345 #define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16*)(p)
1346 static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len)
1348 mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len;
1349 mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
1350 const mz_uint16 *s = (const mz_uint16*)(d->m_dict + pos), *p, *q;
1351 mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD(s);
1352 MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return;
1357 if (--num_probes_left == 0) return;
1358 #define TDEFL_PROBE \
1359 next_probe_pos = d->m_next[probe_pos]; \
1360 if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) return; \
1361 probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
1362 if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) break;
1363 TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE;
1365 if (!dist) break; q = (const mz_uint16*)(d->m_dict + probe_pos); if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue; p = s; probe_len = 32;
1366 do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
1367 (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0) );
1370 *pMatch_dist = dist; *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN); break;
1372 else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8*)p == *(const mz_uint8*)q)) > match_len)
1374 *pMatch_dist = dist; if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break;
1375 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
1380 static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len)
1382 mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len;
1383 mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
1384 const mz_uint8 *s = d->m_dict + pos, *p, *q;
1385 mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
1386 MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return;
1391 if (--num_probes_left == 0) return;
1392 #define TDEFL_PROBE \
1393 next_probe_pos = d->m_next[probe_pos]; \
1394 if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) return; \
1395 probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
1396 if ((d->m_dict[probe_pos + match_len] == c0) && (d->m_dict[probe_pos + match_len - 1] == c1)) break;
1397 TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE;
1400 p = s; q = d->m_dict + probe_pos;
1401 for (probe_len = 0; probe_len < max_match_len; probe_len++)
1402 if (*p++ != *q++) break;
1403 if (probe_len > match_len)
1405 *pMatch_dist = dist; if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
1406 c0 = d->m_dict[pos + match_len]; c1 = d->m_dict[pos + match_len - 1];
1410 #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
// Faster, minimally featured LZRW1-style match+parse loop with better register utilization.
// Intended for applications where raw throughput is valued more highly than ratio.
// Returns MZ_FALSE when a flush of the LZ code buffer failed (caller reports m_prev_return_status).
static mz_bool tdefl_compress_fast(tdefl_compressor *d)
{
  mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left;
  mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
  mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;

  while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size)))
  {
    const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
    mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
    mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
    d->m_src_buf_left -= num_bytes_to_process;
    lookahead_size += num_bytes_to_process;

    // Copy the input into the circular dictionary. The first TDEFL_MAX_MATCH_LEN-1 bytes are
    // mirrored past the end of m_dict so match comparisons never need to wrap.
    while (num_bytes_to_process)
    {
      mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
      memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
      if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
        memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
      d->m_pSrc += n;
      dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
      num_bytes_to_process -= n;
    }

    dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
    if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break;

    while (lookahead_size >= 4)
    {
      mz_uint cur_match_dist, cur_match_len = 1;
      mz_uint8 *pCur_dict = d->m_dict + cur_pos;
      mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
      mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK;
      mz_uint probe_pos = d->m_hash[hash];
      d->m_hash[hash] = (mz_uint16)lookahead_pos;

      if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram))
      {
        const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
        const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
        mz_uint32 probe_len = 32;
        do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
                       (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0) );
        cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
        if (!probe_len)
          cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;

        if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U*1024U)))
        {
          // Match too short (or a minimum-length match that's too far back to encode cheaply):
          // emit a literal instead.
          cur_match_len = 1;
          *pLZ_code_buf++ = (mz_uint8)first_trigram;
          *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
          d->m_huff_count[0][(mz_uint8)first_trigram]++;
        }
        else
        {
          mz_uint32 s0, s1;
          cur_match_len = MZ_MIN(cur_match_len, lookahead_size);

          MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE));

          // Distances are stored biased by -1.
          cur_match_dist--;

          pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
          *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
          pLZ_code_buf += 3;
          *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);

          s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
          s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
          d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;

          d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++;
        }
      }
      else
      {
        *pLZ_code_buf++ = (mz_uint8)first_trigram;
        *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
        d->m_huff_count[0][(mz_uint8)first_trigram]++;
      }

      if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; }

      total_lz_bytes += cur_match_len;
      lookahead_pos += cur_match_len;
      dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
      MZ_ASSERT(lookahead_size >= cur_match_len);
      lookahead_size -= cur_match_len;

      // Flush the LZ code buffer to the output when it's nearly full.
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8])
      {
        int n;
        d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left;
      }
    }

    // Fewer than 4 bytes left in the lookahead: emit the remainder as literals.
    while (lookahead_size)
    {
      mz_uint8 lit = d->m_dict[cur_pos];

      total_lz_bytes++;
      *pLZ_code_buf++ = lit;
      *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
      if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; }

      d->m_huff_count[0][lit]++;

      lookahead_pos++;
      dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
      lookahead_size--;

      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8])
      {
        int n;
        d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left;
      }
    }
  }

  d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size;
  d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left;
  return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
1551 static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit)
1553 d->m_total_lz_bytes++;
1554 *d->m_pLZ_code_buf++ = lit;
1555 *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; }
1556 d->m_huff_count[0][lit]++;
1559 static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist)
1563 MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE));
1565 d->m_total_lz_bytes += match_len;
1567 d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
1570 d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
1571 d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8); d->m_pLZ_code_buf += 3;
1573 *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; }
1575 s0 = s_tdefl_small_dist_sym[match_dist & 511]; s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
1576 d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
1578 if (match_len >= TDEFL_MIN_MATCH_LEN) d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
1581 static mz_bool tdefl_compress_normal(tdefl_compressor *d)
1583 const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left;
1584 tdefl_flush flush = d->m_flush;
1586 while ((src_buf_left) || ((flush) && (d->m_lookahead_size)))
1588 mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
1589 // Update dictionary and hash chains. Keeps the lookahead size equal to TDEFL_MAX_MATCH_LEN.
1590 if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1))
1592 mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
1593 mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
1594 mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
1595 const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
1596 src_buf_left -= num_bytes_to_process;
1597 d->m_lookahead_size += num_bytes_to_process;
1598 while (pSrc != pSrc_end)
1600 mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
1601 hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
1602 d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos);
1603 dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++;
1608 while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
1610 mz_uint8 c = *pSrc++;
1611 mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
1613 d->m_dict[dst_pos] = c;
1614 if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
1615 d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
1616 if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN)
1618 mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
1619 mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
1620 d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos);
1624 d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
1625 if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
1628 // Simple lazy/greedy parsing state machine.
1629 len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
1630 if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS))
1632 if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))
1634 mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
1635 cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; }
1636 if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1;
1641 tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len);
1643 if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U*1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5)))
1645 cur_match_dist = cur_match_len = 0;
1647 if (d->m_saved_match_len)
1649 if (cur_match_len > d->m_saved_match_len)
1651 tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
1652 if (cur_match_len >= 128)
1654 tdefl_record_match(d, cur_match_len, cur_match_dist);
1655 d->m_saved_match_len = 0; len_to_move = cur_match_len;
1659 d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len;
1664 tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
1665 len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0;
1668 else if (!cur_match_dist)
1669 tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
1670 else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128))
1672 tdefl_record_match(d, cur_match_len, cur_match_dist);
1673 len_to_move = cur_match_len;
1677 d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len;
1679 // Move the lookahead forward by len_to_move bytes.
1680 d->m_lookahead_pos += len_to_move;
1681 MZ_ASSERT(d->m_lookahead_size >= len_to_move);
1682 d->m_lookahead_size -= len_to_move;
1683 d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, TDEFL_LZ_DICT_SIZE);
1684 // Check if it's time to flush the current LZ codes to the internal output buffer.
1685 if ( (d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
1686 ( (d->m_total_lz_bytes > 31*1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) )
1689 d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left;
1690 if ((n = tdefl_flush_block(d, 0)) != 0)
1691 return (n < 0) ? MZ_FALSE : MZ_TRUE;
1695 d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left;
1699 static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d)
1701 if (d->m_pIn_buf_size)
1703 *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
1706 if (d->m_pOut_buf_size)
1708 size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining);
1709 memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n);
1710 d->m_output_flush_ofs += (mz_uint)n;
1711 d->m_output_flush_remaining -= (mz_uint)n;
1712 d->m_out_buf_ofs += n;
1714 *d->m_pOut_buf_size = d->m_out_buf_ofs;
1717 return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY;
1720 tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush)
1724 if (pIn_buf_size) *pIn_buf_size = 0;
1725 if (pOut_buf_size) *pOut_buf_size = 0;
1726 return TDEFL_STATUS_BAD_PARAM;
1729 d->m_pIn_buf = pIn_buf; d->m_pIn_buf_size = pIn_buf_size;
1730 d->m_pOut_buf = pOut_buf; d->m_pOut_buf_size = pOut_buf_size;
1731 d->m_pSrc = (const mz_uint8 *)(pIn_buf); d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
1732 d->m_out_buf_ofs = 0;
1735 if ( ((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
1736 (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf) )
1738 if (pIn_buf_size) *pIn_buf_size = 0;
1739 if (pOut_buf_size) *pOut_buf_size = 0;
1740 return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
1742 d->m_wants_to_finish |= (flush == TDEFL_FINISH);
1744 if ((d->m_output_flush_remaining) || (d->m_finished))
1745 return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
1747 #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
1748 if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
1749 ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
1750 ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0))
1752 if (!tdefl_compress_fast(d))
1753 return d->m_prev_return_status;
1756 #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
1758 if (!tdefl_compress_normal(d))
1759 return d->m_prev_return_status;
1762 if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf))
1763 d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf);
1765 if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining))
1767 if (tdefl_flush_block(d, flush) < 0)
1768 return d->m_prev_return_status;
1769 d->m_finished = (flush == TDEFL_FINISH);
1770 if (flush == TDEFL_FULL_FLUSH) { MZ_CLEAR_OBJ(d->m_hash); MZ_CLEAR_OBJ(d->m_next); d->m_dict_size = 0; }
1773 return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
1776 tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush)
1778 MZ_ASSERT(d->m_pPut_buf_func); return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
1781 tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
1783 d->m_pPut_buf_func = pPut_buf_func; d->m_pPut_buf_user = pPut_buf_user;
1784 d->m_flags = (mz_uint)(flags); d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3; d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
1785 d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
1786 if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
1787 d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
1788 d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
1789 d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8;
1790 d->m_pOutput_buf = d->m_output_buf; d->m_pOutput_buf_end = d->m_output_buf; d->m_prev_return_status = TDEFL_STATUS_OKAY;
1791 d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0; d->m_adler32 = 1;
1792 d->m_pIn_buf = NULL; d->m_pOut_buf = NULL;
1793 d->m_pIn_buf_size = NULL; d->m_pOut_buf_size = NULL;
1794 d->m_flush = TDEFL_NO_FLUSH; d->m_pSrc = NULL; d->m_src_buf_left = 0; d->m_out_buf_ofs = 0;
1795 memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
1796 memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
1797 return TDEFL_STATUS_OKAY;
1800 tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d)
1802 return d->m_prev_return_status;
1805 mz_uint32 tdefl_get_adler32(tdefl_compressor *d)
1807 return d->m_adler32;
1810 mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
1812 tdefl_compressor *pComp; mz_bool succeeded; if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
1813 pComp = (tdefl_compressor*)MZ_MALLOC(sizeof(tdefl_compressor)); if (!pComp) return MZ_FALSE;
1814 succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) == TDEFL_STATUS_OKAY);
1815 succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) == TDEFL_STATUS_DONE);
1816 MZ_FREE(pComp); return succeeded;
1821 size_t m_size, m_capacity;
1823 mz_bool m_expandable;
1824 } tdefl_output_buffer;
1826 static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len, void *pUser)
1828 tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
1829 size_t new_size = p->m_size + len;
1830 if (new_size > p->m_capacity)
1832 size_t new_capacity = p->m_capacity; mz_uint8 *pNew_buf; if (!p->m_expandable) return MZ_FALSE;
1833 do { new_capacity = MZ_MAX(128U, new_capacity << 1U); } while (new_size > new_capacity);
1834 pNew_buf = (mz_uint8*)MZ_REALLOC(p->m_pBuf, new_capacity); if (!pNew_buf) return MZ_FALSE;
1835 p->m_pBuf = pNew_buf; p->m_capacity = new_capacity;
1837 memcpy((mz_uint8*)p->m_pBuf + p->m_size, pBuf, len); p->m_size = new_size;
1841 void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags)
1843 tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf);
1844 if (!pOut_len) return MZ_FALSE; else *pOut_len = 0;
1845 out_buf.m_expandable = MZ_TRUE;
1846 if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return NULL;
1847 *pOut_len = out_buf.m_size; return out_buf.m_pBuf;
1850 size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags)
1852 tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf);
1853 if (!pOut_buf) return 0;
1854 out_buf.m_pBuf = (mz_uint8*)pOut_buf; out_buf.m_capacity = out_buf_len;
1855 if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return 0;
1856 return out_buf.m_size;
1863 #endif // MINIZ_HEADER_FILE_ONLY
1866 This is free and unencumbered software released into the public domain.
1868 Anyone is free to copy, modify, publish, use, compile, sell, or
1869 distribute this software, either in source code form or as a compiled
1870 binary, for any purpose, commercial or non-commercial, and by any
1873 In jurisdictions that recognize copyright laws, the author or authors
1874 of this software dedicate any and all copyright interest in the
1875 software to the public domain. We make this dedication for the benefit
1876 of the public at large and to the detriment of our heirs and
1877 successors. We intend this dedication to be an overt act of
1878 relinquishment in perpetuity of all present and future rights to this
1879 software under copyright law.
1881 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1882 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1883 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
1884 IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
1885 OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
1886 ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
1887 OTHER DEALINGS IN THE SOFTWARE.
1889 For more information, please refer to <http://unlicense.org/>