1 /* miniz.c v1.14 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing
2 See "unlicense" statement at the end of this file.
3 Rich Geldreich <richgel99@gmail.com>, last updated May 20, 2012
4 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt
6 Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define
7 MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros).
10 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect).
11 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
12 Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files.
13 Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
14 Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly
15 "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning).
16 Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64.
17 Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test.
18 Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives.
19 Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.)
20 Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself).
21 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's.
22 level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <bruced@valvesoftware.com> for the feedback/bug report.
23 5/28/11 v1.11 - Added statement from unlicense.org
24 5/27/11 v1.10 - Substantial compressor optimizations:
25 Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a
26 Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86).
27 Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types.
28 Refactored the compression code for better readability and maintainability.
29 Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large
30 drop in throughput on some files).
31 5/15/11 v1.09 - Initial stable release.
33 * Low-level Deflate/Inflate implementation notes:
35 Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or
36 greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses
37 approximately as well as zlib.
39 Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function
40 coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory
41 block large enough to hold the entire file.
43 The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation.
45 * Important: For best perf. be sure to customize the below macros for your target platform:
46 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
47 #define MINIZ_LITTLE_ENDIAN 1
48 #define MINIZ_HAS_64BIT_REGISTERS 1
51 #ifndef MINIZ_HEADER_INCLUDED
52 #define MINIZ_HEADER_INCLUDED
56 // Defines to completely disable specific portions of miniz.c:
57 // If all macros here are defined the only functionality remaining will be CRC-32, adler-32, tinfl, and tdefl.
59 // Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
60 // Note if MINIZ_NO_MALLOC is defined then the user must always provide custom user alloc/free/realloc
61 // callbacks to the zlib and archive API's, and a few stand-alone helper API's which don't provide custom user
62 // functions (such as tdefl_compress_mem_to_heap() and tinfl_decompress_mem_to_heap()) won't work.
63 //#define MINIZ_NO_MALLOC
65 #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__i386) || defined(__i486__) || defined(__i486) || defined(i386) || defined(__ia64__) || defined(__x86_64__)
66 // MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
67 #define MINIZ_X86_OR_X64_CPU 1
70 #if (__BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
71 // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
72 #define MINIZ_LITTLE_ENDIAN 1
75 #if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient integer loads and stores from unaligned addresses.
// This block is only reached on x86/x64 (see MINIZ_X86_OR_X64_CPU above), which allows unaligned
// access, so the macro must be 1 here — the perf note at the top of this file says the same.
// (Defining it 0 under the x86 guard silently disabled the fast MZ_READ_LE16/LE32 paths.)
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
80 #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || defined(_LP64) || defined(__LP64__) || defined(__ia64__) || defined(__x86_64__)
81 // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are reasonably fast (and don't involve compiler generated calls to helper functions).
82 #define MINIZ_HAS_64BIT_REGISTERS 1
89 // ------------------- zlib-style API Definitions.
91 // For more compatibility with zlib, miniz.c uses unsigned long for some parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
92 typedef unsigned long mz_ulong;
94 // mz_free() internally uses the MZ_FREE() macro (which by default calls free() unless you've modified the MZ_MALLOC macro) to release a block allocated from the heap.
95 void mz_free(void *p);
97 #define MZ_ADLER32_INIT (1)
98 // mz_adler32() returns the initial adler-32 value to use when called with ptr==NULL.
99 mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
101 #define MZ_CRC32_INIT (0)
102 // mz_crc32() returns the initial CRC-32 value to use when called with ptr==NULL.
103 mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
105 // Compression strategies.
106 enum { MZ_DEFAULT_STRATEGY = 0, MZ_FILTERED = 1, MZ_HUFFMAN_ONLY = 2, MZ_RLE = 3, MZ_FIXED = 4 };
109 #define MZ_DEFLATED 8
111 // ------------------- Types and macros
// Fixed-width integer aliases used throughout miniz.
// NOTE(review): these assume the usual ILP32/LP64/LLP64 data models (char=8,
// short=16, int=32, long long=64 bits); the mz_validate_uint16/32/64 typedefs
// later in this file reject any platform where the 16/32/64-bit sizes don't hold.
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
// Works around MSVC's spammy "warning C4127: conditional expression is constant" message.
// Terminator for multi-statement "do { ... } MZ_MACRO_END" macros.
// The "while (0, 0)" comma-expression form is the MSVC-only variant (defeats C4127);
// the plain "while (0)" form is for all other compilers. NOTE(review): the #ifdef
// _MSC_VER / #else / #endif lines selecting between them are not visible in this excerpt.
#define MZ_MACRO_END while (0, 0)
#define MZ_MACRO_END while (0)
132 // ------------------- Low-level Decompression API Definitions
134 // Decompression flags used by tinfl_decompress().
135 // TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the input is a raw deflate stream.
136 // TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available beyond the end of the supplied input buffer. If clear, the input buffer contains all remaining input.
137 // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large enough to hold the entire decompressed stream. If clear, the output buffer is at least the size of the dictionary (typically 32KB).
138 // TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the decompressed bytes.
141 TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
142 TINFL_FLAG_HAS_MORE_INPUT = 2,
143 TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
144 TINFL_FLAG_COMPUTE_ADLER32 = 8
147 // High level decompression functions:
148 // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block allocated via malloc().
150 // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data to decompress.
152 // Function returns a pointer to the decompressed data, or NULL on failure.
153 // *pOut_len will be set to the decompressed data's size, which could be larger than src_buf_len on uncompressible data.
154 // The caller must call mz_free() on the returned block when it's no longer needed.
155 void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags);
157 // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block in memory.
158 // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes written on success.
159 #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
160 size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags);
162 // tinfl_decompress_mem_to_callback() decompresses a block in memory to an internal 32KB buffer, and a user provided callback function will be called to flush the buffer.
163 // Returns 1 on success or 0 on failure.
164 typedef int (*tinfl_put_buf_func_ptr)(const void* pBuf, int len, void *pUser);
165 int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags);
167 struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor;
169 // Max size of LZ dictionary.
170 #define TINFL_LZ_DICT_SIZE 32768
175 TINFL_STATUS_BAD_PARAM = -3,
176 TINFL_STATUS_ADLER32_MISMATCH = -2,
177 TINFL_STATUS_FAILED = -1,
178 TINFL_STATUS_DONE = 0,
179 TINFL_STATUS_NEEDS_MORE_INPUT = 1,
180 TINFL_STATUS_HAS_MORE_OUTPUT = 2
183 // Initializes the decompressor to its initial state.
// Initializes the decompressor to its initial state.
// Resetting m_state to 0 is all that's needed to reuse a tinfl_decompressor for a
// new stream — tinfl_decompress()'s coroutine switch restarts at "case 0" and the
// remaining fields are re-initialized there. No allocation, so no deinit exists.
#define tinfl_init(r) do { (r)->m_state = 0; } MZ_MACRO_END
// Returns the adler-32 of the bytes decompressed so far (m_check_adler32 is
// updated by tinfl_decompress() as output is produced).
#define tinfl_get_adler32(r) (r)->m_check_adler32
187 // Main low-level decompressor coroutine function. This is the only function actually needed for decompression. All the other functions are just high-level helpers for improved usability.
188 // This is a universal API, i.e. it can be used as a building block to build any desired higher level decompression API. In the limit case, it can be called once per every byte input or output.
189 tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags);
191 // Internal/private bits follow.
194 TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19,
195 TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
200 mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
201 mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
204 #if MINIZ_HAS_64BIT_REGISTERS
205 #define TINFL_USE_64BIT_BITBUF 1
208 #if TINFL_USE_64BIT_BITBUF
209 typedef mz_uint64 tinfl_bit_buf_t;
210 #define TINFL_BITBUF_SIZE (64)
212 typedef mz_uint32 tinfl_bit_buf_t;
213 #define TINFL_BITBUF_SIZE (32)
216 struct tinfl_decompressor_tag
218 mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES];
219 tinfl_bit_buf_t m_bit_buf;
220 size_t m_dist_from_out_buf_start;
221 tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
222 mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
225 // ------------------- Low-level Compression API Definitions
227 // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly slower, and raw/dynamic blocks will be output more frequently).
228 #define TDEFL_LESS_MEMORY 0
230 // tdefl_init() compression flags logically OR'd together (low 12 bits contain the max. number of probes per dictionary search):
231 // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap compression), 4095=Huffman+LZ (slowest/best compression).
234 TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF
237 // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before the deflate data, and the Adler-32 of the source data at the end. Otherwise, you'll get raw deflate data.
238 // TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even when not writing zlib headers).
239 // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more efficient lazy parsing.
240 // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's initialization time to the minimum, but the output may vary from run to run given the same input (depending on the contents of memory).
241 // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
242 // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
243 // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
244 // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
247 TDEFL_WRITE_ZLIB_HEADER = 0x01000,
248 TDEFL_COMPUTE_ADLER32 = 0x02000,
249 TDEFL_GREEDY_PARSING_FLAG = 0x04000,
250 TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
251 TDEFL_RLE_MATCHES = 0x10000,
252 TDEFL_FILTER_MATCHES = 0x20000,
253 TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
254 TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
257 // High level compression functions:
258 // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block allocated via malloc().
260 // pSrc_buf, src_buf_len: Pointer and size of source block to compress.
261 // flags: The max match finder probes (default is 128) logically OR'd against the above flags. Higher probes are slower but improve compression.
263 // Function returns a pointer to the compressed data, or NULL on failure.
264 // *pOut_len will be set to the compressed data's size, which could be larger than src_buf_len on uncompressible data.
265 // The caller must free() the returned block when it's no longer needed.
266 void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags);
268 // tdefl_compress_mem_to_mem() compresses a block in memory to another block in memory.
269 // Returns 0 on failure.
270 size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags);
272 // Output stream interface. The compressor uses this interface to write compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time.
273 typedef mz_bool (*tdefl_put_buf_func_ptr)(const void* pBuf, int len, void *pUser);
275 // tdefl_compress_mem_to_output() compresses a block to an output stream. The above helpers use this function internally.
276 mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags);
278 enum { TDEFL_MAX_HUFF_TABLES = 3, TDEFL_MAX_HUFF_SYMBOLS_0 = 288, TDEFL_MAX_HUFF_SYMBOLS_1 = 32, TDEFL_MAX_HUFF_SYMBOLS_2 = 19, TDEFL_LZ_DICT_SIZE = 32768, TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, TDEFL_MIN_MATCH_LEN = 3, TDEFL_MAX_MATCH_LEN = 258 };
280 // TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed output block (using static/fixed Huffman codes).
281 #if TDEFL_LESS_MEMORY
282 enum { TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13 ) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 12, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS };
284 enum { TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13 ) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 15, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS };
287 // The low-level tdefl functions below may be used directly if the above helper functions aren't flexible enough. The low-level functions don't make any heap allocations, unlike the above helper functions.
290 TDEFL_STATUS_BAD_PARAM = -2,
291 TDEFL_STATUS_PUT_BUF_FAILED = -1,
292 TDEFL_STATUS_OKAY = 0,
293 TDEFL_STATUS_DONE = 1,
296 // Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
300 TDEFL_SYNC_FLUSH = 2,
301 TDEFL_FULL_FLUSH = 3,
305 // tdefl's compression state structure.
308 tdefl_put_buf_func_ptr m_pPut_buf_func;
309 void *m_pPut_buf_user;
310 mz_uint m_flags, m_max_probes[2];
311 int m_greedy_parsing;
312 mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
313 mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
314 mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in, m_bit_buffer;
315 mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit, m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index, m_wants_to_finish;
316 tdefl_status m_prev_return_status;
317 const void *m_pIn_buf;
319 size_t *m_pIn_buf_size, *m_pOut_buf_size;
321 const mz_uint8 *m_pSrc;
322 size_t m_src_buf_left, m_out_buf_ofs;
323 mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
324 mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
325 mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
326 mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
327 mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
328 mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
329 mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
330 mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
333 // Initializes the compressor.
334 // There is no corresponding deinit() function because the tdefl API's do not dynamically allocate memory.
// pPut_buf_func: If non-NULL, compressed output is handed to this callback, and the user should call the tdefl_compress_buffer() API for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress() API instead.
337 // flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER, etc.)
338 tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags);
340 // Compresses a block of data, consuming as much of the specified input buffer as possible, and writing as much compressed data to the specified output buffer as possible.
341 tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush);
343 // tdefl_compress_buffer() is only usable when the tdefl_init() is called with a non-NULL tdefl_put_buf_func_ptr.
344 // tdefl_compress_buffer() always consumes the entire input buffer.
345 tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush);
347 tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
348 mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
354 #endif // MINIZ_HEADER_INCLUDED
356 // ------------------- End of Header: Implementation follows. (If you only want the header, define MINIZ_HEADER_FILE_ONLY.)
358 #ifndef MINIZ_HEADER_FILE_ONLY
// Compile-time size assertions: if any of the mz_uint16/32/64 typedefs has the
// wrong size on this platform, the conditional evaluates to -1 and the array
// typedef is ill-formed, failing the build (a pre-C11 _Static_assert idiom).
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16)==2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32)==4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64)==8 ? 1 : -1];
367 #define MZ_ASSERT(x) assert(x)
369 #ifdef MINIZ_NO_MALLOC
370 #define MZ_MALLOC(x) NULL
371 #define MZ_FREE(x) (void)x, ((void)0)
372 #define MZ_REALLOC(p, x) NULL
374 #define MZ_MALLOC(x) malloc(x)
375 #define MZ_FREE(x) free(x)
376 #define MZ_REALLOC(p, x) realloc(p, x)
// Classic function-like min/max macros. NOTE: each argument is evaluated more
// than once, so never pass expressions with side effects (e.g. MZ_MAX(i++, j)).
#define MZ_MAX(a,b) (((a)>(b))?(a):(b))
#define MZ_MIN(a,b) (((a)<(b))?(a):(b))
// Zeroes an object in place. 'obj' must be the object itself (an lvalue), not a
// pointer to it — sizeof(obj) must yield the full object size.
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
383 #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
384 #define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
385 #define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
387 #define MZ_READ_LE16(p) ((mz_uint32)(((const mz_uint8 *)(p))[0]) | ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
388 #define MZ_READ_LE32(p) ((mz_uint32)(((const mz_uint8 *)(p))[0]) | ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
392 #define MZ_FORCEINLINE __forceinline
393 #elif defined(__GNUC__)
394 #define MZ_FORCEINLINE inline __attribute__((__always_inline__))
396 #define MZ_FORCEINLINE
403 // ------------------- zlib-style API's
// Adler-32 checksum (RFC 1950). 'adler' is the running checksum (start with
// MZ_ADLER32_INIT); calling with ptr==NULL just returns the initial value.
// s1/s2 are the low/high 16-bit halves of the checksum state.
// NOTE(review): the function's outer braces and the per-block "while (buf_len)"
// loop lines are not visible in this excerpt; comments below cover the visible code.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len)
// 5552 is the largest block size for which the 32-bit accumulators cannot
// overflow before the modulo reduction below.
mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); size_t block_len = buf_len % 5552;
if (!ptr) return MZ_ADLER32_INIT;
// Inner loop unrolled 8x; s2 accumulates the running sum of s1 after every byte.
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1;
// Handle the 0-7 bytes left over from the unrolled loop.
for ( ; i < block_len; ++i) s1 += *ptr++, s2 += s1;
// 65521 is the largest prime below 2^16 — Adler-32's modulus.
s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552;
return (s2 << 16) + s1;
420 // Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C implementation that balances processor cache usage against speed": http://www.geocities.com/malbrain/
// CRC-32 (reflected polynomial 0xEDB88320), computed 4 bits at a time from a
// 16-entry half-byte table to keep cache footprint tiny. Start with MZ_CRC32_INIT;
// calling with ptr==NULL just returns the initial value.
// NOTE(review): the function's braces and the trailing "return ~crcu32;" line are
// not visible in this excerpt.
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len)
// Lookup table indexed by one nibble of (crc ^ data).
static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c };
mz_uint32 crcu32 = (mz_uint32)crc;
if (!ptr) return MZ_CRC32_INIT;
// Standard pre/post inversion; each byte is folded in as two 4-bit steps (low nibble first).
crcu32 = ~crcu32; while (buf_len--) { mz_uint8 b = *ptr++; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; }
431 void mz_free(void *p)
436 // ------------------- Low-level Decompression (completely independent from all compression API's)
// Thin wrappers so the decompressor's memory routines can be retargeted in one place.
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
// Coroutine machinery: tinfl_decompress() is one giant switch on r->m_state.
// TINFL_CR_BEGIN opens the switch; TINFL_CR_RETURN records a resume point
// (state_index), jumps to common_exit to return 'result' to the caller, and
// plants a matching 'case' label so the next call resumes exactly here.
#define TINFL_CR_BEGIN switch(r->m_state) { case 0:
#define TINFL_CR_RETURN(state_index, result) do { status = result; r->m_state = state_index; goto common_exit; case state_index:; } MZ_MACRO_END
// Like TINFL_CR_RETURN but permanent: once entered, every subsequent resume
// returns 'result' again — used for unrecoverable failure states.
#define TINFL_CR_RETURN_FOREVER(state_index, result) do { for ( ; ; ) { TINFL_CR_RETURN(state_index, result); } } MZ_MACRO_END
// Closes the switch opened by TINFL_CR_BEGIN.
#define TINFL_CR_FINISH }
446 // TODO: If the caller has indicated that there's no more input, and we attempt to read beyond the input buf, then something is wrong with the input because the inflator never
447 // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of the stream with 0's in this scenario.
448 #define TINFL_GET_BYTE(state_index, c) do { \
449 if (pIn_buf_cur >= pIn_buf_end) { \
451 if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
452 TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
453 if (pIn_buf_cur < pIn_buf_end) { \
454 c = *pIn_buf_cur++; \
462 } else c = *pIn_buf_cur++; } MZ_MACRO_END
// Appends whole input bytes to bit_buf (LSB-first, per deflate) until at least n
// bits are buffered; may suspend the coroutine via TINFL_GET_BYTE if input runs out.
#define TINFL_NEED_BITS(state_index, n) do { mz_uint c; TINFL_GET_BYTE(state_index, c); bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); num_bits += 8; } while (num_bits < (mz_uint)(n))
// Discards the next n bits from the bit buffer (filling it first if needed).
#define TINFL_SKIP_BITS(state_index, n) do { if (num_bits < (mz_uint)(n)) { TINFL_NEED_BITS(state_index, n); } bit_buf >>= (n); num_bits -= (n); } MZ_MACRO_END
// Extracts the next n bits into b, then consumes them. n must be small enough
// that (1 << n) doesn't overflow (deflate never needs more than 16 at once here).
#define TINFL_GET_BITS(state_index, b, n) do { if (num_bits < (mz_uint)(n)) { TINFL_NEED_BITS(state_index, n); } b = bit_buf & ((1 << (n)) - 1); bit_buf >>= (n); num_bits -= (n); } MZ_MACRO_END
468 // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes remaining in the input buffer falls below 2.
469 // It reads just enough bytes from the input stream that are needed to decode the next Huffman code (and absolutely no more). It works by trying to fully decode a
470 // Huffman code by using whatever bits are currently present in the bit buffer. If this fails, it reads another byte, and tries again until it succeeds or until the
471 // bit buffer contains >=15 bits (deflate's max. Huffman code size).
472 #define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
474 temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
476 code_len = temp >> 9; \
477 if ((code_len) && (num_bits >= code_len)) \
479 } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
480 code_len = TINFL_FAST_LOOKUP_BITS; \
482 temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
483 } while ((temp < 0) && (num_bits >= (code_len + 1))); if (temp >= 0) break; \
484 } TINFL_GET_BYTE(state_index, c); bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); num_bits += 8; \
485 } while (num_bits < 15);
487 // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex than you would initially expect because the zlib API expects the decompressor to never read
488 // beyond the final byte of the deflate stream. (In other words, when this macro wants to read another byte from the input, it REALLY needs another byte in order to fully
489 // decode the next Huffman code.) Handling this properly is particularly important on raw deflate (non-zlib) streams, which aren't followed by a byte aligned adler-32.
490 // The slow path is only executed at the very end of the input buffer.
491 #define TINFL_HUFF_DECODE(state_index, sym, pHuff) do { \
492 int temp; mz_uint code_len, c; \
493 if (num_bits < 15) { \
494 if ((pIn_buf_end - pIn_buf_cur) < 2) { \
495 TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
497 bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); pIn_buf_cur += 2; num_bits += 16; \
500 if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) \
501 code_len = temp >> 9, temp &= 511; \
503 code_len = TINFL_FAST_LOOKUP_BITS; do { temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; } while (temp < 0); \
504 } sym = temp; bit_buf >>= code_len; num_bits -= code_len; } MZ_MACRO_END
506 tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags)
508 static const int s_length_base[31] = { 3,4,5,6,7,8,9,10,11,13, 15,17,19,23,27,31,35,43,51,59, 67,83,99,115,131,163,195,227,258,0,0 };
509 static const int s_length_extra[31]= { 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 };
510 static const int s_dist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, 257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0};
511 static const int s_dist_extra[32] = { 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
512 static const mz_uint8 s_length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 };
513 static const int s_min_table_sizes[3] = { 257, 1, 4 };
515 tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf;
516 const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
517 mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
518 size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? (size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start;
520 // Ensure the output buffer's size is a power of 2, unless the output buffer is large enough to hold the entire output file (in which case it doesn't matter).
521 if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; }
523 num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start;
526 bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1;
527 if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER)
529 TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1);
530 counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
531 if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1U << (8U + (r->m_zhdr0 >> 4)))));
532 if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); }
537 TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1;
540 TINFL_SKIP_BITS(5, num_bits & 7);
541 for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); }
542 if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); }
543 while ((counter) && (num_bits))
545 TINFL_GET_BITS(51, dist, 8);
546 while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); }
547 *pOut_buf_cur++ = (mz_uint8)dist;
552 size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); }
553 while (pIn_buf_cur >= pIn_buf_end)
555 if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT)
557 TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
561 TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
564 n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter);
565 TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n;
568 else if (r->m_type == 3)
570 TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
576 mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i;
577 r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
578 for ( i = 0; i <= 143; ++i) *p++ = 8; for ( ; i <= 255; ++i) *p++ = 9; for ( ; i <= 279; ++i) *p++ = 7; for ( ; i <= 287; ++i) *p++ = 8;
582 for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; }
583 MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; }
584 r->m_table_sizes[2] = 19;
586 for ( ; (int)r->m_type >= 0; r->m_type--)
588 int tree_next, tree_cur; tinfl_huff_table *pTable;
589 mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree);
590 for (i = 0; i < r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++;
591 used_syms = 0, total = 0; next_code[0] = next_code[1] = 0;
592 for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); }
593 if ((65536 != total) && (used_syms > 1))
595 TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
597 for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index)
599 mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue;
600 cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1);
601 if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; }
602 if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; }
603 rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
604 for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--)
606 tree_cur -= ((rev_code >>= 1) & 1);
607 if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1];
609 tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
613 for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]); )
615 mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; }
616 if ((dist == 16) && (!counter))
618 TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
620 num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16];
621 TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? r->m_len_codes[counter - 1] : 0, s); counter += s;
623 if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter)
625 TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
627 TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]);
635 if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2))
637 TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
640 while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); }
641 *pOut_buf_cur++ = (mz_uint8)counter;
645 int sym2; mz_uint code_len;
646 #if TINFL_USE_64BIT_BITBUF
647 if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; }
649 if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; }
651 if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
652 code_len = sym2 >> 9;
655 code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0);
657 counter = sym2; bit_buf >>= code_len; num_bits -= code_len;
661 #if !TINFL_USE_64BIT_BITBUF
662 if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; }
664 if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
665 code_len = sym2 >> 9;
668 code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0);
670 bit_buf >>= code_len; num_bits -= code_len;
672 pOut_buf_cur[0] = (mz_uint8)counter;
679 pOut_buf_cur[1] = (mz_uint8)sym2;
683 if ((counter &= 511) == 256) break;
685 num_extra = s_length_extra[counter - 257]; counter = s_length_base[counter - 257];
686 if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; }
688 TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
689 num_extra = s_dist_extra[dist]; dist = s_dist_base[dist];
690 if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; }
692 dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
693 if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
695 TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
698 pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask);
700 if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end)
704 while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); }
705 *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask];
709 #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
710 else if ((counter >= 9) && (counter <= dist))
712 const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
715 ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
716 ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
718 } while ((pSrc += 8) < pSrc_end);
719 if ((counter &= 7) < 3)
723 pOut_buf_cur[0] = pSrc[0];
725 pOut_buf_cur[1] = pSrc[1];
726 pOut_buf_cur += counter;
734 pOut_buf_cur[0] = pSrc[0];
735 pOut_buf_cur[1] = pSrc[1];
736 pOut_buf_cur[2] = pSrc[2];
737 pOut_buf_cur += 3; pSrc += 3;
738 } while ((int)(counter -= 3) > 2);
739 if ((int)counter > 0)
741 pOut_buf_cur[0] = pSrc[0];
742 if ((int)counter > 1)
743 pOut_buf_cur[1] = pSrc[1];
744 pOut_buf_cur += counter;
748 } while (!(r->m_final & 1));
749 if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER)
751 TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; }
753 TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
757 r->m_num_bits = num_bits; r->m_bit_buf = bit_buf; r->m_dist = dist; r->m_counter = counter; r->m_num_extra = num_extra; r->m_dist_from_out_buf_start = dist_from_out_buf_start;
758 *pIn_buf_size = pIn_buf_cur - pIn_buf_next; *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
759 if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0))
761 const mz_uint8 *ptr = pOut_buf_next; size_t buf_len = *pOut_buf_size;
762 mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; size_t block_len = buf_len % 5552;
765 for (i = 0; i + 7 < block_len; i += 8, ptr += 8)
767 s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1;
768 s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1;
770 for ( ; i < block_len; ++i) s1 += *ptr++, s2 += s1;
771 s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552;
773 r->m_check_adler32 = (s2 << 16) + s1; if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32)) status = TINFL_STATUS_ADLER32_MISMATCH;
778 // Higher level helper functions.
779 void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags)
781 tinfl_decompressor decomp; void *pBuf = NULL, *pNew_buf; size_t src_buf_ofs = 0, out_buf_capacity = 0;
786 size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
787 tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8*)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8*)pBuf, pBuf ? (mz_uint8*)pBuf + *pOut_len : NULL, &dst_buf_size,
788 (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
789 if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT))
791 MZ_FREE(pBuf); *pOut_len = 0; return NULL;
793 src_buf_ofs += src_buf_size;
794 *pOut_len += dst_buf_size;
795 if (status == TINFL_STATUS_DONE) break;
796 new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
797 pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
800 MZ_FREE(pBuf); *pOut_len = 0; return NULL;
802 pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity;
807 size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags)
809 tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp);
810 status = tinfl_decompress(&decomp, (const mz_uint8*)pSrc_buf, &src_buf_len, (mz_uint8*)pOut_buf, (mz_uint8*)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
811 return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len;
814 int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
817 tinfl_decompressor decomp;
818 mz_uint8 *pDict = (mz_uint8*)MZ_MALLOC(TINFL_LZ_DICT_SIZE); size_t in_buf_ofs = 0, dict_ofs = 0;
820 return TINFL_STATUS_FAILED;
824 size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
825 tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8*)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
826 (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
827 in_buf_ofs += in_buf_size;
828 if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
830 if (status != TINFL_STATUS_HAS_MORE_OUTPUT)
832 result = (status == TINFL_STATUS_DONE);
835 dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
838 *pIn_buf_size = in_buf_ofs;
842 // ------------------- Low-level Compression (independent from all decompression API's)
844 // Purposely making these tables static for faster init and thread safety.
// DEFLATE length symbol (257..285) for each LZ-buffer match-length byte
// (index presumably match_len - minimum match length — confirm against the LZ recorder).
static const mz_uint16 s_tdefl_len_sym[256] = {
257,258,259,260,261,262,263,264,265,265,266,266,267,267,268,268,269,269,269,269,270,270,270,270,271,271,271,271,272,272,272,272,
273,273,273,273,273,273,273,273,274,274,274,274,274,274,274,274,275,275,275,275,275,275,275,275,276,276,276,276,276,276,276,276,
277,277,277,277,277,277,277,277,277,277,277,277,277,277,277,277,278,278,278,278,278,278,278,278,278,278,278,278,278,278,278,278,
279,279,279,279,279,279,279,279,279,279,279,279,279,279,279,279,280,280,280,280,280,280,280,280,280,280,280,280,280,280,280,280,
281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,281,
282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,282,
283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,283,
284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,284,285 };
// Number of extra bits sent after each length symbol (same indexing as s_tdefl_len_sym).
static const mz_uint8 s_tdefl_len_extra[256] = {
0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,0 };
// DEFLATE distance symbol for small distances, indexed by match_dist & 511
// (used in tdefl_compress_lz_codes when match_dist < 512).
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
0,1,2,3,4,4,5,5,6,6,6,6,7,7,7,7,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,11,11,11,11,11,11,
11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,
13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,14,14,14,
14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,
14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,16,16,16,16,16,16,16,16,16,16,16,16,16,
16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,
16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,
16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,17,17,17,17,17,17,17,17,17,17,17,17,17,17,
17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,
17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,
17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17 };
875 static const mz_uint8 s_tdefl_small_dist_extra[512] = {
876 0,0,0,0,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,
877 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
878 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
879 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
880 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
881 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
882 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
// DEFLATE distance symbol for large distances, indexed by match_dist >> 8
// (used when match_dist >= 512).
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
0,0,18,19,20,20,21,21,22,22,22,22,23,23,23,23,24,24,24,24,24,24,24,24,25,25,25,25,25,25,25,25,26,26,26,26,26,26,26,26,26,26,26,26,
26,26,26,26,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,
28,28,28,28,28,28,28,28,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29 };
// Extra-bit counts for large distances (same indexing as s_tdefl_large_dist_sym).
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
0,0,8,8,9,9,9,9,10,10,10,10,10,10,10,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,
12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,
13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13 };
895 // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted values.
// m_key holds the symbol's frequency on input; tdefl_calculate_minimum_redundancy
// overwrites it in place with the symbol's computed code length.
typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq;
897 static tdefl_sym_freq* tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq* pSyms0, tdefl_sym_freq* pSyms1)
899 mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq* pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist);
900 for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; }
901 while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--;
902 for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8)
904 const mz_uint32* pHist = &hist[pass << 8];
905 mz_uint offsets[256], cur_ofs = 0;
906 for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; }
907 for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i];
908 { tdefl_sym_freq* t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; }
913 // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
914 static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n)
916 int root, leaf, next, avbl, used, dpth;
917 if (n==0) return; else if (n==1) { A[0].m_key = 1; return; }
918 A[0].m_key += A[1].m_key; root = 0; leaf = 2;
919 for (next=1; next < n-1; next++)
921 if (leaf>=n || A[root].m_key<A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key;
922 if (leaf>=n || (root<next && A[root].m_key<A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
924 A[n-2].m_key = 0; for (next=n-3; next>=0; next--) A[next].m_key = A[A[next].m_key].m_key+1;
925 avbl = 1; used = dpth = 0; root = n-2; next = n-1;
928 while (root>=0 && (int)A[root].m_key==dpth) { used++; root--; }
929 while (avbl>used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; }
930 avbl = 2*used; dpth++; used = 0;
934 // Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 }; // largest code length the enforcement pass will fold down
936 static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size)
938 int i; mz_uint32 total = 0; if (code_list_len <= 1) return;
939 for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i];
940 for (i = max_code_size; i > 0; i--) total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
941 while (total != (1UL << max_code_size))
943 pNum_codes[max_code_size]--;
944 for (i = max_code_size - 1; i > 0; i--) if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; }
949 static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table)
951 int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE]; mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1]; MZ_CLEAR_OBJ(num_codes);
954 for (i = 0; i < table_len; i++) num_codes[d->m_huff_code_sizes[table_num][i]]++;
958 tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms;
959 int num_used_syms = 0;
960 const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
961 for (i = 0; i < table_len; i++) if (pSym_count[i]) { syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i]; syms0[num_used_syms++].m_sym_index = (mz_uint16)i; }
963 pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1); tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
965 for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
967 tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit);
969 MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]); MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
970 for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
971 for (l = num_codes[i]; l > 0; l--) d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
974 next_code[1] = 0; for (j = 0, i = 2; i <= code_size_limit; i++) next_code[i] = j = ((j + num_codes[i - 1]) << 1);
976 for (i = 0; i < table_len; i++)
978 mz_uint rev_code = 0, code, code_size; if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
979 code = next_code[code_size]++; for (l = code_size; l > 0; l--, code >>= 1) rev_code = (rev_code << 1) | (code & 1);
980 d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
// Appends the low l bits of b to the compressor's bit buffer, flushing whole
// bytes to the output buffer as they accumulate. Restores the truncated
// "d->m_bits_in -= 8;" and closing lines — without the decrement the flush
// loop never terminates.
#define TDEFL_PUT_BITS(b, l) do { \
  mz_uint bits = b; mz_uint len = l; MZ_ASSERT(bits <= ((1U << len) - 1U)); \
  d->m_bit_buffer |= (bits << d->m_bits_in); d->m_bits_in += len; \
  while (d->m_bits_in >= 8) { \
    if (d->m_pOutput_buf < d->m_pOutput_buf_end) \
      *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
    d->m_bit_buffer >>= 8; \
    d->m_bits_in -= 8; \
  } \
} MZ_MACRO_END
// Flushes a pending run of repeats of the previous code size: runs < 3 are
// emitted literally, longer runs use DEFLATE code 16 ("copy previous 3..6
// times"). Restores the "} else {" continuation truncated from this copy.
#define TDEFL_RLE_PREV_CODE_SIZE() { if (rle_repeat_count) { \
    if (rle_repeat_count < 3) { \
      d->m_huff_count[2][prev_code_size] = (mz_uint16)(d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
      while (rle_repeat_count--) packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
    } else { \
      d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); packed_code_sizes[num_packed_code_sizes++] = 16; packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_repeat_count - 3); \
  } rle_repeat_count = 0; } }
// Flushes a pending run of zero code sizes: runs < 3 literal zeros, 3..10 via
// DEFLATE code 17, 11..138 via code 18. Restores the "} else {" continuation
// truncated from this copy.
#define TDEFL_RLE_ZERO_CODE_SIZE() { if (rle_z_count) { \
    if (rle_z_count < 3) { \
      d->m_huff_count[2][0] = (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
    } else if (rle_z_count <= 10) { \
      d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); packed_code_sizes[num_packed_code_sizes++] = 17; packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 3); \
    } else { \
      d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); packed_code_sizes[num_packed_code_sizes++] = 18; packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 11); \
  } rle_z_count = 0; } }
// Order in which the code-length-code lengths are transmitted (RFC 1951, section 3.2.7).
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
1014 static void tdefl_start_dynamic_block(tdefl_compressor *d)
1016 int num_lit_codes, num_dist_codes, num_bit_lengths; mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index;
1017 mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF;
1019 d->m_huff_count[0][256] = 1;
1021 tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
1022 tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
1024 for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
1025 for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
1027 memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
1028 memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes);
1029 total_code_sizes_to_pack = num_lit_codes + num_dist_codes; num_packed_code_sizes = 0; rle_z_count = 0; rle_repeat_count = 0;
1031 memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
1032 for (i = 0; i < total_code_sizes_to_pack; i++)
1034 mz_uint8 code_size = code_sizes_to_pack[i];
1037 TDEFL_RLE_PREV_CODE_SIZE();
1038 if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); }
1042 TDEFL_RLE_ZERO_CODE_SIZE();
1043 if (code_size != prev_code_size)
1045 TDEFL_RLE_PREV_CODE_SIZE();
1046 d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size;
1048 else if (++rle_repeat_count == 6)
1050 TDEFL_RLE_PREV_CODE_SIZE();
1053 prev_code_size = code_size;
1055 if (rle_repeat_count) { TDEFL_RLE_PREV_CODE_SIZE(); } else { TDEFL_RLE_ZERO_CODE_SIZE(); }
1057 tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
1059 TDEFL_PUT_BITS(2, 2);
1061 TDEFL_PUT_BITS(num_lit_codes - 257, 5);
1062 TDEFL_PUT_BITS(num_dist_codes - 1, 5);
1064 for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if (d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break;
1065 num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
1066 for (i = 0; (int)i < num_bit_lengths; i++) TDEFL_PUT_BITS(d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
1068 for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes; )
1070 mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
1071 TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
1072 if (code >= 16) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]);
1076 static void tdefl_start_static_block(tdefl_compressor *d)
1079 mz_uint8 *p = &d->m_huff_code_sizes[0][0];
1081 for (i = 0; i <= 143; ++i) *p++ = 8;
1082 for ( ; i <= 255; ++i) *p++ = 9;
1083 for ( ; i <= 279; ++i) *p++ = 7;
1084 for ( ; i <= 287; ++i) *p++ = 8;
1086 memset(d->m_huff_code_sizes[1], 5, 32);
1088 tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
1089 tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
1091 TDEFL_PUT_BITS(1, 2);
// mz_bitmasks[n] has the low n bits set, n = 0..16.
static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF };
1096 #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS
1097 static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d)
1100 mz_uint8 *pLZ_codes;
1101 mz_uint8 *pOutput_buf = d->m_pOutput_buf;
1102 mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
1103 mz_uint64 bit_buffer = d->m_bit_buffer;
1104 mz_uint bits_in = d->m_bits_in;
1106 #define TDEFL_PUT_BITS_FAST(b, l) { bit_buffer |= (((mz_uint64)(b)) << bits_in); bits_in += (l); }
1109 for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1)
1112 flags = *pLZ_codes++ | 0x100;
1116 mz_uint s0, s1, n0, n1, sym, num_extra_bits;
1117 mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1); pLZ_codes += 3;
1119 MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
1120 TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
1121 TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]);
1123 // This sequence coaxes MSVC into using cmov's vs. jmp's.
1124 s0 = s_tdefl_small_dist_sym[match_dist & 511];
1125 n0 = s_tdefl_small_dist_extra[match_dist & 511];
1126 s1 = s_tdefl_large_dist_sym[match_dist >> 8];
1127 n1 = s_tdefl_large_dist_extra[match_dist >> 8];
1128 sym = (match_dist < 512) ? s0 : s1;
1129 num_extra_bits = (match_dist < 512) ? n0 : n1;
1131 MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
1132 TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
1133 TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
1137 mz_uint lit = *pLZ_codes++;
1138 MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
1139 TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
1141 if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end))
1145 MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
1146 TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
1148 if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end))
1152 MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
1153 TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
1158 if (pOutput_buf >= d->m_pOutput_buf_end)
1161 *(mz_uint64*)pOutput_buf = bit_buffer;
1162 pOutput_buf += (bits_in >> 3);
1163 bit_buffer >>= (bits_in & ~7);
1167 #undef TDEFL_PUT_BITS_FAST
1169 d->m_pOutput_buf = pOutput_buf;
1171 d->m_bit_buffer = 0;
1175 mz_uint32 n = MZ_MIN(bits_in, 16);
1176 TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
1181 TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
1183 return (d->m_pOutput_buf < d->m_pOutput_buf_end);
1186 static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d)
1189 mz_uint8 *pLZ_codes;
1192 for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1)
1195 flags = *pLZ_codes++ | 0x100;
1198 mz_uint sym, num_extra_bits;
1199 mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); pLZ_codes += 3;
1201 MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
1202 TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
1203 TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]);
1205 if (match_dist < 512)
1207 sym = s_tdefl_small_dist_sym[match_dist]; num_extra_bits = s_tdefl_small_dist_extra[match_dist];
1211 sym = s_tdefl_large_dist_sym[match_dist >> 8]; num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
1213 MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
1214 TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
1215 TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
1219 mz_uint lit = *pLZ_codes++;
1220 MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
1221 TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
1225 TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
1227 return (d->m_pOutput_buf < d->m_pOutput_buf_end);
1229 #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS
1231 static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block)
1234 tdefl_start_static_block(d);
1236 tdefl_start_dynamic_block(d);
1237 return tdefl_compress_lz_codes(d);
1240 static int tdefl_flush_block(tdefl_compressor *d, int flush)
1242 mz_uint saved_bit_buf, saved_bits_in;
1243 mz_uint8 *pSaved_output_buf;
1244 mz_bool comp_block_succeeded = MZ_FALSE;
1245 int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
1246 mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf;
1248 d->m_pOutput_buf = pOutput_buf_start;
1249 d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
1251 MZ_ASSERT(!d->m_output_flush_remaining);
1252 d->m_output_flush_ofs = 0;
1253 d->m_output_flush_remaining = 0;
1255 *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
1256 d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
1258 if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index))
1260 TDEFL_PUT_BITS(0x78, 8); TDEFL_PUT_BITS(0x01, 8);
1263 TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
1265 pSaved_output_buf = d->m_pOutput_buf; saved_bit_buf = d->m_bit_buffer; saved_bits_in = d->m_bits_in;
1268 comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48));
1270 // If the block gets expanded, forget the current contents of the output buffer and send a raw block instead.
1271 if ( ((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) &&
1272 ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size) )
1274 mz_uint i; d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
1275 TDEFL_PUT_BITS(0, 2);
1276 if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); }
1277 for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF)
1279 TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
1281 for (i = 0; i < d->m_total_lz_bytes; ++i)
1283 TDEFL_PUT_BITS(d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8);
1286 // Check for the extremely unlikely (if not impossible) case of the compressed block not fitting into the output buffer when using dynamic codes.
1287 else if (!comp_block_succeeded)
1289 d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
1290 tdefl_compress_block(d, MZ_TRUE);
1295 if (flush == TDEFL_FINISH)
1297 if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); }
1298 if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) { mz_uint i, a = d->m_adler32; for (i = 0; i < 4; i++) { TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); a <<= 8; } }
1302 mz_uint i, z = 0; TDEFL_PUT_BITS(0, 3); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, z ^= 0xFFFF) { TDEFL_PUT_BITS(z & 0xFFFF, 16); }
1306 MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
1308 memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
1309 memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
1311 d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes; d->m_total_lz_bytes = 0; d->m_block_index++;
1313 if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0)
1315 if (d->m_pPut_buf_func)
1317 *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
1318 if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
1319 return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
1321 else if (pOutput_buf_start == d->m_output_buf)
1323 int bytes_to_copy = (int)MZ_MIN((size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
1324 memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy);
1325 d->m_out_buf_ofs += bytes_to_copy;
1326 if ((n -= bytes_to_copy) != 0)
1328 d->m_output_flush_ofs = bytes_to_copy;
1329 d->m_output_flush_remaining = n;
1334 d->m_out_buf_ofs += n;
1338 return d->m_output_flush_remaining;
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
// Unaligned 16-bit load; this branch is only compiled on targets where unaligned
// reads are known to be safe (see MINIZ_USE_UNALIGNED_LOADS_AND_STORES).
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16*)(p)
// Walks the hash chain (d->m_next) looking for the longest match for the bytes at
// lookahead_pos, at most max_dist bytes back and up to max_match_len bytes long.
// *pMatch_len is input (current best; only strictly better matches are taken) and
// output; *pMatch_dist receives the distance of the best match found.
// NOTE(review): the outer loop headers and braces of this function were lost in
// extraction; the statements below are the original bodies in original order.
static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len)
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len;
// Fewer probes are budgeted once a reasonably long match (>= 32 bytes) is known.
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint16 *s = (const mz_uint16*)(d->m_dict + pos), *p, *q;
// c01: the two bytes a candidate must have at offset match_len-1 to beat the
// current best (cheap rejection filter); s01: first two bytes at the source.
mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD(s);
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return;
if (--num_probes_left == 0) return;
// One probe: follow the chain; stop when it ends or the distance exceeds
// max_dist, otherwise accept the candidate if its tail bytes pass the filter.
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) break;
TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE;
if (!dist) break; q = (const mz_uint16*)(d->m_dict + probe_pos); if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue; p = s; probe_len = 32;
// Compare 8 bytes per iteration (four 16-bit words); probe_len counts down from
// 32, bounding the comparison to 32*8 = 256 bytes (TDEFL_MAX_MATCH_LEN is < 256+3).
do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0) );
// probe_len exhausted: maximum possible match; clamp and stop searching.
*pMatch_dist = dist; *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN); break;
// Otherwise compute the exact match length (pairs matched * 2, plus a possible
// trailing odd byte) and keep it only if it improves on the current best.
else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8*)p == *(const mz_uint8*)q)) > match_len)
*pMatch_dist = dist; if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break;
// Refresh the rejection-filter bytes for the new, longer match length.
c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
// Byte-at-a-time fallback used when unaligned loads are not available.
// (NOTE(review): the "#else" for the MINIZ_USE_UNALIGNED_LOADS_AND_STORES #if
// belongs here in the upstream source; it was lost in extraction.)
static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len)
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint8 *s = d->m_dict + pos, *p, *q;
// c0/c1: cheap one-byte rejection filters at offsets match_len and match_len-1.
mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return;
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if ((d->m_dict[probe_pos + match_len] == c0) && (d->m_dict[probe_pos + match_len - 1] == c1)) break;
TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE;
// Byte-wise comparison up to max_match_len.
if (!dist) break; p = s; q = d->m_dict + probe_pos; for (probe_len = 0; probe_len < max_match_len; probe_len++) if (*p++ != *q++) break;
if (probe_len > match_len)
*pMatch_dist = dist; if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
// Refresh the filter bytes for the improved match length.
c0 = d->m_dict[pos + match_len]; c1 = d->m_dict[pos + match_len - 1];
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
// Fast path used for level-1-style settings (single probe, greedy parsing, no
// filter/RLE/raw flags). Returns MZ_FALSE only if the put-buffer callback fails.
// NOTE(review): brace/loop-structure lines and a few statement lines (e.g.
// upstream's "d->m_pSrc += n;" after the dictionary memcpy, "cur_match_dist--;"
// before the distance is stored, and the trailing "return MZ_TRUE;") were lost in
// extraction -- confirm against the full source before editing.
static mz_bool tdefl_compress_fast(tdefl_compressor *d)
// Faster, minimally featured LZRW1-style match+parse loop with better register utilization. Intended for applications where raw throughput is valued more highly than ratio.
// Work on local copies of the hot compressor state; written back before any flush.
mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left;
mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size)))
const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
// Top off the lookahead from the source buffer, copying into the circular
// dictionary (and mirroring the first TDEFL_MAX_MATCH_LEN-1 bytes past the end
// so matches can run off the wrap point without masking).
mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
d->m_src_buf_left -= num_bytes_to_process;
lookahead_size += num_bytes_to_process;
while (num_bytes_to_process)
mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
num_bytes_to_process -= n;
dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
// Unless flushing, wait for a full lookahead before parsing.
if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break;
// Greedy parse: hash the 3-byte trigram at the current position and test the
// single most recent occurrence from the level-1 hash table.
while (lookahead_size >= 4)
mz_uint cur_match_dist, cur_match_len = 1;
mz_uint8 *pCur_dict = d->m_dict + cur_pos;
mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK;
mz_uint probe_pos = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)lookahead_pos;
if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram))
const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
mz_uint32 probe_len = 32;
// Extend the match 8 bytes per iteration, bounded by probe_len (32*8 bytes).
do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0) );
cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
// probe_len exhausted: longest possible match (zero-distance means no match).
cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
// Reject tiny matches, and minimum-length matches that are far away (they cost
// more to encode than three literals).
if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U*1024U)))
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
// Accepted match: emit the 3-byte (len, dist16) code with a '1' flag bit and
// bump the length/distance Huffman frequencies.
cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE));
pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
*(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
*pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++;
// No trigram match at all: emit a literal.
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
// Start a new flag byte once 8 flags have accumulated.
if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; }
total_lz_bytes += cur_match_len;
lookahead_pos += cur_match_len;
dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
MZ_ASSERT(lookahead_size >= cur_match_len);
lookahead_size -= cur_match_len;
// LZ code buffer nearly full: write state back and flush a block.
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8])
d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left;
// Fewer than 4 bytes left: emit the remainder as plain literals.
while (lookahead_size)
mz_uint8 lit = d->m_dict[cur_pos];
*pLZ_code_buf++ = lit;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; }
d->m_huff_count[0][lit]++;
dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8])
d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left;
// Write the local working state back to the compressor before returning.
d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left;
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
1545 static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit)
1547 d->m_total_lz_bytes++;
1548 *d->m_pLZ_code_buf++ = lit;
1549 *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; }
1550 d->m_huff_count[0][lit]++;
// Appends one (length, distance) match to the pending LZ code stream: a 3-byte
// code (biased length byte + 16-bit little-endian distance) plus a '1' flag bit,
// and updates the length and distance Huffman frequency tables.
// NOTE(review): upstream miniz declares "mz_uint32 s0, s1;" here and applies
// "match_dist -= 1;" before the distance is stored; neither line is visible in
// this extraction -- confirm against the full source before editing.
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist)
// Matches must be at least TDEFL_MIN_MATCH_LEN and lie within the dictionary window.
MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE));
d->m_total_lz_bytes += match_len;
// Byte 0: length with TDEFL_MIN_MATCH_LEN bias.
d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
// Bytes 1-2: distance, least-significant byte first.
d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8); d->m_pLZ_code_buf += 3;
// Shift in a '1' flag bit (match); after 8 flags, reserve a new flag byte.
*d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; }
// Count the distance symbol: small-distance table below 512, large table above.
s0 = s_tdefl_small_dist_sym[match_dist & 511]; s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
// Count the length symbol (guard mirrors the assertion above).
if (match_len >= TDEFL_MIN_MATCH_LEN) d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
// General-purpose compression loop: maintains the sliding dictionary and hash
// chains, then runs a lazy/greedy parser that records literals and matches via
// tdefl_record_literal()/tdefl_record_match(), flushing blocks as the LZ code
// buffer fills. Returns MZ_FALSE only when a block flush fails.
// NOTE(review): brace lines and a few structural lines ("else", "int n;",
// trailing "return MZ_TRUE;") were lost in extraction; the statements below are
// the original bodies in original order.
static mz_bool tdefl_compress_normal(tdefl_compressor *d)
const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left;
tdefl_flush flush = d->m_flush;
while ((src_buf_left) || ((flush) && (d->m_lookahead_size)))
mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
// Update dictionary and hash chains. Keeps the lookahead size equal to TDEFL_MAX_MATCH_LEN.
// Bulk path: enough context exists to hash trigrams as bytes stream in.
if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1))
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
src_buf_left -= num_bytes_to_process;
d->m_lookahead_size += num_bytes_to_process;
while (pSrc != pSrc_end)
// Store the byte (mirrored past the dictionary end so matches can wrap) and
// link this position into the rolling-hash chain.
mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos);
dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++;
// Startup path: fewer than MIN_MATCH_LEN-1 bytes seen so far; add bytes one at
// a time and only start hashing once a full trigram is available.
while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
mz_uint8 c = *pSrc++;
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
src_buf_left--;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN)
mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos);
d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
// Unless flushing, wait for a full lookahead before parsing.
if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
// Simple lazy/greedy parsing state machine.
len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS))
// RLE mode: only match runs of the immediately preceding byte (distance 1).
if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))
mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; }
if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1;
// Normal mode: full hash-chain search.
tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len);
// Drop matches that are likely to cost more than literals: minimum-length
// matches that are far away, degenerate self-matches, or (in filter mode) short ones.
if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U*1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5)))
cur_match_dist = cur_match_len = 0;
// Lazy parsing: a match was deferred on the previous byte.
if (d->m_saved_match_len)
if (cur_match_len > d->m_saved_match_len)
// Current match is better: emit the deferred byte as a literal, then either
// take the current match now (if long) or defer it in turn.
tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
if (cur_match_len >= 128)
tdefl_record_match(d, cur_match_len, cur_match_dist);
d->m_saved_match_len = 0; len_to_move = cur_match_len;
d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len;
// Deferred match wins: emit it and skip past it (minus the byte already moved).
tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0;
else if (!cur_match_dist)
// No match found: emit a literal (index clamped to appease static analysis).
tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128))
// Greedy: take the match immediately.
tdefl_record_match(d, cur_match_len, cur_match_dist);
len_to_move = cur_match_len;
// Lazy: defer this match one byte to see if the next position does better.
d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len;
// Move the lookahead forward by len_to_move bytes.
d->m_lookahead_pos += len_to_move;
MZ_ASSERT(d->m_lookahead_size >= len_to_move);
d->m_lookahead_size -= len_to_move;
d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, TDEFL_LZ_DICT_SIZE);
// Check if it's time to flush the current LZ codes to the internal output buffer.
if ( (d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
( (d->m_total_lz_bytes > 31*1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) )
d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
// Write the source cursor back before returning.
d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left;
1693 static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d)
1695 if (d->m_pIn_buf_size)
1697 *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
1700 if (d->m_pOut_buf_size)
1702 size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining);
1703 memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n);
1704 d->m_output_flush_ofs += (mz_uint)n;
1705 d->m_output_flush_remaining -= (mz_uint)n;
1706 d->m_out_buf_ofs += n;
1708 *d->m_pOut_buf_size = d->m_out_buf_ofs;
1711 return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY;
// Main streaming compression entry point. Compresses *pIn_buf_size bytes from
// pIn_buf into pOut_buf (or into the put-buffer callback supplied to
// tdefl_init()); on return the size pointers hold the bytes consumed/produced.
// flush: TDEFL_NO_FLUSH to continue, TDEFL_SYNC/FULL_FLUSH to emit a flush point,
// TDEFL_FINISH to end the stream (sticky until TDEFL_STATUS_DONE).
// NOTE(review): brace lines were lost in extraction; the first three statements
// below are the body of an initial bad-parameter guard (upstream: "if (!d)").
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush)
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return TDEFL_STATUS_BAD_PARAM;
// Latch the caller's buffers into the compressor state for this call.
d->m_pIn_buf = pIn_buf; d->m_pIn_buf_size = pIn_buf_size;
d->m_pOut_buf = pOut_buf; d->m_pOut_buf_size = pOut_buf_size;
d->m_pSrc = (const mz_uint8 *)(pIn_buf); d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
d->m_out_buf_ofs = 0;
// Validate: exactly one output mechanism (callback XOR caller buffer), no call
// after a previous error, no non-FINISH call once FINISH was requested, and
// non-NULL buffers whenever a nonzero size was supplied.
if ( ((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
(d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf) )
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
// FINISH is sticky: once requested, every later call must also pass it.
d->m_wants_to_finish |= (flush == TDEFL_FINISH);
// Pending output from an earlier call (or a finished stream): just drain it.
if ((d->m_output_flush_remaining) || (d->m_finished))
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
// Level-1-style configuration (1 probe, greedy, no special flags) takes the fast path.
if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0))
if (!tdefl_compress_fast(d))
return d->m_prev_return_status;
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
// General path (upstream pairs this with an "else" against the fast-path if).
if (!tdefl_compress_normal(d))
return d->m_prev_return_status;
// Keep the adler32 of the consumed input when a zlib header/checksum is wanted.
if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf))
d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf);
// All input consumed and nothing pending: emit the flush/final block now.
if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining))
if (tdefl_flush_block(d, flush) < 0)
return d->m_prev_return_status;
d->m_finished = (flush == TDEFL_FINISH);
// A full flush clears the dictionary so the next block decodes independently.
if (flush == TDEFL_FULL_FLUSH) { MZ_CLEAR_OBJ(d->m_hash); MZ_CLEAR_OBJ(d->m_next); d->m_dict_size = 0; }
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
1770 tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush)
1772 MZ_ASSERT(d->m_pPut_buf_func); return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
1775 tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
1777 d->m_pPut_buf_func = pPut_buf_func; d->m_pPut_buf_user = pPut_buf_user;
1778 d->m_flags = (mz_uint)(flags); d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3; d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
1779 d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
1780 if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
1781 d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
1782 d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
1783 d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8;
1784 d->m_pOutput_buf = d->m_output_buf; d->m_pOutput_buf_end = d->m_output_buf; d->m_prev_return_status = TDEFL_STATUS_OKAY;
1785 d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0; d->m_adler32 = 1;
1786 d->m_pIn_buf = NULL; d->m_pOut_buf = NULL;
1787 d->m_pIn_buf_size = NULL; d->m_pOut_buf_size = NULL;
1788 d->m_flush = TDEFL_NO_FLUSH; d->m_pSrc = NULL; d->m_src_buf_left = 0; d->m_out_buf_ofs = 0;
1789 memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
1790 memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
1791 return TDEFL_STATUS_OKAY;
1794 tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d)
1796 return d->m_prev_return_status;
1799 mz_uint32 tdefl_get_adler32(tdefl_compressor *d)
1801 return d->m_adler32;
1804 mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
1806 tdefl_compressor *pComp; mz_bool succeeded; if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
1807 pComp = (tdefl_compressor*)MZ_MALLOC(sizeof(tdefl_compressor)); if (!pComp) return MZ_FALSE;
1808 succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) == TDEFL_STATUS_OKAY);
1809 succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) == TDEFL_STATUS_DONE);
1810 MZ_FREE(pComp); return succeeded;
// Growable memory sink used by the tdefl_compress_mem_to_* helpers.
// NOTE(review): the "typedef struct" opening line and the "mz_uint8 *m_pBuf;"
// field are not visible in this extraction (m_pBuf is referenced by the putter below).
size_t m_size, m_capacity;  // bytes written so far / bytes allocated in m_pBuf
mz_bool m_expandable;       // if MZ_FALSE, writes past m_capacity fail instead of reallocating
} tdefl_output_buffer;
1820 static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len, void *pUser)
1822 tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
1823 size_t new_size = p->m_size + len;
1824 if (new_size > p->m_capacity)
1826 size_t new_capacity = p->m_capacity; mz_uint8 *pNew_buf; if (!p->m_expandable) return MZ_FALSE;
1827 do { new_capacity = MZ_MAX(128U, new_capacity << 1U); } while (new_size > new_capacity);
1828 pNew_buf = (mz_uint8*)MZ_REALLOC(p->m_pBuf, new_capacity); if (!pNew_buf) return MZ_FALSE;
1829 p->m_pBuf = pNew_buf; p->m_capacity = new_capacity;
1831 memcpy((mz_uint8*)p->m_pBuf + p->m_size, pBuf, len); p->m_size = new_size;
1835 void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags)
1837 tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf);
1838 if (!pOut_len) return MZ_FALSE; else *pOut_len = 0;
1839 out_buf.m_expandable = MZ_TRUE;
1840 if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return NULL;
1841 *pOut_len = out_buf.m_size; return out_buf.m_pBuf;
1844 size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags)
1846 tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf);
1847 if (!pOut_buf) return 0;
1848 out_buf.m_pBuf = (mz_uint8*)pOut_buf; out_buf.m_capacity = out_buf_len;
1849 if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return 0;
1850 return out_buf.m_size;
1857 #endif // MINIZ_HEADER_FILE_ONLY
1860 This is free and unencumbered software released into the public domain.
1862 Anyone is free to copy, modify, publish, use, compile, sell, or
1863 distribute this software, either in source code form or as a compiled
1864 binary, for any purpose, commercial or non-commercial, and by any
1867 In jurisdictions that recognize copyright laws, the author or authors
1868 of this software dedicate any and all copyright interest in the
1869 software to the public domain. We make this dedication for the benefit
1870 of the public at large and to the detriment of our heirs and
1871 successors. We intend this dedication to be an overt act of
1872 relinquishment in perpetuity of all present and future rights to this
1873 software under copyright law.
1875 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1876 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1877 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
1878 IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
1879 OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
1880 ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
1881 OTHER DEALINGS IN THE SOFTWARE.
1883 For more information, please refer to <http://unlicense.org/>