Watt-32 tcp/ip  2.2 dev-rel.10
zdeflate.c
1 /* deflate.c -- compress data using the deflation algorithm
2  * Copyright (C) 1995-2003 Jean-loup Gailly.
3  * For conditions of distribution and use, see copyright notice in zlib.h
4  */
5 
6 /*
7  * ALGORITHM
8  *
9  * The "deflation" process depends on being able to identify portions
10  * of the input text which are identical to earlier input (within a
11  * sliding window trailing behind the input currently being processed).
12  *
13  * The most straightforward technique turns out to be the fastest for
14  * most input files: try all possible matches and select the longest.
15  * The key feature of this algorithm is that insertions into the string
16  * dictionary are very simple and thus fast, and deletions are avoided
17  * completely. Insertions are performed at each input character, whereas
18  * string matches are performed only when the previous match ends. So it
19  * is preferable to spend more time in matches to allow very fast string
20  * insertions and avoid deletions. The matching algorithm for small
21  * strings is inspired by that of Rabin & Karp. A brute force approach
22  * is used to find longer strings when a small match has been found.
23  * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
24  * (by Leonid Broukhis).
25  * A previous version of this file used a more sophisticated algorithm
26  * (by Fiala and Greene) which is guaranteed to run in linear amortized
27  * time, but has a larger average cost, uses more memory and is patented.
28  * However the F&G algorithm may be faster for some highly redundant
29  * files if the parameter max_chain_length (described below) is too large.
30  *
31  * ACKNOWLEDGEMENTS
32  *
33  * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
34  * I found it in 'freeze' written by Leonid Broukhis.
35  * Thanks to many people for bug reports and testing.
36  *
37  * REFERENCES
38  *
39  * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
40  * Available at http://www.ietf.org/rfc/rfc1951.txt
41  *
42  * A description of the Rabin and Karp algorithm is given in the book
43  * "Algorithms" by R. Sedgewick, Addison-Wesley, p. 252.
44  *
45  * Fiala,E.R., and Greene,D.H.
46  * Data Compression with Finite Windows, Comm. ACM, 32, 4 (1989) 490-505
47  *
48  */
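/* ---------------------------------------------------------------------------
 * A minimal usage sketch showing how the routines in this file are normally
 * driven, assuming the public "zlib.h" declarations.  The helper name
 * compress_buffer() and its buffers are hypothetical; `dst' is assumed to be
 * at least deflateBound() bytes so a single deflate(..., Z_FINISH) suffices.
 */
#if 0   /* illustrative only, not compiled */
#include <string.h>
#include "zlib.h"

static int compress_buffer (const unsigned char *src, uLong src_len,
                            unsigned char *dst, uLong *dst_len)
{
  z_stream strm;
  int rc;

  memset (&strm, 0, sizeof(strm));       /* use the default zalloc/zfree */
  rc = deflateInit (&strm, Z_DEFAULT_COMPRESSION);
  if (rc != Z_OK)
     return (rc);

  strm.next_in   = (Bytef*) src;
  strm.avail_in  = (uInt) src_len;
  strm.next_out  = dst;
  strm.avail_out = (uInt) *dst_len;

  rc = deflate (&strm, Z_FINISH);        /* single call; all input fits */
  *dst_len = strm.total_out;
  deflateEnd (&strm);
  return (rc == Z_STREAM_END ? Z_OK : Z_BUF_ERROR);
}
#endif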
49 
50 /* @(#) $Id$ */
51 
52 #include "wattcp.h"
53 #include "zdeflate.h"
54 
55 #if defined(USE_GZIP)
56 
57 const char deflate_copyright[] =
58  " deflate 1.2.1 Copyright 1995-2003 Jean-loup Gailly ";
59 /*
60  If you use the zlib library in a product, an acknowledgment is welcome
61  in the documentation of your product. If for some reason you cannot
62  include such an acknowledgment, I would appreciate that you keep this
63  copyright string in the executable of your product.
64  */
65 
66 /* ===========================================================================
67  * Function prototypes.
68  */
69 typedef enum {
70  need_more, /* block not completed, need more input or more output */
71  block_done, /* block flush performed */
72  finish_started, /* finish started, need only more output at next deflate */
73  finish_done /* finish done, accept no more input or output */
74 } block_state;
75 
76 typedef block_state (*compress_func) OF((deflate_state *s, int flush));
77 /* Compression function. Returns the block state after the call. */
78 
79 local void fill_window OF((deflate_state *s));
80 local block_state deflate_stored OF((deflate_state *s, int flush));
81 local block_state deflate_fast OF((deflate_state *s, int flush));
82 #ifndef FASTEST
83 local block_state deflate_slow OF((deflate_state *s, int flush));
84 #endif
85 local void lm_init OF((deflate_state *s));
86 local void putShortMSB OF((deflate_state *s, uInt b));
87 local void flush_pending OF((z_streamp strm));
88 local int read_buf OF((z_streamp strm, Bytef *buf, unsigned size));
89 #ifndef FASTEST
90 #ifdef ASMV
91  void match_init OF((void)); /* asm code initialization */
92  uInt longest_match OF((deflate_state *s, IPos cur_match));
93 #else
94 local uInt longest_match OF((deflate_state *s, IPos cur_match));
95 #endif
96 #endif
97 local uInt longest_match_fast OF((deflate_state *s, IPos cur_match));
98 
99 #ifdef DEBUG
100 local void check_match OF((deflate_state *s, IPos start, IPos match,
101  int length));
102 #endif
103 
104 /* ===========================================================================
105  * Local data
106  */
107 
108 #define NIL 0
109 /* Tail of hash chains */
110 
111 #ifndef TOO_FAR
112 # define TOO_FAR 4096
113 #endif
114 /* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
115 
116 #define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
117 /* Minimum amount of lookahead, except at the end of the input file.
118  * See deflate.c for comments about the MIN_MATCH+1.
119  */
120 
121 /* Values for max_lazy_match, good_match and max_chain_length, depending on
122  * the desired pack level (0..9). The values given below have been tuned to
123  * exclude worst case performance for pathological files. Better values may be
124  * found for specific files.
125  */
126 typedef struct config_s {
127  ush good_length; /* reduce lazy search above this match length */
128  ush max_lazy; /* do not perform lazy search above this match length */
129  ush nice_length; /* quit search above this match length */
130  ush max_chain;
131  compress_func func;
132 } config;
133 
134 #ifdef FASTEST
135 local const config configuration_table[2] = {
136 /* good lazy nice chain */
137 /* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
138 /* 1 */ {4, 4, 8, 4, deflate_fast}}; /* max speed, no lazy matches */
139 #else
140 local const config configuration_table[10] = {
141 /* good lazy nice chain */
142 /* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
143 /* 1 */ {4, 4, 8, 4, deflate_fast}, /* max speed, no lazy matches */
144 /* 2 */ {4, 5, 16, 8, deflate_fast},
145 /* 3 */ {4, 6, 32, 32, deflate_fast},
146 
147 /* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
148 /* 5 */ {8, 16, 32, 32, deflate_slow},
149 /* 6 */ {8, 16, 128, 128, deflate_slow},
150 /* 7 */ {8, 32, 128, 256, deflate_slow},
151 /* 8 */ {32, 128, 258, 1024, deflate_slow},
152 /* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */
153 #endif
154 
155 /* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
156  * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
157  * meaning.
158  */
159 
160 #define EQUAL 0
161 /* result of memcmp for equal strings */
162 
163 #ifndef NO_DUMMY_DECL
164 struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
165 #endif
166 
167 /* ===========================================================================
168  * Update a hash value with the given input byte
169  * IN assertion: all calls to UPDATE_HASH are made with consecutive
170  * input characters, so that a running hash key can be computed from the
171  * previous key instead of complete recalculation each time.
172  */
173 #define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
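/* A small standalone sketch of the rolling property, assuming the default
 * memLevel of 8 (hash_bits = 15, hash_shift = 5, as set up in deflateInit2_()
 * below).  After MIN_MATCH updates the contribution of the oldest byte has
 * been shifted above hash_bits and masked off, so the running key always
 * equals the hash of the last MIN_MATCH bytes.  Names here are illustrative.
 */
#if 0   /* illustrative only, not compiled */
#include <assert.h>

#define H_BITS   15
#define H_SHIFT  ((H_BITS + 2) / 3)           /* (hash_bits+MIN_MATCH-1)/MIN_MATCH */
#define H_MASK   ((1 << H_BITS) - 1)
#define HASH3(a, b, c) ((((((a) << H_SHIFT) ^ (b)) << H_SHIFT) ^ (c)) & H_MASK)

static void rolling_hash_demo (const unsigned char *buf, int len)
{
  unsigned h = 0;
  int i;

  for (i = 0; i < len; i++)
  {
    h = ((h << H_SHIFT) ^ buf[i]) & H_MASK;   /* same update as UPDATE_HASH() */
    if (i >= 2)                               /* key now covers buf[i-2..i] */
       assert (h == HASH3(buf[i-2], buf[i-1], buf[i]));
  }
}
#endif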
174 
175 
176 /* ===========================================================================
177  * Insert string str in the dictionary and set match_head to the previous head
178  * of the hash chain (the most recent string with same hash key). Return
179  * the previous length of the hash chain.
180  * If this file is compiled with -DFASTEST, the compression level is forced
181  * to 1, and no hash chains are maintained.
182  * IN assertion: all calls to INSERT_STRING are made with consecutive
183  * input characters and the first MIN_MATCH bytes of str are valid
184  * (except for the last MIN_MATCH-1 bytes of the input file).
185  */
186 #ifdef FASTEST
187 #define INSERT_STRING(s, str, match_head) \
188  (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
189  match_head = s->head[s->ins_h], \
190  s->head[s->ins_h] = (Pos)(str))
191 #else
192 #define INSERT_STRING(s, str, match_head) \
193  (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
194  match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \
195  s->head[s->ins_h] = (Pos)(str))
196 #endif
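/* The macros above build per-hash chains: head[h] holds the window position
 * of the most recent string whose first MIN_MATCH bytes hash to h, and
 * prev[pos & w_mask] links each position to the previous one with the same
 * hash.  A sketch of the newest-to-oldest traversal that longest_match()
 * performs over such a chain; the function and parameter names here are
 * illustrative, mirroring the deflate_state fields:
 */
#if 0   /* illustrative only, not compiled */
static unsigned walk_chain (const unsigned short *head,
                            const unsigned short *prev,
                            unsigned w_mask, unsigned h,
                            unsigned limit, unsigned max_chain)
{
  unsigned pos = head[h];           /* most recent occurrence, or NIL (0) */
  unsigned n   = 0;

  while (pos > limit && max_chain-- != 0)
  {
    n++;                            /* longest_match() compares the window
                                     * bytes at `pos' with `strstart' here */
    pos = prev[pos & w_mask];       /* next older string with the same hash */
  }
  return (n);                       /* number of candidate positions visited */
}
#endif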
197 
198 /* ===========================================================================
199  * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
200  * prev[] will be initialized on the fly.
201  */
202 #define CLEAR_HASH(s) \
203  s->head[s->hash_size-1] = NIL; \
204  zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
205 
206 /* ========================================================================= */
207 int ZEXPORT deflateInit_(strm, level, version, stream_size)
208  z_streamp strm;
209  int level;
210  const char *version;
211  int stream_size;
212 {
213  return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
214  Z_DEFAULT_STRATEGY, version, stream_size);
215  /* To do: ignore strm->next_in if we use it as window */
216 }
217 
218 /* ========================================================================= */
219 int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
220  version, stream_size)
221  z_streamp strm;
222  int level;
223  int method;
224  int windowBits;
225  int memLevel;
226  int strategy;
227  const char *version;
228  int stream_size;
229 {
230  deflate_state *s;
231  int wrap = 1;
232  static const char my_version[] = ZLIB_VERSION;
233 
234  ushf *overlay;
235  /* We overlay pending_buf and d_buf+l_buf. This works since the average
236  * output size for (length,distance) codes is <= 24 bits.
237  */
238 
239  if (version == Z_NULL || version[0] != my_version[0] ||
240  stream_size != sizeof(z_stream)) {
241  return Z_VERSION_ERROR;
242  }
243  if (strm == Z_NULL) return Z_STREAM_ERROR;
244 
245  strm->msg = Z_NULL;
246  if (strm->zalloc == (alloc_func)0) {
247  strm->zalloc = zcalloc;
248  strm->opaque = (voidpf)0;
249  }
250  if (strm->zfree == (free_func)0) strm->zfree = zcfree;
251 
252 #ifdef FASTEST
253  if (level != 0) level = 1;
254 #else
255  if (level == Z_DEFAULT_COMPRESSION) level = 6;
256 #endif
257 
258  if (windowBits < 0) { /* suppress zlib wrapper */
259  wrap = 0;
260  windowBits = -windowBits;
261  }
262 #ifdef GZIP
263  else if (windowBits > 15) {
264  wrap = 2; /* write gzip wrapper instead */
265  windowBits -= 16;
266  }
267 #endif
268  if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
269  windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
270  strategy < 0 || strategy > Z_RLE) {
271  return Z_STREAM_ERROR;
272  }
273  if (windowBits == 8) windowBits = 9; /* until 256-byte window bug fixed */
274  s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
275  if (s == Z_NULL) return Z_MEM_ERROR;
276  strm->state = (struct internal_state FAR *)s;
277  s->strm = strm;
278 
279  s->wrap = wrap;
280  s->w_bits = windowBits;
281  s->w_size = 1 << s->w_bits;
282  s->w_mask = s->w_size - 1;
283 
284  s->hash_bits = memLevel + 7;
285  s->hash_size = 1 << s->hash_bits;
286  s->hash_mask = s->hash_size - 1;
287  s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
288 
289  s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
290  s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
291  s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
292 
293  s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
294 
295  overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
296  s->pending_buf = (uchf *) overlay;
297  s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
298 
299  if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
300  s->pending_buf == Z_NULL) {
301  s->status = FINISH_STATE;
302  strm->msg = (char*)ERR_MSG(Z_MEM_ERROR);
303  deflateEnd (strm);
304  return Z_MEM_ERROR;
305  }
306  s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
307  s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
308 
309  s->level = level;
310  s->strategy = strategy;
311  s->method = (Byte)method;
312 
313  return deflateReset(strm);
314 }
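/* The windowBits checks above select the stream wrapper.  A sketch of the
 * three common initializations, assuming the public "zlib.h" declarations;
 * the z_stream arguments are hypothetical and assumed zero-initialized by
 * the caller:
 */
#if 0   /* illustrative only, not compiled */
#include "zlib.h"

static int init_wrappers (z_stream *zs, z_stream *raw, z_stream *gz)
{
  int rc;

  /* 15      : zlib header plus adler32 trailer */
  rc = deflateInit2 (zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 15, 8,
                     Z_DEFAULT_STRATEGY);
  if (rc != Z_OK)
     return (rc);

  /* -15     : raw deflate, no header or trailer */
  rc = deflateInit2 (raw, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -15, 8,
                     Z_DEFAULT_STRATEGY);
  if (rc != Z_OK)
     return (rc);

  /* 15 + 16 : gzip header plus crc32 trailer (needs GZIP support) */
  return deflateInit2 (gz, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 15 + 16, 8,
                       Z_DEFAULT_STRATEGY);
}
#endif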
315 
316 /* ========================================================================= */
317 int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
318  z_streamp strm;
319  const Bytef *dictionary;
320  uInt dictLength;
321 {
322  deflate_state *s;
323  uInt length = dictLength;
324  uInt n;
325  IPos hash_head = 0;
326 
327  if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL ||
328  strm->state->wrap == 2 ||
329  (strm->state->wrap == 1 && strm->state->status != INIT_STATE))
330  return Z_STREAM_ERROR;
331 
332  s = strm->state;
333  if (s->wrap)
334  strm->adler = adler32(strm->adler, dictionary, dictLength);
335 
336  if (length < MIN_MATCH) return Z_OK;
337  if (length > MAX_DIST(s)) {
338  length = MAX_DIST(s);
339 #ifndef USE_DICT_HEAD
340  dictionary += dictLength - length; /* use the tail of the dictionary */
341 #endif
342  }
343  zmemcpy(s->window, dictionary, length);
344  s->strstart = length;
345  s->block_start = (long)length;
346 
347  /* Insert all strings in the hash table (except for the last two bytes).
348  * s->lookahead stays null, so s->ins_h will be recomputed at the next
349  * call of fill_window.
350  */
351  s->ins_h = s->window[0];
352  UPDATE_HASH(s, s->ins_h, s->window[1]);
353  for (n = 0; n <= length - MIN_MATCH; n++) {
354  INSERT_STRING(s, n, hash_head);
355  }
356  if (hash_head) hash_head = 0; /* to make compiler happy */
357  return Z_OK;
358 }
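/* deflateSetDictionary() preloads the window and hash table so that early
 * matches can refer to a caller-supplied dictionary.  A sketch of its use,
 * assuming the public "zlib.h" declarations; the dictionary contents and
 * helper name are hypothetical, and the decompressor must feed the same
 * bytes to inflateSetDictionary() when it sees the preset-dictionary flag:
 */
#if 0   /* illustrative only, not compiled */
#include <string.h>
#include "zlib.h"

static const unsigned char my_dict[] = "GET / HTTP/1.0\r\nContent-Length: ";

static int init_with_dictionary (z_stream *strm)
{
  int rc;

  memset (strm, 0, sizeof(*strm));
  rc = deflateInit (strm, Z_BEST_COMPRESSION);
  if (rc != Z_OK)
     return (rc);

  /* Must be called before any input is compressed. */
  return deflateSetDictionary (strm, my_dict, sizeof(my_dict) - 1);
}
#endif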
359 
360 /* ========================================================================= */
361 int ZEXPORT deflateReset (strm)
362  z_streamp strm;
363 {
364  deflate_state *s;
365 
366  if (strm == Z_NULL || strm->state == Z_NULL ||
367  strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0) {
368  return Z_STREAM_ERROR;
369  }
370 
371  strm->total_in = strm->total_out = 0;
372  strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
373  strm->data_type = Z_UNKNOWN;
374 
375  s = (deflate_state *)strm->state;
376  s->pending = 0;
377  s->pending_out = s->pending_buf;
378 
379  if (s->wrap < 0) {
380  s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */
381  }
382  s->status = s->wrap ? INIT_STATE : BUSY_STATE;
383  strm->adler =
384 #ifdef GZIP
385  s->wrap == 2 ? crc32(0L, Z_NULL, 0) :
386 #endif
387  adler32(0L, Z_NULL, 0);
388  s->last_flush = Z_NO_FLUSH;
389 
390  _tr_init(s);
391  lm_init(s);
392 
393  return Z_OK;
394 }
395 
396 /* ========================================================================= */
397 int ZEXPORT deflatePrime (strm, bits, value)
398  z_streamp strm;
399  int bits;
400  int value;
401 {
402  if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
403  strm->state->bi_valid = bits;
404  strm->state->bi_buf = (ush)(value & ((1 << bits) - 1));
405  return Z_OK;
406 }
407 
408 /* ========================================================================= */
409 int ZEXPORT deflateParams(strm, level, strategy)
410  z_streamp strm;
411  int level;
412  int strategy;
413 {
414  deflate_state *s;
415  compress_func func;
416  int err = Z_OK;
417 
418  if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
419  s = strm->state;
420 
421 #ifdef FASTEST
422  if (level != 0) level = 1;
423 #else
424  if (level == Z_DEFAULT_COMPRESSION) level = 6;
425 #endif
426  if (level < 0 || level > 9 || strategy < 0 || strategy > Z_RLE) {
427  return Z_STREAM_ERROR;
428  }
429  func = configuration_table[s->level].func;
430 
431  if (func != configuration_table[level].func && strm->total_in != 0) {
432  /* Flush the last buffer: */
433  err = deflate(strm, Z_PARTIAL_FLUSH);
434  }
435  if (s->level != level) {
436  s->level = level;
437  s->max_lazy_match = configuration_table[level].max_lazy;
438  s->good_match = configuration_table[level].good_length;
439  s->nice_match = configuration_table[level].nice_length;
440  s->max_chain_length = configuration_table[level].max_chain;
441  }
442  s->strategy = strategy;
443  return err;
444 }
445 
446 /* =========================================================================
447  * For the default windowBits of 15 and memLevel of 8, this function returns
448  * a close to exact, as well as small, upper bound on the compressed size.
449  * They are coded as constants here for a reason--if the #define's are
450  * changed, then this function needs to be changed as well. The return
451  * value for 15 and 8 only works for those exact settings.
452  *
453  * For any setting other than those defaults for windowBits and memLevel,
454  * the value returned is a conservative worst case for the maximum expansion
455  * resulting from using fixed blocks instead of stored blocks, which deflate
456  * can emit on compressed data for some combinations of the parameters.
457  *
458  * This function could be more sophisticated to provide closer upper bounds
459  * for every combination of windowBits and memLevel, as well as wrap.
460  * But even the conservative upper bound of about 14% expansion does not
461  * seem onerous for output buffer allocation.
462  */
463 uLong ZEXPORT deflateBound(strm, sourceLen)
464  z_streamp strm;
465  uLong sourceLen;
466 {
467  deflate_state *s;
468  uLong destLen;
469 
470  /* conservative upper bound */
471  destLen = sourceLen +
472  ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 11;
473 
474  /* if can't get parameters, return conservative bound */
475  if (strm == Z_NULL || strm->state == Z_NULL)
476  return destLen;
477 
478  /* if not default parameters, return conservative bound */
479  s = strm->state;
480  if (s->w_bits != 15 || s->hash_bits != 8 + 7)
481  return destLen;
482 
483  /* default settings: return tight bound for that case */
484  return compressBound(sourceLen);
485 }
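/* A short sketch of sizing an output buffer with deflateBound() before a
 * single-shot deflate run; `strm' is assumed to be already initialized and
 * the helper name is hypothetical:
 */
#if 0   /* illustrative only, not compiled */
#include <stdlib.h>
#include "zlib.h"

static unsigned char *alloc_deflate_buffer (z_stream *strm, uLong src_len,
                                            uLong *bound)
{
  *bound = deflateBound (strm, src_len);  /* never smaller than the real output */
  return malloc ((size_t)*bound);
}
#endif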
486 
487 /* =========================================================================
488  * Put a short in the pending buffer. The 16-bit value is put in MSB order.
489  * IN assertion: the stream state is correct and there is enough room in
490  * pending_buf.
491  */
492 local void putShortMSB (s, b)
493  deflate_state *s;
494  uInt b;
495 {
496  put_byte(s, (Byte)(b >> 8));
497  put_byte(s, (Byte)(b & 0xff));
498 }
499 
500 /* =========================================================================
501  * Flush as much pending output as possible. All deflate() output goes
502  * through this function so some applications may wish to modify it
503  * to avoid allocating a large strm->next_out buffer and copying into it.
504  * (See also read_buf()).
505  */
506 local void flush_pending(strm)
507  z_streamp strm;
508 {
509  unsigned len = strm->state->pending;
510 
511  if (len > strm->avail_out) len = strm->avail_out;
512  if (len == 0) return;
513 
514  zmemcpy(strm->next_out, strm->state->pending_out, len);
515  strm->next_out += len;
516  strm->state->pending_out += len;
517  strm->total_out += len;
518  strm->avail_out -= len;
519  strm->state->pending -= len;
520  if (strm->state->pending == 0) {
521  strm->state->pending_out = strm->state->pending_buf;
522  }
523 }
524 
525 /* ========================================================================= */
526 int ZEXPORT deflate (strm, flush)
527  z_streamp strm;
528  int flush;
529 {
530  int old_flush; /* value of flush param for previous deflate call */
531  deflate_state *s;
532 
533  if (strm == Z_NULL || strm->state == Z_NULL ||
534  flush > Z_FINISH || flush < 0) {
535  return Z_STREAM_ERROR;
536  }
537  s = strm->state;
538 
539  if (strm->next_out == Z_NULL ||
540  (strm->next_in == Z_NULL && strm->avail_in != 0) ||
541  (s->status == FINISH_STATE && flush != Z_FINISH)) {
542  ERR_RETURN(strm, Z_STREAM_ERROR);
543  }
544  if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
545 
546  s->strm = strm; /* just in case */
547  old_flush = s->last_flush;
548  s->last_flush = flush;
549 
550  /* Write the header */
551  if (s->status == INIT_STATE) {
552 #ifdef GZIP
553  if (s->wrap == 2) {
554  put_byte(s, 31);
555  put_byte(s, 139);
556  put_byte(s, 8);
557  put_byte(s, 0);
558  put_byte(s, 0);
559  put_byte(s, 0);
560  put_byte(s, 0);
561  put_byte(s, 0);
562  put_byte(s, s->level == 9 ? 2 :
563  (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
564  4 : 0));
565  put_byte(s, 255);
566  s->status = BUSY_STATE;
567  strm->adler = crc32(0L, Z_NULL, 0);
568  }
569  else
570 #endif
571  {
572  uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
573  uInt level_flags;
574 
575  if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2)
576  level_flags = 0;
577  else if (s->level < 6)
578  level_flags = 1;
579  else if (s->level == 6)
580  level_flags = 2;
581  else
582  level_flags = 3;
583  header |= (level_flags << 6);
584  if (s->strstart != 0) header |= PRESET_DICT;
585  header += 31 - (header % 31);
586 
587  s->status = BUSY_STATE;
588  putShortMSB(s, header);
589 
590  /* Save the adler32 of the preset dictionary: */
591  if (s->strstart != 0) {
592  putShortMSB(s, (uInt)(strm->adler >> 16));
593  putShortMSB(s, (uInt)(strm->adler & 0xffff));
594  }
595  strm->adler = adler32(0L, Z_NULL, 0);
596  }
597  }
598 
599  /* Flush as much pending output as possible */
600  if (s->pending != 0) {
601  flush_pending(strm);
602  if (strm->avail_out == 0) {
603  /* Since avail_out is 0, deflate will be called again with
604  * more output space, but possibly with both pending and
605  * avail_in equal to zero. There won't be anything to do,
606  * but this is not an error situation so make sure we
607  * return OK instead of BUF_ERROR at next call of deflate:
608  */
609  s->last_flush = -1;
610  return Z_OK;
611  }
612 
613  /* Make sure there is something to do and avoid duplicate consecutive
614  * flushes. For repeated and useless calls with Z_FINISH, we keep
615  * returning Z_STREAM_END instead of Z_BUF_ERROR.
616  */
617  } else if (strm->avail_in == 0 && flush <= old_flush &&
618  flush != Z_FINISH) {
619  ERR_RETURN(strm, Z_BUF_ERROR);
620  }
621 
622  /* User must not provide more input after the first FINISH: */
623  if (s->status == FINISH_STATE && strm->avail_in != 0) {
624  ERR_RETURN(strm, Z_BUF_ERROR);
625  }
626 
627  /* Start a new block or continue the current one.
628  */
629  if (strm->avail_in != 0 || s->lookahead != 0 ||
630  (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
631  block_state bstate;
632 
633  bstate = (*(configuration_table[s->level].func))(s, flush);
634 
635  if (bstate == finish_started || bstate == finish_done) {
636  s->status = FINISH_STATE;
637  }
638  if (bstate == need_more || bstate == finish_started) {
639  if (strm->avail_out == 0) {
640  s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
641  }
642  return Z_OK;
643  /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
644  * of deflate should use the same flush parameter to make sure
645  * that the flush is complete. So we don't have to output an
646  * empty block here, this will be done at next call. This also
647  * ensures that for a very small output buffer, we emit at most
648  * one empty block.
649  */
650  }
651  if (bstate == block_done) {
652  if (flush == Z_PARTIAL_FLUSH) {
653  _tr_align(s);
654  } else { /* FULL_FLUSH or SYNC_FLUSH */
655  _tr_stored_block(s, (char*)0, 0L, 0);
656  /* For a full flush, this empty block will be recognized
657  * as a special marker by inflate_sync().
658  */
659  if (flush == Z_FULL_FLUSH) {
660  CLEAR_HASH(s); /* forget history */
661  }
662  }
663  flush_pending(strm);
664  if (strm->avail_out == 0) {
665  s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
666  return Z_OK;
667  }
668  }
669  }
670  Assert(strm->avail_out > 0, "bug2");
671 
672  if (flush != Z_FINISH) return Z_OK;
673  if (s->wrap <= 0) return Z_STREAM_END;
674 
675  /* Write the trailer */
676 #ifdef GZIP
677  if (s->wrap == 2) {
678  put_byte(s, (Byte)(strm->adler & 0xff));
679  put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
680  put_byte(s, (Byte)((strm->adler >> 16) & 0xff));
681  put_byte(s, (Byte)((strm->adler >> 24) & 0xff));
682  put_byte(s, (Byte)(strm->total_in & 0xff));
683  put_byte(s, (Byte)((strm->total_in >> 8) & 0xff));
684  put_byte(s, (Byte)((strm->total_in >> 16) & 0xff));
685  put_byte(s, (Byte)((strm->total_in >> 24) & 0xff));
686  }
687  else
688 #endif
689  {
690  putShortMSB(s, (uInt)(strm->adler >> 16));
691  putShortMSB(s, (uInt)(strm->adler & 0xffff));
692  }
693  flush_pending(strm);
694  /* If avail_out is zero, the application will call deflate again
695  * to flush the rest.
696  */
697  if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */
698  return s->pending != 0 ? Z_OK : Z_STREAM_END;
699 }
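/* A chunked driving loop for deflate(), the usual way the flush logic above
 * is exercised: feed input with Z_NO_FLUSH, drain all pending output after
 * every call, then finish with Z_FINISH until Z_STREAM_END comes back.
 * Assumes the public "zlib.h" declarations; the FILE arguments, CHUNK size
 * and helper name are hypothetical.
 */
#if 0   /* illustrative only, not compiled */
#include <stdio.h>
#include <string.h>
#include "zlib.h"

#define CHUNK 16384

static int deflate_stream (FILE *in, FILE *out)
{
  unsigned char ibuf[CHUNK], obuf[CHUNK];
  z_stream      strm;
  int           rc, flush;

  memset (&strm, 0, sizeof(strm));
  if (deflateInit (&strm, Z_DEFAULT_COMPRESSION) != Z_OK)
     return (Z_STREAM_ERROR);

  do
  {
    strm.avail_in = (uInt) fread (ibuf, 1, CHUNK, in);
    strm.next_in  = ibuf;
    flush = feof(in) ? Z_FINISH : Z_NO_FLUSH;
    do
    {
      strm.next_out  = obuf;
      strm.avail_out = CHUNK;
      rc = deflate (&strm, flush);                 /* consumes all avail_in */
      fwrite (obuf, 1, CHUNK - strm.avail_out, out);
    }
    while (strm.avail_out == 0);                   /* drain pending output */
  }
  while (flush != Z_FINISH);

  deflateEnd (&strm);
  return (rc == Z_STREAM_END ? Z_OK : Z_DATA_ERROR);
}
#endif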
700 
701 /* ========================================================================= */
702 int ZEXPORT deflateEnd (strm)
703  z_streamp strm;
704 {
705  int status;
706 
707  if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
708 
709  status = strm->state->status;
710  if (status != INIT_STATE && status != BUSY_STATE &&
711  status != FINISH_STATE) {
712  return Z_STREAM_ERROR;
713  }
714 
715  /* Deallocate in reverse order of allocations: */
716  TRY_FREE(strm, strm->state->pending_buf);
717  TRY_FREE(strm, strm->state->head);
718  TRY_FREE(strm, strm->state->prev);
719  TRY_FREE(strm, strm->state->window);
720 
721  ZFREE(strm, strm->state);
722  strm->state = Z_NULL;
723 
724  return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
725 }
726 
727 /* =========================================================================
728  * Copy the source state to the destination state.
729  * To simplify the source, this is not supported for 16-bit MSDOS (which
730  * doesn't have enough memory anyway to duplicate compression states).
731  */
732 int ZEXPORT deflateCopy (dest, source)
733  z_streamp dest;
734  z_streamp source;
735 {
736 #ifdef MAXSEG_64K
737  return Z_STREAM_ERROR;
738 #else
739  deflate_state *ds;
740  deflate_state *ss;
741  ushf *overlay;
742 
743 
744  if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) {
745  return Z_STREAM_ERROR;
746  }
747 
748  ss = source->state;
749 
750  *dest = *source;
751 
752  ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
753  if (ds == Z_NULL) return Z_MEM_ERROR;
754  dest->state = (struct internal_state FAR *) ds;
755  *ds = *ss;
756  ds->strm = dest;
757 
758  ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
759  ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
760  ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
761  overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
762  ds->pending_buf = (uchf *) overlay;
763 
764  if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
765  ds->pending_buf == Z_NULL) {
766  deflateEnd (dest);
767  return Z_MEM_ERROR;
768  }
769  /* The following zmemcpy calls do not work for 16-bit MSDOS */
770  zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
771  zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
772  zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
773  zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
774 
775  ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
776  ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
777  ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
778 
779  ds->l_desc.dyn_tree = ds->dyn_ltree;
780  ds->d_desc.dyn_tree = ds->dyn_dtree;
781  ds->bl_desc.dyn_tree = ds->bl_tree;
782 
783  return Z_OK;
784 #endif /* MAXSEG_64K */
785 }
786 
787 /* ===========================================================================
788  * Read a new buffer from the current input stream, update the adler32
789  * and total number of bytes read. All deflate() input goes through
790  * this function so some applications may wish to modify it to avoid
791  * allocating a large strm->next_in buffer and copying from it.
792  * (See also flush_pending()).
793  */
794 local int read_buf(strm, buf, size)
795  z_streamp strm;
796  Bytef *buf;
797  unsigned size;
798 {
799  unsigned len = strm->avail_in;
800 
801  if (len > size) len = size;
802  if (len == 0) return 0;
803 
804  strm->avail_in -= len;
805 
806  if (strm->state->wrap == 1) {
807  strm->adler = adler32(strm->adler, strm->next_in, len);
808  }
809 #ifdef GZIP
810  else if (strm->state->wrap == 2) {
811  strm->adler = crc32(strm->adler, strm->next_in, len);
812  }
813 #endif
814  zmemcpy(buf, strm->next_in, len);
815  strm->next_in += len;
816  strm->total_in += len;
817 
818  return (int)len;
819 }
820 
821 /* ===========================================================================
822  * Initialize the "longest match" routines for a new zlib stream
823  */
824 local void lm_init (s)
825  deflate_state *s;
826 {
827  s->window_size = (ulg)2L*s->w_size;
828 
829  CLEAR_HASH(s);
830 
831  /* Set the default configuration parameters:
832  */
833  s->max_lazy_match = configuration_table[s->level].max_lazy;
834  s->good_match = configuration_table[s->level].good_length;
835  s->nice_match = configuration_table[s->level].nice_length;
836  s->max_chain_length = configuration_table[s->level].max_chain;
837 
838  s->strstart = 0;
839  s->block_start = 0L;
840  s->lookahead = 0;
841  s->match_length = s->prev_length = MIN_MATCH-1;
842  s->match_available = 0;
843  s->ins_h = 0;
844 #ifdef ASMV
845  match_init(); /* initialize the asm code */
846 #endif
847 }
848 
849 #ifndef FASTEST
850 /* ===========================================================================
851  * Set match_start to the longest match starting at the given string and
852  * return its length. Matches shorter or equal to prev_length are discarded,
853  * in which case the result is equal to prev_length and match_start is
854  * garbage.
855  * IN assertions: cur_match is the head of the hash chain for the current
856  * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
857  * OUT assertion: the match length is not greater than s->lookahead.
858  */
859 #ifndef ASMV
860 /* For 80x86 and 680x0, an optimized version will be provided in match.asm or
861  * match.S. The code will be functionally equivalent.
862  */
863 local uInt longest_match(s, cur_match)
864  deflate_state *s;
865  IPos cur_match; /* current match */
866 {
867  unsigned chain_length = s->max_chain_length;/* max hash chain length */
868  register Bytef *scan = s->window + s->strstart; /* current string */
869  register Bytef *match; /* matched string */
870  register int len; /* length of current match */
871  int best_len = s->prev_length; /* best match length so far */
872  int nice_match = s->nice_match; /* stop if match long enough */
873  IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
874  s->strstart - (IPos)MAX_DIST(s) : NIL;
875  /* Stop when cur_match becomes <= limit. To simplify the code,
876  * we prevent matches with the string of window index 0.
877  */
878  Posf *prev = s->prev;
879  uInt wmask = s->w_mask;
880 
881 #ifdef UNALIGNED_OK
882  /* Compare two bytes at a time. Note: this is not always beneficial.
883  * Try with and without -DUNALIGNED_OK to check.
884  */
885  register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
886  register ush scan_start = *(ushf*)scan;
887  register ush scan_end = *(ushf*)(scan+best_len-1);
888 #else
889  register Bytef *strend = s->window + s->strstart + MAX_MATCH;
890  register Byte scan_end1 = scan[best_len-1];
891  register Byte scan_end = scan[best_len];
892 #endif
893 
894  /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
895  * It is easy to get rid of this optimization if necessary.
896  */
897  Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
898 
899  /* Do not waste too much time if we already have a good match: */
900  if (s->prev_length >= s->good_match) {
901  chain_length >>= 2;
902  }
903  /* Do not look for matches beyond the end of the input. This is necessary
904  * to make deflate deterministic.
905  */
906  if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
907 
908  Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
909 
910  do {
911  Assert(cur_match < s->strstart, "no future");
912  match = s->window + cur_match;
913 
914  /* Skip to next match if the match length cannot increase
915  * or if the match length is less than 2:
916  */
917 #if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
918  /* This code assumes sizeof(unsigned short) == 2. Do not use
919  * UNALIGNED_OK if your compiler uses a different size.
920  */
921  if (*(ushf*)(match+best_len-1) != scan_end ||
922  *(ushf*)match != scan_start) continue;
923 
924  /* It is not necessary to compare scan[2] and match[2] since they are
925  * always equal when the other bytes match, given that the hash keys
926  * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
927  * strstart+3, +5, ... up to strstart+257. We check for insufficient
928  * lookahead only every 4th comparison; the 128th check will be made
929  * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
930  * necessary to put more guard bytes at the end of the window, or
931  * to check more often for insufficient lookahead.
932  */
933  Assert(scan[2] == match[2], "scan[2]?");
934  scan++, match++;
935  do {
936  } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
937  *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
938  *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
939  *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
940  scan < strend);
941  /* The funny "do {}" generates better code on most compilers */
942 
943  /* Here, scan <= window+strstart+257 */
944  Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
945  if (*scan == *match) scan++;
946 
947  len = (MAX_MATCH - 1) - (int)(strend-scan);
948  scan = strend - (MAX_MATCH-1);
949 
950 #else /* UNALIGNED_OK */
951 
952  if (match[best_len] != scan_end ||
953  match[best_len-1] != scan_end1 ||
954  *match != *scan ||
955  *++match != scan[1]) continue;
956 
957  /* The check at best_len-1 can be removed because it will be made
958  * again later. (This heuristic is not always a win.)
959  * It is not necessary to compare scan[2] and match[2] since they
960  * are always equal when the other bytes match, given that
961  * the hash keys are equal and that HASH_BITS >= 8.
962  */
963  scan += 2, match++;
964  Assert(*scan == *match, "match[2]?");
965 
966  /* We check for insufficient lookahead only every 8th comparison;
967  * the 256th check will be made at strstart+258.
968  */
969  do {
970  } while (*++scan == *++match && *++scan == *++match &&
971  *++scan == *++match && *++scan == *++match &&
972  *++scan == *++match && *++scan == *++match &&
973  *++scan == *++match && *++scan == *++match &&
974  scan < strend);
975 
976  Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
977 
978  len = MAX_MATCH - (int)(strend - scan);
979  scan = strend - MAX_MATCH;
980 
981 #endif /* UNALIGNED_OK */
982 
983  if (len > best_len) {
984  s->match_start = cur_match;
985  best_len = len;
986  if (len >= nice_match) break;
987 #ifdef UNALIGNED_OK
988  scan_end = *(ushf*)(scan+best_len-1);
989 #else
990  scan_end1 = scan[best_len-1];
991  scan_end = scan[best_len];
992 #endif
993  }
994  } while ((cur_match = prev[cur_match & wmask]) > limit
995  && --chain_length != 0);
996 
997  if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
998  return s->lookahead;
999 }
1000 #endif /* ASMV */
1001 #endif /* FASTEST */
1002 
1003 /* ---------------------------------------------------------------------------
1004  * Optimized version for level == 1 or strategy == Z_RLE only
1005  */
1006 local uInt longest_match_fast(s, cur_match)
1007  deflate_state *s;
1008  IPos cur_match; /* current match */
1009 {
1010  register Bytef *scan = s->window + s->strstart; /* current string */
1011  register Bytef *match; /* matched string */
1012  register int len; /* length of current match */
1013  register Bytef *strend = s->window + s->strstart + MAX_MATCH;
1014 
1015  /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
1016  * It is easy to get rid of this optimization if necessary.
1017  */
1018  Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
1019 
1020  Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
1021 
1022  Assert(cur_match < s->strstart, "no future");
1023 
1024  match = s->window + cur_match;
1025 
1026  /* Return failure if the match length is less than 2:
1027  */
1028  if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;
1029 
1030  /* The check at best_len-1 can be removed because it will be made
1031  * again later. (This heuristic is not always a win.)
1032  * It is not necessary to compare scan[2] and match[2] since they
1033  * are always equal when the other bytes match, given that
1034  * the hash keys are equal and that HASH_BITS >= 8.
1035  */
1036  scan += 2, match += 2;
1037  Assert(*scan == *match, "match[2]?");
1038 
1039  /* We check for insufficient lookahead only every 8th comparison;
1040  * the 256th check will be made at strstart+258.
1041  */
1042  do {
1043  } while (*++scan == *++match && *++scan == *++match &&
1044  *++scan == *++match && *++scan == *++match &&
1045  *++scan == *++match && *++scan == *++match &&
1046  *++scan == *++match && *++scan == *++match &&
1047  scan < strend);
1048 
1049  Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
1050 
1051  len = MAX_MATCH - (int)(strend - scan);
1052 
1053  if (len < MIN_MATCH) return MIN_MATCH - 1;
1054 
1055  s->match_start = cur_match;
1056  return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead;
1057 }
1058 
1059 #ifdef DEBUG
1060 /* ===========================================================================
1061  * Check that the match at match_start is indeed a match.
1062  */
1063 local void check_match(s, start, match, length)
1064  deflate_state *s;
1065  IPos start, match;
1066  int length;
1067 {
1068  /* check that the match is indeed a match */
1069  if (zmemcmp(s->window + match,
1070  s->window + start, length) != EQUAL) {
1071  fprintf(stderr, " start %u, match %u, length %d\n",
1072  start, match, length);
1073  do {
1074  fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
1075  } while (--length != 0);
1076  z_error("invalid match");
1077  }
1078  if (z_verbose > 1) {
1079  fprintf(stderr,"\\[%d,%d]", start-match, length);
1080  do { putc(s->window[start++], stderr); } while (--length != 0);
1081  }
1082 }
1083 #else
1084 # define check_match(s, start, match, length)
1085 #endif /* DEBUG */
1086 
1087 /* ===========================================================================
1088  * Fill the window when the lookahead becomes insufficient.
1089  * Updates strstart and lookahead.
1090  *
1091  * IN assertion: lookahead < MIN_LOOKAHEAD
1092  * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
1093  * At least one byte has been read, or avail_in == 0; reads are
1094  * performed for at least two bytes (required for the zip translate_eol
1095  * option -- not supported here).
1096  */
1097 local void fill_window(s)
1098  deflate_state *s;
1099 {
1100  register unsigned n, m;
1101  register Posf *p;
1102  unsigned more; /* Amount of free space at the end of the window. */
1103  uInt wsize = s->w_size;
1104 
1105  do {
1106  more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
1107 
1108 #if (DOSX == 0)
1109  /* Deal with !@#$% 64K limit: */
1110  if (sizeof(int) <= 2) {
1111  if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
1112  more = wsize;
1113 
1114  } else if (more == (unsigned)(-1)) {
1115  /* Very unlikely, but possible on 16 bit machine if
1116  * strstart == 0 && lookahead == 1 (input done a byte at time)
1117  */
1118  more--;
1119  }
1120  }
1121 #endif
1122 
1123  /* If the window is almost full and there is insufficient lookahead,
1124  * move the upper half to the lower one to make room in the upper half.
1125  */
1126  if (s->strstart >= wsize+MAX_DIST(s)) {
1127 
1128  zmemcpy(s->window, s->window+wsize, (unsigned)wsize);
1129  s->match_start -= wsize;
1130  s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
1131  s->block_start -= (long) wsize;
1132 
1133  /* Slide the hash table (could be avoided with 32 bit values
1134  at the expense of memory usage). We slide even when level == 0
1135  to keep the hash table consistent if we switch back to level > 0
1136  later. (Using level 0 permanently is not an optimal usage of
1137  zlib, so we don't care about this pathological case.)
1138  */
1139  n = s->hash_size;
1140  p = &s->head[n];
1141  do {
1142  m = *--p;
1143  *p = (Pos)(m >= wsize ? m-wsize : NIL);
1144  } while (--n);
1145 
1146  n = wsize;
1147 #ifndef FASTEST
1148  p = &s->prev[n];
1149  do {
1150  m = *--p;
1151  *p = (Pos)(m >= wsize ? m-wsize : NIL);
1152  /* If n is not on any hash chain, prev[n] is garbage but
1153  * its value will never be used.
1154  */
1155  } while (--n);
1156 #endif
1157  more += wsize;
1158  }
1159  if (s->strm->avail_in == 0) return;
1160 
1161  /* If there was no sliding:
1162  * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
1163  * more == window_size - lookahead - strstart
1164  * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
1165  * => more >= window_size - 2*WSIZE + 2
1166  * In the BIG_MEM or MMAP case (not yet supported),
1167  * window_size == input_size + MIN_LOOKAHEAD &&
1168  * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
1169  * Otherwise, window_size == 2*WSIZE so more >= 2.
1170  * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
1171  */
1172  Assert(more >= 2, "more < 2");
1173 
1174  n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
1175  s->lookahead += n;
1176 
1177  /* Initialize the hash value now that we have some input: */
1178  if (s->lookahead >= MIN_MATCH) {
1179  s->ins_h = s->window[s->strstart];
1180  UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
1181 #if MIN_MATCH != 3
1182  Call UPDATE_HASH() MIN_MATCH-3 more times
1183 #endif
1184  }
1185  /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
1186  * but this is not important since only literal bytes will be emitted.
1187  */
1188 
1189  } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
1190 }
1191 
1192 /* ===========================================================================
1193  * Flush the current block, with given end-of-file flag.
1194  * IN assertion: strstart is set to the end of the current match.
1195  */
1196 #define FLUSH_BLOCK_ONLY(s, eof) { \
1197  _tr_flush_block(s, (s->block_start >= 0L ? \
1198  (charf *)&s->window[(unsigned)s->block_start] : \
1199  (charf *)Z_NULL), \
1200  (ulg)((long)s->strstart - s->block_start), \
1201  (eof)); \
1202  s->block_start = s->strstart; \
1203  flush_pending(s->strm); \
1204  Tracev((stderr,"[FLUSH]")); \
1205 }
1206 
1207 /* Same but force premature exit if necessary. */
1208 #define FLUSH_BLOCK(s, eof) { \
1209  FLUSH_BLOCK_ONLY(s, eof); \
1210  if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
1211 }
1212 
1213 /* ===========================================================================
1214  * Copy without compression as much as possible from the input stream, return
1215  * the current block state.
1216  * This function does not insert new strings in the dictionary since
1217  * uncompressible data is probably not useful. This function is used
1218  * only for the level=0 compression option.
1219  * NOTE: this function should be optimized to avoid extra copying from
1220  * window to pending_buf.
1221  */
1222 local block_state deflate_stored(s, flush)
1223  deflate_state *s;
1224  int flush;
1225 {
1226  /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
1227  * to pending_buf_size, and each stored block has a 5 byte header:
1228  */
1229  ulg max_block_size = 0xffff;
1230  ulg max_start;
1231 
1232  if (max_block_size > s->pending_buf_size - 5) {
1233  max_block_size = s->pending_buf_size - 5;
1234  }
1235 
1236  /* Copy as much as possible from input to output: */
1237  for (;;) {
1238  /* Fill the window as much as possible: */
1239  if (s->lookahead <= 1) {
1240 
1241  Assert(s->strstart < s->w_size+MAX_DIST(s) ||
1242  s->block_start >= (long)s->w_size, "slide too late");
1243 
1244  fill_window(s);
1245  if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
1246 
1247  if (s->lookahead == 0) break; /* flush the current block */
1248  }
1249  Assert(s->block_start >= 0L, "block gone");
1250 
1251  s->strstart += s->lookahead;
1252  s->lookahead = 0;
1253 
1254  /* Emit a stored block if pending_buf will be full: */
1255  max_start = s->block_start + max_block_size;
1256  if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
1257  /* strstart == 0 is possible after wraparound on a 16-bit machine */
1258  s->lookahead = (uInt)(s->strstart - max_start);
1259  s->strstart = (uInt)max_start;
1260  FLUSH_BLOCK(s, 0);
1261  }
1262  /* Flush if we may have to slide, otherwise block_start may become
1263  * negative and the data will be gone:
1264  */
1265  if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
1266  FLUSH_BLOCK(s, 0);
1267  }
1268  }
1269  FLUSH_BLOCK(s, flush == Z_FINISH);
1270  return flush == Z_FINISH ? finish_done : block_done;
1271 }
1272 
1273 /* ===========================================================================
1274  * Compress as much as possible from the input stream, return the current
1275  * block state.
1276  * This function does not perform lazy evaluation of matches and inserts
1277  * new strings in the dictionary only for unmatched strings or for short
1278  * matches. It is used only for the fast compression options.
1279  */
1280 local block_state deflate_fast(s, flush)
1281  deflate_state *s;
1282  int flush;
1283 {
1284  IPos hash_head = NIL; /* head of the hash chain */
1285  int bflush; /* set if current block must be flushed */
1286 
1287  for (;;) {
1288  /* Make sure that we always have enough lookahead, except
1289  * at the end of the input file. We need MAX_MATCH bytes
1290  * for the next match, plus MIN_MATCH bytes to insert the
1291  * string following the next match.
1292  */
1293  if (s->lookahead < MIN_LOOKAHEAD) {
1294  fill_window(s);
1295  if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
1296  return need_more;
1297  }
1298  if (s->lookahead == 0) break; /* flush the current block */
1299  }
1300 
1301  /* Insert the string window[strstart .. strstart+2] in the
1302  * dictionary, and set hash_head to the head of the hash chain:
1303  */
1304  if (s->lookahead >= MIN_MATCH) {
1305  INSERT_STRING(s, s->strstart, hash_head);
1306  }
1307 
1308  /* Find the longest match, discarding those <= prev_length.
1309  * At this point we always have match_length < MIN_MATCH
1310  */
1311  if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
1312  /* To simplify the code, we prevent matches with the string
1313  * of window index 0 (in particular we have to avoid a match
1314  * of the string with itself at the start of the input file).
1315  */
1316 #ifdef FASTEST
1317  if ((s->strategy < Z_HUFFMAN_ONLY) ||
1318  (s->strategy == Z_RLE && s->strstart - hash_head == 1)) {
1319  s->match_length = longest_match_fast (s, hash_head);
1320  }
1321 #else
1322  if (s->strategy < Z_HUFFMAN_ONLY) {
1323  s->match_length = longest_match (s, hash_head);
1324  } else if (s->strategy == Z_RLE && s->strstart - hash_head == 1) {
1325  s->match_length = longest_match_fast (s, hash_head);
1326  }
1327 #endif
1328  /* longest_match() or longest_match_fast() sets match_start */
1329  }
1330  if (s->match_length >= MIN_MATCH) {
1331  check_match(s, s->strstart, s->match_start, s->match_length);
1332 
1333  _tr_tally_dist(s, s->strstart - s->match_start,
1334  s->match_length - MIN_MATCH, bflush);
1335 
1336  s->lookahead -= s->match_length;
1337 
1338  /* Insert new strings in the hash table only if the match length
1339  * is not too large. This saves time but degrades compression.
1340  */
1341 #ifndef FASTEST
1342  if (s->match_length <= s->max_insert_length &&
1343  s->lookahead >= MIN_MATCH) {
1344  s->match_length--; /* string at strstart already in table */
1345  do {
1346  s->strstart++;
1347  INSERT_STRING(s, s->strstart, hash_head);
1348  /* strstart never exceeds WSIZE-MAX_MATCH, so there are
1349  * always MIN_MATCH bytes ahead.
1350  */
1351  } while (--s->match_length != 0);
1352  s->strstart++;
1353  } else
1354 #endif
1355  {
1356  s->strstart += s->match_length;
1357  s->match_length = 0;
1358  s->ins_h = s->window[s->strstart];
1359  UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
1360 #if MIN_MATCH != 3
1361  Call UPDATE_HASH() MIN_MATCH-3 more times
1362 #endif
1363  /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
1364  * matter since it will be recomputed at next deflate call.
1365  */
1366  }
1367  } else {
1368  /* No match, output a literal byte */
1369  Tracevv((stderr,"%c", s->window[s->strstart]));
1370  _tr_tally_lit (s, s->window[s->strstart], bflush);
1371  s->lookahead--;
1372  s->strstart++;
1373  }
1374  if (bflush) FLUSH_BLOCK(s, 0);
1375  }
1376  FLUSH_BLOCK(s, flush == Z_FINISH);
1377  return flush == Z_FINISH ? finish_done : block_done;
1378 }
1379 
1380 #ifndef FASTEST
1381 /* ===========================================================================
1382  * Same as above, but achieves better compression. We use a lazy
1383  * evaluation for matches: a match is finally adopted only if there is
1384  * no better match at the next window position.
1385  */
1386 local block_state deflate_slow(s, flush)
1387  deflate_state *s;
1388  int flush;
1389 {
1390  IPos hash_head = NIL; /* head of hash chain */
1391  int bflush; /* set if current block must be flushed */
1392 
1393  /* Process the input block. */
1394  for (;;) {
1395  /* Make sure that we always have enough lookahead, except
1396  * at the end of the input file. We need MAX_MATCH bytes
1397  * for the next match, plus MIN_MATCH bytes to insert the
1398  * string following the next match.
1399  */
1400  if (s->lookahead < MIN_LOOKAHEAD) {
1401  fill_window(s);
1402  if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
1403  return need_more;
1404  }
1405  if (s->lookahead == 0) break; /* flush the current block */
1406  }
1407 
1408  /* Insert the string window[strstart .. strstart+2] in the
1409  * dictionary, and set hash_head to the head of the hash chain:
1410  */
1411  if (s->lookahead >= MIN_MATCH) {
1412  INSERT_STRING(s, s->strstart, hash_head);
1413  }
1414 
1415  /* Find the longest match, discarding those <= prev_length.
1416  */
1417  s->prev_length = s->match_length, s->prev_match = s->match_start;
1418  s->match_length = MIN_MATCH-1;
1419 
1420  if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
1421  s->strstart - hash_head <= MAX_DIST(s)) {
1422  /* To simplify the code, we prevent matches with the string
1423  * of window index 0 (in particular we have to avoid a match
1424  * of the string with itself at the start of the input file).
1425  */
1426  if (s->strategy < Z_HUFFMAN_ONLY) {
1427  s->match_length = longest_match (s, hash_head);
1428  } else if (s->strategy == Z_RLE && s->strstart - hash_head == 1) {
1429  s->match_length = longest_match_fast (s, hash_head);
1430  }
1431  /* longest_match() or longest_match_fast() sets match_start */
1432 
1433  if (s->match_length <= 5 && (s->strategy == Z_FILTERED
1434 #if TOO_FAR <= 32767
1435  || (s->match_length == MIN_MATCH &&
1436  s->strstart - s->match_start > TOO_FAR)
1437 #endif
1438  )) {
1439 
1440  /* If prev_match is also MIN_MATCH, match_start is garbage
1441  * but we will ignore the current match anyway.
1442  */
1443  s->match_length = MIN_MATCH-1;
1444  }
1445  }
1446  /* If there was a match at the previous step and the current
1447  * match is not better, output the previous match:
1448  */
1449  if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
1450  uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
1451  /* Do not insert strings in hash table beyond this. */
1452 
1453  check_match(s, s->strstart-1, s->prev_match, s->prev_length);
1454 
1455  _tr_tally_dist(s, s->strstart -1 - s->prev_match,
1456  s->prev_length - MIN_MATCH, bflush);
1457 
1458  /* Insert in hash table all strings up to the end of the match.
1459  * strstart-1 and strstart are already inserted. If there is not
1460  * enough lookahead, the last two strings are not inserted in
1461  * the hash table.
1462  */
1463  s->lookahead -= s->prev_length-1;
1464  s->prev_length -= 2;
1465  do {
1466  if (++s->strstart <= max_insert) {
1467  INSERT_STRING(s, s->strstart, hash_head);
1468  }
1469  } while (--s->prev_length != 0);
1470  s->match_available = 0;
1471  s->match_length = MIN_MATCH-1;
1472  s->strstart++;
1473 
1474  if (bflush) FLUSH_BLOCK(s, 0);
1475 
1476  } else if (s->match_available) {
1477  /* If there was no match at the previous position, output a
1478  * single literal. If there was a match but the current match
1479  * is longer, truncate the previous match to a single literal.
1480  */
1481  Tracevv((stderr,"%c", s->window[s->strstart-1]));
1482  _tr_tally_lit(s, s->window[s->strstart-1], bflush);
1483  if (bflush) {
1484  FLUSH_BLOCK_ONLY(s, 0);
1485  }
1486  s->strstart++;
1487  s->lookahead--;
1488  if (s->strm->avail_out == 0) return need_more;
1489  } else {
1490  /* There is no previous match to compare with, wait for
1491  * the next step to decide.
1492  */
1493  s->match_available = 1;
1494  s->strstart++;
1495  s->lookahead--;
1496  }
1497  }
1498  Assert (flush != Z_NO_FLUSH, "no flush?");
1499  if (s->match_available) {
1500  Tracevv((stderr,"%c", s->window[s->strstart-1]));
1501  _tr_tally_lit(s, s->window[s->strstart-1], bflush);
1502  s->match_available = 0;
1503  }
1504  FLUSH_BLOCK(s, flush == Z_FINISH);
1505  return flush == Z_FINISH ? finish_done : block_done;
1506 }
1507 #endif /* FASTEST */
1508 #endif /* USE_GZIP */
1509 