2 * Zlib (RFC1950 / RFC1951) compression for PuTTY.
4 * There will no doubt be criticism of my decision to reimplement
5 * Zlib compression from scratch instead of using the existing zlib
6 * code. People will cry `reinventing the wheel'; they'll claim
7 * that the `fundamental basis of OSS' is code reuse; they'll want
8 * to see a really good reason for me having chosen not to use the
11 * Well, here are my reasons. Firstly, I don't want to link the
12 * whole of zlib into the PuTTY binary; PuTTY is justifiably proud
13 * of its small size and I think zlib contains a lot of unnecessary
14 * baggage for the kind of compression that SSH requires.
16 * Secondly, I also don't like the alternative of using zlib.dll.
17 * Another thing PuTTY is justifiably proud of is its ease of
18 * installation, and the last thing I want to do is to start
19 * mandating DLLs. Not only that, but there are two _kinds_ of
20 * zlib.dll kicking around, one with C calling conventions on the
21 * exported functions and another with WINAPI conventions, and
22 * there would be a significant danger of getting the wrong one.
24 * Thirdly, there seems to be a difference of opinion on the IETF
25 * secsh mailing list about the correct way to round off a
26 * compressed packet and start the next. In particular, there's
27 * some talk of switching to a mechanism zlib isn't currently
28 * capable of supporting (see below for an explanation). Given that
29 * sort of uncertainty, I thought it might be better to have code
30 * that will support even the zlib-incompatible worst case.
32 * Fourthly, it's a _second implementation_. Second implementations
33 * are fundamentally a Good Thing in standardisation efforts. The
34 * difference of opinion mentioned above has arisen _precisely_
35 * because there has been only one zlib implementation and
36 * everybody has used it. I don't intend that this should happen
50 /* ----------------------------------------------------------------------
51 * Basic LZ77 code. This bit is designed modularly, so it could be
52 * ripped out and used in a different LZ77 compressor. Go to it,
/* NOTE(review): this chunk is a partial listing; several declaration
 * lines (e.g. the struct LZ77Context header) are not visible here. */
56 struct LZ77InternalContext;
/* Opaque private state plus the two caller-supplied output callbacks:
 * literal() emits one uncompressed byte, match() emits a
 * (distance, length) back-reference. */
58 struct LZ77InternalContext *ictx;
60 void (*literal) (struct LZ77Context * ctx, unsigned char c);
61 void (*match) (struct LZ77Context * ctx, int distance, int len);
65 * Initialise the private fields of an LZ77Context. It's up to the
66 * user to initialise the public fields.
68 static int lz77_init(struct LZ77Context *ctx);
71 * Supply data to be compressed. Will update the private fields of
72 * the LZ77Context, and will call literal() and match() to output.
73 * If `compress' is FALSE, it will never emit a match, but will
74 * instead call literal() for everything.
76 static void lz77_compress(struct LZ77Context *ctx,
77 unsigned char *data, int len, int compress);
80 * Modifiable parameters.
82 #define WINSIZE 32768 /* window size. Must be power of 2! */
83 #define HASHMAX 2039 /* one more than max hash value */
84 #define MAXMATCH 32 /* how many matches we track */
85 #define HASHCHARS 3 /* how many chars make a hash */
88 * This compressor takes a less slapdash approach than the
89 * gzip/zlib one. Rather than allowing our hash chains to fall into
90 * disuse near the far end, we keep them doubly linked so we can
91 * _find_ the far end, and then every time we add a new byte to the
92 * window (thus rolling round by one and removing the previous
93 * byte), we can carefully remove the hash chain entry.
96 #define INVALID -1 /* invalid hash _and_ invalid offset */
/* One WindowEntry per window byte: doubly-linked hash-chain links. */
98 short next, prev; /* array indices within the window */
/* One HashEntry per hash value: head of that value's chain. */
103 short first; /* window index of first in chain */
110 struct LZ77InternalContext {
111 struct WindowEntry win[WINSIZE]; /* chain bookkeeping, parallel to data[] */
112 unsigned char data[WINSIZE]; /* the 32K sliding window itself */
114 struct HashEntry hashtab[HASHMAX];
115 unsigned char pending[HASHCHARS] /* bytes too new to have been hashed yet */;
/* Hash the first HASHCHARS (3) bytes at `data' into [0, HASHMAX).
 * Three distinct prime multipliers make the hash order-sensitive. */
119 static int lz77_hash(unsigned char *data)
121 return (257 * data[0] + 263 * data[1] + 269 * data[2]) % HASHMAX;
/* Allocate the private LZ77 state and mark every window slot and hash
 * chain empty. (The return value and any failure path are not visible
 * in this listing.) */
124 static int lz77_init(struct LZ77Context *ctx)
126 struct LZ77InternalContext *st;
129 st = (struct LZ77InternalContext *) smalloc(sizeof(*st));
/* INVALID (-1) doubles as "no chain link" and "no hash value". */
135 for (i = 0; i < WINSIZE; i++)
136 st->win[i].next = st->win[i].prev = st->win[i].hashval = INVALID;
137 for (i = 0; i < HASHMAX; i++)
138 st->hashtab[i].first = INVALID;
/* Slide the window forward one byte: evict the hash-chain entry of the
 * byte being overwritten at winpos, then insert the new byte `c'
 * (whose 3-char hash is `hash') at the head of its chain. */
146 static void lz77_advance(struct LZ77InternalContext *st,
147 unsigned char c, int hash)
152 * Remove the hash entry at winpos from the tail of its chain,
153 * or empty the chain if it's the only thing on the chain.
155 if (st->win[st->winpos].prev != INVALID) {
156 st->win[st->win[st->winpos].prev].next = INVALID;
157 } else if (st->win[st->winpos].hashval != INVALID) {
158 st->hashtab[st->win[st->winpos].hashval].first = INVALID;
162 * Create a new entry at winpos and add it to the head of its
165 st->win[st->winpos].hashval = hash;
166 st->win[st->winpos].prev = INVALID;
167 off = st->win[st->winpos].next = st->hashtab[hash].first;
168 st->hashtab[hash].first = st->winpos;
/* NOTE(review): a guard such as `if (off != INVALID)' presumably
 * precedes the next line in the full source — confirm before use. */
170 st->win[off].prev = st->winpos;
171 st->data[st->winpos] = c;
174 * Advance the window pointer.
176 st->winpos = (st->winpos + 1) & (WINSIZE - 1);
/* CHARAT(k): byte at logical position k relative to the current input
 * pointer — negative k reads back into the sliding window, k >= 0
 * reads ahead into the new `data' buffer. */
179 #define CHARAT(k) ( (k)<0 ? st->data[(st->winpos+k)&(WINSIZE-1)] : data[k] )
/* Main LZ77 scan. Emits literal()/match() callbacks; with compress
 * FALSE every byte goes out as a literal but is still entered into
 * the window so later blocks can match against it. */
181 static void lz77_compress(struct LZ77Context *ctx,
182 unsigned char *data, int len, int compress)
184 struct LZ77InternalContext *st = ctx->ictx;
185 int i, hash, distance, off, nmatch, matchlen, advance;
186 struct Match defermatch, matches[MAXMATCH];
190 * Add any pending characters from last time to the window. (We
191 * might not be able to.)
193 for (i = 0; i < st->npending; i++) {
194 unsigned char foo[HASHCHARS];
196 if (len + st->npending - i < HASHCHARS) {
197 /* Update the pending array. */
198 for (j = i; j < st->npending; j++)
199 st->pending[j - i] = st->pending[j];
/* Assemble a full HASHCHARS-byte group spanning the pending
 * bytes and the start of the new data. */
202 for (j = 0; j < HASHCHARS; j++)
203 foo[j] = (i + j < st->npending ? st->pending[i + j] :
204 data[i + j - st->npending]);
205 lz77_advance(st, foo[0], lz77_hash(foo));
213 /* Don't even look for a match, if we're not compressing. */
214 if (compress && len >= HASHCHARS) {
216 * Hash the next few characters.
218 hash = lz77_hash(data);
221 * Look the hash up in the corresponding hash chain and see
225 for (off = st->hashtab[hash].first;
226 off != INVALID; off = st->win[off].next) {
227 /* distance = 1 if off == st->winpos-1 */
228 /* distance = WINSIZE if off == st->winpos */
230 WINSIZE - (off + WINSIZE - st->winpos) % WINSIZE;
/* Verify all HASHCHARS bytes really match (hash collisions). */
231 for (i = 0; i < HASHCHARS; i++)
232 if (CHARAT(i) != CHARAT(i - distance))
234 if (i == HASHCHARS) {
235 matches[nmatch].distance = distance;
236 matches[nmatch].len = 3;
237 if (++nmatch >= MAXMATCH)
248 * We've now filled up matches[] with nmatch potential
249 * matches. Follow them down to find the longest. (We
250 * assume here that it's always worth favouring a
251 * longer match over a shorter one.)
253 matchlen = HASHCHARS;
254 while (matchlen < len) {
/* Keep only candidates that still agree at position matchlen. */
256 for (i = j = 0; i < nmatch; i++) {
257 if (CHARAT(matchlen) ==
258 CHARAT(matchlen - matches[i].distance)) {
259 matches[j++] = matches[i];
269 * We've now got all the longest matches. We favour the
270 * shorter distances, which means we go with matches[0].
271 * So see if we want to defer it or throw it away.
273 matches[0].len = matchlen;
274 if (defermatch.len > 0) {
275 if (matches[0].len > defermatch.len + 1) {
276 /* We have a better match. Emit the deferred char,
277 * and defer this match. */
278 ctx->literal(ctx, (unsigned char) deferchr);
279 defermatch = matches[0];
283 /* We don't have a better match. Do the deferred one. */
284 ctx->match(ctx, defermatch.distance, defermatch.len);
285 advance = defermatch.len - 1;
289 /* There was no deferred match. Defer this one. */
290 defermatch = matches[0];
296 * We found no matches. Emit the deferred match, if
297 * any; otherwise emit a literal.
299 if (defermatch.len > 0) {
300 ctx->match(ctx, defermatch.distance, defermatch.len);
301 advance = defermatch.len - 1;
304 ctx->literal(ctx, data[0]);
310 * Now advance the position by `advance' characters,
311 * keeping the window and hash chains consistent.
313 while (advance > 0) {
314 if (len >= HASHCHARS) {
315 lz77_advance(st, *data, lz77_hash(data));
/* Too little data left to hash: stash the byte for next call. */
317 st->pending[st->npending++] = *data;
326 /* ----------------------------------------------------------------------
327 * Zlib compression. We always use the static Huffman tree option.
328 * Mostly this is because it's hard to scan a block in advance to
329 * work out better trees; dynamic trees are great when you're
330 * compressing a large file under no significant time constraint,
331 * but when you're compressing little bits in real time, things get
334 * I suppose it's possible that I could compute Huffman trees based
335 * on the frequencies in the _previous_ block, as a sort of
336 * heuristic, but I'm not confident that the gain would balance out
337 * having to transmit the trees.
/* Single global compressor context; Outbuf (partially visible below)
 * holds the growable output buffer and the bit-accumulation state. */
340 static struct LZ77Context ectx;
343 unsigned char *outbuf;
345 unsigned long outbits /* bits awaiting byte alignment, LSB first */;
/* Append the low `nbits' bits of `bits' to the output stream,
 * LSB-first, flushing each completed byte into out->outbuf (grown on
 * demand). The pending bits plus nbits must fit in 32. */
351 static void outbits(struct Outbuf *out, unsigned long bits, int nbits)
353 assert(out->noutbits + nbits <= 32);
354 out->outbits |= bits << out->noutbits;
355 out->noutbits += nbits;
356 while (out->noutbits >= 8) {
357 if (out->outlen >= out->outsize) {
358 out->outsize = out->outlen + 64; /* grow in small steps */
359 out->outbuf = srealloc(out->outbuf, out->outsize);
361 out->outbuf[out->outlen++] = (unsigned char) (out->outbits & 0xFF);
/* mirrorbytes[b] is b with its eight bits reversed (e.g. 0x01 -> 0x80,
 * 0x03 -> 0xc0). Deflate Huffman codes are defined MSB-first while
 * outbits() emits LSB-first, so codes are bit-reversed through this
 * table before transmission. */
367 static const unsigned char mirrorbytes[256] = {
368 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
369 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
370 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
371 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
372 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
373 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
374 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
375 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
376 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
377 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
378 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
379 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
380 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
381 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
382 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
383 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
384 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
385 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
386 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
387 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
388 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
389 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
390 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
391 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
392 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
393 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
394 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
395 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
396 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
397 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
398 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
399 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
/* A coderecord maps a Huffman symbol to its extra-bits count and the
 * value range it covers (RFC1951 length/distance coding). Later code
 * also reads .min and .max fields, not visible on this line. */
403 short code, extrabits;
/* NOTE(review): nearly all of lencodes[] and the head of distcodes[]
 * are missing from this listing; only the distcodes tail is visible. */
407 static const coderecord lencodes[] = {
439 static const coderecord distcodes[] = {
462 {22, 10, 2049, 3072},
463 {23, 10, 3073, 4096},
464 {24, 11, 4097, 6144},
465 {25, 11, 6145, 8192},
466 {26, 12, 8193, 12288},
467 {27, 12, 12289, 16384},
468 {28, 13, 16385, 24576},
469 {29, 13, 24577, 32768},
/* LZ77 literal callback: emit byte `c' either raw (inside an
 * uncompressed block) or as a fixed-tree Huffman literal code,
 * bit-reversed for the wire. */
472 static void zlib_literal(struct LZ77Context *ectx, unsigned char c)
474 struct Outbuf *out = (struct Outbuf *) ectx->userdata;
476 if (out->comp_disabled) {
478 * We're in an uncompressed block, so just output the byte.
485 /* 0 through 143 are 8 bits long starting at 00110000. */
486 outbits(out, mirrorbytes[0x30 + c], 8);
488 /* 144 through 255 are 9 bits long starting at 110010000. */
489 outbits(out, 1 + 2 * mirrorbytes[0x90 - 144 + c], 9);
/* LZ77 match callback: transmit a (distance, len) back-reference as
 * fixed-tree Huffman length and distance codes plus their extra bits.
 * Must not be called while compression is disabled. */
493 static void zlib_match(struct LZ77Context *ectx, int distance, int len)
495 const coderecord *d, *l;
497 struct Outbuf *out = (struct Outbuf *) ectx->userdata;
499 assert(!out->comp_disabled);
505 * We can transmit matches of lengths 3 through 258
506 * inclusive. So if len exceeds 258, we must transmit in
507 * several steps, with 258 or less in each step.
509 * Specifically: if len >= 261, we can transmit 258 and be
510 * sure of having at least 3 left for the next step. And if
511 * len <= 258, we can just transmit len. But if len == 259
512 * or 260, we must transmit len-3.
514 thislen = (len > 260 ? 258 : len <= 258 ? len : len - 3);
518 * Binary-search to find which length code we're
522 j = sizeof(lencodes) / sizeof(*lencodes);
526 if (thislen < lencodes[k].min)
528 else if (thislen > lencodes[k].max)
532 break; /* found it! */
537 * Transmit the length code. 256-279 are seven bits
538 * starting at 0000000; 280-287 are eight bits starting at
541 if (l->code <= 279) {
542 outbits(out, mirrorbytes[(l->code - 256) * 2], 7);
544 outbits(out, mirrorbytes[0xc0 - 280 + l->code], 8);
548 * Transmit the extra bits.
551 outbits(out, thislen - l->min, l->extrabits);
554 * Binary-search to find which distance code we're
558 j = sizeof(distcodes) / sizeof(*distcodes);
562 if (distance < distcodes[k].min)
564 else if (distance > distcodes[k].max)
568 break; /* found it! */
573 * Transmit the distance code. Five bits starting at 00000.
575 outbits(out, mirrorbytes[d->code * 8], 5);
578 * Transmit the extra bits.
581 outbits(out, distance - d->min, d->extrabits);
/* Set up the global compressor: hook the zlib emitters into the LZ77
 * context and create a fresh output-buffer state. */
585 void zlib_compress_init(void)
590 ectx.literal = zlib_literal;
591 ectx.match = zlib_match;
593 out = smalloc(sizeof(struct Outbuf));
594 out->outbits = out->noutbits = 0; /* empty bit accumulator */
596 out->comp_disabled = FALSE;
599 logevent("Initialised zlib (RFC1950) compression");
603 * Turn off actual LZ77 analysis for one block, to facilitate
604 * construction of a precise-length IGNORE packet. Returns the
605 * length adjustment (which is only valid for packets < 65536
606 * bytes, but that seems reasonable enough).
608 int zlib_disable_compression(void)
610 struct Outbuf *out = (struct Outbuf *) ectx.userdata;
/* From here until the next zlib_compress_block() call, zlib_literal
 * will emit raw bytes and zlib_match will not be used. */
613 out->comp_disabled = TRUE;
617 * If this is the first block, we will start by outputting two
618 * header bytes, and then three bits to begin an uncompressed
619 * block. This will cost three bytes (because we will start on
620 * a byte boundary, this is certain).
622 if (out->firstblock) {
626 * Otherwise, we will output seven bits to close the
627 * previous static block, and _then_ three bits to begin an
628 * uncompressed block, and then flush the current byte.
629 * This may cost two bytes or three, depending on noutbits.
631 n += (out->noutbits + 10) / 8;
635 * Now we output four bytes for the length / ~length pair in
636 * the uncompressed block.
/* Compress one SSH packet's worth of data. Emits the RFC1950 header
 * on the first call, then either an uncompressed Deflate block (when
 * compression is disabled) or a fixed-tree block, and hands back the
 * accumulated output buffer via *outblock/*outlen. */
643 int zlib_compress_block(unsigned char *block, int len,
644 unsigned char **outblock, int *outlen)
646 struct Outbuf *out = (struct Outbuf *) ectx.userdata;
650 out->outlen = out->outsize = 0;
653 * If this is the first block, output the Zlib (RFC1950) header
654 * bytes 78 9C. (Deflate compression, 32K window size, default
657 if (out->firstblock) {
/* 0x78 then 0x9C, sent low byte first through the bit stream. */
658 outbits(out, 0x9C78, 16);
665 if (out->comp_disabled) {
667 outbits(out, 0, 7); /* close static block */
/* Uncompressed Deflate blocks carry at most 65535 bytes each. */
670 int blen = (len < 65535 ? len : 65535);
673 * Start a Deflate (RFC1951) uncompressed block. We
674 * transmit a zero bit (BFINAL=0), followed by a zero
675 * bit and a one bit (BTYPE=00). Of course these are in
676 * the wrong order (00 0).
681 * Output zero bits to align to a byte boundary.
684 outbits(out, 0, 8 - out->noutbits);
687 * Output the block length, and then its one's
688 * complement. They're little-endian, so all we need to
689 * do is pass them straight to outbits() with bit count
692 outbits(out, blen, 16);
693 outbits(out, blen ^ 0xFFFF, 16);
696 * Do the `compression': we need to pass the data to
697 * lz77_compress so that it will be taken into account
698 * for subsequent (distance,length) pairs. But
699 * lz77_compress is passed FALSE, which means it won't
700 * actually find (or even look for) any matches; so
701 * every character will be passed straight to
702 * zlib_literal which will spot out->comp_disabled and
703 * emit in the uncompressed format.
705 lz77_compress(&ectx, block, blen, FALSE);
710 outbits(out, 2, 3); /* open new block */
714 * Start a Deflate (RFC1951) fixed-trees block. We
715 * transmit a zero bit (BFINAL=0), followed by a zero
716 * bit and a one bit (BTYPE=01). Of course these are in
717 * the wrong order (01 0).
723 * Do the compression.
725 lz77_compress(&ectx, block, len, TRUE);
728 * End the block (by transmitting code 256, which is
729 * 0000000 in fixed-tree mode), and transmit some empty
730 * blocks to ensure we have emitted the byte containing the
731 * last piece of genuine data. There are three ways we can
734 * - Minimal flush. Output end-of-block and then open a
735 * new static block. This takes 9 bits, which is
736 * guaranteed to flush out the last genuine code in the
737 * closed block; but allegedly zlib can't handle it.
739 * - Zlib partial flush. Output EOB, open and close an
740 * empty static block, and _then_ open the new block.
741 * This is the best zlib can handle.
743 * - Zlib sync flush. Output EOB, then an empty
744 * _uncompressed_ block (000, then sync to byte
745 * boundary, then send bytes 00 00 FF FF). Then open the
748 * For the moment, we will use Zlib partial flush.
750 outbits(out, 0, 7); /* close block */
751 outbits(out, 2, 3 + 7); /* empty static block */
752 outbits(out, 2, 3); /* open new block */
755 out->comp_disabled = FALSE;
/* Caller takes ownership of the returned buffer. */
757 *outblock = out->outbuf;
758 *outlen = out->outlen;
763 /* ----------------------------------------------------------------------
764 * Zlib decompression. Of course, even though our compressor always
765 * uses static trees, our _decompressor_ has to be capable of
766 * handling dynamic trees if it sees them.
770 * The way we work the Huffman decode is to have a table lookup on
771 * the first N bits of the input stream (in the order they arrive,
772 * of course, i.e. the first bit of the Huffman code is in bit 0).
773 * Each table entry lists the number of bits to consume, plus
774 * either an output code or a pointer to a secondary table.
777 struct zlib_tableentry;
779 struct zlib_tableentry {
/* (the entry's code/nbits fields are not visible in this listing) */
782 struct zlib_table *nexttable; /* non-NULL -> descend into subtable */
/* A table: 2^k entries indexed by the next k input bits. */
786 int mask; /* mask applied to input bit stream */
787 struct zlib_tableentry *table;
790 #define MAXCODELEN 16 /* one more than the longest Huffman code length */
798 static struct zlib_table *zlib_mkonetab(int *codes, unsigned char *lengths,
800 int pfx, int pfxbits, int bits)
802 struct zlib_table *tab = smalloc(sizeof(struct zlib_table));
803 int pfxmask = (1 << pfxbits) - 1;
804 int nbits, i, j, code;
806 tab->table = smalloc((1 << bits) * sizeof(struct zlib_tableentry));
807 tab->mask = (1 << bits) - 1;
809 for (code = 0; code <= tab->mask; code++) {
810 tab->table[code].code = -1;
811 tab->table[code].nbits = 0;
812 tab->table[code].nexttable = NULL;
815 for (i = 0; i < nsyms; i++) {
816 if (lengths[i] <= pfxbits || (codes[i] & pfxmask) != pfx)
818 code = (codes[i] >> pfxbits) & tab->mask;
819 for (j = code; j <= tab->mask; j += 1 << (lengths[i] - pfxbits)) {
820 tab->table[j].code = i;
821 nbits = lengths[i] - pfxbits;
822 if (tab->table[j].nbits < nbits)
823 tab->table[j].nbits = nbits;
826 for (code = 0; code <= tab->mask; code++) {
827 if (tab->table[code].nbits <= bits)
829 /* Generate a subtable. */
830 tab->table[code].code = -1;
831 nbits = tab->table[code].nbits - bits;
834 tab->table[code].nbits = bits;
835 tab->table[code].nexttable = zlib_mkonetab(codes, lengths, nsyms,
836 pfx | (code << pfxbits),
837 pfxbits + bits, nbits);
844 * Build a decode table, given a set of Huffman tree lengths.
846 static struct zlib_table *zlib_mktable(unsigned char *lengths,
849 int count[MAXCODELEN], startcode[MAXCODELEN], codes[MAXSYMS];
853 /* Count the codes of each length. */
855 for (i = 1; i < MAXCODELEN; i++)
857 for (i = 0; i < nlengths; i++) {
859 if (maxlen < lengths[i])
862 /* Determine the starting code for each length block. */
864 for (i = 1; i < MAXCODELEN; i++) {
869 /* Determine the code for each symbol. Mirrored, of course. */
870 for (i = 0; i < nlengths; i++) {
871 code = startcode[lengths[i]]++;
/* Reverse the code's bits so bit 0 is transmitted first. */
873 for (j = 0; j < lengths[i]; j++) {
874 codes[i] = (codes[i] << 1) | (code & 1);
880 * Now we have the complete list of Huffman codes. Build a
/* Root table consumes at most 9 bits per lookup. */
883 return zlib_mkonetab(codes, lengths, nlengths, 0, 0,
884 maxlen < 9 ? maxlen : 9);
/* Recursively free a decode table and all its subtables. (The rest of
 * the function body is not visible in this listing.) */
887 static int zlib_freetable(struct zlib_table **ztab)
889 struct zlib_table *tab;
900 for (code = 0; code <= tab->mask; code++)
901 if (tab->table[code].nexttable != NULL)
902 zlib_freetable(&tab->table[code].nexttable);
/* Global decompressor state: the static and currently-active decode
 * tables, the decode state machine, and the 32K back-reference
 * window. */
913 static struct zlib_decompress_ctx {
914 struct zlib_table *staticlentable, *staticdisttable;
915 struct zlib_table *currlentable, *currdisttable, *lenlentable;
/* (enum of decoder states; the full list is truncated in this listing) */
918 TREES_HDR, TREES_LENLEN, TREES_LEN, TREES_LENREP,
919 INBLK, GOTLENSYM, GOTLEN, GOTDISTSYM,
920 UNCOMP_LEN, UNCOMP_NLEN, UNCOMP_DATA
922 int sym, hlit, hdist, hclen, lenptr, lenextrabits, lenaddon, len,
925 unsigned char lenlen[19]; /* code lengths for the code-length alphabet */
926 unsigned char lengths[286 + 32]; /* literal/length then distance lengths */
929 unsigned char window[WINSIZE]; /* sliding window for back-references */
931 unsigned char *outblk;
/* Build the fixed literal/length and distance decode tables laid down
 * by RFC1951 (section 3.2.6) and reset the decoder state machine. */
935 void zlib_decompress_init(void)
937 unsigned char lengths[288];
938 memset(lengths, 8, 144); /* symbols 0-143: 8-bit codes */
939 memset(lengths + 144, 9, 256 - 144); /* symbols 144-255: 9-bit codes */
940 memset(lengths + 256, 7, 280 - 256); /* symbols 256-279: 7-bit codes */
941 memset(lengths + 280, 8, 288 - 280); /* symbols 280-287: 8-bit codes */
942 dctx.staticlentable = zlib_mktable(lengths, 288);
943 memset(lengths, 5, 32); /* all 32 distance codes: 5 bits */
944 dctx.staticdisttable = zlib_mktable(lengths, 32);
945 dctx.state = START; /* even before header */
946 dctx.currlentable = dctx.currdisttable = dctx.lenlentable = NULL;
949 logevent("Initialised zlib (RFC1950) decompression");
/* Consume one Huffman symbol from the caller's bit buffer using decode
 * table `tab', descending through subtables as needed. Returns -1 when
 * too few bits are buffered to complete the symbol. */
952 int zlib_huflookup(unsigned long *bitsp, int *nbitsp,
953 struct zlib_table *tab)
955 unsigned long bits = *bitsp;
958 struct zlib_tableentry *ent;
959 ent = &tab->table[bits & tab->mask];
960 if (ent->nbits > nbits)
961 return -1; /* not enough data */
965 tab = ent->nexttable;
/* Emit one decompressed byte: record it in the 32K sliding window (so
 * later back-references can copy it) and append it to the growable
 * output block. */
974 static void zlib_emit_char(int c)
976 dctx.window[dctx.winpos] = c;
977 dctx.winpos = (dctx.winpos + 1) & (WINSIZE - 1);
978 if (dctx.outlen >= dctx.outsize) {
979 dctx.outsize = dctx.outlen + 512;
980 dctx.outblk = srealloc(dctx.outblk, dctx.outsize);
982 dctx.outblk[dctx.outlen++] = c;
/* EATBITS(n): discard the n lowest (already consumed) bits from the
 * LSB-first bit buffer. */
985 #define EATBITS(n) ( dctx.nbits -= (n), dctx.bits >>= (n) )
/* Feed `len' bytes of compressed input through the resumable decode
 * state machine; decompressed output comes back in *outblock/*outlen.
 * The state machine can stop mid-symbol (goto finished) and resume on
 * the next call with the leftover bits still in dctx.bits. */
987 int zlib_decompress_block(unsigned char *block, int len,
988 unsigned char **outblock, int *outlen)
990 const coderecord *rec;
991 int code, blktype, rep, dist, nlen;
/* Order in which code-length code lengths appear (RFC1951 3.2.7). */
992 static const unsigned char lenlenmap[] = {
993 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
997 dctx.outsize = dctx.outlen = 0;
/* Keep at least 24 bits buffered while input remains. */
999 while (len > 0 || dctx.nbits > 0) {
1000 while (dctx.nbits < 24 && len > 0) {
1001 dctx.bits |= (*block++) << dctx.nbits;
1005 switch (dctx.state) {
1007 /* Expect 16-bit zlib header, which we'll dishonourably ignore. */
1008 if (dctx.nbits < 16)
1009 goto finished; /* done all we can */
1011 dctx.state = OUTSIDEBLK;
1014 /* Expect 3-bit block header. */
1016 goto finished; /* done all we can */
1018 blktype = dctx.bits & 3;
/* blktype 0: stored (uncompressed) block, byte-aligned. */
1021 int to_eat = dctx.nbits & 7;
1022 dctx.state = UNCOMP_LEN;
1023 EATBITS(to_eat); /* align to byte boundary */
1024 } else if (blktype == 1) {
/* blktype 1: fixed Huffman trees. */
1025 dctx.currlentable = dctx.staticlentable;
1026 dctx.currdisttable = dctx.staticdisttable;
1028 } else if (blktype == 2) {
/* blktype 2: dynamic trees; read the tree description first. */
1029 dctx.state = TREES_HDR;
1034 * Dynamic block header. Five bits of HLIT, five of
1035 * HDIST, four of HCLEN.
1037 if (dctx.nbits < 5 + 5 + 4)
1038 goto finished; /* done all we can */
1039 dctx.hlit = 257 + (dctx.bits & 31);
1041 dctx.hdist = 1 + (dctx.bits & 31);
1043 dctx.hclen = 4 + (dctx.bits & 15);
1046 dctx.state = TREES_LENLEN;
1047 memset(dctx.lenlen, 0, sizeof(dctx.lenlen));
/* Read hclen 3-bit code lengths, permuted via lenlenmap. */
1052 while (dctx.lenptr < dctx.hclen && dctx.nbits >= 3) {
1053 dctx.lenlen[lenlenmap[dctx.lenptr++]] =
1054 (unsigned char) (dctx.bits & 7);
1057 if (dctx.lenptr == dctx.hclen) {
1058 dctx.lenlentable = zlib_mktable(dctx.lenlen, 19);
1059 dctx.state = TREES_LEN;
/* All hlit+hdist code lengths read: build the real tables. */
1064 if (dctx.lenptr >= dctx.hlit + dctx.hdist) {
1065 dctx.currlentable = zlib_mktable(dctx.lengths, dctx.hlit);
1066 dctx.currdisttable = zlib_mktable(dctx.lengths + dctx.hlit,
1068 zlib_freetable(&dctx.lenlentable);
1073 zlib_huflookup(&dctx.bits, &dctx.nbits, dctx.lenlentable);
1077 dctx.lengths[dctx.lenptr++] = code;
/* Codes 16/17/18 are repeat instructions (RFC1951 3.2.7):
 * 16 repeats the previous length, 17/18 repeat zero. */
1079 dctx.lenextrabits = (code == 16 ? 2 : code == 17 ? 3 : 7);
1080 dctx.lenaddon = (code == 18 ? 11 : 3);
1081 dctx.lenrep = (code == 16 && dctx.lenptr > 0 ?
1082 dctx.lengths[dctx.lenptr - 1] : 0);
1083 dctx.state = TREES_LENREP;
1087 if (dctx.nbits < dctx.lenextrabits)
1091 (dctx.bits & ((1 << dctx.lenextrabits) - 1));
1092 EATBITS(dctx.lenextrabits);
1093 while (rep > 0 && dctx.lenptr < dctx.hlit + dctx.hdist) {
1094 dctx.lengths[dctx.lenptr] = dctx.lenrep;
1098 dctx.state = TREES_LEN;
/* INBLK: decode literal/length symbols until EOB (256). */
1102 zlib_huflookup(&dctx.bits, &dctx.nbits, dctx.currlentable);
1106 zlib_emit_char(code);
1107 else if (code == 256) {
1108 dctx.state = OUTSIDEBLK;
1109 if (dctx.currlentable != dctx.staticlentable)
1110 zlib_freetable(&dctx.currlentable);
1111 if (dctx.currdisttable != dctx.staticdisttable)
1112 zlib_freetable(&dctx.currdisttable);
1113 } else if (code < 286) { /* static tree can give >285; ignore */
1114 dctx.state = GOTLENSYM;
/* GOTLENSYM: fetch the length symbol's extra bits. */
1119 rec = &lencodes[dctx.sym - 257];
1120 if (dctx.nbits < rec->extrabits)
1123 rec->min + (dctx.bits & ((1 << rec->extrabits) - 1));
1124 EATBITS(rec->extrabits);
1125 dctx.state = GOTLEN;
1129 zlib_huflookup(&dctx.bits, &dctx.nbits,
1130 dctx.currdisttable);
1133 dctx.state = GOTDISTSYM;
/* GOTDISTSYM: fetch the distance's extra bits, then copy the
 * match out of the sliding window. */
1137 rec = &distcodes[dctx.sym];
1138 if (dctx.nbits < rec->extrabits)
1140 dist = rec->min + (dctx.bits & ((1 << rec->extrabits) - 1));
1141 EATBITS(rec->extrabits);
1144 zlib_emit_char(dctx.window[(dctx.winpos - dist) &
1149 * Uncompressed block. We expect to see a 16-bit LEN.
1151 if (dctx.nbits < 16)
1153 dctx.uncomplen = dctx.bits & 0xFFFF;
1155 dctx.state = UNCOMP_NLEN;
1159 * Uncompressed block. We expect to see a 16-bit NLEN,
1160 * which should be the one's complement of the previous
1163 if (dctx.nbits < 16)
1165 nlen = dctx.bits & 0xFFFF;
1167 dctx.state = UNCOMP_DATA;
/* UNCOMP_DATA: copy uncomplen raw bytes straight through. */
1172 zlib_emit_char(dctx.bits & 0xFF);
1174 if (--dctx.uncomplen == 0)
1175 dctx.state = OUTSIDEBLK; /* end of uncompressed block */
/* Caller takes ownership of the returned buffer. */
1181 *outblock = dctx.outblk;
1182 *outlen = dctx.outlen;
/* Method table plugging zlib compression into the SSH transport layer.
 * (Leading entries — presumably the algorithm name and the two init
 * functions — are not visible in this listing.) */
1187 const struct ssh_compress ssh_zlib = {
1190 zlib_compress_block,
1191 zlib_decompress_init,
1192 zlib_decompress_block,
1193 zlib_disable_compression