path: root/src/libflate
author     rsc <devnull@localhost>  2003-11-23 18:19:18 +0000
committer  rsc <devnull@localhost>  2003-11-23 18:19:18 +0000
commit     b6afd33e2f23953f00c6fac6b5d45946a9113654 (patch)
tree       300478ec8ac939d1b7aa0c1b987367a49a4e47ce /src/libflate
parent     8a708fb239f4272ac7e4f16f437093c56b2cab39 (diff)
add libflate
Diffstat (limited to 'src/libflate')
-rw-r--r--  src/libflate/adler.c              72
-rw-r--r--  src/libflate/crc.c                40
-rw-r--r--  src/libflate/deflate.c          1359
-rw-r--r--  src/libflate/deflateblock.c       56
-rw-r--r--  src/libflate/deflatezlib.c        60
-rw-r--r--  src/libflate/deflatezlibblock.c   34
-rw-r--r--  src/libflate/flateerr.c           23
-rw-r--r--  src/libflate/inflate.c           693
-rw-r--r--  src/libflate/inflateblock.c       54
-rw-r--r--  src/libflate/inflatezlib.c        66
-rw-r--r--  src/libflate/inflatezlibblock.c   68
-rw-r--r--  src/libflate/mkfile               22
-rw-r--r--  src/libflate/zlib.h               11
13 files changed, 2558 insertions, 0 deletions
diff --git a/src/libflate/adler.c b/src/libflate/adler.c
new file mode 100644
index 00000000..22fb8f6d
--- /dev/null
+++ b/src/libflate/adler.c
@@ -0,0 +1,72 @@
+#include <u.h>
+#include <libc.h>
+#include <flate.h>
+
+enum
+{
+ ADLERITERS = 5552, /* max iterations before s2 can overflow 32 bits */
+ ADLERBASE = 65521 /* largest prime smaller than 65536 */
+};
+
+ulong
+adler32(ulong adler, void *vbuf, int n)
+{
+ ulong s1, s2;
+ uchar *buf, *ebuf;
+ int m;
+
+ buf = vbuf;
+ s1 = adler & 0xffff;
+ s2 = (adler >> 16) & 0xffff;
+ for(; n >= 16; n -= m){
+ m = n;
+ if(m > ADLERITERS)
+ m = ADLERITERS;
+ m &= ~15;
+ for(ebuf = buf + m; buf < ebuf; buf += 16){
+ s1 += buf[0];
+ s2 += s1;
+ s1 += buf[1];
+ s2 += s1;
+ s1 += buf[2];
+ s2 += s1;
+ s1 += buf[3];
+ s2 += s1;
+ s1 += buf[4];
+ s2 += s1;
+ s1 += buf[5];
+ s2 += s1;
+ s1 += buf[6];
+ s2 += s1;
+ s1 += buf[7];
+ s2 += s1;
+ s1 += buf[8];
+ s2 += s1;
+ s1 += buf[9];
+ s2 += s1;
+ s1 += buf[10];
+ s2 += s1;
+ s1 += buf[11];
+ s2 += s1;
+ s1 += buf[12];
+ s2 += s1;
+ s1 += buf[13];
+ s2 += s1;
+ s1 += buf[14];
+ s2 += s1;
+ s1 += buf[15];
+ s2 += s1;
+ }
+ s1 %= ADLERBASE;
+ s2 %= ADLERBASE;
+ }
+ if(n){
+ for(ebuf = buf + n; buf < ebuf; buf++){
+ s1 += buf[0];
+ s2 += s1;
+ }
+ s1 %= ADLERBASE;
+ s2 %= ADLERBASE;
+ }
+ return (s2 << 16) + s1;
+}
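
The checksum is incremental: seed it with 1 (the zlib initial value) and feed successive buffers, carrying the return value forward. A minimal usage sketch, not part of this commit, that checksums standard input:

#include <u.h>
#include <libc.h>
#include <flate.h>

/* checksum standard input; adler32 carries its state in the return value */
void
main(void)
{
	uchar buf[8192];
	ulong a;
	int n;

	a = 1;		/* zlib initial value */
	while((n = read(0, buf, sizeof buf)) > 0)
		a = adler32(a, buf, n);
	print("%.8lux\n", a);
	exits(nil);
}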
diff --git a/src/libflate/crc.c b/src/libflate/crc.c
new file mode 100644
index 00000000..4da78f6c
--- /dev/null
+++ b/src/libflate/crc.c
@@ -0,0 +1,40 @@
+#include <u.h>
+#include <libc.h>
+#include <flate.h>
+
+ulong*
+mkcrctab(ulong poly)
+{
+ ulong *crctab;
+ ulong crc;
+ int i, j;
+
+ crctab = malloc(256 * sizeof(ulong));
+ if(crctab == nil)
+ return nil;
+
+ for(i = 0; i < 256; i++){
+ crc = i;
+ for(j = 0; j < 8; j++){
+ if(crc & 1)
+ crc = (crc >> 1) ^ poly;
+ else
+ crc >>= 1;
+ }
+ crctab[i] = crc;
+ }
+ return crctab;
+}
+
+ulong
+blockcrc(ulong *crctab, ulong crc, void *vbuf, int n)
+{
+ uchar *buf, *ebuf;
+
+ crc ^= 0xffffffff;
+ buf = vbuf;
+ ebuf = buf + n;
+ while(buf < ebuf)
+ crc = crctab[(crc & 0xff) ^ *buf++] ^ (crc >> 8);
+ return crc ^ 0xffffffff;
+}
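
mkcrctab takes a reflected polynomial, so the table is built once and blockcrc performs the pre- and post-inversion on every call, which makes it safe to chain over successive buffers. A hedged sketch of typical use; the polynomial 0xedb88320 (the common gzip/zip CRC-32) is the caller's choice, not something this file fixes:

#include <u.h>
#include <libc.h>
#include <flate.h>

/* CRC-32 of standard input using the reflected gzip/zip polynomial */
void
main(void)
{
	uchar buf[8192];
	ulong *tab, crc;
	int n;

	tab = mkcrctab(0xedb88320);	/* assumed polynomial; any reflected poly works */
	if(tab == nil)
		sysfatal("mkcrctab: out of memory");
	crc = 0;			/* blockcrc does the ~0 pre/post inversion itself */
	while((n = read(0, buf, sizeof buf)) > 0)
		crc = blockcrc(tab, crc, buf, n);
	print("%.8lux\n", crc);
	free(tab);
	exits(nil);
}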
diff --git a/src/libflate/deflate.c b/src/libflate/deflate.c
new file mode 100644
index 00000000..3a55af3f
--- /dev/null
+++ b/src/libflate/deflate.c
@@ -0,0 +1,1359 @@
+#include <u.h>
+#include <libc.h>
+#include <flate.h>
+
+typedef struct Chain Chain;
+typedef struct Chains Chains;
+typedef struct Dyncode Dyncode;
+typedef struct Huff Huff;
+typedef struct LZblock LZblock;
+typedef struct LZstate LZstate;
+
+enum
+{
+ /*
+ * deflate format parameters
+ */
+ DeflateUnc = 0, /* uncompressed block */
+ DeflateFix = 1, /* fixed huffman codes */
+ DeflateDyn = 2, /* dynamic huffman codes */
+
+ DeflateEob = 256, /* end of block code in lit/len book */
+ DeflateMaxBlock = 64*1024-1, /* maximum size of uncompressed block */
+
+ DeflateMaxExp = 10, /* maximum expansion for a block */
+
+ LenStart = 257, /* start of length codes in litlen */
+ Nlitlen = 288, /* number of litlen codes */
+ Noff = 30, /* number of offset codes */
+ Nclen = 19, /* number of codelen codes */
+
+ MaxOff = 32*1024,
+ MinMatch = 3, /* shortest match possible */
+ MaxMatch = 258, /* longest match possible */
+
+ /*
+ * huffman code parameters
+ */
+ MaxLeaf = Nlitlen,
+ MaxHuffBits = 16, /* max bits in a huffman code */
+ ChainMem = 2 * (MaxHuffBits - 1) * MaxHuffBits,
+
+ /*
+ * coding of the lz parse
+ */
+ LenFlag = 1 << 3,
+ LenShift = 4, /* leaves enough space for MinMatchMaxOff */
+ MaxLitRun = LenFlag - 1,
+
+ /*
+ * internal lz parameters
+ */
+ DeflateOut = 4096, /* output buffer size */
+ BlockSize = 8192, /* attempted input read quanta */
+ DeflateBlock = DeflateMaxBlock & ~(BlockSize - 1),
+ MinMatchMaxOff = 4096, /* max profitable offset for small match;
+ * assumes 8 bits for len, 5+10 for offset
+ * DONT CHANGE WITHOUT CHANGING LZPARSE CONSTANTS
+ */
+ HistSlop = 512, /* must be at least MaxMatch */
+ HistBlock = 64*1024,
+ HistSize = HistBlock + HistSlop,
+
+ HashLog = 13,
+ HashSize = 1<<HashLog,
+
+ MaxOffCode = 256, /* biggest offset looked up in direct table */
+
+ EstLitBits = 8,
+ EstLenBits = 4,
+ EstOffBits = 5,
+};
+
+/*
+ * knuth vol. 3 multiplicative hashing
+ * each byte x chosen according to rules
+ * 1/4 < x < 3/10, 1/3 < x < 3/7, 4/7 < x < 2/3, 7/10 < x < 3/4
+ * with reasonable spread between the bytes & their complements
+ *
+ * the 3 byte value appears to be almost as good as the 4 byte value,
+ * and might be faster on some machines
+ */
+/*
+#define hashit(c) (((ulong)(c) * 0x6b43a9) >> (24 - HashLog))
+*/
+#define hashit(c) ((((ulong)(c) & 0xffffff) * 0x6b43a9b5) >> (32 - HashLog))
+
+/*
+ * lempel-ziv style compression state
+ */
+struct LZstate
+{
+ uchar hist[HistSize];
+ ulong pos; /* current location in history buffer */
+ ulong avail; /* data available after pos */
+ int eof;
+ ushort hash[HashSize]; /* hash chains */
+ ushort nexts[MaxOff];
+ int now; /* pos in hash chains */
+ int dot; /* dawn of time in history */
+ int prevlen; /* lazy matching state */
+ int prevoff;
+ int maxcheck; /* compressor tuning */
+
+ uchar obuf[DeflateOut];
+ uchar *out; /* current position in the output buffer */
+ uchar *eout;
+ ulong bits; /* bit shift register */
+ int nbits;
+ int rbad; /* got an error reading the buffer */
+ int wbad; /* got an error writing the buffer */
+ int (*w)(void*, void*, int);
+ void *wr;
+
+ ulong totr; /* total input size */
+ ulong totw; /* total output size */
+ int debug;
+};
+
+struct LZblock
+{
+ ushort parse[DeflateMaxBlock / 2 + 1];
+ int lastv; /* value being constructed for parse */
+ ulong litlencount[Nlitlen];
+ ulong offcount[Noff];
+ ushort *eparse; /* limit for parse table */
+ int bytes; /* consumed from the input */
+ int excost; /* cost of encoding extra len & off bits */
+};
+
+/*
+ * huffman code table
+ */
+struct Huff
+{
+ short bits; /* length of the code */
+ ushort encode; /* the code */
+};
+
+/*
+ * encoding of dynamic huffman trees
+ */
+struct Dyncode
+{
+ int nlit;
+ int noff;
+ int nclen;
+ int ncode;
+ Huff codetab[Nclen];
+ uchar codes[Nlitlen+Noff];
+ uchar codeaux[Nlitlen+Noff];
+};
+
+static int deflateb(LZstate *lz, LZblock *lzb, void *rr, int (*r)(void*, void*, int));
+static int lzcomp(LZstate*, LZblock*, uchar*, ushort*, int finish);
+static void wrblock(LZstate*, int, ushort*, ushort*, Huff*, Huff*);
+static int bitcost(Huff*, ulong*, int);
+static int huffcodes(Dyncode*, Huff*, Huff*);
+static void wrdyncode(LZstate*, Dyncode*);
+static void lzput(LZstate*, ulong bits, int nbits);
+static void lzflushbits(LZstate*);
+static void lzflush(LZstate *lz);
+static void lzwrite(LZstate *lz, void *buf, int n);
+
+static int hufftabinit(Huff*, int, ulong*, int);
+static int mkgzprecode(Huff*, ulong *, int, int);
+
+static int mkprecode(Huff*, ulong *, int, int, ulong*);
+static void nextchain(Chains*, int);
+static void leafsort(ulong*, ushort*, int, int);
+
+/* conversion from len to code word */
+static int lencode[MaxMatch];
+
+/*
+ * conversion from off to code word
+ * off <= MaxOffCode ? offcode[off] : bigoffcode[off >> 7]
+*/
+static int offcode[MaxOffCode];
+static int bigoffcode[256];
+
+/* litlen code words LenStart-285 extra bits */
+static int litlenbase[Nlitlen-LenStart];
+static int litlenextra[Nlitlen-LenStart] =
+{
+/* 257 */ 0, 0, 0,
+/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
+/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
+/* 280 */ 4, 5, 5, 5, 5, 0, 0, 0
+};
+
+/* offset code word extra bits */
+static int offbase[Noff];
+static int offextra[] =
+{
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
+ 0, 0,
+};
+
+/* order code lengths */
+static int clenorder[Nclen] =
+{
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
+};
+
+/* static huffman tables */
+static Huff litlentab[Nlitlen];
+static Huff offtab[Noff];
+static Huff hofftab[Noff];
+
+/* bit reversal for brain dead endian swap in huffman codes */
+static uchar revtab[256];
+static ulong nlits;
+static ulong nmatches;
+
+int
+deflateinit(void)
+{
+ ulong bitcount[MaxHuffBits];
+ int i, j, ci, n;
+
+ /* byte reverse table */
+ for(i=0; i<256; i++)
+ for(j=0; j<8; j++)
+ if(i & (1<<j))
+ revtab[i] |= 0x80 >> j;
+
+ /* static Litlen bit lengths */
+ for(i=0; i<144; i++)
+ litlentab[i].bits = 8;
+ for(i=144; i<256; i++)
+ litlentab[i].bits = 9;
+ for(i=256; i<280; i++)
+ litlentab[i].bits = 7;
+ for(i=280; i<Nlitlen; i++)
+ litlentab[i].bits = 8;
+
+ memset(bitcount, 0, sizeof(bitcount));
+ bitcount[8] += 144 - 0;
+ bitcount[9] += 256 - 144;
+ bitcount[7] += 280 - 256;
+ bitcount[8] += Nlitlen - 280;
+
+ if(!hufftabinit(litlentab, Nlitlen, bitcount, 9))
+ return FlateInternal;
+
+ /* static offset bit lengths */
+ for(i = 0; i < Noff; i++)
+ offtab[i].bits = 5;
+
+ memset(bitcount, 0, sizeof(bitcount));
+ bitcount[5] = Noff;
+
+ if(!hufftabinit(offtab, Noff, bitcount, 5))
+ return FlateInternal;
+
+ bitcount[0] = 0;
+ bitcount[1] = 0;
+ if(!mkgzprecode(hofftab, bitcount, 2, MaxHuffBits))
+ return FlateInternal;
+
+ /* conversion tables for lens & offs to codes */
+ ci = 0;
+ for(i = LenStart; i < 286; i++){
+ n = ci + (1 << litlenextra[i - LenStart]);
+ litlenbase[i - LenStart] = ci;
+ for(; ci < n; ci++)
+ lencode[ci] = i;
+ }
+ /* patch up special case for len MaxMatch */
+ lencode[MaxMatch-MinMatch] = 285;
+ litlenbase[285-LenStart] = MaxMatch-MinMatch;
+
+ ci = 0;
+ for(i = 0; i < 16; i++){
+ n = ci + (1 << offextra[i]);
+ offbase[i] = ci;
+ for(; ci < n; ci++)
+ offcode[ci] = i;
+ }
+
+ ci = ci >> 7;
+ for(; i < 30; i++){
+ n = ci + (1 << (offextra[i] - 7));
+ offbase[i] = ci << 7;
+ for(; ci < n; ci++)
+ bigoffcode[ci] = i;
+ }
+ return FlateOk;
+}
+
+static void
+deflatereset(LZstate *lz, int level, int debug)
+{
+ memset(lz->nexts, 0, sizeof lz->nexts);
+ memset(lz->hash, 0, sizeof lz->hash);
+ lz->totr = 0;
+ lz->totw = 0;
+ lz->pos = 0;
+ lz->avail = 0;
+ lz->out = lz->obuf;
+ lz->eout = &lz->obuf[DeflateOut];
+ lz->prevlen = MinMatch - 1;
+ lz->prevoff = 0;
+ lz->now = MaxOff + 1;
+ lz->dot = lz->now;
+ lz->bits = 0;
+ lz->nbits = 0;
+ lz->maxcheck = (1 << level);
+ lz->maxcheck -= lz->maxcheck >> 2;
+ if(lz->maxcheck < 2)
+ lz->maxcheck = 2;
+ else if(lz->maxcheck > 1024)
+ lz->maxcheck = 1024;
+
+ lz->debug = debug;
+}
+
+int
+deflate(void *wr, int (*w)(void*, void*, int), void *rr, int (*r)(void*, void*, int), int level, int debug)
+{
+ LZstate *lz;
+ LZblock *lzb;
+ int ok;
+
+ lz = malloc(sizeof *lz + sizeof *lzb);
+ if(lz == nil)
+ return FlateNoMem;
+ lzb = (LZblock*)&lz[1];
+
+ deflatereset(lz, level, debug);
+ lz->w = w;
+ lz->wr = wr;
+ lz->wbad = 0;
+ lz->rbad = 0;
+ lz->eof = 0;
+ ok = FlateOk;
+ while(!lz->eof || lz->avail){
+ ok = deflateb(lz, lzb, rr, r);
+ if(ok != FlateOk)
+ break;
+ }
+ if(ok == FlateOk && lz->rbad)
+ ok = FlateInputFail;
+ if(ok == FlateOk && lz->wbad)
+ ok = FlateOutputFail;
+ free(lz);
+ return ok;
+}
+
+static int
+deflateb(LZstate *lz, LZblock *lzb, void *rr, int (*r)(void*, void*, int))
+{
+ Dyncode dyncode, hdyncode;
+ Huff dlitlentab[Nlitlen], dofftab[Noff], hlitlentab[Nlitlen];
+ ulong litcount[Nlitlen];
+ long nunc, ndyn, nfix, nhuff;
+ uchar *slop, *hslop;
+ ulong ep;
+ int i, n, m, mm, nslop;
+
+ memset(lzb->litlencount, 0, sizeof lzb->litlencount);
+ memset(lzb->offcount, 0, sizeof lzb->offcount);
+ lzb->litlencount[DeflateEob]++;
+
+ lzb->bytes = 0;
+ lzb->eparse = lzb->parse;
+ lzb->lastv = 0;
+ lzb->excost = 0;
+
+ slop = &lz->hist[lz->pos];
+ n = lz->avail;
+ while(n < DeflateBlock && (!lz->eof || lz->avail)){
+ /*
+ * fill the buffer as much as possible,
+ * while leaving room for MaxOff history behind lz->pos,
+ * and not reading more than we can handle.
+ *
+ * make sure we read at least HistSlop bytes.
+ */
+ if(!lz->eof){
+ ep = lz->pos + lz->avail;
+ if(ep >= HistBlock)
+ ep -= HistBlock;
+ m = HistBlock - MaxOff - lz->avail;
+ if(m > HistBlock - n)
+ m = HistBlock - n;
+ if(m > (HistBlock + HistSlop) - ep)
+ m = (HistBlock + HistSlop) - ep;
+ if(m & ~(BlockSize - 1))
+ m &= ~(BlockSize - 1);
+
+ /*
+ * be nice to the caller: stop reads that are too small.
+ * can only get here when we've already filled the buffer some
+ */
+ if(m < HistSlop){
+ if(!m || !lzb->bytes)
+ return FlateInternal;
+ break;
+ }
+
+ mm = (*r)(rr, &lz->hist[ep], m);
+ if(mm > 0){
+ /*
+ * wrap data to the end if we've read it from the beginning
+ * this way, we don't have to wrap searches.
+ *
+ * wrap reads past the end to the beginning.
+ * this way, we can guarantee minimum size reads.
+ */
+ if(ep < HistSlop)
+ memmove(&lz->hist[ep + HistBlock], &lz->hist[ep], HistSlop - ep);
+ else if(ep + mm > HistBlock)
+ memmove(&lz->hist[0], &lz->hist[HistBlock], ep + mm - HistBlock);
+
+ lz->totr += mm;
+ n += mm;
+ lz->avail += mm;
+ }else{
+ if(mm < 0)
+ lz->rbad = 1;
+ lz->eof = 1;
+ }
+ }
+ ep = lz->pos + lz->avail;
+ if(ep > HistSize)
+ ep = HistSize;
+ if(lzb->bytes + ep - lz->pos > DeflateMaxBlock)
+ ep = lz->pos + DeflateMaxBlock - lzb->bytes;
+ m = lzcomp(lz, lzb, &lz->hist[ep], lzb->eparse, lz->eof);
+ lzb->bytes += m;
+ lz->pos = (lz->pos + m) & (HistBlock - 1);
+ lz->avail -= m;
+ }
+ if(lzb->lastv)
+ *lzb->eparse++ = lzb->lastv;
+ if(lzb->eparse > lzb->parse + nelem(lzb->parse))
+ return FlateInternal;
+ nunc = lzb->bytes;
+
+ if(!mkgzprecode(dlitlentab, lzb->litlencount, Nlitlen, MaxHuffBits)
+ || !mkgzprecode(dofftab, lzb->offcount, Noff, MaxHuffBits))
+ return FlateInternal;
+
+ ndyn = huffcodes(&dyncode, dlitlentab, dofftab);
+ if(ndyn < 0)
+ return FlateInternal;
+ ndyn += bitcost(dlitlentab, lzb->litlencount, Nlitlen)
+ + bitcost(dofftab, lzb->offcount, Noff)
+ + lzb->excost;
+
+ memset(litcount, 0, sizeof litcount);
+
+ nslop = nunc;
+ if(nslop > &lz->hist[HistSize] - slop)
+ nslop = &lz->hist[HistSize] - slop;
+
+ for(i = 0; i < nslop; i++)
+ litcount[slop[i]]++;
+ hslop = &lz->hist[HistSlop - nslop];
+ for(; i < nunc; i++)
+ litcount[hslop[i]]++;
+ litcount[DeflateEob]++;
+
+ if(!mkgzprecode(hlitlentab, litcount, Nlitlen, MaxHuffBits))
+ return FlateInternal;
+ nhuff = huffcodes(&hdyncode, hlitlentab, hofftab);
+ if(nhuff < 0)
+ return FlateInternal;
+ nhuff += bitcost(hlitlentab, litcount, Nlitlen);
+
+ nfix = bitcost(litlentab, lzb->litlencount, Nlitlen)
+ + bitcost(offtab, lzb->offcount, Noff)
+ + lzb->excost;
+
+ lzput(lz, lz->eof && !lz->avail, 1);
+
+ if(lz->debug){
+ fprint(2, "block: bytes=%lud entries=%ld extra bits=%d\n\tuncompressed=%lud fixed=%lud dynamic=%lud huffman=%lud\n",
+ nunc, lzb->eparse - lzb->parse, lzb->excost, (nunc + 4) * 8, nfix, ndyn, nhuff);
+ fprint(2, "\tnlit=%lud matches=%lud eof=%d\n", nlits, nmatches, lz->eof && !lz->avail);
+ }
+
+ if((nunc + 4) * 8 < ndyn && (nunc + 4) * 8 < nfix && (nunc + 4) * 8 < nhuff){
+ lzput(lz, DeflateUnc, 2);
+ lzflushbits(lz);
+
+ lzput(lz, nunc & 0xff, 8);
+ lzput(lz, (nunc >> 8) & 0xff, 8);
+ lzput(lz, ~nunc & 0xff, 8);
+ lzput(lz, (~nunc >> 8) & 0xff, 8);
+ lzflush(lz);
+
+ lzwrite(lz, slop, nslop);
+ lzwrite(lz, &lz->hist[HistSlop], nunc - nslop);
+ }else if(ndyn < nfix && ndyn < nhuff){
+ lzput(lz, DeflateDyn, 2);
+
+ wrdyncode(lz, &dyncode);
+ wrblock(lz, slop - lz->hist, lzb->parse, lzb->eparse, dlitlentab, dofftab);
+ lzput(lz, dlitlentab[DeflateEob].encode, dlitlentab[DeflateEob].bits);
+ }else if(nhuff < nfix){
+ lzput(lz, DeflateDyn, 2);
+
+ wrdyncode(lz, &hdyncode);
+
+ m = 0;
+ for(i = nunc; i > MaxLitRun; i -= MaxLitRun)
+ lzb->parse[m++] = MaxLitRun;
+ lzb->parse[m++] = i;
+
+ wrblock(lz, slop - lz->hist, lzb->parse, lzb->parse + m, hlitlentab, hofftab);
+ lzput(lz, hlitlentab[DeflateEob].encode, hlitlentab[DeflateEob].bits);
+ }else{
+ lzput(lz, DeflateFix, 2);
+
+ wrblock(lz, slop - lz->hist, lzb->parse, lzb->eparse, litlentab, offtab);
+ lzput(lz, litlentab[DeflateEob].encode, litlentab[DeflateEob].bits);
+ }
+
+ if(lz->eof && !lz->avail){
+ lzflushbits(lz);
+ lzflush(lz);
+ }
+ return FlateOk;
+}
+
+static void
+lzwrite(LZstate *lz, void *buf, int n)
+{
+ int nw;
+
+ if(n && lz->w){
+ nw = (*lz->w)(lz->wr, buf, n);
+ if(nw != n){
+ lz->w = nil;
+ lz->wbad = 1;
+ }else
+ lz->totw += n;
+ }
+}
+
+static void
+lzflush(LZstate *lz)
+{
+ lzwrite(lz, lz->obuf, lz->out - lz->obuf);
+ lz->out = lz->obuf;
+}
+
+static void
+lzput(LZstate *lz, ulong bits, int nbits)
+{
+ bits = (bits << lz->nbits) | lz->bits;
+ for(nbits += lz->nbits; nbits >= 8; nbits -= 8){
+ *lz->out++ = bits;
+ if(lz->out == lz->eout)
+ lzflush(lz);
+ bits >>= 8;
+ }
+ lz->bits = bits;
+ lz->nbits = nbits;
+}
+
+static void
+lzflushbits(LZstate *lz)
+{
+ if(lz->nbits)
+ lzput(lz, 0, 8 - (lz->nbits & 7));
+}
+
+/*
+ * write out a block of n samples,
+ * given lz encoding and counts for huffman tables
+ */
+static void
+wrblock(LZstate *out, int litoff, ushort *soff, ushort *eoff, Huff *litlentab, Huff *offtab)
+{
+ ushort *off;
+ int i, run, offset, lit, len, c;
+
+ if(out->debug > 2){
+ for(off = soff; off < eoff; ){
+ offset = *off++;
+ run = offset & MaxLitRun;
+ if(run){
+ for(i = 0; i < run; i++){
+ lit = out->hist[litoff & (HistBlock - 1)];
+ litoff++;
+ fprint(2, "\tlit %.2ux %c\n", lit, lit);
+ }
+ if(!(offset & LenFlag))
+ continue;
+ len = offset >> LenShift;
+ offset = *off++;
+ }else if(offset & LenFlag){
+ len = offset >> LenShift;
+ offset = *off++;
+ }else{
+ len = 0;
+ offset >>= LenShift;
+ }
+ litoff += len + MinMatch;
+ fprint(2, "\t<%d, %d>\n", offset + 1, len + MinMatch);
+ }
+ }
+
+ for(off = soff; off < eoff; ){
+ offset = *off++;
+ run = offset & MaxLitRun;
+ if(run){
+ for(i = 0; i < run; i++){
+ lit = out->hist[litoff & (HistBlock - 1)];
+ litoff++;
+ lzput(out, litlentab[lit].encode, litlentab[lit].bits);
+ }
+ if(!(offset & LenFlag))
+ continue;
+ len = offset >> LenShift;
+ offset = *off++;
+ }else if(offset & LenFlag){
+ len = offset >> LenShift;
+ offset = *off++;
+ }else{
+ len = 0;
+ offset >>= LenShift;
+ }
+ litoff += len + MinMatch;
+ c = lencode[len];
+ lzput(out, litlentab[c].encode, litlentab[c].bits);
+ c -= LenStart;
+ if(litlenextra[c])
+ lzput(out, len - litlenbase[c], litlenextra[c]);
+
+ if(offset < MaxOffCode)
+ c = offcode[offset];
+ else
+ c = bigoffcode[offset >> 7];
+ lzput(out, offtab[c].encode, offtab[c].bits);
+ if(offextra[c])
+ lzput(out, offset - offbase[c], offextra[c]);
+ }
+}
+
+/*
+ * look for the longest, closest string which matches
+ * the next prefix. the clever part here is looking for
+ * a string 1 longer than the previous best match.
+ *
+ * follows the recommendation of limiting the number of chains
+ * which are checked. this appears to be the best heuristic.
+ */
+static int
+lzmatch(int now, int then, uchar *p, uchar *es, ushort *nexts, uchar *hist, int runlen, int check, int *m)
+{
+ uchar *s, *t;
+ int ml, off, last;
+
+ ml = check;
+ if(runlen >= 8)
+ check >>= 2;
+ *m = 0;
+ if(p + runlen >= es)
+ return runlen;
+ last = 0;
+ for(; check-- > 0; then = nexts[then & (MaxOff-1)]){
+ off = (ushort)(now - then);
+ if(off <= last || off > MaxOff)
+ break;
+ s = p + runlen;
+ t = hist + (((p - hist) - off) & (HistBlock-1));
+ t += runlen;
+ for(; s >= p; s--){
+ if(*s != *t)
+ goto matchloop;
+ t--;
+ }
+
+ /*
+ * we have a new best match.
+ * extend it to its maximum length
+ */
+ t += runlen + 2;
+ s += runlen + 2;
+ for(; s < es; s++){
+ if(*s != *t)
+ break;
+ t++;
+ }
+ runlen = s - p;
+ *m = off - 1;
+ if(s == es || runlen > ml)
+ break;
+matchloop:;
+ last = off;
+ }
+ return runlen;
+}
+
+static int
+lzcomp(LZstate *lz, LZblock *lzb, uchar *ep, ushort *parse, int finish)
+{
+ ulong cont, excost, *litlencount, *offcount;
+ uchar *p, *q, *s, *es;
+ ushort *nexts, *hash;
+ int v, i, h, runlen, n, now, then, m, prevlen, prevoff, maxdefer;
+
+ litlencount = lzb->litlencount;
+ offcount = lzb->offcount;
+ nexts = lz->nexts;
+ hash = lz->hash;
+ now = lz->now;
+
+ p = &lz->hist[lz->pos];
+ if(lz->prevlen != MinMatch - 1)
+ p++;
+
+ /*
+ * hash in the links for any hanging link positions,
+ * and calculate the hash for the current position.
+ */
+ n = MinMatch;
+ if(n > ep - p)
+ n = ep - p;
+ cont = 0;
+ for(i = 0; i < n - 1; i++){
+ m = now - ((MinMatch-1) - i);
+ if(m < lz->dot)
+ continue;
+ s = lz->hist + (((p - lz->hist) - (now - m)) & (HistBlock-1));
+
+ cont = (s[0] << 16) | (s[1] << 8) | s[2];
+ h = hashit(cont);
+ prevoff = 0;
+ for(then = hash[h]; ; then = nexts[then & (MaxOff-1)]){
+ v = (ushort)(now - then);
+ if(v <= prevoff || v >= (MinMatch-1) - i)
+ break;
+ prevoff = v;
+ }
+ if(then == (ushort)m)
+ continue;
+ nexts[m & (MaxOff-1)] = hash[h];
+ hash[h] = m;
+ }
+ for(i = 0; i < n; i++)
+ cont = (cont << 8) | p[i];
+
+ /*
+ * now must point to the index in the nexts array
+ * corresponding to p's position in the history
+ */
+ prevlen = lz->prevlen;
+ prevoff = lz->prevoff;
+ maxdefer = lz->maxcheck >> 2;
+ excost = 0;
+ v = lzb->lastv;
+ for(;;){
+ es = p + MaxMatch;
+ if(es > ep){
+ if(!finish || p >= ep)
+ break;
+ es = ep;
+ }
+
+ h = hashit(cont);
+ runlen = lzmatch(now, hash[h], p, es, nexts, lz->hist, prevlen, lz->maxcheck, &m);
+
+ /*
+ * back out of small matches too far in the past
+ */
+ if(runlen == MinMatch && m >= MinMatchMaxOff){
+ runlen = MinMatch - 1;
+ m = 0;
+ }
+
+ /*
+ * record the encoding and increment counts for huffman trees
+ * if we get a match, defer selecting it until we check for
+ * a longer match at the next position.
+ */
+ if(prevlen >= runlen && prevlen != MinMatch - 1){
+ /*
+ * old match at least as good; use that one
+ */
+ n = prevlen - MinMatch;
+ if(v || n){
+ *parse++ = v | LenFlag | (n << LenShift);
+ *parse++ = prevoff;
+ }else
+ *parse++ = prevoff << LenShift;
+ v = 0;
+
+ n = lencode[n];
+ litlencount[n]++;
+ excost += litlenextra[n - LenStart];
+
+ if(prevoff < MaxOffCode)
+ n = offcode[prevoff];
+ else
+ n = bigoffcode[prevoff >> 7];
+ offcount[n]++;
+ excost += offextra[n];
+
+ runlen = prevlen - 1;
+ prevlen = MinMatch - 1;
+ nmatches++;
+ }else if(runlen == MinMatch - 1){
+ /*
+ * no match; just put out the literal
+ */
+ if(++v == MaxLitRun){
+ *parse++ = v;
+ v = 0;
+ }
+ litlencount[*p]++;
+ nlits++;
+ runlen = 1;
+ }else{
+ if(prevlen != MinMatch - 1){
+ /*
+ * longer match now. output previous literal,
+ * update current match, and try again
+ */
+ if(++v == MaxLitRun){
+ *parse++ = v;
+ v = 0;
+ }
+ litlencount[p[-1]]++;
+ nlits++;
+ }
+
+ prevoff = m;
+
+ if(runlen < maxdefer){
+ prevlen = runlen;
+ runlen = 1;
+ }else{
+ n = runlen - MinMatch;
+ if(v || n){
+ *parse++ = v | LenFlag | (n << LenShift);
+ *parse++ = prevoff;
+ }else
+ *parse++ = prevoff << LenShift;
+ v = 0;
+
+ n = lencode[n];
+ litlencount[n]++;
+ excost += litlenextra[n - LenStart];
+
+ if(prevoff < MaxOffCode)
+ n = offcode[prevoff];
+ else
+ n = bigoffcode[prevoff >> 7];
+ offcount[n]++;
+ excost += offextra[n];
+
+ prevlen = MinMatch - 1;
+ nmatches++;
+ }
+ }
+
+ /*
+ * update the hash for the newly matched data
+ * this is constructed so the link for the old
+ * match in this position must be at the end of a chain,
+ * and will expire when this match is added, ie it will
+ * never be examined by the match loop.
+ * add to the hash chain only if we have the real hash data.
+ */
+ for(q = p + runlen; p != q; p++){
+ if(p + MinMatch <= ep){
+ h = hashit(cont);
+ nexts[now & (MaxOff-1)] = hash[h];
+ hash[h] = now;
+ if(p + MinMatch < ep)
+ cont = (cont << 8) | p[MinMatch];
+ }
+ now++;
+ }
+ }
+
+ /*
+ * we can just store away the lazy state and
+ * pick it up next time. the last block will have finish set
+ * so we won't have any pending matches
+ * however, we need to correct for how much we've encoded
+ */
+ if(prevlen != MinMatch - 1)
+ p--;
+
+ lzb->excost += excost;
+ lzb->eparse = parse;
+ lzb->lastv = v;
+
+ lz->now = now;
+ lz->prevlen = prevlen;
+ lz->prevoff = prevoff;
+
+ return p - &lz->hist[lz->pos];
+}
+
+/*
+ * make up the dynamic code tables, and return the number of bits
+ * needed to transmit them.
+ */
+static int
+huffcodes(Dyncode *dc, Huff *littab, Huff *offtab)
+{
+ Huff *codetab;
+ uchar *codes, *codeaux;
+ ulong codecount[Nclen], excost;
+ int i, n, m, v, c, nlit, noff, ncode, nclen;
+
+ codetab = dc->codetab;
+ codes = dc->codes;
+ codeaux = dc->codeaux;
+
+ /*
+ * trim the sizes of the tables
+ */
+ for(nlit = Nlitlen; nlit > 257 && littab[nlit-1].bits == 0; nlit--)
+ ;
+ for(noff = Noff; noff > 1 && offtab[noff-1].bits == 0; noff--)
+ ;
+
+ /*
+ * make the code-length code
+ */
+ for(i = 0; i < nlit; i++)
+ codes[i] = littab[i].bits;
+ for(i = 0; i < noff; i++)
+ codes[i + nlit] = offtab[i].bits;
+
+ /*
+ * run-length compress the code-length code
+ */
+ excost = 0;
+ c = 0;
+ ncode = nlit+noff;
+ for(i = 0; i < ncode; ){
+ n = i + 1;
+ v = codes[i];
+ while(n < ncode && v == codes[n])
+ n++;
+ n -= i;
+ i += n;
+ if(v == 0){
+ while(n >= 11){
+ m = n;
+ if(m > 138)
+ m = 138;
+ codes[c] = 18;
+ codeaux[c++] = m - 11;
+ n -= m;
+ excost += 7;
+ }
+ if(n >= 3){
+ codes[c] = 17;
+ codeaux[c++] = n - 3;
+ n = 0;
+ excost += 3;
+ }
+ }
+ while(n--){
+ codes[c++] = v;
+ while(n >= 3){
+ m = n;
+ if(m > 6)
+ m = 6;
+ codes[c] = 16;
+ codeaux[c++] = m - 3;
+ n -= m;
+ excost += 3;
+ }
+ }
+ }
+
+ memset(codecount, 0, sizeof codecount);
+ for(i = 0; i < c; i++)
+ codecount[codes[i]]++;
+ if(!mkgzprecode(codetab, codecount, Nclen, 8))
+ return -1;
+
+ for(nclen = Nclen; nclen > 4 && codetab[clenorder[nclen-1]].bits == 0; nclen--)
+ ;
+
+ dc->nlit = nlit;
+ dc->noff = noff;
+ dc->nclen = nclen;
+ dc->ncode = c;
+
+ return 5 + 5 + 4 + nclen * 3 + bitcost(codetab, codecount, Nclen) + excost;
+}
+
+static void
+wrdyncode(LZstate *out, Dyncode *dc)
+{
+ Huff *codetab;
+ uchar *codes, *codeaux;
+ int i, v, c;
+
+ /*
+ * write out header, then code length code lengths,
+ * and code lengths
+ */
+ lzput(out, dc->nlit-257, 5);
+ lzput(out, dc->noff-1, 5);
+ lzput(out, dc->nclen-4, 4);
+
+ codetab = dc->codetab;
+ for(i = 0; i < dc->nclen; i++)
+ lzput(out, codetab[clenorder[i]].bits, 3);
+
+ codes = dc->codes;
+ codeaux = dc->codeaux;
+ c = dc->ncode;
+ for(i = 0; i < c; i++){
+ v = codes[i];
+ lzput(out, codetab[v].encode, codetab[v].bits);
+ if(v >= 16){
+ if(v == 16)
+ lzput(out, codeaux[i], 2);
+ else if(v == 17)
+ lzput(out, codeaux[i], 3);
+ else /* v == 18 */
+ lzput(out, codeaux[i], 7);
+ }
+ }
+}
+
+static int
+bitcost(Huff *tab, ulong *count, int n)
+{
+ ulong tot;
+ int i;
+
+ tot = 0;
+ for(i = 0; i < n; i++)
+ tot += count[i] * tab[i].bits;
+ return tot;
+}
+
+static int
+mkgzprecode(Huff *tab, ulong *count, int n, int maxbits)
+{
+ ulong bitcount[MaxHuffBits];
+ int i, nbits;
+
+ nbits = mkprecode(tab, count, n, maxbits, bitcount);
+ for(i = 0; i < n; i++){
+ if(tab[i].bits == -1)
+ tab[i].bits = 0;
+ else if(tab[i].bits == 0){
+ if(nbits != 0 || bitcount[0] != 1)
+ return 0;
+ bitcount[1] = 1;
+ bitcount[0] = 0;
+ nbits = 1;
+ tab[i].bits = 1;
+ }
+ }
+ if(bitcount[0] != 0)
+ return 0;
+ return hufftabinit(tab, n, bitcount, nbits);
+}
+
+static int
+hufftabinit(Huff *tab, int n, ulong *bitcount, int nbits)
+{
+ ulong code, nc[MaxHuffBits];
+ int i, bits;
+
+ code = 0;
+ for(bits = 1; bits <= nbits; bits++){
+ code = (code + bitcount[bits-1]) << 1;
+ nc[bits] = code;
+ }
+
+ for(i = 0; i < n; i++){
+ bits = tab[i].bits;
+ if(bits){
+ code = nc[bits]++ << (16 - bits);
+ if(code & ~0xffff)
+ return 0;
+ tab[i].encode = revtab[code >> 8] | (revtab[code & 0xff] << 8);
+ }
+ }
+ return 1;
+}
+
+
+/*
+ * this should be in a library
+ */
+struct Chain
+{
+ ulong count; /* occurrences of everything in the chain */
+ ushort leaf; /* leaves to the left of chain, or leaf value */
+ char col; /* ref count for collecting unused chains */
+ char gen; /* need to generate chains for next lower level */
+ Chain *up; /* Chain up in the lists */
+};
+
+struct Chains
+{
+ Chain *lists[(MaxHuffBits - 1) * 2];
+ ulong leafcount[MaxLeaf]; /* sorted list of leaf counts */
+ ushort leafmap[MaxLeaf]; /* map to actual leaf number */
+ int nleaf; /* number of leaves */
+ Chain chains[ChainMem];
+ Chain *echains;
+ Chain *free;
+ char col;
+ int nlists;
+};
+
+/*
+ * fast, low space overhead algorithm for max depth huffman type codes
+ *
+ * J. Katajainen, A. Moffat and A. Turpin, "A fast and space-economical
+ * algorithm for length-limited coding," Proc. Intl. Symp. on Algorithms
+ * and Computation, Cairns, Australia, Dec. 1995, Lecture Notes in Computer
+ * Science, Vol 1004, J. Staples, P. Eades, N. Katoh, and A. Moffat, eds.,
+ * pp 12-21, Springer Verlag, New York, 1995.
+ */
+static int
+mkprecode(Huff *tab, ulong *count, int n, int maxbits, ulong *bitcount)
+{
+ Chains cs;
+ Chain *c;
+ int i, m, em, bits;
+
+ /*
+ * set up the sorted list of leaves
+ */
+ m = 0;
+ for(i = 0; i < n; i++){
+ tab[i].bits = -1;
+ tab[i].encode = 0;
+ if(count[i] != 0){
+ cs.leafcount[m] = count[i];
+ cs.leafmap[m] = i;
+ m++;
+ }
+ }
+ if(m < 2){
+ if(m != 0){
+ tab[cs.leafmap[0]].bits = 0;
+ bitcount[0] = 1;
+ }else
+ bitcount[0] = 0;
+ return 0;
+ }
+ cs.nleaf = m;
+ leafsort(cs.leafcount, cs.leafmap, 0, m);
+
+ for(i = 0; i < m; i++)
+ cs.leafcount[i] = count[cs.leafmap[i]];
+
+ /*
+ * set up free list
+ */
+ cs.free = &cs.chains[2];
+ cs.echains = &cs.chains[ChainMem];
+ cs.col = 1;
+
+ /*
+ * initialize chains for each list
+ */
+ c = &cs.chains[0];
+ c->count = cs.leafcount[0];
+ c->leaf = 1;
+ c->col = cs.col;
+ c->up = nil;
+ c->gen = 0;
+ cs.chains[1] = cs.chains[0];
+ cs.chains[1].leaf = 2;
+ cs.chains[1].count = cs.leafcount[1];
+ for(i = 0; i < maxbits-1; i++){
+ cs.lists[i * 2] = &cs.chains[0];
+ cs.lists[i * 2 + 1] = &cs.chains[1];
+ }
+
+ cs.nlists = 2 * (maxbits - 1);
+ m = 2 * m - 2;
+ for(i = 2; i < m; i++)
+ nextchain(&cs, cs.nlists - 2);
+
+ bits = 0;
+ bitcount[0] = cs.nleaf;
+ for(c = cs.lists[cs.nlists - 1]; c != nil; c = c->up){
+ m = c->leaf;
+ bitcount[bits++] -= m;
+ bitcount[bits] = m;
+ }
+ m = 0;
+ for(i = bits; i >= 0; i--)
+ for(em = m + bitcount[i]; m < em; m++)
+ tab[cs.leafmap[m]].bits = i;
+
+ return bits;
+}
+
+/*
+ * calculate the next chain on the list
+ * we can always toss out the old chain
+ */
+static void
+nextchain(Chains *cs, int list)
+{
+ Chain *c, *oc;
+ int i, nleaf, sumc;
+
+ oc = cs->lists[list + 1];
+ cs->lists[list] = oc;
+ if(oc == nil)
+ return;
+
+ /*
+ * make sure we have all chains needed to make sumc
+ * note it is possible to generate only one of these,
+ * use twice that value for sumc, and then generate
+ * the second if that preliminary sumc would be chosen.
+ * however, this appears to be slower on current tests
+ */
+ if(oc->gen){
+ nextchain(cs, list - 2);
+ nextchain(cs, list - 2);
+ oc->gen = 0;
+ }
+
+ /*
+ * pick up the chain we're going to add;
+ * collect unused chains if no free ones are left
+ */
+ for(c = cs->free; ; c++){
+ if(c >= cs->echains){
+ cs->col++;
+ for(i = 0; i < cs->nlists; i++)
+ for(c = cs->lists[i]; c != nil; c = c->up)
+ c->col = cs->col;
+ c = cs->chains;
+ }
+ if(c->col != cs->col)
+ break;
+ }
+
+ /*
+ * pick the cheapest of
+ * 1) the next package from the previous list
+ * 2) the next leaf
+ */
+ nleaf = oc->leaf;
+ sumc = 0;
+ if(list > 0 && cs->lists[list-1] != nil)
+ sumc = cs->lists[list-2]->count + cs->lists[list-1]->count;
+ if(sumc != 0 && (nleaf >= cs->nleaf || cs->leafcount[nleaf] > sumc)){
+ c->count = sumc;
+ c->leaf = oc->leaf;
+ c->up = cs->lists[list-1];
+ c->gen = 1;
+ }else if(nleaf >= cs->nleaf){
+ cs->lists[list + 1] = nil;
+ return;
+ }else{
+ c->leaf = nleaf + 1;
+ c->count = cs->leafcount[nleaf];
+ c->up = oc->up;
+ c->gen = 0;
+ }
+ cs->free = c + 1;
+
+ cs->lists[list + 1] = c;
+ c->col = cs->col;
+}
+
+static int
+pivot(ulong *c, int a, int n)
+{
+ int j, pi, pj, pk;
+
+ j = n/6;
+ pi = a + j; /* 1/6 */
+ j += j;
+ pj = pi + j; /* 1/2 */
+ pk = pj + j; /* 5/6 */
+ if(c[pi] < c[pj]){
+ if(c[pi] < c[pk]){
+ if(c[pj] < c[pk])
+ return pj;
+ return pk;
+ }
+ return pi;
+ }
+ if(c[pj] < c[pk]){
+ if(c[pi] < c[pk])
+ return pi;
+ return pk;
+ }
+ return pj;
+}
+
+static void
+leafsort(ulong *leafcount, ushort *leafmap, int a, int n)
+{
+ ulong t;
+ int j, pi, pj, pn;
+
+ while(n > 1){
+ if(n > 10){
+ pi = pivot(leafcount, a, n);
+ }else
+ pi = a + (n>>1);
+
+ t = leafcount[pi];
+ leafcount[pi] = leafcount[a];
+ leafcount[a] = t;
+ t = leafmap[pi];
+ leafmap[pi] = leafmap[a];
+ leafmap[a] = t;
+ pi = a;
+ pn = a + n;
+ pj = pn;
+ for(;;){
+ do
+ pi++;
+ while(pi < pn && (leafcount[pi] < leafcount[a] || leafcount[pi] == leafcount[a] && leafmap[pi] > leafmap[a]));
+ do
+ pj--;
+ while(pj > a && (leafcount[pj] > leafcount[a] || leafcount[pj] == leafcount[a] && leafmap[pj] < leafmap[a]));
+ if(pj < pi)
+ break;
+ t = leafcount[pi];
+ leafcount[pi] = leafcount[pj];
+ leafcount[pj] = t;
+ t = leafmap[pi];
+ leafmap[pi] = leafmap[pj];
+ leafmap[pj] = t;
+ }
+ t = leafcount[a];
+ leafcount[a] = leafcount[pj];
+ leafcount[pj] = t;
+ t = leafmap[a];
+ leafmap[a] = leafmap[pj];
+ leafmap[pj] = t;
+ j = pj - a;
+
+ n = n-j-1;
+ if(j >= n){
+ leafsort(leafcount, leafmap, a, j);
+ a += j+1;
+ }else{
+ leafsort(leafcount, leafmap, a + (j+1), n);
+ n = j;
+ }
+ }
+}
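
deflate pulls input and pushes output through caller-supplied callbacks, so it can stream arbitrarily large data with only its internal 64k window. A sketch of wiring it to file descriptors; the fd-in-a-pointer trick, level 6, and the wrapper names are illustrative assumptions, not part of the library:

#include <u.h>
#include <libc.h>
#include <flate.h>

/* illustrative fd-based callbacks; fds are smuggled through the void* handles */
static int
fdread(void *v, void *buf, int n)
{
	return read((int)(uintptr)v, buf, n);
}

static int
fdwrite(void *v, void *buf, int n)
{
	return write((int)(uintptr)v, buf, n);
}

void
main(void)
{
	int err;

	if(deflateinit() != FlateOk)
		sysfatal("deflateinit failed");
	/* compress fd 0 to fd 1 at an assumed level 6, no debug output */
	err = deflate((void*)1, fdwrite, (void*)0, fdread, 6, 0);
	if(err != FlateOk)
		sysfatal("deflate: %s", flateerr(err));
	exits(nil);
}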
diff --git a/src/libflate/deflateblock.c b/src/libflate/deflateblock.c
new file mode 100644
index 00000000..b16c3f3b
--- /dev/null
+++ b/src/libflate/deflateblock.c
@@ -0,0 +1,56 @@
+#include <u.h>
+#include <libc.h>
+#include <flate.h>
+
+typedef struct Block Block;
+
+struct Block
+{
+ uchar *pos;
+ uchar *limit;
+};
+
+static int
+blread(void *vb, void *buf, int n)
+{
+ Block *b;
+
+ b = vb;
+ if(n > b->limit - b->pos)
+ n = b->limit - b->pos;
+ memmove(buf, b->pos, n);
+ b->pos += n;
+ return n;
+}
+
+static int
+blwrite(void *vb, void *buf, int n)
+{
+ Block *b;
+
+ b = vb;
+
+ if(n > b->limit - b->pos)
+ n = b->limit - b->pos;
+ memmove(b->pos, buf, n);
+ b->pos += n;
+ return n;
+}
+
+int
+deflateblock(uchar *dst, int dsize, uchar *src, int ssize, int level, int debug)
+{
+ Block bd, bs;
+ int ok;
+
+ bs.pos = src;
+ bs.limit = src + ssize;
+
+ bd.pos = dst;
+ bd.limit = dst + dsize;
+
+ ok = deflate(&bd, blwrite, &bs, blread, level, debug);
+ if(ok != FlateOk)
+ return ok;
+ return bd.pos - dst;
+}
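
deflateblock is the whole-buffer convenience wrapper: it returns the compressed length, or a negative Flate error (for example FlateOutputFail when dst is too small). A hedged sketch; compressbuf and the fixed level are assumptions of this example:

#include <u.h>
#include <libc.h>
#include <flate.h>

/* hypothetical helper: compress src into dst, return compressed length or < 0 */
int
compressbuf(uchar *dst, int dsize, uchar *src, int ssize)
{
	int n;

	/* deflateinit must have been called once beforehand */
	n = deflateblock(dst, dsize, src, ssize, 6, 0);
	if(n < 0)
		fprint(2, "deflateblock: %s\n", flateerr(n));
	return n;
}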
diff --git a/src/libflate/deflatezlib.c b/src/libflate/deflatezlib.c
new file mode 100644
index 00000000..037c355a
--- /dev/null
+++ b/src/libflate/deflatezlib.c
@@ -0,0 +1,60 @@
+#include <u.h>
+#include <libc.h>
+#include <flate.h>
+#include "zlib.h"
+
+typedef struct ZRead ZRead;
+
+struct ZRead
+{
+ ulong adler;
+ void *rr;
+ int (*r)(void*, void*, int);
+};
+
+static int
+zlread(void *vzr, void *buf, int n)
+{
+ ZRead *zr;
+
+ zr = vzr;
+ n = (*zr->r)(zr->rr, buf, n);
+ if(n <= 0)
+ return n;
+ zr->adler = adler32(zr->adler, buf, n);
+ return n;
+}
+
+int
+deflatezlib(void *wr, int (*w)(void*, void*, int), void *rr, int (*r)(void*, void*, int), int level, int debug)
+{
+ ZRead zr;
+ uchar buf[4];
+ int ok;
+
+ buf[0] = ZlibDeflate | ZlibWin32k;
+
+ /* bogus zlib encoding of compression level */
+ buf[1] = ((level > 2) + (level > 5) + (level > 8)) << 6;
+
+ /* header check field */
+ buf[1] |= 31 - ((buf[0] << 8) | buf[1]) % 31;
+ if((*w)(wr, buf, 2) != 2)
+ return FlateOutputFail;
+
+ zr.rr = rr;
+ zr.r = r;
+ zr.adler = 1;
+ ok = deflate(wr, w, &zr, zlread, level, debug);
+ if(ok != FlateOk)
+ return ok;
+
+ buf[0] = zr.adler >> 24;
+ buf[1] = zr.adler >> 16;
+ buf[2] = zr.adler >> 8;
+ buf[3] = zr.adler;
+ if((*w)(wr, buf, 4) != 4)
+ return FlateOutputFail;
+
+ return FlateOk;
+}
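
The check-field line above forces the 16-bit zlib header to be a multiple of 31, as RFC 1950 requires. A worked check of that arithmetic, assuming level 6: the method/window byte is 0x78, the level bits are 0x80, and 0x7880 % 31 == 3, so the header becomes the familiar 78 9c pair:

#include <u.h>
#include <libc.h>

void
main(void)
{
	uchar b0, b1;

	b0 = 0x08 | 0x70;				/* ZlibDeflate | ZlibWin32k */
	b1 = ((6 > 2) + (6 > 5) + (6 > 8)) << 6;	/* level 6 -> 0x80 */
	b1 |= 31 - ((b0 << 8) | b1) % 31;		/* 0x7880 % 31 == 3, so add 28 */
	print("%.2ux %.2ux\n", b0, b1);			/* prints 78 9c */
	exits(nil);
}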
diff --git a/src/libflate/deflatezlibblock.c b/src/libflate/deflatezlibblock.c
new file mode 100644
index 00000000..1c030dc3
--- /dev/null
+++ b/src/libflate/deflatezlibblock.c
@@ -0,0 +1,34 @@
+#include <u.h>
+#include <libc.h>
+#include <flate.h>
+#include "zlib.h"
+
+int
+deflatezlibblock(uchar *dst, int dsize, uchar *src, int ssize, int level, int debug)
+{
+ ulong adler;
+ int n;
+
+ if(dsize < 6)
+ return FlateOutputFail;
+
+ n = deflateblock(dst + 2, dsize - 6, src, ssize, level, debug);
+ if(n < 0)
+ return n;
+
+ dst[0] = ZlibDeflate | ZlibWin32k;
+
+ /* bogus zlib encoding of compression level */
+ dst[1] = ((level > 2) + (level > 5) + (level > 8)) << 6;
+
+ /* header check field */
+ dst[1] |= 31 - ((dst[0] << 8) | dst[1]) % 31;
+
+ adler = adler32(1, src, ssize);
+ dst[n + 2] = adler >> 24;
+ dst[n + 3] = adler >> 16;
+ dst[n + 4] = adler >> 8;
+ dst[n + 5] = adler;
+
+ return n + 6;
+}
diff --git a/src/libflate/flateerr.c b/src/libflate/flateerr.c
new file mode 100644
index 00000000..e442fd5b
--- /dev/null
+++ b/src/libflate/flateerr.c
@@ -0,0 +1,23 @@
+#include <u.h>
+#include <libc.h>
+#include <flate.h>
+
+char *
+flateerr(int err)
+{
+ switch(err){
+ case FlateOk:
+ return "no error";
+ case FlateNoMem:
+ return "out of memory";
+ case FlateInputFail:
+ return "input error";
+ case FlateOutputFail:
+ return "output error";
+ case FlateCorrupted:
+ return "corrupted data";
+ case FlateInternal:
+ return "internal error";
+ }
+ return "unknown error";
+}
diff --git a/src/libflate/inflate.c b/src/libflate/inflate.c
new file mode 100644
index 00000000..f979084c
--- /dev/null
+++ b/src/libflate/inflate.c
@@ -0,0 +1,693 @@
+#include <u.h>
+#include <libc.h>
+#include <flate.h>
+
+enum {
+ HistorySize= 32*1024,
+ BufSize= 4*1024,
+ MaxHuffBits= 17, /* maximum bits in an encoded code */
+ Nlitlen= 288, /* number of litlen codes */
+ Noff= 32, /* number of offset codes */
+ Nclen= 19, /* number of codelen codes */
+ LenShift= 10, /* code = len<<LenShift|code */
+ LitlenBits= 7, /* number of bits in litlen decode table */
+ OffBits= 6, /* number of bits in offset decode table */
+ ClenBits= 6, /* number of bits in code len decode table */
+ MaxFlatBits= LitlenBits,
+ MaxLeaf= Nlitlen
+};
+
+typedef struct Input Input;
+typedef struct History History;
+typedef struct Huff Huff;
+
+struct Input
+{
+ int error; /* first error encountered, or FlateOk */
+ void *wr;
+ int (*w)(void*, void*, int);
+ void *getr;
+ int (*get)(void*);
+ ulong sreg;
+ int nbits;
+};
+
+struct History
+{
+ uchar his[HistorySize];
+ uchar *cp; /* current pointer in history */
+ int full; /* his has been filled up at least once */
+};
+
+struct Huff
+{
+ int maxbits; /* max bits for any code */
+ int minbits; /* min bits to get before looking in flat */
+ int flatmask; /* bits used in "flat" fast decoding table */
+ ulong flat[1<<MaxFlatBits];
+ ulong maxcode[MaxHuffBits];
+ ulong last[MaxHuffBits];
+ ulong decode[MaxLeaf];
+};
+
+/* litlen code words 257-285 extra bits */
+static int litlenextra[Nlitlen-257] =
+{
+/* 257 */ 0, 0, 0,
+/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
+/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
+/* 280 */ 4, 5, 5, 5, 5, 0, 0, 0
+};
+
+static int litlenbase[Nlitlen-257];
+
+/* offset code word extra bits */
+static int offextra[Noff] =
+{
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
+ 0, 0,
+};
+static int offbase[Noff];
+
+/* order code lengths */
+static int clenorder[Nclen] =
+{
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
+};
+
+/* for static huffman tables */
+static Huff litlentab;
+static Huff offtab;
+static uchar revtab[256];
+
+static int uncblock(Input *in, History*);
+static int fixedblock(Input *in, History*);
+static int dynamicblock(Input *in, History*);
+static int sregfill(Input *in, int n);
+static int sregunget(Input *in);
+static int decode(Input*, History*, Huff*, Huff*);
+static int hufftab(Huff*, char*, int, int);
+static int hdecsym(Input *in, Huff *h, int b);
+
+int
+inflateinit(void)
+{
+ char *len;
+ int i, j, base;
+
+ /* byte reverse table */
+ for(i=0; i<256; i++)
+ for(j=0; j<8; j++)
+ if(i & (1<<j))
+ revtab[i] |= 0x80 >> j;
+
+ for(i=257,base=3; i<Nlitlen; i++) {
+ litlenbase[i-257] = base;
+ base += 1<<litlenextra[i-257];
+ }
+ /* strange table entry in spec... */
+ litlenbase[285-257]--;
+
+ for(i=0,base=1; i<Noff; i++) {
+ offbase[i] = base;
+ base += 1<<offextra[i];
+ }
+
+ len = malloc(MaxLeaf);
+ if(len == nil)
+ return FlateNoMem;
+
+ /* static Litlen bit lengths */
+ for(i=0; i<144; i++)
+ len[i] = 8;
+ for(i=144; i<256; i++)
+ len[i] = 9;
+ for(i=256; i<280; i++)
+ len[i] = 7;
+ for(i=280; i<Nlitlen; i++)
+ len[i] = 8;
+
+ if(!hufftab(&litlentab, len, Nlitlen, MaxFlatBits))
+ return FlateInternal;
+
+ /* static Offset bit lengths */
+ for(i=0; i<Noff; i++)
+ len[i] = 5;
+
+ if(!hufftab(&offtab, len, Noff, MaxFlatBits))
+ return FlateInternal;
+ free(len);
+
+ return FlateOk;
+}
+
+int
+inflate(void *wr, int (*w)(void*, void*, int), void *getr, int (*get)(void*))
+{
+ History *his;
+ Input in;
+ int final, type;
+
+ his = malloc(sizeof(History));
+ if(his == nil)
+ return FlateNoMem;
+ his->cp = his->his;
+ his->full = 0;
+ in.getr = getr;
+ in.get = get;
+ in.wr = wr;
+ in.w = w;
+ in.nbits = 0;
+ in.sreg = 0;
+ in.error = FlateOk;
+
+ do {
+ if(!sregfill(&in, 3))
+ goto bad;
+ final = in.sreg & 0x1;
+ type = (in.sreg>>1) & 0x3;
+ in.sreg >>= 3;
+ in.nbits -= 3;
+ switch(type) {
+ default:
+ in.error = FlateCorrupted;
+ goto bad;
+ case 0:
+ /* uncompressed */
+ if(!uncblock(&in, his))
+ goto bad;
+ break;
+ case 1:
+ /* fixed huffman */
+ if(!fixedblock(&in, his))
+ goto bad;
+ break;
+ case 2:
+ /* dynamic huffman */
+ if(!dynamicblock(&in, his))
+ goto bad;
+ break;
+ }
+ } while(!final);
+
+ if(his->cp != his->his && (*w)(wr, his->his, his->cp - his->his) != his->cp - his->his) {
+ in.error = FlateOutputFail;
+ goto bad;
+ }
+
+ if(!sregunget(&in))
+ goto bad;
+
+ free(his);
+ if(in.error != FlateOk)
+ return FlateInternal;
+ return FlateOk;
+
+bad:
+ free(his);
+ if(in.error == FlateOk)
+ return FlateInternal;
+ return in.error;
+}
+
+static int
+uncblock(Input *in, History *his)
+{
+ int len, nlen, c;
+ uchar *hs, *hp, *he;
+
+ if(!sregunget(in))
+ return 0;
+ len = (*in->get)(in->getr);
+ len |= (*in->get)(in->getr)<<8;
+ nlen = (*in->get)(in->getr);
+ nlen |= (*in->get)(in->getr)<<8;
+ if(len != (~nlen&0xffff)) {
+ in->error = FlateCorrupted;
+ return 0;
+ }
+
+ hp = his->cp;
+ hs = his->his;
+ he = hs + HistorySize;
+
+ while(len > 0) {
+ c = (*in->get)(in->getr);
+ if(c < 0)
+ return 0;
+ *hp++ = c;
+ if(hp == he) {
+ his->full = 1;
+ if((*in->w)(in->wr, hs, HistorySize) != HistorySize) {
+ in->error = FlateOutputFail;
+ return 0;
+ }
+ hp = hs;
+ }
+ len--;
+ }
+
+ his->cp = hp;
+
+ return 1;
+}
+
+static int
+fixedblock(Input *in, History *his)
+{
+ return decode(in, his, &litlentab, &offtab);
+}
+
+static int
+dynamicblock(Input *in, History *his)
+{
+ Huff *lentab, *offtab;
+ char *len;
+ int i, j, n, c, nlit, ndist, nclen, res, nb;
+
+ if(!sregfill(in, 14))
+ return 0;
+ nlit = (in->sreg&0x1f) + 257;
+ ndist = ((in->sreg>>5) & 0x1f) + 1;
+ nclen = ((in->sreg>>10) & 0xf) + 4;
+ in->sreg >>= 14;
+ in->nbits -= 14;
+
+ if(nlit > Nlitlen || ndist > Noff || nlit < 257) {
+ in->error = FlateCorrupted;
+ return 0;
+ }
+
+ /* huff table header */
+ len = malloc(Nlitlen+Noff);
+ lentab = malloc(sizeof(Huff));
+ offtab = malloc(sizeof(Huff));
+ if(len == nil || lentab == nil || offtab == nil){
+ in->error = FlateNoMem;
+ goto bad;
+ }
+ for(i=0; i < Nclen; i++)
+ len[i] = 0;
+ for(i=0; i<nclen; i++) {
+ if(!sregfill(in, 3))
+ goto bad;
+ len[clenorder[i]] = in->sreg & 0x7;
+ in->sreg >>= 3;
+ in->nbits -= 3;
+ }
+
+ if(!hufftab(lentab, len, Nclen, ClenBits)){
+ in->error = FlateCorrupted;
+ goto bad;
+ }
+
+ n = nlit+ndist;
+ for(i=0; i<n;) {
+ nb = lentab->minbits;
+ for(;;){
+ if(in->nbits<nb && !sregfill(in, nb))
+ goto bad;
+ c = lentab->flat[in->sreg & lentab->flatmask];
+ nb = c & 0xff;
+ if(nb > in->nbits){
+ if(nb != 0xff)
+ continue;
+ c = hdecsym(in, lentab, c);
+ if(c < 0)
+ goto bad;
+ }else{
+ c >>= 8;
+ in->sreg >>= nb;
+ in->nbits -= nb;
+ }
+ break;
+ }
+
+ if(c < 16) {
+ j = 1;
+ } else if(c == 16) {
+ if(in->nbits<2 && !sregfill(in, 2))
+ goto bad;
+ j = (in->sreg&0x3)+3;
+ in->sreg >>= 2;
+ in->nbits -= 2;
+ if(i == 0) {
+ in->error = FlateCorrupted;
+ goto bad;
+ }
+ c = len[i-1];
+ } else if(c == 17) {
+ if(in->nbits<3 && !sregfill(in, 3))
+ goto bad;
+ j = (in->sreg&0x7)+3;
+ in->sreg >>= 3;
+ in->nbits -= 3;
+ c = 0;
+ } else if(c == 18) {
+ if(in->nbits<7 && !sregfill(in, 7))
+ goto bad;
+ j = (in->sreg&0x7f)+11;
+ in->sreg >>= 7;
+ in->nbits -= 7;
+ c = 0;
+ } else {
+ in->error = FlateCorrupted;
+ goto bad;
+ }
+
+ if(i+j > n) {
+ in->error = FlateCorrupted;
+ goto bad;
+ }
+
+ while(j) {
+ len[i] = c;
+ i++;
+ j--;
+ }
+ }
+
+ if(!hufftab(lentab, len, nlit, LitlenBits)
+ || !hufftab(offtab, &len[nlit], ndist, OffBits)){
+ in->error = FlateCorrupted;
+ goto bad;
+ }
+
+ res = decode(in, his, lentab, offtab);
+
+ free(len);
+ free(lentab);
+ free(offtab);
+
+ return res;
+
+bad:
+ free(len);
+ free(lentab);
+ free(offtab);
+ return 0;
+}
+
+static int
+decode(Input *in, History *his, Huff *litlentab, Huff *offtab)
+{
+ int len, off;
+ uchar *hs, *hp, *hq, *he;
+ int c;
+ int nb;
+
+ hs = his->his;
+ he = hs + HistorySize;
+ hp = his->cp;
+
+ for(;;) {
+ nb = litlentab->minbits;
+ for(;;){
+ if(in->nbits<nb && !sregfill(in, nb))
+ return 0;
+ c = litlentab->flat[in->sreg & litlentab->flatmask];
+ nb = c & 0xff;
+ if(nb > in->nbits){
+ if(nb != 0xff)
+ continue;
+ c = hdecsym(in, litlentab, c);
+ if(c < 0)
+ return 0;
+ }else{
+ c >>= 8;
+ in->sreg >>= nb;
+ in->nbits -= nb;
+ }
+ break;
+ }
+
+ if(c < 256) {
+ /* literal */
+ *hp++ = c;
+ if(hp == he) {
+ his->full = 1;
+ if((*in->w)(in->wr, hs, HistorySize) != HistorySize) {
+ in->error = FlateOutputFail;
+ return 0;
+ }
+ hp = hs;
+ }
+ continue;
+ }
+
+ if(c == 256)
+ break;
+
+ if(c > 285) {
+ in->error = FlateCorrupted;
+ return 0;
+ }
+
+ c -= 257;
+ nb = litlenextra[c];
+ if(in->nbits < nb && !sregfill(in, nb))
+ return 0;
+ len = litlenbase[c] + (in->sreg & ((1<<nb)-1));
+ in->sreg >>= nb;
+ in->nbits -= nb;
+
+ /* get offset */
+ nb = offtab->minbits;
+ for(;;){
+ if(in->nbits<nb && !sregfill(in, nb))
+ return 0;
+ c = offtab->flat[in->sreg & offtab->flatmask];
+ nb = c & 0xff;
+ if(nb > in->nbits){
+ if(nb != 0xff)
+ continue;
+ c = hdecsym(in, offtab, c);
+ if(c < 0)
+ return 0;
+ }else{
+ c >>= 8;
+ in->sreg >>= nb;
+ in->nbits -= nb;
+ }
+ break;
+ }
+
+ if(c > 29) {
+ in->error = FlateCorrupted;
+ return 0;
+ }
+
+ nb = offextra[c];
+ if(in->nbits < nb && !sregfill(in, nb))
+ return 0;
+
+ off = offbase[c] + (in->sreg & ((1<<nb)-1));
+ in->sreg >>= nb;
+ in->nbits -= nb;
+
+ hq = hp - off;
+ if(hq < hs) {
+ if(!his->full) {
+ in->error = FlateCorrupted;
+ return 0;
+ }
+ hq += HistorySize;
+ }
+
+ /* slow but correct */
+ while(len) {
+ *hp = *hq;
+ hq++;
+ hp++;
+ if(hq >= he)
+ hq = hs;
+ if(hp == he) {
+ his->full = 1;
+ if((*in->w)(in->wr, hs, HistorySize) != HistorySize) {
+ in->error = FlateOutputFail;
+ return 0;
+ }
+ hp = hs;
+ }
+ len--;
+ }
+
+ }
+
+ his->cp = hp;
+
+ return 1;
+}
+
+static int
+revcode(int c, int b)
+{
+ /* shift encode up so it starts on bit 15 then reverse */
+ c <<= (16-b);
+ c = revtab[c>>8] | (revtab[c&0xff]<<8);
+ return c;
+}
+
+/*
+ * construct the huffman decoding arrays and a fast lookup table.
+ * the fast lookup is a table indexed by the next flatbits bits,
+ * which returns the symbol matched and the number of bits consumed,
+ * or the minimum number of bits needed and 0xff if more than flatbits
+ * bits are needed.
+ *
+ * flatbits can be longer than the smallest huffman code,
+ * because shorter codes are assigned smaller lexical prefixes.
+ * this means assuming zeros for the next few bits will give a
+ * conservative answer, in the sense that it will either give the
+ * correct answer, or return the minimum number of bits which
+ * are needed for an answer.
+ */
+static int
+hufftab(Huff *h, char *hb, int maxleaf, int flatbits)
+{
+ ulong bitcount[MaxHuffBits];
+ ulong c, fc, ec, mincode, code, nc[MaxHuffBits];
+ int i, b, minbits, maxbits;
+
+ for(i = 0; i < MaxHuffBits; i++)
+ bitcount[i] = 0;
+ maxbits = -1;
+ minbits = MaxHuffBits + 1;
+ for(i=0; i < maxleaf; i++){
+ b = hb[i];
+ if(b){
+ bitcount[b]++;
+ if(b < minbits)
+ minbits = b;
+ if(b > maxbits)
+ maxbits = b;
+ }
+ }
+
+ h->maxbits = maxbits;
+ if(maxbits <= 0){
+ h->maxbits = 0;
+ h->minbits = 0;
+ h->flatmask = 0;
+ return 1;
+ }
+ code = 0;
+ c = 0;
+ for(b = 0; b <= maxbits; b++){
+ h->last[b] = c;
+ c += bitcount[b];
+ mincode = code << 1;
+ nc[b] = mincode;
+ code = mincode + bitcount[b];
+ if(code > (1 << b))
+ return 0;
+ h->maxcode[b] = code - 1;
+ h->last[b] += code - 1;
+ }
+
+ if(flatbits > maxbits)
+ flatbits = maxbits;
+ h->flatmask = (1 << flatbits) - 1;
+ if(minbits > flatbits)
+ minbits = flatbits;
+ h->minbits = minbits;
+
+ b = 1 << flatbits;
+ for(i = 0; i < b; i++)
+ h->flat[i] = ~0;
+
+ /*
+ * initialize the flat table to include the minimum possible
+ * bit length for each code prefix
+ */
+ for(b = maxbits; b > flatbits; b--){
+ code = h->maxcode[b];
+ if(code == -1)
+ break;
+ mincode = code + 1 - bitcount[b];
+ mincode >>= b - flatbits;
+ code >>= b - flatbits;
+ for(; mincode <= code; mincode++)
+ h->flat[revcode(mincode, flatbits)] = (b << 8) | 0xff;
+ }
+
+ for(i = 0; i < maxleaf; i++){
+ b = hb[i];
+ if(b <= 0)
+ continue;
+ c = nc[b]++;
+ if(b <= flatbits){
+ code = (i << 8) | b;
+ ec = (c + 1) << (flatbits - b);
+ if(ec > (1<<flatbits))
+ return 0; /* this is actually an internal error */
+ for(fc = c << (flatbits - b); fc < ec; fc++)
+ h->flat[revcode(fc, flatbits)] = code;
+ }
+ if(b > minbits){
+ c = h->last[b] - c;
+ if(c >= maxleaf)
+ return 0;
+ h->decode[c] = i;
+ }
+ }
+ return 1;
+}
+
+static int
+hdecsym(Input *in, Huff *h, int nb)
+{
+ long c;
+
+ if((nb & 0xff) == 0xff)
+ nb = nb >> 8;
+ else
+ nb = nb & 0xff;
+ for(; nb <= h->maxbits; nb++){
+ if(in->nbits<nb && !sregfill(in, nb))
+ return -1;
+ c = revtab[in->sreg&0xff]<<8;
+ c |= revtab[(in->sreg>>8)&0xff];
+ c >>= (16-nb);
+ if(c <= h->maxcode[nb]){
+ in->sreg >>= nb;
+ in->nbits -= nb;
+ return h->decode[h->last[nb] - c];
+ }
+ }
+ in->error = FlateCorrupted;
+ return -1;
+}
+
+static int
+sregfill(Input *in, int n)
+{
+ int c;
+
+ while(n > in->nbits) {
+ c = (*in->get)(in->getr);
+ if(c < 0){
+ in->error = FlateInputFail;
+ return 0;
+ }
+ in->sreg |= c<<in->nbits;
+ in->nbits += 8;
+ }
+ return 1;
+}
+
+static int
+sregunget(Input *in)
+{
+ if(in->nbits >= 8) {
+ in->error = FlateInternal;
+ return 0;
+ }
+
+ /* throw other bits on the floor */
+ in->nbits = 0;
+ in->sreg = 0;
+ return 1;
+}
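
inflate mirrors deflate's callback interface, except the reader delivers one byte at a time. A sketch of hooking it to file descriptors; the unbuffered fdgetc is deliberately simple and, like the rest of this example, an illustration rather than part of the library:

#include <u.h>
#include <libc.h>
#include <flate.h>

/* one-byte reader; a real caller would buffer, this keeps the sketch short */
static int
fdgetc(void *v)
{
	uchar c;

	if(read((int)(uintptr)v, &c, 1) != 1)
		return -1;
	return c;
}

static int
fdwrite(void *v, void *buf, int n)
{
	return write((int)(uintptr)v, buf, n);
}

void
main(void)
{
	int err;

	if(inflateinit() != FlateOk)
		sysfatal("inflateinit failed");
	/* decompress raw deflate data from fd 0 to fd 1 */
	err = inflate((void*)1, fdwrite, (void*)0, fdgetc);
	if(err != FlateOk)
		sysfatal("inflate: %s", flateerr(err));
	exits(nil);
}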
diff --git a/src/libflate/inflateblock.c b/src/libflate/inflateblock.c
new file mode 100644
index 00000000..77077b62
--- /dev/null
+++ b/src/libflate/inflateblock.c
@@ -0,0 +1,54 @@
+#include <u.h>
+#include <libc.h>
+#include <flate.h>
+
+typedef struct Block Block;
+
+struct Block
+{
+ uchar *pos;
+ uchar *limit;
+};
+
+static int
+blgetc(void *vb)
+{
+ Block *b;
+
+ b = vb;
+ if(b->pos >= b->limit)
+ return -1;
+ return *b->pos++;
+}
+
+static int
+blwrite(void *vb, void *buf, int n)
+{
+ Block *b;
+
+ b = vb;
+
+ if(n > b->limit - b->pos)
+ n = b->limit - b->pos;
+ memmove(b->pos, buf, n);
+ b->pos += n;
+ return n;
+}
+
+int
+inflateblock(uchar *dst, int dsize, uchar *src, int ssize)
+{
+ Block bd, bs;
+ int ok;
+
+ bs.pos = src;
+ bs.limit = src + ssize;
+
+ bd.pos = dst;
+ bd.limit = dst + dsize;
+
+ ok = inflate(&bd, blwrite, &bs, blgetc);
+ if(ok != FlateOk)
+ return ok;
+ return bd.pos - dst;
+}
diff --git a/src/libflate/inflatezlib.c b/src/libflate/inflatezlib.c
new file mode 100644
index 00000000..3f31f10a
--- /dev/null
+++ b/src/libflate/inflatezlib.c
@@ -0,0 +1,66 @@
+#include <u.h>
+#include <libc.h>
+#include <flate.h>
+#include "zlib.h"
+
+typedef struct ZWrite ZWrite;
+
+struct ZWrite
+{
+ ulong adler;
+ void *wr;
+ int (*w)(void*, void*, int);
+};
+
+static int
+zlwrite(void *vzw, void *buf, int n)
+{
+ ZWrite *zw;
+
+ zw = vzw;
+ zw->adler = adler32(zw->adler, buf, n);
+ n = (*zw->w)(zw->wr, buf, n);
+ if(n <= 0)
+ return n;
+ return n;
+}
+
+int
+inflatezlib(void *wr, int (*w)(void*, void*, int), void *getr, int (*get)(void*))
+{
+ ZWrite zw;
+ ulong v;
+ int c, i;
+
+ c = (*get)(getr);
+ if(c < 0)
+ return FlateInputFail;
+ i = (*get)(getr);
+ if(i < 0)
+ return FlateInputFail;
+
+ if(((c << 8) | i) % 31)
+ return FlateCorrupted;
+ if((c & ZlibMeth) != ZlibDeflate
+ || (c & ZlibCInfo) > ZlibWin32k)
+ return FlateCorrupted;
+
+ zw.wr = wr;
+ zw.w = w;
+ zw.adler = 1;
+ i = inflate(&zw, zlwrite, getr, get);
+ if(i != FlateOk)
+ return i;
+
+ v = 0;
+ for(i = 0; i < 4; i++){
+ c = (*get)(getr);
+ if(c < 0)
+ return FlateInputFail;
+ v = (v << 8) | c;
+ }
+ if(zw.adler != v)
+ return FlateCorrupted;
+
+ return FlateOk;
+}
diff --git a/src/libflate/inflatezlibblock.c b/src/libflate/inflatezlibblock.c
new file mode 100644
index 00000000..477bb4b0
--- /dev/null
+++ b/src/libflate/inflatezlibblock.c
@@ -0,0 +1,68 @@
+#include <u.h>
+#include <libc.h>
+#include <flate.h>
+#include "zlib.h"
+
+typedef struct Block Block;
+
+struct Block
+{
+ uchar *pos;
+ uchar *limit;
+};
+
+static int
+blgetc(void *vb)
+{
+ Block *b;
+
+ b = vb;
+ if(b->pos >= b->limit)
+ return -1;
+ return *b->pos++;
+}
+
+static int
+blwrite(void *vb, void *buf, int n)
+{
+ Block *b;
+
+ b = vb;
+
+ if(n > b->limit - b->pos)
+ n = b->limit - b->pos;
+ memmove(b->pos, buf, n);
+ b->pos += n;
+ return n;
+}
+
+int
+inflatezlibblock(uchar *dst, int dsize, uchar *src, int ssize)
+{
+ Block bd, bs;
+ int ok;
+
+ if(ssize < 6)
+ return FlateInputFail;
+
+ if(((src[0] << 8) | src[1]) % 31)
+ return FlateCorrupted;
+ if((src[0] & ZlibMeth) != ZlibDeflate
+ || (src[0] & ZlibCInfo) > ZlibWin32k)
+ return FlateCorrupted;
+
+ bs.pos = src + 2;
+ bs.limit = src + ssize - 6;
+
+ bd.pos = dst;
+ bd.limit = dst + dsize;
+
+ ok = inflate(&bd, blwrite, &bs, blgetc);
+ if(ok != FlateOk)
+ return ok;
+
+ if(adler32(1, dst, bs.pos - dst) != ((bs.pos[0] << 24) | (bs.pos[1] << 16) | (bs.pos[2] << 8) | bs.pos[3]))
+ return FlateCorrupted;
+
+ return bd.pos - dst;
+}
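
A round-trip sketch tying the block interfaces together: compress a buffer with the zlib wrapper, then decompress and compare. The output slack of n/10 + 64 bytes is a conservative guess for this example, not a guarantee the library states:

#include <u.h>
#include <libc.h>
#include <flate.h>

/* hypothetical round-trip check over one buffer */
void
roundtrip(uchar *src, int n)
{
	uchar *z, *back;
	int zcap, zn, bn;

	if(deflateinit() != FlateOk || inflateinit() != FlateOk)
		sysfatal("flate init failed");
	zcap = n + n/10 + 64;	/* assumed slack for incompressible input + zlib wrapper */
	z = malloc(zcap);
	back = malloc(n);
	if(z == nil || back == nil)
		sysfatal("out of memory");
	zn = deflatezlibblock(z, zcap, src, n, 6, 0);
	if(zn < 0)
		sysfatal("deflatezlibblock: %s", flateerr(zn));
	bn = inflatezlibblock(back, n, z, zn);
	if(bn < 0)
		sysfatal("inflatezlibblock: %s", flateerr(bn));
	if(bn != n || memcmp(back, src, n) != 0)
		sysfatal("round trip mismatch");
	free(z);
	free(back);
}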
diff --git a/src/libflate/mkfile b/src/libflate/mkfile
new file mode 100644
index 00000000..074e9d07
--- /dev/null
+++ b/src/libflate/mkfile
@@ -0,0 +1,22 @@
+PLAN9=../..
+<$PLAN9/src/mkhdr
+
+LIB=libflate.a
+OFILES=\
+ deflate.$O\
+ deflatezlib.$O\
+ deflateblock.$O\
+ deflatezlibblock.$O\
+ inflate.$O\
+ inflatezlib.$O\
+ inflateblock.$O\
+ inflatezlibblock.$O\
+ flateerr.$O\
+ crc.$O\
+ adler.$O\
+
+HFILES=\
+ $PLAN9/include/flate.h\
+ zlib.h\
+
+<$PLAN9/src/mksyslib
diff --git a/src/libflate/zlib.h b/src/libflate/zlib.h
new file mode 100644
index 00000000..9669d86a
--- /dev/null
+++ b/src/libflate/zlib.h
@@ -0,0 +1,11 @@
+/*
+ * zlib header fields
+ */
+enum
+{
+ ZlibMeth = 0x0f, /* mask of compression methods */
+ ZlibDeflate = 0x08,
+
+ ZlibCInfo = 0xf0, /* mask of compression aux. info */
+ ZlibWin32k = 0x70, /* 32k history window */
+};