Diffstat (limited to 'archival/libarchive/bz')
-rw-r--r--  archival/libarchive/bz/LICENSE         |   44
-rw-r--r--  archival/libarchive/bz/README          |   90
-rw-r--r--  archival/libarchive/bz/blocksort.c     | 1072
-rw-r--r--  archival/libarchive/bz/bzlib.c         |  431
-rw-r--r--  archival/libarchive/bz/bzlib.h         |   65
-rw-r--r--  archival/libarchive/bz/bzlib_private.h |  219
-rw-r--r--  archival/libarchive/bz/compress.c      |  685
-rw-r--r--  archival/libarchive/bz/huffman.c       |  229
8 files changed, 2835 insertions, 0 deletions
diff --git a/archival/libarchive/bz/LICENSE b/archival/libarchive/bz/LICENSE
new file mode 100644
index 000000000..da4346520
--- /dev/null
+++ b/archival/libarchive/bz/LICENSE
@@ -0,0 +1,44 @@
+The bzip2 applet in busybox is based on lightly modified source
+of bzip2 version 1.0.4. The bzip2 source is distributed
+under the following conditions (copied verbatim from its LICENSE file)
+===========================================================
+
+
+This program, "bzip2", the associated library "libbzip2", and all
+documentation, are copyright (C) 1996-2006 Julian R Seward. All
+rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
+ documentation would be appreciated but is not required.
+
+3. Altered source versions must be plainly marked as such, and must
+ not be misrepresented as being the original software.
+
+4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Julian Seward, Cambridge, UK.
+jseward@bzip.org
+bzip2/libbzip2 version 1.0.4 of 20 December 2006
diff --git a/archival/libarchive/bz/README b/archival/libarchive/bz/README
new file mode 100644
index 000000000..fffd47b8a
--- /dev/null
+++ b/archival/libarchive/bz/README
@@ -0,0 +1,90 @@
+This file is an abridged version of README from bzip2 1.0.4
+Build instructions (which are not relevant to busyboxed bzip2)
+are removed.
+===========================================================
+
+
+This is the README for bzip2/libbzip2.
+This version is fully compatible with the previous public releases.
+
+------------------------------------------------------------------
+This file is part of bzip2/libbzip2, a program and library for
+lossless, block-sorting data compression.
+
+bzip2/libbzip2 version 1.0.4 of 20 December 2006
+Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
+
+Please read the WARNING, DISCLAIMER and PATENTS sections in this file.
+
+This program is released under the terms of the license contained
+in the file LICENSE.
+------------------------------------------------------------------
+
+Please read and be aware of the following:
+
+
+WARNING:
+
+ This program and library (attempts to) compress data by
+ performing several non-trivial transformations on it.
+ Unless you are 100% familiar with *all* the algorithms
+ contained herein, and with the consequences of modifying them,
+ you should NOT meddle with the compression or decompression
+ machinery. Incorrect changes can and very likely *will*
+ lead to disastrous loss of data.
+
+
+DISCLAIMER:
+
+ I TAKE NO RESPONSIBILITY FOR ANY LOSS OF DATA ARISING FROM THE
+ USE OF THIS PROGRAM/LIBRARY, HOWSOEVER CAUSED.
+
+ Every compression of a file implies an assumption that the
+ compressed file can be decompressed to reproduce the original.
+ Great efforts in design, coding and testing have been made to
+ ensure that this program works correctly. However, the complexity
+ of the algorithms, and, in particular, the presence of various
+ special cases in the code which occur with very low but non-zero
+ probability make it impossible to rule out the possibility of bugs
+ remaining in the program. DO NOT COMPRESS ANY DATA WITH THIS
+ PROGRAM UNLESS YOU ARE PREPARED TO ACCEPT THE POSSIBILITY, HOWEVER
+ SMALL, THAT THE DATA WILL NOT BE RECOVERABLE.
+
+ That is not to say this program is inherently unreliable.
+ Indeed, I very much hope the opposite is true. bzip2/libbzip2
+ has been carefully constructed and extensively tested.
+
+
+PATENTS:
+
+ To the best of my knowledge, bzip2/libbzip2 does not use any
+ patented algorithms. However, I do not have the resources
+ to carry out a patent search. Therefore I cannot give any
+ guarantee of the above statement.
+
+
+I hope you find bzip2 useful. Feel free to contact me at
+ jseward@bzip.org
+if you have any suggestions or queries. Many people mailed me with
+comments, suggestions and patches after the releases of bzip-0.15,
+bzip-0.21, and bzip2 versions 0.1pl2, 0.9.0, 0.9.5, 1.0.0, 1.0.1,
+1.0.2 and 1.0.3, and the changes in bzip2 are largely a result of this
+feedback. I thank you for your comments.
+
+bzip2's "home" is http://www.bzip.org/
+
+Julian Seward
+jseward@bzip.org
+Cambridge, UK.
+
+18 July 1996 (version 0.15)
+25 August 1996 (version 0.21)
+ 7 August 1997 (bzip2, version 0.1)
+29 August 1997 (bzip2, version 0.1pl2)
+23 August 1998 (bzip2, version 0.9.0)
+ 8 June 1999 (bzip2, version 0.9.5)
+ 4 Sept 1999 (bzip2, version 0.9.5d)
+ 5 May 2000 (bzip2, version 1.0pre8)
+30 December 2001 (bzip2, version 1.0.2pre1)
+15 February 2005 (bzip2, version 1.0.3)
+20 December 2006 (bzip2, version 1.0.4)
diff --git a/archival/libarchive/bz/blocksort.c b/archival/libarchive/bz/blocksort.c
new file mode 100644
index 000000000..f70c3701d
--- /dev/null
+++ b/archival/libarchive/bz/blocksort.c
@@ -0,0 +1,1072 @@
+/*
+ * bzip2 is written by Julian Seward <jseward@bzip.org>.
+ * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
+ * See README and LICENSE files in this directory for more information.
+ */
+
+/*-------------------------------------------------------------*/
+/*--- Block sorting machinery ---*/
+/*--- blocksort.c ---*/
+/*-------------------------------------------------------------*/
+
+/* ------------------------------------------------------------------
+This file is part of bzip2/libbzip2, a program and library for
+lossless, block-sorting data compression.
+
+bzip2/libbzip2 version 1.0.4 of 20 December 2006
+Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
+
+Please read the WARNING, DISCLAIMER and PATENTS sections in the
+README file.
+
+This program is released under the terms of the license contained
+in the file LICENSE.
+------------------------------------------------------------------ */
+
+/* #include "bzlib_private.h" */
+
+#define mswap(zz1, zz2) \
+{ \
+ int32_t zztmp = zz1; \
+ zz1 = zz2; \
+ zz2 = zztmp; \
+}
+
+static
+/* No measurable speed gain with inlining */
+/* ALWAYS_INLINE */
+void mvswap(uint32_t* ptr, int32_t zzp1, int32_t zzp2, int32_t zzn)
+{
+ while (zzn > 0) {
+ mswap(ptr[zzp1], ptr[zzp2]);
+ zzp1++;
+ zzp2++;
+ zzn--;
+ }
+}
+
+static
+ALWAYS_INLINE
+int32_t mmin(int32_t a, int32_t b)
+{
+ return (a < b) ? a : b;
+}
+
+
+/*---------------------------------------------*/
+/*--- Fallback O(N log(N)^2) sorting ---*/
+/*--- algorithm, for repetitive blocks ---*/
+/*---------------------------------------------*/
+
+/*---------------------------------------------*/
+static
+inline
+void fallbackSimpleSort(uint32_t* fmap,
+ uint32_t* eclass,
+ int32_t lo,
+ int32_t hi)
+{
+ int32_t i, j, tmp;
+ uint32_t ec_tmp;
+
+ if (lo == hi) return;
+
+ if (hi - lo > 3) {
+ for (i = hi-4; i >= lo; i--) {
+ tmp = fmap[i];
+ ec_tmp = eclass[tmp];
+ for (j = i+4; j <= hi && ec_tmp > eclass[fmap[j]]; j += 4)
+ fmap[j-4] = fmap[j];
+ fmap[j-4] = tmp;
+ }
+ }
+
+ for (i = hi-1; i >= lo; i--) {
+ tmp = fmap[i];
+ ec_tmp = eclass[tmp];
+ for (j = i+1; j <= hi && ec_tmp > eclass[fmap[j]]; j++)
+ fmap[j-1] = fmap[j];
+ fmap[j-1] = tmp;
+ }
+}
+
+
+/*---------------------------------------------*/
+#define fpush(lz,hz) { \
+ stackLo[sp] = lz; \
+ stackHi[sp] = hz; \
+ sp++; \
+}
+
+#define fpop(lz,hz) { \
+ sp--; \
+ lz = stackLo[sp]; \
+ hz = stackHi[sp]; \
+}
+
+#define FALLBACK_QSORT_SMALL_THRESH 10
+#define FALLBACK_QSORT_STACK_SIZE 100
+
+static
+void fallbackQSort3(uint32_t* fmap,
+ uint32_t* eclass,
+ int32_t loSt,
+ int32_t hiSt)
+{
+ int32_t unLo, unHi, ltLo, gtHi, n, m;
+ int32_t sp, lo, hi;
+ uint32_t med, r, r3;
+ int32_t stackLo[FALLBACK_QSORT_STACK_SIZE];
+ int32_t stackHi[FALLBACK_QSORT_STACK_SIZE];
+
+ r = 0;
+
+ sp = 0;
+ fpush(loSt, hiSt);
+
+ while (sp > 0) {
+ AssertH(sp < FALLBACK_QSORT_STACK_SIZE - 1, 1004);
+
+ fpop(lo, hi);
+ if (hi - lo < FALLBACK_QSORT_SMALL_THRESH) {
+ fallbackSimpleSort(fmap, eclass, lo, hi);
+ continue;
+ }
+
+ /* Random partitioning. Median of 3 sometimes fails to
+ * avoid bad cases. Median of 9 seems to help but
+ * looks rather expensive. This too seems to work but
+ * is cheaper. Guidance for the magic constants
+ * 7621 and 32768 is taken from Sedgewick's algorithms
+ * book, chapter 35.
+ */
+ r = ((r * 7621) + 1) % 32768;
+ r3 = r % 3;
+ if (r3 == 0)
+ med = eclass[fmap[lo]];
+ else if (r3 == 1)
+ med = eclass[fmap[(lo+hi)>>1]];
+ else
+ med = eclass[fmap[hi]];
+
+ unLo = ltLo = lo;
+ unHi = gtHi = hi;
+
+ while (1) {
+ while (1) {
+ if (unLo > unHi) break;
+ n = (int32_t)eclass[fmap[unLo]] - (int32_t)med;
+ if (n == 0) {
+ mswap(fmap[unLo], fmap[ltLo]);
+ ltLo++;
+ unLo++;
+ continue;
+ };
+ if (n > 0) break;
+ unLo++;
+ }
+ while (1) {
+ if (unLo > unHi) break;
+ n = (int32_t)eclass[fmap[unHi]] - (int32_t)med;
+ if (n == 0) {
+ mswap(fmap[unHi], fmap[gtHi]);
+ gtHi--; unHi--;
+ continue;
+ };
+ if (n < 0) break;
+ unHi--;
+ }
+ if (unLo > unHi) break;
+ mswap(fmap[unLo], fmap[unHi]); unLo++; unHi--;
+ }
+
+ AssertD(unHi == unLo-1, "fallbackQSort3(2)");
+
+ if (gtHi < ltLo) continue;
+
+ n = mmin(ltLo-lo, unLo-ltLo); mvswap(fmap, lo, unLo-n, n);
+ m = mmin(hi-gtHi, gtHi-unHi); mvswap(fmap, unLo, hi-m+1, m);
+
+ n = lo + unLo - ltLo - 1;
+ m = hi - (gtHi - unHi) + 1;
+
+ if (n - lo > hi - m) {
+ fpush(lo, n);
+ fpush(m, hi);
+ } else {
+ fpush(m, hi);
+ fpush(lo, n);
+ }
+ }
+}
+
+#undef fpush
+#undef fpop
+#undef FALLBACK_QSORT_SMALL_THRESH
+#undef FALLBACK_QSORT_STACK_SIZE
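
The pivot selection in fallbackQSort3() steps a tiny linear congruential
generator, r = (7621*r + 1) mod 32768, and uses r mod 3 to choose among the
low, middle, and high elements. As a minimal standalone sketch (not part of
this commit), here are the first few picks, assuming r starts at 0 as above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t r = 0;
	int i;

	for (i = 0; i < 12; i++) {
		r = ((r * 7621) + 1) % 32768;
		/* 0 -> pivot from lo, 1 -> from mid, 2 -> from hi */
		printf("%u ", r % 3);
	}
	printf("\n");
	return 0;
}
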
+
+
+/*---------------------------------------------*/
+/* Pre:
+ * nblock > 0
+ * eclass exists for [0 .. nblock-1]
+ * ((uint8_t*)eclass) [0 .. nblock-1] holds block
+ * ptr exists for [0 .. nblock-1]
+ *
+ * Post:
+ * ((uint8_t*)eclass) [0 .. nblock-1] holds block
+ * All other areas of eclass destroyed
+ * fmap [0 .. nblock-1] holds sorted order
+ * bhtab[0 .. 2+(nblock/32)] destroyed
+*/
+
+#define SET_BH(zz) bhtab[(zz) >> 5] |= (1 << ((zz) & 31))
+#define CLEAR_BH(zz) bhtab[(zz) >> 5] &= ~(1 << ((zz) & 31))
+#define ISSET_BH(zz) (bhtab[(zz) >> 5] & (1 << ((zz) & 31)))
+#define WORD_BH(zz) bhtab[(zz) >> 5]
+#define UNALIGNED_BH(zz) ((zz) & 0x01f)
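
These macros implement a flat bit array over 32-bit words: bit zz lives in
word zz >> 5, at position zz & 31. A self-contained sketch of the same
indexing (illustration only, not part of this commit):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bhtab[4] = { 0 };  /* room for 128 bits */
	int zz = 70;                /* arbitrary bit to test */

	bhtab[zz >> 5] |= (uint32_t)1 << (zz & 31);     /* SET_BH   */
	printf("set: %d\n",
		!!(bhtab[zz >> 5] & ((uint32_t)1 << (zz & 31)))); /* ISSET_BH */
	bhtab[zz >> 5] &= ~((uint32_t)1 << (zz & 31));  /* CLEAR_BH */
	printf("set: %d\n",
		!!(bhtab[zz >> 5] & ((uint32_t)1 << (zz & 31))));
	return 0;
}
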
+
+static
+void fallbackSort(uint32_t* fmap,
+ uint32_t* eclass,
+ uint32_t* bhtab,
+ int32_t nblock)
+{
+ int32_t ftab[257];
+ int32_t ftabCopy[256];
+ int32_t H, i, j, k, l, r, cc, cc1;
+ int32_t nNotDone;
+ int32_t nBhtab;
+ uint8_t* eclass8 = (uint8_t*)eclass;
+
+ /*
+ * Initial 1-char radix sort to generate
+ * initial fmap and initial BH bits.
+ */
+ for (i = 0; i < 257; i++) ftab[i] = 0;
+ for (i = 0; i < nblock; i++) ftab[eclass8[i]]++;
+ for (i = 0; i < 256; i++) ftabCopy[i] = ftab[i];
+
+ j = ftab[0]; /* bbox: optimized */
+ for (i = 1; i < 257; i++) {
+ j += ftab[i];
+ ftab[i] = j;
+ }
+
+ for (i = 0; i < nblock; i++) {
+ j = eclass8[i];
+ k = ftab[j] - 1;
+ ftab[j] = k;
+ fmap[k] = i;
+ }
+
+ nBhtab = 2 + ((uint32_t)nblock / 32); /* bbox: unsigned div is easier */
+ for (i = 0; i < nBhtab; i++) bhtab[i] = 0;
+ for (i = 0; i < 256; i++) SET_BH(ftab[i]);
+
+ /*
+ * Inductively refine the buckets. Kind-of an
+ * "exponential radix sort" (!), inspired by the
+ * Manber-Myers suffix array construction algorithm.
+ */
+
+ /*-- set sentinel bits for block-end detection --*/
+ for (i = 0; i < 32; i++) {
+ SET_BH(nblock + 2*i);
+ CLEAR_BH(nblock + 2*i + 1);
+ }
+
+ /*-- the log(N) loop --*/
+ H = 1;
+ while (1) {
+ j = 0;
+ for (i = 0; i < nblock; i++) {
+ if (ISSET_BH(i))
+ j = i;
+ k = fmap[i] - H;
+ if (k < 0)
+ k += nblock;
+ eclass[k] = j;
+ }
+
+ nNotDone = 0;
+ r = -1;
+ while (1) {
+
+ /*-- find the next non-singleton bucket --*/
+ k = r + 1;
+ while (ISSET_BH(k) && UNALIGNED_BH(k))
+ k++;
+ if (ISSET_BH(k)) {
+ while (WORD_BH(k) == 0xffffffff) k += 32;
+ while (ISSET_BH(k)) k++;
+ }
+ l = k - 1;
+ if (l >= nblock)
+ break;
+ while (!ISSET_BH(k) && UNALIGNED_BH(k))
+ k++;
+ if (!ISSET_BH(k)) {
+ while (WORD_BH(k) == 0x00000000) k += 32;
+ while (!ISSET_BH(k)) k++;
+ }
+ r = k - 1;
+ if (r >= nblock)
+ break;
+
+ /*-- now [l, r] bracket current bucket --*/
+ if (r > l) {
+ nNotDone += (r - l + 1);
+ fallbackQSort3(fmap, eclass, l, r);
+
+ /*-- scan bucket and generate header bits-- */
+ cc = -1;
+ for (i = l; i <= r; i++) {
+ cc1 = eclass[fmap[i]];
+ if (cc != cc1) {
+ SET_BH(i);
+ cc = cc1;
+ };
+ }
+ }
+ }
+
+ H *= 2;
+ if (H > nblock || nNotDone == 0)
+ break;
+ }
+
+ /*
+ * Reconstruct the original block in
+ * eclass8 [0 .. nblock-1], since the
+ * previous phase destroyed it.
+ */
+ j = 0;
+ for (i = 0; i < nblock; i++) {
+ while (ftabCopy[j] == 0)
+ j++;
+ ftabCopy[j]--;
+ eclass8[fmap[i]] = (uint8_t)j;
+ }
+ AssertH(j < 256, 1005);
+}
+
+#undef SET_BH
+#undef CLEAR_BH
+#undef ISSET_BH
+#undef WORD_BH
+#undef UNALIGNED_BH
+
+
+/*---------------------------------------------*/
+/*--- The main, O(N^2 log(N)) sorting ---*/
+/*--- algorithm. Faster for "normal" ---*/
+/*--- non-repetitive blocks. ---*/
+/*---------------------------------------------*/
+
+/*---------------------------------------------*/
+static
+NOINLINE
+int mainGtU(
+ uint32_t i1,
+ uint32_t i2,
+ uint8_t* block,
+ uint16_t* quadrant,
+ uint32_t nblock,
+ int32_t* budget)
+{
+ int32_t k;
+ uint8_t c1, c2;
+ uint16_t s1, s2;
+
+/* Loop unrolling here is actually very useful
+ * (generated code is much simpler),
+ * code size increase is only 270 bytes (i386)
+ * but speeds up compression 10% overall
+ */
+
+#if CONFIG_BZIP2_FEATURE_SPEED >= 1
+
+#define TIMES_8(code) \
+ code; code; code; code; \
+ code; code; code; code;
+#define TIMES_12(code) \
+ code; code; code; code; \
+ code; code; code; code; \
+ code; code; code; code;
+
+#else
+
+#define TIMES_8(code) \
+{ \
+ int nn = 8; \
+ do { \
+ code; \
+ } while (--nn); \
+}
+#define TIMES_12(code) \
+{ \
+ int nn = 12; \
+ do { \
+ code; \
+ } while (--nn); \
+}
+
+#endif
+
+ AssertD(i1 != i2, "mainGtU");
+ TIMES_12(
+ c1 = block[i1]; c2 = block[i2];
+ if (c1 != c2) return (c1 > c2);
+ i1++; i2++;
+ )
+
+ k = nblock + 8;
+
+ do {
+ TIMES_8(
+ c1 = block[i1]; c2 = block[i2];
+ if (c1 != c2) return (c1 > c2);
+ s1 = quadrant[i1]; s2 = quadrant[i2];
+ if (s1 != s2) return (s1 > s2);
+ i1++; i2++;
+ )
+
+ if (i1 >= nblock) i1 -= nblock;
+ if (i2 >= nblock) i2 -= nblock;
+
+ (*budget)--;
+ k -= 8;
+ } while (k >= 0);
+
+ return False;
+}
+#undef TIMES_8
+#undef TIMES_12
+
+/*---------------------------------------------*/
+/*
+ * Knuth's increments seem to work better
+ * than Incerpi-Sedgewick here. Possibly
+ * because the number of elems to sort is
+ * usually small, typically <= 20.
+ */
+static
+const int32_t incs[14] = {
+ 1, 4, 13, 40, 121, 364, 1093, 3280,
+ 9841, 29524, 88573, 265720,
+ 797161, 2391484
+};
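
Each entry of incs[] follows Knuth's recurrence h' = 3*h + 1, starting from
h = 1. A quick standalone check (illustration only, not part of this commit):

#include <stdio.h>

int main(void)
{
	int h = 1;
	int i;

	for (i = 0; i < 14; i++) {
		printf("%d ", h);  /* matches incs[0..13] above */
		h = 3 * h + 1;
	}
	printf("\n");
	return 0;
}
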
+
+static
+void mainSimpleSort(uint32_t* ptr,
+ uint8_t* block,
+ uint16_t* quadrant,
+ int32_t nblock,
+ int32_t lo,
+ int32_t hi,
+ int32_t d,
+ int32_t* budget)
+{
+ int32_t i, j, h, bigN, hp;
+ uint32_t v;
+
+ bigN = hi - lo + 1;
+ if (bigN < 2) return;
+
+ hp = 0;
+ while (incs[hp] < bigN) hp++;
+ hp--;
+
+ for (; hp >= 0; hp--) {
+ h = incs[hp];
+
+ i = lo + h;
+ while (1) {
+ /*-- copy 1 --*/
+ if (i > hi) break;
+ v = ptr[i];
+ j = i;
+ while (mainGtU(ptr[j-h]+d, v+d, block, quadrant, nblock, budget)) {
+ ptr[j] = ptr[j-h];
+ j = j - h;
+ if (j <= (lo + h - 1)) break;
+ }
+ ptr[j] = v;
+ i++;
+
+/* 1.5% overall speedup, +290 bytes */
+#if CONFIG_BZIP2_FEATURE_SPEED >= 3
+ /*-- copy 2 --*/
+ if (i > hi) break;
+ v = ptr[i];
+ j = i;
+ while (mainGtU(ptr[j-h]+d, v+d, block, quadrant, nblock, budget)) {
+ ptr[j] = ptr[j-h];
+ j = j - h;
+ if (j <= (lo + h - 1)) break;
+ }
+ ptr[j] = v;
+ i++;
+
+ /*-- copy 3 --*/
+ if (i > hi) break;
+ v = ptr[i];
+ j = i;
+ while (mainGtU(ptr[j-h]+d, v+d, block, quadrant, nblock, budget)) {
+ ptr[j] = ptr[j-h];
+ j = j - h;
+ if (j <= (lo + h - 1)) break;
+ }
+ ptr[j] = v;
+ i++;
+#endif
+ if (*budget < 0) return;
+ }
+ }
+}
+
+
+/*---------------------------------------------*/
+/*
+ * The following is an implementation of
+ * an elegant 3-way quicksort for strings,
+ * described in a paper "Fast Algorithms for
+ * Sorting and Searching Strings", by Robert
+ * Sedgewick and Jon L. Bentley.
+ */
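
For orientation, a minimal sketch of the same 3-way partitioning idea on
plain ints (not part of this commit): mainQSort3() below applies it to
suffix pointers, comparing at character depth d and revisiting the "equal"
band at depth d+1.

#include <stdio.h>

static void swap_int(int* a, int* b)
{
	int t = *a; *a = *b; *b = t;
}

static void qsort3(int* v, int lo, int hi)
{
	int pivot, lt, i, gt;

	if (lo >= hi) return;
	pivot = v[(lo + hi) / 2];
	lt = i = lo;
	gt = hi;
	while (i <= gt) {
		if (v[i] < pivot) swap_int(&v[i++], &v[lt++]);
		else if (v[i] > pivot) swap_int(&v[i], &v[gt--]);
		else i++;
	}
	qsort3(v, lo, lt - 1); /* strictly less    */
	qsort3(v, gt + 1, hi); /* strictly greater */
}

int main(void)
{
	int v[10] = { 3, 1, 4, 1, 5, 9, 2, 6, 5, 3 };
	int i;

	qsort3(v, 0, 9);
	for (i = 0; i < 10; i++) printf("%d ", v[i]);
	printf("\n");
	return 0;
}
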
+
+static
+ALWAYS_INLINE
+uint8_t mmed3(uint8_t a, uint8_t b, uint8_t c)
+{
+ uint8_t t;
+ if (a > b) {
+ t = a;
+ a = b;
+ b = t;
+ };
+ /* here b >= a */
+ if (b > c) {
+ b = c;
+ if (a > b)
+ b = a;
+ }
+ return b;
+}
+
+#define mpush(lz,hz,dz) \
+{ \
+ stackLo[sp] = lz; \
+ stackHi[sp] = hz; \
+ stackD [sp] = dz; \
+ sp++; \
+}
+
+#define mpop(lz,hz,dz) \
+{ \
+ sp--; \
+ lz = stackLo[sp]; \
+ hz = stackHi[sp]; \
+ dz = stackD [sp]; \
+}
+
+#define mnextsize(az) (nextHi[az] - nextLo[az])
+
+#define mnextswap(az,bz) \
+{ \
+ int32_t tz; \
+ tz = nextLo[az]; nextLo[az] = nextLo[bz]; nextLo[bz] = tz; \
+ tz = nextHi[az]; nextHi[az] = nextHi[bz]; nextHi[bz] = tz; \
+ tz = nextD [az]; nextD [az] = nextD [bz]; nextD [bz] = tz; \
+}
+
+#define MAIN_QSORT_SMALL_THRESH 20
+#define MAIN_QSORT_DEPTH_THRESH (BZ_N_RADIX + BZ_N_QSORT)
+#define MAIN_QSORT_STACK_SIZE 100
+
+static NOINLINE
+void mainQSort3(uint32_t* ptr,
+ uint8_t* block,
+ uint16_t* quadrant,
+ int32_t nblock,
+ int32_t loSt,
+ int32_t hiSt,
+ int32_t dSt,
+ int32_t* budget)
+{
+ int32_t unLo, unHi, ltLo, gtHi, n, m, med;
+ int32_t sp, lo, hi, d;
+
+ int32_t stackLo[MAIN_QSORT_STACK_SIZE];
+ int32_t stackHi[MAIN_QSORT_STACK_SIZE];
+ int32_t stackD [MAIN_QSORT_STACK_SIZE];
+
+ int32_t nextLo[3];
+ int32_t nextHi[3];
+ int32_t nextD [3];
+
+ sp = 0;
+ mpush(loSt, hiSt, dSt);
+
+ while (sp > 0) {
+ AssertH(sp < MAIN_QSORT_STACK_SIZE - 2, 1001);
+
+ mpop(lo, hi, d);
+ if (hi - lo < MAIN_QSORT_SMALL_THRESH
+ || d > MAIN_QSORT_DEPTH_THRESH
+ ) {
+ mainSimpleSort(ptr, block, quadrant, nblock, lo, hi, d, budget);
+ if (*budget < 0)
+ return;
+ continue;
+ }
+ med = (int32_t) mmed3(block[ptr[lo ] + d],
+ block[ptr[hi ] + d],
+ block[ptr[(lo+hi) >> 1] + d]);
+
+ unLo = ltLo = lo;
+ unHi = gtHi = hi;
+
+ while (1) {
+ while (1) {
+ if (unLo > unHi)
+ break;
+ n = ((int32_t)block[ptr[unLo]+d]) - med;
+ if (n == 0) {
+ mswap(ptr[unLo], ptr[ltLo]);
+ ltLo++;
+ unLo++;
+ continue;
+ };
+ if (n > 0) break;
+ unLo++;
+ }
+ while (1) {
+ if (unLo > unHi)
+ break;
+ n = ((int32_t)block[ptr[unHi]+d]) - med;
+ if (n == 0) {
+ mswap(ptr[unHi], ptr[gtHi]);
+ gtHi--;
+ unHi--;
+ continue;
+ };
+ if (n < 0) break;
+ unHi--;
+ }
+ if (unLo > unHi)
+ break;
+ mswap(ptr[unLo], ptr[unHi]);
+ unLo++;
+ unHi--;
+ }
+
+ AssertD(unHi == unLo-1, "mainQSort3(2)");
+
+ if (gtHi < ltLo) {
+ mpush(lo, hi, d + 1);
+ continue;
+ }
+
+ n = mmin(ltLo-lo, unLo-ltLo); mvswap(ptr, lo, unLo-n, n);
+ m = mmin(hi-gtHi, gtHi-unHi); mvswap(ptr, unLo, hi-m+1, m);
+
+ n = lo + unLo - ltLo - 1;
+ m = hi - (gtHi - unHi) + 1;
+
+ nextLo[0] = lo; nextHi[0] = n; nextD[0] = d;
+ nextLo[1] = m; nextHi[1] = hi; nextD[1] = d;
+ nextLo[2] = n+1; nextHi[2] = m-1; nextD[2] = d+1;
+
+ if (mnextsize(0) < mnextsize(1)) mnextswap(0, 1);
+ if (mnextsize(1) < mnextsize(2)) mnextswap(1, 2);
+ if (mnextsize(0) < mnextsize(1)) mnextswap(0, 1);
+
+ AssertD (mnextsize(0) >= mnextsize(1), "mainQSort3(8)");
+ AssertD (mnextsize(1) >= mnextsize(2), "mainQSort3(9)");
+
+ mpush(nextLo[0], nextHi[0], nextD[0]);
+ mpush(nextLo[1], nextHi[1], nextD[1]);
+ mpush(nextLo[2], nextHi[2], nextD[2]);
+ }
+}
+
+#undef mpush
+#undef mpop
+#undef mnextsize
+#undef mnextswap
+#undef MAIN_QSORT_SMALL_THRESH
+#undef MAIN_QSORT_DEPTH_THRESH
+#undef MAIN_QSORT_STACK_SIZE
+
+
+/*---------------------------------------------*/
+/* Pre:
+ * nblock > N_OVERSHOOT
+ * block32 exists for [0 .. nblock-1 +N_OVERSHOOT]
+ * ((uint8_t*)block32) [0 .. nblock-1] holds block
+ * ptr exists for [0 .. nblock-1]
+ *
+ * Post:
+ * ((uint8_t*)block32) [0 .. nblock-1] holds block
+ * All other areas of block32 destroyed
+ * ftab[0 .. 65536] destroyed
+ * ptr [0 .. nblock-1] holds sorted order
+ * if (*budget < 0), sorting was abandoned
+ */
+
+#define BIGFREQ(b) (ftab[((b)+1) << 8] - ftab[(b) << 8])
+#define SETMASK (1 << 21)
+#define CLEARMASK (~(SETMASK))
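
SETMASK steals bit 21 of each ftab[] entry as a "this small bucket is
already sorted" flag. That is safe because an entry is an index into the
block, and nblock is at most 900000 < 2^21. A tiny illustration (not part
of this commit):

#include <stdio.h>

int main(void)
{
	unsigned setmask = 1u << 21;
	unsigned entry = 899999;  /* largest possible bucket boundary */

	entry |= setmask;         /* mark bucket as done */
	printf("flagged: %u\n", !!(entry & setmask));
	printf("index:   %u\n", entry & ~setmask); /* CLEARMASK recovers it */
	return 0;
}
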
+
+static NOINLINE
+void mainSort(EState* state,
+ uint32_t* ptr,
+ uint8_t* block,
+ uint16_t* quadrant,
+ uint32_t* ftab,
+ int32_t nblock,
+ int32_t* budget)
+{
+ int32_t i, j, k, ss, sb;
+ uint8_t c1;
+ int32_t numQSorted;
+ uint16_t s;
+ Bool bigDone[256];
+ /* bbox: moved to EState to save stack
+ int32_t runningOrder[256];
+ int32_t copyStart[256];
+ int32_t copyEnd [256];
+ */
+#define runningOrder (state->mainSort__runningOrder)
+#define copyStart (state->mainSort__copyStart)
+#define copyEnd (state->mainSort__copyEnd)
+
+ /*-- set up the 2-byte frequency table --*/
+ /* was: for (i = 65536; i >= 0; i--) ftab[i] = 0; */
+ memset(ftab, 0, 65537 * sizeof(ftab[0]));
+
+ j = block[0] << 8;
+ i = nblock - 1;
+/* 3%, +300 bytes */
+#if CONFIG_BZIP2_FEATURE_SPEED >= 2
+ for (; i >= 3; i -= 4) {
+ quadrant[i] = 0;
+ j = (j >> 8) | (((uint16_t)block[i]) << 8);
+ ftab[j]++;
+ quadrant[i-1] = 0;
+ j = (j >> 8) | (((uint16_t)block[i-1]) << 8);
+ ftab[j]++;
+ quadrant[i-2] = 0;
+ j = (j >> 8) | (((uint16_t)block[i-2]) << 8);
+ ftab[j]++;
+ quadrant[i-3] = 0;
+ j = (j >> 8) | (((uint16_t)block[i-3]) << 8);
+ ftab[j]++;
+ }
+#endif
+ for (; i >= 0; i--) {
+ quadrant[i] = 0;
+ j = (j >> 8) | (((uint16_t)block[i]) << 8);
+ ftab[j]++;
+ }
+
+ /*-- (emphasises close relationship of block & quadrant) --*/
+ for (i = 0; i < BZ_N_OVERSHOOT; i++) {
+ block [nblock+i] = block[i];
+ quadrant[nblock+i] = 0;
+ }
+
+ /*-- Complete the initial radix sort --*/
+ j = ftab[0]; /* bbox: optimized */
+ for (i = 1; i <= 65536; i++) {
+ j += ftab[i];
+ ftab[i] = j;
+ }
+
+ s = block[0] << 8;
+ i = nblock - 1;
+#if CONFIG_BZIP2_FEATURE_SPEED >= 2
+ for (; i >= 3; i -= 4) {
+ s = (s >> 8) | (block[i] << 8);
+ j = ftab[s] - 1;
+ ftab[s] = j;
+ ptr[j] = i;
+ s = (s >> 8) | (block[i-1] << 8);
+ j = ftab[s] - 1;
+ ftab[s] = j;
+ ptr[j] = i-1;
+ s = (s >> 8) | (block[i-2] << 8);
+ j = ftab[s] - 1;
+ ftab[s] = j;
+ ptr[j] = i-2;
+ s = (s >> 8) | (block[i-3] << 8);
+ j = ftab[s] - 1;
+ ftab[s] = j;
+ ptr[j] = i-3;
+ }
+#endif
+ for (; i >= 0; i--) {
+ s = (s >> 8) | (block[i] << 8);
+ j = ftab[s] - 1;
+ ftab[s] = j;
+ ptr[j] = i;
+ }
+
+ /*
+ * Now ftab contains the first loc of every small bucket.
+ * Calculate the running order, from smallest to largest
+ * big bucket.
+ */
+ for (i = 0; i <= 255; i++) {
+ bigDone [i] = False;
+ runningOrder[i] = i;
+ }
+
+ {
+ int32_t vv;
+ /* bbox: was: int32_t h = 1; */
+ /* do h = 3 * h + 1; while (h <= 256); */
+ uint32_t h = 364;
+
+ do {
+ /*h = h / 3;*/
+ h = (h * 171) >> 9; /* bbox: fast h/3 */
+ for (i = h; i <= 255; i++) {
+ vv = runningOrder[i];
+ j = i;
+ while (BIGFREQ(runningOrder[j-h]) > BIGFREQ(vv)) {
+ runningOrder[j] = runningOrder[j-h];
+ j = j - h;
+ if (j <= (h - 1))
+ goto zero;
+ }
+ zero:
+ runningOrder[j] = vv;
+ }
+ } while (h != 1);
+ }
+
+ /*
+ * The main sorting loop.
+ */
+
+ numQSorted = 0;
+
+ for (i = 0; i <= 255; i++) {
+
+ /*
+ * Process big buckets, starting with the least full.
+ * Basically this is a 3-step process in which we call
+ * mainQSort3 to sort the small buckets [ss, j], but
+ * also make a big effort to avoid the calls if we can.
+ */
+ ss = runningOrder[i];
+
+ /*
+ * Step 1:
+ * Complete the big bucket [ss] by quicksorting
+ * any unsorted small buckets [ss, j], for j != ss.
+ * Hopefully previous pointer-scanning phases have already
+ * completed many of the small buckets [ss, j], so
+ * we don't have to sort them at all.
+ */
+ for (j = 0; j <= 255; j++) {
+ if (j != ss) {
+ sb = (ss << 8) + j;
+ if (!(ftab[sb] & SETMASK)) {
+ int32_t lo = ftab[sb] & CLEARMASK;
+ int32_t hi = (ftab[sb+1] & CLEARMASK) - 1;
+ if (hi > lo) {
+ mainQSort3(
+ ptr, block, quadrant, nblock,
+ lo, hi, BZ_N_RADIX, budget
+ );
+ if (*budget < 0) return;
+ numQSorted += (hi - lo + 1);
+ }
+ }
+ ftab[sb] |= SETMASK;
+ }
+ }
+
+ AssertH(!bigDone[ss], 1006);
+
+ /*
+ * Step 2:
+ * Now scan this big bucket [ss] so as to synthesise the
+ * sorted order for small buckets [t, ss] for all t,
+ * including, magically, the bucket [ss,ss] too.
+ * This will avoid doing Real Work in subsequent Step 1's.
+ */
+ {
+ for (j = 0; j <= 255; j++) {
+ copyStart[j] = ftab[(j << 8) + ss] & CLEARMASK;
+ copyEnd [j] = (ftab[(j << 8) + ss + 1] & CLEARMASK) - 1;
+ }
+ for (j = ftab[ss << 8] & CLEARMASK; j < copyStart[ss]; j++) {
+ k = ptr[j] - 1;
+ if (k < 0)
+ k += nblock;
+ c1 = block[k];
+ if (!bigDone[c1])
+ ptr[copyStart[c1]++] = k;
+ }
+ for (j = (ftab[(ss+1) << 8] & CLEARMASK) - 1; j > copyEnd[ss]; j--) {
+ k = ptr[j]-1;
+ if (k < 0)
+ k += nblock;
+ c1 = block[k];
+ if (!bigDone[c1])
+ ptr[copyEnd[c1]--] = k;
+ }
+ }
+
+ /* Extremely rare case missing in bzip2-1.0.0 and 1.0.1.
+ * Necessity for this case is demonstrated by compressing
+	 * a sequence of approximately 48.5 million repetitions of
+	 * character 251; 1.0.0/1.0.1 will then die here. */
+ AssertH((copyStart[ss]-1 == copyEnd[ss]) \
+ || (copyStart[ss] == 0 && copyEnd[ss] == nblock-1), 1007);
+
+ for (j = 0; j <= 255; j++)
+ ftab[(j << 8) + ss] |= SETMASK;
+
+ /*
+ * Step 3:
+ * The [ss] big bucket is now done. Record this fact,
+ * and update the quadrant descriptors. Remember to
+ * update quadrants in the overshoot area too, if
+ * necessary. The "if (i < 255)" test merely skips
+ * this updating for the last bucket processed, since
+ * updating for the last bucket is pointless.
+ *
+ * The quadrant array provides a way to incrementally
+ * cache sort orderings, as they appear, so as to
+ * make subsequent comparisons in fullGtU() complete
+ * faster. For repetitive blocks this makes a big
+ * difference (but not big enough to be able to avoid
+ * the fallback sorting mechanism, exponential radix sort).
+ *
+ * The precise meaning is: at all times:
+ *
+ * for 0 <= i < nblock and 0 <= j <= nblock
+ *
+ * if block[i] != block[j],
+ *
+ * then the relative values of quadrant[i] and
+ * quadrant[j] are meaningless.
+ *
+ * else {
+ * if quadrant[i] < quadrant[j]
+ * then the string starting at i lexicographically
+ * precedes the string starting at j
+ *
+ * else if quadrant[i] > quadrant[j]
+ * then the string starting at j lexicographically
+ * precedes the string starting at i
+ *
+ * else
+ * the relative ordering of the strings starting
+ * at i and j has not yet been determined.
+ * }
+ */
+ bigDone[ss] = True;
+
+ if (i < 255) {
+ int32_t bbStart = ftab[ss << 8] & CLEARMASK;
+ int32_t bbSize = (ftab[(ss+1) << 8] & CLEARMASK) - bbStart;
+ int32_t shifts = 0;
+
+ while ((bbSize >> shifts) > 65534) shifts++;
+
+ for (j = bbSize-1; j >= 0; j--) {
+ int32_t a2update = ptr[bbStart + j];
+ uint16_t qVal = (uint16_t)(j >> shifts);
+ quadrant[a2update] = qVal;
+ if (a2update < BZ_N_OVERSHOOT)
+ quadrant[a2update + nblock] = qVal;
+ }
+ AssertH(((bbSize-1) >> shifts) <= 65535, 1002);
+ }
+ }
+#undef runningOrder
+#undef copyStart
+#undef copyEnd
+}
+
+#undef BIGFREQ
+#undef SETMASK
+#undef CLEARMASK
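
The shell sort inside mainSort() replaces h / 3 with (h * 171) >> 9,
exploiting 171/512 ≈ 1/3. A standalone check that the shortcut reproduces
the exact quotients along the increment chain 364, 121, 40, 13, 4, 1
(illustration only, not part of this commit):

#include <stdio.h>

int main(void)
{
	unsigned h = 364;

	do {
		unsigned fast = (h * 171) >> 9;
		printf("h=%3u  h/3=%3u  (h*171)>>9=%3u\n", h, h / 3, fast);
		h = fast;
	} while (h != 1);
	return 0;
}
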
+
+
+/*---------------------------------------------*/
+/* Pre:
+ * nblock > 0
+ * arr2 exists for [0 .. nblock-1 +N_OVERSHOOT]
+ * ((uint8_t*)arr2)[0 .. nblock-1] holds block
+ * arr1 exists for [0 .. nblock-1]
+ *
+ * Post:
+ * ((uint8_t*)arr2) [0 .. nblock-1] holds block
+ * All other areas of block destroyed
+ * ftab[0 .. 65536] destroyed
+ * arr1[0 .. nblock-1] holds sorted order
+ */
+static NOINLINE
+void BZ2_blockSort(EState* s)
+{
+ /* In original bzip2 1.0.4, it's a parameter, but 30
+ * (which was the default) should work ok. */
+ enum { wfact = 30 };
+
+ uint32_t* ptr = s->ptr;
+ uint8_t* block = s->block;
+ uint32_t* ftab = s->ftab;
+ int32_t nblock = s->nblock;
+ uint16_t* quadrant;
+ int32_t budget;
+ int32_t i;
+
+ if (nblock < 10000) {
+ fallbackSort(s->arr1, s->arr2, ftab, nblock);
+ } else {
+ /* Calculate the location for quadrant, remembering to get
+ * the alignment right. Assumes that &(block[0]) is at least
+ * 2-byte aligned -- this should be ok since block is really
+ * the first section of arr2.
+ */
+ i = nblock + BZ_N_OVERSHOOT;
+ if (i & 1) i++;
+ quadrant = (uint16_t*)(&(block[i]));
+
+ /* (wfact-1) / 3 puts the default-factor-30
+ * transition point at very roughly the same place as
+ * with v0.1 and v0.9.0.
+ * Not that it particularly matters any more, since the
+ * resulting compressed stream is now the same regardless
+ * of whether or not we use the main sort or fallback sort.
+ */
+ budget = nblock * ((wfact-1) / 3);
+
+ mainSort(s, ptr, block, quadrant, ftab, nblock, &budget);
+ if (budget < 0) {
+ fallbackSort(s->arr1, s->arr2, ftab, nblock);
+ }
+ }
+
+ s->origPtr = -1;
+ for (i = 0; i < s->nblock; i++)
+ if (ptr[i] == 0) {
+ s->origPtr = i;
+ break;
+ };
+
+ AssertH(s->origPtr != -1, 1003);
+}
+
+
+/*-------------------------------------------------------------*/
+/*--- end blocksort.c ---*/
+/*-------------------------------------------------------------*/
diff --git a/archival/libarchive/bz/bzlib.c b/archival/libarchive/bz/bzlib.c
new file mode 100644
index 000000000..b3beeabed
--- /dev/null
+++ b/archival/libarchive/bz/bzlib.c
@@ -0,0 +1,431 @@
+/*
+ * bzip2 is written by Julian Seward <jseward@bzip.org>.
+ * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
+ * See README and LICENSE files in this directory for more information.
+ */
+
+/*-------------------------------------------------------------*/
+/*--- Library top-level functions. ---*/
+/*--- bzlib.c ---*/
+/*-------------------------------------------------------------*/
+
+/* ------------------------------------------------------------------
+This file is part of bzip2/libbzip2, a program and library for
+lossless, block-sorting data compression.
+
+bzip2/libbzip2 version 1.0.4 of 20 December 2006
+Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
+
+Please read the WARNING, DISCLAIMER and PATENTS sections in the
+README file.
+
+This program is released under the terms of the license contained
+in the file LICENSE.
+------------------------------------------------------------------ */
+
+/* CHANGES
+ * 0.9.0 -- original version.
+ * 0.9.0a/b -- no changes in this file.
+ * 0.9.0c -- made zero-length BZ_FLUSH work correctly in bzCompress().
+ * fixed bzWrite/bzRead to ignore zero-length requests.
+ * fixed bzread to correctly handle read requests after EOF.
+ * wrong parameter order in call to bzDecompressInit in
+ * bzBuffToBuffDecompress. Fixed.
+ */
+
+/* #include "bzlib_private.h" */
+
+/*---------------------------------------------------*/
+/*--- Compression stuff ---*/
+/*---------------------------------------------------*/
+
+/*---------------------------------------------------*/
+#if BZ_LIGHT_DEBUG
+static
+void bz_assert_fail(int errcode)
+{
+ /* if (errcode == 1007) bb_error_msg_and_die("probably bad RAM"); */
+ bb_error_msg_and_die("internal error %d", errcode);
+}
+#endif
+
+/*---------------------------------------------------*/
+static
+void prepare_new_block(EState* s)
+{
+ int i;
+ s->nblock = 0;
+ s->numZ = 0;
+ s->state_out_pos = 0;
+ BZ_INITIALISE_CRC(s->blockCRC);
+ /* inlined memset would be nice to have here */
+ for (i = 0; i < 256; i++)
+ s->inUse[i] = 0;
+ s->blockNo++;
+}
+
+
+/*---------------------------------------------------*/
+static
+ALWAYS_INLINE
+void init_RL(EState* s)
+{
+ s->state_in_ch = 256;
+ s->state_in_len = 0;
+}
+
+
+static
+int isempty_RL(EState* s)
+{
+ return (s->state_in_ch >= 256 || s->state_in_len <= 0);
+}
+
+
+/*---------------------------------------------------*/
+static
+void BZ2_bzCompressInit(bz_stream *strm, int blockSize100k)
+{
+ int32_t n;
+ EState* s;
+
+ s = xzalloc(sizeof(EState));
+ s->strm = strm;
+
+ n = 100000 * blockSize100k;
+ s->arr1 = xmalloc(n * sizeof(uint32_t));
+ s->mtfv = (uint16_t*)s->arr1;
+ s->ptr = (uint32_t*)s->arr1;
+ s->arr2 = xmalloc((n + BZ_N_OVERSHOOT) * sizeof(uint32_t));
+ s->block = (uint8_t*)s->arr2;
+ s->ftab = xmalloc(65537 * sizeof(uint32_t));
+
+ s->crc32table = crc32_filltable(NULL, 1);
+
+ s->state = BZ_S_INPUT;
+ s->mode = BZ_M_RUNNING;
+ s->blockSize100k = blockSize100k;
+ s->nblockMAX = n - 19;
+
+ strm->state = s;
+ /*strm->total_in = 0;*/
+ strm->total_out = 0;
+ init_RL(s);
+ prepare_new_block(s);
+}
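
For a sense of scale, a rough memory tally for the allocations in
BZ2_bzCompressInit() (an editorial sketch, reusing the BZ_N_OVERSHOOT value
from bzlib_private.h): per level blockSize100k = k, arr1 and arr2 together
cost about 8 bytes per block byte, plus a fixed ~256 KiB for ftab.

#include <stdio.h>

#define BZ_N_OVERSHOOT (2 + 12 + 18 + 2) /* from bzlib_private.h */

int main(void)
{
	int k;

	for (k = 1; k <= 9; k++) {
		long n = 100000L * k;
		long bytes = n * 4                   /* arr1 */
			+ (n + BZ_N_OVERSHOOT) * 4L  /* arr2 */
			+ 65537L * 4;                /* ftab */
		printf("-%d: block %ld, work mem ~%ld KiB\n", k, n, bytes / 1024);
	}
	return 0;
}
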
+
+
+/*---------------------------------------------------*/
+static
+void add_pair_to_block(EState* s)
+{
+ int32_t i;
+ uint8_t ch = (uint8_t)(s->state_in_ch);
+ for (i = 0; i < s->state_in_len; i++) {
+ BZ_UPDATE_CRC(s, s->blockCRC, ch);
+ }
+ s->inUse[s->state_in_ch] = 1;
+ switch (s->state_in_len) {
+ case 3:
+ s->block[s->nblock] = (uint8_t)ch; s->nblock++;
+ /* fall through */
+ case 2:
+ s->block[s->nblock] = (uint8_t)ch; s->nblock++;
+ /* fall through */
+ case 1:
+ s->block[s->nblock] = (uint8_t)ch; s->nblock++;
+ break;
+ default:
+ s->inUse[s->state_in_len - 4] = 1;
+ s->block[s->nblock] = (uint8_t)ch; s->nblock++;
+ s->block[s->nblock] = (uint8_t)ch; s->nblock++;
+ s->block[s->nblock] = (uint8_t)ch; s->nblock++;
+ s->block[s->nblock] = (uint8_t)ch; s->nblock++;
+ s->block[s->nblock] = (uint8_t)(s->state_in_len - 4);
+ s->nblock++;
+ break;
+ }
+}
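
add_pair_to_block() finishes the first-stage run-length encoding: a run of
n identical bytes (1 <= n <= 255) is stored as the byte repeated min(n, 4)
times, followed by a count byte (n - 4) when n >= 4. A standalone sketch of
that rule (not part of this commit):

#include <stdio.h>

/* Encode a run of n copies of ch; returns bytes written. */
static int rle1(unsigned char* out, unsigned char ch, int n)
{
	int k = 0;

	if (n <= 3) {
		while (n--) out[k++] = ch;
	} else {
		out[k++] = ch; out[k++] = ch;
		out[k++] = ch; out[k++] = ch;
		out[k++] = (unsigned char)(n - 4);
	}
	return k;
}

int main(void)
{
	unsigned char buf[8];
	int i, k = rle1(buf, 'A', 7);

	for (i = 0; i < k; i++) printf("%u ", buf[i]);
	printf("\n"); /* 65 65 65 65 3 */
	return 0;
}
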
+
+
+/*---------------------------------------------------*/
+static
+void flush_RL(EState* s)
+{
+ if (s->state_in_ch < 256) add_pair_to_block(s);
+ init_RL(s);
+}
+
+
+/*---------------------------------------------------*/
+#define ADD_CHAR_TO_BLOCK(zs, zchh0) \
+{ \
+ uint32_t zchh = (uint32_t)(zchh0); \
+ /*-- fast track the common case --*/ \
+ if (zchh != zs->state_in_ch && zs->state_in_len == 1) { \
+ uint8_t ch = (uint8_t)(zs->state_in_ch); \
+ BZ_UPDATE_CRC(zs, zs->blockCRC, ch); \
+ zs->inUse[zs->state_in_ch] = 1; \
+ zs->block[zs->nblock] = (uint8_t)ch; \
+ zs->nblock++; \
+ zs->state_in_ch = zchh; \
+ } \
+ else \
+ /*-- general, uncommon cases --*/ \
+ if (zchh != zs->state_in_ch || zs->state_in_len == 255) { \
+ if (zs->state_in_ch < 256) \
+ add_pair_to_block(zs); \
+ zs->state_in_ch = zchh; \
+ zs->state_in_len = 1; \
+ } else { \
+ zs->state_in_len++; \
+ } \
+}
+
+
+/*---------------------------------------------------*/
+static
+void /*Bool*/ copy_input_until_stop(EState* s)
+{
+ /*Bool progress_in = False;*/
+
+#ifdef SAME_CODE_AS_BELOW
+ if (s->mode == BZ_M_RUNNING) {
+ /*-- fast track the common case --*/
+ while (1) {
+ /*-- no input? --*/
+ if (s->strm->avail_in == 0) break;
+ /*-- block full? --*/
+ if (s->nblock >= s->nblockMAX) break;
+ /*progress_in = True;*/
+ ADD_CHAR_TO_BLOCK(s, (uint32_t)(*(uint8_t*)(s->strm->next_in)));
+ s->strm->next_in++;
+ s->strm->avail_in--;
+ /*s->strm->total_in++;*/
+ }
+ } else
+#endif
+ {
+ /*-- general, uncommon case --*/
+ while (1) {
+ /*-- no input? --*/
+ if (s->strm->avail_in == 0) break;
+ /*-- block full? --*/
+ if (s->nblock >= s->nblockMAX) break;
+ //# /*-- flush/finish end? --*/
+ //# if (s->avail_in_expect == 0) break;
+ /*progress_in = True;*/
+ ADD_CHAR_TO_BLOCK(s, *(uint8_t*)(s->strm->next_in));
+ s->strm->next_in++;
+ s->strm->avail_in--;
+ /*s->strm->total_in++;*/
+ //# s->avail_in_expect--;
+ }
+ }
+ /*return progress_in;*/
+}
+
+
+/*---------------------------------------------------*/
+static
+void /*Bool*/ copy_output_until_stop(EState* s)
+{
+ /*Bool progress_out = False;*/
+
+ while (1) {
+ /*-- no output space? --*/
+ if (s->strm->avail_out == 0) break;
+
+ /*-- block done? --*/
+ if (s->state_out_pos >= s->numZ) break;
+
+ /*progress_out = True;*/
+ *(s->strm->next_out) = s->zbits[s->state_out_pos];
+ s->state_out_pos++;
+ s->strm->avail_out--;
+ s->strm->next_out++;
+ s->strm->total_out++;
+ }
+ /*return progress_out;*/
+}
+
+
+/*---------------------------------------------------*/
+static
+void /*Bool*/ handle_compress(bz_stream *strm)
+{
+ /*Bool progress_in = False;*/
+ /*Bool progress_out = False;*/
+ EState* s = strm->state;
+
+ while (1) {
+ if (s->state == BZ_S_OUTPUT) {
+ /*progress_out |=*/ copy_output_until_stop(s);
+ if (s->state_out_pos < s->numZ) break;
+ if (s->mode == BZ_M_FINISHING
+ //# && s->avail_in_expect == 0
+ && s->strm->avail_in == 0
+ && isempty_RL(s))
+ break;
+ prepare_new_block(s);
+ s->state = BZ_S_INPUT;
+#ifdef FLUSH_IS_UNUSED
+ if (s->mode == BZ_M_FLUSHING
+ && s->avail_in_expect == 0
+ && isempty_RL(s))
+ break;
+#endif
+ }
+
+ if (s->state == BZ_S_INPUT) {
+ /*progress_in |=*/ copy_input_until_stop(s);
+ //#if (s->mode != BZ_M_RUNNING && s->avail_in_expect == 0) {
+ if (s->mode != BZ_M_RUNNING && s->strm->avail_in == 0) {
+ flush_RL(s);
+ BZ2_compressBlock(s, (s->mode == BZ_M_FINISHING));
+ s->state = BZ_S_OUTPUT;
+ } else
+ if (s->nblock >= s->nblockMAX) {
+ BZ2_compressBlock(s, 0);
+ s->state = BZ_S_OUTPUT;
+ } else
+ if (s->strm->avail_in == 0) {
+ break;
+ }
+ }
+ }
+
+ /*return progress_in || progress_out;*/
+}
+
+
+/*---------------------------------------------------*/
+static
+int BZ2_bzCompress(bz_stream *strm, int action)
+{
+ /*Bool progress;*/
+ EState* s;
+
+ s = strm->state;
+
+ switch (s->mode) {
+ case BZ_M_RUNNING:
+ if (action == BZ_RUN) {
+ /*progress =*/ handle_compress(strm);
+ /*return progress ? BZ_RUN_OK : BZ_PARAM_ERROR;*/
+ return BZ_RUN_OK;
+ }
+#ifdef FLUSH_IS_UNUSED
+ else
+ if (action == BZ_FLUSH) {
+ //#s->avail_in_expect = strm->avail_in;
+ s->mode = BZ_M_FLUSHING;
+ goto case_BZ_M_FLUSHING;
+ }
+#endif
+ else
+ /*if (action == BZ_FINISH)*/ {
+ //#s->avail_in_expect = strm->avail_in;
+ s->mode = BZ_M_FINISHING;
+ goto case_BZ_M_FINISHING;
+ }
+
+#ifdef FLUSH_IS_UNUSED
+ case_BZ_M_FLUSHING:
+ case BZ_M_FLUSHING:
+ /*if (s->avail_in_expect != s->strm->avail_in)
+ return BZ_SEQUENCE_ERROR;*/
+ /*progress =*/ handle_compress(strm);
+ if (s->avail_in_expect > 0 || !isempty_RL(s) || s->state_out_pos < s->numZ)
+ return BZ_FLUSH_OK;
+ s->mode = BZ_M_RUNNING;
+ return BZ_RUN_OK;
+#endif
+
+ case_BZ_M_FINISHING:
+ /*case BZ_M_FINISHING:*/
+ default:
+ /*if (s->avail_in_expect != s->strm->avail_in)
+ return BZ_SEQUENCE_ERROR;*/
+ /*progress =*/ handle_compress(strm);
+ /*if (!progress) return BZ_SEQUENCE_ERROR;*/
+ //#if (s->avail_in_expect > 0 || !isempty_RL(s) || s->state_out_pos < s->numZ)
+ //# return BZ_FINISH_OK;
+ if (s->strm->avail_in > 0 || !isempty_RL(s) || s->state_out_pos < s->numZ)
+ return BZ_FINISH_OK;
+ /*s->mode = BZ_M_IDLE;*/
+ return BZ_STREAM_END;
+ }
+ /* return BZ_OK; --not reached--*/
+}
+
+
+/*---------------------------------------------------*/
+#if ENABLE_FEATURE_CLEAN_UP
+static
+void BZ2_bzCompressEnd(bz_stream *strm)
+{
+ EState* s;
+
+ s = strm->state;
+ free(s->arr1);
+ free(s->arr2);
+ free(s->ftab);
+ free(s->crc32table);
+ free(strm->state);
+}
+#endif
+
+
+/*---------------------------------------------------*/
+/*--- Misc convenience stuff ---*/
+/*---------------------------------------------------*/
+
+/*---------------------------------------------------*/
+#ifdef EXAMPLE_CODE_FOR_MEM_TO_MEM_COMPRESSION
+static
+int BZ2_bzBuffToBuffCompress(char* dest,
+ unsigned int* destLen,
+ char* source,
+ unsigned int sourceLen,
+ int blockSize100k)
+{
+ bz_stream strm;
+ int ret;
+
+ if (dest == NULL || destLen == NULL
+ || source == NULL
+ || blockSize100k < 1 || blockSize100k > 9
+ ) {
+ return BZ_PARAM_ERROR;
+ }
+
+ BZ2_bzCompressInit(&strm, blockSize100k);
+
+ strm.next_in = source;
+ strm.next_out = dest;
+ strm.avail_in = sourceLen;
+ strm.avail_out = *destLen;
+
+ ret = BZ2_bzCompress(&strm, BZ_FINISH);
+ if (ret == BZ_FINISH_OK) goto output_overflow;
+ if (ret != BZ_STREAM_END) goto errhandler;
+
+ /* normal termination */
+ *destLen -= strm.avail_out;
+ BZ2_bzCompressEnd(&strm);
+ return BZ_OK;
+
+ output_overflow:
+ BZ2_bzCompressEnd(&strm);
+ return BZ_OUTBUFF_FULL;
+
+ errhandler:
+ BZ2_bzCompressEnd(&strm);
+ return ret;
+}
+#endif
+
+/*-------------------------------------------------------------*/
+/*--- end bzlib.c ---*/
+/*-------------------------------------------------------------*/
diff --git a/archival/libarchive/bz/bzlib.h b/archival/libarchive/bz/bzlib.h
new file mode 100644
index 000000000..1bb811c4a
--- /dev/null
+++ b/archival/libarchive/bz/bzlib.h
@@ -0,0 +1,65 @@
+/*
+ * bzip2 is written by Julian Seward <jseward@bzip.org>.
+ * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
+ * See README and LICENSE files in this directory for more information.
+ */
+
+/*-------------------------------------------------------------*/
+/*--- Public header file for the library. ---*/
+/*--- bzlib.h ---*/
+/*-------------------------------------------------------------*/
+
+/* ------------------------------------------------------------------
+This file is part of bzip2/libbzip2, a program and library for
+lossless, block-sorting data compression.
+
+bzip2/libbzip2 version 1.0.4 of 20 December 2006
+Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
+
+Please read the WARNING, DISCLAIMER and PATENTS sections in the
+README file.
+
+This program is released under the terms of the license contained
+in the file LICENSE.
+------------------------------------------------------------------ */
+
+#define BZ_RUN 0
+#define BZ_FLUSH 1
+#define BZ_FINISH 2
+
+#define BZ_OK 0
+#define BZ_RUN_OK 1
+#define BZ_FLUSH_OK 2
+#define BZ_FINISH_OK 3
+#define BZ_STREAM_END 4
+#define BZ_SEQUENCE_ERROR (-1)
+#define BZ_PARAM_ERROR (-2)
+#define BZ_MEM_ERROR (-3)
+#define BZ_DATA_ERROR (-4)
+#define BZ_DATA_ERROR_MAGIC (-5)
+#define BZ_IO_ERROR (-6)
+#define BZ_UNEXPECTED_EOF (-7)
+#define BZ_OUTBUFF_FULL (-8)
+#define BZ_CONFIG_ERROR (-9)
+
+typedef struct bz_stream {
+ void *state;
+ char *next_in;
+ char *next_out;
+ unsigned avail_in;
+ unsigned avail_out;
+ /*unsigned long long total_in;*/
+ unsigned long long total_out;
+} bz_stream;
+
+/*-- Core (low-level) library functions --*/
+
+static void BZ2_bzCompressInit(bz_stream *strm, int blockSize100k);
+static int BZ2_bzCompress(bz_stream *strm, int action);
+#if ENABLE_FEATURE_CLEAN_UP
+static void BZ2_bzCompressEnd(bz_stream *strm);
+#endif
+
+/*-------------------------------------------------------------*/
+/*--- end bzlib.h ---*/
+/*-------------------------------------------------------------*/
diff --git a/archival/libarchive/bz/bzlib_private.h b/archival/libarchive/bz/bzlib_private.h
new file mode 100644
index 000000000..6430ce407
--- /dev/null
+++ b/archival/libarchive/bz/bzlib_private.h
@@ -0,0 +1,219 @@
+/*
+ * bzip2 is written by Julian Seward <jseward@bzip.org>.
+ * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
+ * See README and LICENSE files in this directory for more information.
+ */
+
+/*-------------------------------------------------------------*/
+/*--- Private header file for the library. ---*/
+/*--- bzlib_private.h ---*/
+/*-------------------------------------------------------------*/
+
+/* ------------------------------------------------------------------
+This file is part of bzip2/libbzip2, a program and library for
+lossless, block-sorting data compression.
+
+bzip2/libbzip2 version 1.0.4 of 20 December 2006
+Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
+
+Please read the WARNING, DISCLAIMER and PATENTS sections in the
+README file.
+
+This program is released under the terms of the license contained
+in the file LICENSE.
+------------------------------------------------------------------ */
+
+/* #include "bzlib.h" */
+
+/*-- General stuff. --*/
+
+typedef unsigned char Bool;
+
+#define True ((Bool)1)
+#define False ((Bool)0)
+
+#if BZ_LIGHT_DEBUG
+static void bz_assert_fail(int errcode) NORETURN;
+#define AssertH(cond, errcode) \
+do { \
+ if (!(cond)) \
+ bz_assert_fail(errcode); \
+} while (0)
+#else
+#define AssertH(cond, msg) do { } while (0)
+#endif
+
+#if BZ_DEBUG
+#define AssertD(cond, msg) \
+do { \
+ if (!(cond)) \
+ bb_error_msg_and_die("(debug build): internal error %s", msg); \
+} while (0)
+#else
+#define AssertD(cond, msg) do { } while (0)
+#endif
+
+
+/*-- Header bytes. --*/
+
+#define BZ_HDR_B 0x42 /* 'B' */
+#define BZ_HDR_Z 0x5a /* 'Z' */
+#define BZ_HDR_h 0x68 /* 'h' */
+#define BZ_HDR_0 0x30 /* '0' */
+
+#define BZ_HDR_BZh0 0x425a6830
+
+/*-- Constants for the back end. --*/
+
+#define BZ_MAX_ALPHA_SIZE 258
+#define BZ_MAX_CODE_LEN 23
+
+#define BZ_RUNA 0
+#define BZ_RUNB 1
+
+#define BZ_N_GROUPS 6
+#define BZ_G_SIZE 50
+#define BZ_N_ITERS 4
+
+#define BZ_MAX_SELECTORS (2 + (900000 / BZ_G_SIZE))
+
+
+/*-- Stuff for doing CRCs. --*/
+
+#define BZ_INITIALISE_CRC(crcVar) \
+{ \
+ crcVar = 0xffffffffL; \
+}
+
+#define BZ_FINALISE_CRC(crcVar) \
+{ \
+ crcVar = ~(crcVar); \
+}
+
+#define BZ_UPDATE_CRC(s, crcVar, cha) \
+{ \
+ crcVar = (crcVar << 8) ^ s->crc32table[(crcVar >> 24) ^ ((uint8_t)cha)]; \
+}
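
bzip2 uses a big-endian (MSB-first) CRC-32. The sketch below builds the
kind of table busybox's crc32_filltable(NULL, 1) is expected to provide,
assuming the standard polynomial 0x04c11db7, and applies the BZ_UPDATE_CRC
formula to one byte (illustration only, not part of this commit):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	static uint32_t tab[256];
	uint32_t i, crc;
	int k;

	for (i = 0; i < 256; i++) {
		uint32_t c = i << 24;
		for (k = 0; k < 8; k++)
			c = (c << 1) ^ ((c & 0x80000000) ? 0x04c11db7 : 0);
		tab[i] = c;
	}

	crc = 0xffffffff;                          /* BZ_INITIALISE_CRC */
	crc = (crc << 8) ^ tab[(crc >> 24) ^ 'x']; /* BZ_UPDATE_CRC     */
	crc = ~crc;                                /* BZ_FINALISE_CRC   */
	printf("%08x\n", (unsigned)crc);
	return 0;
}
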
+
+
+/*-- States and modes for compression. --*/
+
+#define BZ_M_IDLE 1
+#define BZ_M_RUNNING 2
+#define BZ_M_FLUSHING 3
+#define BZ_M_FINISHING 4
+
+#define BZ_S_OUTPUT 1
+#define BZ_S_INPUT 2
+
+#define BZ_N_RADIX 2
+#define BZ_N_QSORT 12
+#define BZ_N_SHELL 18
+#define BZ_N_OVERSHOOT (BZ_N_RADIX + BZ_N_QSORT + BZ_N_SHELL + 2)
+
+
+/*-- Structure holding all the compression-side stuff. --*/
+
+typedef struct EState {
+ /* pointer back to the struct bz_stream */
+ bz_stream *strm;
+
+ /* mode this stream is in, and whether inputting */
+ /* or outputting data */
+ int32_t mode;
+ int32_t state;
+
+ /* remembers avail_in when flush/finish requested */
+/* bbox: not needed, strm->avail_in always has the same value */
+/* commented out with '//#' throughout the code */
+ /* uint32_t avail_in_expect; */
+
+ /* for doing the block sorting */
+ int32_t origPtr;
+ uint32_t *arr1;
+ uint32_t *arr2;
+ uint32_t *ftab;
+
+ /* aliases for arr1 and arr2 */
+ uint32_t *ptr;
+ uint8_t *block;
+ uint16_t *mtfv;
+ uint8_t *zbits;
+
+ /* guess what */
+ uint32_t *crc32table;
+
+ /* run-length-encoding of the input */
+ uint32_t state_in_ch;
+ int32_t state_in_len;
+
+ /* input and output limits and current posns */
+ int32_t nblock;
+ int32_t nblockMAX;
+ int32_t numZ;
+ int32_t state_out_pos;
+
+ /* the buffer for bit stream creation */
+ uint32_t bsBuff;
+ int32_t bsLive;
+
+ /* block and combined CRCs */
+ uint32_t blockCRC;
+ uint32_t combinedCRC;
+
+ /* misc administratium */
+ int32_t blockNo;
+ int32_t blockSize100k;
+
+ /* stuff for coding the MTF values */
+ int32_t nMTF;
+
+ /* map of bytes used in block */
+ int32_t nInUse;
+ Bool inUse[256] ALIGNED(sizeof(long));
+ uint8_t unseqToSeq[256];
+
+ /* stuff for coding the MTF values */
+ int32_t mtfFreq [BZ_MAX_ALPHA_SIZE];
+ uint8_t selector [BZ_MAX_SELECTORS];
+ uint8_t selectorMtf[BZ_MAX_SELECTORS];
+
+ uint8_t len[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
+
+ /* stack-saving measures: these can be local, but they are too big */
+ int32_t sendMTFValues__code [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
+ int32_t sendMTFValues__rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
+#if CONFIG_BZIP2_FEATURE_SPEED >= 5
+ /* second dimension: only 3 needed; 4 makes index calculations faster */
+ uint32_t sendMTFValues__len_pack[BZ_MAX_ALPHA_SIZE][4];
+#endif
+ int32_t BZ2_hbMakeCodeLengths__heap [BZ_MAX_ALPHA_SIZE + 2];
+ int32_t BZ2_hbMakeCodeLengths__weight[BZ_MAX_ALPHA_SIZE * 2];
+ int32_t BZ2_hbMakeCodeLengths__parent[BZ_MAX_ALPHA_SIZE * 2];
+
+ int32_t mainSort__runningOrder[256];
+ int32_t mainSort__copyStart[256];
+ int32_t mainSort__copyEnd[256];
+} EState;
+
+
+/*-- compression. --*/
+
+static void
+BZ2_blockSort(EState*);
+
+static void
+BZ2_compressBlock(EState*, int);
+
+static void
+BZ2_bsInitWrite(EState*);
+
+static void
+BZ2_hbAssignCodes(int32_t*, uint8_t*, int32_t, int32_t, int32_t);
+
+static void
+BZ2_hbMakeCodeLengths(EState*, uint8_t*, int32_t*, int32_t, int32_t);
+
+/*-------------------------------------------------------------*/
+/*--- end bzlib_private.h ---*/
+/*-------------------------------------------------------------*/
diff --git a/archival/libarchive/bz/compress.c b/archival/libarchive/bz/compress.c
new file mode 100644
index 000000000..6f1c70a08
--- /dev/null
+++ b/archival/libarchive/bz/compress.c
@@ -0,0 +1,685 @@
+/*
+ * bzip2 is written by Julian Seward <jseward@bzip.org>.
+ * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
+ * See README and LICENSE files in this directory for more information.
+ */
+
+/*-------------------------------------------------------------*/
+/*--- Compression machinery (not incl block sorting) ---*/
+/*--- compress.c ---*/
+/*-------------------------------------------------------------*/
+
+/* ------------------------------------------------------------------
+This file is part of bzip2/libbzip2, a program and library for
+lossless, block-sorting data compression.
+
+bzip2/libbzip2 version 1.0.4 of 20 December 2006
+Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
+
+Please read the WARNING, DISCLAIMER and PATENTS sections in the
+README file.
+
+This program is released under the terms of the license contained
+in the file LICENSE.
+------------------------------------------------------------------ */
+
+/* CHANGES
+ * 0.9.0 -- original version.
+ * 0.9.0a/b -- no changes in this file.
+ * 0.9.0c -- changed setting of nGroups in sendMTFValues()
+ * so as to do a bit better on small files
+*/
+
+/* #include "bzlib_private.h" */
+
+/*---------------------------------------------------*/
+/*--- Bit stream I/O ---*/
+/*---------------------------------------------------*/
+
+/*---------------------------------------------------*/
+static
+void BZ2_bsInitWrite(EState* s)
+{
+ s->bsLive = 0;
+ s->bsBuff = 0;
+}
+
+
+/*---------------------------------------------------*/
+static NOINLINE
+void bsFinishWrite(EState* s)
+{
+ while (s->bsLive > 0) {
+ s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
+ s->numZ++;
+ s->bsBuff <<= 8;
+ s->bsLive -= 8;
+ }
+}
+
+
+/*---------------------------------------------------*/
+static
+/* Seems to help only on level 5; on other levels it hurts. */
+#if CONFIG_BZIP2_FEATURE_SPEED >= 5
+ALWAYS_INLINE
+#endif
+void bsW(EState* s, int32_t n, uint32_t v)
+{
+ while (s->bsLive >= 8) {
+ s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
+ s->numZ++;
+ s->bsBuff <<= 8;
+ s->bsLive -= 8;
+ }
+ s->bsBuff |= (v << (32 - s->bsLive - n));
+ s->bsLive += n;
+}
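
bsW() keeps pending bits left-justified in a 32-bit buffer: completed bytes
are flushed from bit 31 downward, then the new n bits are OR-ed in just
below the ones still pending. A self-contained sketch of the same mechanism
(not part of this commit):

#include <stdio.h>
#include <stdint.h>

static uint32_t bsBuff;
static int bsLive, numZ;
static uint8_t zbits[16];

static void bsW_demo(int n, uint32_t v)
{
	while (bsLive >= 8) {   /* flush completed bytes */
		zbits[numZ++] = (uint8_t)(bsBuff >> 24);
		bsBuff <<= 8;
		bsLive -= 8;
	}
	bsBuff |= v << (32 - bsLive - n); /* append n bits below them */
	bsLive += n;
}

int main(void)
{
	bsW_demo(8, 'B');
	bsW_demo(8, 'Z');
	bsW_demo(8, 'h');
	bsW_demo(8, 0); /* push the first three bytes out */
	printf("%c%c%c\n", zbits[0], zbits[1], zbits[2]); /* BZh */
	return 0;
}
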
+
+
+/*---------------------------------------------------*/
+static
+void bsPutU32(EState* s, unsigned u)
+{
+ bsW(s, 8, (u >> 24) & 0xff);
+ bsW(s, 8, (u >> 16) & 0xff);
+ bsW(s, 8, (u >> 8) & 0xff);
+ bsW(s, 8, u & 0xff);
+}
+
+
+/*---------------------------------------------------*/
+static
+void bsPutU16(EState* s, unsigned u)
+{
+ bsW(s, 8, (u >> 8) & 0xff);
+ bsW(s, 8, u & 0xff);
+}
+
+
+/*---------------------------------------------------*/
+/*--- The back end proper ---*/
+/*---------------------------------------------------*/
+
+/*---------------------------------------------------*/
+static
+void makeMaps_e(EState* s)
+{
+ int i;
+ s->nInUse = 0;
+ for (i = 0; i < 256; i++) {
+ if (s->inUse[i]) {
+ s->unseqToSeq[i] = s->nInUse;
+ s->nInUse++;
+ }
+ }
+}
+
+
+/*---------------------------------------------------*/
+static NOINLINE
+void generateMTFValues(EState* s)
+{
+ uint8_t yy[256];
+ int32_t i, j;
+ int32_t zPend;
+ int32_t wr;
+ int32_t EOB;
+
+ /*
+ * After sorting (eg, here),
+ * s->arr1[0 .. s->nblock-1] holds sorted order,
+ * and
+ * ((uint8_t*)s->arr2)[0 .. s->nblock-1]
+ * holds the original block data.
+ *
+ * The first thing to do is generate the MTF values,
+ * and put them in ((uint16_t*)s->arr1)[0 .. s->nblock-1].
+ *
+	 * Because the number of MTF values never exceeds the number
+	 * of block values, ptr values in this area are overwritten
+	 * with MTF values only when they are no longer needed.
+ *
+ * The final compressed bitstream is generated into the
+ * area starting at &((uint8_t*)s->arr2)[s->nblock]
+ *
+ * These storage aliases are set up in bzCompressInit(),
+ * except for the last one, which is arranged in
+ * compressBlock().
+ */
+ uint32_t* ptr = s->ptr;
+ uint8_t* block = s->block;
+ uint16_t* mtfv = s->mtfv;
+
+ makeMaps_e(s);
+ EOB = s->nInUse+1;
+
+ for (i = 0; i <= EOB; i++)
+ s->mtfFreq[i] = 0;
+
+ wr = 0;
+ zPend = 0;
+ for (i = 0; i < s->nInUse; i++)
+ yy[i] = (uint8_t) i;
+
+ for (i = 0; i < s->nblock; i++) {
+ uint8_t ll_i;
+ AssertD(wr <= i, "generateMTFValues(1)");
+ j = ptr[i] - 1;
+ if (j < 0)
+ j += s->nblock;
+ ll_i = s->unseqToSeq[block[j]];
+ AssertD(ll_i < s->nInUse, "generateMTFValues(2a)");
+
+ if (yy[0] == ll_i) {
+ zPend++;
+ } else {
+ if (zPend > 0) {
+ zPend--;
+ while (1) {
+ if (zPend & 1) {
+ mtfv[wr] = BZ_RUNB; wr++;
+ s->mtfFreq[BZ_RUNB]++;
+ } else {
+ mtfv[wr] = BZ_RUNA; wr++;
+ s->mtfFreq[BZ_RUNA]++;
+ }
+ if (zPend < 2) break;
+ zPend = (uint32_t)(zPend - 2) / 2;
+ /* bbox: unsigned div is easier */
+ };
+ zPend = 0;
+ }
+ {
+ register uint8_t rtmp;
+ register uint8_t* ryy_j;
+ register uint8_t rll_i;
+ rtmp = yy[1];
+ yy[1] = yy[0];
+ ryy_j = &(yy[1]);
+ rll_i = ll_i;
+ while (rll_i != rtmp) {
+ register uint8_t rtmp2;
+ ryy_j++;
+ rtmp2 = rtmp;
+ rtmp = *ryy_j;
+ *ryy_j = rtmp2;
+ };
+ yy[0] = rtmp;
+ j = ryy_j - &(yy[0]);
+ mtfv[wr] = j+1;
+ wr++;
+ s->mtfFreq[j+1]++;
+ }
+ }
+ }
+
+ if (zPend > 0) {
+ zPend--;
+ while (1) {
+ if (zPend & 1) {
+ mtfv[wr] = BZ_RUNB;
+ wr++;
+ s->mtfFreq[BZ_RUNB]++;
+ } else {
+ mtfv[wr] = BZ_RUNA;
+ wr++;
+ s->mtfFreq[BZ_RUNA]++;
+ }
+ if (zPend < 2)
+ break;
+ zPend = (uint32_t)(zPend - 2) / 2;
+ /* bbox: unsigned div is easier */
+ };
+ zPend = 0;
+ }
+
+ mtfv[wr] = EOB;
+ wr++;
+ s->mtfFreq[EOB]++;
+
+ s->nMTF = wr;
+}
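
The core transform here is move-to-front: each symbol is replaced by its
position in a recency list and then moved to the list head, so runs of one
byte become runs of zeros, which the code above collapses into RUNA/RUNB
codes. A minimal standalone sketch (illustration only, not part of this
commit):

#include <stdio.h>

int main(void)
{
	unsigned char list[256];
	const unsigned char* in = (const unsigned char*)"aabbbac";
	int i, j;

	for (i = 0; i < 256; i++) list[i] = (unsigned char)i;

	for (i = 0; in[i]; i++) {
		for (j = 0; list[j] != in[i]; j++)
			continue;              /* find current position */
		printf("%d ", j);
		for (; j > 0; j--)             /* move to front */
			list[j] = list[j - 1];
		list[0] = in[i];
	}
	printf("\n"); /* 97 0 98 0 0 1 99 */
	return 0;
}
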
+
+
+/*---------------------------------------------------*/
+#define BZ_LESSER_ICOST 0
+#define BZ_GREATER_ICOST 15
+
+static NOINLINE
+void sendMTFValues(EState* s)
+{
+ int32_t v, t, i, j, gs, ge, totc, bt, bc, iter;
+ int32_t nSelectors, alphaSize, minLen, maxLen, selCtr;
+ int32_t nGroups, nBytes;
+
+ /*
+ * uint8_t len[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
+ * is a global since the decoder also needs it.
+ *
+ * int32_t code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
+ * int32_t rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
+ * are also globals only used in this proc.
+ * Made global to keep stack frame size small.
+ */
+#define code sendMTFValues__code
+#define rfreq sendMTFValues__rfreq
+#define len_pack sendMTFValues__len_pack
+
+ uint16_t cost[BZ_N_GROUPS];
+ int32_t fave[BZ_N_GROUPS];
+
+ uint16_t* mtfv = s->mtfv;
+
+ alphaSize = s->nInUse + 2;
+ for (t = 0; t < BZ_N_GROUPS; t++)
+ for (v = 0; v < alphaSize; v++)
+ s->len[t][v] = BZ_GREATER_ICOST;
+
+ /*--- Decide how many coding tables to use ---*/
+ AssertH(s->nMTF > 0, 3001);
+ if (s->nMTF < 200) nGroups = 2; else
+ if (s->nMTF < 600) nGroups = 3; else
+ if (s->nMTF < 1200) nGroups = 4; else
+ if (s->nMTF < 2400) nGroups = 5; else
+ nGroups = 6;
+
+ /*--- Generate an initial set of coding tables ---*/
+ {
+ int32_t nPart, remF, tFreq, aFreq;
+
+ nPart = nGroups;
+ remF = s->nMTF;
+ gs = 0;
+ while (nPart > 0) {
+ tFreq = remF / nPart;
+ ge = gs - 1;
+ aFreq = 0;
+ while (aFreq < tFreq && ge < alphaSize-1) {
+ ge++;
+ aFreq += s->mtfFreq[ge];
+ }
+
+ if (ge > gs
+ && nPart != nGroups && nPart != 1
+ && ((nGroups - nPart) % 2 == 1) /* bbox: can this be replaced by x & 1? */
+ ) {
+ aFreq -= s->mtfFreq[ge];
+ ge--;
+ }
+
+ for (v = 0; v < alphaSize; v++)
+ if (v >= gs && v <= ge)
+ s->len[nPart-1][v] = BZ_LESSER_ICOST;
+ else
+ s->len[nPart-1][v] = BZ_GREATER_ICOST;
+
+ nPart--;
+ gs = ge + 1;
+ remF -= aFreq;
+ }
+ }
+
+ /*
+ * Iterate up to BZ_N_ITERS times to improve the tables.
+ */
+ for (iter = 0; iter < BZ_N_ITERS; iter++) {
+ for (t = 0; t < nGroups; t++)
+ fave[t] = 0;
+
+ for (t = 0; t < nGroups; t++)
+ for (v = 0; v < alphaSize; v++)
+ s->rfreq[t][v] = 0;
+
+#if CONFIG_BZIP2_FEATURE_SPEED >= 5
+ /*
+ * Set up an auxiliary length table which is used to fast-track
+ * the common case (nGroups == 6).
+ */
+ if (nGroups == 6) {
+ for (v = 0; v < alphaSize; v++) {
+ s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
+ s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
+ s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
+ }
+ }
+#endif
+ nSelectors = 0;
+ totc = 0;
+ gs = 0;
+ while (1) {
+ /*--- Set group start & end marks. --*/
+ if (gs >= s->nMTF)
+ break;
+ ge = gs + BZ_G_SIZE - 1;
+ if (ge >= s->nMTF)
+ ge = s->nMTF-1;
+
+ /*
+ * Calculate the cost of this group as coded
+ * by each of the coding tables.
+ */
+ for (t = 0; t < nGroups; t++)
+ cost[t] = 0;
+#if CONFIG_BZIP2_FEATURE_SPEED >= 5
+ if (nGroups == 6 && 50 == ge-gs+1) {
+ /*--- fast track the common case ---*/
+ register uint32_t cost01, cost23, cost45;
+ register uint16_t icv;
+ cost01 = cost23 = cost45 = 0;
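+				/*
+				 * Each 32-bit accumulator sums two 16-bit costs in
+				 * parallel; a group is 50 symbols of length <= 17
+				 * bits each, so the halves cannot carry into each
+				 * other.
+				 */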
+#define BZ_ITER(nn) \
+ icv = mtfv[gs+(nn)]; \
+ cost01 += s->len_pack[icv][0]; \
+ cost23 += s->len_pack[icv][1]; \
+ cost45 += s->len_pack[icv][2];
+ BZ_ITER(0); BZ_ITER(1); BZ_ITER(2); BZ_ITER(3); BZ_ITER(4);
+ BZ_ITER(5); BZ_ITER(6); BZ_ITER(7); BZ_ITER(8); BZ_ITER(9);
+ BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
+ BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
+ BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
+ BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
+ BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
+ BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
+ BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
+ BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);
+#undef BZ_ITER
+ cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
+ cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
+ cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;
+
+ } else
+#endif
+ {
+ /*--- slow version which correctly handles all situations ---*/
+ for (i = gs; i <= ge; i++) {
+ uint16_t icv = mtfv[i];
+ for (t = 0; t < nGroups; t++)
+ cost[t] += s->len[t][icv];
+ }
+ }
+ /*
+ * Find the coding table which is best for this group,
+ * and record its identity in the selector table.
+ */
+ /*bc = 999999999;*/
+ /*bt = -1;*/
+ bc = cost[0];
+ bt = 0;
+ for (t = 1 /*0*/; t < nGroups; t++) {
+ if (cost[t] < bc) {
+ bc = cost[t];
+ bt = t;
+ }
+ }
+ totc += bc;
+ fave[bt]++;
+ s->selector[nSelectors] = bt;
+ nSelectors++;
+
+ /*
+ * Increment the symbol frequencies for the selected table.
+ */
+/* 1% faster compress. +800 bytes */
+#if CONFIG_BZIP2_FEATURE_SPEED >= 4
+ if (nGroups == 6 && 50 == ge-gs+1) {
+ /*--- fast track the common case ---*/
+#define BZ_ITUR(nn) s->rfreq[bt][mtfv[gs + (nn)]]++
+ BZ_ITUR(0); BZ_ITUR(1); BZ_ITUR(2); BZ_ITUR(3); BZ_ITUR(4);
+ BZ_ITUR(5); BZ_ITUR(6); BZ_ITUR(7); BZ_ITUR(8); BZ_ITUR(9);
+ BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
+ BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
+ BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
+ BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
+ BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
+ BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
+ BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
+ BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);
+#undef BZ_ITUR
+ gs = ge + 1;
+ } else
+#endif
+ {
+ /*--- slow version which correctly handles all situations ---*/
+ while (gs <= ge) {
+ s->rfreq[bt][mtfv[gs]]++;
+ gs++;
+ }
+ /* already is: gs = ge + 1; */
+ }
+ }
+
+ /*
+ * Recompute the tables based on the accumulated frequencies.
+ */
+ /* maxLen was changed from 20 to 17 in bzip2-1.0.3. See
+ * comment in huffman.c for details. */
+ for (t = 0; t < nGroups; t++)
+ BZ2_hbMakeCodeLengths(s, &(s->len[t][0]), &(s->rfreq[t][0]), alphaSize, 17 /*20*/);
+ }
+
+ AssertH(nGroups < 8, 3002);
+ AssertH(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZ_G_SIZE)), 3003);
+
+ /*--- Compute MTF values for the selectors. ---*/
+ {
+ uint8_t pos[BZ_N_GROUPS], ll_i, tmp2, tmp;
+
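+		/*
+		 * The selector sequence is itself move-to-front coded:
+		 * pos[] is the MTF list of table numbers, and selectorMtf[i]
+		 * records the position at which selector i was found.
+		 */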
+ for (i = 0; i < nGroups; i++)
+ pos[i] = i;
+ for (i = 0; i < nSelectors; i++) {
+ ll_i = s->selector[i];
+ j = 0;
+ tmp = pos[j];
+ while (ll_i != tmp) {
+ j++;
+ tmp2 = tmp;
+ tmp = pos[j];
+ pos[j] = tmp2;
+			}
+ pos[0] = tmp;
+ s->selectorMtf[i] = j;
+ }
+	}
+
+ /*--- Assign actual codes for the tables. --*/
+ for (t = 0; t < nGroups; t++) {
+ minLen = 32;
+ maxLen = 0;
+ for (i = 0; i < alphaSize; i++) {
+ if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
+ if (s->len[t][i] < minLen) minLen = s->len[t][i];
+ }
+ AssertH(!(maxLen > 17 /*20*/), 3004);
+ AssertH(!(minLen < 1), 3005);
+ BZ2_hbAssignCodes(&(s->code[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize);
+ }
+
+ /*--- Transmit the mapping table. ---*/
+ {
+ /* bbox: optimized a bit more than in bzip2 */
+ int inUse16 = 0;
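+		/*
+		 * The 256-entry inUse[] map is sent as a two-level bitmap:
+		 * a 16-bit coarse map with one bit per group of 16 symbols,
+		 * followed by a 16-bit fine map for each nonempty group.
+		 * The word-wide ORs below test "any byte set in this group
+		 * of 16" without a byte loop.
+		 */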
+ for (i = 0; i < 16; i++) {
+ if (sizeof(long) <= 4) {
+ inUse16 = inUse16*2 +
+ ((*(uint32_t*)&(s->inUse[i * 16 + 0])
+ | *(uint32_t*)&(s->inUse[i * 16 + 4])
+ | *(uint32_t*)&(s->inUse[i * 16 + 8])
+ | *(uint32_t*)&(s->inUse[i * 16 + 12])) != 0);
+ } else { /* Our CPU can do better */
+ inUse16 = inUse16*2 +
+ ((*(uint64_t*)&(s->inUse[i * 16 + 0])
+ | *(uint64_t*)&(s->inUse[i * 16 + 8])) != 0);
+ }
+ }
+
+ nBytes = s->numZ;
+ bsW(s, 16, inUse16);
+
+ inUse16 <<= (sizeof(int)*8 - 16); /* move 15th bit into sign bit */
+ for (i = 0; i < 16; i++) {
+ if (inUse16 < 0) {
+ unsigned v16 = 0;
+ for (j = 0; j < 16; j++)
+ v16 = v16*2 + s->inUse[i * 16 + j];
+ bsW(s, 16, v16);
+ }
+ inUse16 <<= 1;
+ }
+ }
+
+ /*--- Now the selectors. ---*/
+ nBytes = s->numZ;
+ bsW(s, 3, nGroups);
+ bsW(s, 15, nSelectors);
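+	/*
+	 * Each MTF-coded selector value j is sent in unary:
+	 * j '1' bits followed by a terminating '0'.
+	 */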
+ for (i = 0; i < nSelectors; i++) {
+ for (j = 0; j < s->selectorMtf[i]; j++)
+ bsW(s, 1, 1);
+ bsW(s, 1, 0);
+ }
+
+ /*--- Now the coding tables. ---*/
+ nBytes = s->numZ;
+
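+	/*
+	 * Code lengths are delta coded per table: a 5-bit starting
+	 * length, then for each symbol a run of '10' (increment) or
+	 * '11' (decrement) pairs, ended by a '0' bit once the current
+	 * length matches.
+	 */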
+ for (t = 0; t < nGroups; t++) {
+ int32_t curr = s->len[t][0];
+ bsW(s, 5, curr);
+ for (i = 0; i < alphaSize; i++) {
+			while (curr < s->len[t][i]) { bsW(s, 2, 2); curr++; /* 10 */ }
+			while (curr > s->len[t][i]) { bsW(s, 2, 3); curr--; /* 11 */ }
+ bsW(s, 1, 0);
+ }
+ }
+
+ /*--- And finally, the block data proper ---*/
+ nBytes = s->numZ;
+ selCtr = 0;
+ gs = 0;
+ while (1) {
+ if (gs >= s->nMTF)
+ break;
+ ge = gs + BZ_G_SIZE - 1;
+ if (ge >= s->nMTF)
+ ge = s->nMTF-1;
+ AssertH(s->selector[selCtr] < nGroups, 3006);
+
+/* Costs 1300 bytes and is _slower_ (on Intel Core 2) */
+#if 0
+ if (nGroups == 6 && 50 == ge-gs+1) {
+ /*--- fast track the common case ---*/
+ uint16_t mtfv_i;
+ uint8_t* s_len_sel_selCtr = &(s->len[s->selector[selCtr]][0]);
+ int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
+#define BZ_ITAH(nn) \
+ mtfv_i = mtfv[gs+(nn)]; \
+ bsW(s, s_len_sel_selCtr[mtfv_i], s_code_sel_selCtr[mtfv_i])
+ BZ_ITAH(0); BZ_ITAH(1); BZ_ITAH(2); BZ_ITAH(3); BZ_ITAH(4);
+ BZ_ITAH(5); BZ_ITAH(6); BZ_ITAH(7); BZ_ITAH(8); BZ_ITAH(9);
+ BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
+ BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
+ BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
+ BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
+ BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
+ BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
+ BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
+ BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);
+#undef BZ_ITAH
+ gs = ge+1;
+ } else
+#endif
+ {
+ /*--- slow version which correctly handles all situations ---*/
+			/* code is a bit bigger, but moves the multiply out of the loop */
+ uint8_t* s_len_sel_selCtr = &(s->len [s->selector[selCtr]][0]);
+ int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
+ while (gs <= ge) {
+ bsW(s,
+ s_len_sel_selCtr[mtfv[gs]],
+ s_code_sel_selCtr[mtfv[gs]]
+ );
+ gs++;
+ }
+ /* already is: gs = ge+1; */
+ }
+ selCtr++;
+ }
+ AssertH(selCtr == nSelectors, 3007);
+#undef code
+#undef rfreq
+#undef len_pack
+}
+
+
+/*---------------------------------------------------*/
+static
+void BZ2_compressBlock(EState* s, int is_last_block)
+{
+ if (s->nblock > 0) {
+ BZ_FINALISE_CRC(s->blockCRC);
+ s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
+ s->combinedCRC ^= s->blockCRC;
+ if (s->blockNo > 1)
+ s->numZ = 0;
+
+ BZ2_blockSort(s);
+ }
+
+ s->zbits = &((uint8_t*)s->arr2)[s->nblock];
+
+ /*-- If this is the first block, create the stream header. --*/
+ if (s->blockNo == 1) {
+ BZ2_bsInitWrite(s);
+ /*bsPutU8(s, BZ_HDR_B);*/
+ /*bsPutU8(s, BZ_HDR_Z);*/
+ /*bsPutU8(s, BZ_HDR_h);*/
+ /*bsPutU8(s, BZ_HDR_0 + s->blockSize100k);*/
+ bsPutU32(s, BZ_HDR_BZh0 + s->blockSize100k);
+ }
+
+ if (s->nblock > 0) {
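+		/*-- Block header: the 48-bit magic 0x314159265359
+		(the first twelve decimal digits of pi). --*/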
+ /*bsPutU8(s, 0x31);*/
+ /*bsPutU8(s, 0x41);*/
+ /*bsPutU8(s, 0x59);*/
+ /*bsPutU8(s, 0x26);*/
+ bsPutU32(s, 0x31415926);
+ /*bsPutU8(s, 0x53);*/
+ /*bsPutU8(s, 0x59);*/
+ bsPutU16(s, 0x5359);
+
+ /*-- Now the block's CRC, so it is in a known place. --*/
+ bsPutU32(s, s->blockCRC);
+
+ /*
+ * Now a single bit indicating (non-)randomisation.
+ * As of version 0.9.5, we use a better sorting algorithm
+ * which makes randomisation unnecessary. So always set
+ * the randomised bit to 'no'. Of course, the decoder
+ * still needs to be able to handle randomised blocks
+ * so as to maintain backwards compatibility with
+ * older versions of bzip2.
+ */
+ bsW(s, 1, 0);
+
+ bsW(s, 24, s->origPtr);
+ generateMTFValues(s);
+ sendMTFValues(s);
+ }
+
+ /*-- If this is the last block, add the stream trailer. --*/
+ if (is_last_block) {
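+		/*-- Stream trailer: the 48-bit magic 0x177245385090
+		(the first twelve decimal digits of sqrt(pi)). --*/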
+ /*bsPutU8(s, 0x17);*/
+ /*bsPutU8(s, 0x72);*/
+ /*bsPutU8(s, 0x45);*/
+ /*bsPutU8(s, 0x38);*/
+ bsPutU32(s, 0x17724538);
+ /*bsPutU8(s, 0x50);*/
+ /*bsPutU8(s, 0x90);*/
+ bsPutU16(s, 0x5090);
+ bsPutU32(s, s->combinedCRC);
+ bsFinishWrite(s);
+ }
+}
+
+
+/*-------------------------------------------------------------*/
+/*--- end compress.c ---*/
+/*-------------------------------------------------------------*/
diff --git a/archival/libarchive/bz/huffman.c b/archival/libarchive/bz/huffman.c
new file mode 100644
index 000000000..676b1af66
--- /dev/null
+++ b/archival/libarchive/bz/huffman.c
@@ -0,0 +1,229 @@
+/*
+ * bzip2 is written by Julian Seward <jseward@bzip.org>.
+ * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
+ * See README and LICENSE files in this directory for more information.
+ */
+
+/*-------------------------------------------------------------*/
+/*--- Huffman coding low-level stuff ---*/
+/*--- huffman.c ---*/
+/*-------------------------------------------------------------*/
+
+/* ------------------------------------------------------------------
+This file is part of bzip2/libbzip2, a program and library for
+lossless, block-sorting data compression.
+
+bzip2/libbzip2 version 1.0.4 of 20 December 2006
+Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
+
+Please read the WARNING, DISCLAIMER and PATENTS sections in the
+README file.
+
+This program is released under the terms of the license contained
+in the file LICENSE.
+------------------------------------------------------------------ */
+
+/* #include "bzlib_private.h" */
+
+/*---------------------------------------------------*/
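+/*
+ * Tree node weights pack the (scaled) symbol frequency into the upper
+ * 24 bits and the subtree depth into the low 8 bits, so one int32_t
+ * comparison orders the heap, and ADDWEIGHTS can combine two nodes:
+ * frequencies add, depth becomes 1 + the deeper child's depth.
+ */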
+#define WEIGHTOF(zz0) ((zz0) & 0xffffff00)
+#define DEPTHOF(zz1) ((zz1) & 0x000000ff)
+#define MYMAX(zz2,zz3) ((zz2) > (zz3) ? (zz2) : (zz3))
+
+#define ADDWEIGHTS(zw1,zw2) \
+ (WEIGHTOF(zw1)+WEIGHTOF(zw2)) | \
+ (1 + MYMAX(DEPTHOF(zw1),DEPTHOF(zw2)))
+
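+/*
+ * heap[] is a 1-based binary min-heap ordered by weight[]; UPHEAP
+ * sifts the element at index z up towards the root (entry 0 holds
+ * weight 0 and acts as a sentinel).
+ */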
+#define UPHEAP(z) \
+{ \
+ int32_t zz, tmp; \
+ zz = z; \
+ tmp = heap[zz]; \
+ while (weight[tmp] < weight[heap[zz >> 1]]) { \
+ heap[zz] = heap[zz >> 1]; \
+ zz >>= 1; \
+ } \
+ heap[zz] = tmp; \
+}
+
+
+/* 90 bytes, 0.3% of overall compress speed */
+#if CONFIG_BZIP2_FEATURE_SPEED >= 1
+
+/* macro works better than inline (gcc 4.2.1) */
+#define DOWNHEAP1(heap, weight, nHeap) \
+{ \
+ int32_t zz, yy, tmp; \
+ zz = 1; \
+ tmp = heap[zz]; \
+ while (1) { \
+ yy = zz << 1; \
+ if (yy > nHeap) \
+ break; \
+ if (yy < nHeap \
+ && weight[heap[yy+1]] < weight[heap[yy]]) \
+ yy++; \
+ if (weight[tmp] < weight[heap[yy]]) \
+ break; \
+ heap[zz] = heap[yy]; \
+ zz = yy; \
+ } \
+ heap[zz] = tmp; \
+}
+
+#else
+
+static
+void DOWNHEAP1(int32_t *heap, int32_t *weight, int32_t nHeap)
+{
+ int32_t zz, yy, tmp;
+ zz = 1;
+ tmp = heap[zz];
+ while (1) {
+ yy = zz << 1;
+ if (yy > nHeap)
+ break;
+ if (yy < nHeap
+ && weight[heap[yy + 1]] < weight[heap[yy]])
+ yy++;
+ if (weight[tmp] < weight[heap[yy]])
+ break;
+ heap[zz] = heap[yy];
+ zz = yy;
+ }
+ heap[zz] = tmp;
+}
+
+#endif
+
+/*---------------------------------------------------*/
+static
+void BZ2_hbMakeCodeLengths(EState *s,
+ uint8_t *len,
+ int32_t *freq,
+ int32_t alphaSize,
+ int32_t maxLen)
+{
+ /*
+ * Nodes and heap entries run from 1. Entry 0
+ * for both the heap and nodes is a sentinel.
+ */
+ int32_t nNodes, nHeap, n1, n2, i, j, k;
+ Bool tooLong;
+
+ /* bbox: moved to EState to save stack
+ int32_t heap [BZ_MAX_ALPHA_SIZE + 2];
+ int32_t weight[BZ_MAX_ALPHA_SIZE * 2];
+ int32_t parent[BZ_MAX_ALPHA_SIZE * 2];
+ */
+#define heap (s->BZ2_hbMakeCodeLengths__heap)
+#define weight (s->BZ2_hbMakeCodeLengths__weight)
+#define parent (s->BZ2_hbMakeCodeLengths__parent)
+
+ for (i = 0; i < alphaSize; i++)
+ weight[i+1] = (freq[i] == 0 ? 1 : freq[i]) << 8;
+
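+	/*
+	 * Build a Huffman tree over the (scaled) frequencies. If any
+	 * resulting code is longer than maxLen, halve the frequencies
+	 * and rebuild; see the comment near the bottom of this loop.
+	 */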
+ while (1) {
+ nNodes = alphaSize;
+ nHeap = 0;
+
+ heap[0] = 0;
+ weight[0] = 0;
+ parent[0] = -2;
+
+ for (i = 1; i <= alphaSize; i++) {
+ parent[i] = -1;
+ nHeap++;
+ heap[nHeap] = i;
+ UPHEAP(nHeap);
+ }
+
+ AssertH(nHeap < (BZ_MAX_ALPHA_SIZE+2), 2001);
+
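+		/*
+		 * Standard Huffman construction: repeatedly pop the two
+		 * lightest nodes and merge them under a new parent until
+		 * a single tree remains.
+		 */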
+ while (nHeap > 1) {
+ n1 = heap[1]; heap[1] = heap[nHeap]; nHeap--; DOWNHEAP1(heap, weight, nHeap);
+ n2 = heap[1]; heap[1] = heap[nHeap]; nHeap--; DOWNHEAP1(heap, weight, nHeap);
+ nNodes++;
+ parent[n1] = parent[n2] = nNodes;
+ weight[nNodes] = ADDWEIGHTS(weight[n1], weight[n2]);
+ parent[nNodes] = -1;
+ nHeap++;
+ heap[nHeap] = nNodes;
+ UPHEAP(nHeap);
+ }
+
+ AssertH(nNodes < (BZ_MAX_ALPHA_SIZE * 2), 2002);
+
+ tooLong = False;
+ for (i = 1; i <= alphaSize; i++) {
+ j = 0;
+ k = i;
+ while (parent[k] >= 0) {
+ k = parent[k];
+ j++;
+ }
+ len[i-1] = j;
+ if (j > maxLen)
+ tooLong = True;
+ }
+
+ if (!tooLong)
+ break;
+
+ /* 17 Oct 04: keep-going condition for the following loop used
+ to be 'i < alphaSize', which missed the last element,
+ theoretically leading to the possibility of the compressor
+ looping. However, this count-scaling step is only needed if
+ one of the generated Huffman code words is longer than
+ maxLen, which up to and including version 1.0.2 was 20 bits,
+ which is extremely unlikely. In version 1.0.3 maxLen was
+ changed to 17 bits, which has minimal effect on compression
+ ratio, but does mean this scaling step is used from time to
+ time, enough to verify that it works.
+
+ This means that bzip2-1.0.3 and later will only produce
+ Huffman codes with a maximum length of 17 bits. However, in
+ order to preserve backwards compatibility with bitstreams
+ produced by versions pre-1.0.3, the decompressor must still
+ handle lengths of up to 20. */
+
+ for (i = 1; i <= alphaSize; i++) {
+ j = weight[i] >> 8;
+ /* bbox: yes, it is a signed division.
+ * don't replace with shift! */
+ j = 1 + (j / 2);
+ weight[i] = j << 8;
+ }
+ }
+#undef heap
+#undef weight
+#undef parent
+}
+
+
+/*---------------------------------------------------*/
+static
+void BZ2_hbAssignCodes(int32_t *code,
+ uint8_t *length,
+ int32_t minLen,
+ int32_t maxLen,
+ int32_t alphaSize)
+{
+ int32_t n, vec, i;
+
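+	/*
+	 * Assign canonical codes: symbols are numbered in order of
+	 * increasing code length, consecutively within each length,
+	 * with vec doubled at each length step. E.g. lengths
+	 * {2,2,3,3} give codes 00, 01, 100, 101.
+	 */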
+ vec = 0;
+ for (n = minLen; n <= maxLen; n++) {
+ for (i = 0; i < alphaSize; i++) {
+ if (length[i] == n) {
+ code[i] = vec;
+ vec++;
+			}
+ }
+ vec <<= 1;
+ }
+}
+
+
+/*-------------------------------------------------------------*/
+/*--- end huffman.c ---*/
+/*-------------------------------------------------------------*/