| author | Isaac Dunham <idunham@lavabit.com> | 2013-04-12 20:08:47 -0500 |
|---|---|---|
| committer | Isaac Dunham <idunham@lavabit.com> | 2013-04-12 20:08:47 -0500 |
| commit | 4ffface11f7857683ddb1f935fb05809821458ab (patch) | |
| tree | a4d16c824fddc8447099b0124dd97598f96659fb | /toys/pending |
| parent | ff8b9ed551bdf942d20553ec6f0b687ba04e0ed9 (diff) | |
| download | toybox-4ffface11f7857683ddb1f935fb05809821458ab.tar.gz | |
xzcat: more cleanup and some shrinking.
- drop __always_inline (roughly 1.5k of binary size)
- remove the xz_dec_bcj_end and memzero macros
  (memeq is left because of negative returns)
- disable XZ_DEC_SINGLE and XZ_DEC_PREALLOC, since we aren't using them
  (160 bytes)
- merge xz_dec_lzma2_end into xz_dec_end
- move the xz_crc32 definition up to where it was declared; see the sketch
  after this list
  (that function does not seem to be duplicated elsewhere in toybox)
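For context, the moved xz_crc32() is the byte-at-a-time, table-driven CRC-32 shown in the diff below, and it relies on xz_crc32_table[] having been filled in beforehand (the source comments say that happens in crc_init()). A minimal sketch of that pairing, assuming the table is built from the standard CRC-32 polynomial 0xEDB88320 as in XZ Embedded; crc_init_sketch() is an illustrative name, while xz_crc32() and xz_crc32_table[] are taken from the diff:

```c
#include <stddef.h>
#include <stdint.h>

/* Table used by xz_crc32(); must be filled once before decoding starts
 * (xzcat.c's comments say crc_init() does this). */
static uint32_t xz_crc32_table[256];

/* Illustrative table fill, assuming the standard little-endian CRC-32
 * polynomial 0xEDB88320 used by XZ Embedded; the real initializer in
 * xzcat.c may differ in detail. */
static void crc_init_sketch(void)
{
  uint32_t i, j, r;

  for (i = 0; i < 256; i++) {
    r = i;
    for (j = 0; j < 8; j++) {
      /* Shift one bit out; XOR in the polynomial when that bit was set. */
      if (r & 1) r = (r >> 1) ^ 0xEDB88320;
      else r >>= 1;
    }
    xz_crc32_table[i] = r;
  }
}

/* The function this commit moves up to its former declaration site:
 * byte-at-a-time, table-driven CRC-32 with an inverted running value. */
uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
{
  crc = ~crc;

  while (size != 0) {
    crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8);
    --size;
  }

  return ~crc;
}
```

Passing 0 as the third argument starts a fresh checksum; passing a previous return value continues it, which matches the contract described in the comment block just above the moved definition.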
Diffstat (limited to 'toys/pending')

| Mode | File | Lines |
|---|---|---|
| -rw-r--r-- | toys/pending/xzcat.c | 99 |

1 file changed, 34 insertions, 65 deletions
```diff
diff --git a/toys/pending/xzcat.c b/toys/pending/xzcat.c
index 8ed36ecc..5e96cf6f 100644
--- a/toys/pending/xzcat.c
+++ b/toys/pending/xzcat.c
@@ -43,9 +43,8 @@ config XZCAT
  *
  * It is possible to enable support only for a subset of the above
  * modes at compile time by defining XZ_DEC_SINGLE, XZ_DEC_PREALLOC,
- * or XZ_DEC_DYNALLOC. The xz_dec kernel module is always compiled
- * with support for all operation modes, but the preboot code may
- * be built with fewer features to minimize code size.
+ * or XZ_DEC_DYNALLOC. xzcat uses only XZ_DEC_DYNALLOC,
+ * so that is the default.
  */
 enum xz_mode {
 	XZ_SINGLE,
@@ -236,7 +235,19 @@ void xz_dec_end(struct xz_dec *s);
  * calculation, the third argument must be zero. To continue the calculation,
  * the previously returned value is passed as the third argument.
  */
-uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
+static uint32_t xz_crc32_table[256];
+
+uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
+{
+	crc = ~crc;
+
+	while (size != 0) {
+		crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8);
+		--size;
+	}
+
+	return ~crc;
+}
 
 /*
  * This must be called before any other xz_* function (but after crc_init())
@@ -281,7 +292,6 @@ uint64_t xz_crc64(const uint8_t *buf, size_t size, uint64_t crc)
 }
 
 // END xz.h
-static uint32_t xz_crc32_table[256];
 static uint8_t in[BUFSIZ];
 static uint8_t out[BUFSIZ];
 
@@ -395,31 +405,12 @@ error:
 
 #define memeq(a, b, size) (memcmp(a, b, size) == 0)
-#define memzero(buf, size) memset(buf, 0, size)
 
 #ifndef min
 # define min(x, y) ((x) < (y) ? (x) : (y))
 #endif
 #define min_t(type, x, y) min(x, y)
 
-/*
- * Some functions have been marked with __always_inline to keep the
- * performance reasonable even when the compiler is optimizing for
- * small code size. You may be able to save a few bytes by #defining
- * __always_inline to plain inline, but don't complain if the code
- * becomes slow.
- *
- * NOTE: System headers on GNU/Linux may #define this macro already,
- * so if you want to change it, you need to #undef it first.
- */
-#ifndef __always_inline
-# ifdef __GNUC__
-#  define __always_inline \
-		inline __attribute__((__always_inline__))
-# else
-#  define __always_inline inline
-# endif
-#endif
 
 /* Inline functions to access unaligned unsigned 32-bit integers */
 #ifndef get_unaligned_le32
@@ -471,13 +462,12 @@ static inline void put_unaligned_be32(uint32_t val, uint8_t *buf)
 # define get_le32 get_unaligned_le32
 #endif
 
-/* If no specific decoding mode is requested, enable support for all modes. */
-#if !defined(XZ_DEC_SINGLE) && !defined(XZ_DEC_PREALLOC) \
-		&& !defined(XZ_DEC_DYNALLOC)
-# define XZ_DEC_SINGLE
-# define XZ_DEC_PREALLOC
-# define XZ_DEC_DYNALLOC
-#endif
+
+#define XZ_DEC_DYNALLOC
+/* DYNALLOC is what we use, but we may want these later.
+#define XZ_DEC_SINGLE
+#define XZ_DEC_PREALLOC
+*/
 
 /*
  * The DEC_IS_foo(mode) macros are used in "if" statements. If only some
@@ -543,9 +533,6 @@ enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s,
 enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
			     struct xz_buf *b);
 
-/* Free the memory allocated for the LZMA2 decoder. */
-void xz_dec_lzma2_end(struct xz_dec_lzma2 *s);
-
 #ifdef XZ_DEC_BCJ
 /*
  * Allocate memory for BCJ decoders. xz_dec_bcj_reset() must be used before
@@ -569,24 +556,10 @@ enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id);
 enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
			   struct xz_dec_lzma2 *lzma2,
			   struct xz_buf *b);
-
-/* Free the memory allocated for the BCJ filters. */
-#define xz_dec_bcj_end(s) free(s)
 #endif
 
 // END "xz_private.h"
 
-uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
-{
-	crc = ~crc;
-
-	while (size != 0) {
-		crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8);
-		--size;
-	}
-
-	return ~crc;
-}
 
@@ -1820,7 +1793,7 @@ static inline int rc_is_finished(const struct rc_dec *rc)
 }
 
 /* Read the next input byte if needed. */
-static __always_inline void rc_normalize(struct rc_dec *rc)
+static inline void rc_normalize(struct rc_dec *rc)
 {
 	if (rc->range < RC_TOP_VALUE) {
 		rc->range <<= RC_SHIFT_BITS;
@@ -1839,7 +1812,7 @@ static __always_inline void rc_normalize(struct rc_dec *rc)
  * of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care,
  * and it generates 10-20 % faster code than GCC 3.x from this file anyway.)
  */
-static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
+static inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
 {
 	uint32_t bound;
 	int bit;
@@ -1861,7 +1834,7 @@ static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
 }
 
 /* Decode a bittree starting from the most significant bit. */
-static __always_inline uint32_t rc_bittree(struct rc_dec *rc,
+static inline uint32_t rc_bittree(struct rc_dec *rc,
				   uint16_t *probs, uint32_t limit)
 {
 	uint32_t symbol = 1;
@@ -1877,7 +1850,7 @@ static __always_inline uint32_t rc_bittree(struct rc_dec *rc,
 }
 
 /* Decode a bittree starting from the least significant bit. */
-static __always_inline void rc_bittree_reverse(struct rc_dec *rc,
+static inline void rc_bittree_reverse(struct rc_dec *rc,
				       uint16_t *probs,
				       uint32_t *dest, uint32_t limit)
 {
@@ -2204,7 +2177,7 @@ static int lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
 		memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp);
 
 		if (s->temp.size + tmp == s->lzma2.compressed) {
-			memzero(s->temp.buf + s->temp.size + tmp,
+			memset(s->temp.buf + s->temp.size + tmp, 0,
					sizeof(s->temp.buf) - s->temp.size - tmp);
 
			s->rc.in_limit = s->temp.size + tmp;
@@ -2506,13 +2479,6 @@ enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
 	return XZ_OK;
 }
 
-void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
-{
-	if (DEC_IS_MULTI(s->dict.mode))
-		free(s->dict.buf);
-
-	free(s);
-}
 
 /*
  * .xz Stream decoder
  */
@@ -3342,7 +3308,7 @@ struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
 
 error_lzma2:
 #ifdef XZ_DEC_BCJ
-	xz_dec_bcj_end(s->bcj);
+	free(s->bcj);
 error_bcj:
 #endif
 	free(s);
@@ -3355,8 +3321,8 @@ void xz_dec_reset(struct xz_dec *s)
 	s->allow_buf_error = 0;
 	s->pos = 0;
 	s->crc = 0;
-	memzero(&s->block, sizeof(s->block));
-	memzero(&s->index, sizeof(s->index));
+	memset(&s->block, 0, sizeof(s->block));
+	memset(&s->index, 0, sizeof(s->index));
 	s->temp.pos = 0;
 	s->temp.size = STREAM_HEADER_SIZE;
 }
@@ -3364,9 +3330,12 @@ void xz_dec_reset(struct xz_dec *s)
 void xz_dec_end(struct xz_dec *s)
 {
 	if (s != NULL) {
-		xz_dec_lzma2_end(s->lzma2);
+		if (DEC_IS_MULTI((s->lzma2)->dict.mode))
+			free((s->lzma2)->dict.buf);
+		free(s->lzma2);
+
 #ifdef XZ_DEC_BCJ
-		xz_dec_bcj_end(s->bcj);
+		free(s->bcj);
 #endif
 		free(s);
 	}
```
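With teardown now centralized, the only entry points a caller touches are xz_dec_init(), xz_dec_run(), and xz_dec_end(). A hypothetical driver loop under the commit's DYNALLOC-only configuration is sketched below; xz_dec_run(), struct xz_buf, XZ_DYNALLOC, XZ_OK, and XZ_STREAM_END come from the XZ Embedded API embedded in xzcat.c, the in[]/out[] buffers are the BUFSIZ-sized statics visible in the diff, and the function name, dictionary cap, and error handling are illustrative assumptions, not the commit's actual main loop:

```c
/* Hypothetical driver loop, assuming it lives inside xzcat.c so that the
 * XZ Embedded declarations (struct xz_dec, struct xz_buf, enum xz_ret,
 * XZ_DYNALLOC, XZ_OK, XZ_STREAM_END, xz_dec_init/xz_dec_run/xz_dec_end)
 * and the static in[BUFSIZ]/out[BUFSIZ] buffers shown in the diff are all
 * in scope. The 64 MiB dictionary cap and error handling are illustrative. */
static int decompress_stream(void)
{
  struct xz_buf b;
  struct xz_dec *s;
  enum xz_ret ret;

  /* DYNALLOC: the decoder allocates the dictionary itself, growing it on
   * demand up to the limit given here. */
  s = xz_dec_init(XZ_DYNALLOC, 1 << 26);
  if (!s) return 1;

  b.in = in;   b.in_pos = 0;  b.in_size = 0;
  b.out = out; b.out_pos = 0; b.out_size = BUFSIZ;

  do {
    /* Refill the input buffer whenever the decoder has consumed it. */
    if (b.in_pos == b.in_size) {
      b.in_size = fread(in, 1, BUFSIZ, stdin);
      b.in_pos = 0;
    }

    ret = xz_dec_run(s, &b);

    /* Flush decoded bytes when the output buffer fills or the stream ends. */
    if (b.out_pos == b.out_size || ret == XZ_STREAM_END) {
      fwrite(out, 1, b.out_pos, stdout);
      b.out_pos = 0;
    }
  } while (ret == XZ_OK);

  /* Single teardown point after this commit: frees the LZMA2 state, its
   * dictionary (in multi-call modes), the BCJ state if built in, and the
   * xz_dec itself. */
  xz_dec_end(s);

  return ret == XZ_STREAM_END ? 0 : 1;
}
```

Because XZ_DEC_SINGLE and XZ_DEC_PREALLOC are now compiled out, DYNALLOC is the only mode such a caller can request, and the matching dictionary free happens inside xz_dec_end() rather than in a separate xz_dec_lzma2_end().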