path: root/archival/libunarchive/rangecoder.h
#include "libbb.h"

#ifdef CONFIG_FEATURE_LZMA_FAST
#  define speed_inline ATTRIBUTE_ALWAYS_INLINE
#else
#  define speed_inline
#endif
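/* With CONFIG_FEATURE_LZMA_FAST the small, hot helpers below are forced
 * inline, trading a somewhat larger binary for faster decompression. */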


typedef struct {
	int fd;			/* input file descriptor */
	uint8_t *ptr;		/* next unread byte in buffer */
	uint8_t *buffer;	/* start of the read buffer */
	uint8_t *buffer_end;	/* one past the last valid byte */
	int buffer_size;	/* allocated size; rc_read() stores the byte count actually read */
	uint32_t code;		/* code word currently being decoded */
	uint32_t range;		/* current width of the coding interval */
	uint32_t bound;		/* split point computed by rc_is_bit_0_helper() */
} rc_t;


#define RC_TOP_BITS 24
#define RC_MOVE_BITS 5
#define RC_MODEL_TOTAL_BITS 11
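
/* How these fit together: each adaptive bit has a uint16_t probability
 * scaled to 1 << RC_MODEL_TOTAL_BITS (2048).  The decoder splits the
 * current range at bound = prob * (range >> RC_MODEL_TOTAL_BITS) and
 * compares code against that split; whenever range drops below
 * 1 << RC_TOP_BITS, range and code are scaled up by 256 and another
 * input byte is shifted into code. */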


/* Called twice: once at startup (from rc_init()) and once in rc_do_normalize() */
static void rc_read(rc_t * rc)
{
	rc->buffer_size = read(rc->fd, rc->buffer, rc->buffer_size);
	if (rc->buffer_size <= 0)
		bb_error_msg_and_die("unexpected EOF");
	rc->ptr = rc->buffer;
	rc->buffer_end = rc->buffer + rc->buffer_size;
}

/* Called once */
static void rc_init(rc_t * rc, int fd, int buffer_size)
{
	int i;

	rc->fd = fd;
	rc->buffer = xmalloc(buffer_size);
	rc->buffer_size = buffer_size;
	rc->buffer_end = rc->buffer + rc->buffer_size;
	rc->ptr = rc->buffer_end;

	rc->code = 0;
	rc->range = 0xFFFFFFFF;
	for (i = 0; i < 5; i++) {
		if (rc->ptr >= rc->buffer_end)
			rc_read(rc);
		rc->code = (rc->code << 8) | *rc->ptr++;
	}
}
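
/* Note: rc_init() shifts in 5 bytes although code is only 32 bits wide,
 * so the very first input byte is discarded again; LZMA streams
 * conventionally start their range-coded data with a zero byte. */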

/* Called once. TODO: bb_maybe_free() */
static ATTRIBUTE_ALWAYS_INLINE void rc_free(rc_t * rc)
{
	if (ENABLE_FEATURE_CLEAN_UP)
		free(rc->buffer);
}

/* Called twice, but one callsite is in speed_inline'd rc_is_bit_0_helper() */
static void rc_do_normalize(rc_t * rc)
{
	if (rc->ptr >= rc->buffer_end)
		rc_read(rc);
	rc->range <<= 8;
	rc->code = (rc->code << 8) | *rc->ptr++;
}
static ATTRIBUTE_ALWAYS_INLINE void rc_normalize(rc_t * rc)
{
	if (rc->range < (1 << RC_TOP_BITS)) {
		rc_do_normalize(rc);
	}
}

/* Called 9 times */
/* Why does rc_is_bit_0_helper() exist?
 * Because we want to always expose (rc->code < rc->bound) to the optimizer.
 */
static speed_inline uint32_t rc_is_bit_0_helper(rc_t * rc, uint16_t * p)
{
	rc_normalize(rc);
	rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
	return rc->bound;
}
static ATTRIBUTE_ALWAYS_INLINE int rc_is_bit_0(rc_t * rc, uint16_t * p)
{
	uint32_t t = rc_is_bit_0_helper(rc, p);
	return rc->code < t;
}
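
/* The two updates below move *p by 1/(1 << RC_MOVE_BITS), i.e. 1/32, of
 * the remaining distance: toward (1 << RC_MODEL_TOTAL_BITS) after a 0 bit,
 * toward zero after a 1 bit, so whichever bit value is seen more often
 * gets a higher predicted probability. */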

/* Called ~10 times, but very small, thus inlined */
static speed_inline void rc_update_bit_0(rc_t * rc, uint16_t * p)
{
	rc->range = rc->bound;
	*p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
}
static speed_inline void rc_update_bit_1(rc_t * rc, uint16_t * p)
{
	rc->range -= rc->bound;
	rc->code -= rc->bound;
	*p -= *p >> RC_MOVE_BITS;
}
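
/* rc_get_bit() decodes one adaptive bit and shifts it into *symbol from
 * the right, so successive bits accumulate most-significant first. */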

/* Called 4 times in unlzma loop */
static int rc_get_bit(rc_t * rc, uint16_t * p, int *symbol)
{
	if (rc_is_bit_0(rc, p)) {
		rc_update_bit_0(rc, p);
		*symbol *= 2;
		return 0;
	} else {
		rc_update_bit_1(rc, p);
		*symbol = *symbol * 2 + 1;
		return 1;
	}
}
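
/* rc_direct_bit() decodes a bit with a fixed probability of 1/2 and no
 * adaptive context: range is simply halved and the bit is taken from
 * whether code lands in the upper or lower half. */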

/* Called once */
static ATTRIBUTE_ALWAYS_INLINE int rc_direct_bit(rc_t * rc)
{
	rc_normalize(rc);
	rc->range >>= 1;
	if (rc->code >= rc->range) {
		rc->code -= rc->range;
		return 1;
	}
	return 0;
}
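
/* rc_bit_tree_decode() walks a complete binary tree of
 * (1 << num_levels) - 1 adaptive probabilities stored at p[1..]:
 * *symbol collects the path taken and the final subtraction maps it
 * back into the range [0, 1 << num_levels). */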

/* Called twice */
static speed_inline void
rc_bit_tree_decode(rc_t * rc, uint16_t * p, int num_levels, int *symbol)
{
	int i = num_levels;

	*symbol = 1;
	while (i--)
		rc_get_bit(rc, p + *symbol, symbol);
	*symbol -= 1 << num_levels;
}
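
/*
 * Illustrative sketch only (not taken from the unlzma decoder): a caller
 * decoding an 8-bit value through a hypothetical probability array would
 * do something like
 *
 *	uint16_t probs[1 << 8];	// each entry initialized to (1 << RC_MODEL_TOTAL_BITS) / 2
 *	int sym;
 *	rc_bit_tree_decode(rc, probs, 8, &sym);	// sym ends up in 0..255
 */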

/* vi:set ts=4: */