// Optimize for 64 bits. We might want to replace this for 32-bit machines.
typedef uint64_t bitreservoir_t;
typedef uint32_t bitreservoir_fill_t;

// Read one big-endian 32-bit fill word from SOURCE.
//
// The previous implementation did ntohl(*(bitreservoir_fill_t*)source),
// which is undefined behavior on two counts: it accesses uint8_t data
// through an incompatible pointer type (strict-aliasing violation), and
// SOURCE need not be 4-byte aligned (byte_read_ptr advances in 4-byte
// steps from an arbitrary buffer position). Assembling the value byte by
// byte is well-defined, works on any alignment, is endian-independent
// (so ntohl and <arpa/inet.h> are no longer needed), and modern compilers
// recognize the idiom and emit a single load plus byte swap.
static inline bitreservoir_fill_t read_bitreservoir_fill(const uint8_t* source)
{
	return ((bitreservoir_fill_t)source[0] << 24) |
	       ((bitreservoir_fill_t)source[1] << 16) |
	       ((bitreservoir_fill_t)source[2] << 8) |
	       (bitreservoir_fill_t)source[3];
}
// Widths in bits of the two reservoir types (64 and 32, respectively).
static const unsigned BITRESERVOIR_SIZE = 8 * sizeof(bitreservoir_t);
static const unsigned BITRESERVOIR_FILL_SIZE = 8 * sizeof(bitreservoir_fill_t);
// Capacity in bytes of the medium-term byte reservoir.
static const unsigned BYTERESERVOIR_SIZE = 4096;
26 // A data source for efficient reading of bit-level data.
28 // Short-term bit reservoir; holds up to 64 bits. When it's empty,
29 // it needs to get refilled from the medium-term bit reservoir.
31 unsigned bits_available;
33 // Medium-term bit reservoir; holds a few kilobytes of spare data.
34 // When this is empty, it needs to be refilled from the input
37 uint8_t* byte_read_ptr;
38 unsigned bytes_available;
40 // Some clients will purposedly read a bit ahead of the stream, causing
41 // problems at EOF. Thus, the client is allowed to request that we pad
42 // the end stream with a few bytes after the source reports EOF.
43 int padding_bytes_available;
46 input_func_t* input_func;
// Initialize SOURCE so that reads pull data from INPUT_FUNC, which is
// invoked with USERDATA. PADDING_BYTES extra bytes are made available
// after the input reports EOF, for clients that deliberately read
// slightly ahead of the stream (see padding_bytes_available).
void init_bit_source(struct bit_source* source, input_func_t* input_func,
                     unsigned padding_bytes, void* userdata);
// Internal function. Do not use.
// The out-of-line slow path of possibly_refill(): refills the medium-term
// byte reservoir from the input function. Kept separate so the common
// paths of possibly_refill() stay small enough to inline.
void possibly_refill_slow_path(struct bit_source* source, unsigned num_bits);
56 // Make sure there's at least NUM_BITS available in the short-term bit reservoir.
57 // You usually want to call this before read_bits(). The reason it's separate
58 // is that if you want two reads and you know the size of both, it's faster to
59 // refill A+B, read A, read B than refill A, read A, refill B, read B.
60 static inline void possibly_refill(struct bit_source* source, unsigned num_bits)
62 assert(num_bits <= BITRESERVOIR_FILL_SIZE + 1);
64 if (source->bits_available >= num_bits) {
65 // Fast path (~90% of invocations?)
69 // Slower path (~99% of remaining invocations?)
70 assert(source->bits_available + BITRESERVOIR_FILL_SIZE < BITRESERVOIR_SIZE);
71 if (source->bytes_available >= sizeof(bitreservoir_fill_t)) {
72 bitreservoir_fill_t fill = read_bitreservoir_fill(source->byte_read_ptr);
73 source->byte_read_ptr += sizeof(bitreservoir_fill_t);
74 source->bytes_available -= sizeof(bitreservoir_fill_t);
75 source->bits |= (bitreservoir_t)fill << (BITRESERVOIR_SIZE - BITRESERVOIR_FILL_SIZE - source->bits_available);
76 source->bits_available += BITRESERVOIR_FILL_SIZE;
80 // Slow path: Refill from data source.
81 // Should not be inlined, so split into a separate function.
82 possibly_refill_slow_path(source, num_bits);
85 static inline unsigned read_bits(struct bit_source* source, unsigned num_bits)
87 assert(source->bits_available >= num_bits);
88 unsigned ret = (source->bits >> (BITRESERVOIR_SIZE - num_bits));
89 source->bits <<= num_bits;
90 source->bits_available -= num_bits;
94 static inline unsigned peek_bits(struct bit_source* source, unsigned num_bits)
96 assert(source->bits_available >= num_bits);
97 return (source->bits >> (BITRESERVOIR_SIZE - num_bits));
100 #endif /* !defined(_BITSOURCE_H) */