// Optimize for 64 bits. We might want to replace this for 32-bit machines.
typedef uint64_t bitreservoir_t;
typedef uint32_t bitreservoir_fill_t;

// Read one 32-bit big-endian fill word from SOURCE, which may be unaligned.
// Note: We return bitreservoir_t here, so we can get implicit zero extension on amd64.
//
// We assemble the word from individual bytes instead of casting SOURCE to
// bitreservoir_fill_t*: the cast form is undefined behavior on two counts
// (strict aliasing, and a possibly misaligned 32-bit load). GCC and Clang
// recognize this byte-assembly idiom and compile it to a single load plus
// bswap on x86, so the inline asm of the old version is no longer needed.
static inline bitreservoir_t read_bitreservoir_fill(const uint8_t* source)
{
	bitreservoir_fill_t ret =
		((bitreservoir_fill_t)source[0] << 24) |
		((bitreservoir_fill_t)source[1] << 16) |
		((bitreservoir_fill_t)source[2] << 8) |
		((bitreservoir_fill_t)source[3]);
	return ret;
}
// Width of the short-term bit reservoir, in bits (64).
static const unsigned BITRESERVOIR_SIZE = 8 * sizeof(bitreservoir_t);
// Width of one refill word, in bits (32).
static const unsigned BITRESERVOIR_FILL_SIZE = 8 * sizeof(bitreservoir_fill_t);
// Capacity, in bytes, of the medium-term byte reservoir ("a few kilobytes",
// per the struct bit_source comments).
static const unsigned BYTERESERVOIR_SIZE = 4096;
// A data source for efficient reading of bit-level data.

// Short-term bit reservoir; holds up to 64 bits. When it's empty,
// it needs to get refilled from the medium-term bit reservoir.
unsigned bits_available;

// Medium-term bit reservoir; holds a few kilobytes of spare data.
// When this is empty, it needs to be refilled from the input
// callback (input_func below).
uint8_t* byte_read_ptr;
unsigned bytes_available;

// Some clients will purposely read a bit ahead of the stream, causing
// problems at EOF. Thus, the client is allowed to request that we pad
// the end of the stream with a few bytes after the source reports EOF.
int padding_bytes_available;

// Callback used to refill the byte reservoir from the actual input.
// NOTE(review): input_func_t is declared elsewhere; its exact contract
// (return value on EOF, etc.) is not visible here — confirm at definition.
input_func_t* input_func;
// Set up SOURCE to read via INPUT_FUNC; USERDATA is passed through to it
// opaquely. PADDING_BYTES requests that the stream be padded with that many
// extra bytes after the underlying source reports EOF (see the note on
// padding_bytes_available in struct bit_source).
void init_bit_source(struct bit_source* source, input_func_t* input_func,
                     unsigned padding_bytes, void* userdata);
// Internal function. Do not use. Out-of-line continuation of possibly_refill()
// for the rare case where the byte reservoir itself must be refilled from the
// input callback; kept in a separate, non-inlined function so the common path
// stays small.
void possibly_refill_slow_path(struct bit_source* source, unsigned num_bits);
69 // Make sure there's at least NUM_BITS available in the short-term bit reservoir.
70 // You usually want to call this before read_bits(). The reason it's separate
71 // is that if you want two reads and you know the size of both, it's faster to
72 // refill A+B, read A, read B than refill A, read A, refill B, read B.
73 static inline void possibly_refill(struct bit_source* source, unsigned num_bits)
75 assert(num_bits <= BITRESERVOIR_FILL_SIZE + 1);
77 if (source->bits_available >= num_bits) {
78 // Fast path (~90% of invocations?)
82 // Slower path (~99% of remaining invocations?)
83 assert(source->bits_available + BITRESERVOIR_FILL_SIZE < BITRESERVOIR_SIZE);
84 if (source->bytes_available >= sizeof(bitreservoir_fill_t)) {
85 bitreservoir_t fill = read_bitreservoir_fill(source->byte_read_ptr);
86 source->byte_read_ptr += sizeof(bitreservoir_fill_t);
87 source->bytes_available -= sizeof(bitreservoir_fill_t);
88 source->bits |= (bitreservoir_t)fill << (BITRESERVOIR_SIZE - BITRESERVOIR_FILL_SIZE - source->bits_available);
89 source->bits_available += BITRESERVOIR_FILL_SIZE;
93 // Slow path: Refill from data source.
94 // Should not be inlined, so split into a separate function.
95 possibly_refill_slow_path(source, num_bits);
98 static inline unsigned read_bits(struct bit_source* source, unsigned num_bits)
100 assert(source->bits_available >= num_bits);
101 unsigned ret = (source->bits >> (BITRESERVOIR_SIZE - num_bits));
102 source->bits <<= num_bits;
103 source->bits_available -= num_bits;
107 static inline unsigned peek_bits(struct bit_source* source, unsigned num_bits)
109 assert(source->bits_available >= num_bits);
110 return (source->bits >> (BITRESERVOIR_SIZE - num_bits));
113 #endif /* !defined(_BITSOURCE_H) */