// Optimize for 64 bits. We might want to replace this for 32-bit machines.
typedef uint64_t bitreservoir_t;       // short-term reservoir word (holds up to 64 bits)
typedef uint32_t bitreservoir_fill_t;  // unit loaded per refill step

// Reads one bitreservoir_fill_t from SOURCE as a big-endian value.
// Note: We return bitreservoir_t here, so we can get implicit zero extension on amd64.
//
// The previous version loaded through *(bitreservoir_fill_t*)source, which is
// undefined behavior (strict-aliasing violation, and a potentially misaligned
// access), and needed inline asm / ntohl() to byte-swap. Byte-wise assembly is
// well-defined for any alignment, endian-independent, and is recognized by
// GCC/Clang, which emit a single byte-swapped load at -O2.
static inline bitreservoir_t read_bitreservoir_fill(const uint8_t* source)
{
	return ((bitreservoir_t)source[0] << 24) |
	       ((bitreservoir_t)source[1] << 16) |
	       ((bitreservoir_t)source[2] << 8) |
	       (bitreservoir_t)source[3];
}
30 static const unsigned BITRESERVOIR_SIZE = 8 * sizeof(bitreservoir_t);
31 static const unsigned BITRESERVOIR_FILL_SIZE = 8 * sizeof(bitreservoir_fill_t);
32 static const unsigned BYTERESERVOIR_SIZE = 4096;
34 // A data source for efficient reading of bit-level data.
36 // Short-term bit reservoir; holds up to 64 bits. When it's empty,
37 // it needs to get refilled from the medium-term bit reservoir.
39 unsigned bits_available;
41 // Medium-term bit reservoir; holds a few kilobytes of spare data.
42 // When this is empty, it needs to be refilled from the input
45 uint8_t* byte_read_ptr;
46 unsigned bytes_available;
48 // Some clients will purposedly read a bit ahead of the stream, causing
49 // problems at EOF. Thus, the client is allowed to request that we pad
50 // the end stream with a few bytes after the source reports EOF.
51 int padding_bytes_available;
54 input_func_t* input_func;
59 void init_bit_source(struct bit_source* source, input_func_t* input_func,
60 unsigned padding_bytes, void* userdata);
62 // Internal function. Do not use.
63 void possibly_refill_slow_path(struct bit_source* source, unsigned num_bits);
65 // Make sure there's at least NUM_BITS available in the short-term bit reservoir.
66 // You usually want to call this before read_bits(). The reason it's separate
67 // is that if you want two reads and you know the size of both, it's faster to
68 // refill A+B, read A, read B than refill A, read A, refill B, read B.
69 static inline void possibly_refill(struct bit_source* source, unsigned num_bits)
71 assert(num_bits <= BITRESERVOIR_FILL_SIZE + 1);
73 if (source->bits_available >= num_bits) {
74 // Fast path (~90% of invocations?)
78 // Slower path (~99% of remaining invocations?)
79 assert(source->bits_available + BITRESERVOIR_FILL_SIZE < BITRESERVOIR_SIZE);
80 if (source->bytes_available >= sizeof(bitreservoir_fill_t)) {
81 bitreservoir_t fill = read_bitreservoir_fill(source->byte_read_ptr);
82 source->byte_read_ptr += sizeof(bitreservoir_fill_t);
83 source->bytes_available -= sizeof(bitreservoir_fill_t);
84 source->bits |= (bitreservoir_t)fill << (BITRESERVOIR_SIZE - BITRESERVOIR_FILL_SIZE - source->bits_available);
85 source->bits_available += BITRESERVOIR_FILL_SIZE;
89 // Slow path: Refill from data source.
90 // Should not be inlined, so split into a separate function.
91 possibly_refill_slow_path(source, num_bits);
94 static inline unsigned read_bits(struct bit_source* source, unsigned num_bits)
96 assert(source->bits_available >= num_bits);
97 unsigned ret = (source->bits >> (BITRESERVOIR_SIZE - num_bits));
98 source->bits <<= num_bits;
99 source->bits_available -= num_bits;
103 static inline unsigned peek_bits(struct bit_source* source, unsigned num_bits)
105 assert(source->bits_available >= num_bits);
106 return (source->bits >> (BITRESERVOIR_SIZE - num_bits));
109 #endif /* !defined(_BITSOURCE_H) */