return ntohl(*(bitreservoir_fill_t*)(source));
}
-static const unsigned bitreservoir_size = 8 * sizeof(bitreservoir_t);
-static const unsigned bitreservoir_fill_size = 8 * sizeof(bitreservoir_fill_t);
-static const unsigned bytereservoir_size = 4096;
+static const unsigned BITRESERVOIR_SIZE = 8 * sizeof(bitreservoir_t);
+static const unsigned BITRESERVOIR_FILL_SIZE = 8 * sizeof(bitreservoir_fill_t);
+static const unsigned BYTERESERVOIR_SIZE = 4096;
-// A function to read bytes from some input source.
+// A function to read bytes from some input source. The bytes must
+// already be unstuffed (and thus contain no markers).
// A return value of -1 indicates error, a return value of 0 indicates EOF.
typedef ssize_t (input_func_t)(void*, uint8_t*, size_t);
-struct data_source {
+// A data source for efficient reading of bit-level data.
+struct bit_source {
// Short-term bit reservoir; holds up to 64 bits. When it's empty,
// it needs to get refilled from the medium-term bit reservoir.
bitreservoir_t bits;
input_func_t* input_func;
void* userdata;
};
-
-void init_data_source(struct data_source* source, input_func_t* input_func, void* userdata);
+
+void init_bit_source(struct bit_source* source, input_func_t* input_func, void* userdata);
// Internal function. Do not use.
-void possibly_refill_slow_path(struct data_source* source, unsigned num_bits);
+void possibly_refill_slow_path(struct bit_source* source, unsigned num_bits);
// Make sure there's at least NUM_BITS available in the short-term bit reservoir.
// You usually want to call this before read_bits(). The reason it's separate
// is that if you want two reads and you know the size of both, it's faster to
// refill A+B, read A, read B than refill A, read A, refill B, read B.
-static inline void possibly_refill(struct data_source* source, unsigned num_bits)
+static inline void possibly_refill(struct bit_source* source, unsigned num_bits)
{
- assert(num_bits <= bitreservoir_fill_size + 1);
+ assert(num_bits <= BITRESERVOIR_FILL_SIZE + 1);
if (source->bits_available >= num_bits) {
// Fast path (~90% of invocations?)
}
// Slower path (~99% of remaining invocations?)
- assert(source->bits_available + bitreservoir_fill_size < bitreservoir_size);
+ assert(source->bits_available + BITRESERVOIR_FILL_SIZE < BITRESERVOIR_SIZE);
if (source->bytes_available >= sizeof(bitreservoir_fill_t)) {
bitreservoir_fill_t fill = read_bitreservoir_fill(source->bytes);
source->bytes += sizeof(bitreservoir_fill_t);
source->bytes_available -= sizeof(bitreservoir_fill_t);
- source->bits |= (bitreservoir_t)fill << (bitreservoir_size - bitreservoir_fill_size - source->bits_available);
- source->bits_available += bitreservoir_fill_size;
+ source->bits |= (bitreservoir_t)fill << (BITRESERVOIR_SIZE - BITRESERVOIR_FILL_SIZE - source->bits_available);
+ source->bits_available += BITRESERVOIR_FILL_SIZE;
return;
}
possibly_refill_slow_path(source, num_bits);
}
-static inline unsigned read_bits(struct data_source* source, unsigned num_bits)
+static inline unsigned read_bits(struct bit_source* source, unsigned num_bits)
{
assert(source->bits_available >= num_bits);
- unsigned ret = (source->bits >> (bitreservoir_size - num_bits));
+ unsigned ret = (source->bits >> (BITRESERVOIR_SIZE - num_bits));
source->bits <<= num_bits;
source->bits_available -= num_bits;
return ret;
}
+static inline unsigned peek_bits(struct bit_source* source, unsigned num_bits)
+{
+ assert(source->bits_available >= num_bits);
+ return (source->bits >> (BITRESERVOIR_SIZE - num_bits));
+}
+
#endif /* !defined(_INPUT_H) */