diff --git a/dehuff.h b/dehuff.h
index 728c623..2669034 100644
--- a/dehuff.h
+++ b/dehuff.h
@@ -48,12 +48,11 @@ void read_huffman_tables(huffman_tables_t* dst, input_func_t* input_func, void*
 unsigned read_huffman_symbol_slow_path(const struct huffman_table* table,
                                        struct bit_source* source);
 
-static inline unsigned read_huffman_symbol(const struct huffman_table* table,
-                                           struct bit_source* source)
+static inline unsigned read_huffman_symbol_no_refill(
+	const struct huffman_table* table,
+	struct bit_source* source)
 {
-	// FIXME: We can read past the end of the stream here in some edge
-	// cases. We need to define some guarantees in the layers above.
-	possibly_refill(source, DEHUF_TABLE_BITS);
+	assert(source->bits_available >= DEHUF_TABLE_BITS);
 	unsigned lookup = peek_bits(source, DEHUF_TABLE_BITS);
 	int code = table->lookup_table_codes[lookup];
 	int length = table->lookup_table_length[lookup];
@@ -61,9 +60,45 @@ static inline unsigned read_huffman_symbol(const struct huffman_table* table,
 	if (code == DEHUF_SLOW_PATH) {
 		return read_huffman_symbol_slow_path(table, source);
 	}
-	
+
 	read_bits(source, length);
 	return code;
 }
 
+static inline unsigned read_huffman_symbol(const struct huffman_table* table,
+                                           struct bit_source* source)
+{
+	possibly_refill(source, DEHUF_TABLE_BITS);
+	return read_huffman_symbol_no_refill(table, source);
+}
+
+// procedure EXTEND (figure F.12)
+
+// Fast lookup table for (1 << (bits - 1)).
+// The table actually helps, since the load can go in parallel with the shift
+// operation below.
+static const int bit_thresholds[16] = {
+	0, 1 << 0, 1 << 1, 1 << 2, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 1 << 7, 1 << 8, 1 << 9, 1 << 10, 1 << 11, 1 << 12, 1 << 13, 1 << 14
+};
+
+static inline unsigned extend(int val, unsigned bits)
+{
+#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+	// GCC should ideally be able to figure out that the conditional move
+	// is better, but it doesn't for various reasons, and this is pretty
+	// important for speed, so we hardcode.
+	asm("cmp %1, %0 ; cmovl %2, %0"
+	    : "+r" (val)
+	    : "g" (bit_thresholds[bits]),
+	      "r" (val + (-1 << bits) + 1)
+	    : "cc");
+	return val;
+#else
+	if (val < bit_thresholds[bits]) {
+		return val + (-1 << bits) + 1;
+	} else {
+		return val;
+	}
+#endif
+}
+
 #endif /* !defined(_DEHUFF_H) */
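
A note on the refactoring above: splitting read_huffman_symbol() into a refill step plus read_huffman_symbol_no_refill() lets a caller that has already guaranteed DEHUF_TABLE_BITS of lookahead (say, by refilling once before decoding several symbols) skip the refill check in the hot loop. Below is a minimal standalone sketch of the same lookup-table decode shape with a toy bit source; all names and the example code table here are hypothetical, not from the fjl source.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_TABLE_BITS 8
#define TOY_SLOW_PATH -1

/* Toy bit source: a 64-bit buffer, most significant bit first. */
struct toy_bit_source {
	uint64_t bits;
	int bits_available;
};

static unsigned toy_peek_bits(const struct toy_bit_source* s, int n)
{
	return (unsigned)(s->bits >> (64 - n));
}

static void toy_read_bits(struct toy_bit_source* s, int n)
{
	s->bits <<= n;
	s->bits_available -= n;
}

/* Same shape as read_huffman_symbol_no_refill(): one table load resolves
 * any code of TOY_TABLE_BITS bits or fewer; longer codes would fall
 * through to a slow path (omitted in this sketch). */
static int toy_decode_no_refill(const int16_t* codes, const uint8_t* lengths,
                                struct toy_bit_source* s)
{
	assert(s->bits_available >= TOY_TABLE_BITS);
	unsigned lookup = toy_peek_bits(s, TOY_TABLE_BITS);
	int code = codes[lookup];
	if (code == TOY_SLOW_PATH) {
		return -1;  /* slow path omitted */
	}
	toy_read_bits(s, lengths[lookup]);
	return code;
}

int main(void)
{
	/* Example code: "0" -> symbol 5 (1 bit), "10" -> symbol 9 (2 bits).
	 * Every table slot sharing a code's prefix points at that code. */
	int16_t codes[1 << TOY_TABLE_BITS];
	uint8_t lengths[1 << TOY_TABLE_BITS];
	for (int i = 0; i < (1 << TOY_TABLE_BITS); ++i) {
		if ((i >> 7) == 0) {
			codes[i] = 5;
			lengths[i] = 1;
		} else if ((i >> 6) == 2) {
			codes[i] = 9;
			lengths[i] = 2;
		} else {
			codes[i] = TOY_SLOW_PATH;
			lengths[i] = 0;
		}
	}

	/* Bitstream "0 10 0" should decode as 5, 9, 5. */
	struct toy_bit_source s = { UINT64_C(0x4) << 60, 64 };
	int a = toy_decode_no_refill(codes, lengths, &s);
	int b = toy_decode_no_refill(codes, lengths, &s);
	int c = toy_decode_no_refill(codes, lengths, &s);
	printf("%d %d %d\n", a, b, c);  /* prints "5 9 5" */
	return 0;
}

The point of the single-table scheme is that a code of table-width or fewer bits costs exactly one load and one conditional; only longer codes pay for the bit-by-bit slow path.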
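
For reference, extend() implements the EXTEND procedure of ITU-T T.81 (the JPEG spec), figure F.12: a bits-wide magnitude field whose top bit is clear encodes a negative coefficient and must be offset downwards by (1 << bits) - 1. A portable sketch of the same mapping follows; extend_ref and the test values are illustrative, not from the source.

#include <assert.h>
#include <stdio.h>

/* Reference EXTEND (ITU-T T.81, figure F.12), written without the lookup
 * table or inline asm: if the top bit of the bits-wide field is clear,
 * the raw value encodes a negative coefficient. */
static int extend_ref(int val, unsigned bits)
{
	if (bits == 0) {
		return val;
	}
	if (val < (1 << (bits - 1))) {
		return val - (1 << bits) + 1;
	}
	return val;
}

int main(void)
{
	/* With bits = 3, raw values 0..7 map to -7..-4 and 4..7. */
	for (int raw = 0; raw < 8; ++raw) {
		printf("EXTEND(%d, 3) = %d\n", raw, extend_ref(raw, 3));
	}
	assert(extend_ref(0, 3) == -7);
	assert(extend_ref(3, 3) == -4);
	assert(extend_ref(4, 3) == 4);
	assert(extend_ref(7, 3) == 7);
	return 0;
}

Note that val - (1 << bits) + 1 computes the same value as the diff's val + (-1 << bits) + 1 without left-shifting a negative number, which standard C leaves undefined; the committed code relies on GCC defining that shift.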