X-Git-Url: https://git.sesse.net/?p=fjl;a=blobdiff_plain;f=dehuff.h;h=7621f6095e99791feb6138e81c2574ae9dda6bce;hp=b79a79ace3fe976b2f38d8bf375b980b90ccafe7;hb=a4009687c73083dd0290285a065740a83e27e855;hpb=47de6c270a336574dce220cde780a802a513d113

diff --git a/dehuff.h b/dehuff.h
index b79a79a..7621f60 100644
--- a/dehuff.h
+++ b/dehuff.h
@@ -5,10 +5,14 @@
 #include 
 #include 
 
-// A function to read bytes from some input source. The bytes should be
-// already unstuffed (and thus without markers).
-// A return value of -1 indicates error, a return value of 0 indicates EOF.
-typedef ssize_t (raw_input_func_t)(void*, uint8_t*, size_t);
+#include "bytesource.h"
+#include "bitsource.h"
+
+// About 99% of all Huffman codes are <= 8 bits long (see codelen.txt),
+// and it's what libjpeg uses. Thus, it seems like a reasonable size.
+#define DEHUF_TABLE_BITS 8
+#define DEHUF_TABLE_SIZE (1 << DEHUF_TABLE_BITS)
+static const int DEHUF_SLOW_PATH = -1;
 
 struct huffman_table {
 	unsigned num_codes[17];  // BITS
@@ -20,6 +24,15 @@ struct huffman_table {
 	int maxcode[16];
 	int mincode[16];
 	unsigned valptr[16];
+
+	// Lookup table for fast decoding; given eight bits,
+	// return the symbol and length in bits. For longer codes,
+	// DEHUF_SLOW_PATH is returned.
+
+	// Note that the codes we return are 8-bit, but the type of
+	// the lookup tables is int to avoid extra zero extending.
+	int lookup_table_codes[DEHUF_TABLE_SIZE];
+	int lookup_table_length[DEHUF_TABLE_SIZE];
 };
 
 enum coefficient_class {
@@ -29,6 +42,64 @@ enum coefficient_class {
 };
 
 typedef struct huffman_table huffman_tables_t[NUM_COEFF_CLASSES][4];
-void read_huffman_tables(huffman_tables_t* dst, raw_input_func_t* input_func, void* userdata);
+// Read Huffman tables from a stream, and compute the derived values.
+void read_huffman_tables(huffman_tables_t* dst, input_func_t* input_func, void* userdata);
+
+unsigned read_huffman_symbol_slow_path(const struct huffman_table* table,
+                                       struct bit_source* source);
+
+static inline unsigned read_huffman_symbol_no_refill(
+	const struct huffman_table* table,
+	struct bit_source* source)
+{
+	assert(source->bits_available >= DEHUF_TABLE_BITS);
+	unsigned lookup = peek_bits(source, DEHUF_TABLE_BITS);
+	int code = table->lookup_table_codes[lookup];
+	int length = table->lookup_table_length[lookup];
+
+	if (code == DEHUF_SLOW_PATH) {
+		return read_huffman_symbol_slow_path(table, source);
+	}
+
+	read_bits(source, length);
+	return code;
+}
+
+static inline unsigned read_huffman_symbol(const struct huffman_table* table,
+                                           struct bit_source* source)
+{
+	possibly_refill(source, DEHUF_TABLE_BITS);
+	return read_huffman_symbol_no_refill(table, source);
+}
+
+// procedure EXTEND (figure F.12)
+
+// Fast lookup table for (1 << (bits - 1)).
+// The table actually helps, since the load can go in parallel with the shift
+// operation below.
+static const int bit_thresholds[16] = {
+	0, 1 << 0, 1 << 1, 1 << 2, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 1 << 7, 1 << 8, 1 << 9, 1 << 10, 1 << 11, 1 << 12, 1 << 13, 1 << 14
+};
+
+static inline unsigned extend(int val, unsigned bits)
+{
+#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+	// GCC should ideally be able to figure out that the conditional move is better, but
+	// it doesn't for various reasons, and this is pretty important for speed, so we hardcode.
+	asm("cmp %2, %0 ; cmovl %3, %0"
+	    : "=r" (val)
+	    : "0" (val),
+	      "g" (bit_thresholds[bits]),
+	      "r" (val + (-1 << bits) + 1)
+	    : "cc");
+	return val;
+#else
+	if (val < bit_thresholds[bits]) {
+		return val + (-1 << bits) + 1;
+	} else {
+		return val;
+	}
+#endif
+}
 
 #endif /* !defined(_DEHUFF_H) */