#ifndef X264_BS_H
#define X264_BS_H
+/* Fixed-size VLC (variable-length code) table entry: i_bits holds the
+ * code word's bit pattern, i_size its length in bits.
+ * NOTE(review): lines are '+'-prefixed — this chunk is part of a diff,
+ * not plain C source. */
+typedef struct
+{
+ uint8_t i_bits;
+ uint8_t i_size;
+} vlc_t;
+
+/* VLC entry for the multi-level level-code tables: wider i_bits (16 bit)
+ * plus an index of the next-level table to continue decoding/encoding
+ * with (see x264_level_token below). */
+typedef struct
+{
+ uint16_t i_bits;
+ uint8_t i_size;
+ /* Next level table to use */
+ uint8_t i_next;
+} vlc_large_t;
+
/* Bitstream writer state.
 * NOTE(review): this struct is visibly truncated by the diff splice —
 * code below references members (p, p_end, cur_bits, i_left) that are
 * not declared here; confirm against the full upstream header. */
typedef struct bs_s
{
 uint8_t *p_start; /* first byte of the output buffer */
 int i_bits_encoded; /* RD only */
} bs_t;
+/* Run/level pairs for a residual block: up to 16 coefficient levels and
+ * their preceding zero-run lengths; 'last' presumably marks the index of
+ * the last nonzero coefficient — TODO confirm against the encoder code
+ * that fills this struct. */
+typedef struct
+{
+ int last;
+ int16_t level[16];
+ uint8_t run[16];
+} x264_run_level_t;
+
+/* CAVLC code tables (defined in a .c file elsewhere): coeff_token,
+ * total_zeros, run_before per the H.264 spec's CAVLC tables. */
+extern const vlc_t x264_coeff0_token[5];
+extern const vlc_t x264_coeff_token[5][16*4];
+extern const vlc_t x264_total_zeros[15][16];
+extern const vlc_t x264_total_zeros_dc[3][4];
+extern const vlc_t x264_run_before[7][16];
+
+/* A larger level table size theoretically could help a bit at extremely
+ * high bitrates, but the cost in cache is usually too high for it to be
+ * useful.
+ * This size appears to be optimal for QP18 encoding on a Nehalem CPU.
+ * FIXME: Do further testing? */
+#define LEVEL_TABLE_SIZE 128
+/* Precomputed level-code tables, one per suffix-length context (0..6);
+ * non-const because they are filled at init time — confirm in the .c. */
+extern vlc_large_t x264_level_token[7][LEVEL_TABLE_SIZE];
+
/* NOTE(review): the span below is CORRUPTED — it is several diff hunks
 * concatenated without their @@ headers. It opens as bs_init() but the
 * body immediately jumps into fragments of other functions. Do NOT
 * treat this as one compilable function; recover the hunks from the
 * original patch before applying. */
static inline void bs_init( bs_t *s, void *p_data, int i_data )
{
 int offset = ((intptr_t)p_data & (WORD_SIZE-1));
 /* NOTE(review): splice point — from here this is the interior of a
  * different function (a bs_write-style flush: 'i_count' is not a
  * parameter of bs_init). */
 s->i_left -= i_count;
 if( s->i_left <= 32 )
 {
/* Hunk: on big-endian hosts store the pending bits right-aligned
 * instead of byte-swapping; replaces the endian_fix() path. */
+#ifdef WORDS_BIGENDIAN
+ *(uint32_t*)s->p = s->cur_bits >> (32 - s->i_left);
+#else
 *(uint32_t*)s->p = endian_fix( s->cur_bits << s->i_left );
+#endif
 s->i_left += 32;
 s->p += 4;
 }
 /* NOTE(review): second splice point — the remainder is the tail of
  * bs_write_ue_big(): Exp-Golomb size computation via a byte-indexed
  * lookup table ('val', 'size' declared in the missing part of that
  * function). */
 int tmp = ++val;
 if( tmp >= 0x10000 )
 {
/* Hunk: accumulate the code size instead of emitting 32 zero bits
 * immediately... */
- bs_write32( s, 0 );
+ size = 32;
 tmp >>= 16;
 }
 if( tmp >= 0x100 )
 {
/* ...so this branch must add to size, not overwrite it. */
- size = 16;
+ size += 16;
 tmp >>= 8;
 }
 size += x264_ue_size_tab[tmp];
/* Hunk: emit the Exp-Golomb code as (size>>1) leading zeros followed by
 * the (size>>1)+1-bit value, avoiding a single over-wide bs_write. */
- bs_write( s, size, val );
+ bs_write( s, size>>1, 0 );
+ bs_write( s, (size>>1)+1, val );
}
/* Only works on values under 255. */