1 /*****************************************************************************
2 * cavlc.c: h264 encoder library
3 *****************************************************************************
4 * Copyright (C) 2003-2008 x264 project
6 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
7 * Loren Merritt <lorenm@u.washington.edu>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
22 *****************************************************************************/
24 #include "common/common.h"
25 #include "macroblock.h"
27 static const uint8_t intra4x4_cbp_to_golomb[48]=
/* Maps coded_block_pattern (i_cbp_chroma<<4 | i_cbp_luma) to the ue(v)
 * codeword index used for intra (non-I16x16) macroblocks.
 * NOTE(review): the surrounding initializer braces are on lines not
 * visible in this chunk. */
29 3, 29, 30, 17, 31, 18, 37, 8, 32, 38, 19, 9, 20, 10, 11, 2,
30 16, 33, 34, 21, 35, 22, 39, 4, 36, 40, 23, 5, 24, 6, 7, 1,
31 41, 42, 43, 25, 44, 26, 46, 12, 45, 47, 27, 13, 28, 14, 15, 0
33 static const uint8_t inter_cbp_to_golomb[48]=
/* Same indexing as intra4x4_cbp_to_golomb (chroma<<4 | luma), but with
 * the codeword assignment used for inter macroblocks. */
35 0, 2, 3, 7, 4, 8, 17, 13, 5, 18, 9, 14, 10, 15, 16, 11,
36 1, 32, 33, 36, 34, 37, 44, 40, 35, 45, 38, 41, 39, 42, 43, 19,
37 6, 24, 25, 20, 26, 21, 46, 28, 27, 47, 22, 29, 23, 30, 31, 12
39 static const uint8_t mb_type_b_to_golomb[3][9]=
/* mb_type codeword for B macroblocks, indexed by
 * [partition - D_16x8][mb_type - B_L0_L0] (see the lookup at the call
 * site below).  The -1 entries mark partition/type combinations that are
 * never emitted; stored in a uint8_t they become 255, so they must never
 * be selected — NOTE(review): no visible assert guards this. */
41 { 4, 8, 12, 10, 6, 14, 16, 18, 20 }, /* D_16x8 */
42 { 5, 9, 13, 11, 7, 15, 17, 19, 21 }, /* D_8x16 */
43 { 1, -1, -1, -1, 2, -1, -1, -1, 3 } /* D_16x16 */
45 static const uint8_t sub_mb_type_p_to_golomb[4]=
/* sub_mb_type ue(v) codes for the four P sub-partitions; the initializer
 * list lies on lines not visible in this chunk. */
49 static const uint8_t sub_mb_type_b_to_golomb[13]=
/* sub_mb_type ue(v) codes for the 13 B sub-partition types, indexed by
 * h->mb.i_sub_partition[] values (see callers below). */
51 10, 4, 5, 1, 11, 6, 7, 2, 12, 8, 9, 3, 0
/* Pseudo block indices passed to block_residual_write_cavlc() for the DC
 * blocks, which have no 4x4 scan position of their own.  Negative so they
 * can never collide with real 0..23 block indices. */
54 #define BLOCK_INDEX_CHROMA_DC (-1)
55 #define BLOCK_INDEX_LUMA_DC (-2)
/* Write a pre-built VLC entry (bit pattern + length) to the bitstream. */
57 static inline void bs_write_vlc( bs_t *s, vlc_t v )
59 bs_write( s, v.i_size, v.i_bits );
62 /****************************************************************************
63 * block_residual_write_cavlc:
 *   CAVLC-encode one residual block `l` of `i_count` coefficients (zigzag
 *   order) into bitstream `s`.  i_idx is the 4x4 block index, or one of
 *   the BLOCK_INDEX_*_DC pseudo indices for DC blocks.
 *   Emits, in order: coeff_token, trailing-one signs, remaining levels,
 *   total_zeros, and run_before codes.
64 ****************************************************************************/
65 static void block_residual_write_cavlc( x264_t *h, bs_t *s, int i_idx, int16_t *l, int i_count )
67 int level[16], run[16];
68 int i_total, i_trailing;
75 /* first find i_last */
76 for( i_last = i_count-1; i_last >= 3; i_last -= 4 )
/* NOTE(review): 64-bit load through a cast of an int16_t* — checks four
 * coefficients at once.  Relies on the coefficient arrays being 8-byte
 * aligned and on the compiler tolerating the aliasing; confirm callers'
 * buffers use an aligned declaration. */
77 if( *(uint64_t*)(l+i_last-3) )
79 while( i_last >= 0 && l[i_last] == 0 )
85 i_total_zero = i_last + 1;
91 /* level and run and total */
/* Walk backwards from the last nonzero coeff, recording each level and
 * the run of zeros preceding it. */
95 level[idx] = l[i_last];
96 while( --i_last >= 0 && l[i_last] == 0 )
/* Trailing ones: up to 3 final coefficients with |level| == 1; only their
 * signs are coded. */
104 i_trailing = X264_MIN(3, idx);
105 for( idx = 0; idx < i_trailing; idx++ )
/* |level| != 1 ends the trailing-ones run. */
107 if( (unsigned)(level[idx]+1) > 2 )
113 i_sign |= level[idx] < 0;
/* Chroma DC uses its own fixed coeff_token table; other blocks pick a
 * table from the predicted nonzero count of neighbours (nC). */
118 if( i_idx == BLOCK_INDEX_CHROMA_DC )
120 bs_write_vlc( s, x264_coeff_token[4][i_total*4+i_trailing] );
124 /* x264_mb_predict_non_zero_code return 0 <-> (16+16+1)>>1 = 16 */
125 static const int ct_index[17] = {0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,3 };
126 int nC = x264_mb_predict_non_zero_code( h, i_idx == BLOCK_INDEX_LUMA_DC ? 0 : i_idx );
127 bs_write_vlc( s, x264_coeff_token[ct_index[nC]][i_total*4+i_trailing] );
133 i_suffix_length = i_total > 10 && i_trailing < 3 ? 1 : 0;
/* One sign bit per trailing one. */
136 bs_write( s, i_trailing, i_sign );
/* Remaining levels, coded with an adaptive suffix length. */
138 for( i = i_trailing; i < i_total; i++ )
/* Branchless abs/sign: assumes level[i] fits in 16 bits so >>15 yields
 * the sign mask — TODO(review) confirm level range upstream. */
140 int mask = level[i] >> 15;
141 int abs_level = (level[i]^mask)-mask;
142 int i_level_code = abs_level*2-mask-2;
144 if( i == i_trailing && i_trailing < 3 )
145 i_level_code -= 2; /* as level[i] can't be 1 for the first one if i_trailing < 3 */
147 if( ( i_level_code >> i_suffix_length ) < 14 )
149 bs_write( s, (i_level_code >> i_suffix_length) + 1 + i_suffix_length,
150 (1<<i_suffix_length) + (i_level_code & ((1<<i_suffix_length)-1)) );
152 else if( i_suffix_length == 0 && i_level_code < 30 )
154 bs_write( s, 19, (1<<4) + (i_level_code - 14) );
156 else if( i_suffix_length > 0 && ( i_level_code >> i_suffix_length ) == 14 )
158 bs_write( s, 15 + i_suffix_length,
159 (1<<i_suffix_length) + (i_level_code & ((1<<i_suffix_length)-1)) );
/* Escape coding: level too large for the normal prefix range. */
163 int i_level_prefix = 15;
164 i_level_code -= 15 << i_suffix_length;
165 if( i_suffix_length == 0 )
168 /* If the prefix size exceeds 15, High Profile is required. */
169 if( i_level_code >= 1<<12 )
171 if( h->sps->i_profile_idc >= PROFILE_HIGH )
173 while( i_level_code > 1<<(i_level_prefix-3) )
175 i_level_code -= 1<<(i_level_prefix-3);
182 /* Weight highly against overflows. */
183 s->i_bits_encoded += 1000000;
185 x264_log(h, X264_LOG_WARNING, "OVERFLOW levelcode=%d is only allowed in High Profile", i_level_code );
186 /* clip level, preserving sign */
187 i_level_code = (1<<12) - 2 + (i_level_code & 1);
191 bs_write( s, i_level_prefix + 1, 1 );
192 bs_write( s, i_level_prefix - 3, i_level_code & ((1<<(i_level_prefix-3))-1) );
/* Adapt the suffix length upward as levels grow. */
195 if( i_suffix_length == 0 )
197 if( abs_level > (3 << (i_suffix_length-1)) && i_suffix_length < 6 )
/* total_zeros: only coded when some coefficients are zero. */
201 if( i_total < i_count )
203 if( i_idx == BLOCK_INDEX_CHROMA_DC )
204 bs_write_vlc( s, x264_total_zeros_dc[i_total-1][i_total_zero] );
206 bs_write_vlc( s, x264_total_zeros[i_total-1][i_total_zero] );
/* run_before for each level except the last, until zeros are exhausted. */
209 for( i = 0; i < i_total-1 && i_total_zero > 0; i++ )
211 int i_zl = X264_MIN( i_total_zero - 1, 6 );
212 bs_write_vlc( s, x264_run_before[i_zl][run[i]] );
213 i_total_zero -= run[i];
/* Write mb_qp_delta.  Suppresses the delta (and resets the MB's QP to the
 * previous one) for an all-zero I16x16 block so no pointless QP change is
 * signalled.  NOTE(review): the clamping of i_dqp to the legal se(v)
 * range happens on lines not visible in this chunk. */
217 static void cavlc_qp_delta( x264_t *h, bs_t *s )
219 int i_dqp = h->mb.i_qp - h->mb.i_last_qp;
221 /* Avoid writing a delta quant if we have an empty i16x16 block, e.g. in a completely flat background area */
222 if( h->mb.i_type == I_16x16 && !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma)
223 && !array_non_zero(h->dct.luma16x16_dc) )
226 h->mb.i_qp = h->mb.i_last_qp;
235 else if( i_dqp > 25 )
238 bs_write_se( s, i_dqp );
/* Write the motion-vector difference (mv - predicted mv) for partition
 * `idx` of list `i_list`; `width` is the partition width in 4-pel units
 * as used by x264_mb_predict_mv(). */
241 static void cavlc_mb_mvd( x264_t *h, bs_t *s, int i_list, int idx, int width )
243 DECLARE_ALIGNED_4( int16_t mvp[2] );
244 x264_mb_predict_mv( h, i_list, idx, width, mvp );
245 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[idx]][0] - mvp[0] );
246 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[idx]][1] - mvp[1] );
/* Write the MVDs for 8x8 sub-block `i` according to its sub-partition
 * shape: one mvd for 8x8, two for 8x4/4x8, four for 4x4.  Skips lists the
 * sub-partition does not reference.  NOTE(review): the case labels of
 * this switch are on lines not visible in this chunk. */
249 static void cavlc_mb8x8_mvd( x264_t *h, bs_t *s, int i_list, int i )
251 if( !x264_mb_partition_listX_table[i_list][ h->mb.i_sub_partition[i] ] )
254 switch( h->mb.i_sub_partition[i] )
259 cavlc_mb_mvd( h, s, i_list, 4*i, 2 );
264 cavlc_mb_mvd( h, s, i_list, 4*i+0, 2 );
265 cavlc_mb_mvd( h, s, i_list, 4*i+2, 2 );
270 cavlc_mb_mvd( h, s, i_list, 4*i+0, 1 );
271 cavlc_mb_mvd( h, s, i_list, 4*i+1, 1 );
276 cavlc_mb_mvd( h, s, i_list, 4*i+0, 1 );
277 cavlc_mb_mvd( h, s, i_list, 4*i+1, 1 );
278 cavlc_mb_mvd( h, s, i_list, 4*i+2, 1 );
279 cavlc_mb_mvd( h, s, i_list, 4*i+3, 1 );
/* Write the luma residual for 8x8 blocks i8start..i8end (inclusive) whose
 * CBP bit is set.  For 8x8 transform, first de-interleaves each 8x8
 * coefficient block into four 4x4 lists so the 4x4 CAVLC path can encode
 * them; also updates the non_zero_count cache as a side effect. */
284 static inline void x264_macroblock_luma_write_cavlc( x264_t *h, bs_t *s, int i8start, int i8end )
287 if( h->mb.b_transform_8x8 )
289 /* shuffle 8x8 dct coeffs into 4x4 lists */
290 for( i8 = i8start; i8 <= i8end; i8++ )
291 if( h->mb.i_cbp_luma & (1 << i8) )
292 for( i4 = 0; i4 < 4; i4++ )
293 for( i = 0; i < 16; i++ )
294 h->dct.luma4x4[i4+i8*4][i] = h->dct.luma8x8[i8][i4+i*4];
297 for( i8 = i8start; i8 <= i8end; i8++ )
298 if( h->mb.i_cbp_luma & (1 << i8) )
299 for( i4 = 0; i4 < 4; i4++ )
301 h->mb.cache.non_zero_count[x264_scan8[i4+i8*4]] = array_non_zero_count( h->dct.luma4x4[i4+i8*4] );
302 block_residual_write_cavlc( h, s, i4+i8*4, h->dct.luma4x4[i4+i8*4], 16 );
306 /*****************************************************************************
307 * x264_macroblock_write_cavlc:
 *   Write one complete macroblock (header + residual) in CAVLC mode,
 *   updating per-frame header/texture bit statistics.
308 *****************************************************************************/
309 void x264_macroblock_write_cavlc( x264_t *h, bs_t *s )
311 const int i_mb_type = h->mb.i_type;
316 const int i_mb_pos_start = bs_pos( s );
/* Slice-type dispatch (case labels not visible in this chunk) sets the
 * intra mb_type offset; unsupported slice types are a hard error. */
320 switch( h->sh.i_type )
332 x264_log(h, X264_LOG_ERROR, "internal error or slice unsupported\n" );
/* MBAFF: the field-decoding flag is written once per MB pair (or when the
 * top MB of the pair was skipped). */
337 && (!(h->mb.i_mb_y & 1) || IS_SKIP(h->mb.type[h->mb.i_mb_xy - h->mb.i_mb_stride])) )
339 bs_write1( s, h->mb.b_interlaced );
343 if( i_mb_type == I_PCM)
345 bs_write_ue( s, i_mb_i_offset + 25 );
346 i_mb_pos_tex = bs_pos( s );
347 h->stat.frame.i_hdr_bits += i_mb_pos_tex - i_mb_pos_start;
/* Raw PCM samples are copied straight into the bitstream buffer;
 * NOTE(review): byte alignment and pointer advancement happen on lines
 * not visible in this chunk. */
351 memcpy( s->p, h->mb.pic.p_fenc[0], 256 );
353 for( i = 0; i < 8; i++ )
354 memcpy( s->p + i*8, h->mb.pic.p_fenc[1] + i*FENC_STRIDE, 8 );
356 for( i = 0; i < 8; i++ )
357 memcpy( s->p + i*8, h->mb.pic.p_fenc[2] + i*FENC_STRIDE, 8 );
360 /* if PCM is chosen, we need to store reconstructed frame data */
361 h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc[0], FENC_STRIDE, 16 );
362 h->mc.copy[PIXEL_8x8] ( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1], FENC_STRIDE, 8 );
363 h->mc.copy[PIXEL_8x8] ( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2], FENC_STRIDE, 8 );
365 h->stat.frame.i_itex_bits += bs_pos(s) - i_mb_pos_tex;
/* ---- mb_type + prediction info ---- */
374 if( i_mb_type == I_4x4 || i_mb_type == I_8x8 )
376 int di = i_mb_type == I_8x8 ? 4 : 1;
377 bs_write_ue( s, i_mb_i_offset + 0 );
378 if( h->pps->b_transform_8x8_mode )
379 bs_write1( s, h->mb.b_transform_8x8 );
381 /* Prediction: Luma */
382 for( i = 0; i < 16; i += di )
384 int i_pred = x264_mb_predict_intra4x4_mode( h, i );
385 int i_mode = x264_mb_pred_mode4x4_fix( h->mb.cache.intra4x4_pred_mode[x264_scan8[i]] );
387 if( i_pred == i_mode)
389 bs_write1( s, 1 ); /* b_prev_intra4x4_pred_mode */
/* rem_intra4x4_pred_mode: modes above the predicted one shift down by 1
 * (the >= adjustment is on a line not visible here). */
393 if( i_mode >= i_pred )
395 bs_write( s, 4, i_mode );
398 bs_write_ue( s, x264_mb_pred_mode8x8c_fix[ h->mb.i_chroma_pred_mode ] );
400 else if( i_mb_type == I_16x16 )
/* I16x16 folds prediction mode and CBP into the mb_type codeword. */
402 bs_write_ue( s, i_mb_i_offset + 1 + x264_mb_pred_mode16x16_fix[h->mb.i_intra16x16_pred_mode] +
403 h->mb.i_cbp_chroma * 4 + ( h->mb.i_cbp_luma == 0 ? 0 : 12 ) );
404 bs_write_ue( s, x264_mb_pred_mode8x8c_fix[ h->mb.i_chroma_pred_mode ] );
406 else if( i_mb_type == P_L0 )
408 DECLARE_ALIGNED_4( int16_t mvp[2] );
410 if( h->mb.i_partition == D_16x16 )
/* Reference index is te(v)-coded only when more than one ref exists. */
414 if( h->mb.pic.i_fref[0] > 1 )
416 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[0]] );
418 x264_mb_predict_mv( h, 0, 0, 4, mvp );
419 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[0]][0] - mvp[0] );
420 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[0]][1] - mvp[1] );
422 else if( h->mb.i_partition == D_16x8 )
425 if( h->mb.pic.i_fref[0] > 1 )
427 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[0]] );
428 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[8]] );
431 x264_mb_predict_mv( h, 0, 0, 4, mvp );
432 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[0]][0] - mvp[0] );
433 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[0]][1] - mvp[1] );
435 x264_mb_predict_mv( h, 0, 8, 4, mvp );
436 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[8]][0] - mvp[0] );
437 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[8]][1] - mvp[1] );
439 else if( h->mb.i_partition == D_8x16 )
442 if( h->mb.pic.i_fref[0] > 1 )
444 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[0]] );
445 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[4]] );
448 x264_mb_predict_mv( h, 0, 0, 2, mvp );
449 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[0]][0] - mvp[0] );
450 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[0]][1] - mvp[1] );
452 x264_mb_predict_mv( h, 0, 4, 2, mvp );
453 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[4]][0] - mvp[0] );
454 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[4]][1] - mvp[1] );
457 else if( i_mb_type == P_8x8 )
/* All-ref0 sub-blocks can use the cheaper P_8x8ref0 mb_type
 * (selection logic partly on lines not visible here). */
461 if( h->mb.cache.ref[0][x264_scan8[0]] == 0 && h->mb.cache.ref[0][x264_scan8[4]] == 0 &&
462 h->mb.cache.ref[0][x264_scan8[8]] == 0 && h->mb.cache.ref[0][x264_scan8[12]] == 0 )
473 for( i = 0; i < 4; i++ )
475 bs_write_ue( s, sub_mb_type_p_to_golomb[ h->mb.i_sub_partition[i] ] );
478 if( h->mb.pic.i_fref[0] > 1 && b_sub_ref0 )
480 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[0]] );
481 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[4]] );
482 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[8]] );
483 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[12]] );
486 for( i = 0; i < 4; i++ )
487 cavlc_mb8x8_mvd( h, s, 0, i );
489 else if( i_mb_type == B_8x8 )
491 bs_write_ue( s, 22 );
/* sub_mb_type for the four 8x8 blocks */
494 for( i = 0; i < 4; i++ )
496 bs_write_ue( s, sub_mb_type_b_to_golomb[ h->mb.i_sub_partition[i] ] );
/* ref idx per list, only for sub-partitions referencing that list */
499 for( i = 0; i < 4; i++ )
501 if( x264_mb_partition_listX_table[0][ h->mb.i_sub_partition[i] ] )
503 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[i*4]] );
506 for( i = 0; i < 4; i++ )
508 if( x264_mb_partition_listX_table[1][ h->mb.i_sub_partition[i] ] )
510 bs_write_te( s, h->mb.pic.i_fref[1] - 1, h->mb.cache.ref[1][x264_scan8[i*4]] );
514 for( i = 0; i < 4; i++ )
515 cavlc_mb8x8_mvd( h, s, 0, i );
516 for( i = 0; i < 4; i++ )
517 cavlc_mb8x8_mvd( h, s, 1, i );
519 else if( i_mb_type != B_DIRECT )
/* Remaining B types (B_L0_L0 .. B_BI_BI with 16x16/16x8/8x16 parts). */
524 DECLARE_ALIGNED_4( int16_t mvp[2] );
528 /* init ref list utilisations */
529 for( i = 0; i < 2; i++ )
531 b_list[0][i] = x264_mb_type_list0_table[i_mb_type][i];
532 b_list[1][i] = x264_mb_type_list1_table[i_mb_type][i];
536 bs_write_ue( s, mb_type_b_to_golomb[ h->mb.i_partition - D_16x8 ][ i_mb_type - B_L0_L0 ] );
538 for( i_list = 0; i_list < 2; i_list++ )
540 const int i_ref_max = i_list == 0 ? h->mb.pic.i_fref[0] : h->mb.pic.i_fref[1];
544 switch( h->mb.i_partition )
547 if( b_list[i_list][0] ) bs_write_te( s, i_ref_max - 1, h->mb.cache.ref[i_list][x264_scan8[0]] );
550 if( b_list[i_list][0] ) bs_write_te( s, i_ref_max - 1, h->mb.cache.ref[i_list][x264_scan8[0]] );
551 if( b_list[i_list][1] ) bs_write_te( s, i_ref_max - 1, h->mb.cache.ref[i_list][x264_scan8[8]] );
554 if( b_list[i_list][0] ) bs_write_te( s, i_ref_max - 1, h->mb.cache.ref[i_list][x264_scan8[0]] );
555 if( b_list[i_list][1] ) bs_write_te( s, i_ref_max - 1, h->mb.cache.ref[i_list][x264_scan8[4]] );
560 for( i_list = 0; i_list < 2; i_list++ )
562 switch( h->mb.i_partition )
565 if( b_list[i_list][0] )
567 x264_mb_predict_mv( h, i_list, 0, 4, mvp );
568 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[0]][0] - mvp[0] );
569 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[0]][1] - mvp[1] );
573 if( b_list[i_list][0] )
575 x264_mb_predict_mv( h, i_list, 0, 4, mvp );
576 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[0]][0] - mvp[0] );
577 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[0]][1] - mvp[1] );
579 if( b_list[i_list][1] )
581 x264_mb_predict_mv( h, i_list, 8, 4, mvp );
582 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[8]][0] - mvp[0] );
583 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[8]][1] - mvp[1] );
587 if( b_list[i_list][0] )
589 x264_mb_predict_mv( h, i_list, 0, 2, mvp );
590 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[0]][0] - mvp[0] );
591 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[0]][1] - mvp[1] );
593 if( b_list[i_list][1] )
595 x264_mb_predict_mv( h, i_list, 4, 2, mvp );
596 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[4]][0] - mvp[0] );
597 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[4]][1] - mvp[1] );
603 else if( i_mb_type == B_DIRECT )
609 x264_log(h, X264_LOG_ERROR, "invalid/unhandled mb_type\n" );
614 i_mb_pos_tex = bs_pos( s );
615 h->stat.frame.i_hdr_bits += i_mb_pos_tex - i_mb_pos_start;
618 /* Coded block pattern */
619 if( i_mb_type == I_4x4 || i_mb_type == I_8x8 )
621 bs_write_ue( s, intra4x4_cbp_to_golomb[( h->mb.i_cbp_chroma << 4 )|h->mb.i_cbp_luma] );
623 else if( i_mb_type != I_16x16 )
625 bs_write_ue( s, inter_cbp_to_golomb[( h->mb.i_cbp_chroma << 4 )|h->mb.i_cbp_luma] );
628 /* transform size 8x8 flag */
629 if( x264_mb_transform_8x8_allowed( h ) && h->mb.i_cbp_luma )
631 bs_write1( s, h->mb.b_transform_8x8 );
/* ---- residual ---- */
635 if( i_mb_type == I_16x16 )
637 cavlc_qp_delta( h, s );
/* I16x16: DC block first, then AC (skipping coefficient 0) per block. */
640 block_residual_write_cavlc( h, s, BLOCK_INDEX_LUMA_DC , h->dct.luma16x16_dc, 16 );
643 if( h->mb.i_cbp_luma != 0 )
644 for( i = 0; i < 16; i++ )
646 h->mb.cache.non_zero_count[x264_scan8[i]] = array_non_zero_count( h->dct.luma4x4[i] );
647 block_residual_write_cavlc( h, s, i, h->dct.luma4x4[i]+1, 15 );
650 else if( h->mb.i_cbp_luma != 0 || h->mb.i_cbp_chroma != 0 )
652 cavlc_qp_delta( h, s );
653 x264_macroblock_luma_write_cavlc( h, s, 0, 3 );
655 if( h->mb.i_cbp_chroma != 0 )
657 /* Chroma DC residual present */
658 block_residual_write_cavlc( h, s, BLOCK_INDEX_CHROMA_DC, h->dct.chroma_dc[0], 4 );
659 block_residual_write_cavlc( h, s, BLOCK_INDEX_CHROMA_DC, h->dct.chroma_dc[1], 4 );
660 if( h->mb.i_cbp_chroma&0x02 ) /* Chroma AC residual present */
661 for( i = 16; i < 24; i++ )
663 h->mb.cache.non_zero_count[x264_scan8[i]] = array_non_zero_count( h->dct.luma4x4[i] );
664 block_residual_write_cavlc( h, s, i, h->dct.luma4x4[i]+1, 15 );
/* Texture bits are accounted as intra or inter depending on mb_type. */
669 if( IS_INTRA( i_mb_type ) )
670 h->stat.frame.i_itex_bits += bs_pos(s) - i_mb_pos_tex;
672 h->stat.frame.i_ptex_bits += bs_pos(s) - i_mb_pos_tex;
677 /*****************************************************************************
678 * RD only; doesn't generate a valid bitstream
679 * doesn't write cbp or chroma dc (I don't know how much this matters)
680 * works on all partition sizes except 16x16
681 * for sub8x8, call once per 8x8 block
 *
 * Returns the bit cost of coding partition i8 (header + residual) using a
 * local counting bitstream `s`; declaration of `s` and the if/else braces
 * fall on lines not visible in this chunk.
682 *****************************************************************************/
683 static int x264_partition_size_cavlc( x264_t *h, int i8, int i_pixel )
686 const int i_mb_type = h->mb.i_type;
689 s.i_bits_encoded = 0;
691 if( i_mb_type == P_8x8 )
693 bs_write_ue( &s, sub_mb_type_p_to_golomb[ h->mb.i_sub_partition[i8] ] );
694 if( h->mb.pic.i_fref[0] > 1 )
695 bs_write_te( &s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[4*i8]] );
696 cavlc_mb8x8_mvd( h, &s, 0, i8 );
698 else if( i_mb_type == P_L0 )
700 if( h->mb.pic.i_fref[0] > 1 )
701 bs_write_te( &s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[4*i8]] );
702 if( h->mb.i_partition == D_16x8 )
703 cavlc_mb_mvd( h, &s, 0, 4*i8, 4 );
705 cavlc_mb_mvd( h, &s, 0, 4*i8, 2 );
707 else if( i_mb_type == B_8x8 )
709 bs_write_ue( &s, sub_mb_type_b_to_golomb[ h->mb.i_sub_partition[i8] ] );
711 if( h->mb.pic.i_fref[0] > 1
712 && x264_mb_partition_listX_table[0][ h->mb.i_sub_partition[i8] ] )
713 bs_write_te( &s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[4*i8]] );
714 if( h->mb.pic.i_fref[1] > 1
715 && x264_mb_partition_listX_table[1][ h->mb.i_sub_partition[i8] ] )
716 bs_write_te( &s, h->mb.pic.i_fref[1] - 1, h->mb.cache.ref[1][x264_scan8[4*i8]] );
718 cavlc_mb8x8_mvd( h, &s, 0, i8 );
719 cavlc_mb8x8_mvd( h, &s, 1, i8 );
723 x264_log(h, X264_LOG_ERROR, "invalid/unhandled mb_type\n" );
/* Residual: one iteration per 8x8 block covered by this partition
 * (two iterations for partitions wider/taller than 8x8). */
727 for( j = (i_pixel < PIXEL_8x8); j >= 0; j-- )
729 x264_macroblock_luma_write_cavlc( h, &s, i8, i8 );
730 h->mb.cache.non_zero_count[x264_scan8[16+i8]] = array_non_zero_count( h->dct.luma4x4[16+i8] );
731 block_residual_write_cavlc( h, &s, 16+i8, h->dct.luma4x4[16+i8]+1, 15 );
732 h->mb.cache.non_zero_count[x264_scan8[20+i8]] = array_non_zero_count( h->dct.luma4x4[20+i8] );
733 block_residual_write_cavlc( h, &s, 20+i8, h->dct.luma4x4[20+i8]+1, 15 );
734 i8 += x264_pixel_size[i_pixel].h >> 3;
737 return s.i_bits_encoded;
/* Bit cost of signalling intra 4x4 prediction mode i_mode for block i4:
 * cheap when it matches the predicted mode.  NOTE(review): the return
 * statements of both branches are on lines not visible in this chunk. */
740 static int cavlc_intra4x4_pred_size( x264_t *h, int i4, int i_mode )
742 if( x264_mb_predict_intra4x4_mode( h, i4 ) == x264_mb_pred_mode4x4_fix( i_mode ) )
/* RD helper: bit cost of one intra 8x8 partition — prediction-mode bits
 * plus the CAVLC cost of its four 4x4 coefficient lists (de-interleaved
 * from the 8x8 block, mirroring x264_macroblock_luma_write_cavlc).
 * Uses h->out.bs.i_bits_encoded as the accumulator. */
748 static int x264_partition_i8x8_size_cavlc( x264_t *h, int i8, int i_mode )
751 h->out.bs.i_bits_encoded = cavlc_intra4x4_pred_size( h, 4*i8, i_mode );
752 for( i4 = 0; i4 < 4; i4++ )
754 for( i = 0; i < 16; i++ )
755 h->dct.luma4x4[i4+i8*4][i] = h->dct.luma8x8[i8][i4+i*4];
756 h->mb.cache.non_zero_count[x264_scan8[i4+i8*4]] =
757 array_non_zero_count( h->dct.luma4x4[i4+i8*4] );
758 block_residual_write_cavlc( h, &h->out.bs, i4+i8*4, h->dct.luma4x4[i4+i8*4], 16 );
760 return h->out.bs.i_bits_encoded;
/* RD helper: bit cost of one intra 4x4 block — prediction-mode bits plus
 * the CAVLC cost of its 16 coefficients. */
763 static int x264_partition_i4x4_size_cavlc( x264_t *h, int i4, int i_mode )
765 h->out.bs.i_bits_encoded = cavlc_intra4x4_pred_size( h, i4, i_mode );
766 block_residual_write_cavlc( h, &h->out.bs, i4, h->dct.luma4x4[i4], 16 );
767 return h->out.bs.i_bits_encoded;
/* RD helper: bit cost of the chroma portion of an intra macroblock —
 * chroma prediction mode plus DC (and, when cbp_chroma == 2, AC)
 * residual cost.  NOTE(review): the function's closing brace lies past
 * the last line visible in this chunk. */
770 static int x264_i8x8_chroma_size_cavlc( x264_t *h )
772 h->out.bs.i_bits_encoded = bs_size_ue( x264_mb_pred_mode8x8c_fix[ h->mb.i_chroma_pred_mode ] );
773 if( h->mb.i_cbp_chroma != 0 )
775 block_residual_write_cavlc( h, &h->out.bs, BLOCK_INDEX_CHROMA_DC, h->dct.chroma_dc[0], 4 );
776 block_residual_write_cavlc( h, &h->out.bs, BLOCK_INDEX_CHROMA_DC, h->dct.chroma_dc[1], 4 );
778 if( h->mb.i_cbp_chroma == 2 )
781 for( i = 16; i < 24; i++ )
783 h->mb.cache.non_zero_count[x264_scan8[i]] = array_non_zero_count( h->dct.luma4x4[i] );
784 block_residual_write_cavlc( h, &h->out.bs, i, h->dct.luma4x4[i]+1, 15 );
788 return h->out.bs.i_bits_encoded;