1 /*****************************************************************************
2 * cavlc.c: h264 encoder library
3 *****************************************************************************
4 * Copyright (C) 2003 Laurent Aimar
5 * $Id: cavlc.c,v 1.1 2004/06/03 19:27:08 fenrir Exp $
7 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
22 *****************************************************************************/
24 #include "common/common.h"
25 #include "common/vlc.h"
26 #include "macroblock.h"
/* Maps (cbp_chroma << 4) | cbp_luma to the coded_block_pattern Exp-Golomb
 * code number for Intra_4x4 / Intra_8x8 macroblocks (H.264 Table 9-4).
 * (Restored the initializer braces lost in this extract.) */
static const uint8_t intra4x4_cbp_to_golomb[48]=
{
     3, 29, 30, 17, 31, 18, 37,  8, 32, 38, 19,  9, 20, 10, 11,  2,
    16, 33, 34, 21, 35, 22, 39,  4, 36, 40, 23,  5, 24,  6,  7,  1,
    41, 42, 43, 25, 44, 26, 46, 12, 45, 47, 27, 13, 28, 14, 15,  0
};
/* Maps (cbp_chroma << 4) | cbp_luma to the coded_block_pattern Exp-Golomb
 * code number for inter macroblocks (H.264 Table 9-4).
 * (Restored the initializer braces lost in this extract.) */
static const uint8_t inter_cbp_to_golomb[48]=
{
     0,  2,  3,  7,  4,  8, 17, 13,  5, 18,  9, 14, 10, 15, 16, 11,
     1, 32, 33, 36, 34, 37, 44, 40, 35, 45, 38, 41, 39, 42, 43, 19,
     6, 24, 25, 20, 26, 21, 46, 28, 27, 47, 22, 29, 23, 30, 31, 12
};
/* mb_type code numbers for B macroblocks, indexed by
 * [partition - D_16x8][mb_type - B_L0_L0].
 * -1 marks combinations that are never emitted; note it wraps to 255 when
 * stored in uint8_t (well-defined unsigned conversion, kept from upstream).
 * (Restored the outer initializer braces lost in this extract.) */
static const uint8_t mb_type_b_to_golomb[3][9]=
{
    { 4,  8, 12, 10,  6, 14, 16, 18, 20 }, /* D_16x8 */
    { 5,  9, 13, 11,  7, 15, 17, 19, 21 }, /* D_8x16 */
    { 1, -1, -1, -1,  2, -1, -1, -1,  3 }  /* D_16x16 */
};
46 static const uint8_t sub_mb_type_p_to_golomb[4]=
/* sub_mb_type code numbers for B macroblocks, indexed by D_* sub-partition.
 * (Restored the initializer braces lost in this extract.) */
static const uint8_t sub_mb_type_b_to_golomb[13]=
{
    10,  4,  5,  1, 11,  6,  7,  2, 12,  8,  9,  3,  0
};
/* Sentinel block indices passed to block_residual_write_cavlc: negative
 * values select the DC-specific VLC tables instead of addressing a real
 * 4x4 block in the non-zero-count cache. */
#define BLOCK_INDEX_CHROMA_DC (-1)
#define BLOCK_INDEX_LUMA_DC   (-2)
58 static inline void bs_write_vlc( bs_t *s, vlc_t v )
60 bs_write( s, v.i_size, v.i_bits );
63 /****************************************************************************
64 * block_residual_write_cavlc:
65 ****************************************************************************/
66 static void block_residual_write_cavlc( x264_t *h, bs_t *s, int i_idx, int16_t *l, int i_count )
/* Writes one residual block in CAVLC order: coeff_token, trailing-one sign
 * bits, the remaining level codes, total_zeros, then run_before values.
 * i_idx is the 4x4 block index, or one of the BLOCK_INDEX_*_DC sentinels.
 * NOTE(review): this extract is missing many lines (declarations of
 * i_last/idx/i_sign/i_suffix_length/i/i_zero_left/i_zl, several brace and
 * loop lines, the empty-block early-out).  Comments describe only the
 * visible flow -- confirm against a complete copy of the file. */
68 int level[16], run[16];
69 int i_total, i_trailing;
/* scan back from the end of the block to the last nonzero coefficient */
78 /* first find i_last */
80 while( i_last >= 0 && l[i_last] == 0 )
/* count of coefficients up to and including the last nonzero one; the
 * nonzero ones are presumably subtracted in missing lines to yield
 * total_zeros -- confirm */
88 i_total_zero = i_last + 1;
/* walk backwards collecting each nonzero level and the zero-run before it */
94 /* level and run and total */
97 level[idx] = l[i_last--];
100 while( i_last >= 0 && l[i_last] == 0 )
/* up to 3 trailing +/-1 coefficients are coded as sign bits only */
112 i_trailing = X264_MIN(3, idx);
113 for( idx = 0; idx < i_trailing; idx++ )
115 if( abs(level[idx]) > 1 )
121 i_sign |= level[idx] < 0;
/* coeff_token: chroma DC has its own table; otherwise the table is chosen
 * from the predicted non-zero count of the neighbouring blocks */
126 if( i_idx == BLOCK_INDEX_CHROMA_DC )
128 bs_write_vlc( s, x264_coeff_token[4][i_total*4+i_trailing] );
132 /* x264_mb_predict_non_zero_code return 0 <-> (16+16+1)>>1 = 16 */
133 static const int ct_index[17] = {0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,3 };
134 int nC = x264_mb_predict_non_zero_code( h, i_idx == BLOCK_INDEX_LUMA_DC ? 0 : i_idx );
135 bs_write_vlc( s, x264_coeff_token[ct_index[nC]][i_total*4+i_trailing] );
/* initial suffix length: 1 if more than 10 coeffs and fewer than 3 T1s */
143 i_suffix_length = i_total > 10 && i_trailing < 3 ? 1 : 0;
/* one sign bit per trailing one, MSB first */
146 bs_write( s, i_trailing, i_sign );
/* remaining levels, each coded as a level_prefix plus optional suffix */
148 for( i = i_trailing; i < i_total; i++ )
150 unsigned int i_level_code;
152 /* calculate level code */
155 i_level_code = -2*level[i] - 1;
157 else /* if( level[i] > 0 ) */
159 i_level_code = 2 * level[i] - 2;
161 if( i == i_trailing && i_trailing < 3 )
163 i_level_code -= 2; /* as level[i] can't be 1 for the first one if i_trailing < 3 */
/* common case: prefix < 14 plus suffix_length suffix bits */
166 if( ( i_level_code >> i_suffix_length ) < 14 )
168 bs_write_vlc( s, x264_level_prefix[i_level_code >> i_suffix_length] );
169 if( i_suffix_length > 0 )
171 bs_write( s, i_suffix_length, i_level_code );
/* prefix 14 escape with a 4-bit suffix (only when suffix_length == 0) */
174 else if( i_suffix_length == 0 && i_level_code < 30 )
176 bs_write_vlc( s, x264_level_prefix[14] );
177 bs_write( s, 4, i_level_code - 14 );
179 else if( i_suffix_length > 0 && ( i_level_code >> i_suffix_length ) == 14 )
181 bs_write_vlc( s, x264_level_prefix[14] );
182 bs_write( s, i_suffix_length, i_level_code );
/* large levels: prefix 15 with a fixed 12-bit suffix */
186 bs_write_vlc( s, x264_level_prefix[15] );
187 i_level_code -= 15 << i_suffix_length;
188 if( i_suffix_length == 0 )
/* levels that don't fit in 12 suffix bits cannot be represented */
193 if( i_level_code >= 1<<12 )
195 x264_log(h, X264_LOG_WARNING, "OVERFLOW levelcode=%d\n", i_level_code );
198 bs_write( s, 12, i_level_code );
/* adapt suffix length for the next level */
201 if( i_suffix_length == 0 )
205 if( abs( level[i] ) > ( 3 << ( i_suffix_length - 1 ) ) && i_suffix_length < 6 )
/* total_zeros is written only when the block isn't completely full */
211 if( i_total < i_count )
213 if( i_idx == BLOCK_INDEX_CHROMA_DC )
215 bs_write_vlc( s, x264_total_zeros_dc[i_total-1][i_total_zero] );
219 bs_write_vlc( s, x264_total_zeros[i_total-1][i_total_zero] );
/* run_before per coefficient, while zeros remain to distribute */
223 for( i = 0, i_zero_left = i_total_zero; i < i_total - 1; i++ )
227 if( i_zero_left <= 0 )
232 i_zl = X264_MIN( i_zero_left - 1, 6 );
234 bs_write_vlc( s, x264_run_before[i_zl][run[i]] );
236 i_zero_left -= run[i];
240 static void cavlc_qp_delta( x264_t *h, bs_t *s )
242 int i_dqp = h->mb.i_qp - h->mb.i_last_qp;
247 else if( i_dqp > 25 )
250 bs_write_se( s, i_dqp );
253 static void cavlc_mb_mvd( x264_t *h, bs_t *s, int i_list, int idx, int width )
256 x264_mb_predict_mv( h, i_list, idx, width, mvp );
257 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[idx]][0] - mvp[0] );
258 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[idx]][1] - mvp[1] );
261 static void cavlc_mb8x8_mvd( x264_t *h, bs_t *s, int i_list, int i )
263 if( !x264_mb_partition_listX_table[i_list][ h->mb.i_sub_partition[i] ] )
266 switch( h->mb.i_sub_partition[i] )
271 cavlc_mb_mvd( h, s, i_list, 4*i, 2 );
276 cavlc_mb_mvd( h, s, i_list, 4*i+0, 2 );
277 cavlc_mb_mvd( h, s, i_list, 4*i+2, 2 );
282 cavlc_mb_mvd( h, s, i_list, 4*i+0, 1 );
283 cavlc_mb_mvd( h, s, i_list, 4*i+1, 1 );
288 cavlc_mb_mvd( h, s, i_list, 4*i+0, 1 );
289 cavlc_mb_mvd( h, s, i_list, 4*i+1, 1 );
290 cavlc_mb_mvd( h, s, i_list, 4*i+2, 1 );
291 cavlc_mb_mvd( h, s, i_list, 4*i+3, 1 );
296 static inline void x264_macroblock_luma_write_cavlc( x264_t *h, bs_t *s, int i8start, int i8end )
299 if( h->mb.b_transform_8x8 )
301 /* shuffle 8x8 dct coeffs into 4x4 lists */
302 for( i8 = i8start; i8 <= i8end; i8++ )
303 if( h->mb.i_cbp_luma & (1 << i8) )
304 for( i4 = 0; i4 < 4; i4++ )
306 for( i = 0; i < 16; i++ )
307 h->dct.block[i4+i8*4].luma4x4[i] = h->dct.luma8x8[i8][i4+i*4];
308 h->mb.cache.non_zero_count[x264_scan8[i4+i8*4]] =
309 array_non_zero_count( h->dct.block[i4+i8*4].luma4x4, 16 );
313 for( i8 = i8start; i8 <= i8end; i8++ )
314 if( h->mb.i_cbp_luma & (1 << i8) )
315 for( i4 = 0; i4 < 4; i4++ )
316 block_residual_write_cavlc( h, s, i4+i8*4, h->dct.block[i4+i8*4].luma4x4, 16 );
319 /*****************************************************************************
320 * x264_macroblock_write:
321 *****************************************************************************/
322 void x264_macroblock_write_cavlc( x264_t *h, bs_t *s )
/* Writes one complete macroblock: mb header (type, intra modes or
 * refs/mvds), coded_block_pattern, qp delta and the CAVLC residual.
 * NOTE(review): this extract is missing many lines (declarations of
 * i / i_mb_i_offset / i_mb_pos_tex / b_sub_ref0 / b_list / i_list / mvp,
 * the slice-type switch cases, skip-run handling, most braces).  Comments
 * describe only the visible flow -- confirm against a complete copy. */
324 const int i_mb_type = h->mb.i_type;
/* remember the bit position so header bits can be accounted separately */
329 const int i_mb_pos_start = bs_pos( s );
/* slice type selects the mb_type code offset (cases not visible here) */
333 switch( h->sh.i_type )
345 x264_log(h, X264_LOG_ERROR, "internal error or slice unsupported\n" );
/* MBAFF: mb_field_decoding_flag for the top MB of a pair, or when the MB
 * above in the pair was skipped */
350 && (!(h->mb.i_mb_y & 1) || IS_SKIP(h->mb.type[h->mb.i_mb_xy - h->mb.i_mb_stride])) )
352 bs_write1( s, h->mb.b_interlaced );
/* ---------- I_PCM: raw samples, no prediction or residual ---------- */
359 if( i_mb_type == I_PCM )
/* mb_type: I_PCM is the intra offset + 25 */
362 bs_write_ue( s, i_mb_i_offset + 25 );
/* PCM payload is a fixed 384 bytes (256 luma + 2 * 64 chroma) */
365 s->i_bits_encoded += 384*8;
/* luma samples in raster order within the 16x16 MB */
369 for( i = 0; i < 16*16; i++ )
371 const int x = 16 * h->mb.i_mb_x + (i % 16);
372 const int y = 16 * h->mb.i_mb_y + (i / 16);
373 bs_write( s, 8, h->fenc->plane[0][y*h->mb.pic.i_stride[0]+x] );
/* chroma U samples (8x8) */
376 for( i = 0; i < 8*8; i++ )
378 const int x = 8 * h->mb.i_mb_x + (i % 8);
379 const int y = 8 * h->mb.i_mb_y + (i / 8);
380 bs_write( s, 8, h->fenc->plane[1][y*h->mb.pic.i_stride[1]+x] );
/* chroma V samples (8x8) */
383 for( i = 0; i < 8*8; i++ )
385 const int x = 8 * h->mb.i_mb_x + (i % 8);
386 const int y = 8 * h->mb.i_mb_y + (i / 8);
387 bs_write( s, 8, h->fenc->plane[2][y*h->mb.pic.i_stride[2]+x] );
/* ---------- I_4x4 / I_8x8: per-block intra prediction modes ---------- */
392 else if( i_mb_type == I_4x4 || i_mb_type == I_8x8 )
/* I_8x8 signals one mode per 8x8 block, i.e. every 4th 4x4 index */
394 int di = i_mb_type == I_8x8 ? 4 : 1;
395 bs_write_ue( s, i_mb_i_offset + 0 );
396 if( h->pps->b_transform_8x8_mode )
397 bs_write1( s, h->mb.b_transform_8x8 );
399 /* Prediction: Luma */
400 for( i = 0; i < 16; i += di )
402 int i_pred = x264_mb_predict_intra4x4_mode( h, i );
403 int i_mode = x264_mb_pred_mode4x4_fix( h->mb.cache.intra4x4_pred_mode[x264_scan8[i]] );
405 if( i_pred == i_mode)
407 bs_write1( s, 1 ); /* b_prev_intra4x4_pred_mode */
411 bs_write1( s, 0 ); /* b_prev_intra4x4_pred_mode */
/* rem_intra4x4_pred_mode: 3 bits; modes above the predicted one
 * are shifted down by one */
412 if( i_mode < i_pred )
414 bs_write( s, 3, i_mode );
418 bs_write( s, 3, i_mode - 1 );
/* intra_chroma_pred_mode */
422 bs_write_ue( s, x264_mb_pred_mode8x8c_fix[ h->mb.i_chroma_pred_mode ] );
/* ---------- I_16x16: pred mode and cbp folded into mb_type ---------- */
424 else if( i_mb_type == I_16x16 )
426 bs_write_ue( s, i_mb_i_offset + 1 + x264_mb_pred_mode16x16_fix[h->mb.i_intra16x16_pred_mode] +
427 h->mb.i_cbp_chroma * 4 + ( h->mb.i_cbp_luma == 0 ? 0 : 12 ) );
428 bs_write_ue( s, x264_mb_pred_mode8x8c_fix[ h->mb.i_chroma_pred_mode ] );
/* ---------- P_L0: 16x16 / 16x8 / 8x16 list-0 partitions ---------- */
430 else if( i_mb_type == P_L0 )
434 if( h->mb.i_partition == D_16x16 )
/* ref_idx is written only when more than one reference is available */
438 if( h->mb.pic.i_fref[0] > 1 )
440 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[0]] );
442 x264_mb_predict_mv( h, 0, 0, 4, mvp );
443 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[0]][0] - mvp[0] );
444 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[0]][1] - mvp[1] );
446 else if( h->mb.i_partition == D_16x8 )
449 if( h->mb.pic.i_fref[0] > 1 )
451 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[0]] );
452 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[8]] );
/* one mvd per 16x8 half (blocks 0 and 8) */
455 x264_mb_predict_mv( h, 0, 0, 4, mvp );
456 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[0]][0] - mvp[0] );
457 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[0]][1] - mvp[1] );
459 x264_mb_predict_mv( h, 0, 8, 4, mvp );
460 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[8]][0] - mvp[0] );
461 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[8]][1] - mvp[1] );
463 else if( h->mb.i_partition == D_8x16 )
466 if( h->mb.pic.i_fref[0] > 1 )
468 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[0]] );
469 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[4]] );
/* one mvd per 8x16 half (blocks 0 and 4) */
472 x264_mb_predict_mv( h, 0, 0, 2, mvp );
473 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[0]][0] - mvp[0] );
474 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[0]][1] - mvp[1] );
476 x264_mb_predict_mv( h, 0, 4, 2, mvp );
477 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[4]][0] - mvp[0] );
478 bs_write_se( s, h->mb.cache.mv[0][x264_scan8[4]][1] - mvp[1] );
/* ---------- P_8x8: sub-partitions, optional per-8x8 refs ---------- */
481 else if( i_mb_type == P_8x8 )
/* presumably b_sub_ref0 is derived from this all-refs-zero test (the
 * assignment lines are not visible) -- confirm */
485 if( h->mb.cache.ref[0][x264_scan8[0]] == 0 && h->mb.cache.ref[0][x264_scan8[4]] == 0 &&
486 h->mb.cache.ref[0][x264_scan8[8]] == 0 && h->mb.cache.ref[0][x264_scan8[12]] == 0 )
/* sub_mb_type for each 8x8 block */
497 for( i = 0; i < 4; i++ )
499 bs_write_ue( s, sub_mb_type_p_to_golomb[ h->mb.i_sub_partition[i] ] );
502 if( h->mb.pic.i_fref[0] > 1 && b_sub_ref0 )
504 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[0]] );
505 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[4]] );
506 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[8]] );
507 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[12]] );
/* mvds for each 8x8 sub-partition */
510 for( i = 0; i < 4; i++ )
511 cavlc_mb8x8_mvd( h, s, 0, i );
/* ---------- B_8x8 ---------- */
513 else if( i_mb_type == B_8x8 )
/* mb_type: B_8x8 is code number 22 */
515 bs_write_ue( s, 22 );
518 for( i = 0; i < 4; i++ )
520 bs_write_ue( s, sub_mb_type_b_to_golomb[ h->mb.i_sub_partition[i] ] );
/* list-0 then list-1 refs, only for sub-partitions using that list */
523 for( i = 0; i < 4; i++ )
525 if( x264_mb_partition_listX_table[0][ h->mb.i_sub_partition[i] ] )
527 bs_write_te( s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[i*4]] );
530 for( i = 0; i < 4; i++ )
532 if( x264_mb_partition_listX_table[1][ h->mb.i_sub_partition[i] ] )
534 bs_write_te( s, h->mb.pic.i_fref[1] - 1, h->mb.cache.ref[1][x264_scan8[i*4]] );
/* mvds, list 0 then list 1 */
538 for( i = 0; i < 4; i++ )
539 cavlc_mb8x8_mvd( h, s, 0, i );
540 for( i = 0; i < 4; i++ )
541 cavlc_mb8x8_mvd( h, s, 1, i );
/* ---------- other B types (B_L0_L0 .. B_BI_BI, not direct) ---------- */
543 else if( i_mb_type != B_DIRECT )
552 /* init ref list utilisations */
553 for( i = 0; i < 2; i++ )
555 b_list[0][i] = x264_mb_type_list0_table[i_mb_type][i];
556 b_list[1][i] = x264_mb_type_list1_table[i_mb_type][i];
560 bs_write_ue( s, mb_type_b_to_golomb[ h->mb.i_partition - D_16x8 ][ i_mb_type - B_L0_L0 ] );
/* refs for both lists, per partition shape */
562 for( i_list = 0; i_list < 2; i_list++ )
564 const int i_ref_max = i_list == 0 ? h->mb.pic.i_fref[0] : h->mb.pic.i_fref[1];
568 switch( h->mb.i_partition )
571 if( b_list[i_list][0] ) bs_write_te( s, i_ref_max - 1, h->mb.cache.ref[i_list][x264_scan8[0]] );
574 if( b_list[i_list][0] ) bs_write_te( s, i_ref_max - 1, h->mb.cache.ref[i_list][x264_scan8[0]] );
575 if( b_list[i_list][1] ) bs_write_te( s, i_ref_max - 1, h->mb.cache.ref[i_list][x264_scan8[8]] );
578 if( b_list[i_list][0] ) bs_write_te( s, i_ref_max - 1, h->mb.cache.ref[i_list][x264_scan8[0]] );
579 if( b_list[i_list][1] ) bs_write_te( s, i_ref_max - 1, h->mb.cache.ref[i_list][x264_scan8[4]] );
/* mvds for both lists, per partition shape */
584 for( i_list = 0; i_list < 2; i_list++ )
586 switch( h->mb.i_partition )
589 if( b_list[i_list][0] )
591 x264_mb_predict_mv( h, i_list, 0, 4, mvp );
592 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[0]][0] - mvp[0] );
593 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[0]][1] - mvp[1] );
597 if( b_list[i_list][0] )
599 x264_mb_predict_mv( h, i_list, 0, 4, mvp );
600 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[0]][0] - mvp[0] );
601 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[0]][1] - mvp[1] );
603 if( b_list[i_list][1] )
605 x264_mb_predict_mv( h, i_list, 8, 4, mvp );
606 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[8]][0] - mvp[0] );
607 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[8]][1] - mvp[1] );
611 if( b_list[i_list][0] )
613 x264_mb_predict_mv( h, i_list, 0, 2, mvp );
614 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[0]][0] - mvp[0] );
615 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[0]][1] - mvp[1] );
617 if( b_list[i_list][1] )
619 x264_mb_predict_mv( h, i_list, 4, 2, mvp );
620 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[4]][0] - mvp[0] );
621 bs_write_se( s, h->mb.cache.mv[i_list][x264_scan8[4]][1] - mvp[1] );
/* B_DIRECT: handling not visible in this extract */
627 else if( i_mb_type == B_DIRECT )
633 x264_log(h, X264_LOG_ERROR, "invalid/unhandled mb_type\n" );
/* header bits accounted up to here; the rest is texture */
638 i_mb_pos_tex = bs_pos( s );
639 h->stat.frame.i_hdr_bits += i_mb_pos_tex - i_mb_pos_start;
642 /* Coded block patern */
643 if( i_mb_type == I_4x4 || i_mb_type == I_8x8 )
645 bs_write_ue( s, intra4x4_cbp_to_golomb[( h->mb.i_cbp_chroma << 4 )|h->mb.i_cbp_luma] );
/* I_16x16 has no explicit cbp: it was folded into mb_type above */
647 else if( i_mb_type != I_16x16 )
649 bs_write_ue( s, inter_cbp_to_golomb[( h->mb.i_cbp_chroma << 4 )|h->mb.i_cbp_luma] );
652 /* transform size 8x8 flag */
653 if( x264_mb_transform_8x8_allowed( h ) && h->mb.i_cbp_luma )
655 bs_write1( s, h->mb.b_transform_8x8 );
/* ---------- residual ---------- */
659 if( i_mb_type == I_16x16 )
661 cavlc_qp_delta( h, s );
/* DC luma coefficients, then per-4x4 AC if the cbp says so */
664 block_residual_write_cavlc( h, s, BLOCK_INDEX_LUMA_DC , h->dct.luma16x16_dc, 16 );
667 if( h->mb.i_cbp_luma != 0 )
668 for( i = 0; i < 16; i++ )
669 block_residual_write_cavlc( h, s, i, h->dct.block[i].residual_ac, 15 );
671 else if( h->mb.i_cbp_luma != 0 || h->mb.i_cbp_chroma != 0 )
673 cavlc_qp_delta( h, s );
674 x264_macroblock_luma_write_cavlc( h, s, 0, 3 );
676 if( h->mb.i_cbp_chroma != 0 )
678 /* Chroma DC residual present */
679 block_residual_write_cavlc( h, s, BLOCK_INDEX_CHROMA_DC, h->dct.chroma_dc[0], 4 );
680 block_residual_write_cavlc( h, s, BLOCK_INDEX_CHROMA_DC, h->dct.chroma_dc[1], 4 );
681 if( h->mb.i_cbp_chroma&0x02 ) /* Chroma AC residual present */
682 for( i = 0; i < 8; i++ )
683 block_residual_write_cavlc( h, s, 16 + i, h->dct.block[16+i].residual_ac, 15 );
/* texture bit statistics: intra vs inter */
687 if( IS_INTRA( i_mb_type ) )
688 h->stat.frame.i_itex_bits += bs_pos(s) - i_mb_pos_tex;
690 h->stat.frame.i_ptex_bits += bs_pos(s) - i_mb_pos_tex;
695 /*****************************************************************************
696 * RD only; doesn't generate a valid bitstream
697 * doesn't write cbp or chroma dc (I don't know how much this matters)
698 * works on all partition sizes except 16x16
699 * for sub8x8, call once per 8x8 block
700 *****************************************************************************/
701 int x264_partition_size_cavlc( x264_t *h, int i8, int i_pixel )
/* RD helper: returns the number of bits needed to code partition i8
 * (sub_mb_type / ref / mvd plus the luma and chroma AC residual) into a
 * counting bitstream.  Not a valid bitstream -- see the header comment.
 * NOTE(review): this extract is missing lines (the local bs_t 's' and 'j'
 * declarations, braces, an else branch, any guards around the chroma AC
 * writes).  Comments describe only the visible flow -- confirm against a
 * complete copy of the file. */
704 const int i_mb_type = h->mb.i_type;
707 s.i_bits_encoded = 0;
709 if( i_mb_type == P_8x8 )
711 bs_write_ue( &s, sub_mb_type_p_to_golomb[ h->mb.i_sub_partition[i8] ] );
712 if( h->mb.pic.i_fref[0] > 1 )
713 bs_write_te( &s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[4*i8]] );
714 cavlc_mb8x8_mvd( h, &s, 0, i8 );
716 else if( i_mb_type == P_L0 )
718 if( h->mb.pic.i_fref[0] > 1 )
719 bs_write_te( &s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[4*i8]] );
/* mvd width 4 for 16x8 rows, otherwise 2 (8x16 columns) */
720 if( h->mb.i_partition == D_16x8 )
721 cavlc_mb_mvd( h, &s, 0, 4*i8, 4 );
723 cavlc_mb_mvd( h, &s, 0, 4*i8, 2 );
725 else if( i_mb_type == B_8x8 )
727 bs_write_ue( &s, sub_mb_type_b_to_golomb[ h->mb.i_sub_partition[i8] ] );
/* per-list ref written only with >1 reference and if the list is used */
729 if( h->mb.pic.i_fref[0] > 1
730 && x264_mb_partition_listX_table[0][ h->mb.i_sub_partition[i8] ] )
731 bs_write_te( &s, h->mb.pic.i_fref[0] - 1, h->mb.cache.ref[0][x264_scan8[4*i8]] );
732 if( h->mb.pic.i_fref[1] > 1
733 && x264_mb_partition_listX_table[1][ h->mb.i_sub_partition[i8] ] )
734 bs_write_te( &s, h->mb.pic.i_fref[1] - 1, h->mb.cache.ref[1][x264_scan8[4*i8]] );
736 cavlc_mb8x8_mvd( h, &s, 0, i8 );
737 cavlc_mb8x8_mvd( h, &s, 1, i8 );
741 x264_log(h, X264_LOG_ERROR, "invalid/unhandled mb_type\n" );
/* residual: one iteration per 8x8 block (two for 16x8/8x16 partitions) */
745 for( j = (i_pixel < PIXEL_8x8); j >= 0; j-- )
747 x264_macroblock_luma_write_cavlc( h, &s, i8, i8 );
/* chroma AC for the two 4x4 chroma blocks co-located with this 8x8
 * (presumably guarded by cbp_chroma in the missing lines -- confirm) */
749 block_residual_write_cavlc( h, &s, i8, h->dct.block[16+i8 ].residual_ac, 15 );
750 block_residual_write_cavlc( h, &s, i8+4, h->dct.block[16+i8+4].residual_ac, 15 );
752 i8 += x264_pixel_size[i_pixel].h >> 3;
755 return s.i_bits_encoded;
758 static int cavlc_intra4x4_pred_size( x264_t *h, int i4, int i_mode )
760 if( x264_mb_predict_intra4x4_mode( h, i4 ) == x264_mb_pred_mode4x4_fix( i_mode ) )
766 static int x264_partition_i8x8_size_cavlc( x264_t *h, int i8, int i_mode )
769 h->out.bs.i_bits_encoded = cavlc_intra4x4_pred_size( h, 4*i8, i_mode );
770 for( i4 = 0; i4 < 4; i4++ )
772 for( i = 0; i < 16; i++ )
773 h->dct.block[i4+i8*4].luma4x4[i] = h->dct.luma8x8[i8][i4+i*4];
774 h->mb.cache.non_zero_count[x264_scan8[i4+i8*4]] =
775 array_non_zero_count( h->dct.block[i4+i8*4].luma4x4, 16 );
776 block_residual_write_cavlc( h, &h->out.bs, i4+i8*4, h->dct.block[i4+i8*4].luma4x4, 16 );
778 return h->out.bs.i_bits_encoded;
781 static int x264_partition_i4x4_size_cavlc( x264_t *h, int i4, int i_mode )
783 h->out.bs.i_bits_encoded = cavlc_intra4x4_pred_size( h, i4, i_mode );
784 block_residual_write_cavlc( h, &h->out.bs, i4, h->dct.block[i4].luma4x4, 16 );
785 return h->out.bs.i_bits_encoded;
788 static int x264_i8x8_chroma_size_cavlc( x264_t *h )
790 h->out.bs.i_bits_encoded = bs_size_ue( x264_mb_pred_mode8x8c_fix[ h->mb.i_chroma_pred_mode ] );
791 if( h->mb.i_cbp_chroma != 0 )
793 block_residual_write_cavlc( h, &h->out.bs, BLOCK_INDEX_CHROMA_DC, h->dct.chroma_dc[0], 4 );
794 block_residual_write_cavlc( h, &h->out.bs, BLOCK_INDEX_CHROMA_DC, h->dct.chroma_dc[1], 4 );
796 if( h->mb.i_cbp_chroma == 2 )
799 for( i = 0; i < 8; i++ )
800 block_residual_write_cavlc( h, &h->out.bs, 16 + i, h->dct.block[16+i].residual_ac, 15 );
803 return h->out.bs.i_bits_encoded;