1 /*****************************************************************************
2 * cabac.c: cabac bitstream writing
3 *****************************************************************************
4 * Copyright (C) 2003-2013 x264 project
6 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
7 * Loren Merritt <lorenm@u.washington.edu>
8 * Fiona Glaser <fiona@x264.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
24 * This program is also available under a commercial proprietary license.
25 * For more information, contact us at licensing@x264.com.
26 *****************************************************************************/
28 #include "common/common.h"
29 #include "macroblock.h"
/* Write the intra mb_type syntax element using the six supplied CABAC context
 * indices (they differ per slice type; see the callers in the mb_header_*
 * functions).  Tree: one '0' bin for I_4x4/I_8x8; '1' + terminal + flush for
 * I_PCM; otherwise the I_16x16 code (cbp_luma flag, chroma-cbp bins, and two
 * bins for the 16x16 prediction mode). */
35 static inline void x264_cabac_mb_type_intra( x264_t *h, x264_cabac_t *cb, int i_mb_type,
36 int ctx0, int ctx1, int ctx2, int ctx3, int ctx4, int ctx5 )
38 if( i_mb_type == I_4x4 || i_mb_type == I_8x8 )
40 x264_cabac_encode_decision_noup( cb, ctx0, 0 );
43 else if( i_mb_type == I_PCM )
45 x264_cabac_encode_decision_noup( cb, ctx0, 1 );
/* PCM terminates CABAC; the raw samples are written uncompressed afterwards. */
46 x264_cabac_encode_flush( h, cb );
/* else: I_16x16.  Fix up the pred mode for bitstream-legal numbering. */
51 int i_pred = x264_mb_pred_mode16x16_fix[h->mb.i_intra16x16_pred_mode];
53 x264_cabac_encode_decision_noup( cb, ctx0, 1 );
54 x264_cabac_encode_terminal( cb );
/* coded-block-pattern part of the I_16x16 mb_type */
56 x264_cabac_encode_decision_noup( cb, ctx1, !!h->mb.i_cbp_luma );
57 if( h->mb.i_cbp_chroma == 0 )
58 x264_cabac_encode_decision_noup( cb, ctx2, 0 );
/* else: chroma cbp nonzero -> two bins (present, and DC-only vs DC+AC) */
61 x264_cabac_encode_decision( cb, ctx2, 1 );
62 x264_cabac_encode_decision_noup( cb, ctx3, h->mb.i_cbp_chroma>>1 );
/* 16x16 intra prediction mode, coded as two bins */
64 x264_cabac_encode_decision( cb, ctx4, i_pred>>1 );
65 x264_cabac_encode_decision_noup( cb, ctx5, i_pred&1 );
/* Write mb_field_decoding_flag for an MBAFF macroblock pair.
 * NOTE(review): the initialization of ctx (presumably `int ctx = 0;`) is not
 * visible in this excerpt — confirm against the full source. */
70 static void x264_cabac_field_decoding_flag( x264_t *h, x264_cabac_t *cb )
/* +1 if the previous mb pair on this row was field-coded (skipped at x == 0) */
73 ctx += h->mb.field_decoding_flag & !!h->mb.i_mb_x;
/* +1 if the mb pair above exists, belongs to the current slice, and is field-coded */
74 ctx += (h->mb.i_mb_top_mbpair_xy >= 0
75 && h->mb.slice_table[h->mb.i_mb_top_mbpair_xy] == h->sh.i_first_mb
76 && h->mb.field[h->mb.i_mb_top_mbpair_xy]);
78 x264_cabac_encode_decision_noup( cb, 70 + ctx, MB_INTERLACED );
/* remember the decision so the next pair on this row can use it as context */
79 h->mb.field_decoding_flag = MB_INTERLACED;
/* Write one intra 4x4/8x8 luma prediction mode: a single '1' bin when the
 * chosen mode equals the predicted mode, otherwise a '0' bin followed by a
 * 3-bit rem_intra_pred_mode, LSB first.
 * NOTE(review): the standard decrement of i_mode when i_mode > i_pred
 * (rem_intra4x4_pred_mode mapping) is not visible in this excerpt — confirm
 * it exists in the elided lines of the full source. */
83 static void x264_cabac_intra4x4_pred_mode( x264_cabac_t *cb, int i_pred, int i_mode )
85 if( i_pred == i_mode )
86 x264_cabac_encode_decision( cb, 68, 1 );
89 x264_cabac_encode_decision( cb, 68, 0 );
92 x264_cabac_encode_decision( cb, 69, (i_mode )&0x01 );
93 x264_cabac_encode_decision( cb, 69, (i_mode >> 1)&0x01 );
94 x264_cabac_encode_decision( cb, 69, (i_mode >> 2) );
/* Write intra_chroma_pred_mode as a truncated unary code (max 3 bins).
 * The first bin's context depends on whether the left/top neighbours used a
 * non-DC chroma mode.  NOTE(review): the declaration of ctx and the ctx
 * increments inside the two neighbour tests are not visible in this excerpt —
 * confirm against the full source. */
98 static void x264_cabac_intra_chroma_pred_mode( x264_t *h, x264_cabac_t *cb )
100 int i_mode = x264_mb_chroma_pred_mode_fix[h->mb.i_chroma_pred_mode];
103 /* No need to test for I4x4 or I_16x16 as cache_save handle that */
104 if( (h->mb.i_neighbour & MB_LEFT) && h->mb.chroma_pred_mode[h->mb.i_mb_left_xy[0]] != 0 )
106 if( (h->mb.i_neighbour & MB_TOP) && h->mb.chroma_pred_mode[h->mb.i_mb_top_xy] != 0 )
109 x264_cabac_encode_decision_noup( cb, 64 + ctx, i_mode > 0 );
/* remaining bins of the truncated unary code share a fixed context (64+3) */
112 x264_cabac_encode_decision( cb, 64 + 3, i_mode > 1 );
114 x264_cabac_encode_decision_noup( cb, 64 + 3, i_mode > 2 );
/* Write the four luma coded_block_pattern bits.  Each bit's context
 * (73..76) is derived from the already-coded neighbouring 8x8 blocks: the
 * subtractions pick out the left/top neighbour's cbp bit for each position
 * (bits 0/1 come from the left/top caches, bits of the current cbp feed the
 * later positions). */
118 static void x264_cabac_cbp_luma( x264_t *h, x264_cabac_t *cb )
120 int cbp = h->mb.i_cbp_luma;
121 int cbp_l = h->mb.cache.i_cbp_left;
122 int cbp_t = h->mb.cache.i_cbp_top;
123 x264_cabac_encode_decision ( cb, 76 - ((cbp_l >> 1) & 1) - ((cbp_t >> 1) & 2), (cbp >> 0) & 1 );
124 x264_cabac_encode_decision ( cb, 76 - ((cbp >> 0) & 1) - ((cbp_t >> 2) & 2), (cbp >> 1) & 1 );
125 x264_cabac_encode_decision ( cb, 76 - ((cbp_l >> 3) & 1) - ((cbp << 1) & 2), (cbp >> 2) & 1 );
126 x264_cabac_encode_decision_noup( cb, 76 - ((cbp >> 2) & 1) - ((cbp >> 0) & 2), (cbp >> 3) & 1 );
/* Write the chroma coded_block_pattern (0 = none, 1 = DC only, 2 = DC+AC) as
 * up to two context-coded bins; contexts depend on the neighbours' chroma cbp.
 * NOTE(review): the declaration/reset of ctx between the two bins is not
 * visible in this excerpt — confirm against the full source. */
129 static void x264_cabac_cbp_chroma( x264_t *h, x264_cabac_t *cb )
131 int cbp_a = h->mb.cache.i_cbp_left & 0x30;
132 int cbp_b = h->mb.cache.i_cbp_top & 0x30;
/* first bin: "any chroma coefficients?", ctx from neighbour presence */
135 if( cbp_a && h->mb.cache.i_cbp_left != -1 ) ctx++;
136 if( cbp_b && h->mb.cache.i_cbp_top != -1 ) ctx+=2;
137 if( h->mb.i_cbp_chroma == 0 )
138 x264_cabac_encode_decision_noup( cb, 77 + ctx, 0 );
141 x264_cabac_encode_decision_noup( cb, 77 + ctx, 1 );
/* second bin: "AC as well as DC?", ctx from neighbours having AC (0x20) */
144 if( cbp_a == 0x20 ) ctx++;
145 if( cbp_b == 0x20 ) ctx += 2;
146 x264_cabac_encode_decision_noup( cb, 77 + ctx, h->mb.i_cbp_chroma >> 1 );
/* Write mb_qp_delta as a unary code; dqp is first mapped to a non-negative
 * value (positive/negative interleave) and wrapped modulo the legal QP range.
 * NOTE(review): the lines computing `val` from i_dqp and the unary while-loop
 * body (including the ctx update between bins) are not visible in this
 * excerpt — confirm against the full source. */
150 static void x264_cabac_qp_delta( x264_t *h, x264_cabac_t *cb )
152 int i_dqp = h->mb.i_qp - h->mb.i_last_qp;
155 /* Avoid writing a delta quant if we have an empty i16x16 block, e.g. in a completely flat background area */
156 if( h->mb.i_type == I_16x16 && !h->mb.cbp[h->mb.i_mb_xy] )
/* force dqp = 0 for the skipped-coefficient case */
159 h->mb.i_qp = h->mb.i_last_qp;
164 /* Since, per the above, empty-CBP I16x16 blocks never have delta quants,
165 * we don't have to check for them. */
166 ctx = h->mb.i_last_dqp && h->mb.cbp[h->mb.i_mb_prev_xy];
170 /* Faster than (i_dqp <= 0 ? (-2*i_dqp) : (2*i_dqp-1)).
171 * If you so much as sneeze on these lines, gcc will compile this suboptimally. */
174 if( val < 0 ) val = i_dqp;
176 /* dqp is interpreted modulo (QP_MAX_SPEC+1) */
177 if( val >= QP_MAX_SPEC && val != QP_MAX_SPEC+1 )
178 val = 2*QP_MAX_SPEC+1 - val;
/* unary: one '1' bin per step, then a terminating '0' bin */
181 x264_cabac_encode_decision( cb, 60 + ctx, 1 );
185 x264_cabac_encode_decision_noup( cb, 60 + ctx, 0 );
/* Write mb_skip_flag.  Base context 11 + neighbour-skip count for P slices.
 * NOTE(review): the context offset applied when the slice is not P (B slices
 * use a different context group) is not visible in this excerpt — confirm
 * against the full source. */
189 void x264_cabac_mb_skip( x264_t *h, int b_skip )
191 int ctx = h->mb.cache.i_neighbour_skip + 11;
192 if( h->sh.i_type != SLICE_TYPE_P )
194 x264_cabac_encode_decision( &h->cabac, ctx, b_skip );
/* Write sub_mb_type for one 8x8 partition in a P macroblock.
 * Tree: '1' = 8x8; '0' then '0' = 8x4; '0' '1' then one bin selecting
 * 4x8 vs 4x4. */
198 static inline void x264_cabac_subpartition_p( x264_cabac_t *cb, int i_sub )
200 if( i_sub == D_L0_8x8 )
202 x264_cabac_encode_decision( cb, 21, 1 );
205 x264_cabac_encode_decision( cb, 21, 0 );
206 if( i_sub == D_L0_8x4 )
207 x264_cabac_encode_decision( cb, 22, 0 );
210 x264_cabac_encode_decision( cb, 22, 1 );
211 x264_cabac_encode_decision( cb, 23, i_sub == D_L0_4x8 );
/* Write sub_mb_type for one 8x8 partition in a B macroblock.  Only the
 * 8x8-sized sub-types used by x264 are handled here: DIRECT ('0'), BI
 * ('1','1','0','0','0'), and L0/L1 ('1','0', then one bin). */
215 static ALWAYS_INLINE void x264_cabac_subpartition_b( x264_cabac_t *cb, int i_sub )
217 if( i_sub == D_DIRECT_8x8 )
219 x264_cabac_encode_decision( cb, 36, 0 );
222 x264_cabac_encode_decision( cb, 36, 1 );
223 if( i_sub == D_BI_8x8 )
225 x264_cabac_encode_decision( cb, 37, 1 );
226 x264_cabac_encode_decision( cb, 38, 0 );
227 x264_cabac_encode_decision( cb, 39, 0 );
228 x264_cabac_encode_decision( cb, 39, 0 );
/* else: single-list 8x8 (L0 or L1) */
231 x264_cabac_encode_decision( cb, 37, 0 );
232 x264_cabac_encode_decision( cb, 39, i_sub == D_L1_8x8 );
/* Write transform_size_8x8_flag; context 399..401 depends on how many
 * neighbours used the 8x8 transform. */
235 static ALWAYS_INLINE void x264_cabac_transform_size( x264_t *h, x264_cabac_t *cb )
237 int ctx = 399 + h->mb.cache.i_neighbour_transform_size;
238 x264_cabac_encode_decision_noup( cb, ctx, h->mb.b_transform_8x8 );
/* Write ref_idx for one partition as a unary code.  The first bin's context
 * is derived from whether the left/top neighbours used a nonzero ref (skipped
 * neighbours are excluded in B frames).
 * NOTE(review): the declaration of ctx, its increments inside the two
 * neighbour tests, and the ctx update inside the unary loop are not visible
 * in this excerpt — confirm against the full source. */
241 static ALWAYS_INLINE void x264_cabac_ref_internal( x264_t *h, x264_cabac_t *cb, int i_list, int idx, int bframe )
243 const int i8 = x264_scan8[idx];
244 const int i_refa = h->mb.cache.ref[i_list][i8 - 1];
245 const int i_refb = h->mb.cache.ref[i_list][i8 - 8];
248 if( i_refa > 0 && (!bframe || !h->mb.cache.skip[i8 - 1]) )
250 if( i_refb > 0 && (!bframe || !h->mb.cache.skip[i8 - 8]) )
/* unary: one '1' bin per ref index step, terminated by a '0' bin */
253 for( int i_ref = h->mb.cache.ref[i_list][i8]; i_ref > 0; i_ref-- )
255 x264_cabac_encode_decision( cb, 54 + ctx, 1 );
258 x264_cabac_encode_decision( cb, 54 + ctx, 0 );
/* P-slice ref_idx: list 0 only, no skip-neighbour exclusion (bframe = 0). */
261 static NOINLINE void x264_cabac_ref_p( x264_t *h, x264_cabac_t *cb, int idx )
263 x264_cabac_ref_internal( h, cb, 0, idx, 0 );
/* B-slice ref_idx: either list, with skip-neighbour exclusion (bframe = 1). */
265 static NOINLINE void x264_cabac_ref_b( x264_t *h, x264_cabac_t *cb, int i_list, int idx )
267 x264_cabac_ref_internal( h, cb, i_list, idx, 1 );
/* Write one motion-vector-difference component (l = 0 for x, 1 for y) as
 * UEG3 (unary prefix with contexts, Exp-Golomb k=3 suffix in bypass bins,
 * then a bypass sign bit).  `ctx` is derived from the neighbours' summed
 * absolute mvd.  Returns the clamped absolute value for the mvd cache.
 * NOTE(review): this excerpt interleaves both the RDO (table-based
 * size-estimation using x264_cabac_size_unary/cabac_size_5ones) and the real
 * bitstream paths; the #if RDO_SKIP_BS markers and several branch lines are
 * not visible — confirm structure against the full source. */
270 static ALWAYS_INLINE int x264_cabac_mvd_cpn( x264_t *h, x264_cabac_t *cb, int i_list, int idx, int l, int mvd, int ctx )
/* contexts 40.. for x, 47.. for y */
272 int ctxbase = l ? 47 : 40;
/* mvd == 0: single '0' bin, nothing else */
276 x264_cabac_encode_decision( cb, ctxbase + ctx, 0 );
280 int i_abs = abs( mvd );
281 x264_cabac_encode_decision( cb, ctxbase + ctx, 1 );
/* small magnitudes: pure unary with per-position contexts */
285 for( int i = 1; i < i_abs; i++ )
286 x264_cabac_encode_decision( cb, ctxbase + i + 2, 1 );
287 x264_cabac_encode_decision( cb, ctxbase + i_abs + 2, 0 );
/* sign in a bypass bin (mvd >> 31 == 1 iff negative) */
288 x264_cabac_encode_bypass( cb, mvd >> 31 );
/* larger magnitudes: three fixed '1' bins... */
292 x264_cabac_encode_decision( cb, ctxbase + 3, 1 );
293 x264_cabac_encode_decision( cb, ctxbase + 4, 1 );
294 x264_cabac_encode_decision( cb, ctxbase + 5, 1 );
/* ...then RDO size estimation via precomputed unary cost tables */
297 cb->f8_bits_encoded += x264_cabac_size_unary[i_abs - 3][cb->state[ctxbase+6]];
298 cb->state[ctxbase+6] = x264_cabac_transition_unary[i_abs - 3][cb->state[ctxbase+6]];
/* magnitude >= 9: five '1's worth of cost, plus Exp-Golomb k=3 suffix */
302 cb->f8_bits_encoded += cabac_size_5ones[cb->state[ctxbase+6]];
303 cb->state[ctxbase+6] = cabac_transition_5ones[cb->state[ctxbase+6]];
304 x264_cabac_encode_ue_bypass( cb, 3, i_abs - 9 );
/* real-bitstream path: context schedule for the unary prefix bins */
308 static const uint8_t ctxes[8] = { 3,4,5,6,6,6,6,6 };
312 for( int i = 1; i < i_abs; i++ )
313 x264_cabac_encode_decision( cb, ctxbase + ctxes[i-1], 1 );
314 x264_cabac_encode_decision( cb, ctxbase + ctxes[i_abs-1], 0 );
/* prefix saturates at 8 '1' bins, remainder goes to the EG3 suffix */
318 for( int i = 1; i < 9; i++ )
319 x264_cabac_encode_decision( cb, ctxbase + ctxes[i-1], 1 );
320 x264_cabac_encode_ue_bypass( cb, 3, i_abs - 9 );
322 x264_cabac_encode_bypass( cb, mvd >> 31 );
324 /* Since we don't need to keep track of MVDs larger than 66, just cap the value.
325 * This lets us store MVDs as 8-bit values instead of 16-bit. */
326 return X264_MIN( i_abs, 66 );
/* Predict the MV for this partition, write both mvd components, and return
 * the two clamped absolute mvds packed into a uint16_t (x in the low byte)
 * for storage in the mvd cache.  The context for each component comes from
 * the neighbour mvd sums packed by x264_cabac_mvd_sum. */
329 static NOINLINE uint16_t x264_cabac_mvd( x264_t *h, x264_cabac_t *cb, int i_list, int idx, int width )
331 ALIGNED_4( int16_t mvp[2] );
335 x264_mb_predict_mv( h, i_list, idx, width, mvp );
/* mvd = actual MV minus predicted MV */
336 mdx = h->mb.cache.mv[i_list][x264_scan8[idx]][0] - mvp[0];
337 mdy = h->mb.cache.mv[i_list][x264_scan8[idx]][1] - mvp[1];
/* left/top neighbour |mvd| sums, packed x|y<<8, drive the context choice */
338 uint16_t amvd = x264_cabac_mvd_sum(h->mb.cache.mvd[i_list][x264_scan8[idx] - 1],
339 h->mb.cache.mvd[i_list][x264_scan8[idx] - 8]);
342 mdx = x264_cabac_mvd_cpn( h, cb, i_list, idx, 0, mdx, amvd&0xFF );
343 mdy = x264_cabac_mvd_cpn( h, cb, i_list, idx, 1, mdy, amvd>>8 );
345 return pack8to16(mdx,mdy);
/* Wrapper macro: write the mvd for a partition and store the returned
 * clamped |mvd| pair into the mvd cache for the whole width x height block
 * (the function itself doesn't know the partition height). */
348 #define x264_cabac_mvd(h,cb,i_list,idx,width,height)\
351 uint16_t mvd = x264_cabac_mvd(h,cb,i_list,idx,width);\
352 x264_macroblock_cache_mvd( h, block_idx_x[idx], block_idx_y[idx], width, height, i_list, mvd );\
/* Write the mvd(s) for 8x8 partition i of a P_8x8 macroblock, one per
 * sub-partition block (8x8 / 8x4 / 4x8 / 4x4).
 * NOTE(review): the `case D_L0_*:` labels of this switch are not visible in
 * this excerpt — the grouping below (1/2/2/4 calls) is inferred from the
 * block dimensions; confirm against the full source. */
355 static inline void x264_cabac_8x8_mvd( x264_t *h, x264_cabac_t *cb, int i )
357 switch( h->mb.i_sub_partition[i] )
/* D_L0_8x8: single 2x2-block mvd */
360 x264_cabac_mvd( h, cb, 0, 4*i, 2, 2 );
/* D_L0_8x4: two 2x1 mvds */
363 x264_cabac_mvd( h, cb, 0, 4*i+0, 2, 1 );
364 x264_cabac_mvd( h, cb, 0, 4*i+2, 2, 1 );
/* D_L0_4x8: two 1x2 mvds */
367 x264_cabac_mvd( h, cb, 0, 4*i+0, 1, 2 );
368 x264_cabac_mvd( h, cb, 0, 4*i+1, 1, 2 );
/* D_L0_4x4: four 1x1 mvds */
371 x264_cabac_mvd( h, cb, 0, 4*i+0, 1, 1 );
372 x264_cabac_mvd( h, cb, 0, 4*i+1, 1, 1 );
373 x264_cabac_mvd( h, cb, 0, 4*i+2, 1, 1 );
374 x264_cabac_mvd( h, cb, 0, 4*i+3, 1, 1 );
/* Write the macroblock header for an intra macroblock: mb_type (with the
 * slice-type-specific context set / intra-escape prefix), transform size,
 * per-block intra prediction modes, and the chroma prediction mode.
 * NOTE(review): the declaration of ctx for the I-slice case and several
 * brace/else lines are not visible in this excerpt. */
381 static ALWAYS_INLINE void x264_cabac_mb_header_i( x264_t *h, x264_cabac_t *cb, int i_mb_type, int slice_type, int chroma )
383 if( slice_type == SLICE_TYPE_I )
/* first-bin context from whether left/top neighbours are non-I_4x4 intra */
386 if( (h->mb.i_neighbour & MB_LEFT) && h->mb.i_mb_type_left[0] != I_4x4 )
388 if( (h->mb.i_neighbour & MB_TOP) && h->mb.i_mb_type_top != I_4x4 )
391 x264_cabac_mb_type_intra( h, cb, i_mb_type, 3+ctx, 3+3, 3+4, 3+5, 3+6, 3+7 );
393 else if( slice_type == SLICE_TYPE_P )
/* P slice: '1' prefix bin says "intra", then the intra tree with P contexts */
396 x264_cabac_encode_decision_noup( cb, 14, 1 );
399 x264_cabac_mb_type_intra( h, cb, i_mb_type, 17+0, 17+1, 17+2, 17+2, 17+3, 17+3 );
401 else if( slice_type == SLICE_TYPE_B )
/* B slice: 5-bin escape prefix (110101) selects intra, then B contexts */
404 x264_cabac_encode_decision_noup( cb, 27+3, 1 );
405 x264_cabac_encode_decision_noup( cb, 27+4, 1 );
406 x264_cabac_encode_decision( cb, 27+5, 1 );
407 x264_cabac_encode_decision( cb, 27+5, 0 );
408 x264_cabac_encode_decision( cb, 27+5, 1 );
411 x264_cabac_mb_type_intra( h, cb, i_mb_type, 32+0, 32+1, 32+2, 32+2, 32+3, 32+3 );
/* I_PCM wrote raw samples already; nothing more to code here */
414 if( i_mb_type == I_PCM )
417 if( i_mb_type != I_16x16 )
419 if( h->pps->b_transform_8x8_mode )
420 x264_cabac_transform_size( h, cb )_;
/* one pred mode per 4x4 block, or per 8x8 block when 8x8 transform is on */
422 int di = h->mb.b_transform_8x8 ? 4 : 1;
423 for( int i = 0; i < 16; i += di )
425 const int i_pred = x264_mb_predict_intra4x4_mode( h, i );
426 const int i_mode = x264_mb_pred_mode4x4_fix( h->mb.cache.intra4x4_pred_mode[x264_scan8[i]] );
427 x264_cabac_intra4x4_pred_mode( cb, i_pred, i_mode );
/* chroma pred mode is written for all intra types (when chroma is coded) */
432 x264_cabac_intra_chroma_pred_mode( h, cb );
/* Write the macroblock header for a P-slice macroblock: mb_type bins,
 * per-partition ref_idx (only when more than one reference is active),
 * and mvds; intra types fall through to the intra header writer. */
435 static ALWAYS_INLINE void x264_cabac_mb_header_p( x264_t *h, x264_cabac_t *cb, int i_mb_type, int chroma )
437 if( i_mb_type == P_L0 )
439 x264_cabac_encode_decision_noup( cb, 14, 0 );
440 if( h->mb.i_partition == D_16x16 )
442 x264_cabac_encode_decision_noup( cb, 15, 0 );
443 x264_cabac_encode_decision_noup( cb, 16, 0 );
444 if( h->mb.pic.i_fref[0] > 1 )
445 x264_cabac_ref_p( h, cb, 0 );
446 x264_cabac_mvd( h, cb, 0, 0, 4, 4 );
448 else if( h->mb.i_partition == D_16x8 )
450 x264_cabac_encode_decision_noup( cb, 15, 1 );
451 x264_cabac_encode_decision_noup( cb, 17, 1 );
452 if( h->mb.pic.i_fref[0] > 1 )
/* one ref per 16x8 half (blocks 0 and 8) */
454 x264_cabac_ref_p( h, cb, 0 );
455 x264_cabac_ref_p( h, cb, 8 );
457 x264_cabac_mvd( h, cb, 0, 0, 4, 2 );
458 x264_cabac_mvd( h, cb, 0, 8, 4, 2 );
460 else //if( h->mb.i_partition == D_8x16 )
462 x264_cabac_encode_decision_noup( cb, 15, 1 );
463 x264_cabac_encode_decision_noup( cb, 17, 0 );
464 if( h->mb.pic.i_fref[0] > 1 )
/* one ref per 8x16 half (blocks 0 and 4) */
466 x264_cabac_ref_p( h, cb, 0 );
467 x264_cabac_ref_p( h, cb, 4 );
469 x264_cabac_mvd( h, cb, 0, 0, 2, 4 );
470 x264_cabac_mvd( h, cb, 0, 4, 2, 4 );
473 else if( i_mb_type == P_8x8 )
475 x264_cabac_encode_decision_noup( cb, 14, 0 );
476 x264_cabac_encode_decision_noup( cb, 15, 0 );
477 x264_cabac_encode_decision_noup( cb, 16, 1 );
/* sub_mb_type for each of the four 8x8 partitions */
480 for( int i = 0; i < 4; i++ )
481 x264_cabac_subpartition_p( cb, h->mb.i_sub_partition[i] );
484 if( h->mb.pic.i_fref[0] > 1 )
486 x264_cabac_ref_p( h, cb, 0 );
487 x264_cabac_ref_p( h, cb, 4 );
488 x264_cabac_ref_p( h, cb, 8 );
489 x264_cabac_ref_p( h, cb, 12 );
492 for( int i = 0; i < 4; i++ )
493 x264_cabac_8x8_mvd( h, cb, i );
/* else: intra macroblock inside a P slice */
496 x264_cabac_mb_header_i( h, cb, i_mb_type, SLICE_TYPE_P, chroma );
/* Write the macroblock header for a B-slice macroblock: mb_type (direct flag,
 * B_8x8 escape, or one of the 9 two-list types via a bit-packed code table),
 * ref_idx per used list/partition, and mvds.  Intra types fall through to the
 * intra header writer.  NOTE(review): the declaration of ctx and several
 * brace lines are not visible in this excerpt. */
499 static ALWAYS_INLINE void x264_cabac_mb_header_b( x264_t *h, x264_cabac_t *cb, int i_mb_type, int chroma )
/* first-bin context: count of non-direct/non-skip coded neighbours */
502 if( (h->mb.i_neighbour & MB_LEFT) && h->mb.i_mb_type_left[0] != B_SKIP && h->mb.i_mb_type_left[0] != B_DIRECT )
504 if( (h->mb.i_neighbour & MB_TOP) && h->mb.i_mb_type_top != B_SKIP && h->mb.i_mb_type_top != B_DIRECT )
507 if( i_mb_type == B_DIRECT )
509 x264_cabac_encode_decision_noup( cb, 27+ctx, 0 );
512 x264_cabac_encode_decision_noup( cb, 27+ctx, 1 );
514 if( i_mb_type == B_8x8 )
/* escape code 11111 selects B_8x8 */
516 x264_cabac_encode_decision_noup( cb, 27+3, 1 );
517 x264_cabac_encode_decision_noup( cb, 27+4, 1 );
518 x264_cabac_encode_decision( cb, 27+5, 1 );
519 x264_cabac_encode_decision( cb, 27+5, 1 );
520 x264_cabac_encode_decision_noup( cb, 27+5, 1 );
/* sub_mb_type for each 8x8 partition */
523 for( int i = 0; i < 4; i++ )
524 x264_cabac_subpartition_b( cb, h->mb.i_sub_partition[i] );
/* ref_idx then mvd, per list, only for partitions that use that list */
527 if( h->mb.pic.i_fref[0] > 1 )
528 for( int i = 0; i < 4; i++ )
529 if( x264_mb_partition_listX_table[0][ h->mb.i_sub_partition[i] ] )
530 x264_cabac_ref_b( h, cb, 0, 4*i );
532 if( h->mb.pic.i_fref[1] > 1 )
533 for( int i = 0; i < 4; i++ )
534 if( x264_mb_partition_listX_table[1][ h->mb.i_sub_partition[i] ] )
535 x264_cabac_ref_b( h, cb, 1, 4*i );
537 for( int i = 0; i < 4; i++ )
538 if( x264_mb_partition_listX_table[0][ h->mb.i_sub_partition[i] ] )
539 x264_cabac_mvd( h, cb, 0, 4*i, 2, 2 );
541 for( int i = 0; i < 4; i++ )
542 if( x264_mb_partition_listX_table[1][ h->mb.i_sub_partition[i] ] )
543 x264_cabac_mvd( h, cb, 1, 4*i, 2, 2 );
545 else if( i_mb_type >= B_L0_L0 && i_mb_type <= B_BI_BI )
/* bit-packed mb_type codes: low bits are the CABAC bins, length implicit.
 * Rows are indexed by (type, partition): 3 entries per type for
 * D_16x8 / D_8x16 / D_16x16. */
548 static const uint8_t i_mb_bits[9*3] =
550 0x31, 0x29, 0x4, /* L0 L0 */
551 0x35, 0x2d, 0, /* L0 L1 */
552 0x43, 0x63, 0, /* L0 BI */
553 0x3d, 0x2f, 0, /* L1 L0 */
554 0x39, 0x25, 0x6, /* L1 L1 */
555 0x53, 0x73, 0, /* L1 BI */
556 0x4b, 0x6b, 0, /* BI L0 */
557 0x5b, 0x7b, 0, /* BI L1 */
558 0x47, 0x67, 0x21 /* BI BI */
561 const int idx = (i_mb_type - B_L0_L0) * 3 + (h->mb.i_partition - D_16x8);
562 int bits = i_mb_bits[idx];
/* emit the packed bins LSB-first; remaining bits gate the longer codes */
564 x264_cabac_encode_decision_noup( cb, 27+3, bits&1 );
565 x264_cabac_encode_decision( cb, 27+5-(bits&1), (bits>>1)&1 ); bits >>= 2;
568 x264_cabac_encode_decision( cb, 27+5, bits&1 ); bits >>= 1;
569 x264_cabac_encode_decision( cb, 27+5, bits&1 ); bits >>= 1;
570 x264_cabac_encode_decision( cb, 27+5, bits&1 ); bits >>= 1;
572 x264_cabac_encode_decision_noup( cb, 27+5, bits&1 );
/* ref_idx for each list used by this mb_type (b_list[list][partition]) */
575 const uint8_t (*b_list)[2] = x264_mb_type_list_table[i_mb_type];
576 if( h->mb.pic.i_fref[0] > 1 )
579 x264_cabac_ref_b( h, cb, 0, 0 );
580 if( b_list[0][1] && h->mb.i_partition != D_16x16 )
581 x264_cabac_ref_b( h, cb, 0, 8 >> (h->mb.i_partition == D_8x16) );
583 if( h->mb.pic.i_fref[1] > 1 )
586 x264_cabac_ref_b( h, cb, 1, 0 );
587 if( b_list[1][1] && h->mb.i_partition != D_16x16 )
588 x264_cabac_ref_b( h, cb, 1, 8 >> (h->mb.i_partition == D_8x16) );
/* mvds: per list, shaped by the macroblock partition */
590 for( int i_list = 0; i_list < 2; i_list++ )
592 if( h->mb.i_partition == D_16x16 )
594 if( b_list[i_list][0] ) x264_cabac_mvd( h, cb, i_list, 0, 4, 4 );
596 else if( h->mb.i_partition == D_16x8 )
598 if( b_list[i_list][0] ) x264_cabac_mvd( h, cb, i_list, 0, 4, 2 );
599 if( b_list[i_list][1] ) x264_cabac_mvd( h, cb, i_list, 8, 4, 2 );
601 else //if( h->mb.i_partition == D_8x16 )
603 if( b_list[i_list][0] ) x264_cabac_mvd( h, cb, i_list, 0, 2, 4 );
604 if( b_list[i_list][1] ) x264_cabac_mvd( h, cb, i_list, 4, 2, 4 );
/* else: intra macroblock inside a B slice */
609 x264_cabac_mb_header_i( h, cb, i_mb_type, SLICE_TYPE_B, chroma );
/* Compute the context index for a coded_block_flag of category i_cat:
 * base context for the category plus (2*nzb + nza) from the neighbouring
 * blocks' non-zero status.  DC blocks read their nz bits from the packed cbp
 * caches; AC/4x4 blocks read the non_zero_count cache, where intra edges are
 * marked with 0x80.  NOTE(review): several branch/brace lines (e.g. the
 * b_dc/b_intra dispatch) are not visible in this excerpt. */
612 static int ALWAYS_INLINE x264_cabac_cbf_ctxidxinc( x264_t *h, int i_cat, int i_idx, int b_intra, int b_dc )
614 static const uint16_t base_ctx[14] = {85,89,93,97,101,1012,460,464,468,1016,472,476,480,1020};
619 if( i_cat == DCT_CHROMA_DC )
/* unavailable neighbour (-1): treat as "coded" for intra, "uncoded" for inter */
621 int i_nza = h->mb.cache.i_cbp_left != -1 ? (h->mb.cache.i_cbp_left >> (8 + i_idx)) & 1 : b_intra;
622 int i_nzb = h->mb.cache.i_cbp_top != -1 ? (h->mb.cache.i_cbp_top >> (8 + i_idx)) & 1 : b_intra;
623 return base_ctx[i_cat] + 2*i_nzb + i_nza;
/* other DC categories: nz bits live in the high byte of the cbp caches */
627 int i_nza = (h->mb.cache.i_cbp_left >> (8 + i_idx)) & 1;
628 int i_nzb = (h->mb.cache.i_cbp_top >> (8 + i_idx)) & 1;
629 return base_ctx[i_cat] + 2*i_nzb + i_nza;
/* non-DC blocks: neighbour counts from the non_zero_count cache */
634 int i_nza = h->mb.cache.non_zero_count[x264_scan8[i_idx] - 1];
635 int i_nzb = h->mb.cache.non_zero_count[x264_scan8[i_idx] - 8];
/* inter with compile-time-known b_intra==0: the 0x80 edge marker can't be set */
636 if( x264_constant_p(b_intra) && !b_intra )
637 return base_ctx[i_cat] + ((2*i_nzb + i_nza)&0x7f);
/* keep the 0x80 "unavailable" marker only for intra, then collapse to 0/1 */
640 i_nza &= 0x7f + (b_intra << 7);
641 i_nzb &= 0x7f + (b_intra << 7);
642 return base_ctx[i_cat] + 2*!!i_nzb + !!i_nza;
/* Context-offset tables for residual coefficient coding, shared with the
 * assembly implementations (hence the extern declarations). */
648 extern const uint8_t x264_significant_coeff_flag_offset_8x8[2][64];
649 extern const uint8_t x264_last_coeff_flag_offset_8x8[63];
650 extern const uint8_t x264_coeff_flag_offset_chroma_422_dc[7];
651 extern const uint16_t x264_significant_coeff_flag_offset[2][16];
652 extern const uint16_t x264_last_coeff_flag_offset[2][16];
653 extern const uint16_t x264_coeff_abs_level_m1_offset[16];
654 extern const uint8_t x264_count_cat_m1[14];
656 /* Padded to [64] for easier addressing */
/* [0] = frame-coded, [1] = field-coded (MBAFF/interlace) scan contexts */
657 const uint8_t x264_significant_coeff_flag_offset_8x8[2][64] =
659 0, 1, 2, 3, 4, 5, 5, 4, 4, 3, 3, 4, 4, 4, 5, 5,
660 4, 4, 4, 4, 3, 3, 6, 7, 7, 7, 8, 9,10, 9, 8, 7,
661 7, 6,11,12,13,11, 6, 7, 8, 9,14,10, 9, 8, 6,11,
662 12,13,11, 6, 9,14,10, 9,11,12,13,11,14,10,12
664 0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 7, 7, 8, 4, 5,
665 6, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,11,12,11,
666 9, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,13,13, 9,
667 9,10,10, 8,13,13, 9, 9,10,10,14,14,14,14,14
669 const uint8_t x264_last_coeff_flag_offset_8x8[63] =
671 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
672 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
673 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
674 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8
676 const uint8_t x264_coeff_flag_offset_chroma_422_dc[7] = { 0, 0, 1, 1, 2, 2, 2 }; /* MIN( i/2, 2 ) */
/* Per-category base context offsets, indexed by ctx_block_cat;
 * row [0] = frame, row [1] = field. */
677 const uint16_t x264_significant_coeff_flag_offset[2][16] =
679 { 105+0, 105+15, 105+29, 105+44, 105+47, 402, 484+0, 484+15, 484+29, 660, 528+0, 528+15, 528+29, 718, 0, 0 },
680 { 277+0, 277+15, 277+29, 277+44, 277+47, 436, 776+0, 776+15, 776+29, 675, 820+0, 820+15, 820+29, 733, 0, 0 }
682 const uint16_t x264_last_coeff_flag_offset[2][16] =
684 { 166+0, 166+15, 166+29, 166+44, 166+47, 417, 572+0, 572+15, 572+29, 690, 616+0, 616+15, 616+29, 748, 0, 0 },
685 { 338+0, 338+15, 338+29, 338+44, 338+47, 451, 864+0, 864+15, 864+29, 699, 908+0, 908+15, 908+29, 757, 0, 0 }
687 const uint16_t x264_coeff_abs_level_m1_offset[16] =
689 227+0, 227+10, 227+20, 227+30, 227+39, 426, 952+0, 952+10, 952+20, 708, 982+0, 982+10, 982+20, 766
/* number of coefficients minus one, per category */
691 const uint8_t x264_count_cat_m1[14] = {15, 14, 15, 3, 14, 63, 15, 14, 15, 63, 15, 14, 15, 63};
694 // node ctx: 0..3: abslevel1 (with abslevelgt1 == 0).
695 // 4..7: abslevelgt1 + 3 (and abslevel1 doesn't matter).
696 /* map node ctx => cabac ctx for level=1 */
697 static const uint8_t coeff_abs_level1_ctx[8] = { 1, 2, 3, 4, 0, 0, 0, 0 };
698 /* map node ctx => cabac ctx for level>1 */
699 static const uint8_t coeff_abs_levelgt1_ctx[8] = { 5, 5, 5, 5, 6, 7, 8, 9 };
700 /* 4:2:2 chroma dc uses a slightly different state machine for some reason, also note that
701 * 4:2:0 chroma dc doesn't use the last state so it has identical output with both arrays. */
702 static const uint8_t coeff_abs_levelgt1_ctx_chroma_dc[8] = { 5, 5, 5, 5, 6, 7, 8, 8 };
704 static const uint8_t coeff_abs_level_transition[2][8] = {
705 /* update node ctx after coding a level=1 */
706 { 1, 2, 3, 3, 4, 5, 6, 7 },
707 /* update node ctx after coding a level>1 */
708 { 4, 4, 4, 4, 5, 6, 7, 7 }
/* Bitstream-accurate residual coder: writes the significance map
 * (significant_coeff_flag / last_significant_coeff_flag), then the
 * coefficient levels in reverse scan order (truncated unary to 14 with
 * node-ctx state machine, Exp-Golomb k=0 suffix, bypass sign).
 * NOTE(review): the coeffs[] buffer declaration, loop heads and several
 * macro/brace lines are not visible in this excerpt. */
712 static ALWAYS_INLINE void x264_cabac_block_residual_internal( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l, int chroma422dc )
714 int ctx_sig = x264_significant_coeff_flag_offset[MB_INTERLACED][ctx_block_cat];
715 int ctx_last = x264_last_coeff_flag_offset[MB_INTERLACED][ctx_block_cat];
716 int ctx_level = x264_coeff_abs_level_m1_offset[ctx_block_cat];
717 int coeff_idx = -1, node_ctx = 0;
/* index of the last nonzero coefficient in scan order */
718 int last = h->quantf.coeff_last[ctx_block_cat]( l );
719 const uint8_t *levelgt1_ctx = chroma422dc ? coeff_abs_levelgt1_ctx_chroma_dc : coeff_abs_levelgt1_ctx;
/* Emit sig/last flags for positions 0..last, collecting nonzero coeffs. */
722 #define WRITE_SIGMAP( sig_off, last_off )\
729 coeffs[++coeff_idx] = l[i];\
730 x264_cabac_encode_decision( cb, ctx_sig + sig_off, 1 );\
733 x264_cabac_encode_decision( cb, ctx_last + last_off, 1 );\
737 x264_cabac_encode_decision( cb, ctx_last + last_off, 0 );\
740 x264_cabac_encode_decision( cb, ctx_sig + sig_off, 0 );\
741 if( ++i == count_m1 )\
743 coeffs[++coeff_idx] = l[i];\
/* 4:2:2 chroma DC uses its own shared context schedule */
752 WRITE_SIGMAP( x264_coeff_flag_offset_chroma_422_dc[i], x264_coeff_flag_offset_chroma_422_dc[i] )
756 int count_m1 = x264_count_cat_m1[ctx_block_cat];
/* 8x8 blocks use per-position context tables instead of the raw index */
759 const uint8_t *sig_offset = x264_significant_coeff_flag_offset_8x8[MB_INTERLACED];
760 WRITE_SIGMAP( sig_offset[i], x264_last_coeff_flag_offset_8x8[i] )
768 /* write coeff_abs - 1 */
769 int coeff = coeffs[coeff_idx];
770 int abs_coeff = abs(coeff);
771 int coeff_sign = coeff >> 31;
772 int ctx = coeff_abs_level1_ctx[node_ctx] + ctx_level;
/* |coeff| > 1: '1' bin, then unary up to 14 in the gt1 context */
776 x264_cabac_encode_decision( cb, ctx, 1 );
777 ctx = levelgt1_ctx[node_ctx] + ctx_level;
778 for( int i = X264_MIN( abs_coeff, 15 ) - 2; i > 0; i-- )
779 x264_cabac_encode_decision( cb, ctx, 1 );
781 x264_cabac_encode_decision( cb, ctx, 0 );
/* |coeff| >= 15: Exp-Golomb k=0 escape in bypass bins */
783 x264_cabac_encode_ue_bypass( cb, 0, abs_coeff - 15 );
785 node_ctx = coeff_abs_level_transition[1][node_ctx];
/* |coeff| == 1: single '0' bin */
789 x264_cabac_encode_decision( cb, ctx, 0 );
790 node_ctx = coeff_abs_level_transition[0][node_ctx];
793 x264_cabac_encode_bypass( cb, coeff_sign );
794 } while( --coeff_idx >= 0 );
/* Dispatch residual coding to the assembly implementation when available
 * (x86-64 with MMX), otherwise the C version above. */
796 static void x264_cabac_block_residual( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l )
798 #if ARCH_X86_64 && HAVE_MMX
799 h->bsf.cabac_block_residual_internal( l, MB_INTERLACED, ctx_block_cat, cb );
801 x264_cabac_block_residual_internal( h, cb, ctx_block_cat, l, 0 );
/* 4:2:2 chroma DC residual: forces the chroma422dc template instantiation. */
804 static void x264_cabac_block_residual_422_dc( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l )
806 /* Template a version specifically for chroma 4:2:2 DC in order to avoid
807 * slowing down everything else due to the added complexity. */
808 x264_cabac_block_residual_internal( h, cb, DCT_CHROMA_DC, l, 1 );
/* In the non-RDO build, 8x8 residuals share the generic coder. */
810 #define x264_cabac_block_residual_8x8( h, cb, cat, l ) x264_cabac_block_residual( h, cb, cat, l )
813 /* Faster RDO by merging sigmap and level coding. Note that for 8x8dct and chroma 4:2:2 dc this is
814 * slightly incorrect because the sigmap is not reversible (contexts are repeated). However, there
815 * is nearly no quality penalty for this (~0.001db) and the speed boost (~30%) is worth it. */
/* RDO (size-estimation) residual coder: walks coefficients from `last` down
 * to 0, writing each coefficient's sig/last flag and level together, and
 * charges level costs via precomputed unary tables instead of real bins.
 * Signs are costed as constant bypass bins.  NOTE(review): several brace and
 * level-threshold branch lines are not visible in this excerpt. */
816 static void ALWAYS_INLINE x264_cabac_block_residual_internal( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l, int b_8x8, int chroma422dc )
818 const uint8_t *sig_offset = x264_significant_coeff_flag_offset_8x8[MB_INTERLACED];
819 int ctx_sig = x264_significant_coeff_flag_offset[MB_INTERLACED][ctx_block_cat];
820 int ctx_last = x264_last_coeff_flag_offset[MB_INTERLACED][ctx_block_cat];
821 int ctx_level = x264_coeff_abs_level_m1_offset[ctx_block_cat];
822 int last = h->quantf.coeff_last[ctx_block_cat]( l );
823 int coeff_abs = abs(l[last]);
824 int ctx = coeff_abs_level1_ctx[0] + ctx_level;
826 const uint8_t *levelgt1_ctx = chroma422dc ? coeff_abs_levelgt1_ctx_chroma_dc : coeff_abs_levelgt1_ctx;
/* last coefficient: sig=1, last=1 (skipped when it's the final position) */
828 if( last != (b_8x8 ? 63 : chroma422dc ? 7 : x264_count_cat_m1[ctx_block_cat]) )
830 x264_cabac_encode_decision( cb, ctx_sig + (b_8x8 ? sig_offset[last] :
831 chroma422dc ? x264_coeff_flag_offset_chroma_422_dc[last] : last), 1 );
832 x264_cabac_encode_decision( cb, ctx_last + (b_8x8 ? x264_last_coeff_flag_offset_8x8[last] :
833 chroma422dc ? x264_coeff_flag_offset_chroma_422_dc[last] : last), 1 );
/* level > 1: '1' bin then table-based unary cost */
838 x264_cabac_encode_decision( cb, ctx, 1 );
839 ctx = levelgt1_ctx[0] + ctx_level;
842 cb->f8_bits_encoded += x264_cabac_size_unary[coeff_abs-1][cb->state[ctx]];
843 cb->state[ctx] = x264_cabac_transition_unary[coeff_abs-1][cb->state[ctx]];
/* level >= 15: saturated unary cost plus Exp-Golomb escape */
847 cb->f8_bits_encoded += x264_cabac_size_unary[14][cb->state[ctx]];
848 cb->state[ctx] = x264_cabac_transition_unary[14][cb->state[ctx]];
849 x264_cabac_encode_ue_bypass( cb, 0, coeff_abs - 15 );
851 node_ctx = coeff_abs_level_transition[1][0];
/* level == 1 */
855 x264_cabac_encode_decision( cb, ctx, 0 );
856 node_ctx = coeff_abs_level_transition[0][0];
857 x264_cabac_encode_bypass( cb, 0 ); // sign
/* remaining coefficients, high position to low */
860 for( int i = last-1 ; i >= 0; i-- )
864 coeff_abs = abs(l[i]);
865 x264_cabac_encode_decision( cb, ctx_sig + (b_8x8 ? sig_offset[i] :
866 chroma422dc ? x264_coeff_flag_offset_chroma_422_dc[i] : i), 1 );
867 x264_cabac_encode_decision( cb, ctx_last + (b_8x8 ? x264_last_coeff_flag_offset_8x8[i] :
868 chroma422dc ? x264_coeff_flag_offset_chroma_422_dc[i] : i), 0 );
869 ctx = coeff_abs_level1_ctx[node_ctx] + ctx_level;
873 x264_cabac_encode_decision( cb, ctx, 1 );
874 ctx = levelgt1_ctx[node_ctx] + ctx_level;
877 cb->f8_bits_encoded += x264_cabac_size_unary[coeff_abs-1][cb->state[ctx]];
878 cb->state[ctx] = x264_cabac_transition_unary[coeff_abs-1][cb->state[ctx]];
882 cb->f8_bits_encoded += x264_cabac_size_unary[14][cb->state[ctx]];
883 cb->state[ctx] = x264_cabac_transition_unary[14][cb->state[ctx]];
884 x264_cabac_encode_ue_bypass( cb, 0, coeff_abs - 15 );
886 node_ctx = coeff_abs_level_transition[1][node_ctx];
890 x264_cabac_encode_decision( cb, ctx, 0 );
891 node_ctx = coeff_abs_level_transition[0][node_ctx];
892 x264_cabac_encode_bypass( cb, 0 );
/* zero coefficient: sig flag only */
896 x264_cabac_encode_decision( cb, ctx_sig + (b_8x8 ? sig_offset[i] :
897 chroma422dc ? x264_coeff_flag_offset_chroma_422_dc[i] : i), 0 );
/* C entry points for RDO residual coding (also used as asm fallbacks):
 * 8x8 blocks (b_8x8 = 1) and everything else (b_8x8 = 0). */
901 void x264_cabac_block_residual_8x8_rd_c( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l )
903 x264_cabac_block_residual_internal( h, cb, ctx_block_cat, l, 1, 0 );
905 void x264_cabac_block_residual_rd_c( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l )
907 x264_cabac_block_residual_internal( h, cb, ctx_block_cat, l, 0, 0 );
/* RDO-build dispatchers: use the assembly RD implementations when available
 * (x86-64 with MMX), otherwise the C versions above. */
910 static ALWAYS_INLINE void x264_cabac_block_residual_8x8( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l )
912 #if ARCH_X86_64 && HAVE_MMX
913 h->bsf.cabac_block_residual_8x8_rd_internal( l, MB_INTERLACED, ctx_block_cat, cb );
915 x264_cabac_block_residual_8x8_rd_c( h, cb, ctx_block_cat, l );
918 static ALWAYS_INLINE void x264_cabac_block_residual( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l )
920 #if ARCH_X86_64 && HAVE_MMX
921 h->bsf.cabac_block_residual_rd_internal( l, MB_INTERLACED, ctx_block_cat, cb );
923 x264_cabac_block_residual_rd_c( h, cb, ctx_block_cat, l );
/* RDO 4:2:2 chroma DC always uses the C template (chroma422dc = 1) */
927 static void x264_cabac_block_residual_422_dc( x264_t *h, x264_cabac_t *cb, int ctx_block_cat, dctcoef *l )
929 x264_cabac_block_residual_internal( h, cb, DCT_CHROMA_DC, l, 0, 1 );
/* Write a coded_block_flag for one block and, if set, its residual.  `name`
 * selects which residual writer is appended to the call (empty, _8x8, or
 * _422_dc); b_dc/b_intra feed the context computation. */
933 #define x264_cabac_block_residual_cbf_internal( h, cb, ctx_block_cat, i_idx, l, b_intra, b_dc, name )\
936 int ctxidxinc = x264_cabac_cbf_ctxidxinc( h, ctx_block_cat, i_idx, b_intra, b_dc );\
937 if( h->mb.cache.non_zero_count[x264_scan8[i_idx]] )\
939 x264_cabac_encode_decision( cb, ctxidxinc, 1 );\
940 x264_cabac_block_residual##name( h, cb, ctx_block_cat, l );\
943 x264_cabac_encode_decision( cb, ctxidxinc, 0 );\
/* convenience wrappers for the common block kinds */
946 #define x264_cabac_block_residual_dc_cbf( h, cb, ctx_block_cat, i_idx, l, b_intra )\
947 x264_cabac_block_residual_cbf_internal( h, cb, ctx_block_cat, i_idx, l, b_intra, 1, )
949 #define x264_cabac_block_residual_cbf( h, cb, ctx_block_cat, i_idx, l, b_intra )\
950 x264_cabac_block_residual_cbf_internal( h, cb, ctx_block_cat, i_idx, l, b_intra, 0, )
952 #define x264_cabac_block_residual_8x8_cbf( h, cb, ctx_block_cat, i_idx, l, b_intra )\
953 x264_cabac_block_residual_cbf_internal( h, cb, ctx_block_cat, i_idx, l, b_intra, 0, _8x8 )
955 #define x264_cabac_block_residual_422_dc_cbf( h, cb, ch, b_intra )\
956 x264_cabac_block_residual_cbf_internal( h, cb, DCT_CHROMA_DC, CHROMA_DC+(ch), h->dct.chroma_dc[ch], b_intra, 1, _422_dc )
958 static ALWAYS_INLINE void x264_macroblock_write_cabac_internal( x264_t *h, x264_cabac_t *cb, int plane_count, int chroma )
960 const int i_mb_type = h->mb.i_type;
963 const int i_mb_pos_start = x264_cabac_pos( cb );
967 (!(h->mb.i_mb_y & 1) || IS_SKIP(h->mb.type[h->mb.i_mb_xy - h->mb.i_mb_stride])) )
969 x264_cabac_field_decoding_flag( h, cb );
973 if( h->sh.i_type == SLICE_TYPE_P )
974 x264_cabac_mb_header_p( h, cb, i_mb_type, chroma );
975 else if( h->sh.i_type == SLICE_TYPE_B )
976 x264_cabac_mb_header_b( h, cb, i_mb_type, chroma );
977 else //if( h->sh.i_type == SLICE_TYPE_I )
978 x264_cabac_mb_header_i( h, cb, i_mb_type, SLICE_TYPE_I, chroma );
981 i_mb_pos_tex = x264_cabac_pos( cb );
982 h->stat.frame.i_mv_bits += i_mb_pos_tex - i_mb_pos_start;
984 if( i_mb_type == I_PCM )
987 bs_init( &s, cb->p, cb->p_end - cb->p );
989 for( int p = 0; p < plane_count; p++ )
990 for( int i = 0; i < 256; i++ )
991 bs_write( &s, BIT_DEPTH, h->mb.pic.p_fenc[p][i] );
993 for( int ch = 1; ch < 3; ch++ )
994 for( int i = 0; i < 16>>CHROMA_V_SHIFT; i++ )
995 for( int j = 0; j < 8; j++ )
996 bs_write( &s, BIT_DEPTH, h->mb.pic.p_fenc[ch][i*FENC_STRIDE+j] );
1000 x264_cabac_encode_init_core( cb );
1002 h->stat.frame.i_tex_bits += x264_cabac_pos( cb ) - i_mb_pos_tex;
1007 if( i_mb_type != I_16x16 )
1009 x264_cabac_cbp_luma( h, cb );
1011 x264_cabac_cbp_chroma( h, cb );
1014 if( x264_mb_transform_8x8_allowed( h ) && h->mb.i_cbp_luma )
1016 x264_cabac_transform_size( h, cb );
1019 if( h->mb.i_cbp_luma || (chroma && h->mb.i_cbp_chroma) || i_mb_type == I_16x16 )
1021 const int b_intra = IS_INTRA( i_mb_type );
1022 x264_cabac_qp_delta( h, cb );
1024 /* write residual */
1025 if( i_mb_type == I_16x16 )
1028 for( int p = 0; p < plane_count; p++ )
1030 x264_cabac_block_residual_dc_cbf( h, cb, ctx_cat_plane[DCT_LUMA_DC][p], LUMA_DC+p, h->dct.luma16x16_dc[p], 1 );
1033 if( h->mb.i_cbp_luma )
1034 for( int i = p*16; i < p*16+16; i++ )
1035 x264_cabac_block_residual_cbf( h, cb, ctx_cat_plane[DCT_LUMA_AC][p], i, h->dct.luma4x4[i]+1, 1 );
1038 else if( h->mb.b_transform_8x8 )
1040 if( plane_count == 3 )
1042 ALIGNED_4( uint8_t nnzbak[3][8] );
1044 /* Stupid nnz munging in the case that neighbors don't have
1045 * 8x8 transform enabled. */
1046 #define BACKUP( dst, src, res )\
1050 #define RESTORE( dst, src, res )\
1053 #define MUNGE_8x8_NNZ( MUNGE )\
1054 if( (h->mb.i_neighbour & MB_LEFT) && !h->mb.mb_transform_size[h->mb.i_mb_left_xy[0]] )\
1056 MUNGE( nnzbak[0][0], h->mb.cache.non_zero_count[x264_scan8[16*0+ 0] - 1], 0x80 )\
1057 MUNGE( nnzbak[0][1], h->mb.cache.non_zero_count[x264_scan8[16*0+ 2] - 1], 0x80 )\
1058 MUNGE( nnzbak[1][0], h->mb.cache.non_zero_count[x264_scan8[16*1+ 0] - 1], 0x80 )\
1059 MUNGE( nnzbak[1][1], h->mb.cache.non_zero_count[x264_scan8[16*1+ 2] - 1], 0x80 )\
1060 MUNGE( nnzbak[2][0], h->mb.cache.non_zero_count[x264_scan8[16*2+ 0] - 1], 0x80 )\
1061 MUNGE( nnzbak[2][1], h->mb.cache.non_zero_count[x264_scan8[16*2+ 2] - 1], 0x80 )\
1063 if( (h->mb.i_neighbour & MB_LEFT) && !h->mb.mb_transform_size[h->mb.i_mb_left_xy[1]] )\
1065 MUNGE( nnzbak[0][2], h->mb.cache.non_zero_count[x264_scan8[16*0+ 8] - 1], 0x80 )\
1066 MUNGE( nnzbak[0][3], h->mb.cache.non_zero_count[x264_scan8[16*0+10] - 1], 0x80 )\
1067 MUNGE( nnzbak[1][2], h->mb.cache.non_zero_count[x264_scan8[16*1+ 8] - 1], 0x80 )\
1068 MUNGE( nnzbak[1][3], h->mb.cache.non_zero_count[x264_scan8[16*1+10] - 1], 0x80 )\
1069 MUNGE( nnzbak[2][2], h->mb.cache.non_zero_count[x264_scan8[16*2+ 8] - 1], 0x80 )\
1070 MUNGE( nnzbak[2][3], h->mb.cache.non_zero_count[x264_scan8[16*2+10] - 1], 0x80 )\
1072 if( (h->mb.i_neighbour & MB_TOP) && !h->mb.mb_transform_size[h->mb.i_mb_top_xy] )\
1074 MUNGE( M32( &nnzbak[0][4] ), M32( &h->mb.cache.non_zero_count[x264_scan8[16*0] - 8] ), 0x80808080U )\
1075 MUNGE( M32( &nnzbak[1][4] ), M32( &h->mb.cache.non_zero_count[x264_scan8[16*1] - 8] ), 0x80808080U )\
1076 MUNGE( M32( &nnzbak[2][4] ), M32( &h->mb.cache.non_zero_count[x264_scan8[16*2] - 8] ), 0x80808080U )\
1079 MUNGE_8x8_NNZ( BACKUP )
1081 for( int p = 0; p < 3; p++ )
1082 FOREACH_BIT( i, 0, h->mb.i_cbp_luma )
1083 x264_cabac_block_residual_8x8_cbf( h, cb, ctx_cat_plane[DCT_LUMA_8x8][p], i*4+p*16, h->dct.luma8x8[i+p*4], b_intra );
1085 MUNGE_8x8_NNZ( RESTORE )
1089 FOREACH_BIT( i, 0, h->mb.i_cbp_luma )
1090 x264_cabac_block_residual_8x8( h, cb, DCT_LUMA_8x8, h->dct.luma8x8[i] );
1095 for( int p = 0; p < plane_count; p++ )
1096 FOREACH_BIT( i8x8, 0, h->mb.i_cbp_luma )
1097 for( int i = 0; i < 4; i++ )
1098 x264_cabac_block_residual_cbf( h, cb, ctx_cat_plane[DCT_LUMA_4x4][p], i+i8x8*4+p*16, h->dct.luma4x4[i+i8x8*4+p*16], b_intra );
1101 if( chroma && h->mb.i_cbp_chroma ) /* Chroma DC residual present */
1103 if( CHROMA_FORMAT == CHROMA_422 )
1105 x264_cabac_block_residual_422_dc_cbf( h, cb, 0, b_intra );
1106 x264_cabac_block_residual_422_dc_cbf( h, cb, 1, b_intra );
1110 x264_cabac_block_residual_dc_cbf( h, cb, DCT_CHROMA_DC, CHROMA_DC+0, h->dct.chroma_dc[0], b_intra );
1111 x264_cabac_block_residual_dc_cbf( h, cb, DCT_CHROMA_DC, CHROMA_DC+1, h->dct.chroma_dc[1], b_intra );
1114 if( h->mb.i_cbp_chroma == 2 ) /* Chroma AC residual present */
1116 int step = 8 << CHROMA_V_SHIFT;
1117 for( int i = 16; i < 3*16; i += step )
1118 for( int j = i; j < i+4; j++ )
1119 x264_cabac_block_residual_cbf( h, cb, DCT_CHROMA_AC, j, h->dct.luma4x4[j]+1, b_intra );
1125 h->stat.frame.i_tex_bits += x264_cabac_pos( cb ) - i_mb_pos_tex;
1129 void x264_macroblock_write_cabac( x264_t *h, x264_cabac_t *cb )
1132 x264_macroblock_write_cabac_internal( h, cb, 3, 0 );
1134 x264_macroblock_write_cabac_internal( h, cb, 1, 1 );
1138 /*****************************************************************************
1139 * RD only; doesn't generate a valid bitstream
1140 * doesn't write cbp or chroma dc (I don't know how much this matters)
1141 * doesn't write ref (never varies between calls, so no point in doing so)
1142 * only writes subpartition for p8x8, needed for sub-8x8 mode decision RDO
1143 * works on all partition sizes except 16x16
1144 *****************************************************************************/
1145 static void x264_partition_size_cabac( x264_t *h, x264_cabac_t *cb, int i8, int i_pixel )
1147 const int i_mb_type = h->mb.i_type;
1148 int b_8x16 = h->mb.i_partition == D_8x16;
1149 int plane_count = CHROMA444 ? 3 : 1;
1151 if( i_mb_type == P_8x8 )
1153 x264_cabac_8x8_mvd( h, cb, i8 );
1154 x264_cabac_subpartition_p( cb, h->mb.i_sub_partition[i8] );
1156 else if( i_mb_type == P_L0 )
1157 x264_cabac_mvd( h, cb, 0, 4*i8, 4>>b_8x16, 2<<b_8x16 );
1158 else if( i_mb_type > B_DIRECT && i_mb_type < B_8x8 )
1160 if( x264_mb_type_list_table[ i_mb_type ][0][!!i8] ) x264_cabac_mvd( h, cb, 0, 4*i8, 4>>b_8x16, 2<<b_8x16 );
1161 if( x264_mb_type_list_table[ i_mb_type ][1][!!i8] ) x264_cabac_mvd( h, cb, 1, 4*i8, 4>>b_8x16, 2<<b_8x16 );
1163 else //if( i_mb_type == B_8x8 )
1165 if( x264_mb_partition_listX_table[0][ h->mb.i_sub_partition[i8] ] )
1166 x264_cabac_mvd( h, cb, 0, 4*i8, 2, 2 );
1167 if( x264_mb_partition_listX_table[1][ h->mb.i_sub_partition[i8] ] )
1168 x264_cabac_mvd( h, cb, 1, 4*i8, 2, 2 );
1171 for( int j = (i_pixel < PIXEL_8x8); j >= 0; j-- )
1173 if( h->mb.i_cbp_luma & (1 << i8) )
1175 if( h->mb.b_transform_8x8 )
1178 for( int p = 0; p < 3; p++ )
1179 x264_cabac_block_residual_8x8_cbf( h, cb, ctx_cat_plane[DCT_LUMA_8x8][p], i8*4+p*16, h->dct.luma8x8[i8+p*4], 0 );
1181 x264_cabac_block_residual_8x8( h, cb, DCT_LUMA_8x8, h->dct.luma8x8[i8] );
1184 for( int p = 0; p < plane_count; p++ )
1185 for( int i4 = 0; i4 < 4; i4++ )
1186 x264_cabac_block_residual_cbf( h, cb, ctx_cat_plane[DCT_LUMA_4x4][p], i4+i8*4+p*16, h->dct.luma4x4[i4+i8*4+p*16], 0 );
1189 if( h->mb.i_cbp_chroma )
1191 if( CHROMA_FORMAT == CHROMA_422 )
1193 int offset = (5*i8) & 0x09;
1194 x264_cabac_block_residual_cbf( h, cb, DCT_CHROMA_AC, 16+offset, h->dct.luma4x4[16+offset]+1, 0 );
1195 x264_cabac_block_residual_cbf( h, cb, DCT_CHROMA_AC, 18+offset, h->dct.luma4x4[18+offset]+1, 0 );
1196 x264_cabac_block_residual_cbf( h, cb, DCT_CHROMA_AC, 32+offset, h->dct.luma4x4[32+offset]+1, 0 );
1197 x264_cabac_block_residual_cbf( h, cb, DCT_CHROMA_AC, 34+offset, h->dct.luma4x4[34+offset]+1, 0 );
1201 x264_cabac_block_residual_cbf( h, cb, DCT_CHROMA_AC, 16+i8, h->dct.luma4x4[16+i8]+1, 0 );
1202 x264_cabac_block_residual_cbf( h, cb, DCT_CHROMA_AC, 32+i8, h->dct.luma4x4[32+i8]+1, 0 );
1206 i8 += x264_pixel_size[i_pixel].h >> 3;
1210 static void x264_subpartition_size_cabac( x264_t *h, x264_cabac_t *cb, int i4, int i_pixel )
1212 int b_8x4 = i_pixel == PIXEL_8x4;
1213 int plane_count = CHROMA444 ? 3 : 1;
1214 if( i_pixel == PIXEL_4x4 )
1215 x264_cabac_mvd( h, cb, 0, i4, 1, 1 );
1217 x264_cabac_mvd( h, cb, 0, i4, 1+b_8x4, 2-b_8x4 );
1218 for( int p = 0; p < plane_count; p++ )
1220 x264_cabac_block_residual_cbf( h, cb, ctx_cat_plane[DCT_LUMA_4x4][p], p*16+i4, h->dct.luma4x4[p*16+i4], 0 );
1221 if( i_pixel != PIXEL_4x4 )
1222 x264_cabac_block_residual_cbf( h, cb, ctx_cat_plane[DCT_LUMA_4x4][p], p*16+i4+2-b_8x4, h->dct.luma4x4[p*16+i4+2-b_8x4], 0 );
1226 static void x264_partition_i8x8_size_cabac( x264_t *h, x264_cabac_t *cb, int i8, int i_mode )
1228 const int i_pred = x264_mb_predict_intra4x4_mode( h, 4*i8 );
1229 i_mode = x264_mb_pred_mode4x4_fix( i_mode );
1230 x264_cabac_intra4x4_pred_mode( cb, i_pred, i_mode );
1231 x264_cabac_cbp_luma( h, cb );
1232 if( h->mb.i_cbp_luma & (1 << i8) )
1235 for( int p = 0; p < 3; p++ )
1236 x264_cabac_block_residual_8x8_cbf( h, cb, ctx_cat_plane[DCT_LUMA_8x8][p], i8*4+p*16, h->dct.luma8x8[i8+p*4], 1 );
1238 x264_cabac_block_residual_8x8( h, cb, DCT_LUMA_8x8, h->dct.luma8x8[i8] );
1242 static void x264_partition_i4x4_size_cabac( x264_t *h, x264_cabac_t *cb, int i4, int i_mode )
1244 const int i_pred = x264_mb_predict_intra4x4_mode( h, i4 );
1245 int plane_count = CHROMA444 ? 3 : 1;
1246 i_mode = x264_mb_pred_mode4x4_fix( i_mode );
1247 x264_cabac_intra4x4_pred_mode( cb, i_pred, i_mode );
1248 for( int p = 0; p < plane_count; p++ )
1249 x264_cabac_block_residual_cbf( h, cb, ctx_cat_plane[DCT_LUMA_4x4][p], i4+p*16, h->dct.luma4x4[i4+p*16], 1 );
1252 static void x264_chroma_size_cabac( x264_t *h, x264_cabac_t *cb )
1254 x264_cabac_intra_chroma_pred_mode( h, cb );
1255 x264_cabac_cbp_chroma( h, cb );
1256 if( h->mb.i_cbp_chroma )
1258 if( CHROMA_FORMAT == CHROMA_422 )
1260 x264_cabac_block_residual_422_dc_cbf( h, cb, 0, 1 );
1261 x264_cabac_block_residual_422_dc_cbf( h, cb, 1, 1 );
1265 x264_cabac_block_residual_dc_cbf( h, cb, DCT_CHROMA_DC, CHROMA_DC+0, h->dct.chroma_dc[0], 1 );
1266 x264_cabac_block_residual_dc_cbf( h, cb, DCT_CHROMA_DC, CHROMA_DC+1, h->dct.chroma_dc[1], 1 );
1269 if( h->mb.i_cbp_chroma == 2 )
1271 int step = 8 << CHROMA_V_SHIFT;
1272 for( int i = 16; i < 3*16; i += step )
1273 for( int j = i; j < i+4; j++ )
1274 x264_cabac_block_residual_cbf( h, cb, DCT_CHROMA_AC, j, h->dct.luma4x4[j]+1, 1 );