/*****************************************************************************
 * rdo.c: h264 encoder library (rate-distortion optimization)
 *****************************************************************************
 * Copyright (C) 2005-2008 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *****************************************************************************/

/* duplicate all the writer functions, just calculating bit cost
 * instead of writing the bitstream.
 * TODO: use these for fast 1st pass too. */

/* Transition and size tables for abs<9 MVD and residual coding */
/* Consist of i_prefix-1 1s, one zero, and a bypass sign bit */
static uint8_t cabac_transition_unary[15][128];
static uint16_t cabac_size_unary[15][128];
/* Transition and size tables for abs>9 MVD */
/* Consist of 5 1s and a bypass sign bit */
static uint8_t cabac_transition_5ones[128];
static uint16_t cabac_size_5ones[128];

/* CAVLC: produces exactly the same bit count as a normal encode */
/* this probably still leaves some unnecessary computations */
#define bs_write1(s,v)     ((s)->i_bits_encoded += 1)
#define bs_write(s,n,v)    ((s)->i_bits_encoded += (n))
#define bs_write_ue(s,v)   ((s)->i_bits_encoded += bs_size_ue(v))
#define bs_write_se(s,v)   ((s)->i_bits_encoded += bs_size_se(v))
#define bs_write_te(s,v,l) ((s)->i_bits_encoded += bs_size_te(v,l))
#define x264_macroblock_write_cavlc  static x264_macroblock_size_cavlc
#include "cavlc.c"

/* CABAC: not exactly the same. x264_cabac_size_decision() keeps track of
 * fractional bits, but only with finite precision. */
#undef  x264_cabac_encode_decision
#undef  x264_cabac_encode_decision_noup
#undef  x264_cabac_encode_bypass
#undef  x264_cabac_encode_terminal
#define x264_cabac_encode_decision(c,x,v) x264_cabac_size_decision(c,x,v)
#define x264_cabac_encode_decision_noup(c,x,v) x264_cabac_size_decision_noup(c,x,v)
#define x264_cabac_encode_terminal(c)     ((c)->f8_bits_encoded += 7)
#define x264_cabac_encode_bypass(c,v)     ((c)->f8_bits_encoded += 256)
#define x264_cabac_encode_ue_bypass(c,e,v) ((c)->f8_bits_encoded += (bs_size_ue_big(v+(1<<e)-1)-e)<<8)
#define x264_macroblock_write_cabac  static x264_macroblock_size_cabac
#include "cabac.c"
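
/* For reference, the fixed point used above (illustrative numbers, not from
 * the spec): f8_bits_encoded counts 1/256th-bit units, so a bypass bit adds
 * 256 (exactly one bit), a ue() bypass run adds its length << 8, and a
 * terminal symbol adds 7 (~0.027 bits, the approximate cost of coding
 * "slice not terminated"). */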

#define COPY_CABAC h->mc.memcpy_aligned( &cabac_tmp.f8_bits_encoded, &h->cabac.f8_bits_encoded, \
        sizeof(x264_cabac_t) - offsetof(x264_cabac_t,f8_bits_encoded) )
#define COPY_CABAC_PART( pos, size )\
        memcpy( &cb->state[pos], &h->cabac.state[pos], size )
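
/* COPY_CABAC copies everything from f8_bits_encoded to the end of x264_cabac_t,
 * deliberately skipping the arithmetic coder's range/output fields, which bit
 * counting never touches; COPY_CABAC_PART copies only the named run of context
 * states into the scratch coder cb. */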

static ALWAYS_INLINE uint64_t cached_hadamard( x264_t *h, int size, int x, int y )
{
    static const uint8_t hadamard_shift_x[4] = {4,   4,   3,   3};
    static const uint8_t hadamard_shift_y[4] = {4-0, 3-0, 4-1, 3-1};
    static const uint8_t hadamard_offset[4]  = {0,   1,   3,   5};
    int cache_index = (x >> hadamard_shift_x[size]) + (y >> hadamard_shift_y[size])
                    + hadamard_offset[size];
    uint64_t res = h->mb.pic.fenc_hadamard_cache[cache_index];
    if( res )
        return res - 1;
    else
    {
        pixel *fenc = h->mb.pic.p_fenc[0] + x + y*FENC_STRIDE;
        res = h->pixf.hadamard_ac[size]( fenc, FENC_STRIDE );
        h->mb.pic.fenc_hadamard_cache[cache_index] = res + 1;
        return res;
    }
}
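
/* Cache entries are stored as score+1 so that 0 can double as the "empty slot"
 * marker while a legitimate score of 0 stays cacheable. */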

static ALWAYS_INLINE int cached_satd( x264_t *h, int size, int x, int y )
{
    static const uint8_t satd_shift_x[3] = {3,   2,   2};
    static const uint8_t satd_shift_y[3] = {2-1, 3-2, 2-2};
    static const uint8_t satd_offset[3]  = {0,   8,   16};
    ALIGNED_16( static pixel zero[16] );
    int cache_index = (x >> satd_shift_x[size - PIXEL_8x4]) + (y >> satd_shift_y[size - PIXEL_8x4])
                    + satd_offset[size - PIXEL_8x4];
    int res = h->mb.pic.fenc_satd_cache[cache_index];
    if( res )
        return res - 1;
    else
    {
        pixel *fenc = h->mb.pic.p_fenc[0] + x + y*FENC_STRIDE;
        int dc = h->pixf.sad[size]( fenc, FENC_STRIDE, zero, 0 ) >> 1;
        res = h->pixf.satd[size]( fenc, FENC_STRIDE, zero, 0 ) - dc;
        h->mb.pic.fenc_satd_cache[cache_index] = res + 1;
        return res;
    }
}

/* Psy RD distortion metric: SSD plus "Absolute Difference of Complexities". */
/* SATD and SA8D are used to measure block complexity. */
/* The difference between the SATD and SA8D scores is used to avoid bias from
 * the DCT size: using SATD alone, for example, results in overuse of 8x8dct,
 * while the opposite occurs when using SA8D alone. */

/* FIXME: Is there a better metric than averaged SATD/SA8D difference for complexity? */
/* The Hadamard transform is recursive, so a SATD+SA8D can be done faster by taking
 * advantage of this fact. This optimization can also be used in non-RD transform decision. */
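
/* Informally, the psy term added below for luma is
 *   ( |complexity(fdec) - complexity(fenc)| * psy_rd * psy_rd_lambda + 128 ) >> 8
 * where complexity() is the block's SATD (and/or SA8D) with the DC term
 * removed, and the result is added on top of the plain SSD. */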

static inline int ssd_plane( x264_t *h, int size, int p, int x, int y )
{
    ALIGNED_16( static pixel zero[16] );
    int satd = 0;
    pixel *fdec = h->mb.pic.p_fdec[p] + x + y*FDEC_STRIDE;
    pixel *fenc = h->mb.pic.p_fenc[p] + x + y*FENC_STRIDE;
    if( p == 0 && h->mb.i_psy_rd )
    {
        /* If the plane is smaller than 8x8, we can't do an SA8D; this probably isn't a big problem. */
        if( size <= PIXEL_8x8 )
        {
            uint64_t fdec_acs = h->pixf.hadamard_ac[size]( fdec, FDEC_STRIDE );
            uint64_t fenc_acs = cached_hadamard( h, size, x, y );
            satd = abs((int32_t)fdec_acs - (int32_t)fenc_acs)
                 + abs((int32_t)(fdec_acs>>32) - (int32_t)(fenc_acs>>32));
            satd >>= 1;
        }
        else
        {
            int dc = h->pixf.sad[size]( fdec, FDEC_STRIDE, zero, 0 ) >> 1;
            satd = abs(h->pixf.satd[size]( fdec, FDEC_STRIDE, zero, 0 ) - dc - cached_satd( h, size, x, y ));
        }
        satd = (satd * h->mb.i_psy_rd * h->mb.i_psy_rd_lambda + 128) >> 8;
    }
    return h->pixf.ssd[size](fenc, FENC_STRIDE, fdec, FDEC_STRIDE) + satd;
}

static inline int ssd_mb( x264_t *h )
{
    int chromassd = ssd_plane( h, PIXEL_8x8, 1, 0, 0 )
                  + ssd_plane( h, PIXEL_8x8, 2, 0, 0 );
    chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
    return ssd_plane( h, PIXEL_16x16, 0, 0, 0 ) + chromassd;
}

static int x264_rd_cost_mb( x264_t *h, int i_lambda2 )
{
    int b_transform_bak = h->mb.b_transform_8x8;
    int i_ssd;
    int i_bits;
    int type_bak = h->mb.i_type;

    x264_macroblock_encode( h );

    if( h->mb.b_deblock_rdo )
        x264_macroblock_deblock( h );

    i_ssd = ssd_mb( h );

    if( IS_SKIP( h->mb.i_type ) )
    {
        i_bits = (1 * i_lambda2 + 128) >> 8;
    }
    else if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        x264_macroblock_size_cabac( h, &cabac_tmp );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 32768 ) >> 16;
    }
    else
    {
        x264_macroblock_size_cavlc( h );
        i_bits = ( h->out.bs.i_bits_encoded * i_lambda2 + 128 ) >> 8;
    }

    h->mb.b_transform_8x8 = b_transform_bak;
    h->mb.i_type = type_bak;

    return i_ssd + i_bits;
}
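
/* This is the usual Lagrangian RD cost, distortion plus lambda times rate:
 * ssd_mb() supplies the distortion, and the size functions supply the bit
 * count (in 1/256th-bit units for CABAC), which is scaled by i_lambda2 and
 * rounded back down to integer units. */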

/* For small partitions (i.e. those using at most one DCT category's worth of CABAC states),
 * it's faster to copy the individual parts than to perform a whole COPY_CABAC. */
static ALWAYS_INLINE void x264_copy_cabac_part( x264_t *h, x264_cabac_t *cb, int cat, int intra )
{
    if( intra )
        COPY_CABAC_PART( 68, 2 );  //intra pred mode
    else
        COPY_CABAC_PART( 40, 16 ); //mvd, rounded up to 16 bytes

    /* 8x8dct writes CBP, while non-8x8dct writes CBF */
    if( cat != DCT_LUMA_8x8 )
        COPY_CABAC_PART( 85 + cat * 4, 4 );
    else
        COPY_CABAC_PART( 73, 4 );

    /* Really should be 15 bytes, but rounding up a byte saves some
     * instructions and is faster, and copying extra data doesn't hurt. */
    COPY_CABAC_PART( significant_coeff_flag_offset[h->mb.b_interlaced][cat], 16 );
    COPY_CABAC_PART( last_coeff_flag_offset[h->mb.b_interlaced][cat], 16 );
    COPY_CABAC_PART( coeff_abs_level_m1_offset[cat], 10 );
    cb->f8_bits_encoded = 0;
}

/* partition RD functions use 8 bits more precision to avoid large rounding errors at low QPs */
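/* Concretely, the scores below are kept at 256x scale: (i_ssd<<8) plus a rate
 * term of bits*lambda2 that retains the 8 fractional bits which
 * x264_rd_cost_mb() rounds away. */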

static uint64_t x264_rd_cost_subpart( x264_t *h, int i_lambda2, int i4, int i_pixel )
{
    uint64_t i_ssd, i_bits;

    x264_macroblock_encode_p4x4( h, i4 );
    if( i_pixel == PIXEL_8x4 )
        x264_macroblock_encode_p4x4( h, i4+1 );
    if( i_pixel == PIXEL_4x8 )
        x264_macroblock_encode_p4x4( h, i4+2 );

    i_ssd = ssd_plane( h, i_pixel, 0, block_idx_x[i4]*4, block_idx_y[i4]*4 );

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        x264_copy_cabac_part( h, &cabac_tmp, DCT_LUMA_4x4, 0 );
        x264_subpartition_size_cabac( h, &cabac_tmp, i4, i_pixel );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = x264_subpartition_size_cavlc( h, i4, i_pixel ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

uint64_t x264_rd_cost_part( x264_t *h, int i_lambda2, int i4, int i_pixel )
{
    uint64_t i_ssd, i_bits;
    int i8 = i4 >> 2;
    int chromassd;

    if( i_pixel == PIXEL_16x16 )
    {
        int i_cost = x264_rd_cost_mb( h, i_lambda2 );
        return i_cost;
    }

    if( i_pixel > PIXEL_8x8 )
        return x264_rd_cost_subpart( h, i_lambda2, i4, i_pixel );

    h->mb.i_cbp_luma = 0;

    x264_macroblock_encode_p8x8( h, i8 );
    if( i_pixel == PIXEL_16x8 )
        x264_macroblock_encode_p8x8( h, i8+1 );
    if( i_pixel == PIXEL_8x16 )
        x264_macroblock_encode_p8x8( h, i8+2 );

    chromassd = ssd_plane( h, i_pixel+3, 1, (i8&1)*4, (i8>>1)*4 )
              + ssd_plane( h, i_pixel+3, 2, (i8&1)*4, (i8>>1)*4 );
    chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
    i_ssd = ssd_plane( h, i_pixel, 0, (i8&1)*8, (i8>>1)*8 ) + chromassd;

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        x264_partition_size_cabac( h, &cabac_tmp, i8, i_pixel );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = x264_partition_size_cavlc( h, i8, i_pixel ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

static uint64_t x264_rd_cost_i8x8( x264_t *h, int i_lambda2, int i8, int i_mode )
{
    uint64_t i_ssd, i_bits;
    h->mb.i_cbp_luma &= ~(1<<i8);
    h->mb.b_transform_8x8 = 1;

    x264_mb_encode_i8x8( h, i8, h->mb.i_qp );
    i_ssd = ssd_plane( h, PIXEL_8x8, 0, (i8&1)*8, (i8>>1)*8 );

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        x264_copy_cabac_part( h, &cabac_tmp, DCT_LUMA_8x8, 1 );
        x264_partition_i8x8_size_cabac( h, &cabac_tmp, i8, i_mode );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = x264_partition_i8x8_size_cavlc( h, i8, i_mode ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

static uint64_t x264_rd_cost_i4x4( x264_t *h, int i_lambda2, int i4, int i_mode )
{
    uint64_t i_ssd, i_bits;

    x264_mb_encode_i4x4( h, i4, h->mb.i_qp );
    i_ssd = ssd_plane( h, PIXEL_4x4, 0, block_idx_x[i4]*4, block_idx_y[i4]*4 );

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        x264_copy_cabac_part( h, &cabac_tmp, DCT_LUMA_4x4, 1 );
        x264_partition_i4x4_size_cabac( h, &cabac_tmp, i4, i_mode );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = x264_partition_i4x4_size_cavlc( h, i4, i_mode ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

static uint64_t x264_rd_cost_i8x8_chroma( x264_t *h, int i_lambda2, int i_mode, int b_dct )
{
    uint64_t i_ssd, i_bits;

    if( b_dct )
        x264_mb_encode_8x8_chroma( h, 0, h->mb.i_chroma_qp );
    i_ssd = ssd_plane( h, PIXEL_8x8, 1, 0, 0 ) +
            ssd_plane( h, PIXEL_8x8, 2, 0, 0 );

    h->mb.i_chroma_pred_mode = i_mode;

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        x264_i8x8_chroma_size_cabac( h, &cabac_tmp );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = x264_i8x8_chroma_size_cavlc( h ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

/****************************************************************************
 * Trellis RD quantization
 ****************************************************************************/

#define TRELLIS_SCORE_MAX ((uint64_t)1<<50)
#define CABAC_SIZE_BITS 8
#define SSD_WEIGHT_BITS 5
#define LAMBDA_BITS 4
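
/* Rough guide to the fixed point here, derived from the code below: CABAC
 * sizes are Q8 (CABAC_SIZE_BITS), and rate enters a node score as
 * f8_bits * i_lambda2 >> (CABAC_SIZE_BITS - LAMBDA_BITS), keeping LAMBDA_BITS
 * of fractional lambda precision; distortion enters as d*d scaled by a
 * coef_weight table built with SSD_WEIGHT_BITS of weight precision. */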

/* precalculate the cost of coding various combinations of bits in a single context */
void x264_rdo_init( void )
{
    for( int i_prefix = 0; i_prefix < 15; i_prefix++ )
    {
        for( int i_ctx = 0; i_ctx < 128; i_ctx++ )
        {
            int f8_bits = 0;
            uint8_t ctx = i_ctx;

            for( int i = 1; i < i_prefix; i++ )
                f8_bits += x264_cabac_size_decision2( &ctx, 1 );
            if( i_prefix > 0 && i_prefix < 14 )
                f8_bits += x264_cabac_size_decision2( &ctx, 0 );
            f8_bits += 1 << CABAC_SIZE_BITS; //sign

            cabac_size_unary[i_prefix][i_ctx] = f8_bits;
            cabac_transition_unary[i_prefix][i_ctx] = ctx;
        }
    }
    for( int i_ctx = 0; i_ctx < 128; i_ctx++ )
    {
        int f8_bits = 0;
        uint8_t ctx = i_ctx;

        for( int i = 0; i < 5; i++ )
            f8_bits += x264_cabac_size_decision2( &ctx, 1 );
        f8_bits += 1 << CABAC_SIZE_BITS; //sign

        cabac_size_5ones[i_ctx] = f8_bits;
        cabac_transition_5ones[i_ctx] = ctx;
    }
}
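
/* The tables built above are consumed by the trellis below roughly as:
 *   int i_prefix = X264_MIN( abs_level - 1, 14 );
 *   f8_bits += cabac_size_unary[i_prefix][*ctx];       // unary suffix + sign
 *   *ctx     = cabac_transition_unary[i_prefix][*ctx]; // resulting ctx state
 */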

typedef struct
{
    uint64_t score;
    int level_idx; // index into level_tree[]
    uint8_t cabac_state[10]; //just the contexts relevant to coding abs_level_m1
} trellis_node_t;

// TODO:
// save cabac state between blocks?
// use trellis' RD score instead of x264_mb_decimate_score?
// code 8x8 sig/last flags forwards with deadzone and save the contexts at
// change weights when using CQMs?

// possible optimizations:
// make scores fit in 32bit
// save quantized coefs during rd, to avoid a duplicate trellis in the final encode
// if trellissing all MBRD modes, finish SSD calculation so we can skip all of
//   the normal dequant/idct/ssd/cabac

// the unquant_mf here is not the same as dequant_mf:
// in normal operation (dct->quant->dequant->idct) the dct and idct are not
// normalized. quant/dequant absorb those scaling factors.
// in this function, we just do (quant->unquant) and want the output to be
// comparable to the input. so unquant is the direct inverse of quant,
// and uses the dct scaling factors, not the idct ones.
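// put differently: dequant(quant(x)) carries the idct's scaling along with x,
// while unquant(quant(x)) ~= x, so the trellis can compare candidate levels
// directly against the input coefficients.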

static ALWAYS_INLINE int quant_trellis_cabac( x264_t *h, dctcoef *dct,
                                 const uint16_t *quant_mf, const int *unquant_mf,
                                 const int *coef_weight, const uint8_t *zigzag,
                                 int i_ctxBlockCat, int i_lambda2, int b_ac, int dc, int i_coefs, int idx )
{
    int abs_coefs[64], signs[64];
    trellis_node_t nodes[2][8];
    trellis_node_t *nodes_cur = nodes[0];
    trellis_node_t *nodes_prev = nodes[1];
    trellis_node_t *bnode;
    const int b_interlaced = h->mb.b_interlaced;
    uint8_t *cabac_state_sig = &h->cabac.state[ significant_coeff_flag_offset[b_interlaced][i_ctxBlockCat] ];
    uint8_t *cabac_state_last = &h->cabac.state[ last_coeff_flag_offset[b_interlaced][i_ctxBlockCat] ];
    const int f = 1 << 15; // no deadzone
    int i_last_nnz;
    int i;

    // (# of coefs) * (# of ctx) * (# of levels tried) = 1024
    // we don't need to keep all of those: (# of coefs) * (# of ctx) would be enough,
    // but it takes more time to remove dead states than you gain in reduced memory.
    struct
    {
        uint16_t abs_level;
        uint16_t next;
    } level_tree[64*8*2];
    int i_levels_used = 1;
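
    /* level_tree acts as a bank of backwards singly-linked lists: each node
     * prepends its chosen abs_level, and the winning path is walked at the
     * end of the function to emit the coefficients in forward scan order. */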

    /* find the position of the last coefficient that quantizes to a nonzero value */
    for( i = i_coefs-1; i >= b_ac; i-- )
        if( (unsigned)(dct[zigzag[i]] * (dc?quant_mf[0]>>1:quant_mf[zigzag[i]]) + f-1) >= 2*f )
            break;

    if( i < b_ac )
    {
        /* We only need to zero an empty 4x4 block. 8x8 can be
           implicitly emptied via zero nnz, as can dc. */
        if( i_coefs == 16 && !dc )
            memset( dct, 0, 16 * sizeof(dctcoef) );
        return 0;
    }

    i_last_nnz = i;

    for( ; i >= b_ac; i-- )
    {
        int coef = dct[zigzag[i]];
        abs_coefs[i] = abs(coef);
        signs[i] = coef < 0 ? -1 : 1;
    }

    /* init trellis */
    for( int j = 1; j < 8; j++ )
        nodes_cur[j].score = TRELLIS_SCORE_MAX;
    nodes_cur[0].score = 0;
    nodes_cur[0].level_idx = 0;
    level_tree[0].abs_level = 0;
    level_tree[0].next = 0;

    // coefs are processed in reverse order, because that's how the abs value is coded.
    // last_coef and significant_coef flags are normally coded in forward order, but
    // we have to reverse them to match the levels.
    // in 4x4 blocks, last_coef and significant_coef use a separate context for each
    // position, so the order doesn't matter, and we don't even have to update their contexts.
    // in 8x8 blocks, some positions share contexts, so we'll just have to hope that
    // cabac isn't too sensitive.

    memcpy( nodes_cur[0].cabac_state, &h->cabac.state[ coeff_abs_level_m1_offset[i_ctxBlockCat] ], 10 );

    for( i = i_last_nnz; i >= b_ac; i-- )
    {
        int i_coef = abs_coefs[i];
        int q = ( f + i_coef * (dc?quant_mf[0]>>1:quant_mf[zigzag[i]]) ) >> 16;
        int cost_sig[2], cost_last[2];

        // skip 0s: this doesn't affect the output, but saves some unnecessary computation.
        if( q == 0 )
        {
            // no need to calculate ssd of 0s: it's the same in all nodes.
            // no need to modify level_tree for ctx=0: it starts with an infinite loop of 0s.
            int sigindex = i_coefs == 64 ? significant_coeff_flag_offset_8x8[b_interlaced][i] : i;
            const uint32_t cost_sig0 = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 0 )
                                     * (uint64_t)i_lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );
            for( int j = 1; j < 8; j++ )
            {
                if( nodes_cur[j].score != TRELLIS_SCORE_MAX )
                {
#define SET_LEVEL(n,l) \
                    level_tree[i_levels_used].abs_level = l; \
                    level_tree[i_levels_used].next = n.level_idx; \
                    n.level_idx = i_levels_used; \
                    i_levels_used++;

                    SET_LEVEL( nodes_cur[j], 0 );
                    nodes_cur[j].score += cost_sig0;
                }
            }
            continue;
        }

        XCHG( trellis_node_t*, nodes_cur, nodes_prev );

        for( int j = 0; j < 8; j++ )
            nodes_cur[j].score = TRELLIS_SCORE_MAX;

        if( i < i_coefs-1 )
        {
            int sigindex = i_coefs == 64 ? significant_coeff_flag_offset_8x8[b_interlaced][i] : i;
            int lastindex = i_coefs == 64 ? last_coeff_flag_offset_8x8[i] : i;
            cost_sig[0] = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 0 );
            cost_sig[1] = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 1 );
            cost_last[0] = x264_cabac_size_decision_noup2( &cabac_state_last[lastindex], 0 );
            cost_last[1] = x264_cabac_size_decision_noup2( &cabac_state_last[lastindex], 1 );
        }
        else
        {
            cost_sig[0] = cost_sig[1] = 0;
            cost_last[0] = cost_last[1] = 0;
        }

        // there are a few cases where increasing the coeff magnitude helps,
        // but it's only around .003 dB, and skipping them ~doubles the speed of trellis.
        // could also try q-2: that sometimes helps, but also sometimes decimates blocks
        // that are better left coded, especially at QP > 40.
        for( int abs_level = q; abs_level >= q-1; abs_level-- )
        {
            int unquant_abs_level = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[i]]) * abs_level + 128) >> 8);
            int d = i_coef - unquant_abs_level;
            int64_t ssd;
            /* Psy trellis: bias in favor of higher AC coefficients in the reconstructed frame. */
            if( h->mb.i_psy_trellis && i && !dc && i_ctxBlockCat != DCT_CHROMA_AC )
            {
                int orig_coef = (i_coefs == 64) ? h->mb.pic.fenc_dct8[idx][zigzag[i]] : h->mb.pic.fenc_dct4[idx][zigzag[i]];
                int predicted_coef = orig_coef - i_coef * signs[i];
                int psy_value = h->mb.i_psy_trellis * abs(predicted_coef + unquant_abs_level * signs[i]);
                int psy_weight = (i_coefs == 64) ? x264_dct8_weight_tab[zigzag[i]] : x264_dct4_weight_tab[zigzag[i]];
                ssd = (int64_t)d*d * coef_weight[i] - psy_weight * psy_value;
            }
            else
                /* FIXME: for i16x16 dc is this weight optimal? */
                ssd = (int64_t)d*d * (dc?256:coef_weight[i]);

            for( int j = 0; j < 8; j++ )
            {
                trellis_node_t n = nodes_prev[j];
                int node_ctx = j;

                if( nodes_prev[j].score == TRELLIS_SCORE_MAX )
                    continue;

                /* code the proposed level, and count how much entropy it would take */
                if( abs_level || node_ctx )
                {
                    unsigned f8_bits = cost_sig[ abs_level != 0 ];
                    if( abs_level )
                    {
                        const int i_prefix = X264_MIN( abs_level - 1, 14 );
                        f8_bits += cost_last[ node_ctx == 0 ];
                        f8_bits += x264_cabac_size_decision2( &n.cabac_state[coeff_abs_level1_ctx[node_ctx]], i_prefix > 0 );
                        if( i_prefix > 0 )
                        {
                            uint8_t *ctx = &n.cabac_state[coeff_abs_levelgt1_ctx[node_ctx]];
                            f8_bits += cabac_size_unary[i_prefix][*ctx];
                            *ctx = cabac_transition_unary[i_prefix][*ctx];
                            if( abs_level >= 15 )
                                f8_bits += bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS;
                            node_ctx = coeff_abs_level_transition[1][node_ctx];
                        }
                        else
                        {
                            f8_bits += 1 << CABAC_SIZE_BITS;
                            node_ctx = coeff_abs_level_transition[0][node_ctx];
                        }
                    }
                    n.score += (uint64_t)f8_bits * i_lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );
                }

                if( j || i || dc )
                    n.score += ssd;
                else
                {
                    /* Optimize rounding for DC coefficients in DC-only luma 4x4/8x8 blocks. */
                    d = i_coef * signs[0] - ((unquant_abs_level * signs[0] + 8)&~15);
                    n.score += (int64_t)d*d * coef_weight[i];
                }

                /* save the node if it's better than any existing node with the same cabac ctx */
                if( n.score < nodes_cur[node_ctx].score )
                {
                    SET_LEVEL( n, abs_level );
                    nodes_cur[node_ctx] = n;
                }
            }
        }
    }

    /* output levels from the best path through the trellis */
    bnode = &nodes_cur[0];
    for( int j = 1; j < 8; j++ )
        if( nodes_cur[j].score < bnode->score )
            bnode = &nodes_cur[j];

    if( bnode == &nodes_cur[0] )
    {
        if( i_coefs == 16 && !dc )
            memset( dct, 0, 16 * sizeof(dctcoef) );
        return 0;
    }

    int level = bnode->level_idx;
    for( i = b_ac; level; i++ )
    {
        dct[zigzag[i]] = level_tree[level].abs_level * signs[i];
        level = level_tree[level].next;
    }
    for( ; i < i_coefs; i++ )
        dct[zigzag[i]] = 0;

    return 1;
}
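
/* The 0/1 return value is the block's resulting nonzero status: 1 means at
 * least one nonzero level survived the trellis, 0 means the block emptied. */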

static const uint8_t x264_zigzag_scan2[4] = {0,1,2,3};

int x264_quant_dc_trellis( x264_t *h, dctcoef *dct, int i_quant_cat,
                           int i_qp, int i_ctxBlockCat, int b_intra, int b_chroma )
{
    return quant_trellis_cabac( h, dct,
        h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
        NULL, i_ctxBlockCat==DCT_CHROMA_DC ? x264_zigzag_scan2 : x264_zigzag_scan4[h->mb.b_interlaced],
        i_ctxBlockCat, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, 1, i_ctxBlockCat==DCT_CHROMA_DC ? 4 : 16, 0 );
}

int x264_quant_4x4_trellis( x264_t *h, dctcoef *dct, int i_quant_cat,
                            int i_qp, int i_ctxBlockCat, int b_intra, int b_chroma, int idx )
{
    int b_ac = (i_ctxBlockCat == DCT_LUMA_AC || i_ctxBlockCat == DCT_CHROMA_AC);
    return quant_trellis_cabac( h, dct,
        h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
        x264_dct4_weight2_zigzag[h->mb.b_interlaced],
        x264_zigzag_scan4[h->mb.b_interlaced],
        i_ctxBlockCat, h->mb.i_trellis_lambda2[b_chroma][b_intra], b_ac, 0, 16, idx );
}

int x264_quant_8x8_trellis( x264_t *h, dctcoef *dct, int i_quant_cat,
                            int i_qp, int b_intra, int idx )
{
    return quant_trellis_cabac( h, dct,
        h->quant8_mf[i_quant_cat][i_qp], h->unquant8_mf[i_quant_cat][i_qp],
        x264_dct8_weight2_zigzag[h->mb.b_interlaced],
        x264_zigzag_scan8[h->mb.b_interlaced],
        DCT_LUMA_8x8, h->mb.i_trellis_lambda2[0][b_intra], 0, 0, 64, idx );
}
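
// Typical call site (a sketch, not copied from the encoder; CQM_4IY is the
// intra-4x4 luma quant category and idx the 4x4 block index):
//   int nz = x264_quant_4x4_trellis( h, dct4x4, CQM_4IY, i_qp,
//                                    DCT_LUMA_4x4, 1, 0, idx );
//   h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;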