1 /*****************************************************************************
2 * analyse.c: h264 encoder library
3 *****************************************************************************
4 * Copyright (C) 2003 x264 project
5 * $Id: analyse.c,v 1.1 2004/06/03 19:27:08 fenrir Exp $
7 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
8 * Loren Merritt <lorenm@u.washington.edu>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
23 *****************************************************************************/
31 #include "common/common.h"
32 #include "macroblock.h"
34 #include "ratecontrol.h"
47 /* [ref][0] is the 16x16 mv, [ref][1..4] are the 8x8 mvs from partitions [0..3] */
48 DECLARE_ALIGNED_8( int mvc[32][5][2] );
52 int i_cost4x4[4]; /* cost per 8x8 partition */
53 x264_me_t me4x4[4][4];
56 int i_cost8x4[4]; /* cost per 8x8 partition */
57 x264_me_t me8x4[4][2];
60 int i_cost4x8[4]; /* cost per 8x8 partition */
61 x264_me_t me4x8[4][2];
71 } x264_mb_analysis_list_t;
75 /* conduct the analysis using this lambda and QP */
84 /* Take some shortcuts in intra search if intra is deemed unlikely */
90 int i_satd_i16x16_dir[7];
94 int i_satd_i8x8_dir[12][4];
101 int i_satd_i8x8chroma;
102 int i_satd_i8x8chroma_dir[4];
103 int i_predict8x8chroma;
105 /* II: Inter part P/B frame */
106 x264_mb_analysis_list_t l0;
107 x264_mb_analysis_list_t l1;
109 int i_cost16x16bi; /* uses the same ref and mv as l0 and l1 (at least for now) */
110 int i_cost16x16direct;
112 int i_cost8x8direct[4];
121 int i_mb_partition16x8[2]; /* mb_partition_e */
122 int i_mb_partition8x16[2];
123 int i_mb_type16x8; /* mb_class_e */
126 int b_direct_available;
128 } x264_mb_analysis_t;
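/* The two tables below are the Lagrangian multipliers used throughout the
 * analysis: lambda scales the bit-cost term added to SATD-based scores
 * (cost = satd + lambda * bits), while lambda2 (which includes a *256 scale)
 * weights the bit cost against SSD in full rate-distortion scores.
 * E.g. at QP 26, pow(2, 26/6 - 2) is about 5.04, matching the table value 5. */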
130 /* lambda = pow(2,qp/6-2) */
131 static const int i_qp0_cost_table[52] = {
132 1, 1, 1, 1, 1, 1, 1, 1, /* 0-7 */
133 1, 1, 1, 1, /* 8-11 */
134 1, 1, 1, 1, 2, 2, 2, 2, /* 12-19 */
135 3, 3, 3, 4, 4, 4, 5, 6, /* 20-27 */
136 6, 7, 8, 9,10,11,13,14, /* 28-35 */
137 16,18,20,23,25,29,32,36, /* 36-43 */
138 40,45,51,57,64,72,81,91 /* 44-51 */
141 /* lambda2 = pow(lambda,2) * .9 * 256 */
142 static const int i_qp0_cost2_table[52] = {
143 14, 18, 22, 28, 36, 45, 57, 72, /* 0 - 7 */
144 91, 115, 145, 182, 230, 290, 365, 460, /* 8 - 15 */
145 580, 731, 921, 1161, 1462, 1843, 2322, 2925, /* 16 - 23 */
146 3686, 4644, 5851, 7372, 9289, 11703, 14745, 18578, /* 24 - 31 */
147 23407, 29491, 37156, 46814, 58982, 74313, 93628, 117964, /* 32 - 39 */
148 148626, 187257, 235929, 297252, 374514, 471859, 594505, 749029, /* 40 - 47 */
149 943718, 1189010, 1498059, 1887436 /* 48 - 51 */
152 /* TODO: calculate CABAC costs */
153 static const int i_mb_b_cost_table[X264_MBTYPE_MAX] = {
154 9, 9, 9, 9, 0, 0, 0, 1, 3, 7, 7, 7, 3, 7, 7, 7, 5, 9, 0
156 static const int i_mb_b16x8_cost_table[17] = {
157 0, 0, 0, 0, 0, 0, 0, 0, 5, 7, 7, 7, 5, 7, 9, 9, 9
159 static const int i_sub_mb_b_cost_table[13] = {
160 7, 5, 5, 3, 7, 5, 7, 3, 7, 7, 7, 5, 1
162 static const int i_sub_mb_p_cost_table[4] = {
166 static void x264_analyse_update_cache( x264_t *h, x264_mb_analysis_t *a );
168 uint16_t *x264_cost_mv_fpel[52][4];
170 /* initialize an array of lambda*nbits for all possible mvs */
171 static void x264_mb_analyse_load_costs( x264_t *h, x264_mb_analysis_t *a )
173 static int16_t *p_cost_mv[52];
176 if( !p_cost_mv[a->i_qp] )
178 /* could be faster, but isn't called many times */
179 /* factor of 4 from qpel, 2 from sign, and 2 because mv can be opposite from mvp */
180 p_cost_mv[a->i_qp] = x264_malloc( (4*4*2048 + 1) * sizeof(int16_t) );
181 p_cost_mv[a->i_qp] += 2*4*2048;
182 for( i = 0; i <= 2*4*2048; i++ )
184 p_cost_mv[a->i_qp][-i] =
185 p_cost_mv[a->i_qp][i] = a->i_lambda * bs_size_se( i );
188 a->p_cost_mv = p_cost_mv[a->i_qp];
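/* For exhaustive searches, also build fullpel-indexed cost tables, one per
 * quarter-pel phase, so the fullpel loop can look up mv costs without
 * converting every candidate to qpel units. */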
190 /* FIXME is this useful for all me methods? */
191 if( h->param.analyse.i_me_method >= X264_ME_ESA && !x264_cost_mv_fpel[a->i_qp][0] )
195 x264_cost_mv_fpel[a->i_qp][j] = x264_malloc( (4*2048 + 1) * sizeof(int16_t) );
196 x264_cost_mv_fpel[a->i_qp][j] += 2*2048;
197 for( i = -2*2048; i < 2*2048; i++ )
198 x264_cost_mv_fpel[a->i_qp][j][i] = p_cost_mv[a->i_qp][i*4+j];
203 static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp )
205 /* conduct the analysis using this lambda and QP */
206 a->i_qp = h->mb.i_qp = i_qp;
207 h->mb.i_chroma_qp = i_chroma_qp_table[x264_clip3( i_qp + h->pps->i_chroma_qp_index_offset, 0, 51 )];
208 a->i_lambda = i_qp0_cost_table[i_qp];
209 a->i_lambda2 = i_qp0_cost2_table[i_qp];
210 a->b_mbrd = h->param.analyse.i_subpel_refine >= 6 &&
211 ( h->sh.i_type != SLICE_TYPE_B || h->param.analyse.b_bframe_rdo );
213 h->mb.i_me_method = h->param.analyse.i_me_method;
214 h->mb.i_subpel_refine = h->param.analyse.i_subpel_refine;
215 h->mb.b_chroma_me = h->param.analyse.b_chroma_me && h->sh.i_type == SLICE_TYPE_P
216 && h->mb.i_subpel_refine >= 5;
217 h->mb.b_trellis = h->param.analyse.i_trellis > 1 && a->b_mbrd;
218 h->mb.b_transform_8x8 = 0;
219 h->mb.b_noise_reduction = 0;
225 a->i_satd_i8x8chroma = COST_MAX;
229 h->mb.b_lossless ? 0 :
231 !h->param.analyse.i_trellis && !h->param.analyse.i_noise_reduction;
233 /* II: Inter part P/B frame */
234 if( h->sh.i_type != SLICE_TYPE_I )
237 int i_fmv_range = 4 * h->param.analyse.i_mv_range;
238 // limit motion search to a slightly smaller range than the theoretical limit,
239 // since the search may go a few iterations past its given range
240 int i_fpel_border = 5; // umh unconditional radius
241 int i_spel_border = 8; // 1.5 for subpel_satd, 1.5 for subpel_rd, 2 for bime, round up
243 /* Calculate max allowed MV range */
244 #define CLIP_FMV(mv) x264_clip3( mv, -i_fmv_range, i_fmv_range-1 )
245 h->mb.mv_min[0] = 4*( -16*h->mb.i_mb_x - 24 );
246 h->mb.mv_max[0] = 4*( 16*( h->sps->i_mb_width - h->mb.i_mb_x - 1 ) + 24 );
247 h->mb.mv_min_spel[0] = CLIP_FMV( h->mb.mv_min[0] );
248 h->mb.mv_max_spel[0] = CLIP_FMV( h->mb.mv_max[0] );
249 h->mb.mv_min_fpel[0] = (h->mb.mv_min_spel[0]>>2) + i_fpel_border;
250 h->mb.mv_max_fpel[0] = (h->mb.mv_max_spel[0]>>2) - i_fpel_border;
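/* The horizontal limits let the mv point up to 24 pixels past the frame edge
 * (into the padded border), clamped to the overall fmv range; the fpel limits
 * are shrunk by the search border so refinement can't step outside the
 * clipped range. */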
251 if( h->mb.i_mb_x == 0 )
253 int mb_y = h->mb.i_mb_y >> h->sh.b_mbaff;
254 int mb_height = h->sps->i_mb_height >> h->sh.b_mbaff;
255 int thread_mvy_range = i_fmv_range;
257 if( h->param.i_threads > 1 )
259 int pix_y = (h->mb.i_mb_y | h->mb.b_interlaced) * 16;
260 int thresh = pix_y + h->param.analyse.i_mv_range_thread;
261 for( i = (h->sh.i_type == SLICE_TYPE_B); i >= 0; i-- )
263 x264_frame_t **fref = i ? h->fref1 : h->fref0;
264 int i_ref = i ? h->i_ref1 : h->i_ref0;
265 for( j=0; j<i_ref; j++ )
267 x264_frame_cond_wait( fref[j], thresh );
268 thread_mvy_range = X264_MIN( thread_mvy_range, fref[j]->i_lines_completed - pix_y );
271 if( h->param.b_deterministic )
272 thread_mvy_range = h->param.analyse.i_mv_range_thread;
273 if( h->mb.b_interlaced )
274 thread_mvy_range >>= 1;
277 h->mb.mv_min[1] = 4*( -16*mb_y - 24 );
278 h->mb.mv_max[1] = 4*( 16*( mb_height - mb_y - 1 ) + 24 );
279 h->mb.mv_min_spel[1] = x264_clip3( h->mb.mv_min[1], X264_MAX(4*(-512+i_spel_border), -i_fmv_range), i_fmv_range );
280 h->mb.mv_max_spel[1] = CLIP_FMV( h->mb.mv_max[1] );
281 h->mb.mv_max_spel[1] = X264_MIN( h->mb.mv_max_spel[1], thread_mvy_range*4 );
282 h->mb.mv_min_fpel[1] = (h->mb.mv_min_spel[1]>>2) + i_fpel_border;
283 h->mb.mv_max_fpel[1] = (h->mb.mv_max_spel[1]>>2) - i_fpel_border;
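/* Vertical limits are recomputed once per mb row.  With frame-based
 * threading, downward motion is further clamped to the rows of each
 * reference frame that other threads have already reconstructed, which is
 * what the cond_wait on i_lines_completed above guarantees. */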
289 a->l0.i_cost8x8 = COST_MAX;
291 for( i = 0; i < 4; i++ )
295 a->l0.i_cost4x8[i] = COST_MAX;
299 a->l0.i_cost8x16 = COST_MAX;
300 if( h->sh.i_type == SLICE_TYPE_B )
304 a->l1.i_cost8x8 = COST_MAX;
306 for( i = 0; i < 4; i++ )
311 a->i_cost8x8direct[i] = COST_MAX;
322 a->i_cost16x16direct =
325 a->i_cost8x16bi = COST_MAX;
328 /* Fast intra decision */
329 if( h->mb.i_mb_xy - h->sh.i_first_mb > 4 )
331 if( IS_INTRA( h->mb.i_mb_type_left )
332 || IS_INTRA( h->mb.i_mb_type_top )
333 || IS_INTRA( h->mb.i_mb_type_topleft )
334 || IS_INTRA( h->mb.i_mb_type_topright )
335 || (h->sh.i_type == SLICE_TYPE_P && IS_INTRA( h->fref0[0]->mb_type[h->mb.i_mb_xy] ))
336 || (h->mb.i_mb_xy - h->sh.i_first_mb < 3*(h->stat.frame.i_mb_count[I_4x4] + h->stat.frame.i_mb_count[I_8x8] + h->stat.frame.i_mb_count[I_16x16])) )
337 { /* intra is likely */ }
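/* If none of the above indicators fire, intra is deemed unlikely and the
 * b_fast_intra shortcuts declared in x264_mb_analysis_t are used to trim
 * the intra mode search for this mb. */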
352 static void predict_16x16_mode_available( unsigned int i_neighbour, int *mode, int *pi_count )
354 if( i_neighbour & MB_TOPLEFT )
356 /* top and left available */
357 *mode++ = I_PRED_16x16_V;
358 *mode++ = I_PRED_16x16_H;
359 *mode++ = I_PRED_16x16_DC;
360 *mode++ = I_PRED_16x16_P;
363 else if( i_neighbour & MB_LEFT )
366 *mode++ = I_PRED_16x16_DC_LEFT;
367 *mode++ = I_PRED_16x16_H;
370 else if( i_neighbour & MB_TOP )
373 *mode++ = I_PRED_16x16_DC_TOP;
374 *mode++ = I_PRED_16x16_V;
380 *mode = I_PRED_16x16_DC_128;
386 static void predict_8x8chroma_mode_available( unsigned int i_neighbour, int *mode, int *pi_count )
388 if( i_neighbour & MB_TOPLEFT )
390 /* top and left available */
391 *mode++ = I_PRED_CHROMA_V;
392 *mode++ = I_PRED_CHROMA_H;
393 *mode++ = I_PRED_CHROMA_DC;
394 *mode++ = I_PRED_CHROMA_P;
397 else if( i_neighbour & MB_LEFT )
400 *mode++ = I_PRED_CHROMA_DC_LEFT;
401 *mode++ = I_PRED_CHROMA_H;
404 else if( i_neighbour & MB_TOP )
407 *mode++ = I_PRED_CHROMA_DC_TOP;
408 *mode++ = I_PRED_CHROMA_V;
414 *mode = I_PRED_CHROMA_DC_128;
420 static void predict_4x4_mode_available( unsigned int i_neighbour,
421 int *mode, int *pi_count )
423 int b_l = i_neighbour & MB_LEFT;
424 int b_t = i_neighbour & MB_TOP;
429 *mode++ = I_PRED_4x4_DC;
430 *mode++ = I_PRED_4x4_H;
431 *mode++ = I_PRED_4x4_V;
432 *mode++ = I_PRED_4x4_DDL;
433 if( i_neighbour & MB_TOPLEFT )
435 *mode++ = I_PRED_4x4_DDR;
436 *mode++ = I_PRED_4x4_VR;
437 *mode++ = I_PRED_4x4_HD;
440 *mode++ = I_PRED_4x4_VL;
441 *mode++ = I_PRED_4x4_HU;
445 *mode++ = I_PRED_4x4_DC_LEFT;
446 *mode++ = I_PRED_4x4_H;
447 *mode++ = I_PRED_4x4_HU;
452 *mode++ = I_PRED_4x4_DC_TOP;
453 *mode++ = I_PRED_4x4_V;
454 *mode++ = I_PRED_4x4_DDL;
455 *mode++ = I_PRED_4x4_VL;
460 *mode++ = I_PRED_4x4_DC_128;
465 static void x264_mb_analyse_intra_chroma( x264_t *h, x264_mb_analysis_t *a )
472 uint8_t *p_dstc[2], *p_srcc[2];
474 if( a->i_satd_i8x8chroma < COST_MAX )
477 /* 8x8 prediction selection for chroma */
478 p_dstc[0] = h->mb.pic.p_fdec[1];
479 p_dstc[1] = h->mb.pic.p_fdec[2];
480 p_srcc[0] = h->mb.pic.p_fenc[1];
481 p_srcc[1] = h->mb.pic.p_fenc[2];
483 predict_8x8chroma_mode_available( h->mb.i_neighbour, predict_mode, &i_max );
484 a->i_satd_i8x8chroma = COST_MAX;
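/* Fast path: when the specialized 3-mode SATD helper exists and the mb
 * metric really is SATD, V/H/DC are scored in one call and only the planar
 * mode needs its own prediction + comparison; otherwise every candidate
 * mode is predicted and scored individually below. */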
485 if( i_max == 4 && h->pixf.intra_satd_x3_8x8c && h->pixf.mbcmp[0] == h->pixf.satd[0] )
487 int satdu[4], satdv[4];
488 h->pixf.intra_satd_x3_8x8c( p_srcc[0], p_dstc[0], satdu );
489 h->pixf.intra_satd_x3_8x8c( p_srcc[1], p_dstc[1], satdv );
490 h->predict_8x8c[I_PRED_CHROMA_P]( p_dstc[0] );
491 h->predict_8x8c[I_PRED_CHROMA_P]( p_dstc[1] );
492 satdu[I_PRED_CHROMA_P] =
493 h->pixf.mbcmp[PIXEL_8x8]( p_dstc[0], FDEC_STRIDE, p_srcc[0], FENC_STRIDE );
494 satdv[I_PRED_CHROMA_P] =
495 h->pixf.mbcmp[PIXEL_8x8]( p_dstc[1], FDEC_STRIDE, p_srcc[1], FENC_STRIDE );
497 for( i=0; i<i_max; i++ )
499 int i_mode = predict_mode[i];
500 int i_satd = satdu[i_mode] + satdv[i_mode]
501 + a->i_lambda * bs_size_ue(i_mode);
503 a->i_satd_i8x8chroma_dir[i] = i_satd;
504 COPY2_IF_LT( a->i_satd_i8x8chroma, i_satd, a->i_predict8x8chroma, i_mode );
509 for( i=0; i<i_max; i++ )
512 int i_mode = predict_mode[i];
514 /* we do the prediction */
515 h->predict_8x8c[i_mode]( p_dstc[0] );
516 h->predict_8x8c[i_mode]( p_dstc[1] );
518 /* we calculate the cost */
519 i_satd = h->pixf.mbcmp[PIXEL_8x8]( p_dstc[0], FDEC_STRIDE,
520 p_srcc[0], FENC_STRIDE ) +
521 h->pixf.mbcmp[PIXEL_8x8]( p_dstc[1], FDEC_STRIDE,
522 p_srcc[1], FENC_STRIDE ) +
523 a->i_lambda * bs_size_ue( x264_mb_pred_mode8x8c_fix[i_mode] );
525 a->i_satd_i8x8chroma_dir[i] = i_satd;
526 COPY2_IF_LT( a->i_satd_i8x8chroma, i_satd, a->i_predict8x8chroma, i_mode );
530 h->mb.i_chroma_pred_mode = a->i_predict8x8chroma;
533 static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_inter )
535 const unsigned int flags = h->sh.i_type == SLICE_TYPE_I ? h->param.analyse.intra : h->param.analyse.inter;
536 uint8_t *p_src = h->mb.pic.p_fenc[0];
537 uint8_t *p_dst = h->mb.pic.p_fdec[0];
542 int b_merged_satd = h->pixf.intra_satd_x3_16x16 && h->pixf.mbcmp[0] == h->pixf.satd[0];
544 /*--------------- Try all modes and calculate their scores ---------------*/
546 /* 16x16 prediction selection */
547 predict_16x16_mode_available( h->mb.i_neighbour, predict_mode, &i_max );
549 if( b_merged_satd && i_max == 4 )
551 h->pixf.intra_satd_x3_16x16( p_src, p_dst, a->i_satd_i16x16_dir );
552 h->predict_16x16[I_PRED_16x16_P]( p_dst );
553 a->i_satd_i16x16_dir[I_PRED_16x16_P] =
554 h->pixf.mbcmp[PIXEL_16x16]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE );
557 int cost = a->i_satd_i16x16_dir[i] += a->i_lambda * bs_size_ue(i);
558 COPY2_IF_LT( a->i_satd_i16x16, cost, a->i_predict16x16, i );
563 for( i = 0; i < i_max; i++ )
566 int i_mode = predict_mode[i];
567 h->predict_16x16[i_mode]( p_dst );
569 i_satd = h->pixf.mbcmp[PIXEL_16x16]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE ) +
570 a->i_lambda * bs_size_ue( x264_mb_pred_mode16x16_fix[i_mode] );
571 COPY2_IF_LT( a->i_satd_i16x16, i_satd, a->i_predict16x16, i_mode );
572 a->i_satd_i16x16_dir[i_mode] = i_satd;
576 if( h->sh.i_type == SLICE_TYPE_B )
577 /* cavlc mb type prefix */
578 a->i_satd_i16x16 += a->i_lambda * i_mb_b_cost_table[I_16x16];
579 if( a->b_fast_intra && a->i_satd_i16x16 > 2*i_satd_inter )
582 /* 8x8 prediction selection */
583 if( flags & X264_ANALYSE_I8x8 )
585 DECLARE_ALIGNED_16( uint8_t edge[33] );
586 x264_pixel_cmp_t sa8d = (*h->pixf.mbcmp == *h->pixf.sad) ? h->pixf.sad[PIXEL_8x8] : h->pixf.sa8d[PIXEL_8x8];
587 int i_satd_thresh = a->b_mbrd ? COST_MAX : X264_MIN( i_satd_inter, a->i_satd_i16x16 );
589 b_merged_satd = h->pixf.intra_sa8d_x3_8x8 && h->pixf.mbcmp[0] == h->pixf.satd[0];
591 // FIXME some bias like in i4x4?
592 if( h->sh.i_type == SLICE_TYPE_B )
593 i_cost += a->i_lambda * i_mb_b_cost_table[I_8x8];
595 for( idx = 0;; idx++ )
599 uint8_t *p_src_by = p_src + 8*x + 8*y*FENC_STRIDE;
600 uint8_t *p_dst_by = p_dst + 8*x + 8*y*FDEC_STRIDE;
601 int i_best = COST_MAX;
602 int i_pred_mode = x264_mb_predict_intra4x4_mode( h, 4*idx );
604 predict_4x4_mode_available( h->mb.i_neighbour8[idx], predict_mode, &i_max );
605 x264_predict_8x8_filter( p_dst_by, edge, h->mb.i_neighbour8[idx], ALL_NEIGHBORS );
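/* Mode signalling costs: the most probable mode takes about 1 bit while any
 * other mode takes 4, so the scores add 4*lambda and the predicted mode gets
 * a 3*lambda discount. */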
607 if( b_merged_satd && i_max == 9 )
610 h->pixf.intra_sa8d_x3_8x8( p_src_by, edge, satd );
611 if( i_pred_mode < 3 )
612 satd[i_pred_mode] -= 3 * a->i_lambda;
613 for( i=2; i>=0; i-- )
615 int cost = a->i_satd_i8x8_dir[i][idx] = satd[i] + 4 * a->i_lambda;
616 COPY2_IF_LT( i_best, cost, a->i_predict8x8[idx], i );
623 for( ; i<i_max; i++ )
626 int i_mode = predict_mode[i];
628 h->predict_8x8[i_mode]( p_dst_by, edge );
630 i_satd = sa8d( p_dst_by, FDEC_STRIDE, p_src_by, FENC_STRIDE )
631 + a->i_lambda * (i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) ? 1 : 4);
633 COPY2_IF_LT( i_best, i_satd, a->i_predict8x8[idx], i_mode );
634 a->i_satd_i8x8_dir[i_mode][idx] = i_satd;
638 if( idx == 3 || i_cost > i_satd_thresh )
641 /* we need to encode this block now (for next ones) */
642 h->predict_8x8[a->i_predict8x8[idx]]( p_dst_by, edge );
643 x264_mb_encode_i8x8( h, idx, a->i_qp );
645 x264_macroblock_cache_intra8x8_pred( h, 2*x, 2*y, a->i_predict8x8[idx] );
650 a->i_satd_i8x8 = i_cost;
651 if( h->mb.i_skip_intra )
653 h->mc.copy[PIXEL_16x16]( h->mb.pic.i8x8_fdec_buf, 16, p_dst, FDEC_STRIDE, 16 );
654 if( h->mb.i_skip_intra == 2 )
655 h->mc.memcpy_aligned( h->mb.pic.i8x8_dct_buf, h->dct.luma8x8, sizeof(h->mb.pic.i8x8_dct_buf) );
660 a->i_satd_i8x8 = COST_MAX;
661 i_cost = i_cost * 4/(idx+1);
663 if( X264_MIN(i_cost, a->i_satd_i16x16) > i_satd_inter*(5+a->b_mbrd)/4 )
667 /* 4x4 prediction selection */
668 if( flags & X264_ANALYSE_I4x4 )
671 int i_satd_thresh = X264_MIN3( i_satd_inter, a->i_satd_i16x16, a->i_satd_i8x8 );
672 b_merged_satd = h->pixf.intra_satd_x3_4x4 && h->pixf.mbcmp[0] == h->pixf.satd[0];
674 i_satd_thresh = i_satd_thresh * (10-a->b_fast_intra)/8;
676 i_cost = a->i_lambda * 24; /* from JVT (SATD0) */
677 if( h->sh.i_type == SLICE_TYPE_B )
678 i_cost += a->i_lambda * i_mb_b_cost_table[I_4x4];
680 for( idx = 0;; idx++ )
682 int x = block_idx_x[idx];
683 int y = block_idx_y[idx];
684 uint8_t *p_src_by = p_src + 4*x + 4*y*FENC_STRIDE;
685 uint8_t *p_dst_by = p_dst + 4*x + 4*y*FDEC_STRIDE;
686 int i_best = COST_MAX;
687 int i_pred_mode = x264_mb_predict_intra4x4_mode( h, idx );
689 predict_4x4_mode_available( h->mb.i_neighbour4[idx], predict_mode, &i_max );
691 if( (h->mb.i_neighbour4[idx] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
692 /* emulate missing topright samples */
693 *(uint32_t*) &p_dst_by[4 - FDEC_STRIDE] = p_dst_by[3 - FDEC_STRIDE] * 0x01010101U;
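/* Multiplying the last available top sample by 0x01010101 broadcasts it into
 * the four missing top-right pixels, as the spec's padding rule requires. */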
695 if( b_merged_satd && i_max >= 6 )
698 h->pixf.intra_satd_x3_4x4( p_src_by, p_dst_by, satd );
699 if( i_pred_mode < 3 )
700 satd[i_pred_mode] -= 3 * a->i_lambda;
701 for( i=2; i>=0; i-- )
702 COPY2_IF_LT( i_best, satd[i] + 4 * a->i_lambda,
703 a->i_predict4x4[idx], i );
709 for( ; i<i_max; i++ )
712 int i_mode = predict_mode[i];
714 h->predict_4x4[i_mode]( p_dst_by );
716 i_satd = h->pixf.mbcmp[PIXEL_4x4]( p_dst_by, FDEC_STRIDE,
717 p_src_by, FENC_STRIDE )
718 + a->i_lambda * (i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) ? 1 : 4);
720 COPY2_IF_LT( i_best, i_satd, a->i_predict4x4[idx], i_mode );
724 if( i_cost > i_satd_thresh || idx == 15 )
727 /* we need to encode this block now (for next ones) */
728 h->predict_4x4[a->i_predict4x4[idx]]( p_dst_by );
729 x264_mb_encode_i4x4( h, idx, a->i_qp );
731 h->mb.cache.intra4x4_pred_mode[x264_scan8[idx]] = a->i_predict4x4[idx];
735 a->i_satd_i4x4 = i_cost;
736 if( h->mb.i_skip_intra )
738 h->mc.copy[PIXEL_16x16]( h->mb.pic.i4x4_fdec_buf, 16, p_dst, FDEC_STRIDE, 16 );
739 if( h->mb.i_skip_intra == 2 )
740 h->mc.memcpy_aligned( h->mb.pic.i4x4_dct_buf, h->dct.luma4x4, sizeof(h->mb.pic.i4x4_dct_buf) );
744 a->i_satd_i4x4 = COST_MAX;
748 static void x264_intra_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd_thresh )
750 if( a->i_satd_i16x16 <= i_satd_thresh )
752 h->mb.i_type = I_16x16;
753 x264_analyse_update_cache( h, a );
754 a->i_satd_i16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
757 a->i_satd_i16x16 = COST_MAX;
759 if( a->i_satd_i4x4 <= i_satd_thresh && a->i_satd_i4x4 < COST_MAX )
761 h->mb.i_type = I_4x4;
762 x264_analyse_update_cache( h, a );
763 a->i_satd_i4x4 = x264_rd_cost_mb( h, a->i_lambda2 );
766 a->i_satd_i4x4 = COST_MAX;
768 if( a->i_satd_i8x8 <= i_satd_thresh && a->i_satd_i8x8 < COST_MAX )
770 h->mb.i_type = I_8x8;
771 x264_analyse_update_cache( h, a );
772 a->i_satd_i8x8 = x264_rd_cost_mb( h, a->i_lambda2 );
775 a->i_satd_i8x8 = COST_MAX;
778 static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a )
780 uint8_t *p_src = h->mb.pic.p_fenc[0];
781 uint8_t *p_dst = h->mb.pic.p_fdec[0];
784 int i_max, i_satd, i_best, i_mode, i_thresh;
787 h->mb.i_skip_intra = 0;
789 if( h->mb.i_type == I_16x16 )
791 int old_pred_mode = a->i_predict16x16;
792 i_thresh = a->i_satd_i16x16_dir[old_pred_mode] * 9/8;
793 i_best = a->i_satd_i16x16;
794 predict_16x16_mode_available( h->mb.i_neighbour, predict_mode, &i_max );
795 for( i = 0; i < i_max; i++ )
797 int i_mode = predict_mode[i];
798 if( i_mode == old_pred_mode || a->i_satd_i16x16_dir[i_mode] > i_thresh )
800 h->mb.i_intra16x16_pred_mode = i_mode;
801 i_satd = x264_rd_cost_mb( h, a->i_lambda2 );
802 COPY2_IF_LT( i_best, i_satd, a->i_predict16x16, i_mode );
805 else if( h->mb.i_type == I_4x4 )
807 uint32_t pels[4] = {0}; // doesn't need initializing; it just silences a gcc warning
809 for( idx = 0; idx < 16; idx++ )
815 i_pred_mode = x264_mb_predict_intra4x4_mode( h, idx );
816 x = block_idx_x[idx];
817 y = block_idx_y[idx];
819 p_src_by = p_src + 4*x + 4*y*FENC_STRIDE;
820 p_dst_by = p_dst + 4*x + 4*y*FDEC_STRIDE;
821 predict_4x4_mode_available( h->mb.i_neighbour4[idx], predict_mode, &i_max );
823 if( (h->mb.i_neighbour4[idx] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
824 /* emulate missing topright samples */
825 *(uint32_t*) &p_dst_by[4 - FDEC_STRIDE] = p_dst_by[3 - FDEC_STRIDE] * 0x01010101U;
827 for( i = 0; i < i_max; i++ )
829 i_mode = predict_mode[i];
830 h->predict_4x4[i_mode]( p_dst_by );
831 i_satd = x264_rd_cost_i4x4( h, a->i_lambda2, idx, i_mode );
833 if( i_best > i_satd )
835 a->i_predict4x4[idx] = i_mode;
837 pels[0] = *(uint32_t*)(p_dst_by+0*FDEC_STRIDE);
838 pels[1] = *(uint32_t*)(p_dst_by+1*FDEC_STRIDE);
839 pels[2] = *(uint32_t*)(p_dst_by+2*FDEC_STRIDE);
840 pels[3] = *(uint32_t*)(p_dst_by+3*FDEC_STRIDE);
841 i_nnz = h->mb.cache.non_zero_count[x264_scan8[idx]];
845 *(uint32_t*)(p_dst_by+0*FDEC_STRIDE) = pels[0];
846 *(uint32_t*)(p_dst_by+1*FDEC_STRIDE) = pels[1];
847 *(uint32_t*)(p_dst_by+2*FDEC_STRIDE) = pels[2];
848 *(uint32_t*)(p_dst_by+3*FDEC_STRIDE) = pels[3];
849 h->mb.cache.non_zero_count[x264_scan8[idx]] = i_nnz;
851 h->mb.cache.intra4x4_pred_mode[x264_scan8[idx]] = a->i_predict4x4[idx];
854 else if( h->mb.i_type == I_8x8 )
856 DECLARE_ALIGNED_16( uint8_t edge[33] );
857 for( idx = 0; idx < 4; idx++ )
865 i_thresh = a->i_satd_i8x8_dir[a->i_predict8x8[idx]][idx] * 11/8;
868 i_pred_mode = x264_mb_predict_intra4x4_mode( h, 4*idx );
872 p_src_by = p_src + 8*x + 8*y*FENC_STRIDE;
873 p_dst_by = p_dst + 8*x + 8*y*FDEC_STRIDE;
874 predict_4x4_mode_available( h->mb.i_neighbour8[idx], predict_mode, &i_max );
875 x264_predict_8x8_filter( p_dst_by, edge, h->mb.i_neighbour8[idx], ALL_NEIGHBORS );
877 for( i = 0; i < i_max; i++ )
879 i_mode = predict_mode[i];
880 if( a->i_satd_i8x8_dir[i_mode][idx] > i_thresh )
882 h->predict_8x8[i_mode]( p_dst_by, edge );
883 i_satd = x264_rd_cost_i8x8( h, a->i_lambda2, idx, i_mode );
885 if( i_best > i_satd )
887 a->i_predict8x8[idx] = i_mode;
890 pels_h = *(uint64_t*)(p_dst_by+7*FDEC_STRIDE);
893 pels_v[j] = p_dst_by[7+j*FDEC_STRIDE];
895 i_nnz[j] = h->mb.cache.non_zero_count[x264_scan8[4*idx+j+1]];
899 *(uint64_t*)(p_dst_by+7*FDEC_STRIDE) = pels_h;
902 p_dst_by[7+j*FDEC_STRIDE] = pels_v[j];
904 h->mb.cache.non_zero_count[x264_scan8[4*idx+j+1]] = i_nnz[j];
906 x264_macroblock_cache_intra8x8_pred( h, 2*x, 2*y, a->i_predict8x8[idx] );
910 /* RD selection for chroma prediction */
911 predict_8x8chroma_mode_available( h->mb.i_neighbour, predict_mode, &i_max );
914 i_thresh = a->i_satd_i8x8chroma * 5/4;
916 for( i = j = 0; i < i_max; i++ )
917 if( a->i_satd_i8x8chroma_dir[i] < i_thresh &&
918 predict_mode[i] != a->i_predict8x8chroma )
920 predict_mode[j++] = predict_mode[i];
926 int i_chroma_lambda = i_qp0_cost2_table[h->mb.i_chroma_qp];
927 /* the mode just encoded was chosen by x264_intra_rd(), so the pixels and
928 * coefs for the current chroma mode are still valid; we only
929 * have to recount the bits. */
930 i_best = x264_rd_cost_i8x8_chroma( h, i_chroma_lambda, a->i_predict8x8chroma, 0 );
931 for( i = 0; i < i_max; i++ )
933 i_mode = predict_mode[i];
934 h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
935 h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
936 /* if we've already found a mode that needs no residual, then
937 * any mode with a residual will probably be worse,
938 * so skip the dct on the remaining modes to improve speed. */
939 i_satd = x264_rd_cost_i8x8_chroma( h, i_chroma_lambda, i_mode, h->mb.i_cbp_chroma != 0x00 );
940 COPY2_IF_LT( i_best, i_satd, a->i_predict8x8chroma, i_mode );
942 h->mb.i_chroma_pred_mode = a->i_predict8x8chroma;
947 #define LOAD_FENC( m, src, xoff, yoff) \
948 (m)->i_stride[0] = h->mb.pic.i_stride[0]; \
949 (m)->i_stride[1] = h->mb.pic.i_stride[1]; \
950 (m)->p_fenc[0] = &(src)[0][(xoff)+(yoff)*FENC_STRIDE]; \
951 (m)->p_fenc[1] = &(src)[1][((xoff)>>1)+((yoff)>>1)*FENC_STRIDE]; \
952 (m)->p_fenc[2] = &(src)[2][((xoff)>>1)+((yoff)>>1)*FENC_STRIDE];
954 #define LOAD_HPELS(m, src, list, ref, xoff, yoff) \
955 (m)->p_fref[0] = &(src)[0][(xoff)+(yoff)*(m)->i_stride[0]]; \
956 (m)->p_fref[1] = &(src)[1][(xoff)+(yoff)*(m)->i_stride[0]]; \
957 (m)->p_fref[2] = &(src)[2][(xoff)+(yoff)*(m)->i_stride[0]]; \
958 (m)->p_fref[3] = &(src)[3][(xoff)+(yoff)*(m)->i_stride[0]]; \
959 (m)->p_fref[4] = &(src)[4][((xoff)>>1)+((yoff)>>1)*(m)->i_stride[1]]; \
960 (m)->p_fref[5] = &(src)[5][((xoff)>>1)+((yoff)>>1)*(m)->i_stride[1]]; \
961 (m)->integral = &h->mb.pic.p_integral[list][ref][(xoff)+(yoff)*(m)->i_stride[0]];
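/* p_fref[0..3] are the fullpel luma plane and its three halfpel
 * interpolations used by the subpel search; p_fref[4..5] are the chroma U/V
 * planes, hence the halved offsets and the chroma stride. */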
963 #define REF_COST(list, ref) \
964 (a->i_lambda * bs_size_te( h->sh.i_num_ref_idx_l##list##_active - 1, ref ))
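/* ref_idx is coded as a truncated Exp-Golomb code bounded by the number of
 * active references in the list, so the lambda-weighted bit cost depends on
 * both the index and the list's reference count. */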
966 static void x264_mb_analyse_inter_p16x16( x264_t *h, x264_mb_analysis_t *a )
970 int mvc[7][2], i_mvc;
971 int i_halfpel_thresh = INT_MAX;
972 int *p_halfpel_thresh = h->mb.pic.i_fref[0]>1 ? &i_halfpel_thresh : NULL;
974 /* 16x16 search over all ref frames */
975 m.i_pixel = PIXEL_16x16;
976 m.p_cost_mv = a->p_cost_mv;
977 LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 0 );
979 a->l0.me16x16.cost = INT_MAX;
980 for( i_ref = 0; i_ref < h->mb.pic.i_fref[0]; i_ref++ )
982 const int i_ref_cost = REF_COST( 0, i_ref );
983 i_halfpel_thresh -= i_ref_cost;
984 m.i_ref_cost = i_ref_cost;
987 /* search with ref */
988 LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 0 );
989 x264_mb_predict_mv_16x16( h, 0, i_ref, m.mvp );
990 x264_mb_predict_mv_ref16x16( h, 0, i_ref, mvc, &i_mvc );
991 x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh );
994 * SSD threshold would probably be better than SATD */
997 && m.cost-m.cost_mv < 300*a->i_lambda
998 && abs(m.mv[0]-h->mb.cache.pskip_mv[0])
999 + abs(m.mv[1]-h->mb.cache.pskip_mv[1]) <= 1
1000 && x264_macroblock_probe_pskip( h ) )
1002 h->mb.i_type = P_SKIP;
1003 x264_analyse_update_cache( h, a );
1004 assert( h->mb.cache.pskip_mv[1] <= h->mb.mv_max_spel[1] || h->param.i_threads == 1 );
1008 m.cost += i_ref_cost;
1009 i_halfpel_thresh += i_ref_cost;
1011 if( m.cost < a->l0.me16x16.cost )
1014 /* save mv for predicting neighbors */
1015 a->l0.mvc[i_ref][0][0] =
1016 h->mb.mvr[0][i_ref][h->mb.i_mb_xy][0] = m.mv[0];
1017 a->l0.mvc[i_ref][0][1] =
1018 h->mb.mvr[0][i_ref][h->mb.i_mb_xy][1] = m.mv[1];
1021 x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.me16x16.i_ref );
1022 assert( a->l0.me16x16.mv[1] <= h->mb.mv_max_spel[1] || h->param.i_threads == 1 );
1024 h->mb.i_type = P_L0;
1025 if( a->b_mbrd && a->l0.me16x16.i_ref == 0
1026 && a->l0.me16x16.mv[0] == h->mb.cache.pskip_mv[0]
1027 && a->l0.me16x16.mv[1] == h->mb.cache.pskip_mv[1] )
1029 h->mb.i_partition = D_16x16;
1030 x264_macroblock_cache_mv( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv[0], a->l0.me16x16.mv[1] );
1031 a->l0.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
1035 static void x264_mb_analyse_inter_p8x8_mixed_ref( x264_t *h, x264_mb_analysis_t *a )
1039 uint8_t **p_fenc = h->mb.pic.p_fenc;
1040 int i_halfpel_thresh = INT_MAX;
1041 int *p_halfpel_thresh = /*h->mb.pic.i_fref[0]>1 ? &i_halfpel_thresh : */NULL;
1043 int i_maxref = h->mb.pic.i_fref[0]-1;
1045 h->mb.i_partition = D_8x8;
1047 /* early termination: if 16x16 chose ref 0, then don't evaluate refs older
1048 * than those used by the neighbors */
1049 if( i_maxref > 0 && a->l0.me16x16.i_ref == 0 &&
1050 h->mb.i_mb_type_top && h->mb.i_mb_type_left )
1053 i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 - 8 - 1 ] );
1054 i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 - 8 + 0 ] );
1055 i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 - 8 + 2 ] );
1056 i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 - 8 + 4 ] );
1057 i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 + 0 - 1 ] );
1058 i_maxref = X264_MAX( i_maxref, h->mb.cache.ref[0][ X264_SCAN8_0 + 2*8 - 1 ] );
1061 for( i_ref = 0; i_ref <= i_maxref; i_ref++ )
1063 a->l0.mvc[i_ref][0][0] = h->mb.mvr[0][i_ref][h->mb.i_mb_xy][0];
1064 a->l0.mvc[i_ref][0][1] = h->mb.mvr[0][i_ref][h->mb.i_mb_xy][1];
1067 for( i = 0; i < 4; i++ )
1069 x264_me_t *l0m = &a->l0.me8x8[i];
1073 m.i_pixel = PIXEL_8x8;
1074 m.p_cost_mv = a->p_cost_mv;
1076 LOAD_FENC( &m, p_fenc, 8*x8, 8*y8 );
1077 l0m->cost = INT_MAX;
1078 for( i_ref = 0; i_ref <= i_maxref; i_ref++ )
1080 const int i_ref_cost = REF_COST( 0, i_ref );
1081 i_halfpel_thresh -= i_ref_cost;
1082 m.i_ref_cost = i_ref_cost;
1085 LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*x8, 8*y8 );
1086 x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, i_ref );
1087 x264_mb_predict_mv( h, 0, 4*i, 2, m.mvp );
1088 x264_me_search_ref( h, &m, a->l0.mvc[i_ref], i+1, p_halfpel_thresh );
1090 m.cost += i_ref_cost;
1091 i_halfpel_thresh += i_ref_cost;
1092 *(uint64_t*)a->l0.mvc[i_ref][i+1] = *(uint64_t*)m.mv;
1094 if( m.cost < l0m->cost )
1097 x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, l0m->mv[0], l0m->mv[1] );
1098 x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, l0m->i_ref );
1101 l0m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
1104 a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost +
1105 a->l0.me8x8[2].cost + a->l0.me8x8[3].cost;
1106 h->mb.i_sub_partition[0] = h->mb.i_sub_partition[1] =
1107 h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8;
1110 static void x264_mb_analyse_inter_p8x8( x264_t *h, x264_mb_analysis_t *a )
1112 const int i_ref = a->l0.me16x16.i_ref;
1113 const int i_ref_cost = REF_COST( 0, i_ref );
1114 uint8_t **p_fref = h->mb.pic.p_fref[0][i_ref];
1115 uint8_t **p_fenc = h->mb.pic.p_fenc;
1117 int (*mvc)[2] = a->l0.mvc[i_ref];
1120 /* XXX Needed for x264_mb_predict_mv */
1121 h->mb.i_partition = D_8x8;
1124 *(uint64_t*)mvc[0] = *(uint64_t*)a->l0.me16x16.mv;
1126 for( i = 0; i < 4; i++ )
1128 x264_me_t *m = &a->l0.me8x8[i];
1132 m->i_pixel = PIXEL_8x8;
1133 m->p_cost_mv = a->p_cost_mv;
1134 m->i_ref_cost = i_ref_cost;
1137 LOAD_FENC( m, p_fenc, 8*x8, 8*y8 );
1138 LOAD_HPELS( m, p_fref, 0, i_ref, 8*x8, 8*y8 );
1139 x264_mb_predict_mv( h, 0, 4*i, 2, m->mvp );
1140 x264_me_search( h, m, mvc, i_mvc );
1142 x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, 0, m->mv[0], m->mv[1] );
1144 *(uint64_t*)mvc[i_mvc] = *(uint64_t*)m->mv;
1148 m->cost += i_ref_cost;
1149 m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
1152 /* theoretically this should include 4*ref_cost,
1153 * but 3 seems a better approximation of cabac. */
1154 a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost +
1155 a->l0.me8x8[2].cost + a->l0.me8x8[3].cost -
1156 REF_COST( 0, a->l0.me16x16.i_ref );
1157 h->mb.i_sub_partition[0] = h->mb.i_sub_partition[1] =
1158 h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8;
1161 static void x264_mb_analyse_inter_p16x8( x264_t *h, x264_mb_analysis_t *a )
1164 uint8_t **p_fenc = h->mb.pic.p_fenc;
1165 DECLARE_ALIGNED_8( int mvc[3][2] );
1168 /* XXX Needed for x264_mb_predict_mv */
1169 h->mb.i_partition = D_16x8;
1171 for( i = 0; i < 2; i++ )
1173 x264_me_t *l0m = &a->l0.me16x8[i];
1174 const int ref8[2] = { a->l0.me8x8[2*i].i_ref, a->l0.me8x8[2*i+1].i_ref };
1175 const int i_ref8s = ( ref8[0] == ref8[1] ) ? 1 : 2;
1177 m.i_pixel = PIXEL_16x8;
1178 m.p_cost_mv = a->p_cost_mv;
1180 LOAD_FENC( &m, p_fenc, 0, 8*i );
1181 l0m->cost = INT_MAX;
1182 for( j = 0; j < i_ref8s; j++ )
1184 const int i_ref = ref8[j];
1185 const int i_ref_cost = REF_COST( 0, i_ref );
1186 m.i_ref_cost = i_ref_cost;
1189 /* if we skipped the 16x16 predictor, we wouldn't have to copy anything... */
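/* Seed the search with the 16x16 mv plus the two 8x8 mvs covering this half
 * of the macroblock. */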
1190 *(uint64_t*)mvc[0] = *(uint64_t*)a->l0.mvc[i_ref][0];
1191 *(uint64_t*)mvc[1] = *(uint64_t*)a->l0.mvc[i_ref][2*i+1];
1192 *(uint64_t*)mvc[2] = *(uint64_t*)a->l0.mvc[i_ref][2*i+2];
1194 LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 8*i );
1195 x264_macroblock_cache_ref( h, 0, 2*i, 4, 2, 0, i_ref );
1196 x264_mb_predict_mv( h, 0, 8*i, 4, m.mvp );
1197 x264_me_search( h, &m, mvc, 3 );
1199 m.cost += i_ref_cost;
1201 if( m.cost < l0m->cost )
1204 x264_macroblock_cache_mv( h, 0, 2*i, 4, 2, 0, l0m->mv[0], l0m->mv[1] );
1205 x264_macroblock_cache_ref( h, 0, 2*i, 4, 2, 0, l0m->i_ref );
1208 a->l0.i_cost16x8 = a->l0.me16x8[0].cost + a->l0.me16x8[1].cost;
1211 static void x264_mb_analyse_inter_p8x16( x264_t *h, x264_mb_analysis_t *a )
1214 uint8_t **p_fenc = h->mb.pic.p_fenc;
1215 DECLARE_ALIGNED_8( int mvc[3][2] );
1218 /* XXX Needed for x264_mb_predict_mv */
1219 h->mb.i_partition = D_8x16;
1221 for( i = 0; i < 2; i++ )
1223 x264_me_t *l0m = &a->l0.me8x16[i];
1224 const int ref8[2] = { a->l0.me8x8[i].i_ref, a->l0.me8x8[i+2].i_ref };
1225 const int i_ref8s = ( ref8[0] == ref8[1] ) ? 1 : 2;
1227 m.i_pixel = PIXEL_8x16;
1228 m.p_cost_mv = a->p_cost_mv;
1230 LOAD_FENC( &m, p_fenc, 8*i, 0 );
1231 l0m->cost = INT_MAX;
1232 for( j = 0; j < i_ref8s; j++ )
1234 const int i_ref = ref8[j];
1235 const int i_ref_cost = REF_COST( 0, i_ref );
1236 m.i_ref_cost = i_ref_cost;
1239 *(uint64_t*)mvc[0] = *(uint64_t*)a->l0.mvc[i_ref][0];
1240 *(uint64_t*)mvc[1] = *(uint64_t*)a->l0.mvc[i_ref][i+1];
1241 *(uint64_t*)mvc[2] = *(uint64_t*)a->l0.mvc[i_ref][i+3];
1243 LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*i, 0 );
1244 x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, 0, i_ref );
1245 x264_mb_predict_mv( h, 0, 4*i, 2, m.mvp );
1246 x264_me_search( h, &m, mvc, 3 );
1248 m.cost += i_ref_cost;
1250 if( m.cost < l0m->cost )
1253 x264_macroblock_cache_mv( h, 2*i, 0, 2, 4, 0, l0m->mv[0], l0m->mv[1] );
1254 x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, 0, l0m->i_ref );
1257 a->l0.i_cost8x16 = a->l0.me8x16[0].cost + a->l0.me8x16[1].cost;
1260 static int x264_mb_analyse_inter_p4x4_chroma( x264_t *h, x264_mb_analysis_t *a, uint8_t **p_fref, int i8x8, int pixel )
1262 DECLARE_ALIGNED_8( uint8_t pix1[16*8] );
1263 uint8_t *pix2 = pix1+8;
1264 const int i_stride = h->mb.pic.i_stride[1];
1265 const int or = 4*(i8x8&1) + 2*(i8x8&2)*i_stride;
1266 const int oe = 4*(i8x8&1) + 2*(i8x8&2)*FENC_STRIDE;
1268 #define CHROMA4x4MC( width, height, me, x, y ) \
1269 h->mc.mc_chroma( &pix1[x+y*16], 16, &p_fref[4][or+x+y*i_stride], i_stride, (me).mv[0], (me).mv[1], width, height ); \
1270 h->mc.mc_chroma( &pix2[x+y*16], 16, &p_fref[5][or+x+y*i_stride], i_stride, (me).mv[0], (me).mv[1], width, height );
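/* 4:2:0 chroma is half resolution in each direction, so each luma
 * subpartition maps to a chroma block of half the width and height; the luma
 * mv is passed straight to mc_chroma, which performs the finer chroma
 * interpolation internally. */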
1272 if( pixel == PIXEL_4x4 )
1274 CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][0], 0,0 );
1275 CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][1], 2,0 );
1276 CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][2], 0,2 );
1277 CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][3], 2,2 );
1279 else if( pixel == PIXEL_8x4 )
1281 CHROMA4x4MC( 4,2, a->l0.me8x4[i8x8][0], 0,0 );
1282 CHROMA4x4MC( 4,2, a->l0.me8x4[i8x8][1], 0,2 );
1286 CHROMA4x4MC( 2,4, a->l0.me4x8[i8x8][0], 0,0 );
1287 CHROMA4x4MC( 2,4, a->l0.me4x8[i8x8][1], 2,0 );
1290 return h->pixf.mbcmp[PIXEL_4x4]( &h->mb.pic.p_fenc[1][oe], FENC_STRIDE, pix1, 16 )
1291 + h->pixf.mbcmp[PIXEL_4x4]( &h->mb.pic.p_fenc[2][oe], FENC_STRIDE, pix2, 16 );
1294 static void x264_mb_analyse_inter_p4x4( x264_t *h, x264_mb_analysis_t *a, int i8x8 )
1296 uint8_t **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref];
1297 uint8_t **p_fenc = h->mb.pic.p_fenc;
1298 const int i_ref = a->l0.me8x8[i8x8].i_ref;
1301 /* XXX Needed for x264_mb_predict_mv */
1302 h->mb.i_partition = D_8x8;
1304 for( i4x4 = 0; i4x4 < 4; i4x4++ )
1306 const int idx = 4*i8x8 + i4x4;
1307 const int x4 = block_idx_x[idx];
1308 const int y4 = block_idx_y[idx];
1309 const int i_mvc = (i4x4 == 0);
1311 x264_me_t *m = &a->l0.me4x4[i8x8][i4x4];
1313 m->i_pixel = PIXEL_4x4;
1314 m->p_cost_mv = a->p_cost_mv;
1316 LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
1317 LOAD_HPELS( m, p_fref, 0, i_ref, 4*x4, 4*y4 );
1319 x264_mb_predict_mv( h, 0, idx, 1, m->mvp );
1320 x264_me_search( h, m, &a->l0.me8x8[i8x8].mv, i_mvc );
1322 x264_macroblock_cache_mv( h, x4, y4, 1, 1, 0, m->mv[0], m->mv[1] );
1324 a->l0.i_cost4x4[i8x8] = a->l0.me4x4[i8x8][0].cost +
1325 a->l0.me4x4[i8x8][1].cost +
1326 a->l0.me4x4[i8x8][2].cost +
1327 a->l0.me4x4[i8x8][3].cost +
1328 REF_COST( 0, i_ref ) +
1329 a->i_lambda * i_sub_mb_p_cost_table[D_L0_4x4];
1330 if( h->mb.b_chroma_me )
1331 a->l0.i_cost4x4[i8x8] += x264_mb_analyse_inter_p4x4_chroma( h, a, p_fref, i8x8, PIXEL_4x4 );
1334 static void x264_mb_analyse_inter_p8x4( x264_t *h, x264_mb_analysis_t *a, int i8x8 )
1336 uint8_t **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref];
1337 uint8_t **p_fenc = h->mb.pic.p_fenc;
1338 const int i_ref = a->l0.me8x8[i8x8].i_ref;
1341 /* XXX Needed for x264_mb_predict_mv */
1342 h->mb.i_partition = D_8x8;
1344 for( i8x4 = 0; i8x4 < 2; i8x4++ )
1346 const int idx = 4*i8x8 + 2*i8x4;
1347 const int x4 = block_idx_x[idx];
1348 const int y4 = block_idx_y[idx];
1349 const int i_mvc = (i8x4 == 0);
1351 x264_me_t *m = &a->l0.me8x4[i8x8][i8x4];
1353 m->i_pixel = PIXEL_8x4;
1354 m->p_cost_mv = a->p_cost_mv;
1356 LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
1357 LOAD_HPELS( m, p_fref, 0, i_ref, 4*x4, 4*y4 );
1359 x264_mb_predict_mv( h, 0, idx, 2, m->mvp );
1360 x264_me_search( h, m, &a->l0.me4x4[i8x8][0].mv, i_mvc );
1362 x264_macroblock_cache_mv( h, x4, y4, 2, 1, 0, m->mv[0], m->mv[1] );
1364 a->l0.i_cost8x4[i8x8] = a->l0.me8x4[i8x8][0].cost + a->l0.me8x4[i8x8][1].cost +
1365 REF_COST( 0, i_ref ) +
1366 a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x4];
1367 if( h->mb.b_chroma_me )
1368 a->l0.i_cost8x4[i8x8] += x264_mb_analyse_inter_p4x4_chroma( h, a, p_fref, i8x8, PIXEL_8x4 );
1371 static void x264_mb_analyse_inter_p4x8( x264_t *h, x264_mb_analysis_t *a, int i8x8 )
1373 uint8_t **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref];
1374 uint8_t **p_fenc = h->mb.pic.p_fenc;
1375 const int i_ref = a->l0.me8x8[i8x8].i_ref;
1378 /* XXX Needed for x264_mb_predict_mv */
1379 h->mb.i_partition = D_8x8;
1381 for( i4x8 = 0; i4x8 < 2; i4x8++ )
1383 const int idx = 4*i8x8 + i4x8;
1384 const int x4 = block_idx_x[idx];
1385 const int y4 = block_idx_y[idx];
1386 const int i_mvc = (i4x8 == 0);
1388 x264_me_t *m = &a->l0.me4x8[i8x8][i4x8];
1390 m->i_pixel = PIXEL_4x8;
1391 m->p_cost_mv = a->p_cost_mv;
1393 LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
1394 LOAD_HPELS( m, p_fref, 0, i_ref, 4*x4, 4*y4 );
1396 x264_mb_predict_mv( h, 0, idx, 1, m->mvp );
1397 x264_me_search( h, m, &a->l0.me4x4[i8x8][0].mv, i_mvc );
1399 x264_macroblock_cache_mv( h, x4, y4, 1, 2, 0, m->mv[0], m->mv[1] );
1401 a->l0.i_cost4x8[i8x8] = a->l0.me4x8[i8x8][0].cost + a->l0.me4x8[i8x8][1].cost +
1402 REF_COST( 0, i_ref ) +
1403 a->i_lambda * i_sub_mb_p_cost_table[D_L0_4x8];
1404 if( h->mb.b_chroma_me )
1405 a->l0.i_cost4x8[i8x8] += x264_mb_analyse_inter_p4x4_chroma( h, a, p_fref, i8x8, PIXEL_4x8 );
1408 static void x264_mb_analyse_inter_direct( x264_t *h, x264_mb_analysis_t *a )
1410 /* Assumes that fdec still contains the results of
1411 * x264_mb_predict_mv_direct16x16 and x264_mb_mc */
1413 uint8_t **p_fenc = h->mb.pic.p_fenc;
1414 uint8_t **p_fdec = h->mb.pic.p_fdec;
1417 a->i_cost16x16direct = a->i_lambda * i_mb_b_cost_table[B_DIRECT];
1418 for( i = 0; i < 4; i++ )
1420 const int x = (i&1)*8;
1421 const int y = (i>>1)*8;
1422 a->i_cost16x16direct +=
1423 a->i_cost8x8direct[i] =
1424 h->pixf.mbcmp[PIXEL_8x8]( &p_fenc[0][x+y*FENC_STRIDE], FENC_STRIDE, &p_fdec[0][x+y*FDEC_STRIDE], FDEC_STRIDE );
1427 a->i_cost8x8direct[i] += a->i_lambda * i_sub_mb_b_cost_table[D_DIRECT_8x8];
1431 #define WEIGHTED_AVG( size, pix1, stride1, src2, stride2 ) \
1433 if( h->param.analyse.b_weighted_bipred ) \
1434 h->mc.avg_weight[size]( pix1, stride1, src2, stride2, \
1435 h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] ); \
1437 h->mc.avg[size]( pix1, stride1, src2, stride2 ); \
1440 static void x264_mb_analyse_inter_b16x16( x264_t *h, x264_mb_analysis_t *a )
1442 DECLARE_ALIGNED_16( uint8_t pix1[16*16] );
1443 DECLARE_ALIGNED_16( uint8_t pix2[16*16] );
1450 int mvc[8][2], i_mvc;
1451 int i_halfpel_thresh = INT_MAX;
1452 int *p_halfpel_thresh = h->mb.pic.i_fref[0]>1 ? &i_halfpel_thresh : NULL;
1454 /* 16x16 search over all ref frames */
1455 m.i_pixel = PIXEL_16x16;
1456 m.p_cost_mv = a->p_cost_mv;
1457 LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 0 );
1460 a->l0.me16x16.cost = INT_MAX;
1461 for( i_ref = 0; i_ref < h->mb.pic.i_fref[0]; i_ref++ )
1463 /* search with ref */
1464 LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 0 );
1465 x264_mb_predict_mv_16x16( h, 0, i_ref, m.mvp );
1466 x264_mb_predict_mv_ref16x16( h, 0, i_ref, mvc, &i_mvc );
1467 x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh );
1470 m.cost += REF_COST( 0, i_ref );
1472 if( m.cost < a->l0.me16x16.cost )
1474 a->l0.i_ref = i_ref;
1478 /* save mv for predicting neighbors */
1479 h->mb.mvr[0][i_ref][h->mb.i_mb_xy][0] = m.mv[0];
1480 h->mb.mvr[0][i_ref][h->mb.i_mb_xy][1] = m.mv[1];
1482 /* subtract ref cost, so we don't have to add it for the other MB types */
1483 a->l0.me16x16.cost -= REF_COST( 0, a->l0.i_ref );
1486 i_halfpel_thresh = INT_MAX;
1487 p_halfpel_thresh = h->mb.pic.i_fref[1]>1 ? &i_halfpel_thresh : NULL;
1488 a->l1.me16x16.cost = INT_MAX;
1489 for( i_ref = 0; i_ref < h->mb.pic.i_fref[1]; i_ref++ )
1491 /* search with ref */
1492 LOAD_HPELS( &m, h->mb.pic.p_fref[1][i_ref], 1, i_ref, 0, 0 );
1493 x264_mb_predict_mv_16x16( h, 1, i_ref, m.mvp );
1494 x264_mb_predict_mv_ref16x16( h, 1, i_ref, mvc, &i_mvc );
1495 x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh );
1498 m.cost += REF_COST( 1, i_ref );
1500 if( m.cost < a->l1.me16x16.cost )
1502 a->l1.i_ref = i_ref;
1506 /* save mv for predicting neighbors */
1507 h->mb.mvr[1][i_ref][h->mb.i_mb_xy][0] = m.mv[0];
1508 h->mb.mvr[1][i_ref][h->mb.i_mb_xy][1] = m.mv[1];
1510 /* subtract ref cost, so we don't have to add it for the other MB types */
1511 a->l1.me16x16.cost -= REF_COST( 1, a->l1.i_ref );
1513 /* Set global ref, needed for other modes? */
1514 x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.i_ref );
1515 x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, a->l1.i_ref );
1517 /* get cost of BI mode */
1518 weight = h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref];
1519 if ( ((a->l0.me16x16.mv[0] | a->l0.me16x16.mv[1]) & 1) == 0 )
1521 /* l0 reference is halfpel, so get_ref on it will make it faster */
1523 h->mc.get_ref( pix2, &stride2,
1524 h->mb.pic.p_fref[0][a->l0.i_ref], h->mb.pic.i_stride[0],
1525 a->l0.me16x16.mv[0], a->l0.me16x16.mv[1],
1527 h->mc.mc_luma( pix1, 16,
1528 h->mb.pic.p_fref[1][a->l1.i_ref], h->mb.pic.i_stride[0],
1529 a->l1.me16x16.mv[0], a->l1.me16x16.mv[1],
1531 weight = 64 - weight;
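/* bipred weights always sum to 64; since this branch loads the two
 * references into swapped buffers, the complementary weight is applied. */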
1535 /* if l0 was qpel, we'll use get_ref on l1 instead */
1536 h->mc.mc_luma( pix1, 16,
1537 h->mb.pic.p_fref[0][a->l0.i_ref], h->mb.pic.i_stride[0],
1538 a->l0.me16x16.mv[0], a->l0.me16x16.mv[1],
1541 h->mc.get_ref( pix2, &stride2,
1542 h->mb.pic.p_fref[1][a->l1.i_ref], h->mb.pic.i_stride[0],
1543 a->l1.me16x16.mv[0], a->l1.me16x16.mv[1],
1547 if( h->param.analyse.b_weighted_bipred )
1548 h->mc.avg_weight[PIXEL_16x16]( pix1, 16, src2, stride2, weight );
1550 h->mc.avg[PIXEL_16x16]( pix1, 16, src2, stride2 );
1552 a->i_cost16x16bi = h->pixf.mbcmp[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE, pix1, 16 )
1553 + REF_COST( 0, a->l0.i_ref )
1554 + REF_COST( 1, a->l1.i_ref )
1555 + a->l0.me16x16.cost_mv
1556 + a->l1.me16x16.cost_mv;
1559 a->i_cost16x16bi += a->i_lambda * i_mb_b_cost_table[B_BI_BI];
1560 a->l0.me16x16.cost += a->i_lambda * i_mb_b_cost_table[B_L0_L0];
1561 a->l1.me16x16.cost += a->i_lambda * i_mb_b_cost_table[B_L1_L1];
1564 static inline void x264_mb_cache_mv_p8x8( x264_t *h, x264_mb_analysis_t *a, int i )
1566 const int x = 2*(i%2);
1567 const int y = 2*(i/2);
1569 switch( h->mb.i_sub_partition[i] )
1572 x264_macroblock_cache_mv( h, x, y, 2, 2, 0, a->l0.me8x8[i].mv[0], a->l0.me8x8[i].mv[1] );
1575 x264_macroblock_cache_mv( h, x, y+0, 2, 1, 0, a->l0.me8x4[i][0].mv[0], a->l0.me8x4[i][0].mv[1] );
1576 x264_macroblock_cache_mv( h, x, y+1, 2, 1, 0, a->l0.me8x4[i][1].mv[0], a->l0.me8x4[i][1].mv[1] );
1579 x264_macroblock_cache_mv( h, x+0, y, 1, 2, 0, a->l0.me4x8[i][0].mv[0], a->l0.me4x8[i][0].mv[1] );
1580 x264_macroblock_cache_mv( h, x+1, y, 1, 2, 0, a->l0.me4x8[i][1].mv[0], a->l0.me4x8[i][1].mv[1] );
1583 x264_macroblock_cache_mv( h, x+0, y+0, 1, 1, 0, a->l0.me4x4[i][0].mv[0], a->l0.me4x4[i][0].mv[1] );
1584 x264_macroblock_cache_mv( h, x+1, y+0, 1, 1, 0, a->l0.me4x4[i][1].mv[0], a->l0.me4x4[i][1].mv[1] );
1585 x264_macroblock_cache_mv( h, x+0, y+1, 1, 1, 0, a->l0.me4x4[i][2].mv[0], a->l0.me4x4[i][2].mv[1] );
1586 x264_macroblock_cache_mv( h, x+1, y+1, 1, 1, 0, a->l0.me4x4[i][3].mv[0], a->l0.me4x4[i][3].mv[1] );
1589 x264_log( h, X264_LOG_ERROR, "internal error\n" );
1594 #define CACHE_MV_BI(x,y,dx,dy,me0,me1,part) \
1595 if( x264_mb_partition_listX_table[0][part] ) \
1597 x264_macroblock_cache_ref( h, x,y,dx,dy, 0, a->l0.i_ref ); \
1598 x264_macroblock_cache_mv( h, x,y,dx,dy, 0, me0.mv[0], me0.mv[1] ); \
1602 x264_macroblock_cache_ref( h, x,y,dx,dy, 0, -1 ); \
1603 x264_macroblock_cache_mv( h, x,y,dx,dy, 0, 0, 0 ); \
1605 x264_macroblock_cache_mvd( h, x,y,dx,dy, 0, 0, 0 ); \
1607 if( x264_mb_partition_listX_table[1][part] ) \
1609 x264_macroblock_cache_ref( h, x,y,dx,dy, 1, a->l1.i_ref ); \
1610 x264_macroblock_cache_mv( h, x,y,dx,dy, 1, me1.mv[0], me1.mv[1] ); \
1614 x264_macroblock_cache_ref( h, x,y,dx,dy, 1, -1 ); \
1615 x264_macroblock_cache_mv( h, x,y,dx,dy, 1, 0, 0 ); \
1617 x264_macroblock_cache_mvd( h, x,y,dx,dy, 1, 0, 0 ); \
1620 static inline void x264_mb_cache_mv_b8x8( x264_t *h, x264_mb_analysis_t *a, int i, int b_mvd )
1624 if( h->mb.i_sub_partition[i] == D_DIRECT_8x8 )
1626 x264_mb_load_mv_direct8x8( h, i );
1629 x264_macroblock_cache_mvd( h, x, y, 2, 2, 0, 0, 0 );
1630 x264_macroblock_cache_mvd( h, x, y, 2, 2, 1, 0, 0 );
1631 x264_macroblock_cache_skip( h, x, y, 2, 2, 1 );
1636 CACHE_MV_BI( x, y, 2, 2, a->l0.me8x8[i], a->l1.me8x8[i], h->mb.i_sub_partition[i] );
1639 static inline void x264_mb_cache_mv_b16x8( x264_t *h, x264_mb_analysis_t *a, int i, int b_mvd )
1641 CACHE_MV_BI( 0, 2*i, 4, 2, a->l0.me16x8[i], a->l1.me16x8[i], a->i_mb_partition16x8[i] );
1643 static inline void x264_mb_cache_mv_b8x16( x264_t *h, x264_mb_analysis_t *a, int i, int b_mvd )
1645 CACHE_MV_BI( 2*i, 0, 2, 4, a->l0.me8x16[i], a->l1.me8x16[i], a->i_mb_partition8x16[i] );
1649 static void x264_mb_analyse_inter_b8x8( x264_t *h, x264_mb_analysis_t *a )
1651 uint8_t **p_fref[2] =
1652 { h->mb.pic.p_fref[0][a->l0.i_ref],
1653 h->mb.pic.p_fref[1][a->l1.i_ref] };
1654 DECLARE_ALIGNED_8( uint8_t pix[2][8*8] );
1657 /* XXX Needed for x264_mb_predict_mv */
1658 h->mb.i_partition = D_8x8;
1662 for( i = 0; i < 4; i++ )
1667 int i_part_cost_bi = 0;
1669 for( l = 0; l < 2; l++ )
1671 x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0;
1672 x264_me_t *m = &lX->me8x8[i];
1674 m->i_pixel = PIXEL_8x8;
1675 m->p_cost_mv = a->p_cost_mv;
1677 LOAD_FENC( m, h->mb.pic.p_fenc, 8*x8, 8*y8 );
1678 LOAD_HPELS( m, p_fref[l], l, lX->i_ref, 8*x8, 8*y8 );
1680 x264_mb_predict_mv( h, l, 4*i, 2, m->mvp );
1681 x264_me_search( h, m, &lX->me16x16.mv, 1 );
1683 x264_macroblock_cache_mv( h, 2*x8, 2*y8, 2, 2, l, m->mv[0], m->mv[1] );
1686 h->mc.mc_luma( pix[l], 8, m->p_fref, m->i_stride[0],
1687 m->mv[0], m->mv[1], 8, 8 );
1688 i_part_cost_bi += m->cost_mv;
1689 /* FIXME: ref cost */
1692 WEIGHTED_AVG( PIXEL_8x8, pix[0], 8, pix[1], 8 );
1693 i_part_cost_bi += h->pixf.mbcmp[PIXEL_8x8]( a->l0.me8x8[i].p_fenc[0], FENC_STRIDE, pix[0], 8 )
1694 + a->i_lambda * i_sub_mb_b_cost_table[D_BI_8x8];
1695 a->l0.me8x8[i].cost += a->i_lambda * i_sub_mb_b_cost_table[D_L0_8x8];
1696 a->l1.me8x8[i].cost += a->i_lambda * i_sub_mb_b_cost_table[D_L1_8x8];
1698 i_part_cost = a->l0.me8x8[i].cost;
1699 h->mb.i_sub_partition[i] = D_L0_8x8;
1700 COPY2_IF_LT( i_part_cost, a->l1.me8x8[i].cost, h->mb.i_sub_partition[i], D_L1_8x8 );
1701 COPY2_IF_LT( i_part_cost, i_part_cost_bi, h->mb.i_sub_partition[i], D_BI_8x8 );
1702 COPY2_IF_LT( i_part_cost, a->i_cost8x8direct[i], h->mb.i_sub_partition[i], D_DIRECT_8x8 );
1703 a->i_cost8x8bi += i_part_cost;
1705 /* XXX Needed for x264_mb_predict_mv */
1706 x264_mb_cache_mv_b8x8( h, a, i, 0 );
1710 a->i_cost8x8bi += a->i_lambda * i_mb_b_cost_table[B_8x8];
1713 static void x264_mb_analyse_inter_b16x8( x264_t *h, x264_mb_analysis_t *a )
1715 uint8_t **p_fref[2] =
1716 { h->mb.pic.p_fref[0][a->l0.i_ref],
1717 h->mb.pic.p_fref[1][a->l1.i_ref] };
1718 DECLARE_ALIGNED_16( uint8_t pix[2][16*8] );
1719 DECLARE_ALIGNED_8( int mvc[2][2] );
1722 h->mb.i_partition = D_16x8;
1723 a->i_cost16x8bi = 0;
1725 for( i = 0; i < 2; i++ )
1728 int i_part_cost_bi = 0;
1730 /* TODO: check only the list(s) that were used in b8x8? */
1731 for( l = 0; l < 2; l++ )
1733 x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0;
1734 x264_me_t *m = &lX->me16x8[i];
1736 m->i_pixel = PIXEL_16x8;
1737 m->p_cost_mv = a->p_cost_mv;
1739 LOAD_FENC( m, h->mb.pic.p_fenc, 0, 8*i );
1740 LOAD_HPELS( m, p_fref[l], l, lX->i_ref, 0, 8*i );
1742 *(uint64_t*)mvc[0] = *(uint64_t*)lX->me8x8[2*i].mv;
1743 *(uint64_t*)mvc[1] = *(uint64_t*)lX->me8x8[2*i+1].mv;
1745 x264_mb_predict_mv( h, l, 8*i, 2, m->mvp );
1746 x264_me_search( h, m, mvc, 2 );
1749 h->mc.mc_luma( pix[l], 16, m->p_fref, m->i_stride[0],
1750 m->mv[0], m->mv[1], 16, 8 );
1751 /* FIXME: ref cost */
1752 i_part_cost_bi += m->cost_mv;
1755 WEIGHTED_AVG( PIXEL_16x8, pix[0], 16, pix[1], 16 );
1756 i_part_cost_bi += h->pixf.mbcmp[PIXEL_16x8]( a->l0.me16x8[i].p_fenc[0], FENC_STRIDE, pix[0], 16 );
1758 i_part_cost = a->l0.me16x8[i].cost;
1759 a->i_mb_partition16x8[i] = D_L0_8x8; /* not actually 8x8, only the L0 matters */
1760 if( a->l1.me16x8[i].cost < i_part_cost )
1762 i_part_cost = a->l1.me16x8[i].cost;
1763 a->i_mb_partition16x8[i] = D_L1_8x8;
1765 if( i_part_cost_bi + a->i_lambda * 1 < i_part_cost )
1767 i_part_cost = i_part_cost_bi;
1768 a->i_mb_partition16x8[i] = D_BI_8x8;
1770 a->i_cost16x8bi += i_part_cost;
1772 x264_mb_cache_mv_b16x8( h, a, i, 0 );
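/* Each half's choice maps to 0 (L0), 1 (L1) or 2 (BI) via the >>2, and the
 * 3x3 grid of combinations indexes the B_L0_L0..B_BI_BI mb types. */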
1776 a->i_mb_type16x8 = B_L0_L0
1777 + (a->i_mb_partition16x8[0]>>2) * 3
1778 + (a->i_mb_partition16x8[1]>>2);
1779 a->i_cost16x8bi += a->i_lambda * i_mb_b16x8_cost_table[a->i_mb_type16x8];
1782 static void x264_mb_analyse_inter_b8x16( x264_t *h, x264_mb_analysis_t *a )
1784 uint8_t **p_fref[2] =
1785 { h->mb.pic.p_fref[0][a->l0.i_ref],
1786 h->mb.pic.p_fref[1][a->l1.i_ref] };
1787 DECLARE_ALIGNED_8( uint8_t pix[2][8*16] );
1788 DECLARE_ALIGNED_8( int mvc[2][2] );
1791 h->mb.i_partition = D_8x16;
1792 a->i_cost8x16bi = 0;
1794 for( i = 0; i < 2; i++ )
1797 int i_part_cost_bi = 0;
1799 for( l = 0; l < 2; l++ )
1801 x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0;
1802 x264_me_t *m = &lX->me8x16[i];
1804 m->i_pixel = PIXEL_8x16;
1805 m->p_cost_mv = a->p_cost_mv;
1807 LOAD_FENC( m, h->mb.pic.p_fenc, 8*i, 0 );
1808 LOAD_HPELS( m, p_fref[l], l, lX->i_ref, 8*i, 0 );
1810 *(uint64_t*)mvc[0] = *(uint64_t*)lX->me8x8[i].mv;
1811 *(uint64_t*)mvc[1] = *(uint64_t*)lX->me8x8[i+2].mv;
1813 x264_mb_predict_mv( h, l, 4*i, 2, m->mvp );
1814 x264_me_search( h, m, mvc, 2 );
1817 h->mc.mc_luma( pix[l], 8, m->p_fref, m->i_stride[0],
1818 m->mv[0], m->mv[1], 8, 16 );
1819 /* FIXME: ref cost */
1820 i_part_cost_bi += m->cost_mv;
1823 WEIGHTED_AVG( PIXEL_8x16, pix[0], 8, pix[1], 8 );
1824 i_part_cost_bi += h->pixf.mbcmp[PIXEL_8x16]( a->l0.me8x16[i].p_fenc[0], FENC_STRIDE, pix[0], 8 );
1826 i_part_cost = a->l0.me8x16[i].cost;
1827 a->i_mb_partition8x16[i] = D_L0_8x8;
1828 if( a->l1.me8x16[i].cost < i_part_cost )
1830 i_part_cost = a->l1.me8x16[i].cost;
1831 a->i_mb_partition8x16[i] = D_L1_8x8;
1833 if( i_part_cost_bi + a->i_lambda * 1 < i_part_cost )
1835 i_part_cost = i_part_cost_bi;
1836 a->i_mb_partition8x16[i] = D_BI_8x8;
1838 a->i_cost8x16bi += i_part_cost;
1840 x264_mb_cache_mv_b8x16( h, a, i, 0 );
1844 a->i_mb_type8x16 = B_L0_L0
1845 + (a->i_mb_partition8x16[0]>>2) * 3
1846 + (a->i_mb_partition8x16[1]>>2);
1847 a->i_cost8x16bi += a->i_lambda * i_mb_b16x8_cost_table[a->i_mb_type8x16];
1850 static void x264_mb_analyse_p_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd )
1852 int thresh = i_satd * 5/4;
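/* Only partitions whose SATD-based cost came within 25% of the best score
 * are re-evaluated with full rate-distortion; the rest are set to COST_MAX
 * so they can't win the final comparison. */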
1854 h->mb.i_type = P_L0;
1855 if( a->l0.i_rd16x16 == COST_MAX && a->l0.me16x16.cost <= i_satd * 3/2 )
1857 h->mb.i_partition = D_16x16;
1858 x264_analyse_update_cache( h, a );
1859 a->l0.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
1861 a->l0.me16x16.cost = a->l0.i_rd16x16;
1863 if( a->l0.i_cost16x8 <= thresh )
1865 h->mb.i_partition = D_16x8;
1866 x264_analyse_update_cache( h, a );
1867 a->l0.i_cost16x8 = x264_rd_cost_mb( h, a->i_lambda2 );
1870 a->l0.i_cost16x8 = COST_MAX;
1872 if( a->l0.i_cost8x16 <= thresh )
1874 h->mb.i_partition = D_8x16;
1875 x264_analyse_update_cache( h, a );
1876 a->l0.i_cost8x16 = x264_rd_cost_mb( h, a->i_lambda2 );
1879 a->l0.i_cost8x16 = COST_MAX;
1881 if( a->l0.i_cost8x8 <= thresh )
1883 h->mb.i_type = P_8x8;
1884 x264_analyse_update_cache( h, a );
1885 a->l0.i_cost8x8 = x264_rd_cost_mb( h, a->i_lambda2 );
1887 if( h->param.analyse.inter & X264_ANALYSE_PSUB8x8 )
1889 /* FIXME: RD per subpartition */
1893 for( i=0; i<4; i++ )
1895 part_bak[i] = h->mb.i_sub_partition[i];
1896 b_sub8x8 |= (part_bak[i] != D_L0_8x8);
1900 h->mb.i_sub_partition[0] = h->mb.i_sub_partition[1] =
1901 h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8;
1902 i_cost = x264_rd_cost_mb( h, a->i_lambda2 );
1903 if( a->l0.i_cost8x8 < i_cost )
1905 for( i=0; i<4; i++ )
1906 h->mb.i_sub_partition[i] = part_bak[i];
1909 a->l0.i_cost8x8 = i_cost;
1914 a->l0.i_cost8x8 = COST_MAX;
1917 static void x264_mb_analyse_b_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd_inter )
1919 int thresh = i_satd_inter * 17/16;
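/* Same idea as x264_mb_analyse_p_rd, but for B macroblocks: every candidate
 * mb type within roughly 6% of the best SATD cost is re-scored with true RD. */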
1921 if( a->b_direct_available && a->i_rd16x16direct == COST_MAX )
1923 h->mb.i_type = B_DIRECT;
1924 x264_analyse_update_cache( h, a );
1925 a->i_rd16x16direct = x264_rd_cost_mb( h, a->i_lambda2 );
1928 // FIXME: not all of the update_cache calls are needed
1929 h->mb.i_partition = D_16x16;
1931 if( a->l0.me16x16.cost <= thresh && a->l0.i_rd16x16 == COST_MAX )
1933 h->mb.i_type = B_L0_L0;
1934 x264_analyse_update_cache( h, a );
1935 a->l0.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
1939 if( a->l1.me16x16.cost <= thresh && a->l1.i_rd16x16 == COST_MAX )
1941 h->mb.i_type = B_L1_L1;
1942 x264_analyse_update_cache( h, a );
1943 a->l1.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
1947 if( a->i_cost16x16bi <= thresh && a->i_rd16x16bi == COST_MAX )
1949 h->mb.i_type = B_BI_BI;
1950 x264_analyse_update_cache( h, a );
1951 a->i_rd16x16bi = x264_rd_cost_mb( h, a->i_lambda2 );
1955 if( a->i_cost8x8bi <= thresh && a->i_rd8x8bi == COST_MAX )
1957 h->mb.i_type = B_8x8;
1958 h->mb.i_partition = D_8x8;
1959 x264_analyse_update_cache( h, a );
1960 a->i_rd8x8bi = x264_rd_cost_mb( h, a->i_lambda2 );
1961 x264_macroblock_cache_skip( h, 0, 0, 4, 4, 0 );
1965 if( a->i_cost16x8bi <= thresh && a->i_rd16x8bi == COST_MAX )
1967 h->mb.i_type = a->i_mb_type16x8;
1968 h->mb.i_partition = D_16x8;
1969 x264_analyse_update_cache( h, a );
1970 a->i_rd16x8bi = x264_rd_cost_mb( h, a->i_lambda2 );
1974 if( a->i_cost8x16bi <= thresh && a->i_rd8x16bi == COST_MAX )
1976 h->mb.i_type = a->i_mb_type8x16;
1977 h->mb.i_partition = D_8x16;
1978 x264_analyse_update_cache( h, a );
1979 a->i_rd8x16bi = x264_rd_cost_mb( h, a->i_lambda2 );
1983 static void refine_bidir( x264_t *h, x264_mb_analysis_t *a )
1985 const int i_biweight = h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref];
1988 switch( h->mb.i_partition )
1991 if( h->mb.i_type == B_BI_BI )
1992 x264_me_refine_bidir( h, &a->l0.me16x16, &a->l1.me16x16, i_biweight );
1995 for( i=0; i<2; i++ )
1996 if( a->i_mb_partition16x8[i] == D_BI_8x8 )
1997 x264_me_refine_bidir( h, &a->l0.me16x8[i], &a->l1.me16x8[i], i_biweight );
2000 for( i=0; i<2; i++ )
2001 if( a->i_mb_partition8x16[i] == D_BI_8x8 )
2002 x264_me_refine_bidir( h, &a->l0.me8x16[i], &a->l1.me8x16[i], i_biweight );
2005 for( i=0; i<4; i++ )
2006 if( h->mb.i_sub_partition[i] == D_BI_8x8 )
2007 x264_me_refine_bidir( h, &a->l0.me8x8[i], &a->l1.me8x8[i], i_biweight );
2012 static inline void x264_mb_analyse_transform( x264_t *h )
2014 if( x264_mb_transform_8x8_allowed( h ) && h->param.analyse.b_transform_8x8 )
2016 int i_cost4, i_cost8;
2017 /* FIXME only luma mc is needed */
2020 i_cost8 = h->pixf.sa8d[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE,
2021 h->mb.pic.p_fdec[0], FDEC_STRIDE );
2022 i_cost4 = h->pixf.satd[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE,
2023 h->mb.pic.p_fdec[0], FDEC_STRIDE );
2025 h->mb.b_transform_8x8 = i_cost8 < i_cost4;
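/* Heuristic transform-size decision: compare the 8x8 Hadamard (sa8d) score
 * of the motion-compensated residual against the 4x4 Hadamard (satd) score
 * and keep whichever transform models the residual more cheaply. */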
static inline void x264_mb_analyse_transform_rd( x264_t *h, x264_mb_analysis_t *a, int *i_satd, int *i_rd )
{
    if( x264_mb_transform_8x8_allowed( h ) && h->param.analyse.b_transform_8x8 )
    {
        int i_rd8;
        x264_analyse_update_cache( h, a );
        h->mb.b_transform_8x8 = !h->mb.b_transform_8x8;
        /* FIXME only luma is needed, but the score for comparison already includes chroma */
        i_rd8 = x264_rd_cost_mb( h, a->i_lambda2 );

        if( *i_rd >= i_rd8 )
        {
            if( *i_rd > 0 )
                *i_satd = (int64_t)(*i_satd) * i_rd8 / *i_rd;
            /* prevent a rare division by zero in estimated intra cost */
            else
                *i_satd = 0;
            *i_rd = i_rd8;
        }
        else
            h->mb.b_transform_8x8 = !h->mb.b_transform_8x8;
    }
}
/*****************************************************************************
 * x264_macroblock_analyse:
 *****************************************************************************/
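/* Top-level mode decision for one macroblock: run the intra and/or inter
 * analysis appropriate to the slice type, pick the cheapest mode, then store
 * the decision (type, partition, references, MVs) back into h->mb. */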
void x264_macroblock_analyse( x264_t *h )
{
    x264_mb_analysis_t analysis;
    int i_cost = COST_MAX;
    int i;

    x264_mb_analyse_init( h, &analysis, x264_ratecontrol_qp( h ) );

    /*--------------------------- Do the analysis ---------------------------*/
    if( h->sh.i_type == SLICE_TYPE_I )
    {
        x264_mb_analyse_intra( h, &analysis, COST_MAX );
        if( analysis.b_mbrd )
            x264_intra_rd( h, &analysis, COST_MAX );

        i_cost = analysis.i_satd_i16x16;
        h->mb.i_type = I_16x16;
        if( analysis.i_satd_i4x4 < i_cost )
        {
            i_cost = analysis.i_satd_i4x4;
            h->mb.i_type = I_4x4;
        }
        if( analysis.i_satd_i8x8 < i_cost )
            h->mb.i_type = I_8x8;

        if( h->mb.i_subpel_refine >= 7 )
            x264_intra_rd_refine( h, &analysis );
    }
    else if( h->sh.i_type == SLICE_TYPE_P )
    {
        int b_skip = 0;
        int i_intra_cost, i_intra_type;

        h->mc.prefetch_ref( h->mb.pic.p_fref[0][0][h->mb.i_mb_x&3], h->mb.pic.i_stride[0], 0 );

        /* Fast P_SKIP detection */
        analysis.b_try_pskip = 0;
        if( h->param.analyse.b_fast_pskip )
        {
            if( h->param.i_threads > 1 && h->mb.cache.pskip_mv[1] > h->mb.mv_max_spel[1] )
                // FIXME don't need to check this if the reference frame is done
                {}
            else if( h->param.analyse.i_subpel_refine >= 3 )
                analysis.b_try_pskip = 1;
            else if( h->mb.i_mb_type_left == P_SKIP ||
                     h->mb.i_mb_type_top == P_SKIP ||
                     h->mb.i_mb_type_topleft == P_SKIP ||
                     h->mb.i_mb_type_topright == P_SKIP )
                b_skip = x264_macroblock_probe_pskip( h );
        }

        h->mc.prefetch_ref( h->mb.pic.p_fref[0][0][h->mb.i_mb_x&3], h->mb.pic.i_stride[0], 1 );

        if( b_skip )
        {
            h->mb.i_type = P_SKIP;
            h->mb.i_partition = D_16x16;
            assert( h->mb.cache.pskip_mv[1] <= h->mb.mv_max_spel[1] || h->param.i_threads == 1 );
        }
        else
        {
            const unsigned int flags = h->param.analyse.inter;
            int i_type;
            int i_partition;
            int i_thresh16x8;
            int i_satd_inter, i_satd_intra;

            x264_mb_analyse_load_costs( h, &analysis );

            x264_mb_analyse_inter_p16x16( h, &analysis );

            if( h->mb.i_type == P_SKIP )
                return;

            if( flags & X264_ANALYSE_PSUB16x16 )
            {
                if( h->param.analyse.b_mixed_references )
                    x264_mb_analyse_inter_p8x8_mixed_ref( h, &analysis );
                else
                    x264_mb_analyse_inter_p8x8( h, &analysis );
            }

            /* Select best inter mode */
            i_type = P_L0;
            i_partition = D_16x16;
            i_cost = analysis.l0.me16x16.cost;

            if( ( flags & X264_ANALYSE_PSUB16x16 ) &&
                analysis.l0.i_cost8x8 < analysis.l0.me16x16.cost )
            {
                i_type = P_8x8;
                i_partition = D_8x8;
                i_cost = analysis.l0.i_cost8x8;

                /* Do sub 8x8 */
                if( flags & X264_ANALYSE_PSUB8x8 )
                {
                    for( i = 0; i < 4; i++ )
                    {
                        x264_mb_analyse_inter_p4x4( h, &analysis, i );
                        if( analysis.l0.i_cost4x4[i] < analysis.l0.me8x8[i].cost )
                        {
                            int i_cost8x8 = analysis.l0.i_cost4x4[i];
                            h->mb.i_sub_partition[i] = D_L0_4x4;

                            x264_mb_analyse_inter_p8x4( h, &analysis, i );
                            COPY2_IF_LT( i_cost8x8, analysis.l0.i_cost8x4[i],
                                         h->mb.i_sub_partition[i], D_L0_8x4 );

                            x264_mb_analyse_inter_p4x8( h, &analysis, i );
                            COPY2_IF_LT( i_cost8x8, analysis.l0.i_cost4x8[i],
                                         h->mb.i_sub_partition[i], D_L0_4x8 );

                            i_cost += i_cost8x8 - analysis.l0.me8x8[i].cost;
                        }
                        x264_mb_cache_mv_p8x8( h, &analysis, i );
                    }
                    analysis.l0.i_cost8x8 = i_cost;
                }
            }
            /* Now do 16x8/8x16 */
            i_thresh16x8 = analysis.l0.me8x8[1].cost_mv + analysis.l0.me8x8[2].cost_mv;
            if( ( flags & X264_ANALYSE_PSUB16x16 ) &&
                analysis.l0.i_cost8x8 < analysis.l0.me16x16.cost + i_thresh16x8 )
            {
                x264_mb_analyse_inter_p16x8( h, &analysis );
                COPY3_IF_LT( i_cost, analysis.l0.i_cost16x8, i_type, P_L0, i_partition, D_16x8 );

                x264_mb_analyse_inter_p8x16( h, &analysis );
                COPY3_IF_LT( i_cost, analysis.l0.i_cost8x16, i_type, P_L0, i_partition, D_8x16 );
            }

            h->mb.i_partition = i_partition;

            /* refine qpel */
            //FIXME mb_type costs?
            if( analysis.b_mbrd )
            {
                /* refine later */
            }
            else if( i_partition == D_16x16 )
            {
                x264_me_refine_qpel( h, &analysis.l0.me16x16 );
                i_cost = analysis.l0.me16x16.cost;
            }
            else if( i_partition == D_16x8 )
            {
                x264_me_refine_qpel( h, &analysis.l0.me16x8[0] );
                x264_me_refine_qpel( h, &analysis.l0.me16x8[1] );
                i_cost = analysis.l0.me16x8[0].cost + analysis.l0.me16x8[1].cost;
            }
            else if( i_partition == D_8x16 )
            {
                x264_me_refine_qpel( h, &analysis.l0.me8x16[0] );
                x264_me_refine_qpel( h, &analysis.l0.me8x16[1] );
                i_cost = analysis.l0.me8x16[0].cost + analysis.l0.me8x16[1].cost;
            }
            else if( i_partition == D_8x8 )
            {
                int i8x8;
                i_cost = 0;
                for( i8x8 = 0; i8x8 < 4; i8x8++ )
                {
                    switch( h->mb.i_sub_partition[i8x8] )
                    {
                        case D_L0_8x8:
                            x264_me_refine_qpel( h, &analysis.l0.me8x8[i8x8] );
                            i_cost += analysis.l0.me8x8[i8x8].cost;
                            break;
                        case D_L0_8x4:
                            x264_me_refine_qpel( h, &analysis.l0.me8x4[i8x8][0] );
                            x264_me_refine_qpel( h, &analysis.l0.me8x4[i8x8][1] );
                            i_cost += analysis.l0.me8x4[i8x8][0].cost +
                                      analysis.l0.me8x4[i8x8][1].cost;
                            break;
                        case D_L0_4x8:
                            x264_me_refine_qpel( h, &analysis.l0.me4x8[i8x8][0] );
                            x264_me_refine_qpel( h, &analysis.l0.me4x8[i8x8][1] );
                            i_cost += analysis.l0.me4x8[i8x8][0].cost +
                                      analysis.l0.me4x8[i8x8][1].cost;
                            break;
                        case D_L0_4x4:
                            x264_me_refine_qpel( h, &analysis.l0.me4x4[i8x8][0] );
                            x264_me_refine_qpel( h, &analysis.l0.me4x4[i8x8][1] );
                            x264_me_refine_qpel( h, &analysis.l0.me4x4[i8x8][2] );
                            x264_me_refine_qpel( h, &analysis.l0.me4x4[i8x8][3] );
                            i_cost += analysis.l0.me4x4[i8x8][0].cost +
                                      analysis.l0.me4x4[i8x8][1].cost +
                                      analysis.l0.me4x4[i8x8][2].cost +
                                      analysis.l0.me4x4[i8x8][3].cost;
                            break;
                        default:
                            x264_log( h, X264_LOG_ERROR, "internal error (!8x8 && !4x4)\n" );
                            break;
                    }
                }
            }
            if( h->mb.b_chroma_me )
            {
                x264_mb_analyse_intra_chroma( h, &analysis );
                x264_mb_analyse_intra( h, &analysis, i_cost - analysis.i_satd_i8x8chroma );
                analysis.i_satd_i16x16 += analysis.i_satd_i8x8chroma;
                analysis.i_satd_i8x8 += analysis.i_satd_i8x8chroma;
                analysis.i_satd_i4x4 += analysis.i_satd_i8x8chroma;
            }
            else
                x264_mb_analyse_intra( h, &analysis, i_cost );

            i_satd_inter = i_cost;
            i_satd_intra = X264_MIN3( analysis.i_satd_i16x16,
                                      analysis.i_satd_i8x8,
                                      analysis.i_satd_i4x4 );
            if( analysis.b_mbrd )
            {
                x264_mb_analyse_p_rd( h, &analysis, X264_MIN(i_satd_inter, i_satd_intra) );
                i_type = P_L0;
                i_partition = D_16x16;
                i_cost = analysis.l0.me16x16.cost;
                COPY2_IF_LT( i_cost, analysis.l0.i_cost16x8, i_partition, D_16x8 );
                COPY2_IF_LT( i_cost, analysis.l0.i_cost8x16, i_partition, D_8x16 );
                COPY3_IF_LT( i_cost, analysis.l0.i_cost8x8, i_partition, D_8x8, i_type, P_8x8 );
                h->mb.i_type = i_type;
                h->mb.i_partition = i_partition;
                if( i_cost < COST_MAX )
                    x264_mb_analyse_transform_rd( h, &analysis, &i_satd_inter, &i_cost );
                x264_intra_rd( h, &analysis, i_satd_inter * 5/4 );
            }

            i_intra_type = I_16x16;
            i_intra_cost = analysis.i_satd_i16x16;
            COPY2_IF_LT( i_intra_cost, analysis.i_satd_i8x8, i_intra_type, I_8x8 );
            COPY2_IF_LT( i_intra_cost, analysis.i_satd_i4x4, i_intra_type, I_4x4 );
            COPY2_IF_LT( i_cost, i_intra_cost, i_type, i_intra_type );

            if( i_intra_cost == COST_MAX )
                i_intra_cost = i_cost * i_satd_intra / i_satd_inter + 1;

            h->mb.i_type = i_type;
            h->stat.frame.i_intra_cost += i_intra_cost;
            h->stat.frame.i_inter_cost += i_cost;
            h->stat.frame.i_mbs_analysed++;
            if( h->mb.i_subpel_refine >= 7 )
            {
                if( IS_INTRA( h->mb.i_type ) )
                {
                    x264_intra_rd_refine( h, &analysis );
                }
                else if( i_partition == D_16x16 )
                {
                    x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, analysis.l0.me16x16.i_ref );
                    x264_me_refine_qpel_rd( h, &analysis.l0.me16x16, analysis.i_lambda2, 0 );
                }
                else if( i_partition == D_16x8 )
                {
                    x264_macroblock_cache_ref( h, 0, 0, 4, 2, 0, analysis.l0.me16x8[0].i_ref );
                    x264_macroblock_cache_ref( h, 0, 2, 4, 2, 0, analysis.l0.me16x8[1].i_ref );
                    x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[0], analysis.i_lambda2, 0 );
                    x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[1], analysis.i_lambda2, 2 );
                }
                else if( i_partition == D_8x16 )
                {
                    x264_macroblock_cache_ref( h, 0, 0, 2, 4, 0, analysis.l0.me8x16[0].i_ref );
                    x264_macroblock_cache_ref( h, 2, 0, 2, 4, 0, analysis.l0.me8x16[1].i_ref );
                    x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[0], analysis.i_lambda2, 0 );
                    x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[1], analysis.i_lambda2, 1 );
                }
                else if( i_partition == D_8x8 )
                {
                    int i8x8;
                    x264_analyse_update_cache( h, &analysis );
                    for( i8x8 = 0; i8x8 < 4; i8x8++ )
                        if( h->mb.i_sub_partition[i8x8] == D_L0_8x8 )
                            x264_me_refine_qpel_rd( h, &analysis.l0.me8x8[i8x8], analysis.i_lambda2, i8x8 );
                }
            }
        }
    }
    else if( h->sh.i_type == SLICE_TYPE_B )
    {
        int i_bskip_cost = COST_MAX;
        int b_skip = 0;

        h->mb.i_type = B_SKIP;
        if( h->mb.b_direct_auto_write )
        {
            /* direct=auto heuristic: prefer whichever mode allows more Skip macroblocks */
            for( i = 0; i < 2; i++ )
            {
                int b_changed = 1;
                h->sh.b_direct_spatial_mv_pred ^= 1;
                analysis.b_direct_available = x264_mb_predict_mv_direct16x16( h, i && analysis.b_direct_available ? &b_changed : NULL );
                if( analysis.b_direct_available )
                {
                    if( b_changed )
                    {
                        x264_mb_mc( h );
                        b_skip = x264_macroblock_probe_bskip( h );
                    }
                    h->stat.frame.i_direct_score[ h->sh.b_direct_spatial_mv_pred ] += b_skip;
                }
                else
                    b_skip = 0;
            }
        }
        else
            analysis.b_direct_available = x264_mb_predict_mv_direct16x16( h, NULL );
        if( analysis.b_direct_available )
        {
            if( !h->mb.b_direct_auto_write )
                x264_mb_mc( h );
            if( h->mb.b_lossless )
            {
                /* chance of skip is too small to bother */
            }
            else if( analysis.b_mbrd )
            {
                i_bskip_cost = ssd_mb( h );
                /* 6 = minimum cavlc cost of a non-skipped MB */
                if( i_bskip_cost <= ((6 * analysis.i_lambda2 + 128) >> 8) )
                {
                    h->mb.i_type = B_SKIP;
                    x264_analyse_update_cache( h, &analysis );
                    return;
                }
            }
            else if( !h->mb.b_direct_auto_write )
            {
                /* Conditioning the probe on neighboring block types
                 * doesn't seem to help speed or quality. */
                b_skip = x264_macroblock_probe_bskip( h );
            }
        }
        if( !b_skip )
        {
            const unsigned int flags = h->param.analyse.inter;
            int i_type;
            int i_partition;

            x264_mb_analyse_load_costs( h, &analysis );

            /* select best inter mode */
            /* direct must be first */
            if( analysis.b_direct_available )
                x264_mb_analyse_inter_direct( h, &analysis );

            x264_mb_analyse_inter_b16x16( h, &analysis );

            i_type = B_L0_L0;
            i_partition = D_16x16;
            i_cost = analysis.l0.me16x16.cost;
            COPY2_IF_LT( i_cost, analysis.l1.me16x16.cost, i_type, B_L1_L1 );
            COPY2_IF_LT( i_cost, analysis.i_cost16x16bi, i_type, B_BI_BI );
            COPY2_IF_LT( i_cost, analysis.i_cost16x16direct, i_type, B_DIRECT );

            if( analysis.b_mbrd && analysis.i_cost16x16direct <= i_cost * 33/32 )
            {
                x264_mb_analyse_b_rd( h, &analysis, i_cost );
                if( i_bskip_cost < analysis.i_rd16x16direct &&
                    i_bskip_cost < analysis.i_rd16x16bi &&
                    i_bskip_cost < analysis.l0.i_rd16x16 &&
                    i_bskip_cost < analysis.l1.i_rd16x16 )
                {
                    h->mb.i_type = B_SKIP;
                    x264_analyse_update_cache( h, &analysis );
                    return;
                }
            }
            if( flags & X264_ANALYSE_BSUB16x16 )
            {
                x264_mb_analyse_inter_b8x8( h, &analysis );
                if( analysis.i_cost8x8bi < i_cost )
                {
                    i_type = B_8x8;
                    i_partition = D_8x8;
                    i_cost = analysis.i_cost8x8bi;

                    if( h->mb.i_sub_partition[0] == h->mb.i_sub_partition[1] ||
                        h->mb.i_sub_partition[2] == h->mb.i_sub_partition[3] )
                    {
                        x264_mb_analyse_inter_b16x8( h, &analysis );
                        COPY3_IF_LT( i_cost, analysis.i_cost16x8bi,
                                     i_type, analysis.i_mb_type16x8,
                                     i_partition, D_16x8 );
                    }
                    if( h->mb.i_sub_partition[0] == h->mb.i_sub_partition[2] ||
                        h->mb.i_sub_partition[1] == h->mb.i_sub_partition[3] )
                    {
                        x264_mb_analyse_inter_b8x16( h, &analysis );
                        COPY3_IF_LT( i_cost, analysis.i_cost8x16bi,
                                     i_type, analysis.i_mb_type8x16,
                                     i_partition, D_8x16 );
                    }
                }
            }
            if( analysis.b_mbrd )
            {
                /* refine later */
            }
            else if( i_partition == D_16x16 )
            {
                analysis.l0.me16x16.cost -= analysis.i_lambda * i_mb_b_cost_table[B_L0_L0];
                analysis.l1.me16x16.cost -= analysis.i_lambda * i_mb_b_cost_table[B_L1_L1];
                if( i_type == B_L0_L0 )
                {
                    x264_me_refine_qpel( h, &analysis.l0.me16x16 );
                    i_cost = analysis.l0.me16x16.cost
                           + analysis.i_lambda * i_mb_b_cost_table[B_L0_L0];
                }
                else if( i_type == B_L1_L1 )
                {
                    x264_me_refine_qpel( h, &analysis.l1.me16x16 );
                    i_cost = analysis.l1.me16x16.cost
                           + analysis.i_lambda * i_mb_b_cost_table[B_L1_L1];
                }
                else if( i_type == B_BI_BI )
                {
                    x264_me_refine_qpel( h, &analysis.l0.me16x16 );
                    x264_me_refine_qpel( h, &analysis.l1.me16x16 );
                }
            }
            else if( i_partition == D_16x8 )
            {
                for( i=0; i<2; i++ )
                {
                    if( analysis.i_mb_partition16x8[i] != D_L1_8x8 )
                        x264_me_refine_qpel( h, &analysis.l0.me16x8[i] );
                    if( analysis.i_mb_partition16x8[i] != D_L0_8x8 )
                        x264_me_refine_qpel( h, &analysis.l1.me16x8[i] );
                }
            }
            else if( i_partition == D_8x16 )
            {
                for( i=0; i<2; i++ )
                {
                    if( analysis.i_mb_partition8x16[i] != D_L1_8x8 )
                        x264_me_refine_qpel( h, &analysis.l0.me8x16[i] );
                    if( analysis.i_mb_partition8x16[i] != D_L0_8x8 )
                        x264_me_refine_qpel( h, &analysis.l1.me8x16[i] );
                }
            }
            else if( i_partition == D_8x8 )
            {
                for( i=0; i<4; i++ )
                {
                    x264_me_t *m;
                    int i_part_cost_old;
                    int i_type_cost;
                    int i_part_type = h->mb.i_sub_partition[i];
                    int b_bidir = (i_part_type == D_BI_8x8);

                    if( i_part_type == D_DIRECT_8x8 )
                        continue;
                    if( x264_mb_partition_listX_table[0][i_part_type] )
                    {
                        m = &analysis.l0.me8x8[i];
                        i_part_cost_old = m->cost;
                        i_type_cost = analysis.i_lambda * i_sub_mb_b_cost_table[D_L0_8x8];
                        m->cost -= i_type_cost;
                        x264_me_refine_qpel( h, m );
                        if( !b_bidir )
                            analysis.i_cost8x8bi += m->cost + i_type_cost - i_part_cost_old;
                    }
                    if( x264_mb_partition_listX_table[1][i_part_type] )
                    {
                        m = &analysis.l1.me8x8[i];
                        i_part_cost_old = m->cost;
                        i_type_cost = analysis.i_lambda * i_sub_mb_b_cost_table[D_L1_8x8];
                        m->cost -= i_type_cost;
                        x264_me_refine_qpel( h, m );
                        if( !b_bidir )
                            analysis.i_cost8x8bi += m->cost + i_type_cost - i_part_cost_old;
                    }
                    /* TODO: update mvp? */
                }
            }
            x264_mb_analyse_intra( h, &analysis, i_cost );

            if( analysis.b_mbrd )
            {
                int i_satd_inter = i_cost;
                x264_mb_analyse_b_rd( h, &analysis, i_satd_inter );
                i_type = B_SKIP;
                i_cost = i_bskip_cost;
                i_partition = D_16x16;
                COPY2_IF_LT( i_cost, analysis.l0.i_rd16x16, i_type, B_L0_L0 );
                COPY2_IF_LT( i_cost, analysis.l1.i_rd16x16, i_type, B_L1_L1 );
                COPY2_IF_LT( i_cost, analysis.i_rd16x16bi, i_type, B_BI_BI );
                COPY2_IF_LT( i_cost, analysis.i_rd16x16direct, i_type, B_DIRECT );
                COPY3_IF_LT( i_cost, analysis.i_rd16x8bi, i_type, analysis.i_mb_type16x8, i_partition, D_16x8 );
                COPY3_IF_LT( i_cost, analysis.i_rd8x16bi, i_type, analysis.i_mb_type8x16, i_partition, D_8x16 );
                COPY3_IF_LT( i_cost, analysis.i_rd8x8bi, i_type, B_8x8, i_partition, D_8x8 );

                h->mb.i_type = i_type;
                h->mb.i_partition = i_partition;
                x264_mb_analyse_transform_rd( h, &analysis, &i_satd_inter, &i_cost );
                x264_intra_rd( h, &analysis, i_satd_inter * 17/16 );
            }

            COPY2_IF_LT( i_cost, analysis.i_satd_i16x16, i_type, I_16x16 );
            COPY2_IF_LT( i_cost, analysis.i_satd_i8x8, i_type, I_8x8 );
            COPY2_IF_LT( i_cost, analysis.i_satd_i4x4, i_type, I_4x4 );

            h->mb.i_type = i_type;
            h->mb.i_partition = i_partition;

            if( h->mb.i_subpel_refine >= 7 && IS_INTRA( i_type ) )
                x264_intra_rd_refine( h, &analysis );
            else if( h->param.analyse.b_bidir_me )
                refine_bidir( h, &analysis );
        }
    }
    x264_analyse_update_cache( h, &analysis );

    if( !analysis.b_mbrd )
        x264_mb_analyse_transform( h );

    h->mb.b_trellis = h->param.analyse.i_trellis;
    h->mb.b_noise_reduction = h->param.analyse.i_noise_reduction;
    if( h->mb.b_trellis == 1 || h->mb.b_noise_reduction )
        h->mb.i_skip_intra = 0;
}
/*-------------------- Update MB from the analysis ----------------------*/
static void x264_analyse_update_cache( x264_t *h, x264_mb_analysis_t *a )
{
    int i;

    switch( h->mb.i_type )
    {
        case I_4x4:
            for( i = 0; i < 16; i++ )
                h->mb.cache.intra4x4_pred_mode[x264_scan8[i]] = a->i_predict4x4[i];
            x264_mb_analyse_intra_chroma( h, a );
            break;
        case I_8x8:
            for( i = 0; i < 4; i++ )
                x264_macroblock_cache_intra8x8_pred( h, 2*(i&1), 2*(i>>1), a->i_predict8x8[i] );
            x264_mb_analyse_intra_chroma( h, a );
            break;
        case I_16x16:
            h->mb.i_intra16x16_pred_mode = a->i_predict16x16;
            x264_mb_analyse_intra_chroma( h, a );
            break;
        case P_L0:
            switch( h->mb.i_partition )
            {
                case D_16x16:
                    x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.me16x16.i_ref );
                    x264_macroblock_cache_mv ( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv[0], a->l0.me16x16.mv[1] );
                    break;
                case D_16x8:
                    x264_macroblock_cache_ref( h, 0, 0, 4, 2, 0, a->l0.me16x8[0].i_ref );
                    x264_macroblock_cache_ref( h, 0, 2, 4, 2, 0, a->l0.me16x8[1].i_ref );
                    x264_macroblock_cache_mv ( h, 0, 0, 4, 2, 0, a->l0.me16x8[0].mv[0], a->l0.me16x8[0].mv[1] );
                    x264_macroblock_cache_mv ( h, 0, 2, 4, 2, 0, a->l0.me16x8[1].mv[0], a->l0.me16x8[1].mv[1] );
                    break;
                case D_8x16:
                    x264_macroblock_cache_ref( h, 0, 0, 2, 4, 0, a->l0.me8x16[0].i_ref );
                    x264_macroblock_cache_ref( h, 2, 0, 2, 4, 0, a->l0.me8x16[1].i_ref );
                    x264_macroblock_cache_mv ( h, 0, 0, 2, 4, 0, a->l0.me8x16[0].mv[0], a->l0.me8x16[0].mv[1] );
                    x264_macroblock_cache_mv ( h, 2, 0, 2, 4, 0, a->l0.me8x16[1].mv[0], a->l0.me8x16[1].mv[1] );
                    break;
                default:
                    x264_log( h, X264_LOG_ERROR, "internal error P_L0 and partition=%d\n", h->mb.i_partition );
                    break;
            }
            break;

        case P_8x8:
            x264_macroblock_cache_ref( h, 0, 0, 2, 2, 0, a->l0.me8x8[0].i_ref );
            x264_macroblock_cache_ref( h, 2, 0, 2, 2, 0, a->l0.me8x8[1].i_ref );
            x264_macroblock_cache_ref( h, 0, 2, 2, 2, 0, a->l0.me8x8[2].i_ref );
            x264_macroblock_cache_ref( h, 2, 2, 2, 2, 0, a->l0.me8x8[3].i_ref );
            for( i = 0; i < 4; i++ )
                x264_mb_cache_mv_p8x8( h, a, i );
            break;
        case P_SKIP:
        {
            h->mb.i_partition = D_16x16;
            x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, 0 );
            x264_macroblock_cache_mv ( h, 0, 0, 4, 4, 0, h->mb.cache.pskip_mv[0],
                                       h->mb.cache.pskip_mv[1] );
            break;
        }

        case B_SKIP:
        case B_DIRECT:
            x264_mb_load_mv_direct8x8( h, 0 );
            x264_mb_load_mv_direct8x8( h, 1 );
            x264_mb_load_mv_direct8x8( h, 2 );
            x264_mb_load_mv_direct8x8( h, 3 );
            break;

        case B_8x8:
            /* optimize: cache might not need to be rewritten */
            for( i = 0; i < 4; i++ )
                x264_mb_cache_mv_b8x8( h, a, i, 1 );
            break;
        default: /* the rest of the B types */
            switch( h->mb.i_partition )
            {
                case D_16x16:
                    switch( h->mb.i_type )
                    {
                        case B_L0_L0:
                            x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.i_ref );
                            x264_macroblock_cache_mv ( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv[0], a->l0.me16x16.mv[1] );

                            x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, -1 );
                            x264_macroblock_cache_mv ( h, 0, 0, 4, 4, 1, 0, 0 );
                            x264_macroblock_cache_mvd( h, 0, 0, 4, 4, 1, 0, 0 );
                            break;
                        case B_L1_L1:
                            x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, -1 );
                            x264_macroblock_cache_mv ( h, 0, 0, 4, 4, 0, 0, 0 );
                            x264_macroblock_cache_mvd( h, 0, 0, 4, 4, 0, 0, 0 );

                            x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, a->l1.i_ref );
                            x264_macroblock_cache_mv ( h, 0, 0, 4, 4, 1, a->l1.me16x16.mv[0], a->l1.me16x16.mv[1] );
                            break;
                        case B_BI_BI:
                            x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.i_ref );
                            x264_macroblock_cache_mv ( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv[0], a->l0.me16x16.mv[1] );

                            x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, a->l1.i_ref );
                            x264_macroblock_cache_mv ( h, 0, 0, 4, 4, 1, a->l1.me16x16.mv[0], a->l1.me16x16.mv[1] );
                            break;
                    }
                    break;
                case D_16x8:
                    x264_mb_cache_mv_b16x8( h, a, 0, 1 );
                    x264_mb_cache_mv_b16x8( h, a, 1, 1 );
                    break;
                case D_8x16:
                    x264_mb_cache_mv_b8x16( h, a, 0, 1 );
                    x264_mb_cache_mv_b8x16( h, a, 1, 1 );
                    break;
                default:
                    x264_log( h, X264_LOG_ERROR, "internal error (invalid MB type)\n" );
                    break;
            }
    }
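
    /* With frame-parallel threading, motion vectors must not reference rows
     * of the reference frame that the other threads have not reconstructed
     * yet; if one slipped through, report it and fall back to I_16x16 so the
     * encode stays valid. */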
    if( h->param.i_threads > 1 && !IS_INTRA(h->mb.i_type) )
    {
        int l;
        for( l=0; l <= (h->sh.i_type == SLICE_TYPE_B); l++ )
        {
            int completed;
            int ref = h->mb.cache.ref[l][x264_scan8[0]];
            if( ref < 0 )
                continue;
            completed = (l ? h->fref1 : h->fref0)[ ref >> h->mb.b_interlaced ]->i_lines_completed;
            if( (h->mb.cache.mv[l][x264_scan8[15]][1] >> (2 - h->mb.b_interlaced)) + h->mb.i_mb_y*16 > completed )
            {
                x264_log( h, X264_LOG_WARNING, "internal error (MV out of thread range)\n");
                fprintf(stderr, "mb type: %d \n", h->mb.i_type);
                fprintf(stderr, "mv: l%dr%d (%d,%d) \n", l, ref,
                        h->mb.cache.mv[l][x264_scan8[15]][0],
                        h->mb.cache.mv[l][x264_scan8[15]][1] );
                fprintf(stderr, "limit: %d \n", h->mb.mv_max_spel[1]);
                fprintf(stderr, "mb_xy: %d,%d \n", h->mb.i_mb_x, h->mb.i_mb_y);
                fprintf(stderr, "completed: %d \n", completed );
                x264_log( h, X264_LOG_WARNING, "recovering by using intra mode\n");
                x264_mb_analyse_intra( h, a, COST_MAX );
                h->mb.i_type = I_16x16;
                h->mb.i_intra16x16_pred_mode = a->i_predict16x16;
                x264_mb_analyse_intra_chroma( h, a );
            }
        }
    }
}
#include "slicetype.c"