X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=encoder%2Fanalyse.c;h=c1c9314a0a38d2958179e1a63e6aa5c19026a52d;hb=9dd6842dc649734219b1207481c6746bbc6e2198;hp=17efe03289afc4cd17d28efbd5a0c56a7f8581b0;hpb=d4e077867f79a555efb83e45d93dc6f170b1fb3e;p=x264 diff --git a/encoder/analyse.c b/encoder/analyse.c index 17efe032..c1c9314a 100644 --- a/encoder/analyse.c +++ b/encoder/analyse.c @@ -1,11 +1,11 @@ /***************************************************************************** * analyse.c: h264 encoder library ***************************************************************************** - * Copyright (C) 2003 x264 project - * $Id: analyse.c,v 1.1 2004/06/03 19:27:08 fenrir Exp $ + * Copyright (C) 2003-2008 x264 project * * Authors: Laurent Aimar * Loren Merritt + * Fiona Glaser * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -19,16 +19,17 @@ * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA. *****************************************************************************/ +#define _ISOC99_SOURCE #include -#include #ifndef _MSC_VER #include #endif #include "common/common.h" +#include "common/cpu.h" #include "macroblock.h" #include "me.h" #include "ratecontrol.h" @@ -45,7 +46,7 @@ typedef struct /* 8x8 */ int i_cost8x8; /* [ref][0] is 16x16 mv, [ref][1..4] are 8x8 mv from partition [0..3] */ - DECLARE_ALIGNED_4( int16_t mvc[32][5][2] ); + ALIGNED_4( int16_t mvc[32][5][2] ); x264_me_t me8x8[4]; /* Sub 4x4 */ @@ -76,8 +77,10 @@ typedef struct int i_lambda; int i_lambda2; int i_qp; - int16_t *p_cost_mv; - int b_mbrd; + uint16_t *p_cost_mv; + uint16_t *p_cost_ref0; + uint16_t *p_cost_ref1; + int i_mbrd; /* I: Intra part */ @@ -91,12 +94,15 @@ typedef struct int i_predict16x16; int i_satd_i8x8; + int i_cbp_i8x8_luma; int i_satd_i8x8_dir[12][4]; int i_predict8x8[4]; int i_satd_i4x4; int i_predict4x4[16]; + int i_satd_pcm; + /* Chroma part */ int i_satd_i8x8chroma; int i_satd_i8x8chroma_dir[4]; @@ -128,7 +134,7 @@ typedef struct } x264_mb_analysis_t; /* lambda = pow(2,qp/6-2) */ -static const int i_qp0_cost_table[52] = { +const int x264_lambda_tab[52] = { 1, 1, 1, 1, 1, 1, 1, 1, /* 0-7 */ 1, 1, 1, 1, /* 8-11 */ 1, 1, 1, 1, 2, 2, 2, 2, /* 12-19 */ @@ -139,7 +145,7 @@ static const int i_qp0_cost_table[52] = { }; /* lambda2 = pow(lambda,2) * .9 * 256 */ -static const int i_qp0_cost2_table[52] = { +const int x264_lambda2_tab[52] = { 14, 18, 22, 28, 36, 45, 57, 72, /* 0 - 7 */ 91, 115, 145, 182, 230, 290, 365, 460, /* 8 - 15 */ 580, 731, 921, 1161, 1462, 1843, 2322, 2925, /* 16 - 23 */ @@ -149,6 +155,72 @@ static const int i_qp0_cost2_table[52] = { 943718, 1189010, 1498059, 1887436 /* 48 - 51 */ }; +const uint8_t x264_exp2_lut[64] = { + 1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40, 44, 47, + 50, 53, 57, 60, 64, 67, 71, 74, 78, 81, 85, 89, 93, 96, 100, 104, + 108, 112, 116, 120, 124, 128, 132, 137, 141, 145, 150, 154, 159, 163, 168, 172, + 177, 182, 186, 191, 196, 201, 206, 211, 216, 221, 226, 232, 237, 242, 248, 253, +}; + +const float x264_log2_lut[128] = { + 0.00000, 0.01123, 0.02237, 0.03342, 0.04439, 0.05528, 0.06609, 0.07682, + 0.08746, 0.09803, 0.10852, 0.11894, 0.12928, 0.13955, 0.14975, 0.15987, + 0.16993, 0.17991, 0.18982, 0.19967, 0.20945, 0.21917, 0.22882, 
0.23840, + 0.24793, 0.25739, 0.26679, 0.27612, 0.28540, 0.29462, 0.30378, 0.31288, + 0.32193, 0.33092, 0.33985, 0.34873, 0.35755, 0.36632, 0.37504, 0.38370, + 0.39232, 0.40088, 0.40939, 0.41785, 0.42626, 0.43463, 0.44294, 0.45121, + 0.45943, 0.46761, 0.47573, 0.48382, 0.49185, 0.49985, 0.50779, 0.51570, + 0.52356, 0.53138, 0.53916, 0.54689, 0.55459, 0.56224, 0.56986, 0.57743, + 0.58496, 0.59246, 0.59991, 0.60733, 0.61471, 0.62205, 0.62936, 0.63662, + 0.64386, 0.65105, 0.65821, 0.66534, 0.67243, 0.67948, 0.68650, 0.69349, + 0.70044, 0.70736, 0.71425, 0.72110, 0.72792, 0.73471, 0.74147, 0.74819, + 0.75489, 0.76155, 0.76818, 0.77479, 0.78136, 0.78790, 0.79442, 0.80090, + 0.80735, 0.81378, 0.82018, 0.82655, 0.83289, 0.83920, 0.84549, 0.85175, + 0.85798, 0.86419, 0.87036, 0.87652, 0.88264, 0.88874, 0.89482, 0.90087, + 0.90689, 0.91289, 0.91886, 0.92481, 0.93074, 0.93664, 0.94251, 0.94837, + 0.95420, 0.96000, 0.96578, 0.97154, 0.97728, 0.98299, 0.98868, 0.99435, +}; + +/* Avoid an int/float conversion. */ +const float x264_log2_lz_lut[32] = { + 31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 +}; + +// should the intra and inter lambdas be different? +// I'm just matching the behaviour of deadzone quant. +static const int x264_trellis_lambda2_tab[2][52] = { + // inter lambda = .85 * .85 * 2**(qp/3. + 10 - LAMBDA_BITS) + { 46, 58, 73, 92, 117, 147, + 185, 233, 294, 370, 466, 587, + 740, 932, 1174, 1480, 1864, 2349, + 2959, 3728, 4697, 5918, 7457, 9395, + 11837, 14914, 18790, 23674, 29828, 37581, + 47349, 59656, 75163, 94699, 119313, 150326, + 189399, 238627, 300652, 378798, 477255, 601304, + 757596, 954511, 1202608, 1515192, 1909022, 2405217, + 3030384, 3818045, 4810435, 6060769 }, + // intra lambda = .65 * .65 * 2**(qp/3. 
+ 10 - LAMBDA_BITS) + { 27, 34, 43, 54, 68, 86, + 108, 136, 172, 216, 273, 343, + 433, 545, 687, 865, 1090, 1374, + 1731, 2180, 2747, 3461, 4361, 5494, + 6922, 8721, 10988, 13844, 17442, 21976, + 27688, 34885, 43953, 55377, 69771, 87906, + 110755, 139543, 175813, 221511, 279087, 351627, + 443023, 558174, 703255, 886046, 1116348, 1406511, + 1772093, 2232697, 2813022, 3544186 } +}; + +static const uint16_t x264_chroma_lambda2_offset_tab[] = { + 16, 20, 25, 32, 40, 50, + 64, 80, 101, 128, 161, 203, + 256, 322, 406, 512, 645, 812, + 1024, 1290, 1625, 2048, 2580, 3250, + 4096, 5160, 6501, 8192, 10321, 13003, + 16384, 20642, 26007, 32768, 41285, 52015, + 65535 +}; + /* TODO: calculate CABAC costs */ static const int i_mb_b_cost_table[X264_MBTYPE_MAX] = { 9, 9, 9, 9, 0, 0, 0, 1, 3, 7, 7, 7, 3, 7, 7, 7, 5, 9, 0 @@ -165,56 +237,97 @@ static const int i_sub_mb_p_cost_table[4] = { static void x264_analyse_update_cache( x264_t *h, x264_mb_analysis_t *a ); -uint16_t *x264_cost_mv_fpel[52][4]; +static uint16_t x264_cost_ref[92][3][33]; +static x264_pthread_mutex_t cost_ref_mutex = X264_PTHREAD_MUTEX_INITIALIZER; -/* initialize an array of lambda*nbits for all possible mvs */ -static void x264_mb_analyse_load_costs( x264_t *h, x264_mb_analysis_t *a ) +int x264_analyse_init_costs( x264_t *h, int qp ) { - static int16_t *p_cost_mv[52]; int i, j; - - if( !p_cost_mv[a->i_qp] ) + int lambda = x264_lambda_tab[qp]; + if( h->cost_mv[lambda] ) + return 0; + /* factor of 4 from qpel, 2 from sign, and 2 because mv can be opposite from mvp */ + CHECKED_MALLOC( h->cost_mv[lambda], (4*4*2048 + 1) * sizeof(uint16_t) ); + h->cost_mv[lambda] += 2*4*2048; + for( i = 0; i <= 2*4*2048; i++ ) + { + h->cost_mv[lambda][-i] = + h->cost_mv[lambda][i] = lambda * (log2f(i+1)*2 + 0.718f + !!i) + .5f; + } + x264_pthread_mutex_lock( &cost_ref_mutex ); + for( i = 0; i < 3; i++ ) + for( j = 0; j < 33; j++ ) + x264_cost_ref[lambda][i][j] = i ? lambda * bs_size_te( i, j ) : 0; + x264_pthread_mutex_unlock( &cost_ref_mutex ); + if( h->param.analyse.i_me_method >= X264_ME_ESA && !h->cost_mv_fpel[lambda][0] ) { - /* could be faster, but isn't called many times */ - /* factor of 4 from qpel, 2 from sign, and 2 because mv can be opposite from mvp */ - p_cost_mv[a->i_qp] = x264_malloc( (4*4*2048 + 1) * sizeof(int16_t) ); - p_cost_mv[a->i_qp] += 2*4*2048; - for( i = 0; i <= 2*4*2048; i++ ) + for( j=0; j<4; j++ ) { - p_cost_mv[a->i_qp][-i] = - p_cost_mv[a->i_qp][i] = a->i_lambda * bs_size_se( i ); + CHECKED_MALLOC( h->cost_mv_fpel[lambda][j], (4*2048 + 1) * sizeof(uint16_t) ); + h->cost_mv_fpel[lambda][j] += 2*2048; + for( i = -2*2048; i < 2*2048; i++ ) + h->cost_mv_fpel[lambda][j][i] = h->cost_mv[lambda][i*4+j]; } } - a->p_cost_mv = p_cost_mv[a->i_qp]; + return 0; +fail: + return -1; +} - /* FIXME is this useful for all me methods? 
*/
-    if( h->param.analyse.i_me_method >= X264_ME_ESA && !x264_cost_mv_fpel[a->i_qp][0] )
+void x264_analyse_free_costs( x264_t *h )
+{
+    int i, j;
+    for( i = 0; i < 92; i++ )
     {
-        for( j=0; j<4; j++ )
-        {
-            x264_cost_mv_fpel[a->i_qp][j] = x264_malloc( (4*2048 + 1) * sizeof(int16_t) );
-            x264_cost_mv_fpel[a->i_qp][j] += 2*2048;
-            for( i = -2*2048; i < 2*2048; i++ )
-                x264_cost_mv_fpel[a->i_qp][j][i] = p_cost_mv[a->i_qp][i*4+j];
-        }
+        if( h->cost_mv[i] )
+            x264_free( h->cost_mv[i] - 2*4*2048 );
+        if( h->cost_mv_fpel[i][0] )
+            for( j = 0; j < 4; j++ )
+                x264_free( h->cost_mv_fpel[i][j] - 2*2048 );
     }
 }
 
+/* initialize an array of lambda*nbits for all possible mvs */
+static void x264_mb_analyse_load_costs( x264_t *h, x264_mb_analysis_t *a )
+{
+    a->p_cost_mv = h->cost_mv[a->i_lambda];
+    a->p_cost_ref0 = x264_cost_ref[a->i_lambda][x264_clip3(h->sh.i_num_ref_idx_l0_active-1,0,2)];
+    a->p_cost_ref1 = x264_cost_ref[a->i_lambda][x264_clip3(h->sh.i_num_ref_idx_l1_active-1,0,2)];
+}
+
 static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp )
 {
+    int i = h->param.analyse.i_subpel_refine - (h->sh.i_type == SLICE_TYPE_B);
+
+    /* mbrd == 1 -> RD mode decision */
+    /* mbrd == 2 -> RD refinement */
+    /* mbrd == 3 -> QPRD */
+    a->i_mbrd = (i>=6) + (i>=8) + (h->param.analyse.i_subpel_refine>=10);
+
     /* conduct the analysis using this lambda and QP */
     a->i_qp = h->mb.i_qp = i_qp;
-    h->mb.i_chroma_qp = i_chroma_qp_table[x264_clip3( i_qp + h->pps->i_chroma_qp_index_offset, 0, 51 )];
-    a->i_lambda = i_qp0_cost_table[i_qp];
-    a->i_lambda2 = i_qp0_cost2_table[i_qp];
-    a->b_mbrd = h->param.analyse.i_subpel_refine >= 6 &&
-                ( h->sh.i_type != SLICE_TYPE_B || h->param.analyse.b_bframe_rdo );
+    h->mb.i_chroma_qp = h->chroma_qp_table[i_qp];
+
+    a->i_lambda = x264_lambda_tab[i_qp];
+    a->i_lambda2 = x264_lambda2_tab[i_qp];
+
+    h->mb.b_trellis = h->param.analyse.i_trellis > 1 && a->i_mbrd;
+    if( h->param.analyse.i_trellis )
+    {
+        h->mb.i_trellis_lambda2[0][0] = x264_trellis_lambda2_tab[0][h->mb.i_qp];
+        h->mb.i_trellis_lambda2[0][1] = x264_trellis_lambda2_tab[1][h->mb.i_qp];
+        h->mb.i_trellis_lambda2[1][0] = x264_trellis_lambda2_tab[0][h->mb.i_chroma_qp];
+        h->mb.i_trellis_lambda2[1][1] = x264_trellis_lambda2_tab[1][h->mb.i_chroma_qp];
+    }
+    h->mb.i_psy_rd_lambda = a->i_lambda;
+    /* Adjusting chroma lambda based on QP offset hurts PSNR but improves visual quality. */
+    h->mb.i_chroma_lambda2_offset = h->param.analyse.b_psy ? x264_chroma_lambda2_offset_tab[h->mb.i_qp-h->mb.i_chroma_qp+12] : 256;
 
     h->mb.i_me_method = h->param.analyse.i_me_method;
     h->mb.i_subpel_refine = h->param.analyse.i_subpel_refine;
     h->mb.b_chroma_me = h->param.analyse.b_chroma_me && h->sh.i_type == SLICE_TYPE_P
                         && h->mb.i_subpel_refine >= 5;
-    h->mb.b_trellis = h->param.analyse.i_trellis > 1 && a->b_mbrd;
+
     h->mb.b_transform_8x8 = 0;
     h->mb.b_noise_reduction = 0;
 
@@ -224,10 +337,13 @@ static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp )
     a->i_satd_i4x4   =
     a->i_satd_i8x8chroma = COST_MAX;
 
+    /* non-RD PCM decision is inaccurate (as is psy-rd), so don't do it */
+    a->i_satd_pcm = !h->mb.i_psy_rd && a->i_mbrd ? ((uint64_t)X264_PCM_COST*a->i_lambda2 + 128) >> 8 : COST_MAX;
+
    a->b_fast_intra = 0;
    h->mb.i_skip_intra =
        h->mb.b_lossless ? 0 :
-        a->b_mbrd ? 2 :
+        a->i_mbrd ? 
2 : !h->param.analyse.i_trellis && !h->param.analyse.i_noise_reduction; /* II: Inter part P/B frame */ @@ -237,8 +353,7 @@ static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp ) int i_fmv_range = 4 * h->param.analyse.i_mv_range; // limit motion search to a slightly smaller range than the theoretical limit, // since the search may go a few iterations past its given range - int i_fpel_border = 5; // umh unconditional radius - int i_spel_border = 8; // 1.5 for subpel_satd, 1.5 for subpel_rd, 2 for bime, round up + int i_fpel_border = 6; // umh: 1 for diamond, 2 for octagon, 2 for hpel /* Calculate max allowed MV range */ #define CLIP_FMV(mv) x264_clip3( mv, -i_fmv_range, i_fmv_range-1 ) @@ -276,7 +391,7 @@ static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp ) h->mb.mv_min[1] = 4*( -16*mb_y - 24 ); h->mb.mv_max[1] = 4*( 16*( mb_height - mb_y - 1 ) + 24 ); - h->mb.mv_min_spel[1] = x264_clip3( h->mb.mv_min[1], X264_MAX(4*(-512+i_spel_border), -i_fmv_range), i_fmv_range ); + h->mb.mv_min_spel[1] = x264_clip3( h->mb.mv_min[1], -i_fmv_range, i_fmv_range ); h->mb.mv_max_spel[1] = CLIP_FMV( h->mb.mv_max[1] ); h->mb.mv_max_spel[1] = X264_MIN( h->mb.mv_max_spel[1], thread_mvy_range*4 ); h->mb.mv_min_fpel[1] = (h->mb.mv_min_spel[1]>>2) + i_fpel_border; @@ -352,23 +467,30 @@ static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp ) /* Max = 4 */ static void predict_16x16_mode_available( unsigned int i_neighbour, int *mode, int *pi_count ) { - if( i_neighbour & MB_TOPLEFT ) + int b_top = i_neighbour & MB_TOP; + int b_left = i_neighbour & MB_LEFT; + if( b_top && b_left ) { /* top and left available */ *mode++ = I_PRED_16x16_V; *mode++ = I_PRED_16x16_H; *mode++ = I_PRED_16x16_DC; - *mode++ = I_PRED_16x16_P; - *pi_count = 4; + *pi_count = 3; + if( i_neighbour & MB_TOPLEFT ) + { + /* top left available*/ + *mode++ = I_PRED_16x16_P; + *pi_count = 4; + } } - else if( i_neighbour & MB_LEFT ) + else if( b_left ) { /* left available*/ *mode++ = I_PRED_16x16_DC_LEFT; *mode++ = I_PRED_16x16_H; *pi_count = 2; } - else if( i_neighbour & MB_TOP ) + else if( b_top ) { /* top available*/ *mode++ = I_PRED_16x16_DC_TOP; @@ -386,23 +508,30 @@ static void predict_16x16_mode_available( unsigned int i_neighbour, int *mode, i /* Max = 4 */ static void predict_8x8chroma_mode_available( unsigned int i_neighbour, int *mode, int *pi_count ) { - if( i_neighbour & MB_TOPLEFT ) + int b_top = i_neighbour & MB_TOP; + int b_left = i_neighbour & MB_LEFT; + if( b_top && b_left ) { /* top and left available */ *mode++ = I_PRED_CHROMA_V; *mode++ = I_PRED_CHROMA_H; *mode++ = I_PRED_CHROMA_DC; - *mode++ = I_PRED_CHROMA_P; - *pi_count = 4; + *pi_count = 3; + if( i_neighbour & MB_TOPLEFT ) + { + /* top left available */ + *mode++ = I_PRED_CHROMA_P; + *pi_count = 4; + } } - else if( i_neighbour & MB_LEFT ) + else if( b_left ) { /* left available*/ *mode++ = I_PRED_CHROMA_DC_LEFT; *mode++ = I_PRED_CHROMA_H; *pi_count = 2; } - else if( i_neighbour & MB_TOP ) + else if( b_top ) { /* top available*/ *mode++ = I_PRED_CHROMA_DC_TOP; @@ -421,10 +550,9 @@ static void predict_8x8chroma_mode_available( unsigned int i_neighbour, int *mod static void predict_4x4_mode_available( unsigned int i_neighbour, int *mode, int *pi_count ) { - int b_l = i_neighbour & MB_LEFT; - int b_t = i_neighbour & MB_TOP; - - if( b_l && b_t ) + int b_top = i_neighbour & MB_TOP; + int b_left = i_neighbour & MB_LEFT; + if( b_top && b_left ) { *pi_count = 6; *mode++ = I_PRED_4x4_DC; @@ -441,14 +569,14 @@ static 
void predict_4x4_mode_available( unsigned int i_neighbour, *mode++ = I_PRED_4x4_VL; *mode++ = I_PRED_4x4_HU; } - else if( b_l ) + else if( b_left ) { *mode++ = I_PRED_4x4_DC_LEFT; *mode++ = I_PRED_4x4_H; *mode++ = I_PRED_4x4_HU; *pi_count = 3; } - else if( b_t ) + else if( b_top ) { *mode++ = I_PRED_4x4_DC_TOP; *mode++ = I_PRED_4x4_V; @@ -463,12 +591,65 @@ static void predict_4x4_mode_available( unsigned int i_neighbour, } } +/* For trellis=2, we need to do this for both sizes of DCT, for trellis=1 we only need to use it on the chosen mode. */ +static void inline x264_psy_trellis_init( x264_t *h, int do_both_dct ) +{ + ALIGNED_ARRAY_16( int16_t, dct8x8,[4],[8][8] ); + ALIGNED_ARRAY_16( int16_t, dct4x4,[16],[4][4] ); + ALIGNED_16( static uint8_t zero[16*FDEC_STRIDE] ) = {0}; + int i; + + if( do_both_dct || h->mb.b_transform_8x8 ) + { + h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[0], zero ); + for( i = 0; i < 4; i++ ) + h->zigzagf.scan_8x8( h->mb.pic.fenc_dct8[i], dct8x8[i] ); + } + if( do_both_dct || !h->mb.b_transform_8x8 ) + { + h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[0], zero ); + for( i = 0; i < 16; i++ ) + h->zigzagf.scan_4x4( h->mb.pic.fenc_dct4[i], dct4x4[i] ); + } +} + +/* Pre-calculate fenc satd scores for psy RD, minus DC coefficients */ +static inline void x264_mb_cache_fenc_satd( x264_t *h ) +{ + ALIGNED_16( static uint8_t zero[16] ) = {0}; + uint8_t *fenc; + int x, y, satd_sum = 0, sa8d_sum = 0; + if( h->param.analyse.i_trellis == 2 && h->mb.i_psy_trellis ) + x264_psy_trellis_init( h, h->param.analyse.b_transform_8x8 ); + if( !h->mb.i_psy_rd ) + return; + for( y = 0; y < 4; y++ ) + for( x = 0; x < 4; x++ ) + { + fenc = h->mb.pic.p_fenc[0]+x*4+y*4*FENC_STRIDE; + h->mb.pic.fenc_satd[y][x] = h->pixf.satd[PIXEL_4x4]( zero, 0, fenc, FENC_STRIDE ) + - (h->pixf.sad[PIXEL_4x4]( zero, 0, fenc, FENC_STRIDE )>>1); + satd_sum += h->mb.pic.fenc_satd[y][x]; + } + for( y = 0; y < 2; y++ ) + for( x = 0; x < 2; x++ ) + { + fenc = h->mb.pic.p_fenc[0]+x*8+y*8*FENC_STRIDE; + h->mb.pic.fenc_sa8d[y][x] = h->pixf.sa8d[PIXEL_8x8]( zero, 0, fenc, FENC_STRIDE ) + - (h->pixf.sad[PIXEL_8x8]( zero, 0, fenc, FENC_STRIDE )>>2); + sa8d_sum += h->mb.pic.fenc_sa8d[y][x]; + } + h->mb.pic.fenc_satd_sum = satd_sum; + h->mb.pic.fenc_sa8d_sum = sa8d_sum; +} + static void x264_mb_analyse_intra_chroma( x264_t *h, x264_mb_analysis_t *a ) { int i; int i_max; int predict_mode[4]; + int b_merged_satd = !!h->pixf.intra_mbcmp_x3_8x8c && !h->mb.b_lossless; uint8_t *p_dstc[2], *p_srcc[2]; @@ -483,18 +664,18 @@ static void x264_mb_analyse_intra_chroma( x264_t *h, x264_mb_analysis_t *a ) predict_8x8chroma_mode_available( h->mb.i_neighbour, predict_mode, &i_max ); a->i_satd_i8x8chroma = COST_MAX; - if( i_max == 4 && h->pixf.intra_satd_x3_8x8c && h->pixf.mbcmp[0] == h->pixf.satd[0] ) + if( i_max == 4 && b_merged_satd ) { int satdu[4], satdv[4]; - h->pixf.intra_satd_x3_8x8c( p_srcc[0], p_dstc[0], satdu ); - h->pixf.intra_satd_x3_8x8c( p_srcc[1], p_dstc[1], satdv ); + h->pixf.intra_mbcmp_x3_8x8c( p_srcc[0], p_dstc[0], satdu ); + h->pixf.intra_mbcmp_x3_8x8c( p_srcc[1], p_dstc[1], satdv ); h->predict_8x8c[I_PRED_CHROMA_P]( p_dstc[0] ); h->predict_8x8c[I_PRED_CHROMA_P]( p_dstc[1] ); satdu[I_PRED_CHROMA_P] = h->pixf.mbcmp[PIXEL_8x8]( p_dstc[0], FDEC_STRIDE, p_srcc[0], FENC_STRIDE ); satdv[I_PRED_CHROMA_P] = h->pixf.mbcmp[PIXEL_8x8]( p_dstc[1], FDEC_STRIDE, p_srcc[1], FENC_STRIDE ); - + for( i=0; ipredict_8x8c[i_mode]( p_dstc[0] ); - h->predict_8x8c[i_mode]( p_dstc[1] ); + if( h->mb.b_lossless ) + 
x264_predict_lossless_8x8_chroma( h, i_mode ); + else + { + h->predict_8x8c[i_mode]( p_dstc[0] ); + h->predict_8x8c[i_mode]( p_dstc[1] ); + } /* we calculate the cost */ i_satd = h->pixf.mbcmp[PIXEL_8x8]( p_dstc[0], FDEC_STRIDE, @@ -540,7 +726,7 @@ static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_ int i, idx; int i_max; int predict_mode[9]; - int b_merged_satd = h->pixf.intra_satd_x3_16x16 && h->pixf.mbcmp[0] == h->pixf.satd[0]; + int b_merged_satd = !!h->pixf.intra_mbcmp_x3_16x16 && !h->mb.b_lossless; /*---------------- Try all mode and calculate their score ---------------*/ @@ -549,7 +735,7 @@ static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_ if( b_merged_satd && i_max == 4 ) { - h->pixf.intra_satd_x3_16x16( p_src, p_dst, a->i_satd_i16x16_dir ); + h->pixf.intra_mbcmp_x3_16x16( p_src, p_dst, a->i_satd_i16x16_dir ); h->predict_16x16[I_PRED_16x16_P]( p_dst ); a->i_satd_i16x16_dir[I_PRED_16x16_P] = h->pixf.mbcmp[PIXEL_16x16]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE ); @@ -565,7 +751,11 @@ static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_ { int i_satd; int i_mode = predict_mode[i]; - h->predict_16x16[i_mode]( p_dst ); + + if( h->mb.b_lossless ) + x264_predict_lossless_16x16( h, i_mode ); + else + h->predict_16x16[i_mode]( p_dst ); i_satd = h->pixf.mbcmp[PIXEL_16x16]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE ) + a->i_lambda * bs_size_ue( x264_mb_pred_mode16x16_fix[i_mode] ); @@ -583,11 +773,12 @@ static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_ /* 8x8 prediction selection */ if( flags & X264_ANALYSE_I8x8 ) { - DECLARE_ALIGNED_16( uint8_t edge[33] ); - x264_pixel_cmp_t sa8d = (*h->pixf.mbcmp == *h->pixf.sad) ? h->pixf.sad[PIXEL_8x8] : h->pixf.sa8d[PIXEL_8x8]; - int i_satd_thresh = a->b_mbrd ? COST_MAX : X264_MIN( i_satd_inter, a->i_satd_i16x16 ); + ALIGNED_ARRAY_16( uint8_t, edge,[33] ); + x264_pixel_cmp_t sa8d = (h->pixf.mbcmp[0] == h->pixf.satd[0]) ? h->pixf.sa8d[PIXEL_8x8] : h->pixf.mbcmp[PIXEL_8x8]; + int i_satd_thresh = a->i_mbrd ? COST_MAX : X264_MIN( i_satd_inter, a->i_satd_i16x16 ); int i_cost = 0; - b_merged_satd = h->pixf.intra_sa8d_x3_8x8 && h->pixf.mbcmp[0] == h->pixf.satd[0]; + h->mb.i_cbp_luma = 0; + b_merged_satd = h->pixf.intra_mbcmp_x3_8x8 && !h->mb.b_lossless; // FIXME some bias like in i4x4? 
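/* Note on the merged-SATD path above: an intra_mbcmp_x3 function scores the
 * three always-available prediction modes (H, V and DC) against the source
 * block in one call, writing the three costs into an array, instead of
 * predicting and comparing one mode at a time.  A plain-C sketch of the
 * contract -- the names here are illustrative; the real versions are SIMD
 * and share prediction setup between the modes:
 *
 *     static void intra_mbcmp_x3_sketch( uint8_t *fenc, uint8_t *fdec, int res[3] )
 *     {
 *         int mode;
 *         for( mode = 0; mode < 3; mode++ )      // first three modes of the enum
 *         {
 *             predict[mode]( fdec );             // write the prediction into fdec
 *             res[mode] = mbcmp( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );
 *         }
 *     }
 *
 * Planar modes are always scored separately (as in the 16x16 and chroma paths
 * above), and the predicted-mode bias (satd[i_pred_mode] -= 3*lambda) is
 * applied to the returned array afterwards. */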
if( h->sh.i_type == SLICE_TYPE_B ) @@ -603,14 +794,13 @@ static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_ int i_pred_mode = x264_mb_predict_intra4x4_mode( h, 4*idx ); predict_4x4_mode_available( h->mb.i_neighbour8[idx], predict_mode, &i_max ); - x264_predict_8x8_filter( p_dst_by, edge, h->mb.i_neighbour8[idx], ALL_NEIGHBORS ); + h->predict_8x8_filter( p_dst_by, edge, h->mb.i_neighbour8[idx], ALL_NEIGHBORS ); if( b_merged_satd && i_max == 9 ) { - int satd[3]; - h->pixf.intra_sa8d_x3_8x8( p_src_by, edge, satd ); - if( i_pred_mode < 3 ) - satd[i_pred_mode] -= 3 * a->i_lambda; + int satd[9]; + h->pixf.intra_mbcmp_x3_8x8( p_src_by, edge, satd ); + satd[i_pred_mode] -= 3 * a->i_lambda; for( i=2; i>=0; i-- ) { int cost = a->i_satd_i8x8_dir[i][idx] = satd[i] + 4 * a->i_lambda; @@ -626,7 +816,10 @@ static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_ int i_satd; int i_mode = predict_mode[i]; - h->predict_8x8[i_mode]( p_dst_by, edge ); + if( h->mb.b_lossless ) + x264_predict_lossless_8x8( h, p_dst_by, idx, i_mode, edge ); + else + h->predict_8x8[i_mode]( p_dst_by, edge ); i_satd = sa8d( p_dst_by, FDEC_STRIDE, p_src_by, FENC_STRIDE ) + a->i_lambda * (i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) ? 1 : 4); @@ -652,16 +845,22 @@ static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_ if( h->mb.i_skip_intra ) { h->mc.copy[PIXEL_16x16]( h->mb.pic.i8x8_fdec_buf, 16, p_dst, FDEC_STRIDE, 16 ); + h->mb.pic.i8x8_nnz_buf[0] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]]; + h->mb.pic.i8x8_nnz_buf[1] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]]; + h->mb.pic.i8x8_nnz_buf[2] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]]; + h->mb.pic.i8x8_nnz_buf[3] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]]; + h->mb.pic.i8x8_cbp = h->mb.i_cbp_luma; if( h->mb.i_skip_intra == 2 ) h->mc.memcpy_aligned( h->mb.pic.i8x8_dct_buf, h->dct.luma8x8, sizeof(h->mb.pic.i8x8_dct_buf) ); } } else { + static const uint16_t cost_div_fix8[3] = {1024,512,341}; a->i_satd_i8x8 = COST_MAX; - i_cost = i_cost * 4/(idx+1); + i_cost = (i_cost * cost_div_fix8[idx]) >> 8; } - if( X264_MIN(i_cost, a->i_satd_i16x16) > i_satd_inter*(5+a->b_mbrd)/4 ) + if( X264_MIN(i_cost, a->i_satd_i16x16) > i_satd_inter*(5+!!a->i_mbrd)/4 ) return; } @@ -670,8 +869,9 @@ static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_ { int i_cost; int i_satd_thresh = X264_MIN3( i_satd_inter, a->i_satd_i16x16, a->i_satd_i8x8 ); - b_merged_satd = h->pixf.intra_satd_x3_4x4 && h->pixf.mbcmp[0] == h->pixf.satd[0]; - if( a->b_mbrd ) + h->mb.i_cbp_luma = 0; + b_merged_satd = h->pixf.intra_mbcmp_x3_4x4 && !h->mb.b_lossless; + if( a->i_mbrd ) i_satd_thresh = i_satd_thresh * (10-a->b_fast_intra)/8; i_cost = a->i_lambda * 24; /* from JVT (SATD0) */ @@ -680,10 +880,8 @@ static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_ for( idx = 0;; idx++ ) { - int x = block_idx_x[idx]; - int y = block_idx_y[idx]; - uint8_t *p_src_by = p_src + 4*x + 4*y*FENC_STRIDE; - uint8_t *p_dst_by = p_dst + 4*x + 4*y*FDEC_STRIDE; + uint8_t *p_src_by = p_src + block_idx_xy_fenc[idx]; + uint8_t *p_dst_by = p_dst + block_idx_xy_fdec[idx]; int i_best = COST_MAX; int i_pred_mode = x264_mb_predict_intra4x4_mode( h, idx ); @@ -695,10 +893,9 @@ static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_ if( b_merged_satd && i_max >= 6 ) { - int satd[3]; - h->pixf.intra_satd_x3_4x4( p_src_by, p_dst_by, satd ); - if( 
i_pred_mode < 3 ) - satd[i_pred_mode] -= 3 * a->i_lambda; + int satd[9]; + h->pixf.intra_mbcmp_x3_4x4( p_src_by, p_dst_by, satd ); + satd[i_pred_mode] -= 3 * a->i_lambda; for( i=2; i>=0; i-- ) COPY2_IF_LT( i_best, satd[i] + 4 * a->i_lambda, a->i_predict4x4[idx], i ); @@ -711,8 +908,10 @@ static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_ { int i_satd; int i_mode = predict_mode[i]; - - h->predict_4x4[i_mode]( p_dst_by ); + if( h->mb.b_lossless ) + x264_predict_lossless_4x4( h, p_dst_by, idx, i_mode ); + else + h->predict_4x4[i_mode]( p_dst_by ); i_satd = h->pixf.mbcmp[PIXEL_4x4]( p_dst_by, FDEC_STRIDE, p_src_by, FENC_STRIDE ) @@ -737,6 +936,11 @@ static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_ if( h->mb.i_skip_intra ) { h->mc.copy[PIXEL_16x16]( h->mb.pic.i4x4_fdec_buf, 16, p_dst, FDEC_STRIDE, 16 ); + h->mb.pic.i4x4_nnz_buf[0] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]]; + h->mb.pic.i4x4_nnz_buf[1] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]]; + h->mb.pic.i4x4_nnz_buf[2] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]]; + h->mb.pic.i4x4_nnz_buf[3] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]]; + h->mb.pic.i4x4_cbp = h->mb.i_cbp_luma; if( h->mb.i_skip_intra == 2 ) h->mc.memcpy_aligned( h->mb.pic.i4x4_dct_buf, h->dct.luma4x4, sizeof(h->mb.pic.i4x4_dct_buf) ); } @@ -771,6 +975,7 @@ static void x264_intra_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd_thresh ) h->mb.i_type = I_8x8; x264_analyse_update_cache( h, a ); a->i_satd_i8x8 = x264_rd_cost_mb( h, a->i_lambda2 ); + a->i_cbp_i8x8_luma = h->mb.i_cbp_luma; } else a->i_satd_i8x8 = COST_MAX; @@ -778,12 +983,11 @@ static void x264_intra_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd_thresh ) static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a ) { - uint8_t *p_src = h->mb.pic.p_fenc[0]; uint8_t *p_dst = h->mb.pic.p_fdec[0]; int i, j, idx, x, y; - int i_max, i_satd, i_best, i_mode, i_thresh; - int i_pred_mode; + int i_max, i_mode, i_thresh; + uint64_t i_satd, i_best; int predict_mode[9]; h->mb.i_skip_intra = 0; @@ -803,22 +1007,59 @@ static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a ) COPY2_IF_LT( i_best, i_satd, a->i_predict16x16, i_mode ); } } - else if( h->mb.i_type == I_4x4 ) + + /* RD selection for chroma prediction */ + predict_8x8chroma_mode_available( h->mb.i_neighbour, predict_mode, &i_max ); + if( i_max > 1 ) + { + i_thresh = a->i_satd_i8x8chroma * 5/4; + + for( i = j = 0; i < i_max; i++ ) + if( a->i_satd_i8x8chroma_dir[i] < i_thresh && + predict_mode[i] != a->i_predict8x8chroma ) + { + predict_mode[j++] = predict_mode[i]; + } + i_max = j; + + if( i_max > 0 ) + { + int i_cbp_chroma_best = h->mb.i_cbp_chroma; + int i_chroma_lambda = x264_lambda2_tab[h->mb.i_chroma_qp]; + /* the previous thing encoded was x264_intra_rd(), so the pixels and + * coefs for the current chroma mode are still around, so we only + * have to recount the bits. */ + i_best = x264_rd_cost_i8x8_chroma( h, i_chroma_lambda, a->i_predict8x8chroma, 0 ); + for( i = 0; i < i_max; i++ ) + { + i_mode = predict_mode[i]; + if( h->mb.b_lossless ) + x264_predict_lossless_8x8_chroma( h, i_mode ); + else + { + h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] ); + h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] ); + } + /* if we've already found a mode that needs no residual, then + * probably any mode with a residual will be worse. + * so avoid dct on the remaining modes to improve speed. 
*/ + i_satd = x264_rd_cost_i8x8_chroma( h, i_chroma_lambda, i_mode, h->mb.i_cbp_chroma != 0x00 ); + COPY3_IF_LT( i_best, i_satd, a->i_predict8x8chroma, i_mode, i_cbp_chroma_best, h->mb.i_cbp_chroma ); + } + h->mb.i_chroma_pred_mode = a->i_predict8x8chroma; + h->mb.i_cbp_chroma = i_cbp_chroma_best; + } + } + + if( h->mb.i_type == I_4x4 ) { uint32_t pels[4] = {0}; // doesn't need initting, just shuts up a gcc warning int i_nnz = 0; for( idx = 0; idx < 16; idx++ ) { - uint8_t *p_src_by; - uint8_t *p_dst_by; - i_best = COST_MAX; + uint8_t *p_dst_by = p_dst + block_idx_xy_fdec[idx]; + i_best = COST_MAX64; - i_pred_mode = x264_mb_predict_intra4x4_mode( h, idx ); - x = block_idx_x[idx]; - y = block_idx_y[idx]; - - p_src_by = p_src + 4*x + 4*y*FENC_STRIDE; - p_dst_by = p_dst + 4*x + 4*y*FDEC_STRIDE; predict_4x4_mode_available( h->mb.i_neighbour4[idx], predict_mode, &i_max ); if( (h->mb.i_neighbour4[idx] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP ) @@ -828,7 +1069,10 @@ static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a ) for( i = 0; i < i_max; i++ ) { i_mode = predict_mode[i]; - h->predict_4x4[i_mode]( p_dst_by ); + if( h->mb.b_lossless ) + x264_predict_lossless_4x4( h, p_dst_by, idx, i_mode ); + else + h->predict_4x4[i_mode]( p_dst_by ); i_satd = x264_rd_cost_i4x4( h, a->i_lambda2, idx, i_mode ); if( i_best > i_satd ) @@ -854,95 +1098,62 @@ static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a ) } else if( h->mb.i_type == I_8x8 ) { - DECLARE_ALIGNED_16( uint8_t edge[33] ); + ALIGNED_ARRAY_16( uint8_t, edge,[33] ); for( idx = 0; idx < 4; idx++ ) { uint64_t pels_h = 0; uint8_t pels_v[7]; - int i_nnz[3]; - uint8_t *p_src_by; + uint16_t i_nnz[2]; uint8_t *p_dst_by; int j; + int cbp_luma_new = 0; i_thresh = a->i_satd_i8x8_dir[a->i_predict8x8[idx]][idx] * 11/8; - i_best = COST_MAX; - i_pred_mode = x264_mb_predict_intra4x4_mode( h, 4*idx ); + i_best = COST_MAX64; x = idx&1; y = idx>>1; - p_src_by = p_src + 8*x + 8*y*FENC_STRIDE; p_dst_by = p_dst + 8*x + 8*y*FDEC_STRIDE; predict_4x4_mode_available( h->mb.i_neighbour8[idx], predict_mode, &i_max ); - x264_predict_8x8_filter( p_dst_by, edge, h->mb.i_neighbour8[idx], ALL_NEIGHBORS ); + h->predict_8x8_filter( p_dst_by, edge, h->mb.i_neighbour8[idx], ALL_NEIGHBORS ); for( i = 0; i < i_max; i++ ) { i_mode = predict_mode[i]; if( a->i_satd_i8x8_dir[i_mode][idx] > i_thresh ) continue; - h->predict_8x8[i_mode]( p_dst_by, edge ); + if( h->mb.b_lossless ) + x264_predict_lossless_8x8( h, p_dst_by, idx, i_mode, edge ); + else + h->predict_8x8[i_mode]( p_dst_by, edge ); + h->mb.i_cbp_luma = a->i_cbp_i8x8_luma; i_satd = x264_rd_cost_i8x8( h, a->i_lambda2, idx, i_mode ); if( i_best > i_satd ) { a->i_predict8x8[idx] = i_mode; + cbp_luma_new = h->mb.i_cbp_luma; i_best = i_satd; pels_h = *(uint64_t*)(p_dst_by+7*FDEC_STRIDE); if( !(idx&1) ) for( j=0; j<7; j++ ) pels_v[j] = p_dst_by[7+j*FDEC_STRIDE]; - for( j=0; j<3; j++ ) - i_nnz[j] = h->mb.cache.non_zero_count[x264_scan8[4*idx+j+1]]; + i_nnz[0] = *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[4*idx+0]]; + i_nnz[1] = *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[4*idx+2]]; } } - + a->i_cbp_i8x8_luma = cbp_luma_new; *(uint64_t*)(p_dst_by+7*FDEC_STRIDE) = pels_h; if( !(idx&1) ) for( j=0; j<7; j++ ) p_dst_by[7+j*FDEC_STRIDE] = pels_v[j]; - for( j=0; j<3; j++ ) - h->mb.cache.non_zero_count[x264_scan8[4*idx+j+1]] = i_nnz[j]; + *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[4*idx+0]] = i_nnz[0]; + *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[4*idx+2]] = i_nnz[1]; 
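/* The pels/nnz bookkeeping in the 4x4 and 8x8 loops above is the usual
 * pattern for RD trials: x264_rd_cost_i4x4/i8x8() perform a real encode, so
 * each candidate clobbers the reconstructed pixels and the non-zero-count
 * cache.  Later blocks only read the bottom row, the right column and the
 * nnz values of this one, so just that state is snapshotted whenever a
 * candidate becomes the new best, then written back after the loop.
 * Schematically (hypothetical helper names, not actual x264 API):
 *
 *     snapshot( &best );              // edge pixels + nnz of the current best
 *     for( each candidate mode )
 *     {
 *         encode and score the mode;
 *         if( it beats the best )
 *             snapshot( &best );
 *     }
 *     restore( &best );               // leave the winner's state live
 *
 * Saving a few edge pixels and two 16-bit nnz words is much cheaper than
 * re-encoding the winning mode a second time at the end. */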
x264_macroblock_cache_intra8x8_pred( h, 2*x, 2*y, a->i_predict8x8[idx] ); } } - - /* RD selection for chroma prediction */ - predict_8x8chroma_mode_available( h->mb.i_neighbour, predict_mode, &i_max ); - if( i_max > 1 ) - { - i_thresh = a->i_satd_i8x8chroma * 5/4; - - for( i = j = 0; i < i_max; i++ ) - if( a->i_satd_i8x8chroma_dir[i] < i_thresh && - predict_mode[i] != a->i_predict8x8chroma ) - { - predict_mode[j++] = predict_mode[i]; - } - i_max = j; - - if( i_max > 0 ) - { - int i_chroma_lambda = i_qp0_cost2_table[h->mb.i_chroma_qp]; - /* the previous thing encoded was x264_intra_rd(), so the pixels and - * coefs for the current chroma mode are still around, so we only - * have to recount the bits. */ - i_best = x264_rd_cost_i8x8_chroma( h, i_chroma_lambda, a->i_predict8x8chroma, 0 ); - for( i = 0; i < i_max; i++ ) - { - i_mode = predict_mode[i]; - h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] ); - h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] ); - /* if we've already found a mode that needs no residual, then - * probably any mode with a residual will be worse. - * so avoid dct on the remaining modes to improve speed. */ - i_satd = x264_rd_cost_i8x8_chroma( h, i_chroma_lambda, i_mode, h->mb.i_cbp_chroma != 0x00 ); - COPY2_IF_LT( i_best, i_satd, a->i_predict8x8chroma, i_mode ); - } - h->mb.i_chroma_pred_mode = a->i_predict8x8chroma; - } - } } #define LOAD_FENC( m, src, xoff, yoff) \ @@ -962,13 +1173,13 @@ static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a ) (m)->integral = &h->mb.pic.p_integral[list][ref][(xoff)+(yoff)*(m)->i_stride[0]]; #define REF_COST(list, ref) \ - (a->i_lambda * bs_size_te( h->sh.i_num_ref_idx_l##list##_active - 1, ref )) + (a->p_cost_ref##list[ref]) static void x264_mb_analyse_inter_p16x16( x264_t *h, x264_mb_analysis_t *a ) { x264_me_t m; int i_ref, i_mvc; - DECLARE_ALIGNED_4( int16_t mvc[7][2] ); + ALIGNED_4( int16_t mvc[8][2] ); int i_halfpel_thresh = INT_MAX; int *p_halfpel_thresh = h->mb.pic.i_fref[0]>1 ? 
&i_halfpel_thresh : NULL; @@ -1010,10 +1221,10 @@ static void x264_mb_analyse_inter_p16x16( x264_t *h, x264_mb_analysis_t *a ) i_halfpel_thresh += i_ref_cost; if( m.cost < a->l0.me16x16.cost ) - a->l0.me16x16 = m; + h->mc.memcpy_aligned( &a->l0.me16x16, &m, sizeof(x264_me_t) ); /* save mv for predicting neighbors */ - *(uint32_t*)a->l0.mvc[i_ref][0] = + *(uint32_t*)a->l0.mvc[i_ref][0] = *(uint32_t*)h->mb.mvr[0][i_ref][h->mb.i_mb_xy] = *(uint32_t*)m.mv; } @@ -1021,12 +1232,17 @@ static void x264_mb_analyse_inter_p16x16( x264_t *h, x264_mb_analysis_t *a ) assert( a->l0.me16x16.mv[1] <= h->mb.mv_max_spel[1] || h->param.i_threads == 1 ); h->mb.i_type = P_L0; - if( a->b_mbrd && a->l0.me16x16.i_ref == 0 - && *(uint32_t*)a->l0.me16x16.mv == *(uint32_t*)h->mb.cache.pskip_mv ) + if( a->i_mbrd ) { - h->mb.i_partition = D_16x16; - x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv ); - a->l0.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 ); + x264_mb_cache_fenc_satd( h ); + if( a->l0.me16x16.i_ref == 0 && *(uint32_t*)a->l0.me16x16.mv == *(uint32_t*)h->mb.cache.pskip_mv ) + { + h->mb.i_partition = D_16x16; + x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv ); + a->l0.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 ); + if( !(h->mb.i_cbp_luma|h->mb.i_cbp_chroma) ) + h->mb.i_type = P_SKIP; + } } } @@ -1072,22 +1288,22 @@ static void x264_mb_analyse_inter_p8x8_mixed_ref( x264_t *h, x264_mb_analysis_t l0m->cost = INT_MAX; for( i_ref = 0; i_ref <= i_maxref; i_ref++ ) { - const int i_ref_cost = REF_COST( 0, i_ref ); - i_halfpel_thresh -= i_ref_cost; - m.i_ref_cost = i_ref_cost; - m.i_ref = i_ref; - - LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*x8, 8*y8 ); - x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, i_ref ); - x264_mb_predict_mv( h, 0, 4*i, 2, m.mvp ); - x264_me_search_ref( h, &m, a->l0.mvc[i_ref], i+1, p_halfpel_thresh ); - - m.cost += i_ref_cost; - i_halfpel_thresh += i_ref_cost; - *(uint32_t*)a->l0.mvc[i_ref][i+1] = *(uint32_t*)m.mv; - - if( m.cost < l0m->cost ) - *l0m = m; + const int i_ref_cost = REF_COST( 0, i_ref ); + i_halfpel_thresh -= i_ref_cost; + m.i_ref_cost = i_ref_cost; + m.i_ref = i_ref; + + LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*x8, 8*y8 ); + x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, i_ref ); + x264_mb_predict_mv( h, 0, 4*i, 2, m.mvp ); + x264_me_search_ref( h, &m, a->l0.mvc[i_ref], i+1, p_halfpel_thresh ); + + m.cost += i_ref_cost; + i_halfpel_thresh += i_ref_cost; + *(uint32_t*)a->l0.mvc[i_ref][i+1] = *(uint32_t*)m.mv; + + if( m.cost < l0m->cost ) + h->mc.memcpy_aligned( l0m, &m, sizeof(x264_me_t) ); } x264_macroblock_cache_mv_ptr( h, 2*x8, 2*y8, 2, 2, 0, l0m->mv ); x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, l0m->i_ref ); @@ -1098,6 +1314,10 @@ static void x264_mb_analyse_inter_p8x8_mixed_ref( x264_t *h, x264_mb_analysis_t a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost + a->l0.me8x8[2].cost + a->l0.me8x8[3].cost; + /* P_8x8 ref0 has no ref cost */ + if( !h->param.b_cabac && !(a->l0.me8x8[0].i_ref | a->l0.me8x8[1].i_ref | + a->l0.me8x8[2].i_ref | a->l0.me8x8[3].i_ref) ) + a->l0.i_cost8x8 -= REF_COST( 0, 0 ) * 4; h->mb.i_sub_partition[0] = h->mb.i_sub_partition[1] = h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8; } @@ -1105,7 +1325,7 @@ static void x264_mb_analyse_inter_p8x8_mixed_ref( x264_t *h, x264_mb_analysis_t static void x264_mb_analyse_inter_p8x8( x264_t *h, x264_mb_analysis_t *a ) { const int i_ref = a->l0.me16x16.i_ref; - const int i_ref_cost = REF_COST( 0, i_ref ); + const 
int i_ref_cost = h->param.b_cabac || i_ref ? REF_COST( 0, i_ref ) : 0; uint8_t **p_fref = h->mb.pic.p_fref[0][i_ref]; uint8_t **p_fenc = h->mb.pic.p_fenc; int i_mvc; @@ -1144,11 +1364,12 @@ static void x264_mb_analyse_inter_p8x8( x264_t *h, x264_mb_analysis_t *a ) m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8]; } + a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost + + a->l0.me8x8[2].cost + a->l0.me8x8[3].cost; /* theoretically this should include 4*ref_cost, * but 3 seems a better approximation of cabac. */ - a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost + - a->l0.me8x8[2].cost + a->l0.me8x8[3].cost - - REF_COST( 0, a->l0.me16x16.i_ref ); + if( h->param.b_cabac ) + a->l0.i_cost8x8 -= i_ref_cost; h->mb.i_sub_partition[0] = h->mb.i_sub_partition[1] = h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8; } @@ -1157,7 +1378,7 @@ static void x264_mb_analyse_inter_p16x8( x264_t *h, x264_mb_analysis_t *a ) { x264_me_t m; uint8_t **p_fenc = h->mb.pic.p_fenc; - DECLARE_ALIGNED_4( int16_t mvc[3][2] ); + ALIGNED_4( int16_t mvc[3][2] ); int i, j; /* XXX Needed for x264_mb_predict_mv */ @@ -1176,25 +1397,25 @@ static void x264_mb_analyse_inter_p16x8( x264_t *h, x264_mb_analysis_t *a ) l0m->cost = INT_MAX; for( j = 0; j < i_ref8s; j++ ) { - const int i_ref = ref8[j]; - const int i_ref_cost = REF_COST( 0, i_ref ); - m.i_ref_cost = i_ref_cost; - m.i_ref = i_ref; + const int i_ref = ref8[j]; + const int i_ref_cost = REF_COST( 0, i_ref ); + m.i_ref_cost = i_ref_cost; + m.i_ref = i_ref; - /* if we skipped the 16x16 predictor, we wouldn't have to copy anything... */ - *(uint32_t*)mvc[0] = *(uint32_t*)a->l0.mvc[i_ref][0]; - *(uint32_t*)mvc[1] = *(uint32_t*)a->l0.mvc[i_ref][2*i+1]; - *(uint32_t*)mvc[2] = *(uint32_t*)a->l0.mvc[i_ref][2*i+2]; + /* if we skipped the 16x16 predictor, we wouldn't have to copy anything... 
*/ + *(uint32_t*)mvc[0] = *(uint32_t*)a->l0.mvc[i_ref][0]; + *(uint32_t*)mvc[1] = *(uint32_t*)a->l0.mvc[i_ref][2*i+1]; + *(uint32_t*)mvc[2] = *(uint32_t*)a->l0.mvc[i_ref][2*i+2]; - LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 8*i ); - x264_macroblock_cache_ref( h, 0, 2*i, 4, 2, 0, i_ref ); - x264_mb_predict_mv( h, 0, 8*i, 4, m.mvp ); - x264_me_search( h, &m, mvc, 3 ); + LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 8*i ); + x264_macroblock_cache_ref( h, 0, 2*i, 4, 2, 0, i_ref ); + x264_mb_predict_mv( h, 0, 8*i, 4, m.mvp ); + x264_me_search( h, &m, mvc, 3 ); - m.cost += i_ref_cost; + m.cost += i_ref_cost; - if( m.cost < l0m->cost ) - *l0m = m; + if( m.cost < l0m->cost ) + h->mc.memcpy_aligned( l0m, &m, sizeof(x264_me_t) ); } x264_macroblock_cache_mv_ptr( h, 0, 2*i, 4, 2, 0, l0m->mv ); x264_macroblock_cache_ref( h, 0, 2*i, 4, 2, 0, l0m->i_ref ); @@ -1207,7 +1428,7 @@ static void x264_mb_analyse_inter_p8x16( x264_t *h, x264_mb_analysis_t *a ) { x264_me_t m; uint8_t **p_fenc = h->mb.pic.p_fenc; - DECLARE_ALIGNED_4( int16_t mvc[3][2] ); + ALIGNED_4( int16_t mvc[3][2] ); int i, j; /* XXX Needed for x264_mb_predict_mv */ @@ -1226,24 +1447,24 @@ static void x264_mb_analyse_inter_p8x16( x264_t *h, x264_mb_analysis_t *a ) l0m->cost = INT_MAX; for( j = 0; j < i_ref8s; j++ ) { - const int i_ref = ref8[j]; - const int i_ref_cost = REF_COST( 0, i_ref ); - m.i_ref_cost = i_ref_cost; - m.i_ref = i_ref; + const int i_ref = ref8[j]; + const int i_ref_cost = REF_COST( 0, i_ref ); + m.i_ref_cost = i_ref_cost; + m.i_ref = i_ref; - *(uint32_t*)mvc[0] = *(uint32_t*)a->l0.mvc[i_ref][0]; - *(uint32_t*)mvc[1] = *(uint32_t*)a->l0.mvc[i_ref][i+1]; - *(uint32_t*)mvc[2] = *(uint32_t*)a->l0.mvc[i_ref][i+3]; + *(uint32_t*)mvc[0] = *(uint32_t*)a->l0.mvc[i_ref][0]; + *(uint32_t*)mvc[1] = *(uint32_t*)a->l0.mvc[i_ref][i+1]; + *(uint32_t*)mvc[2] = *(uint32_t*)a->l0.mvc[i_ref][i+3]; - LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*i, 0 ); - x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, 0, i_ref ); - x264_mb_predict_mv( h, 0, 4*i, 2, m.mvp ); - x264_me_search( h, &m, mvc, 3 ); + LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*i, 0 ); + x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, 0, i_ref ); + x264_mb_predict_mv( h, 0, 4*i, 2, m.mvp ); + x264_me_search( h, &m, mvc, 3 ); - m.cost += i_ref_cost; + m.cost += i_ref_cost; - if( m.cost < l0m->cost ) - *l0m = m; + if( m.cost < l0m->cost ) + h->mc.memcpy_aligned( l0m, &m, sizeof(x264_me_t) ); } x264_macroblock_cache_mv_ptr( h, 2*i, 0, 2, 4, 0, l0m->mv ); x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, 0, l0m->i_ref ); @@ -1254,7 +1475,7 @@ static void x264_mb_analyse_inter_p8x16( x264_t *h, x264_mb_analysis_t *a ) static int x264_mb_analyse_inter_p4x4_chroma( x264_t *h, x264_mb_analysis_t *a, uint8_t **p_fref, int i8x8, int pixel ) { - DECLARE_ALIGNED_8( uint8_t pix1[16*8] ); + ALIGNED_8( uint8_t pix1[16*8] ); uint8_t *pix2 = pix1+8; const int i_stride = h->mb.pic.i_stride[1]; const int or = 4*(i8x8&1) + 2*(i8x8&2)*i_stride; @@ -1423,26 +1644,21 @@ static void x264_mb_analyse_inter_direct( x264_t *h, x264_mb_analysis_t *a ) } } -#define WEIGHTED_AVG( size, pix1, stride1, src2, stride2 ) \ - { \ - if( h->param.analyse.b_weighted_bipred ) \ - h->mc.avg_weight[size]( pix1, stride1, src2, stride2, \ - h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] ); \ - else \ - h->mc.avg[size]( pix1, stride1, src2, stride2 ); \ - } +#define WEIGHTED_AVG( size, pix, stride, src1, stride1, src2, stride2 ) \ +{ \ + h->mc.avg[size]( pix, stride, src1, stride1, src2, stride2, 
h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] ); \ +} static void x264_mb_analyse_inter_b16x16( x264_t *h, x264_mb_analysis_t *a ) { - DECLARE_ALIGNED_16( uint8_t pix1[16*16] ); - DECLARE_ALIGNED_16( uint8_t pix2[16*16] ); - uint8_t *src2; - int stride2 = 16; - int weight; + ALIGNED_ARRAY_16( uint8_t, pix0,[16*16] ); + ALIGNED_ARRAY_16( uint8_t, pix1,[16*16] ); + uint8_t *src0, *src1; + int stride0 = 16, stride1 = 16; x264_me_t m; int i_ref, i_mvc; - DECLARE_ALIGNED_4( int16_t mvc[8][2] ); + ALIGNED_4( int16_t mvc[9][2] ); int i_halfpel_thresh = INT_MAX; int *p_halfpel_thresh = h->mb.pic.i_fref[0]>1 ? &i_halfpel_thresh : NULL; @@ -1467,7 +1683,7 @@ static void x264_mb_analyse_inter_b16x16( x264_t *h, x264_mb_analysis_t *a ) if( m.cost < a->l0.me16x16.cost ) { a->l0.i_ref = i_ref; - a->l0.me16x16 = m; + h->mc.memcpy_aligned( &a->l0.me16x16, &m, sizeof(x264_me_t) ); } /* save mv for predicting neighbors */ @@ -1494,7 +1710,7 @@ static void x264_mb_analyse_inter_b16x16( x264_t *h, x264_mb_analysis_t *a ) if( m.cost < a->l1.me16x16.cost ) { a->l1.i_ref = i_ref; - a->l1.me16x16 = m; + h->mc.memcpy_aligned( &a->l1.me16x16, &m, sizeof(x264_me_t) ); } /* save mv for predicting neighbors */ @@ -1508,41 +1724,16 @@ static void x264_mb_analyse_inter_b16x16( x264_t *h, x264_mb_analysis_t *a ) x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, a->l1.i_ref ); /* get cost of BI mode */ - weight = h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref]; - if ( (*(uint32_t*)a->l0.me16x16.mv & 0x10001) == 0 ) - { - /* l0 reference is halfpel, so get_ref on it will make it faster */ - src2 = - h->mc.get_ref( pix2, &stride2, - h->mb.pic.p_fref[0][a->l0.i_ref], h->mb.pic.i_stride[0], - a->l0.me16x16.mv[0], a->l0.me16x16.mv[1], - 16, 16 ); - h->mc.mc_luma( pix1, 16, - h->mb.pic.p_fref[1][a->l1.i_ref], h->mb.pic.i_stride[0], - a->l1.me16x16.mv[0], a->l1.me16x16.mv[1], - 16, 16 ); - weight = 64 - weight; - } - else - { - /* if l0 was qpel, we'll use get_ref on l1 instead */ - h->mc.mc_luma( pix1, 16, - h->mb.pic.p_fref[0][a->l0.i_ref], h->mb.pic.i_stride[0], - a->l0.me16x16.mv[0], a->l0.me16x16.mv[1], - 16, 16 ); - src2 = - h->mc.get_ref( pix2, &stride2, - h->mb.pic.p_fref[1][a->l1.i_ref], h->mb.pic.i_stride[0], - a->l1.me16x16.mv[0], a->l1.me16x16.mv[1], - 16, 16 ); - } + src0 = h->mc.get_ref( pix0, &stride0, + h->mb.pic.p_fref[0][a->l0.i_ref], h->mb.pic.i_stride[0], + a->l0.me16x16.mv[0], a->l0.me16x16.mv[1], 16, 16 ); + src1 = h->mc.get_ref( pix1, &stride1, + h->mb.pic.p_fref[1][a->l1.i_ref], h->mb.pic.i_stride[0], + a->l1.me16x16.mv[0], a->l1.me16x16.mv[1], 16, 16 ); - if( h->param.analyse.b_weighted_bipred ) - h->mc.avg_weight[PIXEL_16x16]( pix1, 16, src2, stride2, weight ); - else - h->mc.avg[PIXEL_16x16]( pix1, 16, src2, stride2 ); + h->mc.avg[PIXEL_16x16]( pix0, 16, src0, stride0, src1, stride1, h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] ); - a->i_cost16x16bi = h->pixf.mbcmp[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE, pix1, 16 ) + a->i_cost16x16bi = h->pixf.mbcmp[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE, pix0, 16 ) + REF_COST( 0, a->l0.i_ref ) + REF_COST( 1, a->l1.i_ref ) + a->l0.me16x16.cost_mv @@ -1644,7 +1835,7 @@ static void x264_mb_analyse_inter_b8x8( x264_t *h, x264_mb_analysis_t *a ) uint8_t **p_fref[2] = { h->mb.pic.p_fref[0][a->l0.i_ref], h->mb.pic.p_fref[1][a->l1.i_ref] }; - DECLARE_ALIGNED_8( uint8_t pix[2][8*8] ); + ALIGNED_8( uint8_t pix[2][8*8] ); int i, l; /* XXX Needed for x264_mb_predict_mv */ @@ -1658,6 +1849,8 @@ static void x264_mb_analyse_inter_b8x8( x264_t *h, x264_mb_analysis_t *a ) 
const int y8 = i/2; int i_part_cost; int i_part_cost_bi = 0; + int stride[2] = {8,8}; + uint8_t *src[2]; for( l = 0; l < 2; l++ ) { @@ -1676,13 +1869,12 @@ static void x264_mb_analyse_inter_b8x8( x264_t *h, x264_mb_analysis_t *a ) x264_macroblock_cache_mv_ptr( h, 2*x8, 2*y8, 2, 2, l, m->mv ); /* BI mode */ - h->mc.mc_luma( pix[l], 8, m->p_fref, m->i_stride[0], - m->mv[0], m->mv[1], 8, 8 ); + src[l] = h->mc.get_ref( pix[l], &stride[l], m->p_fref, m->i_stride[0], + m->mv[0], m->mv[1], 8, 8 ); i_part_cost_bi += m->cost_mv; /* FIXME: ref cost */ } - - WEIGHTED_AVG( PIXEL_8x8, pix[0], 8, pix[1], 8 ); + h->mc.avg[PIXEL_8x8]( pix[0], 8, src[0], stride[0], src[1], stride[1], h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] ); i_part_cost_bi += h->pixf.mbcmp[PIXEL_8x8]( a->l0.me8x8[i].p_fenc[0], FENC_STRIDE, pix[0], 8 ) + a->i_lambda * i_sub_mb_b_cost_table[D_BI_8x8]; a->l0.me8x8[i].cost += a->i_lambda * i_sub_mb_b_cost_table[D_L0_8x8]; @@ -1708,8 +1900,8 @@ static void x264_mb_analyse_inter_b16x8( x264_t *h, x264_mb_analysis_t *a ) uint8_t **p_fref[2] = { h->mb.pic.p_fref[0][a->l0.i_ref], h->mb.pic.p_fref[1][a->l1.i_ref] }; - DECLARE_ALIGNED_16( uint8_t pix[2][16*8] ); - DECLARE_ALIGNED_4( int16_t mvc[2][2] ); + ALIGNED_ARRAY_16( uint8_t, pix,[2],[16*8] ); + ALIGNED_4( int16_t mvc[2][2] ); int i, l; h->mb.i_partition = D_16x8; @@ -1719,6 +1911,8 @@ static void x264_mb_analyse_inter_b16x8( x264_t *h, x264_mb_analysis_t *a ) { int i_part_cost; int i_part_cost_bi = 0; + int stride[2] = {16,16}; + uint8_t *src[2]; /* TODO: check only the list(s) that were used in b8x8? */ for( l = 0; l < 2; l++ ) @@ -1739,13 +1933,12 @@ static void x264_mb_analyse_inter_b16x8( x264_t *h, x264_mb_analysis_t *a ) x264_me_search( h, m, mvc, 2 ); /* BI mode */ - h->mc.mc_luma( pix[l], 16, m->p_fref, m->i_stride[0], - m->mv[0], m->mv[1], 16, 8 ); + src[l] = h->mc.get_ref( pix[l], &stride[l], m->p_fref, m->i_stride[0], + m->mv[0], m->mv[1], 16, 8 ); /* FIXME: ref cost */ i_part_cost_bi += m->cost_mv; } - - WEIGHTED_AVG( PIXEL_16x8, pix[0], 16, pix[1], 16 ); + h->mc.avg[PIXEL_16x8]( pix[0], 16, src[0], stride[0], src[1], stride[1], h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] ); i_part_cost_bi += h->pixf.mbcmp[PIXEL_16x8]( a->l0.me16x8[i].p_fenc[0], FENC_STRIDE, pix[0], 16 ); i_part_cost = a->l0.me16x8[i].cost; @@ -1777,8 +1970,8 @@ static void x264_mb_analyse_inter_b8x16( x264_t *h, x264_mb_analysis_t *a ) uint8_t **p_fref[2] = { h->mb.pic.p_fref[0][a->l0.i_ref], h->mb.pic.p_fref[1][a->l1.i_ref] }; - DECLARE_ALIGNED_8( uint8_t pix[2][8*16] ); - DECLARE_ALIGNED_4( int16_t mvc[2][2] ); + ALIGNED_8( uint8_t pix[2][8*16] ); + ALIGNED_4( int16_t mvc[2][2] ); int i, l; h->mb.i_partition = D_8x16; @@ -1788,6 +1981,8 @@ static void x264_mb_analyse_inter_b8x16( x264_t *h, x264_mb_analysis_t *a ) { int i_part_cost; int i_part_cost_bi = 0; + int stride[2] = {8,8}; + uint8_t *src[2]; for( l = 0; l < 2; l++ ) { @@ -1807,13 +2002,13 @@ static void x264_mb_analyse_inter_b8x16( x264_t *h, x264_mb_analysis_t *a ) x264_me_search( h, m, mvc, 2 ); /* BI mode */ - h->mc.mc_luma( pix[l], 8, m->p_fref, m->i_stride[0], - m->mv[0], m->mv[1], 8, 16 ); + src[l] = h->mc.get_ref( pix[l], &stride[l], m->p_fref, m->i_stride[0], + m->mv[0], m->mv[1], 8, 16 ); /* FIXME: ref cost */ i_part_cost_bi += m->cost_mv; } - WEIGHTED_AVG( PIXEL_8x16, pix[0], 8, pix[1], 8 ); + h->mc.avg[PIXEL_8x16]( pix[0], 8, src[0], stride[0], src[1], stride[1], h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] ); i_part_cost_bi += h->pixf.mbcmp[PIXEL_8x16]( a->l0.me8x16[i].p_fenc[0], 
FENC_STRIDE, pix[0], 8 ); i_part_cost = a->l0.me8x16[i].cost; @@ -1851,7 +2046,6 @@ static void x264_mb_analyse_p_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd ) x264_analyse_update_cache( h, a ); a->l0.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 ); } - a->l0.me16x16.cost = a->l0.i_rd16x16; if( a->l0.i_cost16x8 <= thresh ) { @@ -1875,35 +2069,38 @@ static void x264_mb_analyse_p_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd ) { h->mb.i_type = P_8x8; h->mb.i_partition = D_8x8; - x264_analyse_update_cache( h, a ); - a->l0.i_cost8x8 = x264_rd_cost_mb( h, a->i_lambda2 ); - if( h->param.analyse.inter & X264_ANALYSE_PSUB8x8 ) { - /* FIXME: RD per subpartition */ - int part_bak[4]; - int i, i_cost; - int b_sub8x8 = 0; - for( i=0; i<4; i++ ) - { - part_bak[i] = h->mb.i_sub_partition[i]; - b_sub8x8 |= (part_bak[i] != D_L0_8x8); - } - if( b_sub8x8 ) + int i; + x264_macroblock_cache_ref( h, 0, 0, 2, 2, 0, a->l0.me8x8[0].i_ref ); + x264_macroblock_cache_ref( h, 2, 0, 2, 2, 0, a->l0.me8x8[1].i_ref ); + x264_macroblock_cache_ref( h, 0, 2, 2, 2, 0, a->l0.me8x8[2].i_ref ); + x264_macroblock_cache_ref( h, 2, 2, 2, 2, 0, a->l0.me8x8[3].i_ref ); + /* FIXME: In the 8x8 blocks where RDO isn't run, the NNZ values used for context selection + * for future blocks are those left over from previous RDO calls. */ + for( i = 0; i < 4; i++ ) { - h->mb.i_sub_partition[0] = h->mb.i_sub_partition[1] = - h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8; - x264_analyse_update_cache( h, a ); - i_cost = x264_rd_cost_mb( h, a->i_lambda2 ); - if( a->l0.i_cost8x8 < i_cost ) + int costs[4] = {a->l0.i_cost4x4[i], a->l0.i_cost8x4[i], a->l0.i_cost4x8[i], a->l0.me8x8[i].cost}; + int thresh = X264_MIN4( costs[0], costs[1], costs[2], costs[3] ) * 5 / 4; + int subtype, btype = D_L0_8x8; + uint64_t bcost = COST_MAX64; + for( subtype = D_L0_4x4; subtype <= D_L0_8x8; subtype++ ) { - for( i=0; i<4; i++ ) - h->mb.i_sub_partition[i] = part_bak[i]; + uint64_t cost; + if( costs[subtype] > thresh || (subtype == D_L0_8x8 && bcost == COST_MAX64) ) + continue; + h->mb.i_sub_partition[i] = subtype; + x264_mb_cache_mv_p8x8( h, a, i ); + cost = x264_rd_cost_part( h, a->i_lambda2, i<<2, PIXEL_8x8 ); + COPY2_IF_LT( bcost, cost, btype, subtype ); } - else - a->l0.i_cost8x8 = i_cost; + h->mb.i_sub_partition[i] = btype; + x264_mb_cache_mv_p8x8( h, a, i ); } } + else + x264_analyse_update_cache( h, a ); + a->l0.i_cost8x8 = x264_rd_cost_mb( h, a->i_lambda2 ); } else a->l0.i_cost8x8 = COST_MAX; @@ -1911,7 +2108,7 @@ static void x264_mb_analyse_p_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd ) static void x264_mb_analyse_b_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd_inter ) { - int thresh = i_satd_inter * 17/16; + int thresh = i_satd_inter * (17 + (!!h->mb.i_psy_rd))/16; if( a->b_direct_available && a->i_rd16x16direct == COST_MAX ) { @@ -1979,38 +2176,41 @@ static void x264_mb_analyse_b_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd_i } } -static void refine_bidir( x264_t *h, x264_mb_analysis_t *a ) +static void x264_refine_bidir( x264_t *h, x264_mb_analysis_t *a ) { const int i_biweight = h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref]; int i; + if( IS_INTRA(h->mb.i_type) ) + return; + switch( h->mb.i_partition ) { - case D_16x16: - if( h->mb.i_type == B_BI_BI ) - x264_me_refine_bidir( h, &a->l0.me16x16, &a->l1.me16x16, i_biweight ); - break; - case D_16x8: - for( i=0; i<2; i++ ) - if( a->i_mb_partition16x8[i] == D_BI_8x8 ) - x264_me_refine_bidir( h, &a->l0.me16x8[i], &a->l1.me16x8[i], i_biweight ); - break; - case D_8x16: - for( 
i=0; i<2; i++ ) - if( a->i_mb_partition8x16[i] == D_BI_8x8 ) - x264_me_refine_bidir( h, &a->l0.me8x16[i], &a->l1.me8x16[i], i_biweight ); - break; - case D_8x8: - for( i=0; i<4; i++ ) - if( h->mb.i_sub_partition[i] == D_BI_8x8 ) - x264_me_refine_bidir( h, &a->l0.me8x8[i], &a->l1.me8x8[i], i_biweight ); - break; + case D_16x16: + if( h->mb.i_type == B_BI_BI ) + x264_me_refine_bidir_satd( h, &a->l0.me16x16, &a->l1.me16x16, i_biweight ); + break; + case D_16x8: + for( i=0; i<2; i++ ) + if( a->i_mb_partition16x8[i] == D_BI_8x8 ) + x264_me_refine_bidir_satd( h, &a->l0.me16x8[i], &a->l1.me16x8[i], i_biweight ); + break; + case D_8x16: + for( i=0; i<2; i++ ) + if( a->i_mb_partition8x16[i] == D_BI_8x8 ) + x264_me_refine_bidir_satd( h, &a->l0.me8x16[i], &a->l1.me8x16[i], i_biweight ); + break; + case D_8x8: + for( i=0; i<4; i++ ) + if( h->mb.i_sub_partition[i] == D_BI_8x8 ) + x264_me_refine_bidir_satd( h, &a->l0.me8x8[i], &a->l1.me8x8[i], i_biweight ); + break; } } static inline void x264_mb_analyse_transform( x264_t *h ) { - if( x264_mb_transform_8x8_allowed( h ) && h->param.analyse.b_transform_8x8 ) + if( x264_mb_transform_8x8_allowed( h ) && h->param.analyse.b_transform_8x8 && !h->mb.b_lossless ) { int i_cost4, i_cost8; /* Only luma MC is really needed, but the full MC is re-used in macroblock_encode. */ @@ -2032,7 +2232,7 @@ static inline void x264_mb_analyse_transform_rd( x264_t *h, x264_mb_analysis_t * { int i_rd8; x264_analyse_update_cache( h, a ); - h->mb.b_transform_8x8 = !h->mb.b_transform_8x8; + h->mb.b_transform_8x8 ^= 1; /* FIXME only luma is needed, but the score for comparison already includes chroma */ i_rd8 = x264_rd_cost_mb( h, a->i_lambda2 ); @@ -2040,17 +2240,90 @@ static inline void x264_mb_analyse_transform_rd( x264_t *h, x264_mb_analysis_t * { if( *i_rd > 0 ) *i_satd = (int64_t)(*i_satd) * i_rd8 / *i_rd; - /* prevent a rare division by zero in estimated intra cost */ - if( *i_satd == 0 ) - *i_satd = 1; - *i_rd = i_rd8; } else - h->mb.b_transform_8x8 = !h->mb.b_transform_8x8; + h->mb.b_transform_8x8 ^= 1; } } +/* Rate-distortion optimal QP selection. + * FIXME: More than half of the benefit of this function seems to be + * in the way it improves the coding of chroma DC (by decimating or + * finding a better way to code a single DC coefficient.) + * There must be a more efficient way to get that portion of the benefit + * without doing full QP-RD, but RD-decimation doesn't seem to do the + * trick. */ +static inline void x264_mb_analyse_qp_rd( x264_t *h, x264_mb_analysis_t *a ) +{ + int bcost, cost, direction, failures, prevcost, origcost; + int orig_qp = h->mb.i_qp, bqp = h->mb.i_qp; + int last_qp_tried = 0; + origcost = bcost = x264_rd_cost_mb( h, a->i_lambda2 ); + + /* If CBP is already zero, don't raise the quantizer any higher. */ + for( direction = h->mb.cbp[h->mb.i_mb_xy] ? 1 : -1; direction >= -1; direction-=2 ) + { + /* Without psy-RD, require monotonicity when moving quant away from previous + * macroblock's quant; allow 1 failure when moving quant towards previous quant. + * With psy-RD, allow 1 failure when moving quant away from previous quant, + * allow 2 failures when moving quant towards previous quant. + * Psy-RD generally seems to result in more chaotic RD score-vs-quantizer curves. */ + int threshold = (!!h->mb.i_psy_rd); + /* Raise the threshold for failures if we're moving towards the last QP. 
*/ + if( ( h->mb.i_last_qp < orig_qp && direction == -1 ) || + ( h->mb.i_last_qp > orig_qp && direction == 1 ) ) + threshold++; + h->mb.i_qp = orig_qp; + failures = 0; + prevcost = origcost; + h->mb.i_qp += direction; + while( h->mb.i_qp >= h->param.rc.i_qp_min && h->mb.i_qp <= h->param.rc.i_qp_max ) + { + if( h->mb.i_last_qp == h->mb.i_qp ) + last_qp_tried = 1; + h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp]; + cost = x264_rd_cost_mb( h, a->i_lambda2 ); + COPY2_IF_LT( bcost, cost, bqp, h->mb.i_qp ); + + /* We can't assume that the costs are monotonic over QPs. + * Tie case-as-failure seems to give better results. */ + if( cost < prevcost ) + failures = 0; + else + failures++; + prevcost = cost; + + if( failures > threshold ) + break; + if( direction == 1 && !h->mb.cbp[h->mb.i_mb_xy] ) + break; + h->mb.i_qp += direction; + } + } + + /* Always try the last block's QP. */ + if( !last_qp_tried ) + { + h->mb.i_qp = h->mb.i_last_qp; + h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp]; + cost = x264_rd_cost_mb( h, a->i_lambda2 ); + COPY2_IF_LT( bcost, cost, bqp, h->mb.i_qp ); + } + + h->mb.i_qp = bqp; + h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp]; + + /* Check transform again; decision from before may no longer be optimal. */ + if( h->mb.i_qp != orig_qp && h->param.analyse.b_transform_8x8 && + x264_mb_transform_8x8_allowed( h ) ) + { + h->mb.b_transform_8x8 ^= 1; + cost = x264_rd_cost_mb( h, a->i_lambda2 ); + if( cost > bcost ) + h->mb.b_transform_8x8 ^= 1; + } +} /***************************************************************************** * x264_macroblock_analyse: @@ -2063,34 +2336,38 @@ void x264_macroblock_analyse( x264_t *h ) h->mb.i_qp = x264_ratecontrol_qp( h ); if( h->param.rc.i_aq_mode ) + { x264_adaptive_quant( h ); + /* If the QP of this MB is within 1 of the previous MB, code the same QP as the previous MB, + * to lower the bit cost of the qp_delta. Don't do this if QPRD is enabled. */ + if( h->param.analyse.i_subpel_refine < 10 && abs(h->mb.i_qp - h->mb.i_last_qp) == 1 ) + h->mb.i_qp = h->mb.i_last_qp; + } x264_mb_analyse_init( h, &analysis, h->mb.i_qp ); /*--------------------------- Do the analysis ---------------------------*/ if( h->sh.i_type == SLICE_TYPE_I ) { + if( analysis.i_mbrd ) + x264_mb_cache_fenc_satd( h ); x264_mb_analyse_intra( h, &analysis, COST_MAX ); - if( analysis.b_mbrd ) + if( analysis.i_mbrd ) x264_intra_rd( h, &analysis, COST_MAX ); i_cost = analysis.i_satd_i16x16; h->mb.i_type = I_16x16; - if( analysis.i_satd_i4x4 < i_cost ) - { - i_cost = analysis.i_satd_i4x4; - h->mb.i_type = I_4x4; - } - if( analysis.i_satd_i8x8 < i_cost ) - h->mb.i_type = I_8x8; + COPY2_IF_LT( i_cost, analysis.i_satd_i4x4, h->mb.i_type, I_4x4 ); + COPY2_IF_LT( i_cost, analysis.i_satd_i8x8, h->mb.i_type, I_8x8 ); + if( analysis.i_satd_pcm < i_cost ) + h->mb.i_type = I_PCM; - if( h->mb.i_subpel_refine >= 7 ) + else if( analysis.i_mbrd >= 2 ) x264_intra_rd_refine( h, &analysis ); } else if( h->sh.i_type == SLICE_TYPE_P ) { int b_skip = 0; - int i_intra_cost, i_intra_type; h->mc.prefetch_ref( h->mb.pic.p_fref[0][0][h->mb.i_mb_x&3], h->mb.pic.i_stride[0], 0 ); @@ -2196,7 +2473,7 @@ void x264_macroblock_analyse( x264_t *h ) /* refine qpel */ //FIXME mb_type costs? 
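/* For reference, the QP-RD search added earlier in this patch
 * (x264_mb_analyse_qp_rd, enabled at subme 10, i.e. i_mbrd == 3) condenses to
 * a bidirectional hill climb around the ratecontrol QP; shown here without
 * the psy/last-QP threshold adjustments:
 *
 *     for( dir = cbp ? +1 : -1; dir >= -1; dir -= 2 )  // never raise QP once CBP is 0
 *     {
 *         qp = orig_qp + dir; failures = 0; prevcost = origcost;
 *         while( qp >= qp_min && qp <= qp_max )
 *         {
 *             cost = RD cost of the whole MB at qp;
 *             if( cost < bcost ) { bcost = cost; bqp = qp; }
 *             failures = cost < prevcost ? 0 : failures+1;  // ties count as failures
 *             prevcost = cost;
 *             if( failures > threshold ) break;
 *             qp += dir;
 *         }
 *     }
 *     // then always try the previous MB's QP (cheapest mb_qp_delta), keep the
 *     // best, and re-check the 4x4-vs-8x8 transform decision at the final QP.
 */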
 /*****************************************************************************
  * x264_macroblock_analyse:
  *****************************************************************************/
@@ -2063,34 +2336,38 @@ void x264_macroblock_analyse( x264_t *h )
     h->mb.i_qp = x264_ratecontrol_qp( h );
     if( h->param.rc.i_aq_mode )
+    {
         x264_adaptive_quant( h );
+        /* If the QP of this MB is within 1 of the previous MB, code the same QP as the previous MB,
+         * to lower the bit cost of the qp_delta. Don't do this if QPRD is enabled. */
+        if( h->param.analyse.i_subpel_refine < 10 && abs(h->mb.i_qp - h->mb.i_last_qp) == 1 )
+            h->mb.i_qp = h->mb.i_last_qp;
+    }

     x264_mb_analyse_init( h, &analysis, h->mb.i_qp );

     /*--------------------------- Do the analysis ---------------------------*/
     if( h->sh.i_type == SLICE_TYPE_I )
     {
+        if( analysis.i_mbrd )
+            x264_mb_cache_fenc_satd( h );
         x264_mb_analyse_intra( h, &analysis, COST_MAX );
-        if( analysis.b_mbrd )
+        if( analysis.i_mbrd )
             x264_intra_rd( h, &analysis, COST_MAX );

         i_cost = analysis.i_satd_i16x16;
         h->mb.i_type = I_16x16;
-        if( analysis.i_satd_i4x4 < i_cost )
-        {
-            i_cost = analysis.i_satd_i4x4;
-            h->mb.i_type = I_4x4;
-        }
-        if( analysis.i_satd_i8x8 < i_cost )
-            h->mb.i_type = I_8x8;
+        COPY2_IF_LT( i_cost, analysis.i_satd_i4x4, h->mb.i_type, I_4x4 );
+        COPY2_IF_LT( i_cost, analysis.i_satd_i8x8, h->mb.i_type, I_8x8 );
+        if( analysis.i_satd_pcm < i_cost )
+            h->mb.i_type = I_PCM;

-        if( h->mb.i_subpel_refine >= 7 )
+        else if( analysis.i_mbrd >= 2 )
             x264_intra_rd_refine( h, &analysis );
     }
     else if( h->sh.i_type == SLICE_TYPE_P )
     {
         int b_skip = 0;
-        int i_intra_cost, i_intra_type;

         h->mc.prefetch_ref( h->mb.pic.p_fref[0][0][h->mb.i_mb_x&3], h->mb.pic.i_stride[0], 0 );
@@ -2196,7 +2473,7 @@ void x264_macroblock_analyse( x264_t *h )

             /* refine qpel */
             //FIXME mb_type costs?
-            if( analysis.b_mbrd )
+            if( analysis.i_mbrd )
             {
                 /* refine later */
             }
@@ -2275,12 +2552,12 @@ void x264_macroblock_analyse( x264_t *h )
                                   analysis.i_satd_i8x8,
                                   analysis.i_satd_i4x4 );

-            if( analysis.b_mbrd )
+            if( analysis.i_mbrd )
             {
                 x264_mb_analyse_p_rd( h, &analysis, X264_MIN(i_satd_inter, i_satd_intra) );
                 i_type = P_L0;
                 i_partition = D_16x16;
-                i_cost = analysis.l0.me16x16.cost;
+                i_cost = analysis.l0.i_rd16x16;
                 COPY2_IF_LT( i_cost, analysis.l0.i_cost16x8, i_partition, D_16x8 );
                 COPY2_IF_LT( i_cost, analysis.l0.i_cost8x16, i_partition, D_8x16 );
                 COPY3_IF_LT( i_cost, analysis.l0.i_cost8x8, i_partition, D_8x8, i_type, P_8x8 );
@@ -2291,21 +2568,14 @@ void x264_macroblock_analyse( x264_t *h )
                     x264_intra_rd( h, &analysis, i_satd_inter * 5/4 );
             }

-            i_intra_type = I_16x16;
-            i_intra_cost = analysis.i_satd_i16x16;
-            COPY2_IF_LT( i_intra_cost, analysis.i_satd_i8x8, i_intra_type, I_8x8 );
-            COPY2_IF_LT( i_intra_cost, analysis.i_satd_i4x4, i_intra_type, I_4x4 );
-            COPY2_IF_LT( i_cost, i_intra_cost, i_type, i_intra_type );
-
-            if( i_intra_cost == COST_MAX )
-                i_intra_cost = i_cost * i_satd_intra / i_satd_inter + 1;
+            COPY2_IF_LT( i_cost, analysis.i_satd_i16x16, i_type, I_16x16 );
+            COPY2_IF_LT( i_cost, analysis.i_satd_i8x8, i_type, I_8x8 );
+            COPY2_IF_LT( i_cost, analysis.i_satd_i4x4, i_type, I_4x4 );
+            COPY2_IF_LT( i_cost, analysis.i_satd_pcm, i_type, I_PCM );

             h->mb.i_type = i_type;
-            h->stat.frame.i_intra_cost += i_intra_cost;
-            h->stat.frame.i_inter_cost += i_cost;
-            h->stat.frame.i_mbs_analysed++;

-            if( h->mb.i_subpel_refine >= 7 )
+            if( analysis.i_mbrd >= 2 && h->mb.i_type != I_PCM )
             {
                 if( IS_INTRA( h->mb.i_type ) )
                 {
@@ -2314,7 +2584,8 @@ void x264_macroblock_analyse( x264_t *h )
                 else if( i_partition == D_16x16 )
                 {
                     x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, analysis.l0.me16x16.i_ref );
-                    x264_me_refine_qpel_rd( h, &analysis.l0.me16x16, analysis.i_lambda2, 0 );
+                    analysis.l0.me16x16.cost = i_cost;
+                    x264_me_refine_qpel_rd( h, &analysis.l0.me16x16, analysis.i_lambda2, 0, 0 );
                 }
                 else if( i_partition == D_16x8 )
                 {
@@ -2322,8 +2593,8 @@ void x264_macroblock_analyse( x264_t *h )
                     h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8;
                     x264_macroblock_cache_ref( h, 0, 0, 4, 2, 0, analysis.l0.me16x8[0].i_ref );
                     x264_macroblock_cache_ref( h, 0, 2, 4, 2, 0, analysis.l0.me16x8[1].i_ref );
-                    x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[0], analysis.i_lambda2, 0 );
-                    x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[1], analysis.i_lambda2, 2 );
+                    x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[0], analysis.i_lambda2, 0, 0 );
+                    x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[1], analysis.i_lambda2, 8, 0 );
                 }
                 else if( i_partition == D_8x16 )
                 {
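Note the signature change on x264_me_refine_qpel_rd() running through these hunks: every call gains a trailing list argument (0 for L0; the B-slice hunks further down pass 1 for L1), and the block-index argument changes units, counting 4x4 luma blocks (four per 8x8 partition, i.e. i8x8*4 + i4x4) instead of 8x8 partitions — hence 0/2 becoming 0/8 for the 16x8 halves above and 0/1 becoming 0/4 for the 8x16 halves below. A small table of that mapping (illustrative constants, not x264 API; me.c at this revision is authoritative):

/* First 4x4 luma block covered by each partition half, in x264's
 * 8x8-then-4x4 block order (i8x8*4 + i4x4). */
static const int first_blk_16x8[2] = { 0, 8 };          /* top, bottom */
static const int first_blk_8x16[2] = { 0, 4 };          /* left, right */
static const int first_blk_8x8[4]  = { 0, 4, 8, 12 };   /* i8x8 * 4 */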
@@ -2331,16 +2602,37 @@ void x264_macroblock_analyse( x264_t *h )
                     h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8;
                     x264_macroblock_cache_ref( h, 0, 0, 2, 4, 0, analysis.l0.me8x16[0].i_ref );
                     x264_macroblock_cache_ref( h, 2, 0, 2, 4, 0, analysis.l0.me8x16[1].i_ref );
-                    x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[0], analysis.i_lambda2, 0 );
-                    x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[1], analysis.i_lambda2, 1 );
+                    x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[0], analysis.i_lambda2, 0, 0 );
+                    x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[1], analysis.i_lambda2, 4, 0 );
                 }
                 else if( i_partition == D_8x8 )
                 {
                     int i8x8;
                     x264_analyse_update_cache( h, &analysis );
                     for( i8x8 = 0; i8x8 < 4; i8x8++ )
-                        if( h->mb.i_sub_partition[i8x8] == D_L0_8x8 )
-                            x264_me_refine_qpel_rd( h, &analysis.l0.me8x8[i8x8], analysis.i_lambda2, i8x8 );
+                    {
+                        if( h->mb.i_sub_partition[i8x8] == D_L0_8x8 )
+                        {
+                            x264_me_refine_qpel_rd( h, &analysis.l0.me8x8[i8x8], analysis.i_lambda2, i8x8*4, 0 );
+                        }
+                        else if( h->mb.i_sub_partition[i8x8] == D_L0_8x4 )
+                        {
+                            x264_me_refine_qpel_rd( h, &analysis.l0.me8x4[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
+                            x264_me_refine_qpel_rd( h, &analysis.l0.me8x4[i8x8][1], analysis.i_lambda2, i8x8*4+2, 0 );
+                        }
+                        else if( h->mb.i_sub_partition[i8x8] == D_L0_4x8 )
+                        {
+                            x264_me_refine_qpel_rd( h, &analysis.l0.me4x8[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
+                            x264_me_refine_qpel_rd( h, &analysis.l0.me4x8[i8x8][1], analysis.i_lambda2, i8x8*4+1, 0 );
+                        }
+                        else if( h->mb.i_sub_partition[i8x8] == D_L0_4x4 )
+                        {
+                            x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
+                            x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][1], analysis.i_lambda2, i8x8*4+1, 0 );
+                            x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][2], analysis.i_lambda2, i8x8*4+2, 0 );
+                            x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][3], analysis.i_lambda2, i8x8*4+3, 0 );
+                        }
+                    }
                 }
             }
         }
     }
@@ -2350,6 +2642,9 @@ void x264_macroblock_analyse( x264_t *h )
         int i_bskip_cost = COST_MAX;
         int b_skip = 0;

+        if( analysis.i_mbrd )
+            x264_mb_cache_fenc_satd( h );
+
         h->mb.i_type = B_SKIP;
         if( h->mb.b_direct_auto_write )
         {
@@ -2379,11 +2674,7 @@ void x264_macroblock_analyse( x264_t *h )
         {
             if( !h->mb.b_direct_auto_write )
                 x264_mb_mc( h );
-            if( h->mb.b_lossless )
-            {
-                /* chance of skip is too small to bother */
-            }
-            else if( analysis.b_mbrd )
+            if( analysis.i_mbrd )
             {
                 i_bskip_cost = ssd_mb( h );
                 /* 6 = minimum cavlc cost of a non-skipped MB */
@@ -2402,7 +2693,7 @@ void x264_macroblock_analyse( x264_t *h )
             const unsigned int flags = h->param.analyse.inter;
             int i_type;
             int i_partition;
-            int i_satd_inter = 0; // shut up uninitialized warning
+            int i_satd_inter;
             h->mb.b_skip_mc = 0;

             x264_mb_analyse_load_costs( h, &analysis );
@@ -2421,7 +2712,7 @@ void x264_macroblock_analyse( x264_t *h )
             COPY2_IF_LT( i_cost, analysis.i_cost16x16bi, i_type, B_BI_BI );
             COPY2_IF_LT( i_cost, analysis.i_cost16x16direct, i_type, B_DIRECT );

-            if( analysis.b_mbrd && analysis.i_cost16x16direct <= i_cost * 33/32 )
+            if( analysis.i_mbrd && analysis.i_cost16x16direct <= i_cost * 33/32 )
             {
                 x264_mb_analyse_b_rd( h, &analysis, i_cost );
                 if( i_bskip_cost < analysis.i_rd16x16direct &&
@@ -2463,7 +2754,7 @@ void x264_macroblock_analyse( x264_t *h )
                 }
             }

-            if( analysis.b_mbrd )
+            if( analysis.i_mbrd )
            {
                 /* refine later */
             }
@@ -2546,9 +2837,10 @@ void x264_macroblock_analyse( x264_t *h )
                 }
             }

-            if( analysis.b_mbrd )
+            i_satd_inter = i_cost;
+
+            if( analysis.i_mbrd )
             {
-                i_satd_inter = i_cost;
                 x264_mb_analyse_b_rd( h, &analysis, i_satd_inter );
                 i_type = B_SKIP;
                 i_cost = i_bskip_cost;
@@ -2564,10 +2856,10 @@ void x264_macroblock_analyse( x264_t *h )
                 h->mb.i_type = i_type;
                 h->mb.i_partition = i_partition;
             }
-
+
             x264_mb_analyse_intra( h, &analysis, i_satd_inter );

-            if( analysis.b_mbrd )
+            if( analysis.i_mbrd )
             {
                 x264_mb_analyse_transform_rd( h, &analysis, &i_satd_inter, &i_cost );
                 x264_intra_rd( h, &analysis, i_satd_inter * 17/16 );
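The hunks above pit an SSD-only score for the skip candidate (ssd_mb()) against full RD scores, which works because of how x264 composes an RD cost: distortion plus lambda2-weighted bits, with the lambda2 tables pre-scaled by 256. A sketch of that composition and of what the "6 = minimum cavlc cost of a non-skipped MB" comment buys — rd_cost() is an illustrative stand-in, and the exact rounding may differ from rdo.c:

#include <stdint.h>

/* RD score = distortion + rate * lambda2; lambda2 carries a factor of
 * 256, so the rate term is scaled back down with a rounding add. */
static uint64_t rd_cost( uint64_t i_ssd, uint64_t i_bits, uint64_t i_lambda2 )
{
    return i_ssd + ((i_bits * i_lambda2 + 128) >> 8);
}

A skip macroblock costs almost no bits, so its bare SSD can stand in for its RD score; any coded macroblock must pay at least ~6 bits of CAVLC overhead, roughly (6*i_lambda2+128)>>8 — about 137 units of SSD at QP 26 — and only beats skip if it recovers at least that much distortion.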
@@ -2576,24 +2868,103 @@ void x264_macroblock_analyse( x264_t *h )
             COPY2_IF_LT( i_cost, analysis.i_satd_i16x16, i_type, I_16x16 );
             COPY2_IF_LT( i_cost, analysis.i_satd_i8x8, i_type, I_8x8 );
             COPY2_IF_LT( i_cost, analysis.i_satd_i4x4, i_type, I_4x4 );
+            COPY2_IF_LT( i_cost, analysis.i_satd_pcm, i_type, I_PCM );

             h->mb.i_type = i_type;
             h->mb.i_partition = i_partition;

-            if( analysis.b_mbrd && h->mb.i_subpel_refine >= 7 && IS_INTRA( i_type ) )
+            if( analysis.i_mbrd >= 2 && IS_INTRA( i_type ) && i_type != I_PCM )
                 x264_intra_rd_refine( h, &analysis );
-            else if( h->param.analyse.b_bidir_me )
-                refine_bidir( h, &analysis );
+            if( h->mb.i_subpel_refine >= 5 )
+                x264_refine_bidir( h, &analysis );
+
+            if( analysis.i_mbrd >= 2 && i_type > B_DIRECT && i_type < B_SKIP )
+            {
+                const int i_biweight = h->mb.bipred_weight[analysis.l0.i_ref][analysis.l1.i_ref];
+                x264_analyse_update_cache( h, &analysis );
+
+                if( i_partition == D_16x16 )
+                {
+                    if( i_type == B_L0_L0 )
+                    {
+                        analysis.l0.me16x16.cost = i_cost;
+                        x264_me_refine_qpel_rd( h, &analysis.l0.me16x16, analysis.i_lambda2, 0, 0 );
+                    }
+                    else if( i_type == B_L1_L1 )
+                    {
+                        analysis.l1.me16x16.cost = i_cost;
+                        x264_me_refine_qpel_rd( h, &analysis.l1.me16x16, analysis.i_lambda2, 0, 1 );
+                    }
+                    else if( i_type == B_BI_BI )
+                        x264_me_refine_bidir_rd( h, &analysis.l0.me16x16, &analysis.l1.me16x16, i_biweight, 0, analysis.i_lambda2 );
+                }
+                else if( i_partition == D_16x8 )
+                {
+                    for( i = 0; i < 2; i++ )
+                    {
+                        h->mb.i_sub_partition[i*2] = h->mb.i_sub_partition[i*2+1] = analysis.i_mb_partition16x8[i];
+                        if( analysis.i_mb_partition16x8[i] == D_L0_8x8 )
+                            x264_me_refine_qpel_rd( h, &analysis.l0.me16x8[i], analysis.i_lambda2, i*8, 0 );
+                        else if( analysis.i_mb_partition16x8[i] == D_L1_8x8 )
+                            x264_me_refine_qpel_rd( h, &analysis.l1.me16x8[i], analysis.i_lambda2, i*8, 1 );
+                        else if( analysis.i_mb_partition16x8[i] == D_BI_8x8 )
+                            x264_me_refine_bidir_rd( h, &analysis.l0.me16x8[i], &analysis.l1.me16x8[i], i_biweight, i*2, analysis.i_lambda2 );
+                    }
+                }
+                else if( i_partition == D_8x16 )
+                {
+                    for( i = 0; i < 2; i++ )
+                    {
+                        h->mb.i_sub_partition[i] = h->mb.i_sub_partition[i+2] = analysis.i_mb_partition8x16[i];
+                        if( analysis.i_mb_partition8x16[i] == D_L0_8x8 )
+                            x264_me_refine_qpel_rd( h, &analysis.l0.me8x16[i], analysis.i_lambda2, i*4, 0 );
+                        else if( analysis.i_mb_partition8x16[i] == D_L1_8x8 )
+                            x264_me_refine_qpel_rd( h, &analysis.l1.me8x16[i], analysis.i_lambda2, i*4, 1 );
+                        else if( analysis.i_mb_partition8x16[i] == D_BI_8x8 )
+                            x264_me_refine_bidir_rd( h, &analysis.l0.me8x16[i], &analysis.l1.me8x16[i], i_biweight, i, analysis.i_lambda2 );
+                    }
+                }
+                else if( i_partition == D_8x8 )
+                {
+                    for( i = 0; i < 4; i++ )
+                    {
+                        if( h->mb.i_sub_partition[i] == D_L0_8x8 )
+                            x264_me_refine_qpel_rd( h, &analysis.l0.me8x8[i], analysis.i_lambda2, i*4, 0 );
+                        else if( h->mb.i_sub_partition[i] == D_L1_8x8 )
+                            x264_me_refine_qpel_rd( h, &analysis.l1.me8x8[i], analysis.i_lambda2, i*4, 1 );
+                        else if( h->mb.i_sub_partition[i] == D_BI_8x8 )
+                            x264_me_refine_bidir_rd( h, &analysis.l0.me8x8[i], &analysis.l1.me8x8[i], i_biweight, i, analysis.i_lambda2 );
+                    }
+                }
+            }
         }
     }

     x264_analyse_update_cache( h, &analysis );

-    if( !analysis.b_mbrd )
+    /* In rare cases we can end up qpel-RDing our way back to a larger partition size
+     * without realizing it. Check for this and account for it if necessary. */
+    if( analysis.i_mbrd >= 2 )
+    {
+        /* Don't bother with bipred or 8x8-and-below, the odds are incredibly low. */
+        static const uint8_t check_mv_lists[X264_MBTYPE_MAX] = {[P_L0]=1, [B_L0_L0]=1, [B_L1_L1]=2};
+        int list = check_mv_lists[h->mb.i_type] - 1;
+        if( list >= 0 && h->mb.i_partition != D_16x16 &&
+            *(uint32_t*)&h->mb.cache.mv[list][x264_scan8[0]] == *(uint32_t*)&h->mb.cache.mv[list][x264_scan8[12]] &&
+            h->mb.cache.ref[list][x264_scan8[0]] == h->mb.cache.ref[list][x264_scan8[12]] )
+            h->mb.i_partition = D_16x16;
+    }
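Two idioms in the partition-collapse block above deserve a note. The designated initializer maps only the three interesting macroblock types to list+1, so every other type reads 0, list becomes -1 and the check is skipped without a per-type branch. And the uint32_t load compares both int16_t components of a motion vector in one operation; scan8[0] and scan8[12] pick one 4x4 block from each half of any 16x8 or 8x16 split. A standalone sketch of the packed comparison — mv_equal is illustrative, and it uses memcpy where the original's pointer cast relies on x264's known cache alignment:

#include <stdint.h>
#include <string.h>

/* Compare an (mvx, mvy) pair of int16_t as a single 32-bit word. */
static int mv_equal( const int16_t a[2], const int16_t b[2] )
{
    uint32_t wa, wb;
    memcpy( &wa, a, sizeof(wa) );
    memcpy( &wb, b, sizeof(wb) );
    return wa == wb;
}

If both halves ended up with the same reference and motion vector after qpel-RD refinement, the macroblock is a de-facto 16x16, and relabelling it saves the overhead of coding two identical mvs.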
+
+    if( !analysis.i_mbrd )
         x264_mb_analyse_transform( h );

+    if( analysis.i_mbrd == 3 && !IS_SKIP(h->mb.i_type) )
+        x264_mb_analyse_qp_rd( h, &analysis );
+
     h->mb.b_trellis = h->param.analyse.i_trellis;
-    h->mb.b_noise_reduction = h->param.analyse.i_noise_reduction;
+    h->mb.b_noise_reduction = !!h->param.analyse.i_noise_reduction;
+    if( !IS_SKIP(h->mb.i_type) && h->mb.i_psy_trellis && h->param.analyse.i_trellis == 1 )
+        x264_psy_trellis_init( h, 0 );
     if( h->mb.b_trellis == 1 || h->mb.b_noise_reduction )
         h->mb.i_skip_intra = 0;
 }
@@ -2622,6 +2993,9 @@ static void x264_analyse_update_cache( x264_t *h, x264_mb_analysis_t *a )
             x264_mb_analyse_intra_chroma( h, a );
             break;

+        case I_PCM:
+            break;
+
         case P_L0:
             switch( h->mb.i_partition )
             {