X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=encoder%2Fanalyse.c;h=b6907200dc0c72929db330e6d43ba137215a7cb5;hb=6cbc47d476f610218c7e973d2c806b24bb4dd1b9;hp=7a65354a7df3f99f48cc4107fed8d9cffce88806;hpb=f5af5f14e5d924a3b57d6bfbd1219a334771727b;p=x264 diff --git a/encoder/analyse.c b/encoder/analyse.c index 7a65354a..b6907200 100644 --- a/encoder/analyse.c +++ b/encoder/analyse.c @@ -1,7 +1,7 @@ /***************************************************************************** - * analyse.c: h264 encoder library + * analyse.c: macroblock analysis ***************************************************************************** - * Copyright (C) 2003-2008 x264 project + * Copyright (C) 2003-2011 x264 project * * Authors: Laurent Aimar * Loren Merritt @@ -20,14 +20,14 @@ * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA. + * + * This program is also available under a commercial proprietary license. + * For more information, contact us at licensing@x264.com. *****************************************************************************/ #define _ISOC99_SOURCE -#include -#include #include "common/common.h" -#include "common/cpu.h" #include "macroblock.h" #include "me.h" #include "ratecontrol.h" @@ -37,9 +37,9 @@ typedef struct { /* 16x16 */ - int i_ref; int i_rd16x16; x264_me_t me16x16; + x264_me_t bi16x16; /* for b16x16 BI mode, since MVs can differ from l0/l1 */ /* 8x8 */ int i_cost8x8; @@ -84,7 +84,8 @@ typedef struct /* Take some shortcuts in intra search if intra is deemed unlikely */ int b_fast_intra; int b_force_intra; /* For Periodic Intra Refresh. Only supported in P-frames. */ - int b_try_pskip; + int b_avoid_topright; /* For Periodic Intra Refresh: don't predict from top-right pixels. 
*/ + int b_try_skip; /* Luma part */ int i_satd_i16x16; @@ -114,6 +115,9 @@ typedef struct int i_cost16x16direct; int i_cost8x8bi; int i_cost8x8direct[4]; + int i_satd8x8[3][4]; /* [L0,L1,BI][8x8 0..3] SATD only */ + int i_cost_est16x8[2]; /* Per-partition estimated cost */ + int i_cost_est8x16[2]; int i_cost16x8bi; int i_cost8x16bi; int i_rd16x16bi; @@ -132,35 +136,48 @@ typedef struct } x264_mb_analysis_t; /* lambda = pow(2,qp/6-2) */ -const uint8_t x264_lambda_tab[52] = { - 1, 1, 1, 1, 1, 1, 1, 1, /* 0-7 */ - 1, 1, 1, 1, /* 8-11 */ - 1, 1, 1, 1, 2, 2, 2, 2, /* 12-19 */ - 3, 3, 3, 4, 4, 4, 5, 6, /* 20-27 */ - 6, 7, 8, 9,10,11,13,14, /* 28-35 */ - 16,18,20,23,25,29,32,36, /* 36-43 */ - 40,45,51,57,64,72,81,91 /* 44-51 */ +const uint16_t x264_lambda_tab[QP_MAX_MAX+1] = +{ + 1, 1, 1, 1, 1, 1, 1, 1, /* 0- 7 */ + 1, 1, 1, 1, 1, 1, 1, 1, /* 8-15 */ + 2, 2, 2, 2, 3, 3, 3, 4, /* 16-23 */ + 4, 4, 5, 6, 6, 7, 8, 9, /* 24-31 */ + 10, 11, 13, 14, 16, 18, 20, 23, /* 32-39 */ + 25, 29, 32, 36, 40, 45, 51, 57, /* 40-47 */ + 64, 72, 81, 91, 102, 114, 128, 144, /* 48-55 */ + 161, 181, 203, 228, 256, 287, 323, 362, /* 56-63 */ + 406, 456, 512, 575, 645, 724, 813, 912, /* 64-71 */ +1024,1149,1290,1448,1625,1825,2048,2299, /* 72-79 */ +2048,2299, /* 80-81 */ }; /* lambda2 = pow(lambda,2) * .9 * 256 */ -const int x264_lambda2_tab[52] = { - 14, 18, 22, 28, 36, 45, 57, 72, /* 0 - 7 */ - 91, 115, 145, 182, 230, 290, 365, 460, /* 8 - 15 */ - 580, 731, 921, 1161, 1462, 1843, 2322, 2925, /* 16 - 23 */ - 3686, 4644, 5851, 7372, 9289, 11703, 14745, 18578, /* 24 - 31 */ - 23407, 29491, 37156, 46814, 58982, 74313, 93628, 117964, /* 32 - 39 */ -148626, 187257, 235929, 297252, 374514, 471859, 594505, 749029, /* 40 - 47 */ -943718, 1189010, 1498059, 1887436 /* 48 - 51 */ +/* Capped to avoid overflow */ +const int x264_lambda2_tab[QP_MAX_MAX+1] = +{ + 14, 18, 22, 28, 36, 45, 57, 72, /* 0- 7 */ + 91, 115, 145, 182, 230, 290, 365, 460, /* 8-15 */ + 580, 731, 921, 1161, 1462, 1843, 2322, 2925, /* 16-23 */ + 3686, 4644, 5851, 7372, 9289, 11703, 14745, 18578, /* 24-31 */ + 23407, 29491, 37156, 46814, 58982, 74313, 93628, 117964, /* 32-39 */ + 148626, 187257, 235929, 297252, 374514, 471859, 594505, 749029, /* 40-47 */ + 943718, 1189010, 1498059, 1887436, 2378021, 2996119, 3774873, 4756042, /* 48-55 */ + 5992238, 7549747, 9512085, 11984476, 15099494, 19024170,23968953,30198988, /* 56-63 */ + 38048341, 47937906, 60397977, 76096683, 95875813,120795955, /* 64-69 */ +134217727,134217727,134217727,134217727,134217727,134217727, /* 70-75 */ +134217727,134217727,134217727,134217727,134217727,134217727, /* 76-81 */ }; -const uint8_t x264_exp2_lut[64] = { +const uint8_t x264_exp2_lut[64] = +{ 0, 3, 6, 8, 11, 14, 17, 20, 23, 26, 29, 32, 36, 39, 42, 45, 48, 52, 55, 58, 62, 65, 69, 72, 76, 80, 83, 87, 91, 94, 98, 102, 106, 110, 114, 118, 122, 126, 130, 135, 139, 143, 147, 152, 156, 161, 165, 170, 175, 179, 184, 189, 194, 198, 203, 208, 214, 219, 224, 229, 234, 240, 245, 250 }; -const float x264_log2_lut[128] = { +const float x264_log2_lut[128] = +{ 0.00000, 0.01123, 0.02237, 0.03342, 0.04439, 0.05528, 0.06609, 0.07682, 0.08746, 0.09803, 0.10852, 0.11894, 0.12928, 0.13955, 0.14975, 0.15987, 0.16993, 0.17991, 0.18982, 0.19967, 0.20945, 0.21917, 0.22882, 0.23840, @@ -180,36 +197,53 @@ const float x264_log2_lut[128] = { }; /* Avoid an int/float conversion. 
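*/

The lambda tables above follow directly from the formulas in their
comments. A minimal standalone sketch (not x264 code) that regenerates
them; QP_MAX_MAX = 81 and the 134217727 = (1<<27)-1 overflow cap are read
off the diff, and note that the final two lambda entries (QP 80-81)
deliberately repeat the QP 78-79 values instead of continuing the curve:

#include <math.h>
#include <stdio.h>

#define QP_MAX_MAX  81
#define LAMBDA2_CAP 134217727   /* (1<<27)-1, keeps RD sums in range */

int main( void )
{
    for( int qp = 0; qp <= QP_MAX_MAX; qp++ )
    {
        double l = pow( 2, qp/6.0 - 2 );        /* lambda = pow(2,qp/6-2) */
        int lambda = l + .5;                    /* rounded to nearest... */
        if( lambda < 1 )
            lambda = 1;                         /* ...with a floor of 1 */
        double l2 = l * l * .9 * 256;           /* lambda2 = pow(lambda,2) * .9 * 256 */
        int lambda2 = l2 > LAMBDA2_CAP ? LAMBDA2_CAP : (int)l2; /* truncated, capped */
        printf( "qp=%2d lambda=%4d lambda2=%9d\n", qp, lambda, lambda2 );
    }
    return 0;
}

/*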
*/ -const float x264_log2_lz_lut[32] = { +const float x264_log2_lz_lut[32] = +{ 31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 }; // should the intra and inter lambdas be different? // I'm just matching the behaviour of deadzone quant. -static const int x264_trellis_lambda2_tab[2][52] = { +static const int x264_trellis_lambda2_tab[2][QP_MAX_MAX+1] = +{ // inter lambda = .85 * .85 * 2**(qp/3. + 10 - LAMBDA_BITS) - { 46, 58, 73, 92, 117, 147, - 185, 233, 294, 370, 466, 587, - 740, 932, 1174, 1480, 1864, 2349, - 2959, 3728, 4697, 5918, 7457, 9395, - 11837, 14914, 18790, 23674, 29828, 37581, - 47349, 59656, 75163, 94699, 119313, 150326, - 189399, 238627, 300652, 378798, 477255, 601304, - 757596, 954511, 1202608, 1515192, 1909022, 2405217, - 3030384, 3818045, 4810435, 6060769 }, + { + 46, 58, 73, 92, 117, 147, + 185, 233, 294, 370, 466, 587, + 740, 932, 1174, 1480, 1864, 2349, + 2959, 3728, 4697, 5918, 7457, 9395, + 11837, 14914, 18790, 23674, 29828, 37581, + 47349, 59656, 75163, 94699, 119313, 150326, + 189399, 238627, 300652, 378798, 477255, 601304, + 757596, 954511, 1202608, 1515192, 1909022, 2405217, + 3030384, 3818045, 4810435, 6060769, 7636091, 9620872, + 12121539, 15272182, 19241743, 24243077, 30544363, 38483486, + 48486154, 61088726, 76966972, 96972308, + 122177453,134217727,134217727,134217727,134217727,134217727, + 134217727,134217727,134217727,134217727,134217727,134217727, + }, // intra lambda = .65 * .65 * 2**(qp/3. + 10 - LAMBDA_BITS) - { 27, 34, 43, 54, 68, 86, - 108, 136, 172, 216, 273, 343, - 433, 545, 687, 865, 1090, 1374, - 1731, 2180, 2747, 3461, 4361, 5494, - 6922, 8721, 10988, 13844, 17442, 21976, - 27688, 34885, 43953, 55377, 69771, 87906, - 110755, 139543, 175813, 221511, 279087, 351627, - 443023, 558174, 703255, 886046, 1116348, 1406511, - 1772093, 2232697, 2813022, 3544186 } + { + 27, 34, 43, 54, 68, 86, + 108, 136, 172, 216, 273, 343, + 433, 545, 687, 865, 1090, 1374, + 1731, 2180, 2747, 3461, 4361, 5494, + 6922, 8721, 10988, 13844, 17442, 21976, + 27688, 34885, 43953, 55377, 69771, 87906, + 110755, 139543, 175813, 221511, 279087, 351627, + 443023, 558174, 703255, 886046, 1116348, 1406511, + 1772093, 2232697, 2813022, 3544186, 4465396, 5626046, + 7088374, 8930791, 11252092, 14176748, 17861583, 22504184, + 28353495, 35723165, 45008368, 56706990, + 71446330, 90016736,113413980,134217727,134217727,134217727, + 134217727,134217727,134217727,134217727,134217727,134217727, + 134217727,134217727,134217727,134217727,134217727,134217727, + } }; -static const uint16_t x264_chroma_lambda2_offset_tab[] = { +#define MAX_CHROMA_LAMBDA_OFFSET 36 +static const uint16_t x264_chroma_lambda2_offset_tab[MAX_CHROMA_LAMBDA_OFFSET+1] = +{ 16, 20, 25, 32, 40, 50, 64, 80, 101, 128, 161, 203, 256, 322, 406, 512, 645, 812, @@ -220,51 +254,54 @@ static const uint16_t x264_chroma_lambda2_offset_tab[] = { }; /* TODO: calculate CABAC costs */ -static const uint8_t i_mb_b_cost_table[X264_MBTYPE_MAX] = { +static const uint8_t i_mb_b_cost_table[X264_MBTYPE_MAX] = +{ 9, 9, 9, 9, 0, 0, 0, 1, 3, 7, 7, 7, 3, 7, 7, 7, 5, 9, 0 }; -static const uint8_t i_mb_b16x8_cost_table[17] = { +static const uint8_t i_mb_b16x8_cost_table[17] = +{ 0, 0, 0, 0, 0, 0, 0, 0, 5, 7, 7, 7, 5, 7, 9, 9, 9 }; -static const uint8_t i_sub_mb_b_cost_table[13] = { +static const uint8_t i_sub_mb_b_cost_table[13] = +{ 7, 5, 5, 3, 7, 5, 7, 3, 7, 7, 7, 5, 1 }; -static const uint8_t i_sub_mb_p_cost_table[4] = { +static const uint8_t i_sub_mb_p_cost_table[4] = +{ 5, 3, 3, 1 }; static void 
x264_analyse_update_cache( x264_t *h, x264_mb_analysis_t *a ); -static uint16_t x264_cost_ref[92][3][33]; +static uint16_t x264_cost_ref[QP_MAX+1][3][33]; static UNUSED x264_pthread_mutex_t cost_ref_mutex = X264_PTHREAD_MUTEX_INITIALIZER; int x264_analyse_init_costs( x264_t *h, int qp ) { - int i, j; int lambda = x264_lambda_tab[qp]; - if( h->cost_mv[lambda] ) + if( h->cost_mv[qp] ) return 0; /* factor of 4 from qpel, 2 from sign, and 2 because mv can be opposite from mvp */ - CHECKED_MALLOC( h->cost_mv[lambda], (4*4*2048 + 1) * sizeof(uint16_t) ); - h->cost_mv[lambda] += 2*4*2048; - for( i = 0; i <= 2*4*2048; i++ ) + CHECKED_MALLOC( h->cost_mv[qp], (4*4*2048 + 1) * sizeof(uint16_t) ); + h->cost_mv[qp] += 2*4*2048; + for( int i = 0; i <= 2*4*2048; i++ ) { - h->cost_mv[lambda][-i] = - h->cost_mv[lambda][i] = lambda * (log2f(i+1)*2 + 0.718f + !!i) + .5f; + h->cost_mv[qp][-i] = + h->cost_mv[qp][i] = X264_MIN( lambda * (log2f(i+1)*2 + 0.718f + !!i) + .5f, (1<<16)-1 ); } x264_pthread_mutex_lock( &cost_ref_mutex ); - for( i = 0; i < 3; i++ ) - for( j = 0; j < 33; j++ ) - x264_cost_ref[lambda][i][j] = i ? lambda * bs_size_te( i, j ) : 0; + for( int i = 0; i < 3; i++ ) + for( int j = 0; j < 33; j++ ) + x264_cost_ref[qp][i][j] = X264_MIN( i ? lambda * bs_size_te( i, j ) : 0, (1<<16)-1 ); x264_pthread_mutex_unlock( &cost_ref_mutex ); - if( h->param.analyse.i_me_method >= X264_ME_ESA && !h->cost_mv_fpel[lambda][0] ) + if( h->param.analyse.i_me_method >= X264_ME_ESA && !h->cost_mv_fpel[qp][0] ) { - for( j=0; j<4; j++ ) + for( int j = 0; j < 4; j++ ) { - CHECKED_MALLOC( h->cost_mv_fpel[lambda][j], (4*2048 + 1) * sizeof(uint16_t) ); - h->cost_mv_fpel[lambda][j] += 2*2048; - for( i = -2*2048; i < 2*2048; i++ ) - h->cost_mv_fpel[lambda][j][i] = h->cost_mv[lambda][i*4+j]; + CHECKED_MALLOC( h->cost_mv_fpel[qp][j], (4*2048 + 1) * sizeof(uint16_t) ); + h->cost_mv_fpel[qp][j] += 2*2048; + for( int i = -2*2048; i < 2*2048; i++ ) + h->cost_mv_fpel[qp][j][i] = h->cost_mv[qp][i*4+j]; } } return 0; @@ -274,44 +311,39 @@ fail: void x264_analyse_free_costs( x264_t *h ) { - int i, j; - for( i = 0; i < 92; i++ ) + for( int i = 0; i < QP_MAX+1; i++ ) { if( h->cost_mv[i] ) x264_free( h->cost_mv[i] - 2*4*2048 ); if( h->cost_mv_fpel[i][0] ) - for( j = 0; j < 4; j++ ) + for( int j = 0; j < 4; j++ ) x264_free( h->cost_mv_fpel[i][j] - 2*2048 ); } } void x264_analyse_weight_frame( x264_t *h, int end ) { - int j; - for( j=0; ji_ref0; j++ ) + for( int j = 0; j < h->i_ref[0]; j++ ) { if( h->sh.weight[j][0].weightfn ) { - x264_frame_t *frame = h->fref0[j]; + x264_frame_t *frame = h->fref[0][j]; int width = frame->i_width[0] + 2*PADH; int i_padv = PADV << h->param.b_interlaced; int offset, height; - uint8_t *src = frame->filtered[0] - frame->i_stride[0]*i_padv - PADH; - int k; - height = X264_MIN( 16 + end + i_padv, h->fref0[j]->i_lines[0] + i_padv*2 ) - h->fenc->i_lines_weighted; + pixel *src = frame->filtered[0] - frame->i_stride[0]*i_padv - PADH; + height = X264_MIN( 16 + end + i_padv, h->fref[0][j]->i_lines[0] + i_padv*2 ) - h->fenc->i_lines_weighted; offset = h->fenc->i_lines_weighted*frame->i_stride[0]; h->fenc->i_lines_weighted += height; if( height ) - { - for( k = j; k < h->i_ref0; k++ ) + for( int k = j; k < h->i_ref[0]; k++ ) if( h->sh.weight[k][0].weightfn ) { - uint8_t *dst = h->fenc->weighted[k] - h->fenc->i_stride[0]*i_padv - PADH; + pixel *dst = h->fenc->weighted[k] - h->fenc->i_stride[0]*i_padv - PADH; x264_weight_scale_plane( h, dst + offset, frame->i_stride[0], src + offset, frame->i_stride[0], width, height, 
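The cost_mv table built in x264_analyse_init_costs above prices one
quarter-pel MV residual component as lambda times an estimated code
length, 2*log2(|d|+1) + 0.718 + (d != 0) bits, now saturated so every
entry fits in a uint16_t even at the new extended QPs. A standalone
sketch of that model (the helper name is invented; the formula and the
(1<<16)-1 cap are taken from the code above):

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

/* hypothetical helper mirroring the cost_mv initialization */
static unsigned mv_component_cost( int lambda, int d )
{
    d = abs( d );
    float bits = log2f( d + 1 ) * 2 + 0.718f + !!d; /* ~Exp-Golomb length */
    unsigned cost = lambda * bits + .5f;
    return cost <= 0xFFFF ? cost : 0xFFFF;          /* fit in uint16_t */
}

int main( void )
{
    for( int d = 0; d <= 8; d++ )                   /* lambda 23 ~ QP 39 */
        printf( "residual %dq -> cost %u\n", d, mv_component_cost( 23, d ) );
    return 0;
}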
&h->sh.weight[k][0] ); } - } break; } } @@ -320,52 +352,63 @@ void x264_analyse_weight_frame( x264_t *h, int end ) /* initialize an array of lambda*nbits for all possible mvs */ static void x264_mb_analyse_load_costs( x264_t *h, x264_mb_analysis_t *a ) { - a->p_cost_mv = h->cost_mv[a->i_lambda]; - a->p_cost_ref[0] = x264_cost_ref[a->i_lambda][x264_clip3(h->sh.i_num_ref_idx_l0_active-1,0,2)]; - a->p_cost_ref[1] = x264_cost_ref[a->i_lambda][x264_clip3(h->sh.i_num_ref_idx_l1_active-1,0,2)]; + a->p_cost_mv = h->cost_mv[a->i_qp]; + a->p_cost_ref[0] = x264_cost_ref[a->i_qp][x264_clip3(h->sh.i_num_ref_idx_l0_active-1,0,2)]; + a->p_cost_ref[1] = x264_cost_ref[a->i_qp][x264_clip3(h->sh.i_num_ref_idx_l1_active-1,0,2)]; } -static void x264_mb_analyse_init_qp( x264_t *h, x264_mb_analysis_t *a, int i_qp ) +static void x264_mb_analyse_init_qp( x264_t *h, x264_mb_analysis_t *a, int qp ) { - /* conduct the analysis using this lamda and QP */ - a->i_qp = h->mb.i_qp = i_qp; - h->mb.i_chroma_qp = h->chroma_qp_table[i_qp]; - - a->i_lambda = x264_lambda_tab[i_qp]; - a->i_lambda2 = x264_lambda2_tab[i_qp]; + int effective_chroma_qp = h->chroma_qp_table[SPEC_QP(qp)] + X264_MAX( qp - QP_MAX_SPEC, 0 ); + a->i_lambda = x264_lambda_tab[qp]; + a->i_lambda2 = x264_lambda2_tab[qp]; h->mb.b_trellis = h->param.analyse.i_trellis > 1 && a->i_mbrd; if( h->param.analyse.i_trellis ) { - h->mb.i_trellis_lambda2[0][0] = x264_trellis_lambda2_tab[0][h->mb.i_qp]; - h->mb.i_trellis_lambda2[0][1] = x264_trellis_lambda2_tab[1][h->mb.i_qp]; - h->mb.i_trellis_lambda2[1][0] = x264_trellis_lambda2_tab[0][h->mb.i_chroma_qp]; - h->mb.i_trellis_lambda2[1][1] = x264_trellis_lambda2_tab[1][h->mb.i_chroma_qp]; + h->mb.i_trellis_lambda2[0][0] = x264_trellis_lambda2_tab[0][qp]; + h->mb.i_trellis_lambda2[0][1] = x264_trellis_lambda2_tab[1][qp]; + h->mb.i_trellis_lambda2[1][0] = x264_trellis_lambda2_tab[0][effective_chroma_qp]; + h->mb.i_trellis_lambda2[1][1] = x264_trellis_lambda2_tab[1][effective_chroma_qp]; } h->mb.i_psy_rd_lambda = a->i_lambda; /* Adjusting chroma lambda based on QP offset hurts PSNR but improves visual quality. */ - h->mb.i_chroma_lambda2_offset = h->param.analyse.b_psy ? x264_chroma_lambda2_offset_tab[h->mb.i_qp-h->mb.i_chroma_qp+12] : 256; + int chroma_offset_idx = X264_MIN( qp-effective_chroma_qp+12, MAX_CHROMA_LAMBDA_OFFSET ); + h->mb.i_chroma_lambda2_offset = h->param.analyse.b_psy ? x264_chroma_lambda2_offset_tab[chroma_offset_idx] : 256; + if( qp > QP_MAX_SPEC ) + { + h->nr_offset = h->nr_offset_emergency[qp-QP_MAX_SPEC-1]; + h->nr_residual_sum = h->nr_residual_sum_buf[1]; + h->nr_count = h->nr_count_buf[1]; + h->mb.b_noise_reduction = 1; + qp = QP_MAX_SPEC; /* Out-of-spec QPs are just used for calculating lambda values. 
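*/

Put differently: above QP_MAX_SPEC (51 for 8-bit H.264) the bitstream
keeps coding at QP 51 and the extra quality reduction is realized as
progressively stronger noise reduction (nr_offset_emergency), while
lambda and lambda2 keep growing from the extended tables so mode decision
behaves as if the QP really were higher. A hedged sketch of that split;
the struct and names here are invented for illustration:

#define QP_MAX_SPEC 51

typedef struct
{
    int coded_qp;      /* what the bitstream carries, always in spec */
    int lambda_qp;     /* index into the extended lambda tables */
    int denoise_level; /* 0 in spec, grows past QP_MAX_SPEC */
} qp_split_t;

static qp_split_t split_qp( int qp )
{
    qp_split_t s;
    s.lambda_qp = qp;
    s.denoise_level = qp > QP_MAX_SPEC ? qp - QP_MAX_SPEC : 0;
    s.coded_qp = qp > QP_MAX_SPEC ? QP_MAX_SPEC : qp;
    return s;
}

/*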
*/ + } + else + { + h->nr_offset = h->nr_offset_denoise; + h->nr_residual_sum = h->nr_residual_sum_buf[0]; + h->nr_count = h->nr_count_buf[0]; + h->mb.b_noise_reduction = 0; + } + + a->i_qp = h->mb.i_qp = qp; + h->mb.i_chroma_qp = h->chroma_qp_table[qp]; } -static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp ) +static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int qp ) { - int i = h->param.analyse.i_subpel_refine - (h->sh.i_type == SLICE_TYPE_B); + int subme = h->param.analyse.i_subpel_refine - (h->sh.i_type == SLICE_TYPE_B); /* mbrd == 1 -> RD mode decision */ /* mbrd == 2 -> RD refinement */ /* mbrd == 3 -> QPRD */ - a->i_mbrd = (i>=6) + (i>=8) + (h->param.analyse.i_subpel_refine>=10); - - x264_mb_analyse_init_qp( h, a, i_qp ); + a->i_mbrd = (subme>=6) + (subme>=8) + (h->param.analyse.i_subpel_refine>=10); + h->mb.b_deblock_rdo = h->param.analyse.i_subpel_refine >= 9 && h->sh.i_disable_deblocking_filter_idc != 1; - h->mb.i_me_method = h->param.analyse.i_me_method; - h->mb.i_subpel_refine = h->param.analyse.i_subpel_refine; - h->mb.b_chroma_me = h->param.analyse.b_chroma_me && h->sh.i_type == SLICE_TYPE_P - && h->mb.i_subpel_refine >= 5; + x264_mb_analyse_init_qp( h, a, qp ); h->mb.b_transform_8x8 = 0; - h->mb.b_noise_reduction = 0; /* I: Intra part */ a->i_satd_i16x16 = @@ -377,6 +420,7 @@ static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp ) a->i_satd_pcm = !h->mb.i_psy_rd && a->i_mbrd ? ((uint64_t)X264_PCM_COST*a->i_lambda2 + 128) >> 8 : COST_MAX; a->b_fast_intra = 0; + a->b_avoid_topright = 0; h->mb.i_skip_intra = h->mb.b_lossless ? 0 : a->i_mbrd ? 2 : @@ -385,7 +429,6 @@ static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp ) /* II: Inter part P/B frame */ if( h->sh.i_type != SLICE_TYPE_I ) { - int i, j; int i_fmv_range = 4 * h->param.analyse.i_mv_range; // limit motion search to a slightly smaller range than the theoretical limit, // since the search may go a few iterations past its given range @@ -399,7 +442,7 @@ static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp ) h->mb.mv_max_spel[0] = CLIP_FMV( h->mb.mv_max[0] ); if( h->param.b_intra_refresh && h->sh.i_type == SLICE_TYPE_P ) { - int max_x = (h->fref0[0]->i_pir_end_col * 16 - 3)*4; /* 3 pixels of hpel border */ + int max_x = (h->fref[0][0]->i_pir_end_col * 16 - 3)*4; /* 3 pixels of hpel border */ int max_mv = max_x - 4*16*h->mb.i_mb_x; /* If we're left of the refresh bar, don't reference right of it. */ if( max_mv > 0 && h->mb.i_mb_x < h->fdec->i_pir_start_col ) @@ -413,20 +456,16 @@ static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp ) int mb_height = h->sps->i_mb_height >> h->sh.b_mbaff; int thread_mvy_range = i_fmv_range; - if( h->param.i_threads > 1 && !h->param.b_sliced_threads ) + if( h->i_thread_frames > 1 ) { int pix_y = (h->mb.i_mb_y | h->mb.b_interlaced) * 16; int thresh = pix_y + h->param.analyse.i_mv_range_thread; - for( i = (h->sh.i_type == SLICE_TYPE_B); i >= 0; i-- ) - { - x264_frame_t **fref = i ? h->fref1 : h->fref0; - int i_ref = i ? 
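With frame threading (h->i_thread_frames > 1), references may still be in
flight on other threads, so the vertical motion range is clamped to the
rows those references have actually finished, blocking if necessary. A
simplified model of the clamp above (the struct here is invented; in x264
the wait is x264_frame_cond_wait on fref->orig and the result then feeds
mv_max_spel[1]):

typedef struct { int i_lines_completed; } ref_progress_t;

/* pix_y: first luma row of this MB; i_mv_range_thread: how far below
 * pix_y we are willing to wait for each reference to be encoded */
static int thread_mvy_range( int pix_y, int i_mv_range_thread,
                             const ref_progress_t *refs, int n_refs )
{
    int range = i_mv_range_thread;
    for( int i = 0; i < n_refs; i++ )
    {
        /* x264 first blocks until refs[i] reaches pix_y + i_mv_range_thread */
        int avail = refs[i].i_lines_completed - pix_y;
        if( avail < range )
            range = avail;
    }
    return range;
}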
h->i_ref1 : h->i_ref0; - for( j=0; jsh.i_type == SLICE_TYPE_B); i >= 0; i-- ) + for( int j = 0; j < h->i_ref[i]; j++ ) { - x264_frame_cond_wait( fref[j]->orig, thresh ); - thread_mvy_range = X264_MIN( thread_mvy_range, fref[j]->orig->i_lines_completed - pix_y ); + x264_frame_cond_wait( h->fref[i][j]->orig, thresh ); + thread_mvy_range = X264_MIN( thread_mvy_range, h->fref[i][j]->orig->i_lines_completed - pix_y ); } - } if( h->param.b_deterministic ) thread_mvy_range = h->param.analyse.i_mv_range_thread; @@ -448,31 +487,18 @@ static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp ) a->l0.me16x16.cost = a->l0.i_rd16x16 = - a->l0.i_cost8x8 = COST_MAX; - - for( i = 0; i < 4; i++ ) - { - a->l0.i_cost4x4[i] = - a->l0.i_cost8x4[i] = - a->l0.i_cost4x8[i] = COST_MAX; - } - + a->l0.i_cost8x8 = a->l0.i_cost16x8 = a->l0.i_cost8x16 = COST_MAX; if( h->sh.i_type == SLICE_TYPE_B ) { a->l1.me16x16.cost = a->l1.i_rd16x16 = - a->l1.i_cost8x8 = COST_MAX; - - for( i = 0; i < 4; i++ ) - { - a->l1.i_cost4x4[i] = - a->l1.i_cost8x4[i] = - a->l1.i_cost4x8[i] = - a->i_cost8x8direct[i] = COST_MAX; - } - + a->l1.i_cost8x8 = + a->i_cost8x8direct[0] = + a->i_cost8x8direct[1] = + a->i_cost8x8direct[2] = + a->i_cost8x8direct[3] = a->l1.i_cost16x8 = a->l1.i_cost8x16 = a->i_rd16x16bi = @@ -486,16 +512,25 @@ static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp ) a->i_cost16x8bi = a->i_cost8x16bi = COST_MAX; } + else if( h->param.analyse.inter & X264_ANALYSE_PSUB8x8 ) + for( int i = 0; i < 4; i++ ) + { + a->l0.i_cost4x4[i] = + a->l0.i_cost8x4[i] = + a->l0.i_cost4x8[i] = COST_MAX; + } /* Fast intra decision */ if( h->mb.i_mb_xy - h->sh.i_first_mb > 4 ) { - if( IS_INTRA( h->mb.i_mb_type_left ) - || IS_INTRA( h->mb.i_mb_type_top ) - || IS_INTRA( h->mb.i_mb_type_topleft ) - || IS_INTRA( h->mb.i_mb_type_topright ) - || (h->sh.i_type == SLICE_TYPE_P && IS_INTRA( h->fref0[0]->mb_type[h->mb.i_mb_xy] )) - || (h->mb.i_mb_xy - h->sh.i_first_mb < 3*(h->stat.frame.i_mb_count[I_4x4] + h->stat.frame.i_mb_count[I_8x8] + h->stat.frame.i_mb_count[I_16x16])) ) + /* Always run in fast-intra mode for subme < 3 */ + if( h->mb.i_subpel_refine > 2 && + ( IS_INTRA( h->mb.i_mb_type_left ) || + IS_INTRA( h->mb.i_mb_type_top ) || + IS_INTRA( h->mb.i_mb_type_topleft ) || + IS_INTRA( h->mb.i_mb_type_topright ) || + (h->sh.i_type == SLICE_TYPE_P && IS_INTRA( h->fref[0][0]->mb_type[h->mb.i_mb_xy] )) || + (h->mb.i_mb_xy - h->sh.i_first_mb < 3*(h->stat.frame.i_mb_count[I_4x4] + h->stat.frame.i_mb_count[I_8x8] + h->stat.frame.i_mb_count[I_16x16])) ) ) { /* intra is likely */ } else { @@ -508,6 +543,7 @@ static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp ) { a->b_force_intra = 1; a->b_fast_intra = 0; + a->b_avoid_topright = h->mb.i_mb_x == h->fdec->i_pir_end_col; } else a->b_force_intra = 0; @@ -535,37 +571,58 @@ static const int8_t i8x8chroma_mode_available[5][5] = {I_PRED_CHROMA_V, I_PRED_CHROMA_H, I_PRED_CHROMA_DC, I_PRED_CHROMA_P, -1}, }; -static const int8_t i4x4_mode_available[5][10] = +static const int8_t i4x4_mode_available[2][5][10] = { - {I_PRED_4x4_DC_128, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {I_PRED_4x4_DC_LEFT, I_PRED_4x4_H, I_PRED_4x4_HU, -1, -1, -1, -1, -1, -1, -1}, - {I_PRED_4x4_DC_TOP, I_PRED_4x4_V, I_PRED_4x4_DDL, I_PRED_4x4_VL, -1, -1, -1, -1, -1, -1}, - {I_PRED_4x4_DC, I_PRED_4x4_H, I_PRED_4x4_V, I_PRED_4x4_DDL, I_PRED_4x4_VL, I_PRED_4x4_HU, -1, -1, -1, -1}, - {I_PRED_4x4_DC, I_PRED_4x4_H, I_PRED_4x4_V, I_PRED_4x4_DDL, I_PRED_4x4_DDR, I_PRED_4x4_VR, I_PRED_4x4_HD, 
I_PRED_4x4_VL, I_PRED_4x4_HU, -1}, + { + {I_PRED_4x4_DC_128, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + {I_PRED_4x4_DC_LEFT, I_PRED_4x4_H, I_PRED_4x4_HU, -1, -1, -1, -1, -1, -1, -1}, + {I_PRED_4x4_DC_TOP, I_PRED_4x4_V, I_PRED_4x4_DDL, I_PRED_4x4_VL, -1, -1, -1, -1, -1, -1}, + {I_PRED_4x4_DC, I_PRED_4x4_H, I_PRED_4x4_V, I_PRED_4x4_DDL, I_PRED_4x4_VL, I_PRED_4x4_HU, -1, -1, -1, -1}, + {I_PRED_4x4_DC, I_PRED_4x4_H, I_PRED_4x4_V, I_PRED_4x4_DDL, I_PRED_4x4_DDR, I_PRED_4x4_VR, I_PRED_4x4_HD, I_PRED_4x4_VL, I_PRED_4x4_HU, -1}, + }, + { + {I_PRED_4x4_DC_128, -1, -1, -1, -1, -1, -1, -1, -1, -1}, + {I_PRED_4x4_DC_LEFT, I_PRED_4x4_H, I_PRED_4x4_HU, -1, -1, -1, -1, -1, -1, -1}, + {I_PRED_4x4_DC_TOP, I_PRED_4x4_V, -1, -1, -1, -1, -1, -1, -1, -1}, + {I_PRED_4x4_DC, I_PRED_4x4_H, I_PRED_4x4_V, I_PRED_4x4_HU, -1, -1, -1, -1, -1, -1}, + {I_PRED_4x4_DC, I_PRED_4x4_H, I_PRED_4x4_V, I_PRED_4x4_DDR, I_PRED_4x4_VR, I_PRED_4x4_HD, I_PRED_4x4_HU, -1}, + } }; -static inline const int8_t *predict_16x16_mode_available( int i_neighbour ) +static ALWAYS_INLINE const int8_t *predict_16x16_mode_available( int i_neighbour ) +{ + int idx = i_neighbour & (MB_TOP|MB_LEFT|MB_TOPLEFT); + idx = (idx == (MB_TOP|MB_LEFT|MB_TOPLEFT)) ? 4 : idx & (MB_TOP|MB_LEFT); + return i16x16_mode_available[idx]; +} + +static ALWAYS_INLINE const int8_t *predict_8x8chroma_mode_available( int i_neighbour ) { int idx = i_neighbour & (MB_TOP|MB_LEFT|MB_TOPLEFT); - return i16x16_mode_available[(idx&MB_TOPLEFT)?4:idx]; + idx = (idx == (MB_TOP|MB_LEFT|MB_TOPLEFT)) ? 4 : idx & (MB_TOP|MB_LEFT); + return i8x8chroma_mode_available[idx]; } -static inline const int8_t *predict_8x8chroma_mode_available( int i_neighbour ) +static ALWAYS_INLINE const int8_t *predict_8x8_mode_available( int force_intra, int i_neighbour, int i ) { + int avoid_topright = force_intra && (i&1); int idx = i_neighbour & (MB_TOP|MB_LEFT|MB_TOPLEFT); - return i8x8chroma_mode_available[(idx&MB_TOPLEFT)?4:idx]; + idx = (idx == (MB_TOP|MB_LEFT|MB_TOPLEFT)) ? 4 : idx & (MB_TOP|MB_LEFT); + return i4x4_mode_available[avoid_topright][idx]; } -static inline const int8_t *predict_4x4_mode_available( int i_neighbour ) +static ALWAYS_INLINE const int8_t *predict_4x4_mode_available( int force_intra, int i_neighbour, int i ) { + int avoid_topright = force_intra && ((i&5) == 5); int idx = i_neighbour & (MB_TOP|MB_LEFT|MB_TOPLEFT); - return i4x4_mode_available[(idx&MB_TOPLEFT)?4:idx]; + idx = (idx == (MB_TOP|MB_LEFT|MB_TOPLEFT)) ? 4 : idx & (MB_TOP|MB_LEFT); + return i4x4_mode_available[avoid_topright][idx]; } /* For trellis=2, we need to do this for both sizes of DCT, for trellis=1 we only need to use it on the chosen mode. 
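*/

The availability functions above fold the neighbour flags into five table
rows: 0 = nothing, 1 = left only, 2 = top only, 3 = top+left, and 4 =
top+left+topleft, the only case that unlocks the DDR/VR/HD modes (the old
code keyed row 4 off MB_TOPLEFT alone; the rewrite requires all three).
The new leading [2] dimension of i4x4_mode_available drops every
top-right-dependent mode for right-edge blocks during intra refresh: 8x8
blocks with (i&1), 4x4 blocks with (i&5)==5. A checkable sketch of the
row computation, assuming the usual x264 flag values:

#include <assert.h>

#define MB_LEFT     0x01    /* assumed x264 neighbour flag values */
#define MB_TOP      0x02
#define MB_TOPLEFT  0x04

static int mode_table_row( int i_neighbour )
{
    int idx = i_neighbour & (MB_TOP|MB_LEFT|MB_TOPLEFT);
    return idx == (MB_TOP|MB_LEFT|MB_TOPLEFT) ? 4 : idx & (MB_TOP|MB_LEFT);
}

int main( void )
{
    assert( mode_table_row( 0 ) == 0 );
    assert( mode_table_row( MB_LEFT ) == 1 );
    assert( mode_table_row( MB_TOP ) == 2 );
    assert( mode_table_row( MB_TOP|MB_LEFT ) == 3 );
    assert( mode_table_row( MB_TOP|MB_LEFT|MB_TOPLEFT ) == 4 );
    return 0;
}

/*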
*/ static void inline x264_psy_trellis_init( x264_t *h, int do_both_dct ) { - ALIGNED_16( static uint8_t zero[16*FDEC_STRIDE] ) = {0}; + ALIGNED_16( static pixel zero[16*FDEC_STRIDE] ) = {0}; if( do_both_dct || h->mb.b_transform_8x8 ) h->dctf.sub16x16_dct8( h->mb.pic.fenc_dct8, h->mb.pic.p_fenc[0], zero ); @@ -573,47 +630,28 @@ static void inline x264_psy_trellis_init( x264_t *h, int do_both_dct ) h->dctf.sub16x16_dct( h->mb.pic.fenc_dct4, h->mb.pic.p_fenc[0], zero ); } -/* Pre-calculate fenc satd scores for psy RD, minus DC coefficients */ -static inline void x264_mb_cache_fenc_satd( x264_t *h ) +/* Reset fenc satd scores cache for psy RD */ +static inline void x264_mb_init_fenc_cache( x264_t *h, int b_satd ) { - ALIGNED_16( static uint8_t zero[16] ) = {0}; - uint8_t *fenc; - int x, y, satd_sum = 0, sa8d_sum = 0; if( h->param.analyse.i_trellis == 2 && h->mb.i_psy_trellis ) x264_psy_trellis_init( h, h->param.analyse.b_transform_8x8 ); if( !h->mb.i_psy_rd ) return; - for( y = 0; y < 4; y++ ) - for( x = 0; x < 4; x++ ) - { - fenc = h->mb.pic.p_fenc[0]+x*4+y*4*FENC_STRIDE; - h->mb.pic.fenc_satd[y][x] = h->pixf.satd[PIXEL_4x4]( zero, 0, fenc, FENC_STRIDE ) - - (h->pixf.sad[PIXEL_4x4]( zero, 0, fenc, FENC_STRIDE )>>1); - satd_sum += h->mb.pic.fenc_satd[y][x]; - } - for( y = 0; y < 2; y++ ) - for( x = 0; x < 2; x++ ) - { - fenc = h->mb.pic.p_fenc[0]+x*8+y*8*FENC_STRIDE; - h->mb.pic.fenc_sa8d[y][x] = h->pixf.sa8d[PIXEL_8x8]( zero, 0, fenc, FENC_STRIDE ) - - (h->pixf.sad[PIXEL_8x8]( zero, 0, fenc, FENC_STRIDE )>>2); - sa8d_sum += h->mb.pic.fenc_sa8d[y][x]; - } - h->mb.pic.fenc_satd_sum = satd_sum; - h->mb.pic.fenc_sa8d_sum = sa8d_sum; + /* Writes beyond the end of the array, but not a problem since fenc_satd_cache is right after. */ + h->mc.memzero_aligned( h->mb.pic.fenc_hadamard_cache, sizeof(h->mb.pic.fenc_hadamard_cache) ); + if( b_satd ) + h->mc.memzero_aligned( h->mb.pic.fenc_satd_cache, sizeof(h->mb.pic.fenc_satd_cache) ); } static void x264_mb_analyse_intra_chroma( x264_t *h, x264_mb_analysis_t *a ) { - int b_merged_satd = !!h->pixf.intra_mbcmp_x3_8x8c && !h->mb.b_lossless; - if( a->i_satd_i8x8chroma < COST_MAX ) return; const int8_t *predict_mode = predict_8x8chroma_mode_available( h->mb.i_neighbour_intra ); /* 8x8 prediction selection for chroma */ - if( predict_mode[3] >= 0 && b_merged_satd ) + if( predict_mode[3] >= 0 && !h->mb.b_lossless ) { int satdu[4], satdv[4]; h->pixf.intra_mbcmp_x3_8x8c( h->mb.pic.p_fenc[1], h->mb.pic.p_fdec[1], satdu ); @@ -664,27 +702,49 @@ static void x264_mb_analyse_intra_chroma( x264_t *h, x264_mb_analysis_t *a ) static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_inter ) { const unsigned int flags = h->sh.i_type == SLICE_TYPE_I ? 
h->param.analyse.intra : h->param.analyse.inter; - uint8_t *p_src = h->mb.pic.p_fenc[0]; - uint8_t *p_dst = h->mb.pic.p_fdec[0]; - - int i, idx; - int b_merged_satd = !!h->pixf.intra_mbcmp_x3_16x16 && !h->mb.b_lossless; + pixel *p_src = h->mb.pic.p_fenc[0]; + pixel *p_dst = h->mb.pic.p_fdec[0]; + static const int8_t intra_analysis_shortcut[2][2][2][5] = + { + {{{I_PRED_4x4_HU, -1, -1, -1, -1}, + {I_PRED_4x4_DDL, I_PRED_4x4_VL, -1, -1, -1}}, + {{I_PRED_4x4_DDR, I_PRED_4x4_HD, I_PRED_4x4_HU, -1, -1}, + {I_PRED_4x4_DDL, I_PRED_4x4_DDR, I_PRED_4x4_VR, I_PRED_4x4_VL, -1}}}, + {{{I_PRED_4x4_HU, -1, -1, -1, -1}, + {-1, -1, -1, -1, -1}}, + {{I_PRED_4x4_DDR, I_PRED_4x4_HD, I_PRED_4x4_HU, -1, -1}, + {I_PRED_4x4_DDR, I_PRED_4x4_VR, -1, -1, -1}}}, + }; + + int idx; + int lambda = a->i_lambda; /*---------------- Try all mode and calculate their score ---------------*/ /* 16x16 prediction selection */ const int8_t *predict_mode = predict_16x16_mode_available( h->mb.i_neighbour_intra ); - if( b_merged_satd && predict_mode[3] >= 0 ) + /* Not heavily tuned */ + static const uint8_t i16x16_thresh_lut[11] = { 2, 2, 2, 3, 3, 4, 4, 4, 4, 4, 4 }; + int i16x16_thresh = a->b_fast_intra ? (i16x16_thresh_lut[h->mb.i_subpel_refine]*i_satd_inter)>>1 : COST_MAX; + + if( !h->mb.b_lossless && predict_mode[3] >= 0 ) { h->pixf.intra_mbcmp_x3_16x16( p_src, p_dst, a->i_satd_i16x16_dir ); - h->predict_16x16[I_PRED_16x16_P]( p_dst ); - a->i_satd_i16x16_dir[I_PRED_16x16_P] = - h->pixf.mbcmp[PIXEL_16x16]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE ); - for( i=0; i<4; i++ ) + a->i_satd_i16x16_dir[0] += lambda * bs_size_ue(0); + a->i_satd_i16x16_dir[1] += lambda * bs_size_ue(1); + a->i_satd_i16x16_dir[2] += lambda * bs_size_ue(2); + COPY2_IF_LT( a->i_satd_i16x16, a->i_satd_i16x16_dir[0], a->i_predict16x16, 0 ); + COPY2_IF_LT( a->i_satd_i16x16, a->i_satd_i16x16_dir[1], a->i_predict16x16, 1 ); + COPY2_IF_LT( a->i_satd_i16x16, a->i_satd_i16x16_dir[2], a->i_predict16x16, 2 ); + + /* Plane is expensive, so don't check it unless one of the previous modes was useful. 
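*/

Each candidate's score above is mbcmp (SATD by default) plus lambda times
the ue(v) codeword length that signals the mode, and plane prediction is
only measured once V/H/DC already beat the fast-intra threshold from
i16x16_thresh_lut. A sketch of the selection; mode numbering V=0, H=1,
DC=2, P=3 as in the I_PRED_16x16_* order, and satd_plane() stands in for
predicting I_PRED_16x16_P and measuring it on demand:

#include <limits.h>

/* ue(v) Exp-Golomb length: 2*floor(log2(v+1)) + 1 bits */
static int bs_size_ue( unsigned v )
{
    int lz = 0;
    for( unsigned x = v + 1; x > 1; x >>= 1 )
        lz++;
    return 2*lz + 1;
}

static int pick_i16x16( const int satd_vhdc[3], int (*satd_plane)(void),
                        int lambda, int thresh, int *best_cost )
{
    int best = INT_MAX, mode = 0;
    for( int m = 0; m < 3; m++ )    /* V/H/DC from one intra_mbcmp_x3 call */
    {
        int cost = satd_vhdc[m] + lambda * bs_size_ue( m );
        if( cost < best )
            best = cost, mode = m;
    }
    if( best <= thresh )            /* plane only if something looks good */
    {
        int cost = satd_plane() + lambda * bs_size_ue( 3 );
        if( cost < best )
            best = cost, mode = 3;
    }
    *best_cost = best;
    return mode;
}

/*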
*/ + if( a->i_satd_i16x16 <= i16x16_thresh ) { - int cost = a->i_satd_i16x16_dir[i] += a->i_lambda * bs_size_ue(i); - COPY2_IF_LT( a->i_satd_i16x16, cost, a->i_predict16x16, i ); + h->predict_16x16[I_PRED_16x16_P]( p_dst ); + a->i_satd_i16x16_dir[I_PRED_16x16_P] = h->pixf.mbcmp[PIXEL_16x16]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE ); + a->i_satd_i16x16_dir[I_PRED_16x16_P] += lambda * bs_size_ue(3); + COPY2_IF_LT( a->i_satd_i16x16, a->i_satd_i16x16_dir[I_PRED_16x16_P], a->i_predict16x16, 3 ); } } else @@ -700,7 +760,7 @@ static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_ h->predict_16x16[i_mode]( p_dst ); i_satd = h->pixf.mbcmp[PIXEL_16x16]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE ) + - a->i_lambda * bs_size_ue( x264_mb_pred_mode16x16_fix[i_mode] ); + lambda * bs_size_ue( x264_mb_pred_mode16x16_fix[i_mode] ); COPY2_IF_LT( a->i_satd_i16x16, i_satd, a->i_predict16x16, i_mode ); a->i_satd_i16x16_dir[i_mode] = i_satd; } @@ -708,50 +768,59 @@ static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_ if( h->sh.i_type == SLICE_TYPE_B ) /* cavlc mb type prefix */ - a->i_satd_i16x16 += a->i_lambda * i_mb_b_cost_table[I_16x16]; - if( a->b_fast_intra && a->i_satd_i16x16 > 2*i_satd_inter ) + a->i_satd_i16x16 += lambda * i_mb_b_cost_table[I_16x16]; + + if( a->i_satd_i16x16 > i16x16_thresh ) return; /* 8x8 prediction selection */ if( flags & X264_ANALYSE_I8x8 ) { - ALIGNED_ARRAY_16( uint8_t, edge,[33] ); + ALIGNED_ARRAY_16( pixel, edge,[33] ); x264_pixel_cmp_t sa8d = (h->pixf.mbcmp[0] == h->pixf.satd[0]) ? h->pixf.sa8d[PIXEL_8x8] : h->pixf.mbcmp[PIXEL_8x8]; int i_satd_thresh = a->i_mbrd ? COST_MAX : X264_MIN( i_satd_inter, a->i_satd_i16x16 ); - int i_cost = 0; - h->mb.i_cbp_luma = 0; - b_merged_satd = h->pixf.intra_mbcmp_x3_8x8 && !h->mb.b_lossless; // FIXME some bias like in i4x4? + int i_cost = lambda * 4; /* base predmode costs */ + h->mb.i_cbp_luma = 0; + if( h->sh.i_type == SLICE_TYPE_B ) - i_cost += a->i_lambda * i_mb_b_cost_table[I_8x8]; + i_cost += lambda * i_mb_b_cost_table[I_8x8]; for( idx = 0;; idx++ ) { int x = idx&1; int y = idx>>1; - uint8_t *p_src_by = p_src + 8*x + 8*y*FENC_STRIDE; - uint8_t *p_dst_by = p_dst + 8*x + 8*y*FDEC_STRIDE; + pixel *p_src_by = p_src + 8*x + 8*y*FENC_STRIDE; + pixel *p_dst_by = p_dst + 8*x + 8*y*FDEC_STRIDE; int i_best = COST_MAX; int i_pred_mode = x264_mb_predict_intra4x4_mode( h, 4*idx ); - predict_mode = predict_4x4_mode_available( h->mb.i_neighbour8[idx] ); + predict_mode = predict_8x8_mode_available( a->b_avoid_topright, h->mb.i_neighbour8[idx], idx ); h->predict_8x8_filter( p_dst_by, edge, h->mb.i_neighbour8[idx], ALL_NEIGHBORS ); - if( b_merged_satd && predict_mode[8] >= 0 ) + if( !h->mb.b_lossless && predict_mode[5] >= 0 ) { int satd[9]; h->pixf.intra_mbcmp_x3_8x8( p_src_by, edge, satd ); - satd[i_pred_mode] -= 3 * a->i_lambda; - for( i=2; i>=0; i-- ) + int favor_vertical = satd[I_PRED_4x4_H] > satd[I_PRED_4x4_V]; + satd[i_pred_mode] -= 3 * lambda; + for( int i = 2; i >= 0; i-- ) { - int cost = a->i_satd_i8x8_dir[i][idx] = satd[i] + 4 * a->i_lambda; + int cost = satd[i]; + a->i_satd_i8x8_dir[i][idx] = cost + 4 * lambda; COPY2_IF_LT( i_best, cost, a->i_predict8x8[idx], i ); } - predict_mode += 3; + + /* Take analysis shortcuts: don't analyse modes that are too + * far away direction-wise from the favored mode. 
*/ + if( a->i_mbrd < 1 + a->b_fast_intra ) + predict_mode = intra_analysis_shortcut[a->b_avoid_topright][predict_mode[8] >= 0][favor_vertical]; + else + predict_mode += 3; } - for( ; *predict_mode >= 0; predict_mode++ ) + for( ; *predict_mode >= 0 && (i_best >= 0 || a->i_mbrd >= 2); predict_mode++ ) { int i_satd; int i_mode = *predict_mode; @@ -761,14 +830,14 @@ static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_ else h->predict_8x8[i_mode]( p_dst_by, edge ); - i_satd = sa8d( p_dst_by, FDEC_STRIDE, p_src_by, FENC_STRIDE ) + a->i_lambda * 4; + i_satd = sa8d( p_dst_by, FDEC_STRIDE, p_src_by, FENC_STRIDE ); if( i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) ) - i_satd -= a->i_lambda * 3; + i_satd -= 3 * lambda; COPY2_IF_LT( i_best, i_satd, a->i_predict8x8[idx], i_mode ); - a->i_satd_i8x8_dir[i_mode][idx] = i_satd; + a->i_satd_i8x8_dir[i_mode][idx] = i_satd + 4 * lambda; } - i_cost += i_best; + i_cost += i_best + 3 * lambda; if( idx == 3 || i_cost > i_satd_thresh ) break; @@ -801,64 +870,83 @@ static void x264_mb_analyse_intra( x264_t *h, x264_mb_analysis_t *a, int i_satd_ a->i_satd_i8x8 = COST_MAX; i_cost = (i_cost * cost_div_fix8[idx]) >> 8; } - if( X264_MIN(i_cost, a->i_satd_i16x16) > i_satd_inter*(5+!!a->i_mbrd)/4 ) + /* Not heavily tuned */ + static const uint8_t i8x8_thresh[11] = { 4, 4, 4, 5, 5, 5, 6, 6, 6, 6, 6 }; + if( X264_MIN(i_cost, a->i_satd_i16x16) > (i_satd_inter*i8x8_thresh[h->mb.i_subpel_refine])>>2 ) return; } /* 4x4 prediction selection */ if( flags & X264_ANALYSE_I4x4 ) { - int i_cost; + int i_cost = lambda * (24+16); /* 24from JVT (SATD0), 16 from base predmode costs */ int i_satd_thresh = X264_MIN3( i_satd_inter, a->i_satd_i16x16, a->i_satd_i8x8 ); h->mb.i_cbp_luma = 0; - b_merged_satd = h->pixf.intra_mbcmp_x3_4x4 && !h->mb.b_lossless; + if( a->i_mbrd ) i_satd_thresh = i_satd_thresh * (10-a->b_fast_intra)/8; - i_cost = a->i_lambda * 24; /* from JVT (SATD0) */ if( h->sh.i_type == SLICE_TYPE_B ) - i_cost += a->i_lambda * i_mb_b_cost_table[I_4x4]; + i_cost += lambda * i_mb_b_cost_table[I_4x4]; for( idx = 0;; idx++ ) { - uint8_t *p_src_by = p_src + block_idx_xy_fenc[idx]; - uint8_t *p_dst_by = p_dst + block_idx_xy_fdec[idx]; + pixel *p_src_by = p_src + block_idx_xy_fenc[idx]; + pixel *p_dst_by = p_dst + block_idx_xy_fdec[idx]; int i_best = COST_MAX; int i_pred_mode = x264_mb_predict_intra4x4_mode( h, idx ); - const int8_t *predict_mode = predict_4x4_mode_available( h->mb.i_neighbour4[idx] ); + predict_mode = predict_4x4_mode_available( a->b_avoid_topright, h->mb.i_neighbour4[idx], idx ); if( (h->mb.i_neighbour4[idx] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP ) /* emulate missing topright samples */ - M32( &p_dst_by[4 - FDEC_STRIDE] ) = p_dst_by[3 - FDEC_STRIDE] * 0x01010101U; + MPIXEL_X4( &p_dst_by[4 - FDEC_STRIDE] ) = PIXEL_SPLAT_X4( p_dst_by[3 - FDEC_STRIDE] ); - if( b_merged_satd && predict_mode[5] >= 0 ) + if( !h->mb.b_lossless && predict_mode[5] >= 0 ) { int satd[9]; h->pixf.intra_mbcmp_x3_4x4( p_src_by, p_dst_by, satd ); - satd[i_pred_mode] -= 3 * a->i_lambda; - for( i=2; i>=0; i-- ) + int favor_vertical = satd[I_PRED_4x4_H] > satd[I_PRED_4x4_V]; + satd[i_pred_mode] -= 3 * lambda; + for( int i = 2; i >= 0; i-- ) COPY2_IF_LT( i_best, satd[i], a->i_predict4x4[idx], i ); - predict_mode += 3; + + /* Take analysis shortcuts: don't analyse modes that are too + * far away direction-wise from the favored mode. 
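*/

The shortcut is purely directional: the batched x3 pass already scored H
and V, so favor_vertical = satd[H] > satd[V] says which half of the
remaining angular modes is plausible, and only that half (further thinned
when top-right pixels are off limits, or when the full 9-mode set is
unavailable) gets a real prediction and mbcmp. The same table as above,
repacked with spec mode numbers (3=DDL 4=DDR 5=VR 6=HD 7=VL 8=HU) to make
the pattern visible; the accessor name is invented:

static const signed char *extra_4x4_modes( int avoid_topright,
                                           int full_set, int favor_vertical )
{
    static const signed char tab[2][2][2][5] =
    {
        { { { 8, -1, -1, -1, -1 },    /* horizontal-ish, no DDR/VR/HD */
            { 3,  7, -1, -1, -1 } },  /* vertical-ish,   no DDR/VR/HD */
          { { 4,  6,  8, -1, -1 },    /* horizontal-ish, full set */
            { 3,  4,  5,  7, -1 } } },/* vertical-ish,   full set */
        { { { 8, -1, -1, -1, -1 },    /* same, minus top-right modes */
            { -1, -1, -1, -1, -1 } },
          { { 4,  6,  8, -1, -1 },
            { 4,  5, -1, -1, -1 } } },
    };
    return tab[avoid_topright][full_set][favor_vertical];
}

/*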
*/ + if( a->i_mbrd < 1 + a->b_fast_intra ) + predict_mode = intra_analysis_shortcut[a->b_avoid_topright][predict_mode[8] >= 0][favor_vertical]; + else + predict_mode += 3; } - for( ; *predict_mode >= 0; predict_mode++ ) + if( i_best > 0 ) { - int i_satd; - int i_mode = *predict_mode; + for( ; *predict_mode >= 0; predict_mode++ ) + { + int i_satd; + int i_mode = *predict_mode; - if( h->mb.b_lossless ) - x264_predict_lossless_4x4( h, p_dst_by, idx, i_mode ); - else - h->predict_4x4[i_mode]( p_dst_by ); + if( h->mb.b_lossless ) + x264_predict_lossless_4x4( h, p_dst_by, idx, i_mode ); + else + h->predict_4x4[i_mode]( p_dst_by ); - i_satd = h->pixf.mbcmp[PIXEL_4x4]( p_dst_by, FDEC_STRIDE, p_src_by, FENC_STRIDE ); - if( i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) ) - i_satd -= a->i_lambda * 3; + i_satd = h->pixf.mbcmp[PIXEL_4x4]( p_dst_by, FDEC_STRIDE, p_src_by, FENC_STRIDE ); + if( i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) ) + { + i_satd -= lambda * 3; + if( i_satd <= 0 ) + { + i_best = i_satd; + a->i_predict4x4[idx] = i_mode; + break; + } + } - COPY2_IF_LT( i_best, i_satd, a->i_predict4x4[idx], i_mode ); + COPY2_IF_LT( i_best, i_satd, a->i_predict4x4[idx], i_mode ); + } } - i_cost += i_best + 4 * a->i_lambda; + i_cost += i_best + 3 * lambda; if( i_cost > i_satd_thresh || idx == 15 ) break; @@ -922,10 +1010,7 @@ static void x264_intra_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd_thresh ) static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a ) { - uint8_t *p_dst = h->mb.pic.p_fdec[0]; - - int i, idx, x, y; - int i_mode, i_thresh; + pixel *p_dst = h->mb.pic.p_fdec[0]; uint64_t i_satd, i_best; h->mb.i_skip_intra = 0; @@ -933,7 +1018,7 @@ static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a ) { int old_pred_mode = a->i_predict16x16; const int8_t *predict_mode = predict_16x16_mode_available( h->mb.i_neighbour_intra ); - i_thresh = a->i_satd_i16x16_dir[old_pred_mode] * 9/8; + int i_thresh = a->i_satd_i16x16_dir[old_pred_mode] * 9/8; i_best = a->i_satd_i16x16; for( ; *predict_mode >= 0; predict_mode++ ) { @@ -952,11 +1037,11 @@ static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a ) { int8_t predict_mode_sorted[4]; int i_max; - i_thresh = a->i_satd_i8x8chroma * 5/4; + int i_thresh = a->i_satd_i8x8chroma * 5/4; for( i_max = 0; *predict_mode >= 0; predict_mode++ ) { - i_mode = *predict_mode; + int i_mode = *predict_mode; if( a->i_satd_i8x8chroma_dir[i_mode] < i_thresh && i_mode != a->i_predict8x8chroma ) predict_mode_sorted[i_max++] = i_mode; } @@ -969,9 +1054,9 @@ static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a ) * coefs for the current chroma mode are still around, so we only * have to recount the bits. 
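*/

All of these refinement passes compare scores of the usual
rate-distortion form, distortion plus lambda2-weighted bits on an 8-bit
fixed-point scale; the same scaling is visible above in the PCM cost,
((uint64_t)X264_PCM_COST*a->i_lambda2 + 128) >> 8. As a sketch:

#include <stdint.h>

/* lambda2 carries a built-in factor of 256 (see the lambda2 table
 * comment), so the bit cost is scaled back down with rounding */
static uint64_t rd_cost( uint64_t ssd, uint64_t bits, int lambda2 )
{
    return ssd + ( (bits * lambda2 + 128) >> 8 );
}

/*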
*/ i_best = x264_rd_cost_i8x8_chroma( h, i_chroma_lambda, a->i_predict8x8chroma, 0 ); - for( i = 0; i < i_max; i++ ) + for( int i = 0; i < i_max; i++ ) { - i_mode = predict_mode_sorted[i]; + int i_mode = predict_mode_sorted[i]; if( h->mb.b_lossless ) x264_predict_lossless_8x8_chroma( h, i_mode ); else @@ -992,22 +1077,22 @@ static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a ) if( h->mb.i_type == I_4x4 ) { - uint32_t pels[4] = {0}; // doesn't need initting, just shuts up a gcc warning + pixel4 pels[4] = {0}; // doesn't need initting, just shuts up a gcc warning int i_nnz = 0; - for( idx = 0; idx < 16; idx++ ) + for( int idx = 0; idx < 16; idx++ ) { - uint8_t *p_dst_by = p_dst + block_idx_xy_fdec[idx]; + pixel *p_dst_by = p_dst + block_idx_xy_fdec[idx]; i_best = COST_MAX64; - const int8_t *predict_mode = predict_4x4_mode_available( h->mb.i_neighbour4[idx] ); + predict_mode = predict_4x4_mode_available( a->b_avoid_topright, h->mb.i_neighbour4[idx], idx ); if( (h->mb.i_neighbour4[idx] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP ) /* emulate missing topright samples */ - M32( &p_dst_by[4 - FDEC_STRIDE] ) = p_dst_by[3 - FDEC_STRIDE] * 0x01010101U; + MPIXEL_X4( &p_dst_by[4 - FDEC_STRIDE] ) = PIXEL_SPLAT_X4( p_dst_by[3 - FDEC_STRIDE] ); for( ; *predict_mode >= 0; predict_mode++ ) { - i_mode = *predict_mode; + int i_mode = *predict_mode; if( h->mb.b_lossless ) x264_predict_lossless_4x4( h, p_dst_by, idx, i_mode ); else @@ -1018,18 +1103,18 @@ static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a ) { a->i_predict4x4[idx] = i_mode; i_best = i_satd; - pels[0] = M32( p_dst_by+0*FDEC_STRIDE ); - pels[1] = M32( p_dst_by+1*FDEC_STRIDE ); - pels[2] = M32( p_dst_by+2*FDEC_STRIDE ); - pels[3] = M32( p_dst_by+3*FDEC_STRIDE ); + pels[0] = MPIXEL_X4( p_dst_by+0*FDEC_STRIDE ); + pels[1] = MPIXEL_X4( p_dst_by+1*FDEC_STRIDE ); + pels[2] = MPIXEL_X4( p_dst_by+2*FDEC_STRIDE ); + pels[3] = MPIXEL_X4( p_dst_by+3*FDEC_STRIDE ); i_nnz = h->mb.cache.non_zero_count[x264_scan8[idx]]; } } - M32( p_dst_by+0*FDEC_STRIDE ) = pels[0]; - M32( p_dst_by+1*FDEC_STRIDE ) = pels[1]; - M32( p_dst_by+2*FDEC_STRIDE ) = pels[2]; - M32( p_dst_by+3*FDEC_STRIDE ) = pels[3]; + MPIXEL_X4( p_dst_by+0*FDEC_STRIDE ) = pels[0]; + MPIXEL_X4( p_dst_by+1*FDEC_STRIDE ) = pels[1]; + MPIXEL_X4( p_dst_by+2*FDEC_STRIDE ) = pels[2]; + MPIXEL_X4( p_dst_by+3*FDEC_STRIDE ) = pels[3]; h->mb.cache.non_zero_count[x264_scan8[idx]] = i_nnz; h->mb.cache.intra4x4_pred_mode[x264_scan8[idx]] = a->i_predict4x4[idx]; @@ -1037,28 +1122,28 @@ static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a ) } else if( h->mb.i_type == I_8x8 ) { - ALIGNED_ARRAY_16( uint8_t, edge,[33] ); - for( idx = 0; idx < 4; idx++ ) + ALIGNED_ARRAY_16( pixel, edge,[33] ); + for( int idx = 0; idx < 4; idx++ ) { - uint64_t pels_h = 0; - uint8_t pels_v[7]; + pixel4 pels_h[2] = {0}; + pixel pels_v[7] = {0}; uint16_t i_nnz[2] = {0}; //shut up gcc - uint8_t *p_dst_by; - int j; + pixel *p_dst_by; int cbp_luma_new = 0; - i_thresh = a->i_satd_i8x8_dir[a->i_predict8x8[idx]][idx] * 11/8; + int i_thresh = a->i_satd_i8x8_dir[a->i_predict8x8[idx]][idx] * 11/8; i_best = COST_MAX64; - x = idx&1; - y = idx>>1; + int x = idx&1; + int y = idx>>1; + int s8 = X264_SCAN8_0 + 2*x + 16*y; p_dst_by = p_dst + 8*x + 8*y*FDEC_STRIDE; - const int8_t *predict_mode = predict_4x4_mode_available( h->mb.i_neighbour8[idx] ); + predict_mode = predict_8x8_mode_available( a->b_avoid_topright, h->mb.i_neighbour8[idx], idx ); h->predict_8x8_filter( p_dst_by, edge, h->mb.i_neighbour8[idx], ALL_NEIGHBORS 
); for( ; *predict_mode >= 0; predict_mode++ ) { - i_mode = *predict_mode; + int i_mode = *predict_mode; if( a->i_satd_i8x8_dir[i_mode][idx] > i_thresh ) continue; @@ -1075,21 +1160,23 @@ static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a ) cbp_luma_new = h->mb.i_cbp_luma; i_best = i_satd; - pels_h = M64( p_dst_by+7*FDEC_STRIDE ); + pels_h[0] = MPIXEL_X4( p_dst_by+7*FDEC_STRIDE+0 ); + pels_h[1] = MPIXEL_X4( p_dst_by+7*FDEC_STRIDE+4 ); if( !(idx&1) ) - for( j=0; j<7; j++ ) + for( int j = 0; j < 7; j++ ) pels_v[j] = p_dst_by[7+j*FDEC_STRIDE]; - i_nnz[0] = M16( &h->mb.cache.non_zero_count[x264_scan8[4*idx+0]] ); - i_nnz[1] = M16( &h->mb.cache.non_zero_count[x264_scan8[4*idx+2]] ); + i_nnz[0] = M16( &h->mb.cache.non_zero_count[s8 + 0*8] ); + i_nnz[1] = M16( &h->mb.cache.non_zero_count[s8 + 1*8] ); } } a->i_cbp_i8x8_luma = cbp_luma_new; - M64( p_dst_by+7*FDEC_STRIDE ) = pels_h; + MPIXEL_X4( p_dst_by+7*FDEC_STRIDE+0 ) = pels_h[0]; + MPIXEL_X4( p_dst_by+7*FDEC_STRIDE+4 ) = pels_h[1]; if( !(idx&1) ) - for( j=0; j<7; j++ ) + for( int j = 0; j < 7; j++ ) p_dst_by[7+j*FDEC_STRIDE] = pels_v[j]; - M16( &h->mb.cache.non_zero_count[x264_scan8[4*idx+0]] ) = i_nnz[0]; - M16( &h->mb.cache.non_zero_count[x264_scan8[4*idx+2]] ) = i_nnz[1]; + M16( &h->mb.cache.non_zero_count[s8 + 0*8] ) = i_nnz[0]; + M16( &h->mb.cache.non_zero_count[s8 + 1*8] ) = i_nnz[1]; x264_macroblock_cache_intra8x8_pred( h, 2*x, 2*y, a->i_predict8x8[idx] ); } @@ -1109,8 +1196,7 @@ static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a ) (m)->p_fref[1] = &(src)[1][(xoff)+(yoff)*(m)->i_stride[0]]; \ (m)->p_fref[2] = &(src)[2][(xoff)+(yoff)*(m)->i_stride[0]]; \ (m)->p_fref[3] = &(src)[3][(xoff)+(yoff)*(m)->i_stride[0]]; \ - (m)->p_fref[4] = &(src)[4][((xoff)>>1)+((yoff)>>1)*(m)->i_stride[1]]; \ - (m)->p_fref[5] = &(src)[5][((xoff)>>1)+((yoff)>>1)*(m)->i_stride[1]]; \ + (m)->p_fref[4] = &(src)[4][(xoff)+((yoff)>>1)*(m)->i_stride[1]]; \ (m)->integral = &h->mb.pic.p_integral[list][ref][(xoff)+(yoff)*(m)->i_stride[0]]; \ (m)->weight = weight_none; \ (m)->i_ref = ref; @@ -1125,7 +1211,7 @@ static void x264_intra_rd_refine( x264_t *h, x264_mb_analysis_t *a ) static void x264_mb_analyse_inter_p16x16( x264_t *h, x264_mb_analysis_t *a ) { x264_me_t m; - int i_ref, i_mvc; + int i_mvc; ALIGNED_4( int16_t mvc[8][2] ); int i_halfpel_thresh = INT_MAX; int *p_halfpel_thresh = h->mb.pic.i_fref[0]>1 ? 
&i_halfpel_thresh : NULL; @@ -1135,18 +1221,16 @@ static void x264_mb_analyse_inter_p16x16( x264_t *h, x264_mb_analysis_t *a ) LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 0 ); a->l0.me16x16.cost = INT_MAX; - for( i_ref = 0; i_ref < h->mb.pic.i_fref[0]; i_ref++ ) + for( int i_ref = 0; i_ref < h->mb.pic.i_fref[0]; i_ref++ ) { - const int i_ref_cost = REF_COST( 0, i_ref ); - i_halfpel_thresh -= i_ref_cost; - m.i_ref_cost = i_ref_cost; + m.i_ref_cost = REF_COST( 0, i_ref ); + i_halfpel_thresh -= m.i_ref_cost; /* search with ref */ LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 0 ); LOAD_WPELS( &m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 0, 0 ); x264_mb_predict_mv_16x16( h, 0, i_ref, m.mvp ); - x264_mb_predict_mv_ref16x16( h, 0, i_ref, mvc, &i_mvc ); if( h->mb.ref_blind_dupe == i_ref ) { @@ -1154,12 +1238,19 @@ static void x264_mb_analyse_inter_p16x16( x264_t *h, x264_mb_analysis_t *a ) x264_me_refine_qpel_refdupe( h, &m, p_halfpel_thresh ); } else + { + x264_mb_predict_mv_ref16x16( h, 0, i_ref, mvc, &i_mvc ); x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh ); + } + + /* save mv for predicting neighbors */ + CP32( h->mb.mvr[0][i_ref][h->mb.i_mb_xy], m.mv ); + CP32( a->l0.mvc[i_ref][0], m.mv ); /* early termination * SSD threshold would probably be better than SATD */ if( i_ref == 0 - && a->b_try_pskip + && a->b_try_skip && m.cost-m.cost_mv < 300*a->i_lambda && abs(m.mv[0]-h->mb.cache.pskip_mv[0]) + abs(m.mv[1]-h->mb.cache.pskip_mv[1]) <= 1 @@ -1167,28 +1258,24 @@ static void x264_mb_analyse_inter_p16x16( x264_t *h, x264_mb_analysis_t *a ) { h->mb.i_type = P_SKIP; x264_analyse_update_cache( h, a ); - assert( h->mb.cache.pskip_mv[1] <= h->mb.mv_max_spel[1] || h->param.i_threads == 1 || h->param.b_sliced_threads ); + assert( h->mb.cache.pskip_mv[1] <= h->mb.mv_max_spel[1] || h->i_thread_frames == 1 ); return; } - m.cost += i_ref_cost; - i_halfpel_thresh += i_ref_cost; + m.cost += m.i_ref_cost; + i_halfpel_thresh += m.i_ref_cost; if( m.cost < a->l0.me16x16.cost ) h->mc.memcpy_aligned( &a->l0.me16x16, &m, sizeof(x264_me_t) ); - - /* save mv for predicting neighbors */ - CP32( a->l0.mvc[i_ref][0], m.mv ); - CP32( h->mb.mvr[0][i_ref][h->mb.i_mb_xy], m.mv ); } x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.me16x16.i_ref ); - assert( a->l0.me16x16.mv[1] <= h->mb.mv_max_spel[1] || h->param.i_threads == 1 || h->param.b_sliced_threads ); + assert( a->l0.me16x16.mv[1] <= h->mb.mv_max_spel[1] || h->i_thread_frames == 1 ); h->mb.i_type = P_L0; if( a->i_mbrd ) { - x264_mb_cache_fenc_satd( h ); + x264_mb_init_fenc_cache( h, a->i_mbrd >= 2 || h->param.analyse.inter & X264_ANALYSE_PSUB8x8 ); if( a->l0.me16x16.i_ref == 0 && M32( a->l0.me16x16.mv ) == M32( h->mb.cache.pskip_mv ) && !a->b_force_intra ) { h->mb.i_partition = D_16x16; @@ -1203,11 +1290,7 @@ static void x264_mb_analyse_inter_p16x16( x264_t *h, x264_mb_analysis_t *a ) static void x264_mb_analyse_inter_p8x8_mixed_ref( x264_t *h, x264_mb_analysis_t *a ) { x264_me_t m; - int i_ref; - uint8_t **p_fenc = h->mb.pic.p_fenc; - int i_halfpel_thresh = INT_MAX; - int *p_halfpel_thresh = /*h->mb.pic.i_fref[0]>1 ? 
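The early-skip test above fires before the remaining references are even
searched: if the ref-0 motion search lands within one quarter-pel of the
predicted skip MV and the non-MV part of its cost is under 300*lambda,
the macroblock is committed as P_SKIP on the spot. The same condition as
a standalone predicate (names invented, thresholds from the code above):

#include <stdint.h>
#include <stdlib.h>

static int try_early_pskip( int i_ref, int b_try_skip, int b_force_intra,
                            int cost, int cost_mv, int lambda,
                            const int16_t mv[2], const int16_t pskip_mv[2] )
{
    return i_ref == 0
        && b_try_skip
        && cost - cost_mv < 300 * lambda        /* residual is cheap */
        && abs( mv[0] - pskip_mv[0] )
         + abs( mv[1] - pskip_mv[1] ) <= 1      /* within 1 qpel of skip MV */
        && !b_force_intra;                      /* PIR may forbid skipping */
}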
&i_halfpel_thresh : */NULL; - int i; + pixel **p_fenc = h->mb.pic.p_fenc; int i_maxref = h->mb.pic.i_fref[0]-1; h->mb.i_partition = D_8x8; @@ -1222,7 +1305,7 @@ static void x264_mb_analyse_inter_p8x8_mixed_ref( x264_t *h, x264_mb_analysis_t /* early termination: if 16x16 chose ref 0, then evalute no refs older * than those used by the neighbors */ if( i_maxref > 0 && (a->l0.me16x16.i_ref == 0 || a->l0.me16x16.i_ref == h->mb.ref_blind_dupe) && - h->mb.i_mb_type_top && h->mb.i_mb_type_left ) + h->mb.i_mb_type_top > 0 && h->mb.i_mb_type_left > 0 ) { i_maxref = 0; CHECK_NEIGHBOUR( -8 - 1 ); @@ -1232,24 +1315,24 @@ static void x264_mb_analyse_inter_p8x8_mixed_ref( x264_t *h, x264_mb_analysis_t CHECK_NEIGHBOUR( 0 - 1 ); CHECK_NEIGHBOUR( 2*8 - 1 ); } + #undef CHECK_NEIGHBOUR - for( i_ref = 0; i_ref <= i_maxref; i_ref++ ) + for( int i_ref = 0; i_ref <= i_maxref; i_ref++ ) CP32( a->l0.mvc[i_ref][0], h->mb.mvr[0][i_ref][h->mb.i_mb_xy] ); - for( i = 0; i < 4; i++ ) + for( int i = 0; i < 4; i++ ) { x264_me_t *l0m = &a->l0.me8x8[i]; - const int x8 = i%2; - const int y8 = i/2; + int x8 = i&1; + int y8 = i>>1; m.i_pixel = PIXEL_8x8; LOAD_FENC( &m, p_fenc, 8*x8, 8*y8 ); l0m->cost = INT_MAX; - for( i_ref = 0; i_ref <= i_maxref || i_ref == h->mb.ref_blind_dupe; ) + for( int i_ref = 0; i_ref <= i_maxref || i_ref == h->mb.ref_blind_dupe; ) { - const int i_ref_cost = REF_COST( 0, i_ref ); - m.i_ref_cost = i_ref_cost; + m.i_ref_cost = REF_COST( 0, i_ref ); LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*x8, 8*y8 ); LOAD_WPELS( &m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 8*x8, 8*y8 ); @@ -1259,13 +1342,13 @@ static void x264_mb_analyse_inter_p8x8_mixed_ref( x264_t *h, x264_mb_analysis_t if( h->mb.ref_blind_dupe == i_ref ) { CP32( m.mv, a->l0.mvc[0][i+1] ); - x264_me_refine_qpel_refdupe( h, &m, p_halfpel_thresh ); + x264_me_refine_qpel_refdupe( h, &m, NULL ); } else - x264_me_search_ref( h, &m, a->l0.mvc[i_ref], i+1, p_halfpel_thresh ); + x264_me_search( h, &m, a->l0.mvc[i_ref], i+1 ); + + m.cost += m.i_ref_cost; - m.cost += i_ref_cost; - i_halfpel_thresh += i_ref_cost; CP32( a->l0.mvc[i_ref][i+1], m.mv ); if( m.cost < l0m->cost ) @@ -1278,6 +1361,8 @@ static void x264_mb_analyse_inter_p8x8_mixed_ref( x264_t *h, x264_mb_analysis_t x264_macroblock_cache_mv_ptr( h, 2*x8, 2*y8, 2, 2, 0, l0m->mv ); x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, l0m->i_ref ); + a->i_satd8x8[0][i] = l0m->cost - ( l0m->cost_mv + l0m->i_ref_cost ); + /* If CABAC is on and we're not doing sub-8x8 analysis, the costs are effectively zero. */ if( !h->param.b_cabac || (h->param.analyse.inter & X264_ANALYSE_PSUB8x8) ) @@ -1301,10 +1386,9 @@ static void x264_mb_analyse_inter_p8x8( x264_t *h, x264_mb_analysis_t *a ) * don't bother analysing the dupes. */ const int i_ref = h->mb.ref_blind_dupe == a->l0.me16x16.i_ref ? 0 : a->l0.me16x16.i_ref; const int i_ref_cost = h->param.b_cabac || i_ref ? 
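The i_maxref cap above exploits the spatial correlation of reference
indices: when the 16x16 search settled on ref 0 (or its blind dupe) and
both neighbours are available, no 8x8 partition is allowed a reference
older than what the neighbouring 4x4 blocks used. A simplified model of
the cap (x264 reads h->mb.cache.ref at the -8-1 style scan8 offsets seen
above):

static int cap_max_ref( int i_maxref, const signed char *neighbour_refs, int n )
{
    int cap = 0;
    for( int i = 0; i < n; i++ )
        if( neighbour_refs[i] > cap )   /* intra/unavailable are < 0 */
            cap = neighbour_refs[i];
    return cap < i_maxref ? cap : i_maxref;
}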
REF_COST( 0, i_ref ) : 0; - uint8_t **p_fenc = h->mb.pic.p_fenc; + pixel **p_fenc = h->mb.pic.p_fenc; int i_mvc; int16_t (*mvc)[2] = a->l0.mvc[i_ref]; - int i; /* XXX Needed for x264_mb_predict_mv */ h->mb.i_partition = D_8x8; @@ -1312,11 +1396,11 @@ static void x264_mb_analyse_inter_p8x8( x264_t *h, x264_mb_analysis_t *a ) i_mvc = 1; CP32( mvc[0], a->l0.me16x16.mv ); - for( i = 0; i < 4; i++ ) + for( int i = 0; i < 4; i++ ) { x264_me_t *m = &a->l0.me8x8[i]; - const int x8 = i%2; - const int y8 = i/2; + int x8 = i&1; + int y8 = i>>1; m->i_pixel = PIXEL_8x8; m->i_ref_cost = i_ref_cost; @@ -1333,6 +1417,8 @@ static void x264_mb_analyse_inter_p8x8( x264_t *h, x264_mb_analysis_t *a ) CP32( mvc[i_mvc], m->mv ); i_mvc++; + a->i_satd8x8[0][i] = m->cost - m->cost_mv; + /* mb type cost */ m->cost += i_ref_cost; if( !h->param.b_cabac || (h->param.analyse.inter & X264_ANALYSE_PSUB8x8) ) @@ -1349,17 +1435,16 @@ static void x264_mb_analyse_inter_p8x8( x264_t *h, x264_mb_analysis_t *a ) h->mb.i_sub_partition[2] = h->mb.i_sub_partition[3] = D_L0_8x8; } -static void x264_mb_analyse_inter_p16x8( x264_t *h, x264_mb_analysis_t *a ) +static void x264_mb_analyse_inter_p16x8( x264_t *h, x264_mb_analysis_t *a, int i_best_satd ) { x264_me_t m; - uint8_t **p_fenc = h->mb.pic.p_fenc; + pixel **p_fenc = h->mb.pic.p_fenc; ALIGNED_4( int16_t mvc[3][2] ); - int i, j; /* XXX Needed for x264_mb_predict_mv */ h->mb.i_partition = D_16x8; - for( i = 0; i < 2; i++ ) + for( int i = 0; i < 2; i++ ) { x264_me_t *l0m = &a->l0.me16x8[i]; const int minref = X264_MIN( a->l0.me8x8[2*i].i_ref, a->l0.me8x8[2*i+1].i_ref ); @@ -1371,11 +1456,10 @@ static void x264_mb_analyse_inter_p16x8( x264_t *h, x264_mb_analysis_t *a ) LOAD_FENC( &m, p_fenc, 0, 8*i ); l0m->cost = INT_MAX; - for( j = 0; j < i_ref8s; j++ ) + for( int j = 0; j < i_ref8s; j++ ) { const int i_ref = ref8[j]; - const int i_ref_cost = REF_COST( 0, i_ref ); - m.i_ref_cost = i_ref_cost; + m.i_ref_cost = REF_COST( 0, i_ref ); /* if we skipped the 16x16 predictor, we wouldn't have to copy anything... 
*/ CP32( mvc[0], a->l0.mvc[i_ref][0] ); @@ -1396,11 +1480,20 @@ static void x264_mb_analyse_inter_p16x8( x264_t *h, x264_mb_analysis_t *a ) else x264_me_search( h, &m, mvc, 3 ); - m.cost += i_ref_cost; + m.cost += m.i_ref_cost; if( m.cost < l0m->cost ) h->mc.memcpy_aligned( l0m, &m, sizeof(x264_me_t) ); } + + /* Early termination based on the current SATD score of partition[0] + plus the estimated SATD score of partition[1] */ + if( !i && l0m->cost + a->i_cost_est16x8[1] > i_best_satd * (4 + !!a->i_mbrd) / 4 ) + { + a->l0.i_cost16x8 = COST_MAX; + return; + } + x264_macroblock_cache_mv_ptr( h, 0, 2*i, 4, 2, 0, l0m->mv ); x264_macroblock_cache_ref( h, 0, 2*i, 4, 2, 0, l0m->i_ref ); } @@ -1408,17 +1501,16 @@ static void x264_mb_analyse_inter_p16x8( x264_t *h, x264_mb_analysis_t *a ) a->l0.i_cost16x8 = a->l0.me16x8[0].cost + a->l0.me16x8[1].cost; } -static void x264_mb_analyse_inter_p8x16( x264_t *h, x264_mb_analysis_t *a ) +static void x264_mb_analyse_inter_p8x16( x264_t *h, x264_mb_analysis_t *a, int i_best_satd ) { x264_me_t m; - uint8_t **p_fenc = h->mb.pic.p_fenc; + pixel **p_fenc = h->mb.pic.p_fenc; ALIGNED_4( int16_t mvc[3][2] ); - int i, j; /* XXX Needed for x264_mb_predict_mv */ h->mb.i_partition = D_8x16; - for( i = 0; i < 2; i++ ) + for( int i = 0; i < 2; i++ ) { x264_me_t *l0m = &a->l0.me8x16[i]; const int minref = X264_MIN( a->l0.me8x8[i].i_ref, a->l0.me8x8[i+2].i_ref ); @@ -1430,11 +1522,10 @@ static void x264_mb_analyse_inter_p8x16( x264_t *h, x264_mb_analysis_t *a ) LOAD_FENC( &m, p_fenc, 8*i, 0 ); l0m->cost = INT_MAX; - for( j = 0; j < i_ref8s; j++ ) + for( int j = 0; j < i_ref8s; j++ ) { const int i_ref = ref8[j]; - const int i_ref_cost = REF_COST( 0, i_ref ); - m.i_ref_cost = i_ref_cost; + m.i_ref_cost = REF_COST( 0, i_ref ); CP32( mvc[0], a->l0.mvc[i_ref][0] ); CP32( mvc[1], a->l0.mvc[i_ref][i+1] ); @@ -1454,11 +1545,20 @@ static void x264_mb_analyse_inter_p8x16( x264_t *h, x264_mb_analysis_t *a ) else x264_me_search( h, &m, mvc, 3 ); - m.cost += i_ref_cost; + m.cost += m.i_ref_cost; if( m.cost < l0m->cost ) h->mc.memcpy_aligned( l0m, &m, sizeof(x264_me_t) ); } + + /* Early termination based on the current SATD score of partition[0] + plus the estimated SATD score of partition[1] */ + if( !i && l0m->cost + a->i_cost_est8x16[1] > i_best_satd * (4 + !!a->i_mbrd) / 4 ) + { + a->l0.i_cost8x16 = COST_MAX; + return; + } + x264_macroblock_cache_mv_ptr( h, 2*i, 0, 2, 4, 0, l0m->mv ); x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, 0, l0m->i_ref ); } @@ -1466,27 +1566,27 @@ static void x264_mb_analyse_inter_p8x16( x264_t *h, x264_mb_analysis_t *a ) a->l0.i_cost8x16 = a->l0.me8x16[0].cost + a->l0.me8x16[1].cost; } -static int x264_mb_analyse_inter_p4x4_chroma( x264_t *h, x264_mb_analysis_t *a, uint8_t **p_fref, int i8x8, int pixel ) +static int x264_mb_analyse_inter_p4x4_chroma( x264_t *h, x264_mb_analysis_t *a, pixel **p_fref, int i8x8, int size ) { - ALIGNED_ARRAY_8( uint8_t, pix1,[16*8] ); - uint8_t *pix2 = pix1+8; + ALIGNED_ARRAY_16( pixel, pix1,[16*8] ); + pixel *pix2 = pix1+8; const int i_stride = h->mb.pic.i_stride[1]; - const int or = 4*(i8x8&1) + 2*(i8x8&2)*i_stride; + const int or = 8*(i8x8&1) + 2*(i8x8&2)*i_stride; const int oe = 4*(i8x8&1) + 2*(i8x8&2)*FENC_STRIDE; const int i_ref = a->l0.me8x8[i8x8].i_ref; const int mvy_offset = h->mb.b_interlaced & i_ref ? 
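Both rectangular searches can now abort halfway: after partition 0 is
searched for real, its cost plus the precomputed SATD estimate for
partition 1 (i_cost_est16x8[1]/i_cost_est8x16[1], built from the
i_satd8x8 scores saved during the 8x8 pass) is checked against the best
cost so far, with 25% extra slack when mbrd is on since RD refinement can
still reorder close candidates. As a predicate:

/* returns nonzero if the 16x8/8x16 search should give up after part 0 */
static int abort_rect_partition( int cost_part0, int est_cost_part1,
                                 int i_best_satd, int b_mbrd /* 0 or 1 */ )
{
    return cost_part0 + est_cost_part1 > i_best_satd * (4 + b_mbrd) / 4;
}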
(h->mb.i_mb_y & 1)*4 - 2 : 0; x264_weight_t *weight = h->sh.weight[i_ref]; + // FIXME weight can be done on 4x4 blocks even if mc is smaller #define CHROMA4x4MC( width, height, me, x, y ) \ - h->mc.mc_chroma( &pix1[x+y*16], 16, &p_fref[4][or+x+y*i_stride], i_stride, (me).mv[0], (me).mv[1]+mvy_offset, width, height ); \ + h->mc.mc_chroma( &pix1[x+y*16], &pix2[x+y*16], 16, &p_fref[4][or+x*2+y*i_stride], i_stride, (me).mv[0], (me).mv[1]+mvy_offset, width, height ); \ if( weight[1].weightfn ) \ weight[1].weightfn[width>>2]( &pix1[x+y*16], 16, &pix1[x+y*16], 16, &weight[1], height ); \ - h->mc.mc_chroma( &pix2[x+y*16], 16, &p_fref[5][or+x+y*i_stride], i_stride, (me).mv[0], (me).mv[1]+mvy_offset, width, height ); \ if( weight[2].weightfn ) \ - weight[1].weightfn[width>>2]( &pix2[x+y*16], 16, &pix2[x+y*16], 16, &weight[2], height ); + weight[2].weightfn[width>>2]( &pix2[x+y*16], 16, &pix2[x+y*16], 16, &weight[2], height ); - if( pixel == PIXEL_4x4 ) + if( size == PIXEL_4x4 ) { x264_me_t *m = a->l0.me4x4[i8x8]; CHROMA4x4MC( 2,2, m[0], 0,0 ); @@ -1494,7 +1594,7 @@ static int x264_mb_analyse_inter_p4x4_chroma( x264_t *h, x264_mb_analysis_t *a, CHROMA4x4MC( 2,2, m[2], 0,2 ); CHROMA4x4MC( 2,2, m[3], 2,2 ); } - else if( pixel == PIXEL_8x4 ) + else if( size == PIXEL_8x4 ) { x264_me_t *m = a->l0.me8x4[i8x8]; CHROMA4x4MC( 4,2, m[0], 0,0 ); @@ -1513,15 +1613,14 @@ static int x264_mb_analyse_inter_p4x4_chroma( x264_t *h, x264_mb_analysis_t *a, static void x264_mb_analyse_inter_p4x4( x264_t *h, x264_mb_analysis_t *a, int i8x8 ) { - uint8_t **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref]; - uint8_t **p_fenc = h->mb.pic.p_fenc; + pixel **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref]; + pixel **p_fenc = h->mb.pic.p_fenc; const int i_ref = a->l0.me8x8[i8x8].i_ref; - int i4x4; /* XXX Needed for x264_mb_predict_mv */ h->mb.i_partition = D_8x8; - for( i4x4 = 0; i4x4 < 4; i4x4++ ) + for( int i4x4 = 0; i4x4 < 4; i4x4++ ) { const int idx = 4*i8x8 + i4x4; const int x4 = block_idx_x[idx]; @@ -1553,15 +1652,14 @@ static void x264_mb_analyse_inter_p4x4( x264_t *h, x264_mb_analysis_t *a, int i8 static void x264_mb_analyse_inter_p8x4( x264_t *h, x264_mb_analysis_t *a, int i8x8 ) { - uint8_t **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref]; - uint8_t **p_fenc = h->mb.pic.p_fenc; + pixel **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref]; + pixel **p_fenc = h->mb.pic.p_fenc; const int i_ref = a->l0.me8x8[i8x8].i_ref; - int i8x4; /* XXX Needed for x264_mb_predict_mv */ h->mb.i_partition = D_8x8; - for( i8x4 = 0; i8x4 < 2; i8x4++ ) + for( int i8x4 = 0; i8x4 < 2; i8x4++ ) { const int idx = 4*i8x8 + 2*i8x4; const int x4 = block_idx_x[idx]; @@ -1590,15 +1688,14 @@ static void x264_mb_analyse_inter_p8x4( x264_t *h, x264_mb_analysis_t *a, int i8 static void x264_mb_analyse_inter_p4x8( x264_t *h, x264_mb_analysis_t *a, int i8x8 ) { - uint8_t **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref]; - uint8_t **p_fenc = h->mb.pic.p_fenc; + pixel **p_fref = h->mb.pic.p_fref[0][a->l0.me8x8[i8x8].i_ref]; + pixel **p_fenc = h->mb.pic.p_fenc; const int i_ref = a->l0.me8x8[i8x8].i_ref; - int i4x8; /* XXX Needed for x264_mb_predict_mv */ h->mb.i_partition = D_8x8; - for( i4x8 = 0; i4x8 < 2; i4x8++ ) + for( int i4x8 = 0; i4x8 < 2; i4x8++ ) { const int idx = 4*i8x8 + i4x8; const int x4 = block_idx_x[idx]; @@ -1625,117 +1722,236 @@ static void x264_mb_analyse_inter_p4x8( x264_t *h, x264_mb_analysis_t *a, int i8 a->l0.i_cost4x8[i8x8] += x264_mb_analyse_inter_p4x4_chroma( h, a, p_fref, i8x8, PIXEL_4x8 ); } +static 
ALWAYS_INLINE int x264_analyse_bi_chroma( x264_t *h, x264_mb_analysis_t *a, int idx, int i_pixel ) +{ + ALIGNED_ARRAY_16( pixel, pix, [4],[8*8] ); + ALIGNED_ARRAY_16( pixel, bi, [2],[8*8] ); + int l0_mvy_offset, l1_mvy_offset; + int i_chroma_cost = 0; + +#define COST_BI_CHROMA( m0, m1, width, height ) \ +{ \ + l0_mvy_offset = h->mb.b_interlaced & m0.i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0; \ + l1_mvy_offset = h->mb.b_interlaced & m1.i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0; \ + h->mc.mc_chroma( pix[0], pix[1], 8, m0.p_fref[4], m0.i_stride[1], m0.mv[0], m0.mv[1] + l0_mvy_offset, width, height ); \ + h->mc.mc_chroma( pix[2], pix[3], 8, m1.p_fref[4], m1.i_stride[1], m1.mv[0], m1.mv[1] + l1_mvy_offset, width, height ); \ + h->mc.avg[i_pixel+3]( bi[0], 8, pix[0], 8, pix[2], 8, h->mb.bipred_weight[m0.i_ref][m1.i_ref] ); \ + h->mc.avg[i_pixel+3]( bi[1], 8, pix[1], 8, pix[3], 8, h->mb.bipred_weight[m0.i_ref][m1.i_ref] ); \ + i_chroma_cost = h->pixf.mbcmp[i_pixel+3]( m0.p_fenc[1], FENC_STRIDE, bi[0], 8 ); \ + i_chroma_cost += h->pixf.mbcmp[i_pixel+3]( m0.p_fenc[2], FENC_STRIDE, bi[1], 8 ); \ +} + + if( i_pixel == PIXEL_16x16 ) + COST_BI_CHROMA( a->l0.bi16x16, a->l1.bi16x16, 8, 8 ) + else if( i_pixel == PIXEL_16x8 ) + COST_BI_CHROMA( a->l0.me16x8[idx], a->l1.me16x8[idx], 8, 4 ) + else if( i_pixel == PIXEL_8x16 ) + COST_BI_CHROMA( a->l0.me8x16[idx], a->l1.me8x16[idx], 4, 8 ) + else + COST_BI_CHROMA( a->l0.me8x8[idx], a->l1.me8x8[idx], 4, 4 ) + + return i_chroma_cost; +} + static void x264_mb_analyse_inter_direct( x264_t *h, x264_mb_analysis_t *a ) { /* Assumes that fdec still contains the results of * x264_mb_predict_mv_direct16x16 and x264_mb_mc */ - uint8_t **p_fenc = h->mb.pic.p_fenc; - uint8_t **p_fdec = h->mb.pic.p_fdec; - int i; + pixel *p_fenc = h->mb.pic.p_fenc[0]; + pixel *p_fdec = h->mb.pic.p_fdec[0]; a->i_cost16x16direct = a->i_lambda * i_mb_b_cost_table[B_DIRECT]; - for( i = 0; i < 4; i++ ) - { - const int x = (i&1)*8; - const int y = (i>>1)*8; - a->i_cost16x16direct += - a->i_cost8x8direct[i] = - h->pixf.mbcmp[PIXEL_8x8]( &p_fenc[0][x+y*FENC_STRIDE], FENC_STRIDE, &p_fdec[0][x+y*FDEC_STRIDE], FDEC_STRIDE ); + if( h->param.analyse.inter & X264_ANALYSE_BSUB16x16 ) + for( int i = 0; i < 4; i++ ) + { + const int x = (i&1)*8; + const int y = (i>>1)*8; + a->i_cost8x8direct[i] = h->pixf.mbcmp[PIXEL_8x8]( &p_fenc[x+y*FENC_STRIDE], FENC_STRIDE, + &p_fdec[x+y*FDEC_STRIDE], FDEC_STRIDE ); + if( h->mb.b_chroma_me ) + { + a->i_cost8x8direct[i] += h->pixf.mbcmp[PIXEL_4x4]( &h->mb.pic.p_fenc[1][(x>>1)+(y>>1)*FENC_STRIDE], FENC_STRIDE, + &h->mb.pic.p_fdec[1][(x>>1)+(y>>1)*FDEC_STRIDE], FDEC_STRIDE ) + + h->pixf.mbcmp[PIXEL_4x4]( &h->mb.pic.p_fenc[2][(x>>1)+(y>>1)*FENC_STRIDE], FENC_STRIDE, + &h->mb.pic.p_fdec[2][(x>>1)+(y>>1)*FDEC_STRIDE], FDEC_STRIDE ); + } + a->i_cost16x16direct += a->i_cost8x8direct[i]; - /* mb type cost */ - a->i_cost8x8direct[i] += a->i_lambda * i_sub_mb_b_cost_table[D_DIRECT_8x8]; + /* mb type cost */ + a->i_cost8x8direct[i] += a->i_lambda * i_sub_mb_b_cost_table[D_DIRECT_8x8]; + } + else + { + a->i_cost16x16direct += h->pixf.mbcmp[PIXEL_16x16]( p_fenc, FENC_STRIDE, p_fdec, FDEC_STRIDE ); + if( h->mb.b_chroma_me ) + { + a->i_cost16x16direct += h->pixf.mbcmp[PIXEL_8x8]( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE ) + + h->pixf.mbcmp[PIXEL_8x8]( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE ); + } } } static void x264_mb_analyse_inter_b16x16( x264_t *h, x264_mb_analysis_t *a ) { - ALIGNED_ARRAY_16( uint8_t, pix0,[16*16] ); - ALIGNED_ARRAY_16( 
uint8_t, pix1,[16*16] ); - uint8_t *src0, *src1; + ALIGNED_ARRAY_16( pixel, pix0,[16*16] ); + ALIGNED_ARRAY_16( pixel, pix1,[16*16] ); + pixel *src0, *src1; int stride0 = 16, stride1 = 16; - - x264_me_t m; int i_ref, i_mvc; ALIGNED_4( int16_t mvc[9][2] ); - int i_halfpel_thresh = INT_MAX; - int *p_halfpel_thresh = h->mb.pic.i_fref[0]>1 ? &i_halfpel_thresh : NULL; + int try_skip = a->b_try_skip; + int list1_skipped = 0; + int i_halfpel_thresh[2] = {INT_MAX, INT_MAX}; + int *p_halfpel_thresh[2] = {h->mb.pic.i_fref[0]>1 ? &i_halfpel_thresh[0] : NULL, + h->mb.pic.i_fref[1]>1 ? &i_halfpel_thresh[1] : NULL}; - /* 16x16 Search on all ref frame */ + x264_me_t m; m.i_pixel = PIXEL_16x16; - m.weight = weight_none; LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 0 ); - /* ME for List 0 */ + /* 16x16 Search on list 0 and list 1 */ a->l0.me16x16.cost = INT_MAX; - for( i_ref = 0; i_ref < h->mb.pic.i_fref[0]; i_ref++ ) - { - const int i_ref_cost = REF_COST( 0, i_ref ); - m.i_ref_cost = i_ref_cost; - /* search with ref */ - LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 0 ); - x264_mb_predict_mv_16x16( h, 0, i_ref, m.mvp ); - x264_mb_predict_mv_ref16x16( h, 0, i_ref, mvc, &i_mvc ); - x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh ); + a->l1.me16x16.cost = INT_MAX; + for( int l = 1; l >= 0; ) + { + x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0; + + /* This loop is extremely munged in order to facilitate the following order of operations, + * necessary for an efficient fast skip. + * 1. Search list1 ref0. + * 2. Search list0 ref0. + * 3. Try skip. + * 4. Search the rest of list0. + * 5. Go back and finish list1. + */ + for( i_ref = (list1_skipped && l == 1) ? 1 : 0; i_ref < h->mb.pic.i_fref[l]; i_ref++ ) + { + if( try_skip && l == 1 && i_ref > 0 ) + { + list1_skipped = 1; + break; + } - /* add ref cost */ - m.cost += i_ref_cost; + m.i_ref_cost = REF_COST( l, i_ref ); - if( m.cost < a->l0.me16x16.cost ) - { - a->l0.i_ref = i_ref; - h->mc.memcpy_aligned( &a->l0.me16x16, &m, sizeof(x264_me_t) ); - } + /* search with ref */ + LOAD_HPELS( &m, h->mb.pic.p_fref[l][i_ref], l, i_ref, 0, 0 ); + x264_mb_predict_mv_16x16( h, l, i_ref, m.mvp ); + x264_mb_predict_mv_ref16x16( h, l, i_ref, mvc, &i_mvc ); + x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh[l] ); - /* save mv for predicting neighbors */ - CP32( h->mb.mvr[0][i_ref][h->mb.i_mb_xy], m.mv ); - } - a->l0.me16x16.i_ref = a->l0.i_ref; + /* add ref cost */ + m.cost += m.i_ref_cost; - /* ME for list 1 */ - i_halfpel_thresh = INT_MAX; - p_halfpel_thresh = h->mb.pic.i_fref[1]>1 ? &i_halfpel_thresh : NULL; - a->l1.me16x16.cost = INT_MAX; - for( i_ref = 0; i_ref < h->mb.pic.i_fref[1]; i_ref++ ) - { - const int i_ref_cost = REF_COST( 0, i_ref ); - m.i_ref_cost = i_ref_cost; - /* search with ref */ - LOAD_HPELS( &m, h->mb.pic.p_fref[1][i_ref], 1, i_ref, 0, 0 ); - x264_mb_predict_mv_16x16( h, 1, i_ref, m.mvp ); - x264_mb_predict_mv_ref16x16( h, 1, i_ref, mvc, &i_mvc ); - x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh ); + if( m.cost < lX->me16x16.cost ) + h->mc.memcpy_aligned( &lX->me16x16, &m, sizeof(x264_me_t) ); - /* add ref cost */ - m.cost += i_ref_cost; + /* save mv for predicting neighbors */ + CP32( lX->mvc[i_ref][0], m.mv ); + CP32( h->mb.mvr[l][i_ref][h->mb.i_mb_xy], m.mv ); - if( m.cost < a->l1.me16x16.cost ) - { - a->l1.i_ref = i_ref; - h->mc.memcpy_aligned( &a->l1.me16x16, &m, sizeof(x264_me_t) ); + /* Fast skip detection. 
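 * A sketch of the test that follows (illustrative only; the helper below is
 * hypothetical, not an x264 function): a list's ref-0 result confirms the
 * skip only when its MV lands within one quarter-pel, taxicab distance, of
 * that list's direct MV; once list1 and then list0 both confirm, the MB is
 * committed as B_SKIP and the remaining reference searches never run.
 *
 *     static int mv_confirms_skip( const int16_t mv[2], const int16_t dmv[2] )
 *     {
 *         // |dx| + |dy| <= 1 in quarter-pel units, as tested just below
 *         return abs( mv[0] - dmv[0] ) + abs( mv[1] - dmv[1] ) <= 1;
 *     }
 *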
*/ + if( i_ref == 0 && try_skip ) + { + if( abs(lX->me16x16.mv[0]-h->mb.cache.direct_mv[l][0][0]) + + abs(lX->me16x16.mv[1]-h->mb.cache.direct_mv[l][0][1]) > 1 ) + { + try_skip = 0; + } + else if( !l ) + { + /* We already tested skip */ + h->mb.i_type = B_SKIP; + x264_analyse_update_cache( h, a ); + return; + } + } } - - /* save mv for predicting neighbors */ - CP32( h->mb.mvr[1][i_ref][h->mb.i_mb_xy], m.mv ); + if( list1_skipped && l == 1 && i_ref == h->mb.pic.i_fref[1] ) + break; + if( list1_skipped && l == 0 ) + l = 1; + else + l--; } - a->l1.me16x16.i_ref = a->l1.i_ref; /* get cost of BI mode */ + h->mc.memcpy_aligned( &a->l0.bi16x16, &a->l0.me16x16, sizeof(x264_me_t) ); + h->mc.memcpy_aligned( &a->l1.bi16x16, &a->l1.me16x16, sizeof(x264_me_t) ); + int ref_costs = REF_COST( 0, a->l0.bi16x16.i_ref ) + REF_COST( 1, a->l1.bi16x16.i_ref ); src0 = h->mc.get_ref( pix0, &stride0, - h->mb.pic.p_fref[0][a->l0.i_ref], h->mb.pic.i_stride[0], - a->l0.me16x16.mv[0], a->l0.me16x16.mv[1], 16, 16, weight_none ); + h->mb.pic.p_fref[0][a->l0.bi16x16.i_ref], h->mb.pic.i_stride[0], + a->l0.bi16x16.mv[0], a->l0.bi16x16.mv[1], 16, 16, weight_none ); src1 = h->mc.get_ref( pix1, &stride1, - h->mb.pic.p_fref[1][a->l1.i_ref], h->mb.pic.i_stride[0], - a->l1.me16x16.mv[0], a->l1.me16x16.mv[1], 16, 16, weight_none ); + h->mb.pic.p_fref[1][a->l1.bi16x16.i_ref], h->mb.pic.i_stride[0], + a->l1.bi16x16.mv[0], a->l1.bi16x16.mv[1], 16, 16, weight_none ); - h->mc.avg[PIXEL_16x16]( pix0, 16, src0, stride0, src1, stride1, h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] ); + h->mc.avg[PIXEL_16x16]( pix0, 16, src0, stride0, src1, stride1, h->mb.bipred_weight[a->l0.bi16x16.i_ref][a->l1.bi16x16.i_ref] ); a->i_cost16x16bi = h->pixf.mbcmp[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE, pix0, 16 ) - + REF_COST( 0, a->l0.i_ref ) - + REF_COST( 1, a->l1.i_ref ) - + a->l0.me16x16.cost_mv - + a->l1.me16x16.cost_mv; + + ref_costs + + a->l0.bi16x16.cost_mv + + a->l1.bi16x16.cost_mv; + + if( h->mb.b_chroma_me ) + a->i_cost16x16bi += x264_analyse_bi_chroma( h, a, 0, PIXEL_16x16 ); + + /* Always try the 0,0,0,0 vector; helps avoid errant motion vectors in fades */ + if( M32( a->l0.bi16x16.mv ) | M32( a->l1.bi16x16.mv ) ) + { + int l0_mv_cost = a->l0.bi16x16.p_cost_mv[-a->l0.bi16x16.mvp[0]] + + a->l0.bi16x16.p_cost_mv[-a->l0.bi16x16.mvp[1]]; + int l1_mv_cost = a->l1.bi16x16.p_cost_mv[-a->l1.bi16x16.mvp[0]] + + a->l1.bi16x16.p_cost_mv[-a->l1.bi16x16.mvp[1]]; + h->mc.avg[PIXEL_16x16]( pix0, 16, h->mb.pic.p_fref[0][a->l0.bi16x16.i_ref][0], h->mb.pic.i_stride[0], + h->mb.pic.p_fref[1][a->l1.bi16x16.i_ref][0], h->mb.pic.i_stride[0], + h->mb.bipred_weight[a->l0.bi16x16.i_ref][a->l1.bi16x16.i_ref] ); + int cost00 = h->pixf.mbcmp[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE, pix0, 16 ) + + ref_costs + l0_mv_cost + l1_mv_cost; + + if( h->mb.b_chroma_me ) + { + ALIGNED_ARRAY_16( pixel, pixuv, [2],[8*FENC_STRIDE] ); + ALIGNED_ARRAY_16( pixel, bi, [8*FENC_STRIDE] ); + + if( h->mb.b_interlaced & a->l0.bi16x16.i_ref ) + { + int l0_mvy_offset = h->mb.b_interlaced & a->l0.bi16x16.i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0; + h->mc.mc_chroma( pixuv[0], pixuv[0]+8, FENC_STRIDE, h->mb.pic.p_fref[0][a->l0.bi16x16.i_ref][4], + h->mb.pic.i_stride[1], 0, 0 + l0_mvy_offset, 8, 8 ); + } + else + h->mc.load_deinterleave_8x8x2_fenc( pixuv[0], h->mb.pic.p_fref[0][a->l0.bi16x16.i_ref][4], h->mb.pic.i_stride[1] ); + + if( h->mb.b_interlaced & a->l1.bi16x16.i_ref ) + { + int l1_mvy_offset = h->mb.b_interlaced & a->l1.bi16x16.i_ref ? 
(h->mb.i_mb_y & 1)*4 - 2 : 0;
+                h->mc.mc_chroma( pixuv[1], pixuv[1]+8, FENC_STRIDE, h->mb.pic.p_fref[1][a->l1.bi16x16.i_ref][4],
+                                 h->mb.pic.i_stride[1], 0, 0 + l1_mvy_offset, 8, 8 );
+            }
+            else
+                h->mc.load_deinterleave_8x8x2_fenc( pixuv[1], h->mb.pic.p_fref[1][a->l1.bi16x16.i_ref][4], h->mb.pic.i_stride[1] );
+
+            h->mc.avg[PIXEL_8x8]( bi, FENC_STRIDE, pixuv[0], FENC_STRIDE, pixuv[1], FENC_STRIDE,
+                                  h->mb.bipred_weight[a->l0.bi16x16.i_ref][a->l1.bi16x16.i_ref] );
+            h->mc.avg[PIXEL_8x8]( bi+8, FENC_STRIDE, pixuv[0]+8, FENC_STRIDE, pixuv[1]+8, FENC_STRIDE,
+                                  h->mb.bipred_weight[a->l0.bi16x16.i_ref][a->l1.bi16x16.i_ref] );
+
+            cost00 += h->pixf.mbcmp[PIXEL_8x8]( h->mb.pic.p_fenc[1], FENC_STRIDE, bi, FENC_STRIDE )
+                    + h->pixf.mbcmp[PIXEL_8x8]( h->mb.pic.p_fenc[2], FENC_STRIDE, bi+8, FENC_STRIDE );
+        }
+
+        if( cost00 < a->i_cost16x16bi )
+        {
+            M32( a->l0.bi16x16.mv ) = 0;
+            M32( a->l1.bi16x16.mv ) = 0;
+            a->l0.bi16x16.cost_mv = l0_mv_cost;
+            a->l1.bi16x16.cost_mv = l1_mv_cost;
+            a->i_cost16x16bi = cost00;
+        }
+    }
 
     /* mb type cost */
     a->i_cost16x16bi += a->i_lambda * i_mb_b_cost_table[B_BI_BI];
@@ -1745,8 +1961,8 @@ static void x264_mb_analyse_inter_b16x16( x264_t *h, x264_mb_analysis_t *a )
 
 static inline void x264_mb_cache_mv_p8x8( x264_t *h, x264_mb_analysis_t *a, int i )
 {
-    const int x = 2*(i%2);
-    const int y = 2*(i/2);
+    int x = 2*(i&1);
+    int y = i&2;
 
     switch( h->mb.i_sub_partition[i] )
     {
@@ -1775,8 +1991,8 @@ static inline void x264_mb_cache_mv_p8x8( x264_t *h, x264_mb_analysis_t *a, int
 
 static void x264_mb_load_mv_direct8x8( x264_t *h, int idx )
 {
-    const int x = 2*(idx&1);
-    const int y = 2*(idx>>1);
+    int x = 2*(idx&1);
+    int y = idx&2;
     x264_macroblock_cache_ref( h, x, y, 2, 2, 0, h->mb.cache.direct_ref[0][idx] );
     x264_macroblock_cache_ref( h, x, y, 2, 2, 1, h->mb.cache.direct_ref[1][idx] );
     x264_macroblock_cache_mv_ptr( h, x, y, 2, 2, 0, h->mb.cache.direct_mv[0][idx] );
@@ -1786,7 +2002,7 @@ static void x264_mb_load_mv_direct8x8( x264_t *h, int idx )
 #define CACHE_MV_BI(x,y,dx,dy,me0,me1,part) \
     if( x264_mb_partition_listX_table[0][part] ) \
     { \
-        x264_macroblock_cache_ref( h, x,y,dx,dy, 0, a->l0.i_ref ); \
+        x264_macroblock_cache_ref( h, x,y,dx,dy, 0, me0.i_ref ); \
         x264_macroblock_cache_mv_ptr( h, x,y,dx,dy, 0, me0.mv ); \
     } \
     else \
@@ -1798,7 +2014,7 @@ static void x264_mb_load_mv_direct8x8( x264_t *h, int idx )
     } \
     if( x264_mb_partition_listX_table[1][part] ) \
     { \
-        x264_macroblock_cache_ref( h, x,y,dx,dy, 1, a->l1.i_ref ); \
+        x264_macroblock_cache_ref( h, x,y,dx,dy, 1, me1.i_ref ); \
         x264_macroblock_cache_mv_ptr( h, x,y,dx,dy, 1, me1.mv ); \
     } \
     else \
@@ -1811,8 +2027,8 @@ static inline void x264_mb_cache_mv_b8x8( x264_t *h, x264_mb_analysis_t *a, int i, int b_mvd )
 {
-    int x = (i%2)*2;
-    int y = (i/2)*2;
+    int x = 2*(i&1);
+    int y = i&2;
 
     if( h->mb.i_sub_partition[i] == D_DIRECT_8x8 )
     {
         x264_mb_load_mv_direct8x8( h, i );
@@ -1838,58 +2054,180 @@ static inline void x264_mb_cache_mv_b8x16( x264_t *h, x264_mb_analysis_t *a, int
 }
 #undef CACHE_MV_BI
 
+static void x264_mb_analyse_inter_b8x8_mixed_ref( x264_t *h, x264_mb_analysis_t *a )
+{
+    ALIGNED_ARRAY_16( pixel, pix,[2],[8*8] );
+    int i_maxref[2] = {h->mb.pic.i_fref[0]-1, h->mb.pic.i_fref[1]-1};
+
+    /* early termination: if 16x16 chose ref 0, then evaluate no refs older
+     * than those used by the neighbors */
+    #define CHECK_NEIGHBOUR(i)\
+    {\
+        int ref = h->mb.cache.ref[l][X264_SCAN8_0+i];\
+        if( ref > i_maxref[l] )\
+            i_maxref[l] = ref;\
+    }
+
+    for( int l = 0; l < 2;
l++ ) + { + x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0; + if( i_maxref[l] > 0 && lX->me16x16.i_ref == 0 && + h->mb.i_mb_type_top > 0 && h->mb.i_mb_type_left > 0 ) + { + i_maxref[l] = 0; + CHECK_NEIGHBOUR( -8 - 1 ); + CHECK_NEIGHBOUR( -8 + 0 ); + CHECK_NEIGHBOUR( -8 + 2 ); + CHECK_NEIGHBOUR( -8 + 4 ); + CHECK_NEIGHBOUR( 0 - 1 ); + CHECK_NEIGHBOUR( 2*8 - 1 ); + } + } + + /* XXX Needed for x264_mb_predict_mv */ + h->mb.i_partition = D_8x8; + + a->i_cost8x8bi = 0; + + for( int i = 0; i < 4; i++ ) + { + int x8 = i&1; + int y8 = i>>1; + int i_part_cost; + int i_part_cost_bi; + int stride[2] = {8,8}; + pixel *src[2]; + x264_me_t m; + m.i_pixel = PIXEL_8x8; + LOAD_FENC( &m, h->mb.pic.p_fenc, 8*x8, 8*y8 ); + + for( int l = 0; l < 2; l++ ) + { + x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0; + + lX->me8x8[i].cost = INT_MAX; + for( int i_ref = 0; i_ref <= i_maxref[l]; i_ref++ ) + { + m.i_ref_cost = REF_COST( l, i_ref ); + + LOAD_HPELS( &m, h->mb.pic.p_fref[l][i_ref], l, i_ref, 8*x8, 8*y8 ); + + x264_macroblock_cache_ref( h, x8*2, y8*2, 2, 2, l, i_ref ); + x264_mb_predict_mv( h, l, 4*i, 2, m.mvp ); + x264_me_search( h, &m, lX->mvc[i_ref], i+1 ); + m.cost += m.i_ref_cost; + + if( m.cost < lX->me8x8[i].cost ) + { + h->mc.memcpy_aligned( &lX->me8x8[i], &m, sizeof(x264_me_t) ); + a->i_satd8x8[l][i] = m.cost - ( m.cost_mv + m.i_ref_cost ); + } + + /* save mv for predicting other partitions within this MB */ + CP32( lX->mvc[i_ref][i+1], m.mv ); + } + } + + /* BI mode */ + src[0] = h->mc.get_ref( pix[0], &stride[0], a->l0.me8x8[i].p_fref, a->l0.me8x8[i].i_stride[0], + a->l0.me8x8[i].mv[0], a->l0.me8x8[i].mv[1], 8, 8, weight_none ); + src[1] = h->mc.get_ref( pix[1], &stride[1], a->l1.me8x8[i].p_fref, a->l1.me8x8[i].i_stride[0], + a->l1.me8x8[i].mv[0], a->l1.me8x8[i].mv[1], 8, 8, weight_none ); + h->mc.avg[PIXEL_8x8]( pix[0], 8, src[0], stride[0], src[1], stride[1], + h->mb.bipred_weight[a->l0.me8x8[i].i_ref][a->l1.me8x8[i].i_ref] ); + + a->i_satd8x8[2][i] = h->pixf.mbcmp[PIXEL_8x8]( a->l0.me8x8[i].p_fenc[0], FENC_STRIDE, pix[0], 8 ); + i_part_cost_bi = a->i_satd8x8[2][i] + a->l0.me8x8[i].cost_mv + a->l1.me8x8[i].cost_mv + + a->l0.me8x8[i].i_ref_cost + a->l1.me8x8[i].i_ref_cost + + a->i_lambda * i_sub_mb_b_cost_table[D_BI_8x8]; + + if( h->mb.b_chroma_me ) + { + int i_chroma_cost = x264_analyse_bi_chroma( h, a, i, PIXEL_8x8 ); + i_part_cost_bi += i_chroma_cost; + a->i_satd8x8[2][i] += i_chroma_cost; + } + + a->l0.me8x8[i].cost += a->i_lambda * i_sub_mb_b_cost_table[D_L0_8x8]; + a->l1.me8x8[i].cost += a->i_lambda * i_sub_mb_b_cost_table[D_L1_8x8]; + + i_part_cost = a->l0.me8x8[i].cost; + h->mb.i_sub_partition[i] = D_L0_8x8; + COPY2_IF_LT( i_part_cost, a->l1.me8x8[i].cost, h->mb.i_sub_partition[i], D_L1_8x8 ); + COPY2_IF_LT( i_part_cost, i_part_cost_bi, h->mb.i_sub_partition[i], D_BI_8x8 ); + COPY2_IF_LT( i_part_cost, a->i_cost8x8direct[i], h->mb.i_sub_partition[i], D_DIRECT_8x8 ); + a->i_cost8x8bi += i_part_cost; + + /* XXX Needed for x264_mb_predict_mv */ + x264_mb_cache_mv_b8x8( h, a, i, 0 ); + } + + /* mb type cost */ + a->i_cost8x8bi += a->i_lambda * i_mb_b_cost_table[B_8x8]; +} + static void x264_mb_analyse_inter_b8x8( x264_t *h, x264_mb_analysis_t *a ) { - uint8_t **p_fref[2] = - { h->mb.pic.p_fref[0][a->l0.i_ref], - h->mb.pic.p_fref[1][a->l1.i_ref] }; - ALIGNED_ARRAY_8( uint8_t, pix,[2],[8*8] ); - int i, l; + pixel **p_fref[2] = + { h->mb.pic.p_fref[0][a->l0.me16x16.i_ref], + h->mb.pic.p_fref[1][a->l1.me16x16.i_ref] }; + ALIGNED_ARRAY_16( pixel, pix,[2],[8*8] ); /* XXX Needed for 
x264_mb_predict_mv */ h->mb.i_partition = D_8x8; a->i_cost8x8bi = 0; - for( i = 0; i < 4; i++ ) + for( int i = 0; i < 4; i++ ) { - const int x8 = i%2; - const int y8 = i/2; + int x8 = i&1; + int y8 = i>>1; int i_part_cost; int i_part_cost_bi = 0; int stride[2] = {8,8}; - uint8_t *src[2]; + pixel *src[2]; - for( l = 0; l < 2; l++ ) + for( int l = 0; l < 2; l++ ) { x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0; - const int i_ref_cost = REF_COST( l, lX->i_ref ); x264_me_t *m = &lX->me8x8[i]; - m->i_pixel = PIXEL_8x8; - m->i_ref_cost = i_ref_cost; - LOAD_FENC( m, h->mb.pic.p_fenc, 8*x8, 8*y8 ); - LOAD_HPELS( m, p_fref[l], l, lX->i_ref, 8*x8, 8*y8 ); - x264_macroblock_cache_ref( h, x8*2, y8*2, 2, 2, l, lX->i_ref ); + m->i_ref_cost = REF_COST( l, lX->me16x16.i_ref ); + m->i_ref = lX->me16x16.i_ref; + + LOAD_HPELS( m, p_fref[l], l, lX->me16x16.i_ref, 8*x8, 8*y8 ); + + x264_macroblock_cache_ref( h, x8*2, y8*2, 2, 2, l, lX->me16x16.i_ref ); x264_mb_predict_mv( h, l, 4*i, 2, m->mvp ); x264_me_search( h, m, &lX->me16x16.mv, 1 ); - m->cost += i_ref_cost; + a->i_satd8x8[l][i] = m->cost - m->cost_mv; + m->cost += m->i_ref_cost; x264_macroblock_cache_mv_ptr( h, 2*x8, 2*y8, 2, 2, l, m->mv ); + /* save mv for predicting other partitions within this MB */ + CP32( lX->mvc[lX->me16x16.i_ref][i+1], m->mv ); + /* BI mode */ src[l] = h->mc.get_ref( pix[l], &stride[l], m->p_fref, m->i_stride[0], m->mv[0], m->mv[1], 8, 8, weight_none ); - i_part_cost_bi += m->cost_mv + i_ref_cost; + i_part_cost_bi += m->cost_mv + m->i_ref_cost; } - h->mc.avg[PIXEL_8x8]( pix[0], 8, src[0], stride[0], src[1], stride[1], h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] ); - i_part_cost_bi += h->pixf.mbcmp[PIXEL_8x8]( a->l0.me8x8[i].p_fenc[0], FENC_STRIDE, pix[0], 8 ) - + a->i_lambda * i_sub_mb_b_cost_table[D_BI_8x8]; + h->mc.avg[PIXEL_8x8]( pix[0], 8, src[0], stride[0], src[1], stride[1], h->mb.bipred_weight[a->l0.me16x16.i_ref][a->l1.me16x16.i_ref] ); + a->i_satd8x8[2][i] = h->pixf.mbcmp[PIXEL_8x8]( a->l0.me8x8[i].p_fenc[0], FENC_STRIDE, pix[0], 8 ); + i_part_cost_bi += a->i_satd8x8[2][i] + a->i_lambda * i_sub_mb_b_cost_table[D_BI_8x8]; a->l0.me8x8[i].cost += a->i_lambda * i_sub_mb_b_cost_table[D_L0_8x8]; a->l1.me8x8[i].cost += a->i_lambda * i_sub_mb_b_cost_table[D_L1_8x8]; + if( h->mb.b_chroma_me ) + { + int i_chroma_cost = x264_analyse_bi_chroma( h, a, i, PIXEL_8x8 ); + i_part_cost_bi += i_chroma_cost; + a->i_satd8x8[2][i] += i_chroma_cost; + } + i_part_cost = a->l0.me8x8[i].cost; h->mb.i_sub_partition[i] = D_L0_8x8; COPY2_IF_LT( i_part_cost, a->l1.me8x8[i].cost, h->mb.i_sub_partition[i], D_L1_8x8 ); @@ -1905,56 +2243,69 @@ static void x264_mb_analyse_inter_b8x8( x264_t *h, x264_mb_analysis_t *a ) a->i_cost8x8bi += a->i_lambda * i_mb_b_cost_table[B_8x8]; } -static void x264_mb_analyse_inter_b16x8( x264_t *h, x264_mb_analysis_t *a ) +static void x264_mb_analyse_inter_b16x8( x264_t *h, x264_mb_analysis_t *a, int i_best_satd ) { - uint8_t **p_fref[2] = - { h->mb.pic.p_fref[0][a->l0.i_ref], - h->mb.pic.p_fref[1][a->l1.i_ref] }; - ALIGNED_ARRAY_16( uint8_t, pix,[2],[16*8] ); - ALIGNED_4( int16_t mvc[2][2] ); - int i, l; + ALIGNED_ARRAY_16( pixel, pix,[2],[16*8] ); + ALIGNED_4( int16_t mvc[3][2] ); h->mb.i_partition = D_16x8; a->i_cost16x8bi = 0; - for( i = 0; i < 2; i++ ) + for( int i = 0; i < 2; i++ ) { int i_part_cost; int i_part_cost_bi = 0; int stride[2] = {16,16}; - uint8_t *src[2]; + pixel *src[2]; + x264_me_t m; + m.i_pixel = PIXEL_16x8; + LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 8*i ); - /* TODO: check only the list(s) that were 
used in b8x8? */ - for( l = 0; l < 2; l++ ) + for( int l = 0; l < 2; l++ ) { x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0; - const int i_ref_cost = REF_COST( l, lX->i_ref ); - x264_me_t *m = &lX->me16x8[i]; - - m->i_pixel = PIXEL_16x8; - m->i_ref_cost = i_ref_cost; + int ref8[2] = { lX->me8x8[2*i].i_ref, lX->me8x8[2*i+1].i_ref }; + int i_ref8s = ( ref8[0] == ref8[1] ) ? 1 : 2; + lX->me16x8[i].cost = INT_MAX; + for( int j = 0; j < i_ref8s; j++ ) + { + int i_ref = ref8[j]; + m.i_ref_cost = REF_COST( l, i_ref ); - LOAD_FENC( m, h->mb.pic.p_fenc, 0, 8*i ); - LOAD_HPELS( m, p_fref[l], l, lX->i_ref, 0, 8*i ); + LOAD_HPELS( &m, h->mb.pic.p_fref[l][i_ref], l, i_ref, 0, 8*i ); - CP32( mvc[0], lX->me8x8[2*i].mv ); - CP32( mvc[1], lX->me8x8[2*i+1].mv ); + CP32( mvc[0], lX->mvc[i_ref][0] ); + CP32( mvc[1], lX->mvc[i_ref][2*i+1] ); + CP32( mvc[2], lX->mvc[i_ref][2*i+2] ); - x264_macroblock_cache_ref( h, 0, 2*i, 4, 2, l, lX->i_ref ); - x264_mb_predict_mv( h, l, 8*i, 4, m->mvp ); - x264_me_search( h, m, mvc, 2 ); - m->cost += i_ref_cost; + x264_macroblock_cache_ref( h, 0, 2*i, 4, 2, l, i_ref ); + x264_mb_predict_mv( h, l, 8*i, 4, m.mvp ); + x264_me_search( h, &m, mvc, 3 ); + m.cost += m.i_ref_cost; - /* BI mode */ - src[l] = h->mc.get_ref( pix[l], &stride[l], m->p_fref, m->i_stride[0], - m->mv[0], m->mv[1], 16, 8, weight_none ); - i_part_cost_bi += m->cost_mv + i_ref_cost; + if( m.cost < lX->me16x8[i].cost ) + h->mc.memcpy_aligned( &lX->me16x8[i], &m, sizeof(x264_me_t) ); + } } - h->mc.avg[PIXEL_16x8]( pix[0], 16, src[0], stride[0], src[1], stride[1], h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] ); - i_part_cost_bi += h->pixf.mbcmp[PIXEL_16x8]( a->l0.me16x8[i].p_fenc[0], FENC_STRIDE, pix[0], 16 ); + + /* BI mode */ + src[0] = h->mc.get_ref( pix[0], &stride[0], a->l0.me16x8[i].p_fref, a->l0.me16x8[i].i_stride[0], + a->l0.me16x8[i].mv[0], a->l0.me16x8[i].mv[1], 16, 8, weight_none ); + src[1] = h->mc.get_ref( pix[1], &stride[1], a->l1.me16x8[i].p_fref, a->l1.me16x8[i].i_stride[0], + a->l1.me16x8[i].mv[0], a->l1.me16x8[i].mv[1], 16, 8, weight_none ); + h->mc.avg[PIXEL_16x8]( pix[0], 16, src[0], stride[0], src[1], stride[1], + h->mb.bipred_weight[a->l0.me16x8[i].i_ref][a->l1.me16x8[i].i_ref] ); + + i_part_cost_bi = h->pixf.mbcmp[PIXEL_16x8]( a->l0.me16x8[i].p_fenc[0], FENC_STRIDE, pix[0], 16 ) + + a->l0.me16x8[i].cost_mv + a->l1.me16x8[i].cost_mv + a->l0.me16x8[i].i_ref_cost + + a->l1.me16x8[i].i_ref_cost; + + if( h->mb.b_chroma_me ) + i_part_cost_bi += x264_analyse_bi_chroma( h, a, i, PIXEL_16x8 ); i_part_cost = a->l0.me16x8[i].cost; a->i_mb_partition16x8[i] = D_L0_8x8; /* not actually 8x8, only the L0 matters */ + if( a->l1.me16x8[i].cost < i_part_cost ) { i_part_cost = a->l1.me16x8[i].cost; @@ -1967,6 +2318,15 @@ static void x264_mb_analyse_inter_b16x8( x264_t *h, x264_mb_analysis_t *a ) } a->i_cost16x8bi += i_part_cost; + /* Early termination based on the current SATD score of partition[0] + plus the estimated SATD score of partition[1] */ + if( !i && i_part_cost + a->i_cost_est16x8[1] > i_best_satd + * (16 + (!!a->i_mbrd + !!h->mb.i_psy_rd))/16 ) + { + a->i_cost16x8bi = COST_MAX; + return; + } + x264_mb_cache_mv_b16x8( h, a, i, 0 ); } @@ -1977,56 +2337,68 @@ static void x264_mb_analyse_inter_b16x8( x264_t *h, x264_mb_analysis_t *a ) a->i_cost16x8bi += a->i_lambda * i_mb_b16x8_cost_table[a->i_mb_type16x8]; } -static void x264_mb_analyse_inter_b8x16( x264_t *h, x264_mb_analysis_t *a ) +static void x264_mb_analyse_inter_b8x16( x264_t *h, x264_mb_analysis_t *a, int i_best_satd ) { - uint8_t **p_fref[2] 
= - { h->mb.pic.p_fref[0][a->l0.i_ref], - h->mb.pic.p_fref[1][a->l1.i_ref] }; - ALIGNED_ARRAY_8( uint8_t, pix,[2],[8*16] ); - ALIGNED_4( int16_t mvc[2][2] ); - int i, l; + ALIGNED_ARRAY_16( pixel, pix,[2],[8*16] ); + ALIGNED_4( int16_t mvc[3][2] ); h->mb.i_partition = D_8x16; a->i_cost8x16bi = 0; - for( i = 0; i < 2; i++ ) + for( int i = 0; i < 2; i++ ) { int i_part_cost; int i_part_cost_bi = 0; int stride[2] = {8,8}; - uint8_t *src[2]; + pixel *src[2]; + x264_me_t m; + m.i_pixel = PIXEL_8x16; + LOAD_FENC( &m, h->mb.pic.p_fenc, 8*i, 0 ); - for( l = 0; l < 2; l++ ) + for( int l = 0; l < 2; l++ ) { x264_mb_analysis_list_t *lX = l ? &a->l1 : &a->l0; - const int i_ref_cost = REF_COST( l, lX->i_ref ); - x264_me_t *m = &lX->me8x16[i]; - - m->i_pixel = PIXEL_8x16; - m->i_ref_cost = i_ref_cost; + int ref8[2] = { lX->me8x8[i].i_ref, lX->me8x8[i+2].i_ref }; + int i_ref8s = ( ref8[0] == ref8[1] ) ? 1 : 2; + lX->me8x16[i].cost = INT_MAX; + for( int j = 0; j < i_ref8s; j++ ) + { + int i_ref = ref8[j]; + m.i_ref_cost = REF_COST( l, i_ref ); - LOAD_FENC( m, h->mb.pic.p_fenc, 8*i, 0 ); - LOAD_HPELS( m, p_fref[l], l, lX->i_ref, 8*i, 0 ); + LOAD_HPELS( &m, h->mb.pic.p_fref[l][i_ref], l, i_ref, 8*i, 0 ); - CP32( mvc[0], lX->me8x8[i].mv ); - CP32( mvc[1], lX->me8x8[i+2].mv ); + CP32( mvc[0], lX->mvc[i_ref][0] ); + CP32( mvc[1], lX->mvc[i_ref][i+1] ); + CP32( mvc[2], lX->mvc[i_ref][i+3] ); - x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, l, lX->i_ref ); - x264_mb_predict_mv( h, l, 4*i, 2, m->mvp ); - x264_me_search( h, m, mvc, 2 ); - m->cost += i_ref_cost; + x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, l, i_ref ); + x264_mb_predict_mv( h, l, 4*i, 2, m.mvp ); + x264_me_search( h, &m, mvc, 3 ); + m.cost += m.i_ref_cost; - /* BI mode */ - src[l] = h->mc.get_ref( pix[l], &stride[l], m->p_fref, m->i_stride[0], - m->mv[0], m->mv[1], 8, 16, weight_none ); - i_part_cost_bi += m->cost_mv + i_ref_cost; + if( m.cost < lX->me8x16[i].cost ) + h->mc.memcpy_aligned( &lX->me8x16[i], &m, sizeof(x264_me_t) ); + } } - h->mc.avg[PIXEL_8x16]( pix[0], 8, src[0], stride[0], src[1], stride[1], h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] ); - i_part_cost_bi += h->pixf.mbcmp[PIXEL_8x16]( a->l0.me8x16[i].p_fenc[0], FENC_STRIDE, pix[0], 8 ); + /* BI mode */ + src[0] = h->mc.get_ref( pix[0], &stride[0], a->l0.me8x16[i].p_fref, a->l0.me8x16[i].i_stride[0], + a->l0.me8x16[i].mv[0], a->l0.me8x16[i].mv[1], 8, 16, weight_none ); + src[1] = h->mc.get_ref( pix[1], &stride[1], a->l1.me8x16[i].p_fref, a->l1.me8x16[i].i_stride[0], + a->l1.me8x16[i].mv[0], a->l1.me8x16[i].mv[1], 8, 16, weight_none ); + h->mc.avg[PIXEL_8x16]( pix[0], 8, src[0], stride[0], src[1], stride[1], h->mb.bipred_weight[a->l0.me8x16[i].i_ref][a->l1.me8x16[i].i_ref] ); + + i_part_cost_bi = h->pixf.mbcmp[PIXEL_8x16]( a->l0.me8x16[i].p_fenc[0], FENC_STRIDE, pix[0], 8 ) + + a->l0.me8x16[i].cost_mv + a->l1.me8x16[i].cost_mv + a->l0.me8x16[i].i_ref_cost + + a->l1.me8x16[i].i_ref_cost; + + if( h->mb.b_chroma_me ) + i_part_cost_bi += x264_analyse_bi_chroma( h, a, i, PIXEL_8x16 ); i_part_cost = a->l0.me8x16[i].cost; a->i_mb_partition8x16[i] = D_L0_8x8; + if( a->l1.me8x16[i].cost < i_part_cost ) { i_part_cost = a->l1.me8x16[i].cost; @@ -2039,6 +2411,15 @@ static void x264_mb_analyse_inter_b8x16( x264_t *h, x264_mb_analysis_t *a ) } a->i_cost8x16bi += i_part_cost; + /* Early termination based on the current SATD score of partition[0] + plus the estimated SATD score of partition[1] */ + if( !i && i_part_cost + a->i_cost_est8x16[1] > i_best_satd + * (16 + (!!a->i_mbrd + !!h->mb.i_psy_rd))/16 
) + { + a->i_cost8x16bi = COST_MAX; + return; + } + x264_mb_cache_mv_b8x16( h, a, i, 0 ); } @@ -2085,23 +2466,22 @@ static void x264_mb_analyse_p_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd ) h->mb.i_partition = D_8x8; if( h->param.analyse.inter & X264_ANALYSE_PSUB8x8 ) { - int i; x264_macroblock_cache_ref( h, 0, 0, 2, 2, 0, a->l0.me8x8[0].i_ref ); x264_macroblock_cache_ref( h, 2, 0, 2, 2, 0, a->l0.me8x8[1].i_ref ); x264_macroblock_cache_ref( h, 0, 2, 2, 2, 0, a->l0.me8x8[2].i_ref ); x264_macroblock_cache_ref( h, 2, 2, 2, 2, 0, a->l0.me8x8[3].i_ref ); /* FIXME: In the 8x8 blocks where RDO isn't run, the NNZ values used for context selection * for future blocks are those left over from previous RDO calls. */ - for( i = 0; i < 4; i++ ) + for( int i = 0; i < 4; i++ ) { int costs[4] = {a->l0.i_cost4x4[i], a->l0.i_cost8x4[i], a->l0.i_cost4x8[i], a->l0.me8x8[i].cost}; - int thresh = X264_MIN4( costs[0], costs[1], costs[2], costs[3] ) * 5 / 4; + int sub8x8_thresh = X264_MIN4( costs[0], costs[1], costs[2], costs[3] ) * 5 / 4; int subtype, btype = D_L0_8x8; uint64_t bcost = COST_MAX64; for( subtype = D_L0_4x4; subtype <= D_L0_8x8; subtype++ ) { uint64_t cost; - if( costs[subtype] > thresh || (subtype == D_L0_8x8 && bcost == COST_MAX64) ) + if( costs[subtype] > sub8x8_thresh || (subtype == D_L0_8x8 && bcost == COST_MAX64) ) continue; h->mb.i_sub_partition[i] = subtype; x264_mb_cache_mv_p8x8( h, a, i ); @@ -2195,8 +2575,7 @@ static void x264_mb_analyse_b_rd( x264_t *h, x264_mb_analysis_t *a, int i_satd_i static void x264_refine_bidir( x264_t *h, x264_mb_analysis_t *a ) { - const int i_biweight = h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref]; - int i; + int i_biweight; if( IS_INTRA(h->mb.i_type) ) return; @@ -2205,22 +2584,34 @@ static void x264_refine_bidir( x264_t *h, x264_mb_analysis_t *a ) { case D_16x16: if( h->mb.i_type == B_BI_BI ) - x264_me_refine_bidir_satd( h, &a->l0.me16x16, &a->l1.me16x16, i_biweight ); + { + i_biweight = h->mb.bipred_weight[a->l0.bi16x16.i_ref][a->l1.bi16x16.i_ref]; + x264_me_refine_bidir_satd( h, &a->l0.bi16x16, &a->l1.bi16x16, i_biweight ); + } break; case D_16x8: - for( i=0; i<2; i++ ) + for( int i = 0; i < 2; i++ ) if( a->i_mb_partition16x8[i] == D_BI_8x8 ) + { + i_biweight = h->mb.bipred_weight[a->l0.me16x8[i].i_ref][a->l1.me16x8[i].i_ref]; x264_me_refine_bidir_satd( h, &a->l0.me16x8[i], &a->l1.me16x8[i], i_biweight ); + } break; case D_8x16: - for( i=0; i<2; i++ ) + for( int i = 0; i < 2; i++ ) if( a->i_mb_partition8x16[i] == D_BI_8x8 ) + { + i_biweight = h->mb.bipred_weight[a->l0.me8x16[i].i_ref][a->l1.me8x16[i].i_ref]; x264_me_refine_bidir_satd( h, &a->l0.me8x16[i], &a->l1.me8x16[i], i_biweight ); + } break; case D_8x8: - for( i=0; i<4; i++ ) + for( int i = 0; i < 4; i++ ) if( h->mb.i_sub_partition[i] == D_BI_8x8 ) + { + i_biweight = h->mb.bipred_weight[a->l0.me8x8[i].i_ref][a->l1.me8x8[i].i_ref]; x264_me_refine_bidir_satd( h, &a->l0.me8x8[i], &a->l1.me8x8[i], i_biweight ); + } break; } } @@ -2229,13 +2620,12 @@ static inline void x264_mb_analyse_transform( x264_t *h ) { if( x264_mb_transform_8x8_allowed( h ) && h->param.analyse.b_transform_8x8 && !h->mb.b_lossless ) { - int i_cost4, i_cost8; /* Only luma MC is really needed, but the full MC is re-used in macroblock_encode. 
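 * A hedged illustration of the decision that follows, with stand-in names for
 * the h->pixf function pointers: SA8D approximates the residual's coding cost
 * under the 8x8 transform, SATD under the 4x4 transform, so the transform-size
 * choice reduces to one comparison on the motion-compensated luma:
 *
 *     int cost8 = sa8d_16x16( fenc, FENC_STRIDE, fdec, FDEC_STRIDE );
 *     int cost4 = satd_16x16( fenc, FENC_STRIDE, fdec, FDEC_STRIDE );
 *     int b_transform_8x8 = cost8 < cost4;
 *
 * Whichever Hadamard basis models the residual more compactly wins.
 *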
*/ x264_mb_mc( h ); - i_cost8 = h->pixf.sa8d[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE, + int i_cost8 = h->pixf.sa8d[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE, h->mb.pic.p_fdec[0], FDEC_STRIDE ); - i_cost4 = h->pixf.satd[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE, + int i_cost4 = h->pixf.satd[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE, h->mb.pic.p_fdec[0], FDEC_STRIDE ); h->mb.b_transform_8x8 = i_cost8 < i_cost4; @@ -2247,11 +2637,10 @@ static inline void x264_mb_analyse_transform_rd( x264_t *h, x264_mb_analysis_t * { if( x264_mb_transform_8x8_allowed( h ) && h->param.analyse.b_transform_8x8 ) { - int i_rd8; x264_analyse_update_cache( h, a ); h->mb.b_transform_8x8 ^= 1; /* FIXME only luma is needed, but the score for comparison already includes chroma */ - i_rd8 = x264_rd_cost_mb( h, a->i_lambda2 ); + int i_rd8 = x264_rd_cost_mb( h, a->i_lambda2 ); if( *i_rd >= i_rd8 ) { @@ -2273,13 +2662,14 @@ static inline void x264_mb_analyse_transform_rd( x264_t *h, x264_mb_analysis_t * * trick. */ static inline void x264_mb_analyse_qp_rd( x264_t *h, x264_mb_analysis_t *a ) { - int bcost, cost, direction, failures, prevcost, origcost; + int bcost, cost, failures, prevcost, origcost; int orig_qp = h->mb.i_qp, bqp = h->mb.i_qp; int last_qp_tried = 0; origcost = bcost = x264_rd_cost_mb( h, a->i_lambda2 ); + int origcbp = h->mb.cbp[h->mb.i_mb_xy]; /* If CBP is already zero, don't raise the quantizer any higher. */ - for( direction = h->mb.cbp[h->mb.i_mb_xy] ? 1 : -1; direction >= -1; direction-=2 ) + for( int direction = origcbp ? 1 : -1; direction >= -1; direction-=2 ) { /* Without psy-RD, require monotonicity when moving quant away from previous * macroblock's quant; allow 1 failure when moving quant towards previous quant. @@ -2294,14 +2684,47 @@ static inline void x264_mb_analyse_qp_rd( x264_t *h, x264_mb_analysis_t *a ) h->mb.i_qp = orig_qp; failures = 0; prevcost = origcost; + + /* If the current QP results in an empty CBP, it's highly likely that lower QPs + * (up to a point) will too. So, jump down to where the threshold will kick in + * and check the QP there. If the CBP is still empty, skip the main loop. + * If it isn't empty, we would have ended up having to check this QP anyways, + * so as long as we store it for later lookup, we lose nothing. */ + int already_checked_qp = -1; + int already_checked_cost = COST_MAX; + if( direction == -1 ) + { + if( !origcbp ) + { + h->mb.i_qp = X264_MAX( h->mb.i_qp - threshold - 1, h->param.rc.i_qp_min ); + h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp]; + already_checked_cost = x264_rd_cost_mb( h, a->i_lambda2 ); + if( !h->mb.cbp[h->mb.i_mb_xy] ) + { + /* If our empty-CBP block is lower QP than the last QP, + * the last QP almost surely doesn't have a CBP either. 
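 * A compact sketch of the shortcut (hypothetical helper names, illustrative
 * only): probe the lowest QP the failure threshold could ever let the descent
 * reach; an empty CBP there almost certainly means an empty CBP at every QP
 * in between, so the walk is skipped outright, while a non-empty CBP costs
 * nothing extra because the probed QP's RD score is cached for reuse:
 *
 *     int qp_probe = X264_MAX( qp_orig - threshold - 1, qp_min );
 *     if( rd_cost_leaves_cbp_empty( qp_probe ) )  // hypothetical helper
 *         stop_descending = 1;                    // no lower QP can help
 *     // else remember (qp_probe, cost) so the loop reuses it
 *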
*/ + if( h->mb.i_last_qp > h->mb.i_qp ) + last_qp_tried = 1; + break; + } + already_checked_qp = h->mb.i_qp; + h->mb.i_qp = orig_qp; + } + } + h->mb.i_qp += direction; - while( h->mb.i_qp >= h->param.rc.i_qp_min && h->mb.i_qp <= h->param.rc.i_qp_max ) + while( h->mb.i_qp >= h->param.rc.i_qp_min && h->mb.i_qp <= SPEC_QP( h->param.rc.i_qp_max ) ) { if( h->mb.i_last_qp == h->mb.i_qp ) last_qp_tried = 1; - h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp]; - cost = x264_rd_cost_mb( h, a->i_lambda2 ); - COPY2_IF_LT( bcost, cost, bqp, h->mb.i_qp ); + if( h->mb.i_qp == already_checked_qp ) + cost = already_checked_cost; + else + { + h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp]; + cost = x264_rd_cost_mb( h, a->i_lambda2 ); + COPY2_IF_LT( bcost, cost, bqp, h->mb.i_qp ); + } /* We can't assume that the costs are monotonic over QPs. * Tie case-as-failure seems to give better results. */ @@ -2349,17 +2772,12 @@ void x264_macroblock_analyse( x264_t *h ) { x264_mb_analysis_t analysis; int i_cost = COST_MAX; - int i; - h->mb.i_qp = x264_ratecontrol_qp( h ); - if( h->param.rc.i_aq_mode ) - { - x264_adaptive_quant( h ); - /* If the QP of this MB is within 1 of the previous MB, code the same QP as the previous MB, - * to lower the bit cost of the qp_delta. Don't do this if QPRD is enabled. */ - if( h->param.analyse.i_subpel_refine < 10 && abs(h->mb.i_qp - h->mb.i_last_qp) == 1 ) - h->mb.i_qp = h->mb.i_last_qp; - } + h->mb.i_qp = x264_ratecontrol_mb_qp( h ); + /* If the QP of this MB is within 1 of the previous MB, code the same QP as the previous MB, + * to lower the bit cost of the qp_delta. Don't do this if QPRD is enabled. */ + if( h->param.rc.i_aq_mode && h->param.analyse.i_subpel_refine < 10 && abs(h->mb.i_qp - h->mb.i_last_qp) == 1 ) + h->mb.i_qp = h->mb.i_last_qp; x264_mb_analyse_init( h, &analysis, h->mb.i_qp ); @@ -2368,7 +2786,7 @@ void x264_macroblock_analyse( x264_t *h ) { intra_analysis: if( analysis.i_mbrd ) - x264_mb_cache_fenc_satd( h ); + x264_mb_init_fenc_cache( h, analysis.i_mbrd >= 2 ); x264_mb_analyse_intra( h, &analysis, COST_MAX ); if( analysis.i_mbrd ) x264_intra_rd( h, &analysis, COST_MAX ); @@ -2389,7 +2807,7 @@ intra_analysis: h->mc.prefetch_ref( h->mb.pic.p_fref[0][0][h->mb.i_mb_x&3], h->mb.pic.i_stride[0], 0 ); - analysis.b_try_pskip = 0; + analysis.b_try_skip = 0; if( analysis.b_force_intra ) { if( !h->param.analyse.b_psy ) @@ -2403,11 +2821,11 @@ intra_analysis: /* Fast P_SKIP detection */ if( h->param.analyse.b_fast_pskip ) { - if( h->param.i_threads > 1 && !h->param.b_sliced_threads && h->mb.cache.pskip_mv[1] > h->mb.mv_max_spel[1] ) + if( h->i_thread_frames > 1 && h->mb.cache.pskip_mv[1] > h->mb.mv_max_spel[1] ) // FIXME don't need to check this if the reference frame is done {} else if( h->param.analyse.i_subpel_refine >= 3 ) - analysis.b_try_pskip = 1; + analysis.b_try_skip = 1; else if( h->mb.i_mb_type_left == P_SKIP || h->mb.i_mb_type_top == P_SKIP || h->mb.i_mb_type_topleft == P_SKIP || @@ -2422,7 +2840,10 @@ intra_analysis: { h->mb.i_type = P_SKIP; h->mb.i_partition = D_16x16; - assert( h->mb.cache.pskip_mv[1] <= h->mb.mv_max_spel[1] || h->param.i_threads == 1 || h->param.b_sliced_threads ); + assert( h->mb.cache.pskip_mv[1] <= h->mb.mv_max_spel[1] || h->i_thread_frames == 1 ); + /* Set up MVs for future predictors */ + for( int i = 0; i < h->mb.pic.i_fref[0]; i++ ) + M32( h->mb.mvr[0][i][h->mb.i_mb_xy] ) = 0; } else { @@ -2437,7 +2858,11 @@ intra_analysis: x264_mb_analyse_inter_p16x16( h, &analysis ); if( h->mb.i_type == P_SKIP ) + { + for( int i = 1; i < 
h->mb.pic.i_fref[0]; i++ ) + M32( h->mb.mvr[0][i][h->mb.i_mb_xy] ) = 0; return; + } if( flags & X264_ANALYSE_PSUB16x16 ) { @@ -2462,7 +2887,7 @@ intra_analysis: /* Do sub 8x8 */ if( flags & X264_ANALYSE_PSUB8x8 ) { - for( i = 0; i < 4; i++ ) + for( int i = 0; i < 4; i++ ) { x264_mb_analyse_inter_p4x4( h, &analysis, i ); if( analysis.l0.i_cost4x4[i] < analysis.l0.me8x8[i].cost ) @@ -2491,10 +2916,18 @@ intra_analysis: if( ( flags & X264_ANALYSE_PSUB16x16 ) && analysis.l0.i_cost8x8 < analysis.l0.me16x16.cost + i_thresh16x8 ) { - x264_mb_analyse_inter_p16x8( h, &analysis ); + int i_avg_mv_ref_cost = (analysis.l0.me8x8[2].cost_mv + analysis.l0.me8x8[2].i_ref_cost + + analysis.l0.me8x8[3].cost_mv + analysis.l0.me8x8[3].i_ref_cost + 1) >> 1; + analysis.i_cost_est16x8[1] = analysis.i_satd8x8[0][2] + analysis.i_satd8x8[0][3] + i_avg_mv_ref_cost; + + x264_mb_analyse_inter_p16x8( h, &analysis, i_cost ); COPY3_IF_LT( i_cost, analysis.l0.i_cost16x8, i_type, P_L0, i_partition, D_16x8 ); - x264_mb_analyse_inter_p8x16( h, &analysis ); + i_avg_mv_ref_cost = (analysis.l0.me8x8[1].cost_mv + analysis.l0.me8x8[1].i_ref_cost + + analysis.l0.me8x8[3].cost_mv + analysis.l0.me8x8[3].i_ref_cost + 1) >> 1; + analysis.i_cost_est8x16[1] = analysis.i_satd8x8[0][1] + analysis.i_satd8x8[0][3] + i_avg_mv_ref_cost; + + x264_mb_analyse_inter_p8x16( h, &analysis, i_cost ); COPY3_IF_LT( i_cost, analysis.l0.i_cost8x16, i_type, P_L0, i_partition, D_8x16 ); } @@ -2525,9 +2958,8 @@ intra_analysis: } else if( i_partition == D_8x8 ) { - int i8x8; i_cost = 0; - for( i8x8 = 0; i8x8 < 4; i8x8++ ) + for( int i8x8 = 0; i8x8 < 4; i8x8++ ) { switch( h->mb.i_sub_partition[i8x8] ) { @@ -2649,9 +3081,8 @@ intra_analysis: } else if( i_partition == D_8x8 ) { - int i8x8; x264_analyse_update_cache( h, &analysis ); - for( i8x8 = 0; i8x8 < 4; i8x8++ ) + for( int i8x8 = 0; i8x8 < 4; i8x8++ ) { if( h->mb.i_sub_partition[i8x8] == D_L0_8x8 ) { @@ -2685,13 +3116,13 @@ intra_analysis: int b_skip = 0; if( analysis.i_mbrd ) - x264_mb_cache_fenc_satd( h ); + x264_mb_init_fenc_cache( h, analysis.i_mbrd >= 2 ); h->mb.i_type = B_SKIP; if( h->mb.b_direct_auto_write ) { /* direct=auto heuristic: prefer whichever mode allows more Skip macroblocks */ - for( i = 0; i < 2; i++ ) + for( int i = 0; i < 2; i++ ) { int b_changed = 1; h->sh.b_direct_spatial_mv_pred ^= 1; @@ -2712,6 +3143,7 @@ intra_analysis: else analysis.b_direct_available = x264_mb_predict_mv_direct16x16( h, NULL ); + analysis.b_try_skip = 0; if( analysis.b_direct_available ) { if( !h->mb.b_direct_auto_write ) @@ -2726,7 +3158,17 @@ intra_analysis: { /* Conditioning the probe on neighboring block types * doesn't seem to help speed or quality. 
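 * The gate that follows can be read as (paraphrasing the code below, not new
 * API): below subpel refine 3 the probe's verdict is taken as final, while at
 * higher settings it merely arms b_try_skip so that the full b16x16 search
 * can re-validate the skip against the direct MVs before committing:
 *
 *     analysis.b_try_skip = x264_macroblock_probe_bskip( h );
 *     b_skip = h->param.analyse.i_subpel_refine < 3 ? analysis.b_try_skip : 0;
 *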
*/
-            b_skip = x264_macroblock_probe_bskip( h );
+            analysis.b_try_skip = x264_macroblock_probe_bskip( h );
+            if( h->param.analyse.i_subpel_refine < 3 )
+                b_skip = analysis.b_try_skip;
+        }
+        /* Set up MVs for future predictors */
+        if( b_skip )
+        {
+            for( int i = 0; i < h->mb.pic.i_fref[0]; i++ )
+                M32( h->mb.mvr[0][i][h->mb.i_mb_xy] ) = 0;
+            for( int i = 0; i < h->mb.pic.i_fref[1]; i++ )
+                M32( h->mb.mvr[1][i][h->mb.i_mb_xy] ) = 0;
+        }
         }
@@ -2737,6 +3179,7 @@ intra_analysis:
         int i_partition;
         int i_satd_inter;
         h->mb.b_skip_mc = 0;
+        h->mb.i_type = B_DIRECT;
 
         x264_mb_analyse_load_costs( h, &analysis );
 
@@ -2747,6 +3190,15 @@ intra_analysis:
 
         x264_mb_analyse_inter_b16x16( h, &analysis );
 
+        if( h->mb.i_type == B_SKIP )
+        {
+            for( int i = 1; i < h->mb.pic.i_fref[0]; i++ )
+                M32( h->mb.mvr[0][i][h->mb.i_mb_xy] ) = 0;
+            for( int i = 1; i < h->mb.pic.i_fref[1]; i++ )
+                M32( h->mb.mvr[1][i][h->mb.i_mb_xy] ) = 0;
+            return;
+        }
+
         i_type = B_L0_L0;
         i_partition = D_16x16;
         i_cost = analysis.l0.me16x16.cost;
@@ -2770,29 +3222,71 @@ intra_analysis:
 
         if( flags & X264_ANALYSE_BSUB16x16 )
         {
-            x264_mb_analyse_inter_b8x8( h, &analysis );
-            if( analysis.i_cost8x8bi < i_cost )
-            {
-                i_type = B_8x8;
-                i_partition = D_8x8;
-                i_cost = analysis.i_cost8x8bi;
+            if( h->param.analyse.b_mixed_references )
+                x264_mb_analyse_inter_b8x8_mixed_ref( h, &analysis );
+            else
+                x264_mb_analyse_inter_b8x8( h, &analysis );
 
-                if( h->mb.i_sub_partition[0] == h->mb.i_sub_partition[1] ||
-                    h->mb.i_sub_partition[2] == h->mb.i_sub_partition[3] )
-                {
-                    x264_mb_analyse_inter_b16x8( h, &analysis );
-                    COPY3_IF_LT( i_cost, analysis.i_cost16x8bi,
-                                 i_type, analysis.i_mb_type16x8,
-                                 i_partition, D_16x8 );
-                }
-                if( h->mb.i_sub_partition[0] == h->mb.i_sub_partition[2] ||
-                    h->mb.i_sub_partition[1] == h->mb.i_sub_partition[3] )
-                {
-                    x264_mb_analyse_inter_b8x16( h, &analysis );
-                    COPY3_IF_LT( i_cost, analysis.i_cost8x16bi,
-                                 i_type, analysis.i_mb_type8x16,
-                                 i_partition, D_8x16 );
-                }
+            COPY3_IF_LT( i_cost, analysis.i_cost8x8bi, i_type, B_8x8, i_partition, D_8x8 );
+
+            /* Try to estimate the cost of b16x8/b8x16 based on the satd scores of the b8x8 modes */
+            int i_cost_est16x8bi_total = 0, i_cost_est8x16bi_total = 0;
+            int i_mb_type, i_partition16x8[2], i_partition8x16[2];
+            for( int i = 0; i < 2; i++ )
+            {
+                int avg_l0_mv_ref_cost, avg_l1_mv_ref_cost;
+                int i_l0_satd, i_l1_satd, i_bi_satd, i_best_cost;
+                // 16x8
+                i_best_cost = COST_MAX;
+                i_l0_satd = analysis.i_satd8x8[0][i*2] + analysis.i_satd8x8[0][i*2+1];
+                i_l1_satd = analysis.i_satd8x8[1][i*2] + analysis.i_satd8x8[1][i*2+1];
+                i_bi_satd = analysis.i_satd8x8[2][i*2] + analysis.i_satd8x8[2][i*2+1];
+                avg_l0_mv_ref_cost = ( analysis.l0.me8x8[i*2].cost_mv + analysis.l0.me8x8[i*2].i_ref_cost
+                                     + analysis.l0.me8x8[i*2+1].cost_mv + analysis.l0.me8x8[i*2+1].i_ref_cost + 1 ) >> 1;
+                avg_l1_mv_ref_cost = ( analysis.l1.me8x8[i*2].cost_mv + analysis.l1.me8x8[i*2].i_ref_cost
+                                     + analysis.l1.me8x8[i*2+1].cost_mv + analysis.l1.me8x8[i*2+1].i_ref_cost + 1 ) >> 1;
+                COPY2_IF_LT( i_best_cost, i_l0_satd + avg_l0_mv_ref_cost, i_partition16x8[i], D_L0_8x8 );
+                COPY2_IF_LT( i_best_cost, i_l1_satd + avg_l1_mv_ref_cost, i_partition16x8[i], D_L1_8x8 );
+                COPY2_IF_LT( i_best_cost, i_bi_satd + avg_l0_mv_ref_cost + avg_l1_mv_ref_cost, i_partition16x8[i], D_BI_8x8 );
+                analysis.i_cost_est16x8[i] = i_best_cost;
+
+                // 8x16
+                i_best_cost = COST_MAX;
+                i_l0_satd = analysis.i_satd8x8[0][i] + analysis.i_satd8x8[0][i+2];
+                i_l1_satd = analysis.i_satd8x8[1][i] + analysis.i_satd8x8[1][i+2];
+                i_bi_satd = analysis.i_satd8x8[2][i] +
analysis.i_satd8x8[2][i+2]; + avg_l0_mv_ref_cost = ( analysis.l0.me8x8[i].cost_mv + analysis.l0.me8x8[i].i_ref_cost + + analysis.l0.me8x8[i+2].cost_mv + analysis.l0.me8x8[i+2].i_ref_cost + 1 ) >> 1; + avg_l1_mv_ref_cost = ( analysis.l1.me8x8[i].cost_mv + analysis.l1.me8x8[i].i_ref_cost + + analysis.l1.me8x8[i+2].cost_mv + analysis.l1.me8x8[i+2].i_ref_cost + 1 ) >> 1; + COPY2_IF_LT( i_best_cost, i_l0_satd + avg_l0_mv_ref_cost, i_partition8x16[i], D_L0_8x8 ); + COPY2_IF_LT( i_best_cost, i_l1_satd + avg_l1_mv_ref_cost, i_partition8x16[i], D_L1_8x8 ); + COPY2_IF_LT( i_best_cost, i_bi_satd + avg_l0_mv_ref_cost + avg_l1_mv_ref_cost, i_partition8x16[i], D_BI_8x8 ); + analysis.i_cost_est8x16[i] = i_best_cost; + } + i_mb_type = B_L0_L0 + (i_partition16x8[0]>>2) * 3 + (i_partition16x8[1]>>2); + analysis.i_cost_est16x8[1] += analysis.i_lambda * i_mb_b16x8_cost_table[i_mb_type]; + i_cost_est16x8bi_total = analysis.i_cost_est16x8[0] + analysis.i_cost_est16x8[1]; + i_mb_type = B_L0_L0 + (i_partition8x16[0]>>2) * 3 + (i_partition8x16[1]>>2); + analysis.i_cost_est8x16[1] += analysis.i_lambda * i_mb_b16x8_cost_table[i_mb_type]; + i_cost_est8x16bi_total = analysis.i_cost_est8x16[0] + analysis.i_cost_est8x16[1]; + + /* We can gain a little speed by checking the mode with the lowest estimated cost first */ + int try_16x8_first = i_cost_est16x8bi_total < i_cost_est8x16bi_total; + if( try_16x8_first && i_cost_est16x8bi_total < i_cost ) + { + x264_mb_analyse_inter_b16x8( h, &analysis, i_cost ); + COPY3_IF_LT( i_cost, analysis.i_cost16x8bi, i_type, analysis.i_mb_type16x8, i_partition, D_16x8 ); + } + if( i_cost_est8x16bi_total < i_cost ) + { + x264_mb_analyse_inter_b8x16( h, &analysis, i_cost ); + COPY3_IF_LT( i_cost, analysis.i_cost8x16bi, i_type, analysis.i_mb_type8x16, i_partition, D_8x16 ); + } + if( !try_16x8_first && i_cost_est16x8bi_total < i_cost ) + { + x264_mb_analyse_inter_b16x8( h, &analysis, i_cost ); + COPY3_IF_LT( i_cost, analysis.i_cost16x8bi, i_type, analysis.i_mb_type16x8, i_partition, D_16x8 ); } } @@ -2819,13 +3313,13 @@ intra_analysis: } else if( i_type == B_BI_BI ) { - x264_me_refine_qpel( h, &analysis.l0.me16x16 ); - x264_me_refine_qpel( h, &analysis.l1.me16x16 ); + x264_me_refine_qpel( h, &analysis.l0.bi16x16 ); + x264_me_refine_qpel( h, &analysis.l1.bi16x16 ); } } else if( i_partition == D_16x8 ) { - for( i=0; i<2; i++ ) + for( int i = 0; i < 2; i++ ) { if( analysis.i_mb_partition16x8[i] != D_L1_8x8 ) x264_me_refine_qpel( h, &analysis.l0.me16x8[i] ); @@ -2835,7 +3329,7 @@ intra_analysis: } else if( i_partition == D_8x16 ) { - for( i=0; i<2; i++ ) + for( int i = 0; i < 2; i++ ) { if( analysis.i_mb_partition8x16[i] != D_L1_8x8 ) x264_me_refine_qpel( h, &analysis.l0.me8x16[i] ); @@ -2845,7 +3339,7 @@ intra_analysis: } else if( i_partition == D_8x8 ) { - for( i=0; i<4; i++ ) + for( int i = 0; i < 4; i++ ) { x264_me_t *m; int i_part_cost_old; @@ -2899,7 +3393,16 @@ intra_analysis: h->mb.i_partition = i_partition; } - x264_mb_analyse_intra( h, &analysis, i_satd_inter ); + if( h->mb.b_chroma_me ) + { + x264_mb_analyse_intra_chroma( h, &analysis ); + x264_mb_analyse_intra( h, &analysis, i_satd_inter - analysis.i_satd_i8x8chroma ); + analysis.i_satd_i16x16 += analysis.i_satd_i8x8chroma; + analysis.i_satd_i8x8 += analysis.i_satd_i8x8chroma; + analysis.i_satd_i4x4 += analysis.i_satd_i8x8chroma; + } + else + x264_mb_analyse_intra( h, &analysis, i_satd_inter ); if( analysis.i_mbrd ) { @@ -2922,7 +3425,7 @@ intra_analysis: if( analysis.i_mbrd >= 2 && i_type > B_DIRECT && i_type < B_SKIP ) { - const int 
i_biweight = h->mb.bipred_weight[analysis.l0.i_ref][analysis.l1.i_ref]; + int i_biweight; x264_analyse_update_cache( h, &analysis ); if( i_partition == D_16x16 ) @@ -2938,11 +3441,14 @@ intra_analysis: x264_me_refine_qpel_rd( h, &analysis.l1.me16x16, analysis.i_lambda2, 0, 1 ); } else if( i_type == B_BI_BI ) - x264_me_refine_bidir_rd( h, &analysis.l0.me16x16, &analysis.l1.me16x16, i_biweight, 0, analysis.i_lambda2 ); + { + i_biweight = h->mb.bipred_weight[analysis.l0.bi16x16.i_ref][analysis.l1.bi16x16.i_ref]; + x264_me_refine_bidir_rd( h, &analysis.l0.bi16x16, &analysis.l1.bi16x16, i_biweight, 0, analysis.i_lambda2 ); + } } else if( i_partition == D_16x8 ) { - for( i = 0; i < 2; i++ ) + for( int i = 0; i < 2; i++ ) { h->mb.i_sub_partition[i*2] = h->mb.i_sub_partition[i*2+1] = analysis.i_mb_partition16x8[i]; if( analysis.i_mb_partition16x8[i] == D_L0_8x8 ) @@ -2950,12 +3456,15 @@ intra_analysis: else if( analysis.i_mb_partition16x8[i] == D_L1_8x8 ) x264_me_refine_qpel_rd( h, &analysis.l1.me16x8[i], analysis.i_lambda2, i*8, 1 ); else if( analysis.i_mb_partition16x8[i] == D_BI_8x8 ) + { + i_biweight = h->mb.bipred_weight[analysis.l0.me16x8[i].i_ref][analysis.l1.me16x8[i].i_ref]; x264_me_refine_bidir_rd( h, &analysis.l0.me16x8[i], &analysis.l1.me16x8[i], i_biweight, i*2, analysis.i_lambda2 ); + } } } else if( i_partition == D_8x16 ) { - for( i = 0; i < 2; i++ ) + for( int i = 0; i < 2; i++ ) { h->mb.i_sub_partition[i] = h->mb.i_sub_partition[i+2] = analysis.i_mb_partition8x16[i]; if( analysis.i_mb_partition8x16[i] == D_L0_8x8 ) @@ -2963,19 +3472,25 @@ intra_analysis: else if( analysis.i_mb_partition8x16[i] == D_L1_8x8 ) x264_me_refine_qpel_rd( h, &analysis.l1.me8x16[i], analysis.i_lambda2, i*4, 1 ); else if( analysis.i_mb_partition8x16[i] == D_BI_8x8 ) + { + i_biweight = h->mb.bipred_weight[analysis.l0.me8x16[i].i_ref][analysis.l1.me8x16[i].i_ref]; x264_me_refine_bidir_rd( h, &analysis.l0.me8x16[i], &analysis.l1.me8x16[i], i_biweight, i, analysis.i_lambda2 ); + } } } else if( i_partition == D_8x8 ) { - for( i = 0; i < 4; i++ ) + for( int i = 0; i < 4; i++ ) { if( h->mb.i_sub_partition[i] == D_L0_8x8 ) x264_me_refine_qpel_rd( h, &analysis.l0.me8x8[i], analysis.i_lambda2, i*4, 0 ); else if( h->mb.i_sub_partition[i] == D_L1_8x8 ) x264_me_refine_qpel_rd( h, &analysis.l1.me8x8[i], analysis.i_lambda2, i*4, 1 ); else if( h->mb.i_sub_partition[i] == D_BI_8x8 ) + { + i_biweight = h->mb.bipred_weight[analysis.l0.me8x8[i].i_ref][analysis.l1.me8x8[i].i_ref]; x264_me_refine_bidir_rd( h, &analysis.l0.me8x8[i], &analysis.l1.me8x8[i], i_biweight, i, analysis.i_lambda2 ); + } } } } @@ -3004,7 +3519,8 @@ intra_analysis: x264_mb_analyse_qp_rd( h, &analysis ); h->mb.b_trellis = h->param.analyse.i_trellis; - h->mb.b_noise_reduction = !!h->param.analyse.i_noise_reduction; + h->mb.b_noise_reduction = h->mb.b_noise_reduction || (!!h->param.analyse.i_noise_reduction && !IS_INTRA( h->mb.i_type )); + if( !IS_SKIP(h->mb.i_type) && h->mb.i_psy_trellis && h->param.analyse.i_trellis == 1 ) x264_psy_trellis_init( h, 0 ); if( h->mb.b_trellis == 1 || h->mb.b_noise_reduction ) @@ -3014,18 +3530,16 @@ intra_analysis: /*-------------------- Update MB from the analysis ----------------------*/ static void x264_analyse_update_cache( x264_t *h, x264_mb_analysis_t *a ) { - int i; - switch( h->mb.i_type ) { case I_4x4: - for( i = 0; i < 16; i++ ) + for( int i = 0; i < 16; i++ ) h->mb.cache.intra4x4_pred_mode[x264_scan8[i]] = a->i_predict4x4[i]; x264_mb_analyse_intra_chroma( h, a ); break; case I_8x8: - for( i = 0; i < 4; i++ ) + for( 
int i = 0; i < 4; i++ ) x264_macroblock_cache_intra8x8_pred( h, 2*(i&1), 2*(i>>1), a->i_predict8x8[i] ); x264_mb_analyse_intra_chroma( h, a ); @@ -3071,7 +3585,7 @@ static void x264_analyse_update_cache( x264_t *h, x264_mb_analysis_t *a ) x264_macroblock_cache_ref( h, 2, 0, 2, 2, 0, a->l0.me8x8[1].i_ref ); x264_macroblock_cache_ref( h, 0, 2, 2, 2, 0, a->l0.me8x8[2].i_ref ); x264_macroblock_cache_ref( h, 2, 2, 2, 2, 0, a->l0.me8x8[3].i_ref ); - for( i = 0; i < 4; i++ ) + for( int i = 0; i < 4; i++ ) x264_mb_cache_mv_p8x8( h, a, i ); break; @@ -3085,6 +3599,7 @@ static void x264_analyse_update_cache( x264_t *h, x264_mb_analysis_t *a ) case B_SKIP: case B_DIRECT: + h->mb.i_partition = h->mb.cache.direct_partition; x264_mb_load_mv_direct8x8( h, 0 ); x264_mb_load_mv_direct8x8( h, 1 ); x264_mb_load_mv_direct8x8( h, 2 ); @@ -3093,7 +3608,7 @@ static void x264_analyse_update_cache( x264_t *h, x264_mb_analysis_t *a ) case B_8x8: /* optimize: cache might not need to be rewritten */ - for( i = 0; i < 4; i++ ) + for( int i = 0; i < 4; i++ ) x264_mb_cache_mv_b8x8( h, a, i, 1 ); break; @@ -3104,7 +3619,7 @@ static void x264_analyse_update_cache( x264_t *h, x264_mb_analysis_t *a ) switch( h->mb.i_type ) { case B_L0_L0: - x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.i_ref ); + x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.me16x16.i_ref ); x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv ); x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, -1 ); @@ -3116,15 +3631,15 @@ static void x264_analyse_update_cache( x264_t *h, x264_mb_analysis_t *a ) x264_macroblock_cache_mv ( h, 0, 0, 4, 4, 0, 0 ); x264_macroblock_cache_mvd( h, 0, 0, 4, 4, 0, 0 ); - x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, a->l1.i_ref ); + x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, a->l1.me16x16.i_ref ); x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 1, a->l1.me16x16.mv ); break; case B_BI_BI: - x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.i_ref ); - x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv ); + x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.bi16x16.i_ref ); + x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, a->l0.bi16x16.mv ); - x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, a->l1.i_ref ); - x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 1, a->l1.me16x16.mv ); + x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, a->l1.bi16x16.i_ref ); + x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 1, a->l1.bi16x16.mv ); break; } break; @@ -3143,26 +3658,25 @@ static void x264_analyse_update_cache( x264_t *h, x264_mb_analysis_t *a ) } #ifndef NDEBUG - if( h->param.i_threads > 1 && !h->param.b_sliced_threads && !IS_INTRA(h->mb.i_type) ) + if( h->i_thread_frames > 1 && !IS_INTRA(h->mb.i_type) ) { - int l; - for( l=0; l <= (h->sh.i_type == SLICE_TYPE_B); l++ ) + for( int l = 0; l <= (h->sh.i_type == SLICE_TYPE_B); l++ ) { int completed; int ref = h->mb.cache.ref[l][x264_scan8[0]]; if( ref < 0 ) continue; - completed = (l ? 
h->fref1 : h->fref0)[ ref >> h->mb.b_interlaced ]->orig->i_lines_completed; + completed = h->fref[l][ ref >> h->mb.b_interlaced ]->orig->i_lines_completed; if( (h->mb.cache.mv[l][x264_scan8[15]][1] >> (2 - h->mb.b_interlaced)) + h->mb.i_mb_y*16 > completed ) { x264_log( h, X264_LOG_WARNING, "internal error (MV out of thread range)\n"); - fprintf(stderr, "mb type: %d \n", h->mb.i_type); - fprintf(stderr, "mv: l%dr%d (%d,%d) \n", l, ref, + x264_log( h, X264_LOG_DEBUG, "mb type: %d \n", h->mb.i_type); + x264_log( h, X264_LOG_DEBUG, "mv: l%dr%d (%d,%d) \n", l, ref, h->mb.cache.mv[l][x264_scan8[15]][0], h->mb.cache.mv[l][x264_scan8[15]][1] ); - fprintf(stderr, "limit: %d \n", h->mb.mv_max_spel[1]); - fprintf(stderr, "mb_xy: %d,%d \n", h->mb.i_mb_x, h->mb.i_mb_y); - fprintf(stderr, "completed: %d \n", completed ); + x264_log( h, X264_LOG_DEBUG, "limit: %d \n", h->mb.mv_max_spel[1]); + x264_log( h, X264_LOG_DEBUG, "mb_xy: %d,%d \n", h->mb.i_mb_x, h->mb.i_mb_y); + x264_log( h, X264_LOG_DEBUG, "completed: %d \n", completed ); x264_log( h, X264_LOG_WARNING, "recovering by using intra mode\n"); x264_mb_analyse_intra( h, a, COST_MAX ); h->mb.i_type = I_16x16;
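
/* A closing sketch (illustrative only; the helper name is hypothetical, not
 * part of the diff) of the invariant the debug block above enforces: with
 * frame-parallel threads, a motion vector may only reach rows of the
 * reference frame that the reference's encoding thread has already
 * reconstructed. */
static int mv_within_thread_range( int mv_y_qpel, int mb_y, int b_interlaced,
                                   int i_lines_completed )
{
    /* lowest luma row this MV can touch at the MB's bottom edge, matching
     * (mv[1] >> (2 - b_interlaced)) + i_mb_y*16 in the check above */
    int row = ( mv_y_qpel >> (2 - b_interlaced) ) + mb_y * 16;
    return row <= i_lines_completed;
}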