#define _ISOC99_SOURCE
#include <math.h>
-#include <limits.h>
-#ifndef _MSC_VER
#include <unistd.h>
-#endif
#include "common/common.h"
#include "common/cpu.h"
/* 8x8 */
int i_cost8x8;
/* [ref][0] is 16x16 mv, [ref][1..4] are 8x8 mv from partition [0..3] */
- DECLARE_ALIGNED_4( int16_t mvc[32][5][2] );
+ ALIGNED_4( int16_t mvc[32][5][2] );
x264_me_t me8x8[4];
/* Sub 4x4 */
int i_lambda;
int i_lambda2;
int i_qp;
- int16_t *p_cost_mv;
+ uint16_t *p_cost_mv;
uint16_t *p_cost_ref0;
uint16_t *p_cost_ref1;
int i_mbrd;
943718, 1189010, 1498059, 1887436 /* 48 - 51 */
};
/* Fractional part of 2^x: entry i holds round((2^(i/64) - 1) * 256),
 * i.e. the low 8 bits of a fixed-point exp2 for x in [0,1) at 1/64 steps.
 * (Values verified against the formula; the table is monotonic.) */
const uint8_t x264_exp2_lut[64] = {
      0,   3,   6,   8,  11,  14,  17,  20,  23,  26,  29,  32,  36,  39,  42,  45,
     48,  52,  55,  58,  62,  65,  69,  72,  76,  80,  83,  87,  91,  94,  98, 102,
    106, 110, 114, 118, 122, 126, 130, 135, 139, 143, 147, 152, 156, 161, 165, 170,
    175, 179, 184, 189, 194, 198, 203, 208, 214, 219, 224, 229, 234, 240, 245, 250
};
+
/* Fractional log2 table: entry i holds log2(1 + i/128), rounded to 5
 * decimal places.  Combined with the integer log2 of the leading bit this
 * gives a cheap piecewise log2 approximation. */
const float x264_log2_lut[128] = {
    0.00000, 0.01123, 0.02237, 0.03342, 0.04439, 0.05528, 0.06609, 0.07682,
    0.08746, 0.09803, 0.10852, 0.11894, 0.12928, 0.13955, 0.14975, 0.15987,
    0.16993, 0.17991, 0.18982, 0.19967, 0.20945, 0.21917, 0.22882, 0.23840,
    0.24793, 0.25739, 0.26679, 0.27612, 0.28540, 0.29462, 0.30378, 0.31288,
    0.32193, 0.33092, 0.33985, 0.34873, 0.35755, 0.36632, 0.37504, 0.38370,
    0.39232, 0.40088, 0.40939, 0.41785, 0.42626, 0.43463, 0.44294, 0.45121,
    0.45943, 0.46761, 0.47573, 0.48382, 0.49185, 0.49985, 0.50779, 0.51570,
    0.52356, 0.53138, 0.53916, 0.54689, 0.55459, 0.56224, 0.56986, 0.57743,
    0.58496, 0.59246, 0.59991, 0.60733, 0.61471, 0.62205, 0.62936, 0.63662,
    0.64386, 0.65105, 0.65821, 0.66534, 0.67243, 0.67948, 0.68650, 0.69349,
    0.70044, 0.70736, 0.71425, 0.72110, 0.72792, 0.73471, 0.74147, 0.74819,
    0.75489, 0.76155, 0.76818, 0.77479, 0.78136, 0.78790, 0.79442, 0.80090,
    0.80735, 0.81378, 0.82018, 0.82655, 0.83289, 0.83920, 0.84549, 0.85175,
    0.85798, 0.86419, 0.87036, 0.87652, 0.88264, 0.88874, 0.89482, 0.90087,
    0.90689, 0.91289, 0.91886, 0.92481, 0.93074, 0.93664, 0.94251, 0.94837,
    0.95420, 0.96000, 0.96578, 0.97154, 0.97728, 0.98299, 0.98868, 0.99435,
};
+
/* Maps a 32-bit leading-zero count directly to the integer part of log2
 * as a float (entry i == 31 - i).  Stored as float to avoid an int/float
 * conversion at the point of use. */
const float x264_log2_lz_lut[32] = {
    31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
};
+
// should the intra and inter lambdas be different?
// I'm just matching the behaviour of deadzone quant.
static const int x264_trellis_lambda2_tab[2][52] = {
static void x264_analyse_update_cache( x264_t *h, x264_mb_analysis_t *a );
-/* Indexed by lambda instead of qp because, due to rounding,
- * some quantizers share lambdas. This saves memory. */
-uint16_t *x264_cost_mv_fpel[92][4];
-uint16_t x264_cost_ref[92][3][33];
+static uint16_t x264_cost_ref[92][3][33];
+static x264_pthread_mutex_t cost_ref_mutex = X264_PTHREAD_MUTEX_INITIALIZER;
-/* initialize an array of lambda*nbits for all possible mvs */
-static int x264_mb_analyse_load_costs( x264_t *h, x264_mb_analysis_t *a )
+int x264_analyse_init_costs( x264_t *h, int qp )
{
- static int16_t *p_cost_mv[92];
int i, j;
-
- if( !p_cost_mv[a->i_lambda] )
- {
- x264_emms();
- /* could be faster, but isn't called many times */
- /* factor of 4 from qpel, 2 from sign, and 2 because mv can be opposite from mvp */
- CHECKED_MALLOC( p_cost_mv[a->i_lambda], (4*4*2048 + 1) * sizeof(int16_t) );
- p_cost_mv[a->i_lambda] += 2*4*2048;
- for( i = 0; i <= 2*4*2048; i++ )
- {
- p_cost_mv[a->i_lambda][-i] =
- p_cost_mv[a->i_lambda][i] = a->i_lambda * (log2f(i+1)*2 + 0.718f + !!i) + .5f;
- }
- for( i = 0; i < 3; i++ )
- for( j = 0; j < 33; j++ )
- x264_cost_ref[a->i_lambda][i][j] = i ? a->i_lambda * bs_size_te( i, j ) : 0;
- }
- a->p_cost_mv = p_cost_mv[a->i_lambda];
- a->p_cost_ref0 = x264_cost_ref[a->i_lambda][x264_clip3(h->sh.i_num_ref_idx_l0_active-1,0,2)];
- a->p_cost_ref1 = x264_cost_ref[a->i_lambda][x264_clip3(h->sh.i_num_ref_idx_l1_active-1,0,2)];
-
- /* FIXME is this useful for all me methods? */
- if( h->param.analyse.i_me_method >= X264_ME_ESA && !x264_cost_mv_fpel[a->i_lambda][0] )
+ int lambda = x264_lambda_tab[qp];
+ if( h->cost_mv[lambda] )
+ return 0;
+ /* factor of 4 from qpel, 2 from sign, and 2 because mv can be opposite from mvp */
+ CHECKED_MALLOC( h->cost_mv[lambda], (4*4*2048 + 1) * sizeof(uint16_t) );
+ h->cost_mv[lambda] += 2*4*2048;
+ for( i = 0; i <= 2*4*2048; i++ )
+ {
+ h->cost_mv[lambda][-i] =
+ h->cost_mv[lambda][i] = lambda * (log2f(i+1)*2 + 0.718f + !!i) + .5f;
+ }
+ x264_pthread_mutex_lock( &cost_ref_mutex );
+ for( i = 0; i < 3; i++ )
+ for( j = 0; j < 33; j++ )
+ x264_cost_ref[lambda][i][j] = i ? lambda * bs_size_te( i, j ) : 0;
+ x264_pthread_mutex_unlock( &cost_ref_mutex );
+ if( h->param.analyse.i_me_method >= X264_ME_ESA && !h->cost_mv_fpel[lambda][0] )
{
for( j=0; j<4; j++ )
{
- CHECKED_MALLOC( x264_cost_mv_fpel[a->i_lambda][j], (4*2048 + 1) * sizeof(int16_t) );
- x264_cost_mv_fpel[a->i_lambda][j] += 2*2048;
+ CHECKED_MALLOC( h->cost_mv_fpel[lambda][j], (4*2048 + 1) * sizeof(uint16_t) );
+ h->cost_mv_fpel[lambda][j] += 2*2048;
for( i = -2*2048; i < 2*2048; i++ )
- x264_cost_mv_fpel[a->i_lambda][j][i] = p_cost_mv[a->i_lambda][i*4+j];
+ h->cost_mv_fpel[lambda][j][i] = h->cost_mv[lambda][i*4+j];
}
}
return 0;
return -1;
}
+void x264_analyse_free_costs( x264_t *h )
+{
+ int i, j;
+ for( i = 0; i < 92; i++ )
+ {
+ if( h->cost_mv[i] )
+ x264_free( h->cost_mv[i] - 2*4*2048 );
+ if( h->cost_mv_fpel[i][0] )
+ for( j = 0; j < 4; j++ )
+ x264_free( h->cost_mv_fpel[i][j] - 2*2048 );
+ }
+}
+
+/* initialize an array of lambda*nbits for all possible mvs */
+static void x264_mb_analyse_load_costs( x264_t *h, x264_mb_analysis_t *a )
+{
+ a->p_cost_mv = h->cost_mv[a->i_lambda];
+ a->p_cost_ref0 = x264_cost_ref[a->i_lambda][x264_clip3(h->sh.i_num_ref_idx_l0_active-1,0,2)];
+ a->p_cost_ref1 = x264_cost_ref[a->i_lambda][x264_clip3(h->sh.i_num_ref_idx_l1_active-1,0,2)];
+}
+
static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp )
{
int i = h->param.analyse.i_subpel_refine - (h->sh.i_type == SLICE_TYPE_B);
/* Max = 4 */
static void predict_16x16_mode_available( unsigned int i_neighbour, int *mode, int *pi_count )
{
- if( i_neighbour & MB_TOPLEFT )
+ int b_top = i_neighbour & MB_TOP;
+ int b_left = i_neighbour & MB_LEFT;
+ if( b_top && b_left )
{
/* top and left available */
*mode++ = I_PRED_16x16_V;
*mode++ = I_PRED_16x16_H;
*mode++ = I_PRED_16x16_DC;
- *mode++ = I_PRED_16x16_P;
- *pi_count = 4;
+ *pi_count = 3;
+ if( i_neighbour & MB_TOPLEFT )
+ {
+ /* top left available*/
+ *mode++ = I_PRED_16x16_P;
+ *pi_count = 4;
+ }
}
- else if( i_neighbour & MB_LEFT )
+ else if( b_left )
{
/* left available*/
*mode++ = I_PRED_16x16_DC_LEFT;
*mode++ = I_PRED_16x16_H;
*pi_count = 2;
}
- else if( i_neighbour & MB_TOP )
+ else if( b_top )
{
/* top available*/
*mode++ = I_PRED_16x16_DC_TOP;
/* Max = 4 */
static void predict_8x8chroma_mode_available( unsigned int i_neighbour, int *mode, int *pi_count )
{
- if( i_neighbour & MB_TOPLEFT )
+ int b_top = i_neighbour & MB_TOP;
+ int b_left = i_neighbour & MB_LEFT;
+ if( b_top && b_left )
{
/* top and left available */
*mode++ = I_PRED_CHROMA_V;
*mode++ = I_PRED_CHROMA_H;
*mode++ = I_PRED_CHROMA_DC;
- *mode++ = I_PRED_CHROMA_P;
- *pi_count = 4;
+ *pi_count = 3;
+ if( i_neighbour & MB_TOPLEFT )
+ {
+ /* top left available */
+ *mode++ = I_PRED_CHROMA_P;
+ *pi_count = 4;
+ }
}
- else if( i_neighbour & MB_LEFT )
+ else if( b_left )
{
/* left available*/
*mode++ = I_PRED_CHROMA_DC_LEFT;
*mode++ = I_PRED_CHROMA_H;
*pi_count = 2;
}
- else if( i_neighbour & MB_TOP )
+ else if( b_top )
{
/* top available*/
*mode++ = I_PRED_CHROMA_DC_TOP;
static void predict_4x4_mode_available( unsigned int i_neighbour,
int *mode, int *pi_count )
{
- int b_l = i_neighbour & MB_LEFT;
- int b_t = i_neighbour & MB_TOP;
-
- if( b_l && b_t )
+ int b_top = i_neighbour & MB_TOP;
+ int b_left = i_neighbour & MB_LEFT;
+ if( b_top && b_left )
{
*pi_count = 6;
*mode++ = I_PRED_4x4_DC;
*mode++ = I_PRED_4x4_VL;
*mode++ = I_PRED_4x4_HU;
}
- else if( b_l )
+ else if( b_left )
{
*mode++ = I_PRED_4x4_DC_LEFT;
*mode++ = I_PRED_4x4_H;
*mode++ = I_PRED_4x4_HU;
*pi_count = 3;
}
- else if( b_t )
+ else if( b_top )
{
*mode++ = I_PRED_4x4_DC_TOP;
*mode++ = I_PRED_4x4_V;
/* For trellis=2, we need to do this for both sizes of DCT, for trellis=1 we only need to use it on the chosen mode. */
static void inline x264_psy_trellis_init( x264_t *h, int do_both_dct )
{
- DECLARE_ALIGNED_16( int16_t dct8x8[4][8][8] );
- DECLARE_ALIGNED_16( int16_t dct4x4[16][4][4] );
- DECLARE_ALIGNED_16( static uint8_t zero[16*FDEC_STRIDE] ) = {0};
+ ALIGNED_ARRAY_16( int16_t, dct8x8,[4],[64] );
+ ALIGNED_ARRAY_16( int16_t, dct4x4,[16],[16] );
+ ALIGNED_16( static uint8_t zero[16*FDEC_STRIDE] ) = {0};
int i;
if( do_both_dct || h->mb.b_transform_8x8 )
/* Pre-calculate fenc satd scores for psy RD, minus DC coefficients */
static inline void x264_mb_cache_fenc_satd( x264_t *h )
{
- DECLARE_ALIGNED_16( static uint8_t zero[16] ) = {0};
+ ALIGNED_16( static uint8_t zero[16] ) = {0};
uint8_t *fenc;
int x, y, satd_sum = 0, sa8d_sum = 0;
if( h->param.analyse.i_trellis == 2 && h->mb.i_psy_trellis )
p_srcc[0] = h->mb.pic.p_fenc[1];
p_srcc[1] = h->mb.pic.p_fenc[2];
- predict_8x8chroma_mode_available( h->mb.i_neighbour, predict_mode, &i_max );
+ predict_8x8chroma_mode_available( h->mb.i_neighbour_intra, predict_mode, &i_max );
a->i_satd_i8x8chroma = COST_MAX;
if( i_max == 4 && b_merged_satd )
{
/*---------------- Try all mode and calculate their score ---------------*/
/* 16x16 prediction selection */
- predict_16x16_mode_available( h->mb.i_neighbour, predict_mode, &i_max );
+ predict_16x16_mode_available( h->mb.i_neighbour_intra, predict_mode, &i_max );
if( b_merged_satd && i_max == 4 )
{
/* 8x8 prediction selection */
if( flags & X264_ANALYSE_I8x8 )
{
- DECLARE_ALIGNED_16( uint8_t edge[33] );
+ ALIGNED_ARRAY_16( uint8_t, edge,[33] );
x264_pixel_cmp_t sa8d = (h->pixf.mbcmp[0] == h->pixf.satd[0]) ? h->pixf.sa8d[PIXEL_8x8] : h->pixf.mbcmp[PIXEL_8x8];
int i_satd_thresh = a->i_mbrd ? COST_MAX : X264_MIN( i_satd_inter, a->i_satd_i16x16 );
int i_cost = 0;
else
h->predict_8x8[i_mode]( p_dst_by, edge );
- i_satd = sa8d( p_dst_by, FDEC_STRIDE, p_src_by, FENC_STRIDE )
- + a->i_lambda * (i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) ? 1 : 4);
+ i_satd = sa8d( p_dst_by, FDEC_STRIDE, p_src_by, FENC_STRIDE ) + a->i_lambda * 4;
+ if( i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) )
+ i_satd -= a->i_lambda * 3;
COPY2_IF_LT( i_best, i_satd, a->i_predict8x8[idx], i_mode );
a->i_satd_i8x8_dir[i_mode][idx] = i_satd;
h->pixf.intra_mbcmp_x3_4x4( p_src_by, p_dst_by, satd );
satd[i_pred_mode] -= 3 * a->i_lambda;
for( i=2; i>=0; i-- )
- COPY2_IF_LT( i_best, satd[i] + 4 * a->i_lambda,
- a->i_predict4x4[idx], i );
+ COPY2_IF_LT( i_best, satd[i], a->i_predict4x4[idx], i );
i = 3;
}
else
else
h->predict_4x4[i_mode]( p_dst_by );
- i_satd = h->pixf.mbcmp[PIXEL_4x4]( p_dst_by, FDEC_STRIDE,
- p_src_by, FENC_STRIDE )
- + a->i_lambda * (i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) ? 1 : 4);
+ i_satd = h->pixf.mbcmp[PIXEL_4x4]( p_dst_by, FDEC_STRIDE, p_src_by, FENC_STRIDE );
+ if( i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) )
+ i_satd -= a->i_lambda * 3;
COPY2_IF_LT( i_best, i_satd, a->i_predict4x4[idx], i_mode );
}
- i_cost += i_best;
+ i_cost += i_best + 4 * a->i_lambda;
if( i_cost > i_satd_thresh || idx == 15 )
break;
int old_pred_mode = a->i_predict16x16;
i_thresh = a->i_satd_i16x16_dir[old_pred_mode] * 9/8;
i_best = a->i_satd_i16x16;
- predict_16x16_mode_available( h->mb.i_neighbour, predict_mode, &i_max );
+ predict_16x16_mode_available( h->mb.i_neighbour_intra, predict_mode, &i_max );
for( i = 0; i < i_max; i++ )
{
int i_mode = predict_mode[i];
}
/* RD selection for chroma prediction */
- predict_8x8chroma_mode_available( h->mb.i_neighbour, predict_mode, &i_max );
+ predict_8x8chroma_mode_available( h->mb.i_neighbour_intra, predict_mode, &i_max );
if( i_max > 1 )
{
i_thresh = a->i_satd_i8x8chroma * 5/4;
}
else if( h->mb.i_type == I_8x8 )
{
- DECLARE_ALIGNED_16( uint8_t edge[33] );
+ ALIGNED_ARRAY_16( uint8_t, edge,[33] );
for( idx = 0; idx < 4; idx++ )
{
uint64_t pels_h = 0;
{
x264_me_t m;
int i_ref, i_mvc;
- DECLARE_ALIGNED_4( int16_t mvc[8][2] );
+ ALIGNED_4( int16_t mvc[8][2] );
int i_halfpel_thresh = INT_MAX;
int *p_halfpel_thresh = h->mb.pic.i_fref[0]>1 ? &i_halfpel_thresh : NULL;
h->mb.i_partition = D_16x16;
x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv );
a->l0.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
+ if( !(h->mb.i_cbp_luma|h->mb.i_cbp_chroma) )
+ h->mb.i_type = P_SKIP;
}
}
}
x264_macroblock_cache_mv_ptr( h, 2*x8, 2*y8, 2, 2, 0, l0m->mv );
x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, l0m->i_ref );
- /* mb type cost */
- l0m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
+ /* If CABAC is on and we're not doing sub-8x8 analysis, the costs
+ are effectively zero. */
+ if( !h->param.b_cabac || (h->param.analyse.inter & X264_ANALYSE_PSUB8x8) )
+ l0m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
}
a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost +
/* mb type cost */
m->cost += i_ref_cost;
- m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
+ if( !h->param.b_cabac || (h->param.analyse.inter & X264_ANALYSE_PSUB8x8) )
+ m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
}
a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost +
{
x264_me_t m;
uint8_t **p_fenc = h->mb.pic.p_fenc;
- DECLARE_ALIGNED_4( int16_t mvc[3][2] );
+ ALIGNED_4( int16_t mvc[3][2] );
int i, j;
/* XXX Needed for x264_mb_predict_mv */
{
x264_me_t m;
uint8_t **p_fenc = h->mb.pic.p_fenc;
- DECLARE_ALIGNED_4( int16_t mvc[3][2] );
+ ALIGNED_4( int16_t mvc[3][2] );
int i, j;
/* XXX Needed for x264_mb_predict_mv */
static int x264_mb_analyse_inter_p4x4_chroma( x264_t *h, x264_mb_analysis_t *a, uint8_t **p_fref, int i8x8, int pixel )
{
- DECLARE_ALIGNED_8( uint8_t pix1[16*8] );
+ ALIGNED_8( uint8_t pix1[16*8] );
uint8_t *pix2 = pix1+8;
const int i_stride = h->mb.pic.i_stride[1];
const int or = 4*(i8x8&1) + 2*(i8x8&2)*i_stride;
static void x264_mb_analyse_inter_b16x16( x264_t *h, x264_mb_analysis_t *a )
{
- DECLARE_ALIGNED_16( uint8_t pix0[16*16] );
- DECLARE_ALIGNED_16( uint8_t pix1[16*16] );
+ ALIGNED_ARRAY_16( uint8_t, pix0,[16*16] );
+ ALIGNED_ARRAY_16( uint8_t, pix1,[16*16] );
uint8_t *src0, *src1;
int stride0 = 16, stride1 = 16;
x264_me_t m;
int i_ref, i_mvc;
- DECLARE_ALIGNED_4( int16_t mvc[9][2] );
+ ALIGNED_4( int16_t mvc[9][2] );
int i_halfpel_thresh = INT_MAX;
int *p_halfpel_thresh = h->mb.pic.i_fref[0]>1 ? &i_halfpel_thresh : NULL;
uint8_t **p_fref[2] =
{ h->mb.pic.p_fref[0][a->l0.i_ref],
h->mb.pic.p_fref[1][a->l1.i_ref] };
- DECLARE_ALIGNED_8( uint8_t pix[2][8*8] );
+ ALIGNED_8( uint8_t pix[2][8*8] );
int i, l;
/* XXX Needed for x264_mb_predict_mv */
uint8_t **p_fref[2] =
{ h->mb.pic.p_fref[0][a->l0.i_ref],
h->mb.pic.p_fref[1][a->l1.i_ref] };
- DECLARE_ALIGNED_16( uint8_t pix[2][16*8] );
- DECLARE_ALIGNED_4( int16_t mvc[2][2] );
+ ALIGNED_ARRAY_16( uint8_t, pix,[2],[16*8] );
+ ALIGNED_4( int16_t mvc[2][2] );
int i, l;
h->mb.i_partition = D_16x8;
uint8_t **p_fref[2] =
{ h->mb.pic.p_fref[0][a->l0.i_ref],
h->mb.pic.p_fref[1][a->l1.i_ref] };
- DECLARE_ALIGNED_8( uint8_t pix[2][8*16] );
- DECLARE_ALIGNED_4( int16_t mvc[2][2] );
+ ALIGNED_8( uint8_t pix[2][8*16] );
+ ALIGNED_4( int16_t mvc[2][2] );
int i, l;
h->mb.i_partition = D_8x16;
x264_analyse_update_cache( h, a );
a->l0.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
}
- a->l0.me16x16.cost = a->l0.i_rd16x16;
if( a->l0.i_cost16x8 <= thresh )
{
/*****************************************************************************
* x264_macroblock_analyse:
*****************************************************************************/
-int x264_macroblock_analyse( x264_t *h )
+void x264_macroblock_analyse( x264_t *h )
{
x264_mb_analysis_t analysis;
int i_cost = COST_MAX;
int i_thresh16x8;
int i_satd_inter, i_satd_intra;
- if( x264_mb_analyse_load_costs( h, &analysis ) )
- return -1;
+ x264_mb_analyse_load_costs( h, &analysis );
x264_mb_analyse_inter_p16x16( h, &analysis );
if( h->mb.i_type == P_SKIP )
- return 0;
+ return;
if( flags & X264_ANALYSE_PSUB16x16 )
{
x264_mb_analyse_p_rd( h, &analysis, X264_MIN(i_satd_inter, i_satd_intra) );
i_type = P_L0;
i_partition = D_16x16;
- i_cost = analysis.l0.me16x16.cost;
+ i_cost = analysis.l0.i_rd16x16;
COPY2_IF_LT( i_cost, analysis.l0.i_cost16x8, i_partition, D_16x8 );
COPY2_IF_LT( i_cost, analysis.l0.i_cost8x16, i_partition, D_8x16 );
COPY3_IF_LT( i_cost, analysis.l0.i_cost8x8, i_partition, D_8x8, i_type, P_8x8 );
else if( i_partition == D_16x16 )
{
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, analysis.l0.me16x16.i_ref );
+ analysis.l0.me16x16.cost = i_cost;
x264_me_refine_qpel_rd( h, &analysis.l0.me16x16, analysis.i_lambda2, 0, 0 );
}
else if( i_partition == D_16x8 )
}
else if( h->mb.i_sub_partition[i8x8] == D_L0_8x4 )
{
- x264_me_refine_qpel_rd( h, &analysis.l0.me8x4[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
- x264_me_refine_qpel_rd( h, &analysis.l0.me8x4[i8x8][1], analysis.i_lambda2, i8x8*4+2, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me8x4[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me8x4[i8x8][1], analysis.i_lambda2, i8x8*4+2, 0 );
}
else if( h->mb.i_sub_partition[i8x8] == D_L0_4x8 )
{
- x264_me_refine_qpel_rd( h, &analysis.l0.me4x8[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
- x264_me_refine_qpel_rd( h, &analysis.l0.me4x8[i8x8][1], analysis.i_lambda2, i8x8*4+1, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x8[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x8[i8x8][1], analysis.i_lambda2, i8x8*4+1, 0 );
}
else if( h->mb.i_sub_partition[i8x8] == D_L0_4x4 )
{
- x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
- x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][1], analysis.i_lambda2, i8x8*4+1, 0 );
- x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][2], analysis.i_lambda2, i8x8*4+2, 0 );
- x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][3], analysis.i_lambda2, i8x8*4+3, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][1], analysis.i_lambda2, i8x8*4+1, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][2], analysis.i_lambda2, i8x8*4+2, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][3], analysis.i_lambda2, i8x8*4+3, 0 );
}
}
}
const unsigned int flags = h->param.analyse.inter;
int i_type;
int i_partition;
- int i_satd_inter = 0; // shut up uninitialized warning
+ int i_satd_inter;
h->mb.b_skip_mc = 0;
- if( x264_mb_analyse_load_costs( h, &analysis ) )
- return -1;
+ x264_mb_analyse_load_costs( h, &analysis );
/* select best inter mode */
/* direct must be first */
{
h->mb.i_type = B_SKIP;
x264_analyse_update_cache( h, &analysis );
- return 0;
+ return;
}
}
}
}
+ i_satd_inter = i_cost;
+
if( analysis.i_mbrd )
{
- i_satd_inter = i_cost;
x264_mb_analyse_b_rd( h, &analysis, i_satd_inter );
i_type = B_SKIP;
i_cost = i_bskip_cost;
if( i_partition == D_16x16 )
{
if( i_type == B_L0_L0 )
+ {
+ analysis.l0.me16x16.cost = i_cost;
x264_me_refine_qpel_rd( h, &analysis.l0.me16x16, analysis.i_lambda2, 0, 0 );
+ }
else if( i_type == B_L1_L1 )
+ {
+ analysis.l1.me16x16.cost = i_cost;
x264_me_refine_qpel_rd( h, &analysis.l1.me16x16, analysis.i_lambda2, 0, 1 );
+ }
else if( i_type == B_BI_BI )
x264_me_refine_bidir_rd( h, &analysis.l0.me16x16, &analysis.l1.me16x16, i_biweight, 0, analysis.i_lambda2 );
}
x264_analyse_update_cache( h, &analysis );
+ /* In rare cases we can end up qpel-RDing our way back to a larger partition size
+ * without realizing it. Check for this and account for it if necessary. */
+ if( analysis.i_mbrd >= 2 )
+ {
+ /* Don't bother with bipred or 8x8-and-below, the odds are incredibly low. */
+ static const uint8_t check_mv_lists[X264_MBTYPE_MAX] = {[P_L0]=1, [B_L0_L0]=1, [B_L1_L1]=2};
+ int list = check_mv_lists[h->mb.i_type] - 1;
+ if( list >= 0 && h->mb.i_partition != D_16x16 &&
+ *(uint32_t*)&h->mb.cache.mv[list][x264_scan8[0]] == *(uint32_t*)&h->mb.cache.mv[list][x264_scan8[12]] &&
+ h->mb.cache.ref[list][x264_scan8[0]] == h->mb.cache.ref[list][x264_scan8[12]] )
+ h->mb.i_partition = D_16x16;
+ }
+
if( !analysis.i_mbrd )
x264_mb_analyse_transform( h );
x264_psy_trellis_init( h, 0 );
if( h->mb.b_trellis == 1 || h->mb.b_noise_reduction )
h->mb.i_skip_intra = 0;
- return 0;
}
/*-------------------- Update MB from the analysis ----------------------*/