#define _ISOC99_SOURCE
#include <math.h>
-#include <limits.h>
-#ifndef _MSC_VER
#include <unistd.h>
-#endif
#include "common/common.h"
#include "common/cpu.h"
/* 8x8 */
int i_cost8x8;
/* [ref][0] is 16x16 mv, [ref][1..4] are 8x8 mv from partition [0..3] */
- DECLARE_ALIGNED_4( int16_t mvc[32][5][2] );
+ ALIGNED_4( int16_t mvc[32][5][2] );
x264_me_t me8x8[4];
/* Sub 4x4 */
int i_lambda;
int i_lambda2;
int i_qp;
- int16_t *p_cost_mv;
+ uint16_t *p_cost_mv;
uint16_t *p_cost_ref0;
uint16_t *p_cost_ref1;
int i_mbrd;
943718, 1189010, 1498059, 1887436 /* 48 - 51 */
};
+const uint8_t x264_exp2_lut[64] = {
+ 0, 3, 6, 8, 11, 14, 17, 20, 23, 26, 29, 32, 36, 39, 42, 45,
+ 48, 52, 55, 58, 62, 65, 69, 72, 76, 80, 83, 87, 91, 94, 98, 102,
+ 106, 110, 114, 118, 122, 126, 130, 135, 139, 143, 147, 152, 156, 161, 165, 170,
+ 175, 179, 184, 189, 194, 198, 203, 208, 214, 219, 224, 229, 234, 240, 245, 250
+};
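+
+/* Each entry above is round(256*(2^(i/64)-1)). A hedged sketch of the
+ * intended use (the real helper lives elsewhere in x264; this is only an
+ * illustration): rebuild 2^(x/64) in 8.8 fixed point for integer x >= 0. */
+static inline uint32_t x264_exp2_sketch( int x )
+{
+ /* fractional part is a table lookup, integer part becomes a shift */
+ return (x264_exp2_lut[x&63] + 256) << (x>>6);
+}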
+
+const float x264_log2_lut[128] = {
+ 0.00000, 0.01123, 0.02237, 0.03342, 0.04439, 0.05528, 0.06609, 0.07682,
+ 0.08746, 0.09803, 0.10852, 0.11894, 0.12928, 0.13955, 0.14975, 0.15987,
+ 0.16993, 0.17991, 0.18982, 0.19967, 0.20945, 0.21917, 0.22882, 0.23840,
+ 0.24793, 0.25739, 0.26679, 0.27612, 0.28540, 0.29462, 0.30378, 0.31288,
+ 0.32193, 0.33092, 0.33985, 0.34873, 0.35755, 0.36632, 0.37504, 0.38370,
+ 0.39232, 0.40088, 0.40939, 0.41785, 0.42626, 0.43463, 0.44294, 0.45121,
+ 0.45943, 0.46761, 0.47573, 0.48382, 0.49185, 0.49985, 0.50779, 0.51570,
+ 0.52356, 0.53138, 0.53916, 0.54689, 0.55459, 0.56224, 0.56986, 0.57743,
+ 0.58496, 0.59246, 0.59991, 0.60733, 0.61471, 0.62205, 0.62936, 0.63662,
+ 0.64386, 0.65105, 0.65821, 0.66534, 0.67243, 0.67948, 0.68650, 0.69349,
+ 0.70044, 0.70736, 0.71425, 0.72110, 0.72792, 0.73471, 0.74147, 0.74819,
+ 0.75489, 0.76155, 0.76818, 0.77479, 0.78136, 0.78790, 0.79442, 0.80090,
+ 0.80735, 0.81378, 0.82018, 0.82655, 0.83289, 0.83920, 0.84549, 0.85175,
+ 0.85798, 0.86419, 0.87036, 0.87652, 0.88264, 0.88874, 0.89482, 0.90087,
+ 0.90689, 0.91289, 0.91886, 0.92481, 0.93074, 0.93664, 0.94251, 0.94837,
+ 0.95420, 0.96000, 0.96578, 0.97154, 0.97728, 0.98299, 0.98868, 0.99435,
+};
+
+/* Avoid an int/float conversion. */
+const float x264_log2_lz_lut[32] = {
+ 31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
+};
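+
+/* Hedged sketch of how the two tables above combine into a fast approximate
+ * log2 (x264's real helper lives in common/, not in this file; x264_clz() is
+ * assumed to return the number of leading zeros of a nonzero uint32_t): */
+static inline float x264_log2_sketch( uint32_t x )
+{
+ int lz = x264_clz( x );
+ /* x264_log2_lz_lut[lz] is floor(log2 x) == 31-lz, already stored as a
+ * float; the top 7 mantissa bits index the fractional table. */
+ return x264_log2_lut[(x<<lz>>24)&0x7f] + x264_log2_lz_lut[lz];
+}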
+
// should the intra and inter lambdas be different?
// I'm just matching the behaviour of deadzone quant.
static const int x264_trellis_lambda2_tab[2][52] = {
static void x264_analyse_update_cache( x264_t *h, x264_mb_analysis_t *a );
-/* Indexed by lambda instead of qp because, due to rounding,
- * some quantizers share lambdas. This saves memory. */
-uint16_t *x264_cost_mv_fpel[92][4];
-uint16_t x264_cost_ref[92][3][33];
+static uint16_t x264_cost_ref[92][3][33];
+static UNUSED x264_pthread_mutex_t cost_ref_mutex = X264_PTHREAD_MUTEX_INITIALIZER;
-/* initialize an array of lambda*nbits for all possible mvs */
-static void x264_mb_analyse_load_costs( x264_t *h, x264_mb_analysis_t *a )
+/* initialize an array of lambda*nbits for all possible mvs */
+int x264_analyse_init_costs( x264_t *h, int qp )
{
- static int16_t *p_cost_mv[92];
int i, j;
-
- if( !p_cost_mv[a->i_lambda] )
+ int lambda = x264_lambda_tab[qp];
+ if( h->cost_mv[lambda] )
+ return 0;
+ /* factor of 4 from qpel, 2 from sign, and 2 because mv can be opposite from mvp */
+ CHECKED_MALLOC( h->cost_mv[lambda], (4*4*2048 + 1) * sizeof(uint16_t) );
+ h->cost_mv[lambda] += 2*4*2048;
+ for( i = 0; i <= 2*4*2048; i++ )
+ {
+ h->cost_mv[lambda][-i] =
+ h->cost_mv[lambda][i] = lambda * (log2f(i+1)*2 + 0.718f + !!i) + .5f;
+ }
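+ /* h->cost_mv[lambda] now points at the table's midpoint, so it can be
+ * indexed directly by signed qpel mv components in [-2*4*2048, 2*4*2048]. */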
+ x264_pthread_mutex_lock( &cost_ref_mutex );
+ for( i = 0; i < 3; i++ )
+ for( j = 0; j < 33; j++ )
+ x264_cost_ref[lambda][i][j] = i ? lambda * bs_size_te( i, j ) : 0;
+ x264_pthread_mutex_unlock( &cost_ref_mutex );
+ if( h->param.analyse.i_me_method >= X264_ME_ESA && !h->cost_mv_fpel[lambda][0] )
{
- x264_emms();
- /* could be faster, but isn't called many times */
- /* factor of 4 from qpel, 2 from sign, and 2 because mv can be opposite from mvp */
- p_cost_mv[a->i_lambda] = x264_malloc( (4*4*2048 + 1) * sizeof(int16_t) );
- p_cost_mv[a->i_lambda] += 2*4*2048;
- for( i = 0; i <= 2*4*2048; i++ )
+ for( j=0; j<4; j++ )
{
- p_cost_mv[a->i_lambda][-i] =
- p_cost_mv[a->i_lambda][i] = a->i_lambda * (log2f(i+1)*2 + 0.718f + !!i) + .5f;
+ CHECKED_MALLOC( h->cost_mv_fpel[lambda][j], (4*2048 + 1) * sizeof(uint16_t) );
+ h->cost_mv_fpel[lambda][j] += 2*2048;
+ for( i = -2*2048; i < 2*2048; i++ )
+ h->cost_mv_fpel[lambda][j][i] = h->cost_mv[lambda][i*4+j];
}
- for( i = 0; i < 3; i++ )
- for( j = 0; j < 33; j++ )
- x264_cost_ref[a->i_lambda][i][j] = i ? a->i_lambda * bs_size_te( i, j ) : 0;
}
- a->p_cost_mv = p_cost_mv[a->i_lambda];
- a->p_cost_ref0 = x264_cost_ref[a->i_lambda][x264_clip3(h->sh.i_num_ref_idx_l0_active-1,0,2)];
- a->p_cost_ref1 = x264_cost_ref[a->i_lambda][x264_clip3(h->sh.i_num_ref_idx_l1_active-1,0,2)];
+ return 0;
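+/* CHECKED_MALLOC jumps here when an allocation fails. */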
+fail:
+ return -1;
+}
- /* FIXME is this useful for all me methods? */
- if( h->param.analyse.i_me_method >= X264_ME_ESA && !x264_cost_mv_fpel[a->i_lambda][0] )
+void x264_analyse_free_costs( x264_t *h )
+{
+ int i, j;
+ for( i = 0; i < 92; i++ )
{
- for( j=0; j<4; j++ )
- {
- x264_cost_mv_fpel[a->i_lambda][j] = x264_malloc( (4*2048 + 1) * sizeof(int16_t) );
- x264_cost_mv_fpel[a->i_lambda][j] += 2*2048;
- for( i = -2*2048; i < 2*2048; i++ )
- x264_cost_mv_fpel[a->i_lambda][j][i] = p_cost_mv[a->i_lambda][i*4+j];
- }
+ if( h->cost_mv[i] )
+ x264_free( h->cost_mv[i] - 2*4*2048 );
+ if( h->cost_mv_fpel[i][0] )
+ for( j = 0; j < 4; j++ )
+ x264_free( h->cost_mv_fpel[i][j] - 2*2048 );
}
}
+/* fetch the per-lambda mv/ref cost tables prepared by x264_analyse_init_costs */
+static void x264_mb_analyse_load_costs( x264_t *h, x264_mb_analysis_t *a )
+{
+ a->p_cost_mv = h->cost_mv[a->i_lambda];
+ a->p_cost_ref0 = x264_cost_ref[a->i_lambda][x264_clip3(h->sh.i_num_ref_idx_l0_active-1,0,2)];
+ a->p_cost_ref1 = x264_cost_ref[a->i_lambda][x264_clip3(h->sh.i_num_ref_idx_l1_active-1,0,2)];
+}
+
static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp )
{
int i = h->param.analyse.i_subpel_refine - (h->sh.i_type == SLICE_TYPE_B);
h->mb.i_trellis_lambda2[1][1] = x264_trellis_lambda2_tab[1][h->mb.i_chroma_qp];
}
h->mb.i_psy_rd_lambda = a->i_lambda;
- /* Adjusting chroma lambda based on QP offset hurts PSNR, so we'll leave it as part of psy-RD. */
- h->mb.i_chroma_lambda2_offset = h->mb.i_psy_rd ? x264_chroma_lambda2_offset_tab[h->mb.i_qp-h->mb.i_chroma_qp+12] : 256;
+ /* Adjusting chroma lambda based on QP offset hurts PSNR but improves visual quality. */
+ h->mb.i_chroma_lambda2_offset = h->param.analyse.b_psy ? x264_chroma_lambda2_offset_tab[h->mb.i_qp-h->mb.i_chroma_qp+12] : 256;
h->mb.i_me_method = h->param.analyse.i_me_method;
h->mb.i_subpel_refine = h->param.analyse.i_subpel_refine;
int i_ref = i ? h->i_ref1 : h->i_ref0;
for( j=0; j<i_ref; j++ )
{
- x264_frame_cond_wait( fref[j], thresh );
- thread_mvy_range = X264_MIN( thread_mvy_range, fref[j]->i_lines_completed - pix_y );
+ x264_frame_cond_wait( fref[j]->orig, thresh );
+ thread_mvy_range = X264_MIN( thread_mvy_range, fref[j]->orig->i_lines_completed - pix_y );
}
}
+
if( h->param.b_deterministic )
thread_mvy_range = h->param.analyse.i_mv_range_thread;
if( h->mb.b_interlaced )
thread_mvy_range >>= 1;
+
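+ /* Weight the newly available rows of each weighted L0 reference before they
+ * can be read by this row's motion search; h->fenc->i_lines_weighted tracks
+ * how many rows have been weighted so far. */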
+ for( j=0; j<h->i_ref0; j++ )
+ {
+ if( h->sh.weight[j][0].weightfn )
+ {
+ x264_frame_t *frame = h->fref0[j];
+ int width = frame->i_width[0] + 2*PADH;
+ int i_padv = PADV << h->param.b_interlaced;
+ int offset, height;
+ uint8_t *src = frame->filtered[0] - frame->i_stride[0]*i_padv - PADH;
+ int k;
+ height = X264_MIN( 16 + thread_mvy_range + pix_y + i_padv, h->fref0[j]->i_lines[0] + i_padv*2 ) - h->fenc->i_lines_weighted;
+ offset = h->fenc->i_lines_weighted*frame->i_stride[0];
+ h->fenc->i_lines_weighted += height;
+ if( height )
+ {
+ for( k = j; k < h->i_ref0; k++ )
+ if( h->sh.weight[k][0].weightfn )
+ {
+ uint8_t *dst = h->fenc->weighted[k] - h->fenc->i_stride[0]*i_padv - PADH;
+ x264_weight_scale_plane( h, dst + offset, frame->i_stride[0],
+ src + offset, frame->i_stride[0],
+ width, height, &h->sh.weight[k][0] );
+ }
+ }
+ break;
+ }
+ }
}
h->mb.mv_min[1] = 4*( -16*mb_y - 24 );
/* Max = 4 */
static void predict_16x16_mode_available( unsigned int i_neighbour, int *mode, int *pi_count )
{
- if( i_neighbour & MB_TOPLEFT )
+ int b_top = i_neighbour & MB_TOP;
+ int b_left = i_neighbour & MB_LEFT;
+ if( b_top && b_left )
{
/* top and left available */
*mode++ = I_PRED_16x16_V;
*mode++ = I_PRED_16x16_H;
*mode++ = I_PRED_16x16_DC;
- *mode++ = I_PRED_16x16_P;
- *pi_count = 4;
+ *pi_count = 3;
+ if( i_neighbour & MB_TOPLEFT )
+ {
+ /* top left available */
+ *mode++ = I_PRED_16x16_P;
+ *pi_count = 4;
+ }
}
- else if( i_neighbour & MB_LEFT )
+ else if( b_left )
{
/* left available*/
*mode++ = I_PRED_16x16_DC_LEFT;
*mode++ = I_PRED_16x16_H;
*pi_count = 2;
}
- else if( i_neighbour & MB_TOP )
+ else if( b_top )
{
/* top available*/
*mode++ = I_PRED_16x16_DC_TOP;
/* Max = 4 */
static void predict_8x8chroma_mode_available( unsigned int i_neighbour, int *mode, int *pi_count )
{
- if( i_neighbour & MB_TOPLEFT )
+ int b_top = i_neighbour & MB_TOP;
+ int b_left = i_neighbour & MB_LEFT;
+ if( b_top && b_left )
{
/* top and left available */
*mode++ = I_PRED_CHROMA_V;
*mode++ = I_PRED_CHROMA_H;
*mode++ = I_PRED_CHROMA_DC;
- *mode++ = I_PRED_CHROMA_P;
- *pi_count = 4;
+ *pi_count = 3;
+ if( i_neighbour & MB_TOPLEFT )
+ {
+ /* top left available */
+ *mode++ = I_PRED_CHROMA_P;
+ *pi_count = 4;
+ }
}
- else if( i_neighbour & MB_LEFT )
+ else if( b_left )
{
/* left available*/
*mode++ = I_PRED_CHROMA_DC_LEFT;
*mode++ = I_PRED_CHROMA_H;
*pi_count = 2;
}
- else if( i_neighbour & MB_TOP )
+ else if( b_top )
{
/* top available*/
*mode++ = I_PRED_CHROMA_DC_TOP;
static void predict_4x4_mode_available( unsigned int i_neighbour,
int *mode, int *pi_count )
{
- int b_l = i_neighbour & MB_LEFT;
- int b_t = i_neighbour & MB_TOP;
-
- if( b_l && b_t )
+ int b_top = i_neighbour & MB_TOP;
+ int b_left = i_neighbour & MB_LEFT;
+ if( b_top && b_left )
{
*pi_count = 6;
*mode++ = I_PRED_4x4_DC;
*mode++ = I_PRED_4x4_VL;
*mode++ = I_PRED_4x4_HU;
}
- else if( b_l )
+ else if( b_left )
{
*mode++ = I_PRED_4x4_DC_LEFT;
*mode++ = I_PRED_4x4_H;
*mode++ = I_PRED_4x4_HU;
*pi_count = 3;
}
- else if( b_t )
+ else if( b_top )
{
*mode++ = I_PRED_4x4_DC_TOP;
*mode++ = I_PRED_4x4_V;
/* For trellis=2, we need to do this for both sizes of DCT, for trellis=1 we only need to use it on the chosen mode. */
static inline void x264_psy_trellis_init( x264_t *h, int do_both_dct )
{
- DECLARE_ALIGNED_16( int16_t dct8x8[4][8][8] );
- DECLARE_ALIGNED_16( int16_t dct4x4[16][4][4] );
- DECLARE_ALIGNED_16( static uint8_t zero[16*FDEC_STRIDE] ) = {0};
+ ALIGNED_ARRAY_16( int16_t, dct8x8,[4],[64] );
+ ALIGNED_ARRAY_16( int16_t, dct4x4,[16],[16] );
+ ALIGNED_16( static uint8_t zero[16*FDEC_STRIDE] ) = {0};
int i;
if( do_both_dct || h->mb.b_transform_8x8 )
/* Pre-calculate fenc satd scores for psy RD, minus DC coefficients */
static inline void x264_mb_cache_fenc_satd( x264_t *h )
{
- DECLARE_ALIGNED_16( static uint8_t zero[16] ) = {0};
+ ALIGNED_16( static uint8_t zero[16] ) = {0};
uint8_t *fenc;
int x, y, satd_sum = 0, sa8d_sum = 0;
if( h->param.analyse.i_trellis == 2 && h->mb.i_psy_trellis )
p_srcc[0] = h->mb.pic.p_fenc[1];
p_srcc[1] = h->mb.pic.p_fenc[2];
- predict_8x8chroma_mode_available( h->mb.i_neighbour, predict_mode, &i_max );
+ predict_8x8chroma_mode_available( h->mb.i_neighbour_intra, predict_mode, &i_max );
a->i_satd_i8x8chroma = COST_MAX;
if( i_max == 4 && b_merged_satd )
{
/*---------------- Try all mode and calculate their score ---------------*/
/* 16x16 prediction selection */
- predict_16x16_mode_available( h->mb.i_neighbour, predict_mode, &i_max );
+ predict_16x16_mode_available( h->mb.i_neighbour_intra, predict_mode, &i_max );
if( b_merged_satd && i_max == 4 )
{
/* 8x8 prediction selection */
if( flags & X264_ANALYSE_I8x8 )
{
- DECLARE_ALIGNED_16( uint8_t edge[33] );
+ ALIGNED_ARRAY_16( uint8_t, edge,[33] );
x264_pixel_cmp_t sa8d = (h->pixf.mbcmp[0] == h->pixf.satd[0]) ? h->pixf.sa8d[PIXEL_8x8] : h->pixf.mbcmp[PIXEL_8x8];
int i_satd_thresh = a->i_mbrd ? COST_MAX : X264_MIN( i_satd_inter, a->i_satd_i16x16 );
int i_cost = 0;
else
h->predict_8x8[i_mode]( p_dst_by, edge );
- i_satd = sa8d( p_dst_by, FDEC_STRIDE, p_src_by, FENC_STRIDE )
- + a->i_lambda * (i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) ? 1 : 4);
+ i_satd = sa8d( p_dst_by, FDEC_STRIDE, p_src_by, FENC_STRIDE ) + a->i_lambda * 4;
+ if( i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) )
+ i_satd -= a->i_lambda * 3;
COPY2_IF_LT( i_best, i_satd, a->i_predict8x8[idx], i_mode );
a->i_satd_i8x8_dir[i_mode][idx] = i_satd;
if( h->mb.i_skip_intra )
{
h->mc.copy[PIXEL_16x16]( h->mb.pic.i8x8_fdec_buf, 16, p_dst, FDEC_STRIDE, 16 );
- h->mb.pic.i8x8_nnz_buf[0] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]];
- h->mb.pic.i8x8_nnz_buf[1] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]];
- h->mb.pic.i8x8_nnz_buf[2] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]];
- h->mb.pic.i8x8_nnz_buf[3] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]];
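+ /* M32()/M16()/M64() load and store through may_alias unions (defined in
+ * common/), sidestepping the strict-aliasing problems of raw integer casts. */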
+ h->mb.pic.i8x8_nnz_buf[0] = M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] );
+ h->mb.pic.i8x8_nnz_buf[1] = M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] );
+ h->mb.pic.i8x8_nnz_buf[2] = M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] );
+ h->mb.pic.i8x8_nnz_buf[3] = M32( &h->mb.cache.non_zero_count[x264_scan8[10]] );
h->mb.pic.i8x8_cbp = h->mb.i_cbp_luma;
if( h->mb.i_skip_intra == 2 )
h->mc.memcpy_aligned( h->mb.pic.i8x8_dct_buf, h->dct.luma8x8, sizeof(h->mb.pic.i8x8_dct_buf) );
if( (h->mb.i_neighbour4[idx] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
/* emulate missing topright samples */
- *(uint32_t*) &p_dst_by[4 - FDEC_STRIDE] = p_dst_by[3 - FDEC_STRIDE] * 0x01010101U;
+ M32( &p_dst_by[4 - FDEC_STRIDE] ) = p_dst_by[3 - FDEC_STRIDE] * 0x01010101U;
if( b_merged_satd && i_max >= 6 )
{
h->pixf.intra_mbcmp_x3_4x4( p_src_by, p_dst_by, satd );
satd[i_pred_mode] -= 3 * a->i_lambda;
for( i=2; i>=0; i-- )
- COPY2_IF_LT( i_best, satd[i] + 4 * a->i_lambda,
- a->i_predict4x4[idx], i );
+ COPY2_IF_LT( i_best, satd[i], a->i_predict4x4[idx], i );
i = 3;
}
else
else
h->predict_4x4[i_mode]( p_dst_by );
- i_satd = h->pixf.mbcmp[PIXEL_4x4]( p_dst_by, FDEC_STRIDE,
- p_src_by, FENC_STRIDE )
- + a->i_lambda * (i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) ? 1 : 4);
+ i_satd = h->pixf.mbcmp[PIXEL_4x4]( p_dst_by, FDEC_STRIDE, p_src_by, FENC_STRIDE );
+ if( i_pred_mode == x264_mb_pred_mode4x4_fix(i_mode) )
+ i_satd -= a->i_lambda * 3;
COPY2_IF_LT( i_best, i_satd, a->i_predict4x4[idx], i_mode );
}
- i_cost += i_best;
+ i_cost += i_best + 4 * a->i_lambda;
if( i_cost > i_satd_thresh || idx == 15 )
break;
if( h->mb.i_skip_intra )
{
h->mc.copy[PIXEL_16x16]( h->mb.pic.i4x4_fdec_buf, 16, p_dst, FDEC_STRIDE, 16 );
- h->mb.pic.i4x4_nnz_buf[0] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]];
- h->mb.pic.i4x4_nnz_buf[1] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]];
- h->mb.pic.i4x4_nnz_buf[2] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]];
- h->mb.pic.i4x4_nnz_buf[3] = *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]];
+ h->mb.pic.i4x4_nnz_buf[0] = M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] );
+ h->mb.pic.i4x4_nnz_buf[1] = M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] );
+ h->mb.pic.i4x4_nnz_buf[2] = M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] );
+ h->mb.pic.i4x4_nnz_buf[3] = M32( &h->mb.cache.non_zero_count[x264_scan8[10]] );
h->mb.pic.i4x4_cbp = h->mb.i_cbp_luma;
if( h->mb.i_skip_intra == 2 )
h->mc.memcpy_aligned( h->mb.pic.i4x4_dct_buf, h->dct.luma4x4, sizeof(h->mb.pic.i4x4_dct_buf) );
int old_pred_mode = a->i_predict16x16;
i_thresh = a->i_satd_i16x16_dir[old_pred_mode] * 9/8;
i_best = a->i_satd_i16x16;
- predict_16x16_mode_available( h->mb.i_neighbour, predict_mode, &i_max );
+ predict_16x16_mode_available( h->mb.i_neighbour_intra, predict_mode, &i_max );
for( i = 0; i < i_max; i++ )
{
int i_mode = predict_mode[i];
}
/* RD selection for chroma prediction */
- predict_8x8chroma_mode_available( h->mb.i_neighbour, predict_mode, &i_max );
+ predict_8x8chroma_mode_available( h->mb.i_neighbour_intra, predict_mode, &i_max );
if( i_max > 1 )
{
i_thresh = a->i_satd_i8x8chroma * 5/4;
if( (h->mb.i_neighbour4[idx] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
/* emulate missing topright samples */
- *(uint32_t*) &p_dst_by[4 - FDEC_STRIDE] = p_dst_by[3 - FDEC_STRIDE] * 0x01010101U;
+ M32( &p_dst_by[4 - FDEC_STRIDE] ) = p_dst_by[3 - FDEC_STRIDE] * 0x01010101U;
for( i = 0; i < i_max; i++ )
{
{
a->i_predict4x4[idx] = i_mode;
i_best = i_satd;
- pels[0] = *(uint32_t*)(p_dst_by+0*FDEC_STRIDE);
- pels[1] = *(uint32_t*)(p_dst_by+1*FDEC_STRIDE);
- pels[2] = *(uint32_t*)(p_dst_by+2*FDEC_STRIDE);
- pels[3] = *(uint32_t*)(p_dst_by+3*FDEC_STRIDE);
+ pels[0] = M32( p_dst_by+0*FDEC_STRIDE );
+ pels[1] = M32( p_dst_by+1*FDEC_STRIDE );
+ pels[2] = M32( p_dst_by+2*FDEC_STRIDE );
+ pels[3] = M32( p_dst_by+3*FDEC_STRIDE );
i_nnz = h->mb.cache.non_zero_count[x264_scan8[idx]];
}
}
- *(uint32_t*)(p_dst_by+0*FDEC_STRIDE) = pels[0];
- *(uint32_t*)(p_dst_by+1*FDEC_STRIDE) = pels[1];
- *(uint32_t*)(p_dst_by+2*FDEC_STRIDE) = pels[2];
- *(uint32_t*)(p_dst_by+3*FDEC_STRIDE) = pels[3];
+ M32( p_dst_by+0*FDEC_STRIDE ) = pels[0];
+ M32( p_dst_by+1*FDEC_STRIDE ) = pels[1];
+ M32( p_dst_by+2*FDEC_STRIDE ) = pels[2];
+ M32( p_dst_by+3*FDEC_STRIDE ) = pels[3];
h->mb.cache.non_zero_count[x264_scan8[idx]] = i_nnz;
h->mb.cache.intra4x4_pred_mode[x264_scan8[idx]] = a->i_predict4x4[idx];
}
else if( h->mb.i_type == I_8x8 )
{
- DECLARE_ALIGNED_16( uint8_t edge[33] );
+ ALIGNED_ARRAY_16( uint8_t, edge,[33] );
for( idx = 0; idx < 4; idx++ )
{
uint64_t pels_h = 0;
uint8_t pels_v[7];
- uint16_t i_nnz[2];
+ uint16_t i_nnz[2] = {0}; //shut up gcc
uint8_t *p_dst_by;
int j;
int cbp_luma_new = 0;
cbp_luma_new = h->mb.i_cbp_luma;
i_best = i_satd;
- pels_h = *(uint64_t*)(p_dst_by+7*FDEC_STRIDE);
+ pels_h = M64( p_dst_by+7*FDEC_STRIDE );
if( !(idx&1) )
for( j=0; j<7; j++ )
pels_v[j] = p_dst_by[7+j*FDEC_STRIDE];
- i_nnz[0] = *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[4*idx+0]];
- i_nnz[1] = *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[4*idx+2]];
+ i_nnz[0] = M16( &h->mb.cache.non_zero_count[x264_scan8[4*idx+0]] );
+ i_nnz[1] = M16( &h->mb.cache.non_zero_count[x264_scan8[4*idx+2]] );
}
}
a->i_cbp_i8x8_luma = cbp_luma_new;
- *(uint64_t*)(p_dst_by+7*FDEC_STRIDE) = pels_h;
+ M64( p_dst_by+7*FDEC_STRIDE ) = pels_h;
if( !(idx&1) )
for( j=0; j<7; j++ )
p_dst_by[7+j*FDEC_STRIDE] = pels_v[j];
- *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[4*idx+0]] = i_nnz[0];
- *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[4*idx+2]] = i_nnz[1];
+ M16( &h->mb.cache.non_zero_count[x264_scan8[4*idx+0]] ) = i_nnz[0];
+ M16( &h->mb.cache.non_zero_count[x264_scan8[4*idx+2]] ) = i_nnz[1];
x264_macroblock_cache_intra8x8_pred( h, 2*x, 2*y, a->i_predict8x8[idx] );
}
}
#define LOAD_FENC( m, src, xoff, yoff) \
+ (m)->p_cost_mv = a->p_cost_mv; \
(m)->i_stride[0] = h->mb.pic.i_stride[0]; \
(m)->i_stride[1] = h->mb.pic.i_stride[1]; \
(m)->p_fenc[0] = &(src)[0][(xoff)+(yoff)*FENC_STRIDE]; \
(m)->p_fenc[2] = &(src)[2][((xoff)>>1)+((yoff)>>1)*FENC_STRIDE];
#define LOAD_HPELS(m, src, list, ref, xoff, yoff) \
- (m)->p_fref[0] = &(src)[0][(xoff)+(yoff)*(m)->i_stride[0]]; \
+ (m)->p_fref_w = (m)->p_fref[0] = &(src)[0][(xoff)+(yoff)*(m)->i_stride[0]]; \
(m)->p_fref[1] = &(src)[1][(xoff)+(yoff)*(m)->i_stride[0]]; \
(m)->p_fref[2] = &(src)[2][(xoff)+(yoff)*(m)->i_stride[0]]; \
(m)->p_fref[3] = &(src)[3][(xoff)+(yoff)*(m)->i_stride[0]]; \
(m)->p_fref[4] = &(src)[4][((xoff)>>1)+((yoff)>>1)*(m)->i_stride[1]]; \
(m)->p_fref[5] = &(src)[5][((xoff)>>1)+((yoff)>>1)*(m)->i_stride[1]]; \
- (m)->integral = &h->mb.pic.p_integral[list][ref][(xoff)+(yoff)*(m)->i_stride[0]];
+ (m)->integral = &h->mb.pic.p_integral[list][ref][(xoff)+(yoff)*(m)->i_stride[0]]; \
+ (m)->weight = weight_none; \
+ (m)->i_ref = ref;
+
+#define LOAD_WPELS(m, src, list, ref, xoff, yoff) \
+ (m)->p_fref_w = &(src)[(xoff)+(yoff)*(m)->i_stride[0]]; \
+ (m)->weight = h->sh.weight[ref];
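+
+/* lambda*bits of coding a ref index given the active ref count; the te(v)
+ * sizes were cached into x264_cost_ref by x264_analyse_init_costs. */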
#define REF_COST(list, ref) \
(a->p_cost_ref##list[ref])
{
x264_me_t m;
int i_ref, i_mvc;
- DECLARE_ALIGNED_4( int16_t mvc[8][2] );
+ ALIGNED_4( int16_t mvc[8][2] );
int i_halfpel_thresh = INT_MAX;
int *p_halfpel_thresh = h->mb.pic.i_fref[0]>1 ? &i_halfpel_thresh : NULL;
/* 16x16 Search on all ref frame */
m.i_pixel = PIXEL_16x16;
- m.p_cost_mv = a->p_cost_mv;
LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 0 );
a->l0.me16x16.cost = INT_MAX;
const int i_ref_cost = REF_COST( 0, i_ref );
i_halfpel_thresh -= i_ref_cost;
m.i_ref_cost = i_ref_cost;
- m.i_ref = i_ref;
/* search with ref */
LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 0 );
+ LOAD_WPELS( &m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 0, 0 );
+
x264_mb_predict_mv_16x16( h, 0, i_ref, m.mvp );
x264_mb_predict_mv_ref16x16( h, 0, i_ref, mvc, &i_mvc );
x264_me_search_ref( h, &m, mvc, i_mvc, p_halfpel_thresh );
h->mc.memcpy_aligned( &a->l0.me16x16, &m, sizeof(x264_me_t) );
/* save mv for predicting neighbors */
- *(uint32_t*)a->l0.mvc[i_ref][0] =
- *(uint32_t*)h->mb.mvr[0][i_ref][h->mb.i_mb_xy] = *(uint32_t*)m.mv;
+ CP32( a->l0.mvc[i_ref][0], m.mv );
+ CP32( h->mb.mvr[0][i_ref][h->mb.i_mb_xy], m.mv );
}
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, a->l0.me16x16.i_ref );
if( a->i_mbrd )
{
x264_mb_cache_fenc_satd( h );
- if( a->l0.me16x16.i_ref == 0 && *(uint32_t*)a->l0.me16x16.mv == *(uint32_t*)h->mb.cache.pskip_mv )
+ if( a->l0.me16x16.i_ref == 0 && M32( a->l0.me16x16.mv ) == M32( h->mb.cache.pskip_mv ) )
{
h->mb.i_partition = D_16x16;
x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, 0, a->l0.me16x16.mv );
a->l0.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
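+ /* If the RD check quantized away all residual, recode this MB as a skip. */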
+ if( !(h->mb.i_cbp_luma|h->mb.i_cbp_chroma) )
+ h->mb.i_type = P_SKIP;
}
}
}
}
for( i_ref = 0; i_ref <= i_maxref; i_ref++ )
- *(uint32_t*)a->l0.mvc[i_ref][0] = *(uint32_t*)h->mb.mvr[0][i_ref][h->mb.i_mb_xy];
+ CP32( a->l0.mvc[i_ref][0], h->mb.mvr[0][i_ref][h->mb.i_mb_xy] );
for( i = 0; i < 4; i++ )
{
const int y8 = i/2;
m.i_pixel = PIXEL_8x8;
- m.p_cost_mv = a->p_cost_mv;
LOAD_FENC( &m, p_fenc, 8*x8, 8*y8 );
l0m->cost = INT_MAX;
const int i_ref_cost = REF_COST( 0, i_ref );
i_halfpel_thresh -= i_ref_cost;
m.i_ref_cost = i_ref_cost;
- m.i_ref = i_ref;
LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*x8, 8*y8 );
+ LOAD_WPELS( &m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 8*x8, 8*y8 );
+
x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, i_ref );
x264_mb_predict_mv( h, 0, 4*i, 2, m.mvp );
x264_me_search_ref( h, &m, a->l0.mvc[i_ref], i+1, p_halfpel_thresh );
m.cost += i_ref_cost;
i_halfpel_thresh += i_ref_cost;
- *(uint32_t*)a->l0.mvc[i_ref][i+1] = *(uint32_t*)m.mv;
+ CP32( a->l0.mvc[i_ref][i+1], m.mv );
if( m.cost < l0m->cost )
h->mc.memcpy_aligned( l0m, &m, sizeof(x264_me_t) );
x264_macroblock_cache_mv_ptr( h, 2*x8, 2*y8, 2, 2, 0, l0m->mv );
x264_macroblock_cache_ref( h, 2*x8, 2*y8, 2, 2, 0, l0m->i_ref );
- /* mb type cost */
- l0m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
+ /* If CABAC is on and we're not doing sub-8x8 analysis, the costs
+ are effectively zero. */
+ if( !h->param.b_cabac || (h->param.analyse.inter & X264_ANALYSE_PSUB8x8) )
+ l0m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
}
a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost +
{
const int i_ref = a->l0.me16x16.i_ref;
const int i_ref_cost = h->param.b_cabac || i_ref ? REF_COST( 0, i_ref ) : 0;
- uint8_t **p_fref = h->mb.pic.p_fref[0][i_ref];
uint8_t **p_fenc = h->mb.pic.p_fenc;
int i_mvc;
int16_t (*mvc)[2] = a->l0.mvc[i_ref];
h->mb.i_partition = D_8x8;
i_mvc = 1;
- *(uint32_t*)mvc[0] = *(uint32_t*)a->l0.me16x16.mv;
+ CP32( mvc[0], a->l0.me16x16.mv );
for( i = 0; i < 4; i++ )
{
const int y8 = i/2;
m->i_pixel = PIXEL_8x8;
- m->p_cost_mv = a->p_cost_mv;
m->i_ref_cost = i_ref_cost;
- m->i_ref = i_ref;
LOAD_FENC( m, p_fenc, 8*x8, 8*y8 );
- LOAD_HPELS( m, p_fref, 0, i_ref, 8*x8, 8*y8 );
+ LOAD_HPELS( m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*x8, 8*y8 );
+ LOAD_WPELS( m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 8*x8, 8*y8 );
+
x264_mb_predict_mv( h, 0, 4*i, 2, m->mvp );
x264_me_search( h, m, mvc, i_mvc );
x264_macroblock_cache_mv_ptr( h, 2*x8, 2*y8, 2, 2, 0, m->mv );
- *(uint32_t*)mvc[i_mvc] = *(uint32_t*)m->mv;
+ CP32( mvc[i_mvc], m->mv );
i_mvc++;
/* mb type cost */
m->cost += i_ref_cost;
- m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
+ if( !h->param.b_cabac || (h->param.analyse.inter & X264_ANALYSE_PSUB8x8) )
+ m->cost += a->i_lambda * i_sub_mb_p_cost_table[D_L0_8x8];
}
a->l0.i_cost8x8 = a->l0.me8x8[0].cost + a->l0.me8x8[1].cost +
{
x264_me_t m;
uint8_t **p_fenc = h->mb.pic.p_fenc;
- DECLARE_ALIGNED_4( int16_t mvc[3][2] );
+ ALIGNED_4( int16_t mvc[3][2] );
int i, j;
/* XXX Needed for x264_mb_predict_mv */
const int i_ref8s = ( ref8[0] == ref8[1] ) ? 1 : 2;
m.i_pixel = PIXEL_16x8;
- m.p_cost_mv = a->p_cost_mv;
LOAD_FENC( &m, p_fenc, 0, 8*i );
l0m->cost = INT_MAX;
const int i_ref = ref8[j];
const int i_ref_cost = REF_COST( 0, i_ref );
m.i_ref_cost = i_ref_cost;
- m.i_ref = i_ref;
/* if we skipped the 16x16 predictor, we wouldn't have to copy anything... */
- *(uint32_t*)mvc[0] = *(uint32_t*)a->l0.mvc[i_ref][0];
- *(uint32_t*)mvc[1] = *(uint32_t*)a->l0.mvc[i_ref][2*i+1];
- *(uint32_t*)mvc[2] = *(uint32_t*)a->l0.mvc[i_ref][2*i+2];
+ CP32( mvc[0], a->l0.mvc[i_ref][0] );
+ CP32( mvc[1], a->l0.mvc[i_ref][2*i+1] );
+ CP32( mvc[2], a->l0.mvc[i_ref][2*i+2] );
LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 0, 8*i );
+ LOAD_WPELS( &m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 0, 8*i );
+
x264_macroblock_cache_ref( h, 0, 2*i, 4, 2, 0, i_ref );
x264_mb_predict_mv( h, 0, 8*i, 4, m.mvp );
x264_me_search( h, &m, mvc, 3 );
{
x264_me_t m;
uint8_t **p_fenc = h->mb.pic.p_fenc;
- DECLARE_ALIGNED_4( int16_t mvc[3][2] );
+ ALIGNED_4( int16_t mvc[3][2] );
int i, j;
/* XXX Needed for x264_mb_predict_mv */
const int i_ref8s = ( ref8[0] == ref8[1] ) ? 1 : 2;
m.i_pixel = PIXEL_8x16;
- m.p_cost_mv = a->p_cost_mv;
LOAD_FENC( &m, p_fenc, 8*i, 0 );
l0m->cost = INT_MAX;
const int i_ref = ref8[j];
const int i_ref_cost = REF_COST( 0, i_ref );
m.i_ref_cost = i_ref_cost;
- m.i_ref = i_ref;
- *(uint32_t*)mvc[0] = *(uint32_t*)a->l0.mvc[i_ref][0];
- *(uint32_t*)mvc[1] = *(uint32_t*)a->l0.mvc[i_ref][i+1];
- *(uint32_t*)mvc[2] = *(uint32_t*)a->l0.mvc[i_ref][i+3];
+ CP32( mvc[0], a->l0.mvc[i_ref][0] );
+ CP32( mvc[1], a->l0.mvc[i_ref][i+1] );
+ CP32( mvc[2], a->l0.mvc[i_ref][i+3] );
LOAD_HPELS( &m, h->mb.pic.p_fref[0][i_ref], 0, i_ref, 8*i, 0 );
+ LOAD_WPELS( &m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 8*i, 0 );
+
x264_macroblock_cache_ref( h, 2*i, 0, 2, 4, 0, i_ref );
x264_mb_predict_mv( h, 0, 4*i, 2, m.mvp );
x264_me_search( h, &m, mvc, 3 );
static int x264_mb_analyse_inter_p4x4_chroma( x264_t *h, x264_mb_analysis_t *a, uint8_t **p_fref, int i8x8, int pixel )
{
- DECLARE_ALIGNED_8( uint8_t pix1[16*8] );
+ ALIGNED_8( uint8_t pix1[16*8] );
uint8_t *pix2 = pix1+8;
const int i_stride = h->mb.pic.i_stride[1];
const int or = 4*(i8x8&1) + 2*(i8x8&2)*i_stride;
const int oe = 4*(i8x8&1) + 2*(i8x8&2)*FENC_STRIDE;
+ const int i_ref = a->l0.me8x8[i8x8].i_ref;
+ const int mvy_offset = h->mb.b_interlaced & i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
+ x264_weight_t *weight = h->sh.weight[i_ref];
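+ /* Motion compensate one sub-8x8 chroma partition on both chroma planes
+ * (U into pix1, V into pix2), applying per-plane weights when weighted
+ * prediction is in use. */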
#define CHROMA4x4MC( width, height, me, x, y ) \
- h->mc.mc_chroma( &pix1[x+y*16], 16, &p_fref[4][or+x+y*i_stride], i_stride, (me).mv[0], (me).mv[1], width, height ); \
- h->mc.mc_chroma( &pix2[x+y*16], 16, &p_fref[5][or+x+y*i_stride], i_stride, (me).mv[0], (me).mv[1], width, height );
+ h->mc.mc_chroma( &pix1[x+y*16], 16, &p_fref[4][or+x+y*i_stride], i_stride, (me).mv[0], (me).mv[1]+mvy_offset, width, height ); \
+ if( weight[1].weightfn ) \
+ weight[1].weightfn[width>>2]( &pix1[x+y*16], 16, &pix1[x+y*16], 16, &weight[1], height ); \
+ h->mc.mc_chroma( &pix2[x+y*16], 16, &p_fref[5][or+x+y*i_stride], i_stride, (me).mv[0], (me).mv[1]+mvy_offset, width, height ); \
+ if( weight[2].weightfn ) \
+ weight[2].weightfn[width>>2]( &pix2[x+y*16], 16, &pix2[x+y*16], 16, &weight[2], height );
+
if( pixel == PIXEL_4x4 )
{
- CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][0], 0,0 );
- CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][1], 2,0 );
- CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][2], 0,2 );
- CHROMA4x4MC( 2,2, a->l0.me4x4[i8x8][3], 2,2 );
+ x264_me_t *m = a->l0.me4x4[i8x8];
+ CHROMA4x4MC( 2,2, m[0], 0,0 );
+ CHROMA4x4MC( 2,2, m[1], 2,0 );
+ CHROMA4x4MC( 2,2, m[2], 0,2 );
+ CHROMA4x4MC( 2,2, m[3], 2,2 );
}
else if( pixel == PIXEL_8x4 )
{
- CHROMA4x4MC( 4,2, a->l0.me8x4[i8x8][0], 0,0 );
- CHROMA4x4MC( 4,2, a->l0.me8x4[i8x8][1], 0,2 );
+ x264_me_t *m = a->l0.me8x4[i8x8];
+ CHROMA4x4MC( 4,2, m[0], 0,0 );
+ CHROMA4x4MC( 4,2, m[1], 0,2 );
}
else
{
- CHROMA4x4MC( 2,4, a->l0.me4x8[i8x8][0], 0,0 );
- CHROMA4x4MC( 2,4, a->l0.me4x8[i8x8][1], 2,0 );
+ x264_me_t *m = a->l0.me4x8[i8x8];
+ CHROMA4x4MC( 2,4, m[0], 0,0 );
+ CHROMA4x4MC( 2,4, m[1], 2,0 );
}
return h->pixf.mbcmp[PIXEL_4x4]( &h->mb.pic.p_fenc[1][oe], FENC_STRIDE, pix1, 16 )
x264_me_t *m = &a->l0.me4x4[i8x8][i4x4];
m->i_pixel = PIXEL_4x4;
- m->p_cost_mv = a->p_cost_mv;
LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
LOAD_HPELS( m, p_fref, 0, i_ref, 4*x4, 4*y4 );
+ LOAD_WPELS( m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 4*x4, 4*y4 );
x264_mb_predict_mv( h, 0, idx, 1, m->mvp );
x264_me_search( h, m, &a->l0.me8x8[i8x8].mv, i_mvc );
x264_me_t *m = &a->l0.me8x4[i8x8][i8x4];
m->i_pixel = PIXEL_8x4;
- m->p_cost_mv = a->p_cost_mv;
LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
LOAD_HPELS( m, p_fref, 0, i_ref, 4*x4, 4*y4 );
+ LOAD_WPELS( m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 4*x4, 4*y4 );
x264_mb_predict_mv( h, 0, idx, 2, m->mvp );
x264_me_search( h, m, &a->l0.me4x4[i8x8][0].mv, i_mvc );
x264_me_t *m = &a->l0.me4x8[i8x8][i4x8];
m->i_pixel = PIXEL_4x8;
- m->p_cost_mv = a->p_cost_mv;
LOAD_FENC( m, p_fenc, 4*x4, 4*y4 );
LOAD_HPELS( m, p_fref, 0, i_ref, 4*x4, 4*y4 );
+ LOAD_WPELS( m, h->mb.pic.p_fref_w[i_ref], 0, i_ref, 4*x4, 4*y4 );
x264_mb_predict_mv( h, 0, idx, 1, m->mvp );
x264_me_search( h, m, &a->l0.me4x4[i8x8][0].mv, i_mvc );
}
}
-#define WEIGHTED_AVG( size, pix, stride, src1, stride1, src2, stride2 ) \
-{ \
- h->mc.avg[size]( pix, stride, src1, stride1, src2, stride2, h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] ); \
-}
-
static void x264_mb_analyse_inter_b16x16( x264_t *h, x264_mb_analysis_t *a )
{
- DECLARE_ALIGNED_16( uint8_t pix0[16*16] );
- DECLARE_ALIGNED_16( uint8_t pix1[16*16] );
+ ALIGNED_ARRAY_16( uint8_t, pix0,[16*16] );
+ ALIGNED_ARRAY_16( uint8_t, pix1,[16*16] );
uint8_t *src0, *src1;
int stride0 = 16, stride1 = 16;
x264_me_t m;
int i_ref, i_mvc;
- DECLARE_ALIGNED_4( int16_t mvc[9][2] );
+ ALIGNED_4( int16_t mvc[9][2] );
int i_halfpel_thresh = INT_MAX;
int *p_halfpel_thresh = h->mb.pic.i_fref[0]>1 ? &i_halfpel_thresh : NULL;
/* 16x16 Search on all ref frame */
m.i_pixel = PIXEL_16x16;
- m.p_cost_mv = a->p_cost_mv;
+ m.weight = weight_none;
+
LOAD_FENC( &m, h->mb.pic.p_fenc, 0, 0 );
/* ME for List 0 */
}
/* save mv for predicting neighbors */
- *(uint32_t*)h->mb.mvr[0][i_ref][h->mb.i_mb_xy] = *(uint32_t*)m.mv;
+ CP32( h->mb.mvr[0][i_ref][h->mb.i_mb_xy], m.mv );
}
+ a->l0.me16x16.i_ref = a->l0.i_ref;
+
/* subtract ref cost, so we don't have to add it for the other MB types */
a->l0.me16x16.cost -= REF_COST( 0, a->l0.i_ref );
}
/* save mv for predicting neighbors */
- *(uint32_t*)h->mb.mvr[1][i_ref][h->mb.i_mb_xy] = *(uint32_t*)m.mv;
+ CP32( h->mb.mvr[1][i_ref][h->mb.i_mb_xy], m.mv );
}
+ a->l1.me16x16.i_ref = a->l1.i_ref;
+
/* subtract ref cost, so we don't have to add it for the other MB types */
a->l1.me16x16.cost -= REF_COST( 1, a->l1.i_ref );
/* get cost of BI mode */
src0 = h->mc.get_ref( pix0, &stride0,
- h->mb.pic.p_fref[0][a->l0.i_ref], h->mb.pic.i_stride[0],
- a->l0.me16x16.mv[0], a->l0.me16x16.mv[1], 16, 16 );
+ h->mb.pic.p_fref[0][a->l0.i_ref], h->mb.pic.i_stride[0],
+ a->l0.me16x16.mv[0], a->l0.me16x16.mv[1], 16, 16, weight_none );
src1 = h->mc.get_ref( pix1, &stride1,
- h->mb.pic.p_fref[1][a->l1.i_ref], h->mb.pic.i_stride[0],
- a->l1.me16x16.mv[0], a->l1.me16x16.mv[1], 16, 16 );
+ h->mb.pic.p_fref[1][a->l1.i_ref], h->mb.pic.i_stride[0],
+ a->l1.me16x16.mv[0], a->l1.me16x16.mv[1], 16, 16, weight_none );
h->mc.avg[PIXEL_16x16]( pix0, 16, src0, stride0, src1, stride1, h->mb.bipred_weight[a->l0.i_ref][a->l1.i_ref] );
uint8_t **p_fref[2] =
{ h->mb.pic.p_fref[0][a->l0.i_ref],
h->mb.pic.p_fref[1][a->l1.i_ref] };
- DECLARE_ALIGNED_8( uint8_t pix[2][8*8] );
+ ALIGNED_8( uint8_t pix[2][8*8] );
int i, l;
/* XXX Needed for x264_mb_predict_mv */
x264_me_t *m = &lX->me8x8[i];
m->i_pixel = PIXEL_8x8;
- m->p_cost_mv = a->p_cost_mv;
LOAD_FENC( m, h->mb.pic.p_fenc, 8*x8, 8*y8 );
LOAD_HPELS( m, p_fref[l], l, lX->i_ref, 8*x8, 8*y8 );
/* BI mode */
src[l] = h->mc.get_ref( pix[l], &stride[l], m->p_fref, m->i_stride[0],
- m->mv[0], m->mv[1], 8, 8 );
+ m->mv[0], m->mv[1], 8, 8, weight_none );
i_part_cost_bi += m->cost_mv;
/* FIXME: ref cost */
}
uint8_t **p_fref[2] =
{ h->mb.pic.p_fref[0][a->l0.i_ref],
h->mb.pic.p_fref[1][a->l1.i_ref] };
- DECLARE_ALIGNED_16( uint8_t pix[2][16*8] );
- DECLARE_ALIGNED_4( int16_t mvc[2][2] );
+ ALIGNED_ARRAY_16( uint8_t, pix,[2],[16*8] );
+ ALIGNED_4( int16_t mvc[2][2] );
int i, l;
h->mb.i_partition = D_16x8;
x264_me_t *m = &lX->me16x8[i];
m->i_pixel = PIXEL_16x8;
- m->p_cost_mv = a->p_cost_mv;
LOAD_FENC( m, h->mb.pic.p_fenc, 0, 8*i );
LOAD_HPELS( m, p_fref[l], l, lX->i_ref, 0, 8*i );
- *(uint32_t*)mvc[0] = *(uint32_t*)lX->me8x8[2*i].mv;
- *(uint32_t*)mvc[1] = *(uint32_t*)lX->me8x8[2*i+1].mv;
+ CP32( mvc[0], lX->me8x8[2*i].mv );
+ CP32( mvc[1], lX->me8x8[2*i+1].mv );
x264_mb_predict_mv( h, l, 8*i, 2, m->mvp );
x264_me_search( h, m, mvc, 2 );
/* BI mode */
src[l] = h->mc.get_ref( pix[l], &stride[l], m->p_fref, m->i_stride[0],
- m->mv[0], m->mv[1], 16, 8 );
+ m->mv[0], m->mv[1], 16, 8, weight_none );
/* FIXME: ref cost */
i_part_cost_bi += m->cost_mv;
}
uint8_t **p_fref[2] =
{ h->mb.pic.p_fref[0][a->l0.i_ref],
h->mb.pic.p_fref[1][a->l1.i_ref] };
- DECLARE_ALIGNED_8( uint8_t pix[2][8*16] );
- DECLARE_ALIGNED_4( int16_t mvc[2][2] );
+ ALIGNED_8( uint8_t pix[2][8*16] );
+ ALIGNED_4( int16_t mvc[2][2] );
int i, l;
h->mb.i_partition = D_8x16;
x264_me_t *m = &lX->me8x16[i];
m->i_pixel = PIXEL_8x16;
- m->p_cost_mv = a->p_cost_mv;
LOAD_FENC( m, h->mb.pic.p_fenc, 8*i, 0 );
LOAD_HPELS( m, p_fref[l], l, lX->i_ref, 8*i, 0 );
- *(uint32_t*)mvc[0] = *(uint32_t*)lX->me8x8[i].mv;
- *(uint32_t*)mvc[1] = *(uint32_t*)lX->me8x8[i+2].mv;
+ CP32( mvc[0], lX->me8x8[i].mv );
+ CP32( mvc[1], lX->me8x8[i+2].mv );
x264_mb_predict_mv( h, l, 4*i, 2, m->mvp );
x264_me_search( h, m, mvc, 2 );
/* BI mode */
src[l] = h->mc.get_ref( pix[l], &stride[l], m->p_fref, m->i_stride[0],
- m->mv[0], m->mv[1], 8, 16 );
+ m->mv[0], m->mv[1], 8, 16, weight_none );
/* FIXME: ref cost */
i_part_cost_bi += m->cost_mv;
}
x264_analyse_update_cache( h, a );
a->l0.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
}
- a->l0.me16x16.cost = a->l0.i_rd16x16;
if( a->l0.i_cost16x8 <= thresh )
{
switch( h->mb.i_partition )
{
- case D_16x16:
- if( h->mb.i_type == B_BI_BI )
- x264_me_refine_bidir_satd( h, &a->l0.me16x16, &a->l1.me16x16, i_biweight );
- break;
- case D_16x8:
- for( i=0; i<2; i++ )
- if( a->i_mb_partition16x8[i] == D_BI_8x8 )
- x264_me_refine_bidir_satd( h, &a->l0.me16x8[i], &a->l1.me16x8[i], i_biweight );
- break;
- case D_8x16:
- for( i=0; i<2; i++ )
- if( a->i_mb_partition8x16[i] == D_BI_8x8 )
- x264_me_refine_bidir_satd( h, &a->l0.me8x16[i], &a->l1.me8x16[i], i_biweight );
- break;
- case D_8x8:
- for( i=0; i<4; i++ )
- if( h->mb.i_sub_partition[i] == D_BI_8x8 )
- x264_me_refine_bidir_satd( h, &a->l0.me8x8[i], &a->l1.me8x8[i], i_biweight );
- break;
+ case D_16x16:
+ if( h->mb.i_type == B_BI_BI )
+ x264_me_refine_bidir_satd( h, &a->l0.me16x16, &a->l1.me16x16, i_biweight );
+ break;
+ case D_16x8:
+ for( i=0; i<2; i++ )
+ if( a->i_mb_partition16x8[i] == D_BI_8x8 )
+ x264_me_refine_bidir_satd( h, &a->l0.me16x8[i], &a->l1.me16x8[i], i_biweight );
+ break;
+ case D_8x16:
+ for( i=0; i<2; i++ )
+ if( a->i_mb_partition8x16[i] == D_BI_8x8 )
+ x264_me_refine_bidir_satd( h, &a->l0.me8x16[i], &a->l1.me8x16[i], i_biweight );
+ break;
+ case D_8x8:
+ for( i=0; i<4; i++ )
+ if( h->mb.i_sub_partition[i] == D_BI_8x8 )
+ x264_me_refine_bidir_satd( h, &a->l0.me8x8[i], &a->l1.me8x8[i], i_biweight );
+ break;
}
}
{
int bcost, cost, direction, failures, prevcost, origcost;
int orig_qp = h->mb.i_qp, bqp = h->mb.i_qp;
+ int last_qp_tried = 0;
origcost = bcost = x264_rd_cost_mb( h, a->i_lambda2 );
/* If CBP is already zero, don't raise the quantizer any higher. */
for( direction = h->mb.cbp[h->mb.i_mb_xy] ? 1 : -1; direction >= -1; direction-=2 )
{
+ /* Without psy-RD, require monotonicity when moving quant away from previous
+ * macroblock's quant; allow 1 failure when moving quant towards previous quant.
+ * With psy-RD, allow 1 failure when moving quant away from previous quant,
+ * allow 2 failures when moving quant towards previous quant.
+ * Psy-RD generally seems to result in more chaotic RD score-vs-quantizer curves. */
+ int threshold = (!!h->mb.i_psy_rd);
+ /* Raise the threshold for failures if we're moving towards the last QP. */
+ if( ( h->mb.i_last_qp < orig_qp && direction == -1 ) ||
+ ( h->mb.i_last_qp > orig_qp && direction == 1 ) )
+ threshold++;
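+ /* e.g. with psy-RD on and the previous MB's QP below this one's, lowering
+ * the QP (towards the last QP) tolerates 2 failures while raising it
+ * tolerates only 1. */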
h->mb.i_qp = orig_qp;
failures = 0;
prevcost = origcost;
- while( h->mb.i_qp > 0 && h->mb.i_qp < 51 )
+ h->mb.i_qp += direction;
+ while( h->mb.i_qp >= h->param.rc.i_qp_min && h->mb.i_qp <= h->param.rc.i_qp_max )
{
- h->mb.i_qp += direction;
+ if( h->mb.i_last_qp == h->mb.i_qp )
+ last_qp_tried = 1;
h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp];
cost = x264_rd_cost_mb( h, a->i_lambda2 );
COPY2_IF_LT( bcost, cost, bqp, h->mb.i_qp );
failures++;
prevcost = cost;
- /* Without psy-RD, require monotonicity when lowering
- * quant, allow 1 failure when raising quant.
- * With psy-RD, allow 1 failure when lowering quant,
- * allow 2 failures when raising quant.
- * Psy-RD generally seems to result in more chaotic
- * RD score-vs-quantizer curves. */
- if( failures > ((direction + 1)>>1)+(!!h->mb.i_psy_rd) )
+ if( failures > threshold )
break;
if( direction == 1 && !h->mb.cbp[h->mb.i_mb_xy] )
break;
+ h->mb.i_qp += direction;
}
}
+ /* Always try the last block's QP. */
+ if( !last_qp_tried )
+ {
+ h->mb.i_qp = h->mb.i_last_qp;
+ h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp];
+ cost = x264_rd_cost_mb( h, a->i_lambda2 );
+ COPY2_IF_LT( bcost, cost, bqp, h->mb.i_qp );
+ }
+
h->mb.i_qp = bqp;
h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp];
/* Check transform again; decision from before may no longer be optimal. */
- if( h->mb.i_qp != orig_qp && x264_mb_transform_8x8_allowed( h ) &&
- h->param.analyse.b_transform_8x8 )
+ if( h->mb.i_qp != orig_qp && h->param.analyse.b_transform_8x8 &&
+ x264_mb_transform_8x8_allowed( h ) )
{
h->mb.b_transform_8x8 ^= 1;
cost = x264_rd_cost_mb( h, a->i_lambda2 );
x264_adaptive_quant( h );
/* If the QP of this MB is within 1 of the previous MB, code the same QP as the previous MB,
* to lower the bit cost of the qp_delta. Don't do this if QPRD is enabled. */
- if( analysis.i_mbrd < 3 && abs(h->mb.i_qp - h->mb.i_last_qp) == 1 )
+ if( h->param.analyse.i_subpel_refine < 10 && abs(h->mb.i_qp - h->mb.i_last_qp) == 1 )
h->mb.i_qp = h->mb.i_last_qp;
}
x264_mb_analyse_p_rd( h, &analysis, X264_MIN(i_satd_inter, i_satd_intra) );
i_type = P_L0;
i_partition = D_16x16;
- i_cost = analysis.l0.me16x16.cost;
+ i_cost = analysis.l0.i_rd16x16;
COPY2_IF_LT( i_cost, analysis.l0.i_cost16x8, i_partition, D_16x8 );
COPY2_IF_LT( i_cost, analysis.l0.i_cost8x16, i_partition, D_8x16 );
COPY3_IF_LT( i_cost, analysis.l0.i_cost8x8, i_partition, D_8x8, i_type, P_8x8 );
else if( i_partition == D_16x16 )
{
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, analysis.l0.me16x16.i_ref );
+ analysis.l0.me16x16.cost = i_cost;
x264_me_refine_qpel_rd( h, &analysis.l0.me16x16, analysis.i_lambda2, 0, 0 );
}
else if( i_partition == D_16x8 )
}
else if( h->mb.i_sub_partition[i8x8] == D_L0_8x4 )
{
- x264_me_refine_qpel_rd( h, &analysis.l0.me8x4[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
- x264_me_refine_qpel_rd( h, &analysis.l0.me8x4[i8x8][1], analysis.i_lambda2, i8x8*4+2, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me8x4[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me8x4[i8x8][1], analysis.i_lambda2, i8x8*4+2, 0 );
}
else if( h->mb.i_sub_partition[i8x8] == D_L0_4x8 )
{
- x264_me_refine_qpel_rd( h, &analysis.l0.me4x8[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
- x264_me_refine_qpel_rd( h, &analysis.l0.me4x8[i8x8][1], analysis.i_lambda2, i8x8*4+1, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x8[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x8[i8x8][1], analysis.i_lambda2, i8x8*4+1, 0 );
}
else if( h->mb.i_sub_partition[i8x8] == D_L0_4x4 )
{
- x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
- x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][1], analysis.i_lambda2, i8x8*4+1, 0 );
- x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][2], analysis.i_lambda2, i8x8*4+2, 0 );
- x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][3], analysis.i_lambda2, i8x8*4+3, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][0], analysis.i_lambda2, i8x8*4+0, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][1], analysis.i_lambda2, i8x8*4+1, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][2], analysis.i_lambda2, i8x8*4+2, 0 );
+ x264_me_refine_qpel_rd( h, &analysis.l0.me4x4[i8x8][3], analysis.i_lambda2, i8x8*4+3, 0 );
}
}
}
const unsigned int flags = h->param.analyse.inter;
int i_type;
int i_partition;
- int i_satd_inter = 0; // shut up uninitialized warning
+ int i_satd_inter;
h->mb.b_skip_mc = 0;
x264_mb_analyse_load_costs( h, &analysis );
}
}
+ i_satd_inter = i_cost;
+
if( analysis.i_mbrd )
{
- i_satd_inter = i_cost;
x264_mb_analyse_b_rd( h, &analysis, i_satd_inter );
i_type = B_SKIP;
i_cost = i_bskip_cost;
if( i_partition == D_16x16 )
{
if( i_type == B_L0_L0 )
+ {
+ analysis.l0.me16x16.cost = i_cost;
x264_me_refine_qpel_rd( h, &analysis.l0.me16x16, analysis.i_lambda2, 0, 0 );
+ }
else if( i_type == B_L1_L1 )
+ {
+ analysis.l1.me16x16.cost = i_cost;
x264_me_refine_qpel_rd( h, &analysis.l1.me16x16, analysis.i_lambda2, 0, 1 );
+ }
else if( i_type == B_BI_BI )
x264_me_refine_bidir_rd( h, &analysis.l0.me16x16, &analysis.l1.me16x16, i_biweight, 0, analysis.i_lambda2 );
}
x264_analyse_update_cache( h, &analysis );
+ /* In rare cases we can end up qpel-RDing our way back to a larger partition size
+ * without realizing it. Check for this and account for it if necessary. */
+ if( analysis.i_mbrd >= 2 )
+ {
+ /* Don't bother with bipred or 8x8-and-below, the odds are incredibly low. */
+ static const uint8_t check_mv_lists[X264_MBTYPE_MAX] = {[P_L0]=1, [B_L0_L0]=1, [B_L1_L1]=2};
+ int list = check_mv_lists[h->mb.i_type] - 1;
+ if( list >= 0 && h->mb.i_partition != D_16x16 &&
+ M32( &h->mb.cache.mv[list][x264_scan8[0]] ) == M32( &h->mb.cache.mv[list][x264_scan8[12]] ) &&
+ h->mb.cache.ref[list][x264_scan8[0]] == h->mb.cache.ref[list][x264_scan8[12]] )
+ h->mb.i_partition = D_16x16;
+ }
+
if( !analysis.i_mbrd )
x264_mb_analyse_transform( h );