/* 8x8 */
int i_cost8x8;
/* [ref][0] is 16x16 mv, [ref][1..4] are 8x8 mv from partition [0..3] */
- DECLARE_ALIGNED_4( int16_t mvc[32][5][2] );
+ ALIGNED_4( int16_t mvc[32][5][2] );
x264_me_t me8x8[4];
/* Sub 4x4 */
943718, 1189010, 1498059, 1887436 /* 48 - 51 */
};
+/* Fixed-point exp2 fraction table: 64 entries covering one octave, indexed by
+ * the fractional part of the exponent in 1/64 steps; values rise monotonically
+ * from 1 to 253. NOTE(review): the exact scaling/rounding convention is not
+ * derivable from this chunk -- confirm against the caller before assuming a
+ * closed-form formula such as (2^(i/64)-1)*256. */
+const uint8_t x264_exp2_lut[64] = {
+ 1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40, 44, 47,
+ 50, 53, 57, 60, 64, 67, 71, 74, 78, 81, 85, 89, 93, 96, 100, 104,
+ 108, 112, 116, 120, 124, 128, 132, 137, 141, 145, 150, 154, 159, 163, 168, 172,
+ 177, 182, 186, 191, 196, 201, 206, 211, 216, 221, 226, 232, 237, 242, 248, 253,
+};
+
+/* Fractional log2 table: entry i = log2(1 + i/128), rounded to 5 decimals.
+ * Verifiable from the data: entry 1 = 0.01123 = log2(129/128) and
+ * entry 64 = 0.58496 = log2(1.5). Supplies the mantissa contribution of a
+ * log2; the integer contribution comes from x264_log2_lz_lut below. */
+const float x264_log2_lut[128] = {
+ 0.00000, 0.01123, 0.02237, 0.03342, 0.04439, 0.05528, 0.06609, 0.07682,
+ 0.08746, 0.09803, 0.10852, 0.11894, 0.12928, 0.13955, 0.14975, 0.15987,
+ 0.16993, 0.17991, 0.18982, 0.19967, 0.20945, 0.21917, 0.22882, 0.23840,
+ 0.24793, 0.25739, 0.26679, 0.27612, 0.28540, 0.29462, 0.30378, 0.31288,
+ 0.32193, 0.33092, 0.33985, 0.34873, 0.35755, 0.36632, 0.37504, 0.38370,
+ 0.39232, 0.40088, 0.40939, 0.41785, 0.42626, 0.43463, 0.44294, 0.45121,
+ 0.45943, 0.46761, 0.47573, 0.48382, 0.49185, 0.49985, 0.50779, 0.51570,
+ 0.52356, 0.53138, 0.53916, 0.54689, 0.55459, 0.56224, 0.56986, 0.57743,
+ 0.58496, 0.59246, 0.59991, 0.60733, 0.61471, 0.62205, 0.62936, 0.63662,
+ 0.64386, 0.65105, 0.65821, 0.66534, 0.67243, 0.67948, 0.68650, 0.69349,
+ 0.70044, 0.70736, 0.71425, 0.72110, 0.72792, 0.73471, 0.74147, 0.74819,
+ 0.75489, 0.76155, 0.76818, 0.77479, 0.78136, 0.78790, 0.79442, 0.80090,
+ 0.80735, 0.81378, 0.82018, 0.82655, 0.83289, 0.83920, 0.84549, 0.85175,
+ 0.85798, 0.86419, 0.87036, 0.87652, 0.88264, 0.88874, 0.89482, 0.90087,
+ 0.90689, 0.91289, 0.91886, 0.92481, 0.93074, 0.93664, 0.94251, 0.94837,
+ 0.95420, 0.96000, 0.96578, 0.97154, 0.97728, 0.98299, 0.98868, 0.99435,
+};
+
+/* Integer part of log2: entry c = 31 - c, presumably indexed by the
+ * leading-zero count of a 32-bit value ("lz") -- confirm against the caller.
+ * Stored as float purely to avoid an int/float conversion at the lookup
+ * site. */
+const float x264_log2_lz_lut[32] = {
+ 31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
+};
+
// should the intra and inter lambdas be different?
// I'm just matching the behaviour of deadzone quant.
static const int x264_trellis_lambda2_tab[2][52] = {
uint16_t x264_cost_ref[92][3][33];
/* initialize an array of lambda*nbits for all possible mvs */
-static void x264_mb_analyse_load_costs( x264_t *h, x264_mb_analysis_t *a )
+static int x264_mb_analyse_load_costs( x264_t *h, x264_mb_analysis_t *a )
{
static int16_t *p_cost_mv[92];
int i, j;
x264_emms();
/* could be faster, but isn't called many times */
/* factor of 4 from qpel, 2 from sign, and 2 because mv can be opposite from mvp */
- p_cost_mv[a->i_lambda] = x264_malloc( (4*4*2048 + 1) * sizeof(int16_t) );
+ CHECKED_MALLOC( p_cost_mv[a->i_lambda], (4*4*2048 + 1) * sizeof(int16_t) );
p_cost_mv[a->i_lambda] += 2*4*2048;
for( i = 0; i <= 2*4*2048; i++ )
{
{
for( j=0; j<4; j++ )
{
- x264_cost_mv_fpel[a->i_lambda][j] = x264_malloc( (4*2048 + 1) * sizeof(int16_t) );
+ CHECKED_MALLOC( x264_cost_mv_fpel[a->i_lambda][j], (4*2048 + 1) * sizeof(int16_t) );
x264_cost_mv_fpel[a->i_lambda][j] += 2*2048;
for( i = -2*2048; i < 2*2048; i++ )
x264_cost_mv_fpel[a->i_lambda][j][i] = p_cost_mv[a->i_lambda][i*4+j];
}
}
+ return 0;
+fail:
+ return -1;
}
static void x264_mb_analyse_init( x264_t *h, x264_mb_analysis_t *a, int i_qp )
a->i_lambda2 = x264_lambda2_tab[i_qp];
h->mb.b_trellis = h->param.analyse.i_trellis > 1 && a->i_mbrd;
- if( h->mb.b_trellis )
+ if( h->param.analyse.i_trellis )
{
h->mb.i_trellis_lambda2[0][0] = x264_trellis_lambda2_tab[0][h->mb.i_qp];
h->mb.i_trellis_lambda2[0][1] = x264_trellis_lambda2_tab[1][h->mb.i_qp];
h->mb.i_trellis_lambda2[1][1] = x264_trellis_lambda2_tab[1][h->mb.i_chroma_qp];
}
h->mb.i_psy_rd_lambda = a->i_lambda;
- /* Adjusting chroma lambda based on QP offset hurts PSNR, so we'll leave it as part of psy-RD. */
- h->mb.i_chroma_lambda2_offset = h->mb.i_psy_rd ? x264_chroma_lambda2_offset_tab[h->mb.i_qp-h->mb.i_chroma_qp+12] : 256;
+ /* Adjusting chroma lambda based on QP offset hurts PSNR but improves visual quality. */
+ h->mb.i_chroma_lambda2_offset = h->param.analyse.b_psy ? x264_chroma_lambda2_offset_tab[h->mb.i_qp-h->mb.i_chroma_qp+12] : 256;
h->mb.i_me_method = h->param.analyse.i_me_method;
h->mb.i_subpel_refine = h->param.analyse.i_subpel_refine;
/* For trellis=2, we need to do this for both sizes of DCT, for trellis=1 we only need to use it on the chosen mode. */
static void inline x264_psy_trellis_init( x264_t *h, int do_both_dct )
{
- DECLARE_ALIGNED_16( int16_t dct8x8[4][8][8] );
- DECLARE_ALIGNED_16( int16_t dct4x4[16][4][4] );
- DECLARE_ALIGNED_16( static uint8_t zero[16*FDEC_STRIDE] ) = {0};
+ ALIGNED_ARRAY_16( int16_t, dct8x8,[4],[8][8] );
+ ALIGNED_ARRAY_16( int16_t, dct4x4,[16],[4][4] );
+ ALIGNED_16( static uint8_t zero[16*FDEC_STRIDE] ) = {0};
int i;
if( do_both_dct || h->mb.b_transform_8x8 )
/* Pre-calculate fenc satd scores for psy RD, minus DC coefficients */
static inline void x264_mb_cache_fenc_satd( x264_t *h )
{
- DECLARE_ALIGNED_16( static uint8_t zero[16] ) = {0};
+ ALIGNED_16( static uint8_t zero[16] ) = {0};
uint8_t *fenc;
int x, y, satd_sum = 0, sa8d_sum = 0;
if( h->param.analyse.i_trellis == 2 && h->mb.i_psy_trellis )
/* 8x8 prediction selection */
if( flags & X264_ANALYSE_I8x8 )
{
- DECLARE_ALIGNED_16( uint8_t edge[33] );
+ ALIGNED_ARRAY_16( uint8_t, edge,[33] );
x264_pixel_cmp_t sa8d = (h->pixf.mbcmp[0] == h->pixf.satd[0]) ? h->pixf.sa8d[PIXEL_8x8] : h->pixf.mbcmp[PIXEL_8x8];
int i_satd_thresh = a->i_mbrd ? COST_MAX : X264_MIN( i_satd_inter, a->i_satd_i16x16 );
int i_cost = 0;
}
else if( h->mb.i_type == I_8x8 )
{
- DECLARE_ALIGNED_16( uint8_t edge[33] );
+ ALIGNED_ARRAY_16( uint8_t, edge,[33] );
for( idx = 0; idx < 4; idx++ )
{
uint64_t pels_h = 0;
{
x264_me_t m;
int i_ref, i_mvc;
- DECLARE_ALIGNED_4( int16_t mvc[8][2] );
+ ALIGNED_4( int16_t mvc[8][2] );
int i_halfpel_thresh = INT_MAX;
int *p_halfpel_thresh = h->mb.pic.i_fref[0]>1 ? &i_halfpel_thresh : NULL;
{
x264_me_t m;
uint8_t **p_fenc = h->mb.pic.p_fenc;
- DECLARE_ALIGNED_4( int16_t mvc[3][2] );
+ ALIGNED_4( int16_t mvc[3][2] );
int i, j;
/* XXX Needed for x264_mb_predict_mv */
{
x264_me_t m;
uint8_t **p_fenc = h->mb.pic.p_fenc;
- DECLARE_ALIGNED_4( int16_t mvc[3][2] );
+ ALIGNED_4( int16_t mvc[3][2] );
int i, j;
/* XXX Needed for x264_mb_predict_mv */
static int x264_mb_analyse_inter_p4x4_chroma( x264_t *h, x264_mb_analysis_t *a, uint8_t **p_fref, int i8x8, int pixel )
{
- DECLARE_ALIGNED_8( uint8_t pix1[16*8] );
+ ALIGNED_8( uint8_t pix1[16*8] );
uint8_t *pix2 = pix1+8;
const int i_stride = h->mb.pic.i_stride[1];
const int or = 4*(i8x8&1) + 2*(i8x8&2)*i_stride;
static void x264_mb_analyse_inter_b16x16( x264_t *h, x264_mb_analysis_t *a )
{
- DECLARE_ALIGNED_16( uint8_t pix0[16*16] );
- DECLARE_ALIGNED_16( uint8_t pix1[16*16] );
+ ALIGNED_ARRAY_16( uint8_t, pix0,[16*16] );
+ ALIGNED_ARRAY_16( uint8_t, pix1,[16*16] );
uint8_t *src0, *src1;
int stride0 = 16, stride1 = 16;
x264_me_t m;
int i_ref, i_mvc;
- DECLARE_ALIGNED_4( int16_t mvc[9][2] );
+ ALIGNED_4( int16_t mvc[9][2] );
int i_halfpel_thresh = INT_MAX;
int *p_halfpel_thresh = h->mb.pic.i_fref[0]>1 ? &i_halfpel_thresh : NULL;
uint8_t **p_fref[2] =
{ h->mb.pic.p_fref[0][a->l0.i_ref],
h->mb.pic.p_fref[1][a->l1.i_ref] };
- DECLARE_ALIGNED_8( uint8_t pix[2][8*8] );
+ ALIGNED_8( uint8_t pix[2][8*8] );
int i, l;
/* XXX Needed for x264_mb_predict_mv */
uint8_t **p_fref[2] =
{ h->mb.pic.p_fref[0][a->l0.i_ref],
h->mb.pic.p_fref[1][a->l1.i_ref] };
- DECLARE_ALIGNED_16( uint8_t pix[2][16*8] );
- DECLARE_ALIGNED_4( int16_t mvc[2][2] );
+ ALIGNED_ARRAY_16( uint8_t, pix,[2],[16*8] );
+ ALIGNED_4( int16_t mvc[2][2] );
int i, l;
h->mb.i_partition = D_16x8;
uint8_t **p_fref[2] =
{ h->mb.pic.p_fref[0][a->l0.i_ref],
h->mb.pic.p_fref[1][a->l1.i_ref] };
- DECLARE_ALIGNED_8( uint8_t pix[2][8*16] );
- DECLARE_ALIGNED_4( int16_t mvc[2][2] );
+ ALIGNED_8( uint8_t pix[2][8*16] );
+ ALIGNED_4( int16_t mvc[2][2] );
int i, l;
h->mb.i_partition = D_8x16;
x264_analyse_update_cache( h, a );
a->l0.i_rd16x16 = x264_rd_cost_mb( h, a->i_lambda2 );
}
- a->l0.me16x16.cost = a->l0.i_rd16x16;
if( a->l0.i_cost16x8 <= thresh )
{
switch( h->mb.i_partition )
{
- case D_16x16:
- if( h->mb.i_type == B_BI_BI )
- x264_me_refine_bidir_satd( h, &a->l0.me16x16, &a->l1.me16x16, i_biweight );
- break;
- case D_16x8:
- for( i=0; i<2; i++ )
- if( a->i_mb_partition16x8[i] == D_BI_8x8 )
- x264_me_refine_bidir_satd( h, &a->l0.me16x8[i], &a->l1.me16x8[i], i_biweight );
- break;
- case D_8x16:
- for( i=0; i<2; i++ )
- if( a->i_mb_partition8x16[i] == D_BI_8x8 )
- x264_me_refine_bidir_satd( h, &a->l0.me8x16[i], &a->l1.me8x16[i], i_biweight );
- break;
- case D_8x8:
- for( i=0; i<4; i++ )
- if( h->mb.i_sub_partition[i] == D_BI_8x8 )
- x264_me_refine_bidir_satd( h, &a->l0.me8x8[i], &a->l1.me8x8[i], i_biweight );
- break;
+ case D_16x16:
+ if( h->mb.i_type == B_BI_BI )
+ x264_me_refine_bidir_satd( h, &a->l0.me16x16, &a->l1.me16x16, i_biweight );
+ break;
+ case D_16x8:
+ for( i=0; i<2; i++ )
+ if( a->i_mb_partition16x8[i] == D_BI_8x8 )
+ x264_me_refine_bidir_satd( h, &a->l0.me16x8[i], &a->l1.me16x8[i], i_biweight );
+ break;
+ case D_8x16:
+ for( i=0; i<2; i++ )
+ if( a->i_mb_partition8x16[i] == D_BI_8x8 )
+ x264_me_refine_bidir_satd( h, &a->l0.me8x16[i], &a->l1.me8x16[i], i_biweight );
+ break;
+ case D_8x8:
+ for( i=0; i<4; i++ )
+ if( h->mb.i_sub_partition[i] == D_BI_8x8 )
+ x264_me_refine_bidir_satd( h, &a->l0.me8x8[i], &a->l1.me8x8[i], i_biweight );
+ break;
}
}
{
int bcost, cost, direction, failures, prevcost, origcost;
int orig_qp = h->mb.i_qp, bqp = h->mb.i_qp;
+ int last_qp_tried = 0;
origcost = bcost = x264_rd_cost_mb( h, a->i_lambda2 );
/* If CBP is already zero, don't raise the quantizer any higher. */
for( direction = h->mb.cbp[h->mb.i_mb_xy] ? 1 : -1; direction >= -1; direction-=2 )
{
+ /* Without psy-RD, require monotonicity when moving quant away from previous
+ * macroblock's quant; allow 1 failure when moving quant towards previous quant.
+ * With psy-RD, allow 1 failure when moving quant away from previous quant,
+ * allow 2 failures when moving quant towards previous quant.
+ * Psy-RD generally seems to result in more chaotic RD score-vs-quantizer curves. */
+ int threshold = (!!h->mb.i_psy_rd);
+ /* Raise the threshold for failures if we're moving towards the last QP. */
+ if( ( h->mb.i_last_qp < orig_qp && direction == -1 ) ||
+ ( h->mb.i_last_qp > orig_qp && direction == 1 ) )
+ threshold++;
h->mb.i_qp = orig_qp;
failures = 0;
prevcost = origcost;
- while( h->mb.i_qp > 0 && h->mb.i_qp < 51 )
+ h->mb.i_qp += direction;
+ while( h->mb.i_qp >= h->param.rc.i_qp_min && h->mb.i_qp <= h->param.rc.i_qp_max )
{
- h->mb.i_qp += direction;
+ if( h->mb.i_last_qp == h->mb.i_qp )
+ last_qp_tried = 1;
h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp];
cost = x264_rd_cost_mb( h, a->i_lambda2 );
COPY2_IF_LT( bcost, cost, bqp, h->mb.i_qp );
failures++;
prevcost = cost;
- /* Without psy-RD, require monotonicity when lowering
- * quant, allow 1 failure when raising quant.
- * With psy-RD, allow 1 failure when lowering quant,
- * allow 2 failures when raising quant.
- * Psy-RD generally seems to result in more chaotic
- * RD score-vs-quantizer curves. */
- if( failures > ((direction + 1)>>1)+(!!h->mb.i_psy_rd) )
+ if( failures > threshold )
break;
if( direction == 1 && !h->mb.cbp[h->mb.i_mb_xy] )
break;
+ h->mb.i_qp += direction;
}
}
+ /* Always try the last block's QP. */
+ if( !last_qp_tried )
+ {
+ h->mb.i_qp = h->mb.i_last_qp;
+ h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp];
+ cost = x264_rd_cost_mb( h, a->i_lambda2 );
+ COPY2_IF_LT( bcost, cost, bqp, h->mb.i_qp );
+ }
+
h->mb.i_qp = bqp;
h->mb.i_chroma_qp = h->chroma_qp_table[h->mb.i_qp];
/* Check transform again; decision from before may no longer be optimal. */
- if( h->mb.i_qp != orig_qp && x264_mb_transform_8x8_allowed( h ) &&
- h->param.analyse.b_transform_8x8 )
+ if( h->mb.i_qp != orig_qp && h->param.analyse.b_transform_8x8 &&
+ x264_mb_transform_8x8_allowed( h ) )
{
h->mb.b_transform_8x8 ^= 1;
cost = x264_rd_cost_mb( h, a->i_lambda2 );
/*****************************************************************************
* x264_macroblock_analyse:
*****************************************************************************/
-void x264_macroblock_analyse( x264_t *h )
+int x264_macroblock_analyse( x264_t *h )
{
x264_mb_analysis_t analysis;
int i_cost = COST_MAX;
x264_adaptive_quant( h );
/* If the QP of this MB is within 1 of the previous MB, code the same QP as the previous MB,
* to lower the bit cost of the qp_delta. Don't do this if QPRD is enabled. */
- if( analysis.i_mbrd < 3 && abs(h->mb.i_qp - h->mb.i_last_qp) == 1 )
+ if( h->param.analyse.i_subpel_refine < 10 && abs(h->mb.i_qp - h->mb.i_last_qp) == 1 )
h->mb.i_qp = h->mb.i_last_qp;
}
int i_thresh16x8;
int i_satd_inter, i_satd_intra;
- x264_mb_analyse_load_costs( h, &analysis );
+ if( x264_mb_analyse_load_costs( h, &analysis ) )
+ return -1;
x264_mb_analyse_inter_p16x16( h, &analysis );
if( h->mb.i_type == P_SKIP )
- return;
+ return 0;
if( flags & X264_ANALYSE_PSUB16x16 )
{
x264_mb_analyse_p_rd( h, &analysis, X264_MIN(i_satd_inter, i_satd_intra) );
i_type = P_L0;
i_partition = D_16x16;
- i_cost = analysis.l0.me16x16.cost;
+ i_cost = analysis.l0.i_rd16x16;
COPY2_IF_LT( i_cost, analysis.l0.i_cost16x8, i_partition, D_16x8 );
COPY2_IF_LT( i_cost, analysis.l0.i_cost8x16, i_partition, D_8x16 );
COPY3_IF_LT( i_cost, analysis.l0.i_cost8x8, i_partition, D_8x8, i_type, P_8x8 );
else if( i_partition == D_16x16 )
{
x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, analysis.l0.me16x16.i_ref );
+ analysis.l0.me16x16.cost = analysis.l0.i_rd16x16;
x264_me_refine_qpel_rd( h, &analysis.l0.me16x16, analysis.i_lambda2, 0, 0 );
}
else if( i_partition == D_16x8 )
const unsigned int flags = h->param.analyse.inter;
int i_type;
int i_partition;
- int i_satd_inter = 0; // shut up uninitialized warning
+ int i_satd_inter;
h->mb.b_skip_mc = 0;
- x264_mb_analyse_load_costs( h, &analysis );
+ if( x264_mb_analyse_load_costs( h, &analysis ) )
+ return -1;
/* select best inter mode */
/* direct must be first */
{
h->mb.i_type = B_SKIP;
x264_analyse_update_cache( h, &analysis );
- return;
+ return 0;
}
}
}
}
+ i_satd_inter = i_cost;
+
if( analysis.i_mbrd )
{
- i_satd_inter = i_cost;
x264_mb_analyse_b_rd( h, &analysis, i_satd_inter );
i_type = B_SKIP;
i_cost = i_bskip_cost;
if( i_partition == D_16x16 )
{
if( i_type == B_L0_L0 )
+ {
+ analysis.l0.me16x16.cost = analysis.l0.i_rd16x16;
x264_me_refine_qpel_rd( h, &analysis.l0.me16x16, analysis.i_lambda2, 0, 0 );
+ }
else if( i_type == B_L1_L1 )
+ {
+ analysis.l1.me16x16.cost = analysis.l1.i_rd16x16;
x264_me_refine_qpel_rd( h, &analysis.l1.me16x16, analysis.i_lambda2, 0, 1 );
+ }
else if( i_type == B_BI_BI )
x264_me_refine_bidir_rd( h, &analysis.l0.me16x16, &analysis.l1.me16x16, i_biweight, 0, analysis.i_lambda2 );
}
x264_psy_trellis_init( h, 0 );
if( h->mb.b_trellis == 1 || h->mb.b_noise_reduction )
h->mb.i_skip_intra = 0;
+ return 0;
}
/*-------------------- Update MB from the analysis ----------------------*/