/*****************************************************************************
- * slicetype.c: h264 encoder library
+ * slicetype.c: lookahead analysis
*****************************************************************************
- * Copyright (C) 2005-2008 x264 project
+ * Copyright (C) 2005-2010 x264 project
*
* Authors: Fiona Glaser <fiona@x264.com>
* Loren Merritt <lorenm@u.washington.edu>
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include <math.h>
}
/* converts a non-H.264 weight (i.e. fix7) into an H.264 weight */
-static void x264_weight_get_h264( unsigned int weight_nonh264, int offset, x264_weight_t *w )
+static void x264_weight_get_h264( int weight_nonh264, int offset, x264_weight_t *w )
{
w->i_offset = offset;
w->i_denom = 7;
w->i_scale = X264_MIN( w->i_scale, 127 );
}
-void x264_weight_plane_analyse( x264_t *h, x264_frame_t *frame )
-{
- uint32_t sad = 0;
- uint64_t ssd = 0;
- uint8_t *p = frame->plane[0];
- int stride = frame->i_stride[0];
- int width = frame->i_width[0];
- int height = frame->i_lines[0];
- for( int y = 0; y < height>>4; y++, p += stride*16 )
- for( int x = 0; x < width; x += 16 )
- {
- uint64_t res = h->pixf.var[PIXEL_16x16]( p + x, stride );
- sad += (uint32_t)res;
- ssd += res >> 32;
- }
- frame->i_pixel_sum = sad;
- frame->i_pixel_ssd = ssd - ((uint64_t)sad * sad + width * height / 2) / (width * height);
-}
-
-static NOINLINE uint8_t *x264_weight_cost_init_luma( x264_t *h, x264_frame_t *fenc, x264_frame_t *ref, uint8_t *dest )
+static NOINLINE pixel *x264_weight_cost_init_luma( x264_t *h, x264_frame_t *fenc, x264_frame_t *ref, pixel *dest )
{
int ref0_distance = fenc->i_frame - ref->i_frame - 1;
/* Note: this will never run during lookahead as weights_analyse is only called if no
int i_lines = fenc->i_lines_lowres;
int i_width = fenc->i_width_lowres;
int i_mb_xy = 0;
- uint8_t *p = dest;
+ pixel *p = dest;
for( int y = 0; y < i_lines; y += 8, p += i_stride*8 )
for( int x = 0; x < i_width; x += 8, i_mb_xy++ )
return ref->lowres[0];
}
-static NOINLINE unsigned int x264_weight_cost( x264_t *h, x264_frame_t *fenc, uint8_t *src, x264_weight_t *w )
+/* How data is organized for chroma weightp:
+ * [U: ref] [U: fenc]
+ * [V: ref] [V: fenc]
+ * fenc = ref + offset
+ * v = u + stride * chroma height
+ * We'll need more room if we do 4:2:2 or 4:4:4. */
+
+/* Set up the chroma buffers used by weightp analysis: motion-compensate the
+ * reference chroma when lowres MVs are available, otherwise deinterleave it
+ * directly, then deinterleave the source frame's NV12 chroma into the fenc
+ * half of the buffer (layout described above). */
+static NOINLINE void x264_weight_cost_init_chroma( x264_t *h, x264_frame_t *fenc, x264_frame_t *ref, pixel *dstu, pixel *dstv )
+{
+ int ref0_distance = fenc->i_frame - ref->i_frame - 1;
+ int i_stride = fenc->i_stride[1];
+ int i_offset = i_stride / 2;
+ int i_lines = fenc->i_lines[1];
+ int i_width = fenc->i_width[1];
+ int cw = h->mb.i_mb_width << 3;
+ int ch = h->mb.i_mb_height << 3;
+
+ /* 0x7FFF is the sentinel for lowres MVs that were never searched (see the
+ * do_search checks in x264_slicetype_frame_cost); only motion-compensate
+ * when a real search was done for this reference distance. */
+ if( fenc->lowres_mvs[0][ref0_distance][0][0] != 0x7FFF )
+ {
+ for( int y = 0, mb_xy = 0, pel_offset_y = 0; y < i_lines; y += 8, pel_offset_y = y*i_stride )
+ for( int x = 0, pel_offset_x = 0; x < i_width; x += 8, mb_xy++, pel_offset_x += 8 )
+ {
+ pixel *pixu = dstu + pel_offset_y + pel_offset_x;
+ pixel *pixv = dstv + pel_offset_y + pel_offset_x;
+ pixel *src1 = ref->plane[1] + pel_offset_y + pel_offset_x*2; /* NV12 */
+ int mvx = fenc->lowres_mvs[0][ref0_distance][mb_xy][0];
+ int mvy = fenc->lowres_mvs[0][ref0_distance][mb_xy][1];
+ h->mc.mc_chroma( pixu, pixv, i_stride, src1, i_stride, mvx, mvy, 8, 8 );
+ }
+ }
+ else
+ /* No MVs: use the unweighted reference chroma as-is. */
+ h->mc.plane_copy_deinterleave( dstu, i_stride, dstv, i_stride, ref->plane[1], i_stride, cw, ch );
+ /* Source-frame chroma goes in the fenc half of the buffer (ref + i_offset). */
+ h->mc.plane_copy_deinterleave( dstu+i_offset, i_stride, dstv+i_offset, i_stride, fenc->plane[1], i_stride, cw, ch );
+ x264_emms(); /* clear MMX state before any float math in the caller */
+}
+
+/* Estimate the lambda-scaled bit cost of signalling weight w in every slice
+ * header of the frame, using the fixed lookahead QP's lambda.  b_chroma
+ * halves the denom cost, since one denom is shared by both chroma planes. */
+static int x264_weight_slice_header_cost( x264_t *h, x264_weight_t *w, int b_chroma )
+{
+ /* Add cost of weights in the slice header. */
+ int lambda = x264_lambda_tab[X264_LOOKAHEAD_QP];
+ int numslices;
+ if( h->param.i_slice_count )
+ numslices = h->param.i_slice_count;
+ else if( h->param.i_slice_max_mbs )
+ numslices = (h->mb.i_mb_width * h->mb.i_mb_height + h->param.i_slice_max_mbs-1) / h->param.i_slice_max_mbs;
+ else
+ numslices = 1;
+ /* FIXME: find a way to account for --slice-max-size?
+ * Multiply by 2 as there will be a duplicate. 10 bits added as if there is a weighted frame, then an additional duplicate is used.
+ * Cut denom cost in half if chroma, since it's shared between the two chroma planes. */
+ int denom_cost = bs_size_ue( w[0].i_denom ) * (2 - b_chroma);
+ return lambda * numslices * ( 10 + denom_cost + 2 * (bs_size_se( w[0].i_scale ) + bs_size_se( w[0].i_offset )) );
+}
+
+static NOINLINE unsigned int x264_weight_cost_luma( x264_t *h, x264_frame_t *fenc, pixel *src, x264_weight_t *w )
{
unsigned int cost = 0;
int i_stride = fenc->i_stride_lowres;
int i_lines = fenc->i_lines_lowres;
int i_width = fenc->i_width_lowres;
- uint8_t *fenc_plane = fenc->lowres[0];
- ALIGNED_ARRAY_8( uint8_t, buf,[8*8] );
+ pixel *fenc_plane = fenc->lowres[0];
+ ALIGNED_ARRAY_16( pixel, buf,[8*8] );
int pixoff = 0;
int i_mb = 0;
w->weightfn[8>>2]( buf, 8, &src[pixoff], i_stride, w, 8 );
cost += X264_MIN( h->pixf.mbcmp[PIXEL_8x8]( buf, 8, &fenc_plane[pixoff], i_stride ), fenc->i_intra_cost[i_mb] );
}
- /* Add cost of weights in the slice header. */
- int numslices;
- if( h->param.i_slice_count )
- numslices = h->param.i_slice_count;
- else if( h->param.i_slice_max_mbs )
- numslices = (h->sps->i_mb_width * h->sps->i_mb_height + h->param.i_slice_max_mbs-1) / h->param.i_slice_max_mbs;
- else
- numslices = 1;
- /* FIXME: find a way to account for --slice-max-size?
- * Multiply by 2 as there will be a duplicate. 10 bits added as if there is a weighted frame, then an additional duplicate is used.
- * Since using lowres frames, assume lambda = 1. */
- cost += numslices * ( 10 + 2 * ( bs_size_ue( w[0].i_denom ) + bs_size_se( w[0].i_scale ) + bs_size_se( w[0].i_offset ) ) );
+ cost += x264_weight_slice_header_cost( h, w, 0 );
}
else
for( int y = 0; y < i_lines; y += 8, pixoff = y*i_stride )
return cost;
}
+/* Score a candidate chroma weight (or the unweighted baseline when w == NULL)
+ * by comparing per-8x8-block DC sums of the (weighted) reference against the
+ * source.  ref points at the MC'd reference half of the analysis buffer; the
+ * source half follows at ref + i_offset (layout set up by
+ * x264_weight_cost_init_chroma).  Returns the accumulated cost including the
+ * slice-header signalling cost when a weight is given. */
+static NOINLINE unsigned int x264_weight_cost_chroma( x264_t *h, x264_frame_t *fenc, pixel *ref, x264_weight_t *w )
+{
+ unsigned int cost = 0;
+ int i_stride = fenc->i_stride[1];
+ int i_offset = i_stride / 2;
+ int i_lines = fenc->i_lines[1];
+ int i_width = fenc->i_width[1];
+ pixel *src = ref + i_offset;
+ ALIGNED_ARRAY_16( pixel, buf, [8*8] );
+ int pixoff = 0;
+ ALIGNED_16( static pixel flat[9] ) = {0,0,0,0,0,0,0,0,1}; //hack for win32
+ if( w )
+ {
+ for( int y = 0; y < i_lines; y += 8, pixoff = y*i_stride )
+ for( int x = 0; x < i_width; x += 8, pixoff += 8 )
+ {
+ w->weightfn[8>>2]( buf, 8, &ref[pixoff], i_stride, w, 8 );
+ /* The naive and seemingly sensible algorithm is to use mbcmp as in luma.
+ * But testing shows that for chroma the DC coefficient is by far the most
+ * important part of the coding cost. Thus a more useful chroma weight is
+ * obtained by comparing each block's DC coefficient instead of the actual
+ * pixels.
+ *
+ * FIXME: add a (faster) asm sum function to replace sad. */
+ /* SAD against a zero block with stride 0 yields the block's pixel sum (DC). */
+ cost += abs( h->pixf.sad_aligned[PIXEL_8x8]( buf, 8, flat, 0 ) -
+ h->pixf.sad_aligned[PIXEL_8x8]( &src[pixoff], i_stride, flat, 0 ) );
+ }
+ cost += x264_weight_slice_header_cost( h, w, 1 );
+ }
+ else
+ /* Baseline: DC difference between the unweighted reference and the source. */
+ for( int y = 0; y < i_lines; y += 8, pixoff = y*i_stride )
+ for( int x = 0; x < i_width; x += 8, pixoff += 8 )
+ cost += abs( h->pixf.sad_aligned[PIXEL_8x8]( &ref[pixoff], i_stride, flat, 0 ) -
+ h->pixf.sad_aligned[PIXEL_8x8]( &src[pixoff], i_stride, flat, 0 ) );
+ x264_emms(); /* clear MMX state before any float math in the caller */
+ return cost;
+}
+
void x264_weights_analyse( x264_t *h, x264_frame_t *fenc, x264_frame_t *ref, int b_lookahead )
{
- float fenc_mean, ref_mean, fenc_var, ref_var;
- int offset_search;
- int minoff, minscale, mindenom;
- unsigned int minscore, origscore;
int i_delta_index = fenc->i_frame - ref->i_frame - 1;
/* epsilon is chosen to require at least a numerator of 127 (with denominator = 128) */
- const float epsilon = 1.0/128.0;
- float guess_scale;
- int found;
+ const float epsilon = 1.f/128.f;
x264_weight_t *weights = fenc->weight[0];
+ SET_WEIGHT( weights[0], 0, 1, 0, 0 );
+ SET_WEIGHT( weights[1], 0, 1, 0, 0 );
+ SET_WEIGHT( weights[2], 0, 1, 0, 0 );
+ /* Don't check chroma in lookahead, or if there wasn't a luma weight. */
+ for( int plane = 0; plane <= 2 && !( plane && ( !weights[0].weightfn || b_lookahead ) ); plane++ )
+ {
+ int offset_search;
+ int minoff, minscale, mindenom;
+ unsigned int minscore, origscore;
+ int found;
+ float fenc_var = fenc->i_pixel_ssd[plane] + !ref->i_pixel_ssd[plane];
+ float ref_var = ref->i_pixel_ssd[plane] + !ref->i_pixel_ssd[plane];
+ float guess_scale = sqrtf( fenc_var / ref_var );
+ float fenc_mean = (float)fenc->i_pixel_sum[plane] / (fenc->i_lines[!!plane] * fenc->i_width[!!plane]);
+ float ref_mean = (float) ref->i_pixel_sum[plane] / (fenc->i_lines[!!plane] * fenc->i_width[!!plane]);
+
+ //early termination
+ if( fabsf( ref_mean - fenc_mean ) < 0.5f && fabsf( 1.f - guess_scale ) < epsilon )
+ {
+ SET_WEIGHT( weights[plane], 0, 1, 0, 0 );
+ continue;
+ }
- fenc_var = round( sqrt( fenc->i_pixel_ssd ) );
- ref_var = round( sqrt( ref->i_pixel_ssd ) );
- fenc_mean = (float)fenc->i_pixel_sum / (fenc->i_lines[0] * fenc->i_width[0]);
- ref_mean = (float) ref->i_pixel_sum / (fenc->i_lines[0] * fenc->i_width[0]);
+ if( plane )
+ {
+ weights[plane].i_denom = 6;
+ weights[plane].i_scale = x264_clip3( round( guess_scale * 64 ), 0, 255 );
+ if( weights[plane].i_scale > 127 )
+ {
+ weights[1].weightfn = weights[2].weightfn = NULL;
+ break;
+ }
+ }
+ else
+ x264_weight_get_h264( round( guess_scale * 128 ), 0, &weights[plane] );
- //early termination
- if( fabs( ref_mean - fenc_mean ) < 0.5 && fabs( 1 - fenc_var / ref_var ) < epsilon )
- {
- SET_WEIGHT( weights[0], 0, 1, 0, 0 );
- return;
- }
+ found = 0;
+ mindenom = weights[plane].i_denom;
+ minscale = weights[plane].i_scale;
+ minoff = 0;
- guess_scale = ref_var ? fenc_var/ref_var : 0;
- x264_weight_get_h264( round( guess_scale * 128 ), 0, &weights[0] );
+ pixel *mcbuf;
+ if( !plane )
+ {
+ if( !fenc->b_intra_calculated )
+ {
+ x264_mb_analysis_t a;
+ x264_lowres_context_init( h, &a );
+ x264_slicetype_frame_cost( h, &a, &fenc, 0, 0, 0, 0 );
+ }
+ mcbuf = x264_weight_cost_init_luma( h, fenc, ref, h->mb.p_weight_buf[0] );
+ origscore = minscore = x264_weight_cost_luma( h, fenc, mcbuf, NULL );
+ }
+ else
+ {
+ pixel *dstu = h->mb.p_weight_buf[0];
+ pixel *dstv = h->mb.p_weight_buf[0]+fenc->i_stride[1]*fenc->i_lines[1];
+ /* Only initialize chroma data once. */
+ if( plane == 1 )
+ x264_weight_cost_init_chroma( h, fenc, ref, dstu, dstv );
+ mcbuf = plane == 1 ? dstu : dstv;
+ origscore = minscore = x264_weight_cost_chroma( h, fenc, mcbuf, NULL );
+ }
- found = 0;
- mindenom = weights[0].i_denom;
- minscale = weights[0].i_scale;
- minoff = 0;
- offset_search = x264_clip3( floor( fenc_mean - ref_mean * minscale / (1 << mindenom) + 0.5f*b_lookahead ), -128, 126 );
+ if( !minscore )
+ continue;
- if( !fenc->b_intra_calculated )
- {
- x264_mb_analysis_t a;
- x264_lowres_context_init( h, &a );
- x264_slicetype_frame_cost( h, &a, &fenc, 0, 0, 0, 0 );
- }
- uint8_t *mcbuf = x264_weight_cost_init_luma( h, fenc, ref, h->mb.p_weight_buf[0] );
- origscore = minscore = x264_weight_cost( h, fenc, mcbuf, 0 );
+ // This gives a slight improvement due to rounding errors but only tests
+ // one offset on lookahead.
+ // TODO: currently searches only offset +1. try other offsets/multipliers/combinations thereof?
+ offset_search = x264_clip3( fenc_mean - ref_mean * minscale / (1 << mindenom) + 0.5f * b_lookahead, -128, 126 );
+ for( int i_off = offset_search; i_off <= offset_search+!b_lookahead; i_off++ )
+ {
+ SET_WEIGHT( weights[plane], 1, minscale, mindenom, i_off );
+ unsigned int s;
+ if( plane )
+ s = x264_weight_cost_chroma( h, fenc, mcbuf, &weights[plane] );
+ else
+ s = x264_weight_cost_luma( h, fenc, mcbuf, &weights[plane] );
+ COPY3_IF_LT( minscore, s, minoff, i_off, found, 1 );
+ }
+ x264_emms();
- if( !minscore )
- {
- SET_WEIGHT( weights[0], 0, 1, 0, 0 );
- return;
- }
+ /* FIXME: More analysis can be done here on SAD vs. SATD termination. */
+ /* 0.2% termination derived experimentally to avoid weird weights in frames that are mostly intra. */
+ if( !found || (minscale == 1 << mindenom && minoff == 0) || (float)minscore / origscore > 0.998f )
+ {
+ SET_WEIGHT( weights[plane], 0, 1, 0, 0 );
+ continue;
+ }
+ else
+ SET_WEIGHT( weights[plane], 1, minscale, mindenom, minoff );
- // This gives a slight improvement due to rounding errors but only tests
- // one offset on lookahead.
- // TODO: currently searches only offset +1. try other offsets/multipliers/combinations thereof?
- for( int i_off = offset_search; i_off <= offset_search+!b_lookahead; i_off++ )
- {
- SET_WEIGHT( weights[0], 1, minscale, mindenom, i_off );
- unsigned int s = x264_weight_cost( h, fenc, mcbuf, &weights[0] );
- COPY3_IF_LT( minscore, s, minoff, i_off, found, 1 );
+ if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_FAKE && weights[0].weightfn && !plane )
+ fenc->f_weighted_cost_delta[i_delta_index] = (float)minscore / origscore;
}
- x264_emms();
- /* FIXME: More analysis can be done here on SAD vs. SATD termination. */
- /* 0.2% termination derived experimentally to avoid weird weights in frames that are mostly intra. */
- if( !found || (minscale == 1<<mindenom && minoff == 0) || (float)minscore / origscore > 0.998 )
+ //FIXME, what is the correct way to deal with this?
+ if( weights[1].weightfn && weights[2].weightfn && weights[1].i_denom != weights[2].i_denom )
{
- SET_WEIGHT( weights[0], 0, 1, 0, 0 );
- return;
+ int denom = X264_MIN( weights[1].i_denom, weights[2].i_denom );
+ int i;
+ for( i = 1; i <= 2; i++ )
+ {
+ weights[i].i_scale = x264_clip3( weights[i].i_scale >> ( weights[i].i_denom - denom ), 0, 255 );
+ weights[i].i_denom = denom;
+ h->mc.weight_cache( h, &weights[i] );
+ }
}
- else
- SET_WEIGHT( weights[0], 1, minscale, mindenom, minoff );
-
- if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_FAKE && weights[0].weightfn )
- fenc->f_weighted_cost_delta[i_delta_index] = (float)minscore / origscore;
if( weights[0].weightfn && b_lookahead )
{
//scale lowres in lookahead for slicetype_frame_cost
- uint8_t *src = ref->buffer_lowres[0];
- uint8_t *dst = h->mb.p_weight_buf[0];
+ pixel *src = ref->buffer_lowres[0];
+ pixel *dst = h->mb.p_weight_buf[0];
int width = ref->i_width_lowres + PADH*2;
int height = ref->i_lines_lowres + PADV*2;
x264_weight_scale_plane( h, dst, ref->i_stride_lowres, src, ref->i_stride_lowres,
const int b_bidir = (b < p1);
const int i_mb_x = h->mb.i_mb_x;
const int i_mb_y = h->mb.i_mb_y;
- const int i_mb_stride = h->sps->i_mb_width;
+ const int i_mb_stride = h->mb.i_mb_width;
const int i_mb_xy = i_mb_x + i_mb_y * i_mb_stride;
const int i_stride = fenc->i_stride_lowres;
const int i_pel_offset = 8 * (i_mb_x + i_mb_y * i_stride);
const int i_bipred_weight = h->param.analyse.b_weighted_bipred ? 64 - (dist_scale_factor>>2) : 32;
int16_t (*fenc_mvs[2])[2] = { &frames[b]->lowres_mvs[0][b-p0-1][i_mb_xy], &frames[b]->lowres_mvs[1][p1-b-1][i_mb_xy] };
int (*fenc_costs[2]) = { &frames[b]->lowres_mv_costs[0][b-p0-1][i_mb_xy], &frames[b]->lowres_mv_costs[1][p1-b-1][i_mb_xy] };
- int b_frame_score_mb = (i_mb_x > 0 && i_mb_x < h->sps->i_mb_width - 1 &&
- i_mb_y > 0 && i_mb_y < h->sps->i_mb_height - 1) ||
- h->sps->i_mb_width <= 2 || h->sps->i_mb_height <= 2;
+ int b_frame_score_mb = (i_mb_x > 0 && i_mb_x < h->mb.i_mb_width - 1 &&
+ i_mb_y > 0 && i_mb_y < h->mb.i_mb_height - 1) ||
+ h->mb.i_mb_width <= 2 || h->mb.i_mb_height <= 2;
- ALIGNED_ARRAY_8( uint8_t, pix1,[9*FDEC_STRIDE] );
- uint8_t *pix2 = pix1+8;
+ ALIGNED_ARRAY_16( pixel, pix1,[9*FDEC_STRIDE] );
+ pixel *pix2 = pix1+8;
x264_me_t m[2];
int i_bcost = COST_MAX;
int list_used = 0;
// no need for h->mb.mv_min[]
h->mb.mv_min_fpel[0] = -8*h->mb.i_mb_x - 4;
- h->mb.mv_max_fpel[0] = 8*( h->sps->i_mb_width - h->mb.i_mb_x - 1 ) + 4;
+ h->mb.mv_max_fpel[0] = 8*( h->mb.i_mb_width - h->mb.i_mb_x - 1 ) + 4;
h->mb.mv_min_spel[0] = 4*( h->mb.mv_min_fpel[0] - 8 );
h->mb.mv_max_spel[0] = 4*( h->mb.mv_max_fpel[0] + 8 );
- if( h->mb.i_mb_x >= h->sps->i_mb_width - 2 )
+ if( h->mb.i_mb_x >= h->mb.i_mb_width - 2 )
{
h->mb.mv_min_fpel[1] = -8*h->mb.i_mb_y - 4;
- h->mb.mv_max_fpel[1] = 8*( h->sps->i_mb_height - h->mb.i_mb_y - 1 ) + 4;
+ h->mb.mv_max_fpel[1] = 8*( h->mb.i_mb_height - h->mb.i_mb_y - 1 ) + 4;
h->mb.mv_min_spel[1] = 4*( h->mb.mv_min_fpel[1] - 8 );
h->mb.mv_max_spel[1] = 4*( h->mb.mv_max_fpel[1] + 8 );
}
{ \
int hpel_idx1 = (((mv0)[0]&2)>>1) + ((mv0)[1]&2); \
int hpel_idx2 = (((mv1)[0]&2)>>1) + ((mv1)[1]&2); \
- uint8_t *src1 = m[0].p_fref[hpel_idx1] + ((mv0)[0]>>2) + ((mv0)[1]>>2) * m[0].i_stride[0]; \
- uint8_t *src2 = m[1].p_fref[hpel_idx2] + ((mv1)[0]>>2) + ((mv1)[1]>>2) * m[1].i_stride[0]; \
+ pixel *src1 = m[0].p_fref[hpel_idx1] + ((mv0)[0]>>2) + ((mv0)[1]>>2) * m[0].i_stride[0]; \
+ pixel *src2 = m[1].p_fref[hpel_idx2] + ((mv1)[0]>>2) + ((mv1)[1]>>2) * m[1].i_stride[0]; \
h->mc.avg[PIXEL_8x8]( pix1, 16, src1, m[0].i_stride[0], src2, m[1].i_stride[0], i_bipred_weight ); \
} \
else \
{ \
int stride1 = 16, stride2 = 16; \
- uint8_t *src1, *src2; \
+ pixel *src1, *src2; \
src1 = h->mc.get_ref( pix1, &stride1, m[0].p_fref, m[0].i_stride[0], \
(mv0)[0], (mv0)[1], 8, 8, w ); \
src2 = h->mc.get_ref( pix2, &stride2, m[1].p_fref, m[1].i_stride[0], \
(mv1)[0], (mv1)[1], 8, 8, w ); \
h->mc.avg[PIXEL_8x8]( pix1, 16, src1, stride1, src2, stride2, i_bipred_weight ); \
} \
- i_cost = penalty + h->pixf.mbcmp[PIXEL_8x8]( \
+ i_cost = penalty * a->i_lambda + h->pixf.mbcmp[PIXEL_8x8]( \
m[0].p_fenc[0], FENC_STRIDE, pix1, 16 ); \
COPY2_IF_LT( i_bcost, i_cost, list_used, 3 ); \
}
/* Reverse-order MV prediction. */
M32( mvc[0] ) = 0;
- M32( mvc[1] ) = 0;
M32( mvc[2] ) = 0;
#define MVC(mv) { CP32( mvc[i_mvc], mv ); i_mvc++; }
- if( i_mb_x < h->sps->i_mb_width - 1 )
- MVC(fenc_mv[1]);
- if( i_mb_y < h->sps->i_mb_height - 1 )
+ if( i_mb_x < h->mb.i_mb_width - 1 )
+ MVC( fenc_mv[1] );
+ if( i_mb_y < h->mb.i_mb_height - 1 )
{
- MVC(fenc_mv[i_mb_stride]);
+ MVC( fenc_mv[i_mb_stride] );
if( i_mb_x > 0 )
- MVC(fenc_mv[i_mb_stride-1]);
- if( i_mb_x < h->sps->i_mb_width - 1 )
- MVC(fenc_mv[i_mb_stride+1]);
+ MVC( fenc_mv[i_mb_stride-1] );
+ if( i_mb_x < h->mb.i_mb_width - 1 )
+ MVC( fenc_mv[i_mb_stride+1] );
}
#undef MVC
- x264_median_mv( m[l].mvp, mvc[0], mvc[1], mvc[2] );
- x264_me_search( h, &m[l], mvc, i_mvc );
+ if( i_mvc <= 1 )
+ CP32( m[l].mvp, mvc[0] );
+ else
+ x264_median_mv( m[l].mvp, mvc[0], mvc[1], mvc[2] );
- m[l].cost -= 2; // remove mvcost from skip mbs
+ /* Fast skip for cases of near-zero residual. Shortcut: don't bother except in the mv0 case,
+ * since anything else is likely to have enough residual to not trigger the skip. */
+ if( !M32( m[l].mvp ) )
+ {
+ m[l].cost = h->pixf.mbcmp[PIXEL_8x8]( m[l].p_fenc[0], FENC_STRIDE, m[l].p_fref[0], m[l].i_stride[0] );
+ if( m[l].cost < 64 )
+ {
+ M32( m[l].mv ) = 0;
+ goto skip_motionest;
+ }
+ }
+
+ x264_me_search( h, &m[l], mvc, i_mvc );
+ m[l].cost -= 2 * a->i_lambda; // remove mvcost from skip mbs
if( M32( m[l].mv ) )
- m[l].cost += 5;
+ m[l].cost += 5 * a->i_lambda;
+
+skip_motionest:
CP32( fenc_mvs[l], m[l].mv );
*fenc_costs[l] = m[l].cost;
}
lowres_intra_mb:
if( !fenc->b_intra_calculated )
{
- ALIGNED_ARRAY_16( uint8_t, edge,[33] );
- uint8_t *pix = &pix1[8+FDEC_STRIDE - 1];
- uint8_t *src = &fenc->lowres[0][i_pel_offset - 1];
- const int intra_penalty = 5;
+ ALIGNED_ARRAY_16( pixel, edge,[33] );
+ pixel *pix = &pix1[8+FDEC_STRIDE - 1];
+ pixel *src = &fenc->lowres[0][i_pel_offset - 1];
+ const int intra_penalty = 5 * a->i_lambda;
int satds[3];
- memcpy( pix-FDEC_STRIDE, src-i_stride, 17 );
+ memcpy( pix-FDEC_STRIDE, src-i_stride, 17 * sizeof(pixel) );
for( int i = 0; i < 8; i++ )
pix[i*FDEC_STRIDE] = src[i*i_stride];
pix++;
- if( h->pixf.intra_mbcmp_x3_8x8c )
- h->pixf.intra_mbcmp_x3_8x8c( h->mb.pic.p_fenc[0], pix, satds );
- else
- {
- for( int i = 0; i < 3; i++ )
- {
- h->predict_8x8c[i]( pix );
- satds[i] = h->pixf.mbcmp[PIXEL_8x8]( pix, FDEC_STRIDE, h->mb.pic.p_fenc[0], FENC_STRIDE );
- }
- }
+ h->pixf.intra_mbcmp_x3_8x8c( h->mb.pic.p_fenc[0], pix, satds );
int i_icost = X264_MIN3( satds[0], satds[1], satds[2] );
if( h->param.analyse.i_subpel_refine > 1 )
}
}
- fenc->lowres_costs[b-p0][p1-b][i_mb_xy] = i_bcost + (list_used << LOWRES_COST_SHIFT);
+ fenc->lowres_costs[b-p0][p1-b][i_mb_xy] = X264_MIN( i_bcost, LOWRES_COST_MASK ) + (list_used << LOWRES_COST_SHIFT);
}
#undef TRY_BIDIR
#define NUM_MBS\
- (h->sps->i_mb_width > 2 && h->sps->i_mb_height > 2 ?\
- (h->sps->i_mb_width - 2) * (h->sps->i_mb_height - 2) :\
- h->sps->i_mb_width * h->sps->i_mb_height)
+ (h->mb.i_mb_width > 2 && h->mb.i_mb_height > 2 ?\
+ (h->mb.i_mb_width - 2) * (h->mb.i_mb_height - 2) :\
+ h->mb.i_mb_width * h->mb.i_mb_height)
static int x264_slicetype_frame_cost( x264_t *h, x264_mb_analysis_t *a,
x264_frame_t **frames, int p0, int p1, int b,
do_search[1] = b != p1 && frames[b]->lowres_mvs[1][p1-b-1][0][0] == 0x7FFF;
if( do_search[0] )
{
- if( ( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART
- || h->param.analyse.i_weighted_pred == X264_WEIGHTP_FAKE ) && b == p1 )
+ if( h->param.analyse.i_weighted_pred && b == p1 )
{
x264_emms();
x264_weights_analyse( h, frames[b], frames[p0], 1 );
/* The edge mbs seem to reduce the predictive quality of the
* whole frame's score, but are needed for a spatial distribution. */
if( h->param.rc.b_mb_tree || h->param.rc.i_vbv_buffer_size ||
- h->sps->i_mb_width <= 2 || h->sps->i_mb_height <= 2 )
+ h->mb.i_mb_width <= 2 || h->mb.i_mb_height <= 2 )
{
- for( h->mb.i_mb_y = h->sps->i_mb_height - 1; h->mb.i_mb_y >= 0; h->mb.i_mb_y-- )
+ for( h->mb.i_mb_y = h->mb.i_mb_height - 1; h->mb.i_mb_y >= 0; h->mb.i_mb_y-- )
{
row_satd[h->mb.i_mb_y] = 0;
if( !frames[b]->b_intra_calculated )
row_satd_intra[h->mb.i_mb_y] = 0;
- for( h->mb.i_mb_x = h->sps->i_mb_width - 1; h->mb.i_mb_x >= 0; h->mb.i_mb_x-- )
+ for( h->mb.i_mb_x = h->mb.i_mb_width - 1; h->mb.i_mb_x >= 0; h->mb.i_mb_x-- )
x264_slicetype_mb_cost( h, a, frames, p0, p1, b, dist_scale_factor, do_search, w );
}
}
else
{
- for( h->mb.i_mb_y = h->sps->i_mb_height - 2; h->mb.i_mb_y >= 1; h->mb.i_mb_y-- )
- for( h->mb.i_mb_x = h->sps->i_mb_width - 2; h->mb.i_mb_x >= 1; h->mb.i_mb_x-- )
+ for( h->mb.i_mb_y = h->mb.i_mb_height - 2; h->mb.i_mb_y >= 1; h->mb.i_mb_y-- )
+ for( h->mb.i_mb_x = h->mb.i_mb_width - 2; h->mb.i_mb_x >= 1; h->mb.i_mb_x-- )
x264_slicetype_mb_cost( h, a, frames, p0, p1, b, dist_scale_factor, do_search, w );
}
int *row_satd = frames[b]->i_row_satds[b-p0][p1-b];
float *qp_offset = IS_X264_TYPE_B(frames[b]->i_type) ? frames[b]->f_qp_offset_aq : frames[b]->f_qp_offset;
x264_emms();
- for( h->mb.i_mb_y = h->sps->i_mb_height - 1; h->mb.i_mb_y >= 0; h->mb.i_mb_y-- )
+ for( h->mb.i_mb_y = h->mb.i_mb_height - 1; h->mb.i_mb_y >= 0; h->mb.i_mb_y-- )
{
row_satd[ h->mb.i_mb_y ] = 0;
- for( h->mb.i_mb_x = h->sps->i_mb_width - 1; h->mb.i_mb_x >= 0; h->mb.i_mb_x-- )
+ for( h->mb.i_mb_x = h->mb.i_mb_width - 1; h->mb.i_mb_x >= 0; h->mb.i_mb_x-- )
{
int i_mb_xy = h->mb.i_mb_x + h->mb.i_mb_y*h->mb.i_mb_stride;
int i_mb_cost = frames[b]->lowres_costs[b-p0][p1-b][i_mb_xy] & LOWRES_COST_MASK;
float qp_adj = qp_offset[i_mb_xy];
i_mb_cost = (i_mb_cost * x264_exp2fix8(qp_adj) + 128) >> 8;
row_satd[ h->mb.i_mb_y ] += i_mb_cost;
- if( (h->mb.i_mb_y > 0 && h->mb.i_mb_y < h->sps->i_mb_height - 1 &&
- h->mb.i_mb_x > 0 && h->mb.i_mb_x < h->sps->i_mb_width - 1) ||
- h->sps->i_mb_width <= 2 || h->sps->i_mb_height <= 2 )
+ if( (h->mb.i_mb_y > 0 && h->mb.i_mb_y < h->mb.i_mb_height - 1 &&
+ h->mb.i_mb_x > 0 && h->mb.i_mb_x < h->mb.i_mb_width - 1) ||
+ h->mb.i_mb_width <= 2 || h->mb.i_mb_height <= 2 )
{
i_score += i_mb_cost;
}
/* For non-reffed frames the source costs are always zero, so just memset one row and re-use it. */
if( !referenced )
- memset( frames[b]->i_propagate_cost, 0, h->sps->i_mb_width * sizeof(uint16_t) );
+ memset( frames[b]->i_propagate_cost, 0, h->mb.i_mb_width * sizeof(uint16_t) );
- for( h->mb.i_mb_y = 0; h->mb.i_mb_y < h->sps->i_mb_height; h->mb.i_mb_y++ )
+ for( h->mb.i_mb_y = 0; h->mb.i_mb_y < h->mb.i_mb_height; h->mb.i_mb_y++ )
{
int mb_index = h->mb.i_mb_y*h->mb.i_mb_stride;
h->mc.mbtree_propagate_cost( buf, propagate_cost,
frames[b]->i_intra_cost+mb_index, frames[b]->lowres_costs[b-p0][p1-b]+mb_index,
- frames[b]->i_inv_qscale_factor+mb_index, h->sps->i_mb_width );
+ frames[b]->i_inv_qscale_factor+mb_index, h->mb.i_mb_width );
if( referenced )
- propagate_cost += h->sps->i_mb_width;
- for( h->mb.i_mb_x = 0; h->mb.i_mb_x < h->sps->i_mb_width; h->mb.i_mb_x++, mb_index++ )
+ propagate_cost += h->mb.i_mb_width;
+ for( h->mb.i_mb_x = 0; h->mb.i_mb_x < h->mb.i_mb_width; h->mb.i_mb_x++, mb_index++ )
{
int propagate_amount = buf[h->mb.i_mb_x];
/* Don't propagate for an intra block. */
/* We could just clip the MVs, but pixels that lie outside the frame probably shouldn't
* be counted. */
- if( mbx < h->sps->i_mb_width-1 && mby < h->sps->i_mb_height-1 && mbx >= 0 && mby >= 0 )
+ if( mbx < h->mb.i_mb_width-1 && mby < h->mb.i_mb_height-1 && mbx >= 0 && mby >= 0 )
{
CLIP_ADD( ref_costs[list][idx0], (listamount*idx0weight+512)>>10 );
CLIP_ADD( ref_costs[list][idx1], (listamount*idx1weight+512)>>10 );
}
else /* Check offsets individually */
{
- if( mbx < h->sps->i_mb_width && mby < h->sps->i_mb_height && mbx >= 0 && mby >= 0 )
+ if( mbx < h->mb.i_mb_width && mby < h->mb.i_mb_height && mbx >= 0 && mby >= 0 )
CLIP_ADD( ref_costs[list][idx0], (listamount*idx0weight+512)>>10 );
- if( mbx+1 < h->sps->i_mb_width && mby < h->sps->i_mb_height && mbx+1 >= 0 && mby >= 0 )
+ if( mbx+1 < h->mb.i_mb_width && mby < h->mb.i_mb_height && mbx+1 >= 0 && mby >= 0 )
CLIP_ADD( ref_costs[list][idx1], (listamount*idx1weight+512)>>10 );
- if( mbx < h->sps->i_mb_width && mby+1 < h->sps->i_mb_height && mbx >= 0 && mby+1 >= 0 )
+ if( mbx < h->mb.i_mb_width && mby+1 < h->mb.i_mb_height && mbx >= 0 && mby+1 >= 0 )
CLIP_ADD( ref_costs[list][idx2], (listamount*idx2weight+512)>>10 );
- if( mbx+1 < h->sps->i_mb_width && mby+1 < h->sps->i_mb_height && mbx+1 >= 0 && mby+1 >= 0 )
+ if( mbx+1 < h->mb.i_mb_width && mby+1 < h->mb.i_mb_height && mbx+1 >= 0 && mby+1 >= 0 )
CLIP_ADD( ref_costs[list][idx3], (listamount*idx3weight+512)>>10 );
}
}
}
}
- if( h->param.rc.i_vbv_buffer_size && referenced )
+ if( h->param.rc.i_vbv_buffer_size && h->param.rc.i_lookahead && referenced )
x264_macroblock_tree_finish( h, frames[b], b == p1 ? b - p0 : 0 );
}
int idx = !b_intra;
int last_nonb, cur_nonb = 1;
int bframes = 0;
- int i = num_frames - 1;
+ int i = num_frames;
+
if( b_intra )
x264_slicetype_frame_cost( h, a, frames, 0, 0, 0, 0 );
i--;
last_nonb = i;
- if( last_nonb < idx )
- return;
+ /* Lookaheadless MB-tree is not a theoretically distinct case; the same extrapolation could
+ * be applied to the end of a lookahead buffer of any size. However, it's most needed when
+ * lookahead=0, so that's what's currently implemented. */
+ if( !h->param.rc.i_lookahead )
+ {
+ if( b_intra )
+ {
+ memset( frames[0]->i_propagate_cost, 0, h->mb.i_mb_count * sizeof(uint16_t) );
+ memcpy( frames[0]->f_qp_offset, frames[0]->f_qp_offset_aq, h->mb.i_mb_count * sizeof(float) );
+ return;
+ }
+ XCHG( uint16_t*, frames[last_nonb]->i_propagate_cost, frames[0]->i_propagate_cost );
+ memset( frames[0]->i_propagate_cost, 0, h->mb.i_mb_count * sizeof(uint16_t) );
+ }
+ else
+ {
+ if( last_nonb < idx )
+ return;
+ memset( frames[last_nonb]->i_propagate_cost, 0, h->mb.i_mb_count * sizeof(uint16_t) );
+ }
- memset( frames[last_nonb]->i_propagate_cost, 0, h->mb.i_mb_count * sizeof(uint16_t) );
while( i-- > idx )
{
cur_nonb = i;
last_nonb = cur_nonb;
}
+ if( !h->param.rc.i_lookahead )
+ {
+ x264_macroblock_tree_propagate( h, frames, 0, last_nonb, last_nonb, 1 );
+ XCHG( uint16_t*, frames[last_nonb]->i_propagate_cost, frames[0]->i_propagate_cost );
+ }
+
x264_macroblock_tree_finish( h, frames[last_nonb], last_nonb );
if( h->param.i_bframe_pyramid && bframes > 1 && !h->param.rc.i_vbv_buffer_size )
x264_macroblock_tree_finish( h, frames[last_nonb+(bframes+1)/2], 0 );
int i_gop_size = frame->i_frame - h->lookahead->i_last_keyframe;
float f_thresh_max = h->param.i_scenecut_threshold / 100.0;
/* magic numbers pulled out of thin air */
- float f_thresh_min = f_thresh_max * h->param.i_keyint_min
- / ( h->param.i_keyint_max * 4 );
+ float f_thresh_min = f_thresh_max * 0.25;
int res;
if( h->param.i_keyint_min == h->param.i_keyint_max )
- f_thresh_min= f_thresh_max;
- if( i_gop_size < h->param.i_keyint_min / 4 || h->param.b_intra_refresh )
+ f_thresh_min = f_thresh_max;
+ if( i_gop_size <= h->param.i_keyint_min / 4 || h->param.b_intra_refresh )
f_bias = f_thresh_min / 4;
else if( i_gop_size <= h->param.i_keyint_min )
f_bias = f_thresh_min * i_gop_size / h->param.i_keyint_min;
{
f_bias = f_thresh_min
+ ( f_thresh_max - f_thresh_min )
- * ( i_gop_size - h->param.i_keyint_min )
- / ( h->param.i_keyint_max - h->param.i_keyint_min ) ;
+ * ( i_gop_size - h->param.i_keyint_min )
+ / ( h->param.i_keyint_max - h->param.i_keyint_min );
}
res = pcost >= (1.0 - f_bias) * icost;
return res;
}
-static int scenecut( x264_t *h, x264_mb_analysis_t *a, x264_frame_t **frames, int p0, int p1, int real_scenecut, int num_frames )
+static int scenecut( x264_t *h, x264_mb_analysis_t *a, x264_frame_t **frames, int p0, int p1, int real_scenecut, int num_frames, int i_max_search )
{
/* Only do analysis during a normal scenecut check. */
if( real_scenecut && h->param.i_bframe )
{
- int maxp1 = p0 + 1;
+ int origmaxp1 = p0 + 1;
/* Look ahead to avoid coding short flashes as scenecuts. */
if( h->param.i_bframe_adaptive == X264_B_ADAPT_TRELLIS )
/* Don't analyse any more frames than the trellis would have covered. */
- maxp1 += h->param.i_bframe;
+ origmaxp1 += h->param.i_bframe;
else
- maxp1++;
- maxp1 = X264_MIN( maxp1, num_frames );
+ origmaxp1++;
+ int maxp1 = X264_MIN( origmaxp1, num_frames );
/* Where A and B are scenes: AAAAAABBBAAAAAA
* If BBB is shorter than (maxp1-p0), it is detected as a flash
/* Where A-F are scenes: AAAAABBCCDDEEFFFFFF
* If each of BB ... EE are shorter than (maxp1-p0), they are
* detected as flashes and not considered scenecuts.
- * Instead, the first F frame becomes a scenecut. */
- for( int curp0 = p0; curp0 < maxp1; curp0++ )
- if( scenecut_internal( h, a, frames, curp0, maxp1, 0 ) )
+ * Instead, the first F frame becomes a scenecut.
+ * If the video ends before F, no frame becomes a scenecut. */
+ for( int curp0 = p0; curp0 <= maxp1; curp0++ )
+ if( origmaxp1 > i_max_search || (curp0 < maxp1 && scenecut_internal( h, a, frames, curp0, maxp1, 0 )) )
/* If cur_p0 is the p0 of a scenecut, it cannot be the p1 of a scenecut. */
frames[curp0]->b_scenecut = 0;
}
{
x264_mb_analysis_t a;
x264_frame_t *frames[X264_LOOKAHEAD_MAX+3] = { NULL, };
- int num_frames, orig_num_frames, keyint_limit, idr_frame_type, framecnt;
+ int num_frames, orig_num_frames, keyint_limit, framecnt;
int i_mb_count = NUM_MBS;
int cost1p0, cost2p0, cost1b1, cost2p1;
int i_max_search = X264_MIN( h->lookahead->next.i_size, X264_LOOKAHEAD_MAX );
+ int vbv_lookahead = h->param.rc.i_vbv_buffer_size && h->param.rc.i_lookahead;
if( h->param.b_deterministic )
i_max_search = X264_MIN( i_max_search, h->lookahead->i_slicetype_length + !keyframe );
frames[framecnt+1] = h->lookahead->next.list[framecnt];
if( !framecnt )
+ {
+ if( h->param.rc.b_mb_tree )
+ x264_macroblock_tree( h, &a, frames, 0, keyframe );
return;
+ }
keyint_limit = h->param.i_keyint_max - frames[0]->i_frame + h->lookahead->i_last_keyframe - 1;
orig_num_frames = num_frames = h->param.b_intra_refresh ? framecnt : X264_MIN( framecnt, keyint_limit );
x264_lowres_context_init( h, &a );
- idr_frame_type = frames[1]->i_frame - h->lookahead->i_last_keyframe >= h->param.i_keyint_min ? X264_TYPE_IDR : X264_TYPE_I;
/* This is important psy-wise: if we have a non-scenecut keyframe,
* there will be significant visual artifacts if the frames just before
* go down in quality due to being referenced less, despite it being
* more RD-optimal. */
- if( (h->param.analyse.b_psy && h->param.rc.b_mb_tree) || h->param.rc.i_vbv_buffer_size )
+ if( (h->param.analyse.b_psy && h->param.rc.b_mb_tree) || vbv_lookahead )
num_frames = framecnt;
- else if( num_frames == 1 )
- {
- frames[1]->i_type = X264_TYPE_P;
- if( h->param.i_scenecut_threshold && scenecut( h, &a, frames, 0, 1, 1, orig_num_frames ) )
- frames[1]->i_type = idr_frame_type;
- return;
- }
else if( num_frames == 0 )
{
- frames[1]->i_type = idr_frame_type;
+ frames[1]->i_type = X264_TYPE_I;
return;
}
int num_bframes = 0;
int num_analysed_frames = num_frames;
int reset_start;
- if( h->param.i_scenecut_threshold && scenecut( h, &a, frames, 0, 1, 1, orig_num_frames ) )
+ if( h->param.i_scenecut_threshold && scenecut( h, &a, frames, 0, 1, 1, orig_num_frames, i_max_search ) )
{
- frames[1]->i_type = idr_frame_type;
+ frames[1]->i_type = X264_TYPE_I;
return;
}
/* Check scenecut on the first minigop. */
for( int j = 1; j < num_bframes+1; j++ )
- if( h->param.i_scenecut_threshold && scenecut( h, &a, frames, j, j+1, 0, orig_num_frames ) )
+ if( h->param.i_scenecut_threshold && scenecut( h, &a, frames, j, j+1, 0, orig_num_frames, i_max_search ) )
{
frames[j]->i_type = X264_TYPE_P;
num_analysed_frames = j;
/* Enforce keyframe limit. */
if( !h->param.b_intra_refresh )
- for( int j = 0; j < num_frames; j++ )
+ for( int i = keyint_limit+1; i <= num_frames; i += h->param.i_keyint_max )
{
- if( ((j-keyint_limit) % h->param.i_keyint_max) == 0 )
- {
- if( j && h->param.i_keyint_max > 1 )
- frames[j]->i_type = X264_TYPE_P;
- frames[j+1]->i_type = X264_TYPE_IDR;
- reset_start = X264_MIN( reset_start, j+2 );
- }
+ frames[i]->i_type = X264_TYPE_I;
+ reset_start = X264_MIN( reset_start, i+1 );
+ if( h->param.i_open_gop == X264_OPEN_GOP_BLURAY )
+ while( IS_X264_TYPE_B( frames[i-1]->i_type ) )
+ i--;
}
- if( h->param.rc.i_vbv_buffer_size )
+ if( vbv_lookahead )
x264_vbv_lookahead( h, &a, frames, num_frames, keyframe );
/* Restore frametypes for all frames that haven't actually been decided yet. */
int lookahead_size = h->lookahead->next.i_size;
- if( h->param.rc.i_rc_method == X264_RC_ABR || h->param.rc.b_stat_write || h->param.rc.i_vbv_buffer_size )
+ for( int i = 0; i < h->lookahead->next.i_size; i++ )
{
- for( int i = 0; i < h->lookahead->next.i_size; i++ )
+ if( h->param.b_vfr_input )
{
- if( h->param.b_vfr_input )
- {
- if( lookahead_size-- > 1 )
- h->lookahead->next.list[i]->i_duration = 2 * (h->lookahead->next.list[i+1]->i_pts - h->lookahead->next.list[i]->i_pts);
- else
- h->lookahead->next.list[i]->i_duration = h->i_prev_duration;
- }
+ if( lookahead_size-- > 1 )
+ h->lookahead->next.list[i]->i_duration = 2 * (h->lookahead->next.list[i+1]->i_pts - h->lookahead->next.list[i]->i_pts);
else
- h->lookahead->next.list[i]->i_duration = delta_tfi_divisor[h->lookahead->next.list[i]->i_pic_struct];
- h->i_prev_duration = h->lookahead->next.list[i]->i_duration;
-
- if( h->lookahead->next.list[i]->i_frame > h->i_disp_fields_last_frame && lookahead_size > 0 )
- {
- h->lookahead->next.list[i]->i_field_cnt = h->i_disp_fields;
- h->i_disp_fields += h->lookahead->next.list[i]->i_duration;
- h->i_disp_fields_last_frame = h->lookahead->next.list[i]->i_frame;
- }
- else if( lookahead_size == 0 )
- {
- h->lookahead->next.list[i]->i_field_cnt = h->i_disp_fields;
h->lookahead->next.list[i]->i_duration = h->i_prev_duration;
- }
+ }
+ else
+ h->lookahead->next.list[i]->i_duration = delta_tfi_divisor[h->lookahead->next.list[i]->i_pic_struct];
+ h->i_prev_duration = h->lookahead->next.list[i]->i_duration;
+ h->lookahead->next.list[i]->f_duration = (double)h->lookahead->next.list[i]->i_duration
+ * h->sps->vui.i_num_units_in_tick
+ / h->sps->vui.i_time_scale;
+
+ if( h->lookahead->next.list[i]->i_frame > h->i_disp_fields_last_frame && lookahead_size > 0 )
+ {
+ h->lookahead->next.list[i]->i_field_cnt = h->i_disp_fields;
+ h->i_disp_fields += h->lookahead->next.list[i]->i_duration;
+ h->i_disp_fields_last_frame = h->lookahead->next.list[i]->i_frame;
+ }
+ else if( lookahead_size == 0 )
+ {
+ h->lookahead->next.list[i]->i_field_cnt = h->i_disp_fields;
+ h->lookahead->next.list[i]->i_duration = h->i_prev_duration;
}
}
frm->i_frame, x264_b_pyramid_names[h->param.i_bframe_pyramid], h->param.i_frame_reference );
}
+ if( frm->i_type == X264_TYPE_KEYFRAME )
+ frm->i_type = h->param.i_open_gop ? X264_TYPE_I : X264_TYPE_IDR;
+
/* Limit GOP size */
if( (!h->param.b_intra_refresh || frm->i_frame == 0) && frm->i_frame - h->lookahead->i_last_keyframe >= h->param.i_keyint_max )
{
- if( frm->i_type == X264_TYPE_AUTO )
+ if( frm->i_type == X264_TYPE_AUTO || frm->i_type == X264_TYPE_I )
+ frm->i_type = h->param.i_open_gop && h->lookahead->i_last_keyframe >= 0 ? X264_TYPE_I : X264_TYPE_IDR;
+ int warn = frm->i_type != X264_TYPE_IDR;
+ if( warn && h->param.i_open_gop )
+ warn &= frm->i_type != X264_TYPE_I;
+ if( warn )
+ x264_log( h, X264_LOG_WARNING, "specified frame type (%d) at %d is not compatible with keyframe interval\n", frm->i_type, frm->i_frame );
+ }
+ if( frm->i_type == X264_TYPE_I && frm->i_frame - h->lookahead->i_last_keyframe >= h->param.i_keyint_min )
+ {
+ if( h->param.i_open_gop )
+ {
+ h->lookahead->i_last_keyframe = frm->i_frame; // Use display order
+ if( h->param.i_open_gop == X264_OPEN_GOP_BLURAY )
+ h->lookahead->i_last_keyframe -= bframes; // Use bluray order
+ frm->b_keyframe = 1;
+ }
+ else
frm->i_type = X264_TYPE_IDR;
- if( frm->i_type != X264_TYPE_IDR )
- x264_log( h, X264_LOG_WARNING, "specified frame type (%d) is not compatible with keyframe interval\n", frm->i_type );
}
if( frm->i_type == X264_TYPE_IDR )
{
/* Analyse for weighted P frames */
if( !h->param.rc.b_stat_read && h->lookahead->next.list[bframes]->i_type == X264_TYPE_P
- && h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
+ && h->param.analyse.i_weighted_pred >= X264_WEIGHTP_SIMPLE )
{
x264_emms();
x264_weights_analyse( h, h->lookahead->next.list[bframes], h->lookahead->last_nonb, 0 );
int i_coded = h->lookahead->next.list[0]->i_frame;
if( bframes )
{
- int index[] = { brefs+1, 1 };
+ int idx_list[] = { brefs+1, 1 };
for( int i = 0; i < bframes; i++ )
{
- int idx = index[h->lookahead->next.list[i]->i_type == X264_TYPE_BREF]++;
+ int idx = idx_list[h->lookahead->next.list[i]->i_type == X264_TYPE_BREF]++;
frames[idx] = h->lookahead->next.list[i];
frames[idx]->i_reordered_pts = h->lookahead->next.list[idx]->i_pts;
}
for( int i = 0; i <= bframes; i++ )
{
h->lookahead->next.list[i]->i_coded = i_coded++;
- if( h->param.rc.i_rc_method == X264_RC_ABR || h->param.rc.b_stat_write || h->param.rc.i_vbv_buffer_size )
+ if( i )
{
- if( i )
- {
- x264_calculate_durations( h, h->lookahead->next.list[i], h->lookahead->next.list[i-1], &h->i_cpb_delay, &h->i_coded_fields );
- h->lookahead->next.list[0]->f_planned_cpb_duration[i-1] = (double)h->lookahead->next.list[i-1]->i_cpb_duration *
- h->sps->vui.i_num_units_in_tick / h->sps->vui.i_time_scale;
- }
- else
- x264_calculate_durations( h, h->lookahead->next.list[i], NULL, &h->i_cpb_delay, &h->i_coded_fields );
-
- h->lookahead->next.list[0]->f_planned_cpb_duration[i] = (double)h->lookahead->next.list[i]->i_cpb_duration *
- h->sps->vui.i_num_units_in_tick / h->sps->vui.i_time_scale;
+ x264_calculate_durations( h, h->lookahead->next.list[i], h->lookahead->next.list[i-1], &h->i_cpb_delay, &h->i_coded_fields );
+ h->lookahead->next.list[0]->f_planned_cpb_duration[i-1] = (double)h->lookahead->next.list[i-1]->i_cpb_duration *
+ h->sps->vui.i_num_units_in_tick / h->sps->vui.i_time_scale;
}
+ else
+ x264_calculate_durations( h, h->lookahead->next.list[i], NULL, &h->i_cpb_delay, &h->i_coded_fields );
+
+ h->lookahead->next.list[0]->f_planned_cpb_duration[i] = (double)h->lookahead->next.list[i]->i_cpb_duration *
+ h->sps->vui.i_num_units_in_tick / h->sps->vui.i_time_scale;
}
}
h->fenc->i_row_satd = h->fenc->i_row_satds[b-p0][p1-b];
h->fdec->i_row_satd = h->fdec->i_row_satds[b-p0][p1-b];
h->fdec->i_satd = cost;
- memcpy( h->fdec->i_row_satd, h->fenc->i_row_satd, h->sps->i_mb_height * sizeof(int) );
+ memcpy( h->fdec->i_row_satd, h->fenc->i_row_satd, h->mb.i_mb_height * sizeof(int) );
if( !IS_X264_TYPE_I(h->fenc->i_type) )
- memcpy( h->fdec->i_row_satds[0][0], h->fenc->i_row_satds[0][0], h->sps->i_mb_height * sizeof(int) );
+ memcpy( h->fdec->i_row_satds[0][0], h->fenc->i_row_satds[0][0], h->mb.i_mb_height * sizeof(int) );
if( h->param.b_intra_refresh && h->param.rc.i_vbv_buffer_size && h->fenc->i_type == X264_TYPE_P )
{
int ip_factor = 256 * h->param.rc.f_ip_factor; /* fix8 */
- for( int y = 0; y < h->sps->i_mb_height; y++ )
+ for( int y = 0; y < h->mb.i_mb_height; y++ )
{
int mb_xy = y * h->mb.i_mb_stride;
for( int x = h->fdec->i_pir_start_col; x <= h->fdec->i_pir_end_col; x++, mb_xy++ )