a->i_qp = X264_LOOKAHEAD_QP;
a->i_lambda = x264_lambda_tab[ a->i_qp ];
x264_mb_analyse_load_costs( h, a );
- h->mb.i_me_method = X264_MIN( X264_ME_HEX, h->param.analyse.i_me_method ); // maybe dia?
- h->mb.i_subpel_refine = 4; // 3 should be enough, but not tweaking for speed now
+ if( h->param.analyse.i_subpel_refine > 1 )
+ {
+ h->mb.i_me_method = X264_MIN( X264_ME_HEX, h->param.analyse.i_me_method );
+ h->mb.i_subpel_refine = 4;
+ }
+ else
+ {
+ h->mb.i_me_method = X264_ME_DIA;
+ h->mb.i_subpel_refine = 2;
+ }
h->mb.b_chroma_me = 0;
}
/* makes a non-h264 weight (i.e. fix7), into an h264 weight */
-static void get_h264_weight( unsigned int weight_nonh264, int offset, x264_weight_t *w )
+static void x264_weight_get_h264( unsigned int weight_nonh264, int offset, x264_weight_t *w )
{
w->i_offset = offset;
w->i_denom = 7;
w->i_scale = X264_MIN( w->i_scale, 127 );
}
-static NOINLINE void weights_plane_analyse( x264_t *h, uint8_t *plane, int width, int height, int stride, unsigned int *sum, uint64_t *var )
+void x264_weight_plane_analyse( x264_t *h, x264_frame_t *frame )
{
int x,y;
- uint64_t sad = 0;
+ uint32_t sad = 0;
uint64_t ssd = 0;
- uint8_t *p = plane;
+ uint8_t *p = frame->plane[0];
+ int stride = frame->i_stride[0];
+ int width = frame->i_width[0];
+ int height = frame->i_lines[0];
for( y = 0; y < height>>4; y++, p += stride*16 )
- for( x = 0; x < width; x+=16 )
+ for( x = 0; x < width; x += 16 )
{
uint64_t res = h->pixf.var[PIXEL_16x16]( p + x, stride );
sad += (uint32_t)res;
ssd += res >> 32;
}
-
- *sum = sad;
- *var = ssd - ((uint64_t)sad * sad + width * height / 2) / (width * height);
- x264_emms();
-}
-
-#define LOAD_HPELS_LUMA(dst, src) \
-{ \
- (dst)[0] = &(src)[0][i_pel_offset]; \
- (dst)[1] = &(src)[1][i_pel_offset]; \
- (dst)[2] = &(src)[2][i_pel_offset]; \
- (dst)[3] = &(src)[3][i_pel_offset]; \
+ frame->i_pixel_sum = sad;
+ frame->i_pixel_ssd = ssd - ((uint64_t)sad * sad + width * height / 2) / (width * height);
}
static NOINLINE uint8_t *x264_weight_cost_init_luma( x264_t *h, x264_frame_t *fenc, x264_frame_t *ref, uint8_t *dest )
* motion search has been done. */
if( fenc->lowres_mvs[0][ref0_distance][0][0] != 0x7FFF )
{
- uint8_t *src[4];
int i_stride = fenc->i_stride_lowres;
int i_lines = fenc->i_lines_lowres;
int i_width = fenc->i_width_lowres;
int i_mb_xy = 0;
int x,y;
- int i_pel_offset = 0;
+ uint8_t *p = dest;
- for( y = 0; y < i_lines; y += 8, i_pel_offset = y*i_stride )
- for( x = 0; x < i_width; x += 8, i_mb_xy++, i_pel_offset += 8 )
+ for( y = 0; y < i_lines; y += 8, p += i_stride*8 )
+ for( x = 0; x < i_width; x += 8, i_mb_xy++ )
{
- uint8_t *pix = &dest[ i_pel_offset ];
int mvx = fenc->lowres_mvs[0][ref0_distance][i_mb_xy][0];
int mvy = fenc->lowres_mvs[0][ref0_distance][i_mb_xy][1];
- LOAD_HPELS_LUMA( src, ref->lowres );
- h->mc.mc_luma( pix, i_stride, src, i_stride,
- mvx, mvy, 8, 8, weight_none );
+ h->mc.mc_luma( p+x, i_stride, ref->lowres, i_stride,
+ mvx+(x<<2), mvy+(y<<2), 8, 8, weight_none );
}
x264_emms();
return dest;
x264_emms();
return ref->lowres[0];
}
-#undef LOAD_HPELS_LUMA
static NOINLINE unsigned int x264_weight_cost( x264_t *h, x264_frame_t *fenc, uint8_t *src, x264_weight_t *w )
{
int i_lines = fenc->i_lines_lowres;
int i_width = fenc->i_width_lowres;
uint8_t *fenc_plane = fenc->lowres[0];
- ALIGNED_8( uint8_t buf[8*8] );
+ ALIGNED_ARRAY_8( uint8_t, buf,[8*8] );
int pixoff = 0;
int i_mb = 0;
void x264_weights_analyse( x264_t *h, x264_frame_t *fenc, x264_frame_t *ref, int b_lookahead )
{
- unsigned int fenc_sum, ref_sum;
- float fenc_mean, ref_mean;
- uint64_t fenc_var, ref_var;
+ float fenc_mean, ref_mean, fenc_var, ref_var;
int i_off, offset_search;
int minoff, minscale, mindenom;
unsigned int minscore, origscore;
int found;
x264_weight_t *weights = fenc->weight[0];
- weights_plane_analyse( h, fenc->plane[0], fenc->i_width[0], fenc->i_lines[0], fenc->i_stride[0], &fenc_sum, &fenc_var );
- weights_plane_analyse( h, ref->plane[0], ref->i_width[0], ref->i_lines[0], ref->i_stride[0], &ref_sum, &ref_var );
- fenc_var = round( sqrt( fenc_var ) );
- ref_var = round( sqrt( ref_var ) );
- fenc_mean = (float)fenc_sum / (fenc->i_lines[0] * fenc->i_width[0]);
- ref_mean = (float) ref_sum / (fenc->i_lines[0] * fenc->i_width[0]);
+ fenc_var = round( sqrt( fenc->i_pixel_ssd ) );
+ ref_var = round( sqrt( ref->i_pixel_ssd ) );
+ fenc_mean = (float)fenc->i_pixel_sum / (fenc->i_lines[0] * fenc->i_width[0]);
+ ref_mean = (float) ref->i_pixel_sum / (fenc->i_lines[0] * fenc->i_width[0]);
//early termination
- if( fabs( ref_mean - fenc_mean ) < 0.5 && fabsf( 1 - (float)fenc_var / ref_var ) < epsilon )
+ if( fabs( ref_mean - fenc_mean ) < 0.5 && fabs( 1 - fenc_var / ref_var ) < epsilon )
+ {
+ SET_WEIGHT( weights[0], 0, 1, 0, 0 );
return;
+ }
- guess_scale = ref_var ? (float)fenc_var/ref_var : 0;
- get_h264_weight( round( guess_scale * 128 ), 0, &weights[0] );
+ guess_scale = ref_var ? fenc_var/ref_var : 0;
+ x264_weight_get_h264( round( guess_scale * 128 ), 0, &weights[0] );
found = 0;
mindenom = weights[0].i_denom;
origscore = minscore = x264_weight_cost( h, fenc, mcbuf, 0 );
if( !minscore )
+ {
+ SET_WEIGHT( weights[0], 0, 1, 0, 0 );
return;
+ }
// This gives a slight improvement due to rounding errors but only tests
// one offset on lookahead.
if( weights[0].weightfn && b_lookahead )
{
//scale lowres in lookahead for slicetype_frame_cost
- int i_padv = PADV<<h->param.b_interlaced;
uint8_t *src = ref->buffer_lowres[0];
uint8_t *dst = h->mb.p_weight_buf[0];
int width = ref->i_width_lowres + PADH*2;
- int height = ref->i_lines_lowres + i_padv*2;
+ int height = ref->i_lines_lowres + PADV*2;
x264_weight_scale_plane( h, dst, ref->i_stride_lowres, src, ref->i_stride_lowres,
width, height, &weights[0] );
- fenc->weighted[0] = h->mb.p_weight_buf[0] + PADH + ref->i_stride_lowres * i_padv;
+ fenc->weighted[0] = h->mb.p_weight_buf[0] + PADH + ref->i_stride_lowres * PADV;
}
}
int16_t (*fenc_mvs[2])[2] = { &frames[b]->lowres_mvs[0][b-p0-1][i_mb_xy], &frames[b]->lowres_mvs[1][p1-b-1][i_mb_xy] };
int (*fenc_costs[2]) = { &frames[b]->lowres_mv_costs[0][b-p0-1][i_mb_xy], &frames[b]->lowres_mv_costs[1][p1-b-1][i_mb_xy] };
- ALIGNED_8( uint8_t pix1[9*FDEC_STRIDE] );
+ ALIGNED_ARRAY_8( uint8_t, pix1,[9*FDEC_STRIDE] );
uint8_t *pix2 = pix1+8;
x264_me_t m[2];
int i_bcost = COST_MAX;
}
#define TRY_BIDIR( mv0, mv1, penalty ) \
{ \
- int stride1 = 16, stride2 = 16; \
- uint8_t *src1, *src2; \
int i_cost; \
- src1 = h->mc.get_ref( pix1, &stride1, m[0].p_fref, m[0].i_stride[0], \
- (mv0)[0], (mv0)[1], 8, 8, w ); \
- src2 = h->mc.get_ref( pix2, &stride2, m[1].p_fref, m[1].i_stride[0], \
- (mv1)[0], (mv1)[1], 8, 8, w ); \
- h->mc.avg[PIXEL_8x8]( pix1, 16, src1, stride1, src2, stride2, i_bipred_weight ); \
+ if( h->param.analyse.i_subpel_refine <= 1 ) \
+ { \
+ int hpel_idx1 = (((mv0)[0]&2)>>1) + ((mv0)[1]&2); \
+ int hpel_idx2 = (((mv1)[0]&2)>>1) + ((mv1)[1]&2); \
+ uint8_t *src1 = m[0].p_fref[hpel_idx1] + ((mv0)[0]>>2) + ((mv0)[1]>>2) * m[0].i_stride[0]; \
+ uint8_t *src2 = m[1].p_fref[hpel_idx2] + ((mv1)[0]>>2) + ((mv1)[1]>>2) * m[1].i_stride[0]; \
+ h->mc.avg[PIXEL_8x8]( pix1, 16, src1, m[0].i_stride[0], src2, m[1].i_stride[0], i_bipred_weight ); \
+ } \
+ else \
+ { \
+ int stride1 = 16, stride2 = 16; \
+ uint8_t *src1, *src2; \
+ src1 = h->mc.get_ref( pix1, &stride1, m[0].p_fref, m[0].i_stride[0], \
+ (mv0)[0], (mv0)[1], 8, 8, w ); \
+ src2 = h->mc.get_ref( pix2, &stride2, m[1].p_fref, m[1].i_stride[0], \
+ (mv1)[0], (mv1)[1], 8, 8, w ); \
+ h->mc.avg[PIXEL_8x8]( pix1, 16, src1, stride1, src2, stride2, i_bipred_weight ); \
+ } \
i_cost = penalty + h->pixf.mbcmp[PIXEL_8x8]( \
m[0].p_fenc[0], FENC_STRIDE, pix1, 16 ); \
COPY2_IF_LT( i_bcost, i_cost, list_used, 3 ); \
m[0].i_stride[0] = i_stride;
m[0].p_fenc[0] = h->mb.pic.p_fenc[0];
m[0].weight = w;
+ m[0].i_ref = 0;
LOAD_HPELS_LUMA( m[0].p_fref, fref0->lowres );
m[0].p_fref_w = m[0].p_fref[0];
if( w[0].weightfn )
if( b_bidir )
{
int16_t *mvr = fref1->lowres_mvs[0][p1-p0-1][i_mb_xy];
- int dmv[2][2];
+ ALIGNED_ARRAY_8( int16_t, dmv,[2],[2] );
- h->mc.memcpy_aligned( &m[1], &m[0], sizeof(x264_me_t) );
- m[1].i_ref = p1;
+ m[1].i_pixel = PIXEL_8x8;
+ m[1].p_cost_mv = a->p_cost_mv;
+ m[1].i_stride[0] = i_stride;
+ m[1].p_fenc[0] = h->mb.pic.p_fenc[0];
+ m[1].i_ref = 0;
m[1].weight = weight_none;
LOAD_HPELS_LUMA( m[1].p_fref, fref1->lowres );
m[1].p_fref_w = m[1].p_fref[0];
dmv[1][1] = dmv[0][1] - mvr[1];
CLIP_MV( dmv[0] );
CLIP_MV( dmv[1] );
+ if( h->param.analyse.i_subpel_refine <= 1 )
+ M64( dmv ) &= ~0x0001000100010001ULL; /* mv & ~1 */
TRY_BIDIR( dmv[0], dmv[1], 0 );
- if( dmv[0][0] | dmv[0][1] | dmv[1][0] | dmv[1][1] )
+ if( M64( dmv ) )
{
int i_cost;
h->mc.avg[PIXEL_8x8]( pix1, 16, m[0].p_fref[0], m[0].i_stride[0], m[1].p_fref[0], m[1].i_stride[0], i_bipred_weight );
uint8_t *pix = &pix1[8+FDEC_STRIDE - 1];
uint8_t *src = &fenc->lowres[0][i_pel_offset - 1];
const int intra_penalty = 5;
- int satds[4];
+ int satds[3];
memcpy( pix-FDEC_STRIDE, src-i_stride, 17 );
for( i=0; i<8; i++ )
pix++;
if( h->pixf.intra_mbcmp_x3_8x8c )
- {
h->pixf.intra_mbcmp_x3_8x8c( h->mb.pic.p_fenc[0], pix, satds );
- h->predict_8x8c[I_PRED_CHROMA_P]( pix );
- satds[I_PRED_CHROMA_P] =
- h->pixf.mbcmp[PIXEL_8x8]( pix, FDEC_STRIDE, h->mb.pic.p_fenc[0], FENC_STRIDE );
- }
else
{
- for( i=0; i<4; i++ )
+ for( i=0; i<3; i++ )
{
h->predict_8x8c[i]( pix );
satds[i] = h->pixf.mbcmp[PIXEL_8x8]( pix, FDEC_STRIDE, h->mb.pic.p_fenc[0], FENC_STRIDE );
}
}
- i_icost = X264_MIN4( satds[0], satds[1], satds[2], satds[3] );
+ i_icost = X264_MIN3( satds[0], satds[1], satds[2] );
- h->predict_8x8_filter( pix, edge, ALL_NEIGHBORS, ALL_NEIGHBORS );
- for( i=3; i<9; i++ )
+ if( h->param.analyse.i_subpel_refine > 1 )
{
- int satd;
- h->predict_8x8[i]( pix, edge );
- satd = h->pixf.mbcmp[PIXEL_8x8]( pix, FDEC_STRIDE, h->mb.pic.p_fenc[0], FENC_STRIDE );
+ h->predict_8x8c[I_PRED_CHROMA_P]( pix );
+ int satd = h->pixf.mbcmp[PIXEL_8x8]( pix, FDEC_STRIDE, h->mb.pic.p_fenc[0], FENC_STRIDE );
i_icost = X264_MIN( i_icost, satd );
+ h->predict_8x8_filter( pix, edge, ALL_NEIGHBORS, ALL_NEIGHBORS );
+ for( i=3; i<9; i++ )
+ {
+ int satd;
+ h->predict_8x8[i]( pix, edge );
+ satd = h->pixf.mbcmp[PIXEL_8x8]( pix, FDEC_STRIDE, h->mb.pic.p_fenc[0], FENC_STRIDE );
+ i_icost = X264_MIN( i_icost, satd );
+ }
}
i_icost += intra_penalty;
if( ( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART
|| h->param.analyse.i_weighted_pred == X264_WEIGHTP_FAKE ) && b == p1 )
{
+ x264_emms();
x264_weights_analyse( h, frames[b], frames[p0], 1 );
w = frames[b]->weight[0];
}
}
}
-static void x264_macroblock_tree_propagate( x264_t *h, x264_frame_t **frames, int p0, int p1, int b )
+static void x264_macroblock_tree_propagate( x264_t *h, x264_frame_t **frames, int p0, int p1, int b, int referenced )
{
uint16_t *ref_costs[2] = {frames[p0]->i_propagate_cost,frames[p1]->i_propagate_cost};
int dist_scale_factor = ( ((b-p0) << 8) + ((p1-p0) >> 1) ) / (p1-p0);
int16_t (*mvs[2])[2] = { frames[b]->lowres_mvs[0][b-p0-1], frames[b]->lowres_mvs[1][p1-b-1] };
int bipred_weights[2] = {i_bipred_weight, 64 - i_bipred_weight};
int *buf = h->scratch_buffer;
+ uint16_t *propagate_cost = frames[b]->i_propagate_cost;
+
+    /* For non-referenced frames the source costs are always zero, so just memset one row and reuse it. */
+ if( !referenced )
+ memset( frames[b]->i_propagate_cost, 0, h->sps->i_mb_width * sizeof(uint16_t) );
for( h->mb.i_mb_y = 0; h->mb.i_mb_y < h->sps->i_mb_height; h->mb.i_mb_y++ )
{
int mb_index = h->mb.i_mb_y*h->mb.i_mb_stride;
- h->mc.mbtree_propagate_cost( buf, frames[b]->i_propagate_cost+mb_index,
+ h->mc.mbtree_propagate_cost( buf, propagate_cost,
frames[b]->i_intra_cost+mb_index, frames[b]->lowres_costs[b-p0][p1-b]+mb_index,
frames[b]->i_inv_qscale_factor+mb_index, h->sps->i_mb_width );
+ if( referenced )
+ propagate_cost += h->sps->i_mb_width;
for( h->mb.i_mb_x = 0; h->mb.i_mb_x < h->sps->i_mb_width; h->mb.i_mb_x++, mb_index++ )
{
int propagate_amount = buf[h->mb.i_mb_x];
}
}
- if( h->param.rc.i_vbv_buffer_size && b == p1 )
- x264_macroblock_tree_finish( h, frames[b], b-p0 );
+ if( h->param.rc.i_vbv_buffer_size && referenced )
+ x264_macroblock_tree_finish( h, frames[b], b == p1 ? b - p0 : 0 );
}
static void x264_macroblock_tree( x264_t *h, x264_mb_analysis_t *a, x264_frame_t **frames, int num_frames, int b_intra )
{
int i, idx = !b_intra;
int last_nonb, cur_nonb = 1;
+ int bframes = 0;
if( b_intra )
x264_slicetype_frame_cost( h, a, frames, 0, 0, 0, 0 );
break;
x264_slicetype_frame_cost( h, a, frames, cur_nonb, last_nonb, last_nonb, 0 );
memset( frames[cur_nonb]->i_propagate_cost, 0, h->mb.i_mb_count * sizeof(uint16_t) );
- while( i > cur_nonb )
+ bframes = last_nonb - cur_nonb - 1;
+ if( h->param.i_bframe_pyramid && bframes > 1 )
+ {
+ int middle = (bframes + 1)/2 + cur_nonb;
+ x264_slicetype_frame_cost( h, a, frames, cur_nonb, last_nonb, middle, 0 );
+ memset( frames[middle]->i_propagate_cost, 0, h->mb.i_mb_count * sizeof(uint16_t) );
+ while( i > cur_nonb )
+ {
+ int p0 = i > middle ? middle : cur_nonb;
+ int p1 = i < middle ? middle : last_nonb;
+ if( i != middle )
+ {
+ x264_slicetype_frame_cost( h, a, frames, p0, p1, i, 0 );
+ x264_macroblock_tree_propagate( h, frames, p0, p1, i, 0 );
+ }
+ i--;
+ }
+ x264_macroblock_tree_propagate( h, frames, cur_nonb, last_nonb, middle, 1 );
+ }
+ else
{
- x264_slicetype_frame_cost( h, a, frames, cur_nonb, last_nonb, i, 0 );
- memset( frames[i]->i_propagate_cost, 0, h->mb.i_mb_count * sizeof(uint16_t) );
- x264_macroblock_tree_propagate( h, frames, cur_nonb, last_nonb, i );
- i--;
+ while( i > cur_nonb )
+ {
+ x264_slicetype_frame_cost( h, a, frames, cur_nonb, last_nonb, i, 0 );
+ x264_macroblock_tree_propagate( h, frames, cur_nonb, last_nonb, i, 0 );
+ i--;
+ }
}
- x264_macroblock_tree_propagate( h, frames, cur_nonb, last_nonb, last_nonb );
+ x264_macroblock_tree_propagate( h, frames, cur_nonb, last_nonb, last_nonb, 1 );
last_nonb = cur_nonb;
}
x264_macroblock_tree_finish( h, frames[last_nonb], last_nonb );
+ if( h->param.i_bframe_pyramid && bframes > 1 && !h->param.rc.i_vbv_buffer_size )
+ x264_macroblock_tree_finish( h, frames[last_nonb+(bframes+1)/2], 0 );
}
static int x264_vbv_frame_cost( x264_t *h, x264_mb_analysis_t *a, x264_frame_t **frames, int p0, int p1, int b )
if( cost > threshold )
break;
- for( next_b = loc; next_b < next_p && cost < threshold; next_b++ )
- cost += x264_slicetype_frame_cost( h, a, frames, cur_p, next_p, next_b, 0 );
+ if( h->param.i_bframe_pyramid && next_p - cur_p > 2 )
+ {
+ int middle = cur_p + (next_p - cur_p)/2;
+ cost += x264_slicetype_frame_cost( h, a, frames, cur_p, next_p, middle, 0 );
+ for( next_b = loc; next_b < middle && cost < threshold; next_b++ )
+ cost += x264_slicetype_frame_cost( h, a, frames, cur_p, middle, next_b, 0 );
+ for( next_b = middle+1; next_b < next_p && cost < threshold; next_b++ )
+ cost += x264_slicetype_frame_cost( h, a, frames, middle, next_p, next_b, 0 );
+ }
+ else
+ for( next_b = loc; next_b < next_p && cost < threshold; next_b++ )
+ cost += x264_slicetype_frame_cost( h, a, frames, cur_p, next_p, next_b, 0 );
loc = next_p + 1;
cur_p = next_p;
int icost = frame->i_cost_est[0][0];
int pcost = frame->i_cost_est[p1-p0][0];
float f_bias;
- int i_gop_size = frame->i_frame - h->lookahead->i_last_idr;
+ int i_gop_size = frame->i_frame - h->lookahead->i_last_keyframe;
float f_thresh_max = h->param.i_scenecut_threshold / 100.0;
/* magic numbers pulled out of thin air */
float f_thresh_min = f_thresh_max * h->param.i_keyint_min
if( h->param.i_keyint_min == h->param.i_keyint_max )
f_thresh_min= f_thresh_max;
- if( i_gop_size < h->param.i_keyint_min / 4 )
+ if( i_gop_size < h->param.i_keyint_min / 4 || h->param.b_intra_refresh )
f_bias = f_thresh_min / 4;
else if( i_gop_size <= h->param.i_keyint_min )
f_bias = f_thresh_min * i_gop_size / h->param.i_keyint_min;
if( !j )
return;
- keyint_limit = h->param.i_keyint_max - frames[0]->i_frame + h->lookahead->i_last_idr - 1;
- orig_num_frames = num_frames = X264_MIN( j, keyint_limit );
+ keyint_limit = h->param.i_keyint_max - frames[0]->i_frame + h->lookahead->i_last_keyframe - 1;
+ orig_num_frames = num_frames = h->param.b_intra_refresh ? j : X264_MIN( j, keyint_limit );
x264_lowres_context_init( h, &a );
- idr_frame_type = frames[1]->i_frame - h->lookahead->i_last_idr >= h->param.i_keyint_min ? X264_TYPE_IDR : X264_TYPE_I;
+ idr_frame_type = frames[1]->i_frame - h->lookahead->i_last_keyframe >= h->param.i_keyint_min ? X264_TYPE_IDR : X264_TYPE_I;
/* This is important psy-wise: if we have a non-scenecut keyframe,
* there will be significant visual artifacts if the frames just before
x264_macroblock_tree( h, &a, frames, X264_MIN(num_frames, h->param.i_keyint_max), keyframe );
/* Enforce keyframe limit. */
- for( j = 0; j < num_frames; j++ )
- {
- if( ((j-keyint_limit) % h->param.i_keyint_max) == 0 )
+ if( !h->param.b_intra_refresh )
+ for( j = 0; j < num_frames; j++ )
{
- if( j && h->param.i_keyint_max > 1 )
- frames[j]->i_type = X264_TYPE_P;
- frames[j+1]->i_type = X264_TYPE_IDR;
- reset_start = X264_MIN( reset_start, j+2 );
+ if( ((j-keyint_limit) % h->param.i_keyint_max) == 0 )
+ {
+ if( j && h->param.i_keyint_max > 1 )
+ frames[j]->i_type = X264_TYPE_P;
+ frames[j+1]->i_type = X264_TYPE_IDR;
+ reset_start = X264_MIN( reset_start, j+2 );
+ }
}
- }
if( h->param.rc.i_vbv_buffer_size )
x264_vbv_lookahead( h, &a, frames, num_frames, keyframe );
}
/* Limit GOP size */
- if( frm->i_frame - h->lookahead->i_last_idr >= h->param.i_keyint_max )
+ if( (!h->param.b_intra_refresh || frm->i_frame == 0) && frm->i_frame - h->lookahead->i_last_keyframe >= h->param.i_keyint_max )
{
if( frm->i_type == X264_TYPE_AUTO )
frm->i_type = X264_TYPE_IDR;
if( frm->i_type == X264_TYPE_IDR )
{
/* Close GOP */
- h->lookahead->i_last_idr = frm->i_frame;
+ h->lookahead->i_last_keyframe = frm->i_frame;
+ frm->b_keyframe = 1;
if( bframes > 0 )
{
bframes--;
/* Analyse for weighted P frames */
if( !h->param.rc.b_stat_read && h->lookahead->next.list[bframes]->i_type == X264_TYPE_P
&& h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
+ {
+ x264_emms();
x264_weights_analyse( h, h->lookahead->next.list[bframes], h->lookahead->last_nonb, 0 );
+ }
/* shift sequence to coded order.
use a small temporary list to avoid shifting the entire next buffer around */
- int i_dts = h->lookahead->next.list[0]->i_frame;
+ int i_coded = h->lookahead->next.list[0]->i_frame;
if( bframes )
{
int index[] = { brefs+1, 1 };
for( i = 0; i < bframes; i++ )
- frames[ index[h->lookahead->next.list[i]->i_type == X264_TYPE_BREF]++ ] = h->lookahead->next.list[i];
+ {
+ int idx = index[h->lookahead->next.list[i]->i_type == X264_TYPE_BREF]++;
+ frames[idx] = h->lookahead->next.list[i];
+ frames[idx]->i_reordered_pts = h->lookahead->next.list[idx]->i_pts;
+ }
frames[0] = h->lookahead->next.list[bframes];
+ frames[0]->i_reordered_pts = h->lookahead->next.list[0]->i_pts;
memcpy( h->lookahead->next.list, frames, (bframes+1) * sizeof(x264_frame_t*) );
}
for( i = 0; i <= bframes; i++ )
- h->lookahead->next.list[i]->i_dts = i_dts++;
+ h->lookahead->next.list[i]->i_coded = i_coded++;
}
int x264_rc_analyse_slice( x264_t *h )
{
int p0=0, p1, b;
int cost;
+ x264_emms();
if( IS_X264_TYPE_I(h->fenc->i_type) )
p1 = b = 0;
memcpy( h->fdec->i_row_satd, h->fenc->i_row_satd, h->sps->i_mb_height * sizeof(int) );
if( !IS_X264_TYPE_I(h->fenc->i_type) )
memcpy( h->fdec->i_row_satds[0][0], h->fenc->i_row_satds[0][0], h->sps->i_mb_height * sizeof(int) );
+
+ if( h->param.b_intra_refresh && h->param.rc.i_vbv_buffer_size && h->fenc->i_type == X264_TYPE_P )
+ {
+ int x, y;
+ int ip_factor = 256 * h->param.rc.f_ip_factor; /* fix8 */
+ for( y = 0; y < h->sps->i_mb_height; y++ )
+ {
+ int mb_xy = y * h->mb.i_mb_stride;
+ for( x = h->fdec->i_pir_start_col; x <= h->fdec->i_pir_end_col; x++, mb_xy++ )
+ {
+ int intra_cost = (h->fenc->i_intra_cost[mb_xy] * ip_factor) >> 8;
+ int inter_cost = h->fenc->lowres_costs[b-p0][p1-b][mb_xy];
+ int diff = intra_cost - inter_cost;
+ h->fdec->i_row_satd[y] += diff;
+ cost += diff;
+ }
+ }
+ }
+
return cost;
}