s->rtp_payload_size = avctx->rtp_payload_size;
if (avctx->me_penalty_compensation)
s->me_penalty_compensation = avctx->me_penalty_compensation;
+ if (avctx->pre_me)
+ s->me_pre = avctx->pre_me;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
s->intra_only = 0;
}
-#if FF_API_MOTION_EST
-FF_DISABLE_DEPRECATION_WARNINGS
- s->me_method = avctx->me_method;
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
/* Fixed QSCALE */
s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
-#if FF_API_MPV_OPT
- FF_DISABLE_DEPRECATION_WARNINGS
- if (avctx->border_masking != 0.0)
- s->border_masking = avctx->border_masking;
- FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
s->adaptive_quant = (s->avctx->lumi_masking ||
s->avctx->dark_masking ||
s->avctx->temporal_cplx_masking ||
s->codec_id != AV_CODEC_ID_MPEG4 &&
s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
- av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
+ av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
return -1;
}
}
if (s->max_b_frames != 0) {
av_log(avctx, AV_LOG_ERROR,
- "b frames cannot be used with low delay\n");
+ "B-frames cannot be used with low delay\n");
return -1;
}
}
s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
}
-#if FF_API_QUANT_BIAS
-FF_DISABLE_DEPRECATION_WARNINGS
- if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
- s->intra_quant_bias = avctx->intra_quant_bias;
- if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
- s->inter_quant_bias = avctx->inter_quant_bias;
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
s->avctx->time_base.den > (1 << 16) - 1) {
av_log(avctx, AV_LOG_ERROR,
s->intra_matrix[j] =
s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
} else {
- /* mpeg1/2 */
+ /* MPEG-1/2 */
s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
}
if (ff_rate_control_init(s) < 0)
return -1;
-#if FF_API_ERROR_RATE
- FF_DISABLE_DEPRECATION_WARNINGS
- if (avctx->error_rate)
- s->error_rate = avctx->error_rate;
- FF_ENABLE_DEPRECATION_WARNINGS;
-#endif
-
-#if FF_API_NORMALIZE_AQP
- FF_DISABLE_DEPRECATION_WARNINGS
- if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
- s->mpv_flags |= FF_MPV_FLAG_NAQ;
- FF_ENABLE_DEPRECATION_WARNINGS;
-#endif
-
-#if FF_API_MV0
- FF_DISABLE_DEPRECATION_WARNINGS
- if (avctx->flags & CODEC_FLAG_MV0)
- s->mpv_flags |= FF_MPV_FLAG_MV0;
- FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
-#if FF_API_MPV_OPT
- FF_DISABLE_DEPRECATION_WARNINGS
- if (avctx->rc_qsquish != 0.0)
- s->rc_qsquish = avctx->rc_qsquish;
- if (avctx->rc_qmod_amp != 0.0)
- s->rc_qmod_amp = avctx->rc_qmod_amp;
- if (avctx->rc_qmod_freq)
- s->rc_qmod_freq = avctx->rc_qmod_freq;
- if (avctx->rc_buffer_aggressivity != 1.0)
- s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
- if (avctx->rc_initial_cplx != 0.0)
- s->rc_initial_cplx = avctx->rc_initial_cplx;
- if (avctx->lmin)
- s->lmin = avctx->lmin;
- if (avctx->lmax)
- s->lmax = avctx->lmax;
-
- if (avctx->rc_eq) {
- av_freep(&s->rc_eq);
- s->rc_eq = av_strdup(avctx->rc_eq);
- if (!s->rc_eq)
- return AVERROR(ENOMEM);
- }
- FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->brd_scale)
int i;
ff_rate_control_uninit(s);
-
ff_mpv_common_end(s);
if (CONFIG_MJPEG_ENCODER &&
s->out_format == FMT_MJPEG)
return ret;
pic->f->display_picture_number = display_picture_number;
- pic->f->pts = pts; // we set this here to avoid modifiying pic_arg
+ pic->f->pts = pts; // we set this here to avoid modifying pic_arg
} else {
/* Flushing: When we have not received enough input frames,
* ensure s->input_picture[0] contains the first picture */
/* Encode one frame (or flush the encoder when frame == NULL) on the
 * scratch encoder context c and return the total size in bytes of all
 * packets produced, or a negative error code.
 * Helper for the B-frame strategy estimation below. */
static int encode_frame(AVCodecContext *c, AVFrame *frame)
{
    AVPacket pkt = { 0 };
-    int ret, got_output;
+    int ret;
+    int size = 0;

    av_init_packet(&pkt);
-    ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
+
+    /* NULL frame switches the encoder into draining (flush) mode,
+     * per the avcodec_send_frame() API contract. */
+    ret = avcodec_send_frame(c, frame);
    if (ret < 0)
        return ret;
-    ret = pkt.size;
-    av_packet_unref(&pkt);
-    return ret;
+    /* Drain every packet the encoder has ready; with B-frame delay a
+     * single send may yield zero or several packets. */
+    do {
+        ret = avcodec_receive_packet(c, &pkt);
+        if (ret >= 0) {
+            size += pkt.size;
+            av_packet_unref(&pkt);
+        } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
+            /* EAGAIN/EOF merely end the loop; anything else is fatal. */
+            return ret;
+    } while (ret >= 0);
+
+    return size;
}
static int estimate_best_b_count(MpegEncContext *s)
{
- AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
- AVCodecContext *c = avcodec_alloc_context3(NULL);
+ const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
const int scale = s->brd_scale;
+ int width = s->width >> scale;
+ int height = s->height >> scale;
int i, j, out_size, p_lambda, b_lambda, lambda2;
int64_t best_rd = INT64_MAX;
int best_b_count = -1;
+ int ret = 0;
- if (!c)
- return AVERROR(ENOMEM);
assert(scale >= 0 && scale <= 3);
//emms_c();
lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
FF_LAMBDA_SHIFT;
- c->width = s->width >> scale;
- c->height = s->height >> scale;
- c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
- c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
- c->mb_decision = s->avctx->mb_decision;
- c->me_cmp = s->avctx->me_cmp;
- c->mb_cmp = s->avctx->mb_cmp;
- c->me_sub_cmp = s->avctx->me_sub_cmp;
- c->pix_fmt = AV_PIX_FMT_YUV420P;
- c->time_base = s->avctx->time_base;
- c->max_b_frames = s->max_b_frames;
-
- if (avcodec_open2(c, codec, NULL) < 0)
- return -1;
-
for (i = 0; i < s->max_b_frames + 2; i++) {
Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
s->next_picture_ptr;
s->tmp_frames[i]->linesize[0],
pre_input.f->data[0],
pre_input.f->linesize[0],
- c->width, c->height);
+ width, height);
s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
s->tmp_frames[i]->linesize[1],
pre_input.f->data[1],
pre_input.f->linesize[1],
- c->width >> 1, c->height >> 1);
+ width >> 1, height >> 1);
s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
s->tmp_frames[i]->linesize[2],
pre_input.f->data[2],
pre_input.f->linesize[2],
- c->width >> 1, c->height >> 1);
+ width >> 1, height >> 1);
}
}
for (j = 0; j < s->max_b_frames + 1; j++) {
+ AVCodecContext *c;
int64_t rd = 0;
if (!s->input_picture[j])
break;
- c->error[0] = c->error[1] = c->error[2] = 0;
+ c = avcodec_alloc_context3(NULL);
+ if (!c)
+ return AVERROR(ENOMEM);
+
+ c->width = width;
+ c->height = height;
+ c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
+ c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
+ c->mb_decision = s->avctx->mb_decision;
+ c->me_cmp = s->avctx->me_cmp;
+ c->mb_cmp = s->avctx->mb_cmp;
+ c->me_sub_cmp = s->avctx->me_sub_cmp;
+ c->pix_fmt = AV_PIX_FMT_YUV420P;
+ c->time_base = s->avctx->time_base;
+ c->max_b_frames = s->max_b_frames;
+
+ ret = avcodec_open2(c, codec, NULL);
+ if (ret < 0)
+ goto fail;
s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
out_size = encode_frame(c, s->tmp_frames[0]);
+ if (out_size < 0) {
+ ret = out_size;
+ goto fail;
+ }
//rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
out_size = encode_frame(c, s->tmp_frames[i + 1]);
+ if (out_size < 0) {
+ ret = out_size;
+ goto fail;
+ }
rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
}
/* get the delayed frames */
- while (out_size) {
- out_size = encode_frame(c, NULL);
- rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
+ out_size = encode_frame(c, NULL);
+ if (out_size < 0) {
+ ret = out_size;
+ goto fail;
}
+ rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
rd += c->error[0] + c->error[1] + c->error[2];
best_rd = rd;
best_b_count = j;
}
- }
- avcodec_close(c);
- av_freep(&c);
+fail:
+ avcodec_free_context(&c);
+ if (ret < 0)
+ return ret;
+ }
return best_b_count;
}
if (s->frame_skip_threshold || s->frame_skip_factor) {
if (s->picture_in_gop_number < s->gop_size &&
skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
- // FIXME check that te gop check above is +-1 correct
+ // FIXME check that the gop check above is +-1 correct
av_frame_unref(s->input_picture[0]->f);
emms_c();
}
} else if (s->b_frame_strategy == 2) {
b_frames = estimate_best_b_count(s);
+ if (b_frames < 0)
+ return b_frames;
}
emms_c();
if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
b_frames == s->max_b_frames) {
av_log(s->avctx, AV_LOG_ERROR,
- "warning, too many b frames in a row\n");
+ "warning, too many B-frames in a row\n");
}
if (s->picture_in_gop_number + b_frames >= s->gop_size) {
return ret;
if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
- // input is a shared pix, so we can't modifiy it -> alloc a new
+ // input is a shared pix, so we can't modify it -> allocate a new
        // one & ensure that the shared one is reusable
Picture *pic;
return 0;
}
+/* Write one line of pass-1 (two-pass rate control) statistics for the
+ * current picture into avctx->stats_out.
+ * NOTE(review): the 256-byte bound is hard-coded -- confirm it matches
+ * the size allocated for stats_out at encoder init.
+ * NOTE(review): confirm "%d" matches the declared types of
+ * mc_mb_var_sum / mb_var_sum -- these fields are 64-bit in some trees,
+ * which would require PRId64 here. */
+static void write_pass1_stats(MpegEncContext *s)
+{
+    snprintf(s->avctx->stats_out, 256,
+             "in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d "
+             "fcode:%d bcode:%d mc-var:%d var:%d icount:%d skipcount:%d "
+             "hbits:%d;\n",
+             s->current_picture_ptr->f->display_picture_number,
+             s->current_picture_ptr->f->coded_picture_number,
+             s->pict_type,
+             s->current_picture.f->quality,
+             s->i_tex_bits,
+             s->p_tex_bits,
+             s->mv_bits,
+             s->misc_bits,
+             s->f_code,
+             s->b_code,
+             s->current_picture.mc_mb_var_sum,
+             s->current_picture.mb_var_sum,
+             s->i_count, s->skip_count,
+             s->header_bits);
+}
+
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pic_arg, int *got_packet)
{
}
if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
- ff_write_pass1_stats(s);
+ write_pass1_stats(s);
for (i = 0; i < 4; i++) {
s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
s->frame_bits = put_bits_count(&s->pb);
}
- /* update mpeg1/2 vbv_delay for CBR */
+ /* update MPEG-1/2 vbv_delay for CBR */
if (s->avctx->rc_max_rate &&
s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
s->out_format == FMT_MPEG1 &&
memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
- /* mpeg1 */
+ /* MPEG-1 */
d->mb_skip_run= s->mb_skip_run;
for(i=0; i<3; i++)
d->last_dc[i] = s->last_dc[i];
memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
- /* mpeg1 */
+ /* MPEG-1 */
d->mb_skip_run= s->mb_skip_run;
for(i=0; i<3; i++)
d->last_dc[i] = s->last_dc[i];
static int encode_thread(AVCodecContext *c, void *arg){
MpegEncContext *s= *(void**)arg;
- int mb_x, mb_y, pdif = 0;
+ int mb_x, mb_y;
int chr_h= 16>>s->chroma_y_shift;
int i, j;
MpegEncContext best_s = { 0 }, backup_s;
ff_mpv_decode_mb(s, s->block);
}
- /* clean the MV table in IPS frames for direct mode in B frames */
+ /* clean the MV table in IPS frames for direct mode in B-frames */
if(s->mb_intra /* && I,P,S_TYPE */){
s->p_mv_table[xy][0]=0;
s->p_mv_table[xy][1]=0;
/* Send the last GOB if RTP */
if (s->avctx->rtp_callback) {
int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
- pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
+ int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
/* Call the RTP callback to send the last GOB */
emms_c();
s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
s->current_picture.f->quality = s->next_lambda;
if(!dry_run) s->next_lambda= 0;
} else if (!s->fixed_qscale) {
+ int quality;
+ quality = ff_rate_estimate_qscale(s, dry_run);
s->current_picture_ptr->f->quality =
- s->current_picture.f->quality = ff_rate_estimate_qscale(s, dry_run);
+ s->current_picture.f->quality = quality;
if (s->current_picture.f->quality < 0)
return -1;
}
s->me.mb_var_sum_temp =
s->me.mc_mb_var_sum_temp = 0;
- /* we need to initialize some time vars before we can encode b-frames */
+ /* we need to initialize some time vars before we can encode B-frames */
// RAL: Condition added for MPEG1VIDEO
if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
set_frame_distances(s);
s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
if (s->pict_type != AV_PICTURE_TYPE_B) {
- if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
+ if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
+ s->me_pre == 2) {
s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
}
}
if(s->out_format == FMT_H263){
unquant_coeff= alevel*qmul + qadd;
- }else{ //MPEG1
+ } else { // MPEG-1
j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
if(s->mb_intra){
unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
score_tab[i+1]= best_score;
- //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
+    // Note: there is a VLC code in MPEG-4 which is 1 bit shorter than another one with a shorter run and the same level
if(last_non_zero <= 27){
for(; survivor_count; survivor_count--){
if(score_tab[ survivor[survivor_count-1] ] <= best_score)
last_score= 256*256*256*120;
for(i= survivor[0]; i<=last_non_zero + 1; i++){
int score= score_tab[i];
- if(i) score += lambda*2; //FIXME exacter?
+ if (i)
+ score += lambda * 2; // FIXME more exact?
if(score < last_score){
last_score= score;
if(s->out_format == FMT_H263){
unquant_coeff= (alevel*qmul + qadd)>>3;
- }else{ //MPEG1
+ } else { // MPEG-1
unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
unquant_coeff = (unquant_coeff - 1) | 1;
}
#endif
dc += (1<<(RECON_SHIFT-1));
for(i=0; i<64; i++){
- rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
+ rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
}
#ifdef REFINE_STATS
STOP_TIMER("memset rem[]")}
}
/**
- * Permute an 8x8 block according to permuatation.
+ * Permute an 8x8 block according to permutation.
* @param block the block which will be permuted according to
* the given permutation vector
* @param permutation the permutation vector