X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=avconv.c;h=7f8c6a619ed375750ed9bfd873ac595355795039;hb=a70519aad1f41afc053d22ad0d90257c06259112;hp=658dadc0939109a15e1ea24226275d906854a9ed;hpb=fa2a34cd40d124161c748bb0f430dc63c94dd0da;p=ffmpeg

diff --git a/avconv.c b/avconv.c
index 658dadc0939..7f8c6a619ed 100644
--- a/avconv.c
+++ b/avconv.c
@@ -348,6 +348,25 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
         bsfc = bsfc->next;
     }
 
+    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
+        ost->last_mux_dts != AV_NOPTS_VALUE &&
+        pkt->dts < ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT)) {
+        av_log(NULL, AV_LOG_WARNING, "Non-monotonous DTS in output stream "
+               "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
+               ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
+        if (exit_on_error) {
+            av_log(NULL, AV_LOG_FATAL, "aborting.\n");
+            exit(1);
+        }
+        av_log(NULL, AV_LOG_WARNING, "changing to %"PRId64". This may result "
+               "in incorrect timestamps in the output file.\n",
+               ost->last_mux_dts + 1);
+        pkt->dts = ost->last_mux_dts + 1;
+        if (pkt->pts != AV_NOPTS_VALUE)
+            pkt->pts = FFMAX(pkt->pts, pkt->dts);
+    }
+    ost->last_mux_dts = pkt->dts;
+
     pkt->stream_index = ost->index;
     ret = av_interleaved_write_frame(s, pkt);
     if (ret < 0) {
@@ -380,9 +399,6 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
     pkt.data = NULL;
     pkt.size = 0;
 
-    if (!check_recording_time(ost))
-        return;
-
     if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
         frame->pts = ost->sync_opts;
     ost->sync_opts = frame->pts + frame->nb_samples;
@@ -406,47 +422,6 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
     }
 }
 
-#if FF_API_DEINTERLACE
-static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
-{
-    AVCodecContext *dec;
-    AVPicture *picture2;
-    AVPicture picture_tmp;
-    uint8_t *buf = 0;
-
-    dec = ist->st->codec;
-
-    /* deinterlace : must be done before any resize */
-    if (do_deinterlace) {
-        int size;
-
-        /* create temporary picture */
-        size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
-        buf = av_malloc(size);
-        if (!buf)
-            return;
-
-        picture2 = &picture_tmp;
-        avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
-
-        if (avpicture_deinterlace(picture2, picture,
-                                  dec->pix_fmt, dec->width, dec->height) < 0) {
-            /* if error, do not deinterlace */
-            av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
-            av_free(buf);
-            buf = NULL;
-            picture2 = picture;
-        }
-    } else {
-        picture2 = picture;
-    }
-
-    if (picture != picture2)
-        *picture = *picture2;
-    *bufp = buf;
-}
-#endif
-
 static void do_subtitle_out(AVFormatContext *s,
                             OutputStream *ost,
                             InputStream *ist,
@@ -549,8 +524,7 @@ static void do_video_out(AVFormatContext *s,
     pkt.data = NULL;
     pkt.size = 0;
 
-    if (!check_recording_time(ost) ||
-        ost->frame_number >= ost->max_frames)
+    if (ost->frame_number >= ost->max_frames)
         return;
 
     if (s->oformat->flags & AVFMT_RAWPICTURE &&
@@ -685,11 +659,6 @@ static int poll_filter(OutputStream *ost)
                               av_rescale_q(of->start_time,
                                            AV_TIME_BASE_Q,
                                            ost->st->codec->time_base);
-
-        if (of->start_time && filtered_frame->pts < 0) {
-            av_frame_unref(filtered_frame);
-            return 0;
-        }
     }
 
     switch (ost->filter->filter->inputs[0]->type) {
@@ -1046,16 +1015,6 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
     ost->st->codec->frame_number++;
 }
 
-static void rate_emu_sleep(InputStream *ist)
-{
-    if (input_files[ist->file_index]->rate_emu) {
-        int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
-        int64_t now = av_gettime() - ist->start;
-        if (pts > now)
-            av_usleep(pts - now);
-    }
-}
-
 int guess_input_channel_layout(InputStream *ist)
 {
     AVCodecContext *dec = ist->st->codec;
@@ -1104,8 +1063,6 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
         pkt->pts = AV_NOPTS_VALUE;
     }
 
-    rate_emu_sleep(ist);
-
     resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
                        ist->resample_channels != avctx->channels ||
                        ist->resample_channel_layout != decoded_frame->channel_layout ||
@@ -1195,11 +1152,6 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
     decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
                                            decoded_frame->pkt_dts);
     pkt->size = 0;
-#if FF_API_DEINTERLACE
-    pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
-#endif
-
-    rate_emu_sleep(ist);
 
     if (ist->st->sample_aspect_ratio.num)
         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
@@ -1260,8 +1212,6 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
     if (!*got_output)
         return ret;
 
-    rate_emu_sleep(ist);
-
     for (i = 0; i < nb_output_streams; i++) {
         OutputStream *ost = output_streams[i];
 
@@ -1349,7 +1299,6 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
 
     /* handle stream copy */
     if (!ist->decoding_needed) {
-        rate_emu_sleep(ist);
         ist->last_dts = ist->next_dts;
         switch (ist->st->codec->codec_type) {
         case AVMEDIA_TYPE_AUDIO:
@@ -2056,6 +2005,17 @@ static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
 
 static int get_input_packet(InputFile *f, AVPacket *pkt)
 {
+    if (f->rate_emu) {
+        int i;
+        for (i = 0; i < f->nb_streams; i++) {
+            InputStream *ist = input_streams[f->ist_index + i];
+            int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
+            int64_t now = av_gettime() - ist->start;
+            if (pts > now)
+                return AVERROR(EAGAIN);
+        }
+    }
+
 #if HAVE_PTHREADS
     if (nb_input_files > 1)
         return get_input_packet_mt(f, pkt);
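
Note on the first hunk: write_frame() now refuses to pass the muxer a DTS that fails to increase (or, for muxers flagged AVFMT_TS_NONSTRICT, at least stay equal) relative to the last DTS it muxed for that stream; the offending DTS is bumped to last_mux_dts + 1 and the PTS is clamped so it never falls below the DTS. The following is a minimal standalone sketch of that rule, not the avconv code itself; fix_dts(), NOPTS and the driver loop in main() are hypothetical stand-ins for the pkt/ost fields used above.

#include <stdint.h>
#include <stdio.h>

#define NOPTS INT64_MIN               /* stand-in for AV_NOPTS_VALUE */

/* Return the DTS that may actually be muxed, given the last muxed DTS.
 * strict is 1 unless the muxer allows equal timestamps (AVFMT_TS_NONSTRICT). */
static int64_t fix_dts(int64_t last_mux_dts, int64_t dts, int strict)
{
    if (last_mux_dts != NOPTS && dts < last_mux_dts + strict)
        return last_mux_dts + 1;      /* bump to keep DTS monotonically increasing */
    return dts;
}

int main(void)
{
    int64_t last = NOPTS;
    int64_t in[] = { 0, 1024, 1024, 900, 2048 };
    for (int i = 0; i < 5; i++) {
        int64_t out = fix_dts(last, in[i], 1);
        printf("in=%lld out=%lld\n", (long long)in[i], (long long)out);
        last = out;                   /* mirrors ost->last_mux_dts = pkt->dts */
    }
    return 0;
}

With strict checking, the DTS sequence 0, 1024, 1024, 900, 2048 comes out as 0, 1024, 1025, 1026, 2048, which is the rewrite the new warning message describes.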
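
Note on the last hunk: the blocking rate_emu_sleep() calls that were scattered through the audio, video, subtitle and streamcopy paths are replaced by one pacing test at the point where packets are read. With -re (f->rate_emu), get_input_packet() now returns AVERROR(EAGAIN) while any stream of the input is still ahead of the wall clock, so the transcode loop retries later instead of sleeping inside the decoder. A rough standalone sketch of that test follows; emu_stream and input_is_early() are hypothetical names, whereas the real code rescales ist->last_dts with av_rescale() and reads the clock with av_gettime().

#include <stdint.h>
#include <stdio.h>

/* One entry per input stream: last decoded DTS (already in microseconds) and
 * the wall-clock time at which reading of this input started. */
struct emu_stream {
    int64_t last_dts_us;
    int64_t start_us;
};

/* Returns 1 ("come back later") if any stream is still ahead of real time,
 * the analogue of get_input_packet() returning AVERROR(EAGAIN). */
static int input_is_early(const struct emu_stream *s, int n, int64_t now_us)
{
    for (int i = 0; i < n; i++)
        if (s[i].last_dts_us > now_us - s[i].start_us)
            return 1;
    return 0;
}

int main(void)
{
    struct emu_stream st[2] = { { 500000, 100 }, { 200000, 100 } };
    printf("early at +300ms: %d\n", input_is_early(st, 2, 100 + 300000)); /* 1 */
    printf("early at +600ms: %d\n", input_is_early(st, 2, 100 + 600000)); /* 0 */
    return 0;
}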