Merge commit '86bbdf865e04bc5ddc2021b0620e6de634375253'
diff --git a/ffmpeg.c b/ffmpeg.c
index 860499a1f3cf09fae912e57d12312285e0dc6e2c..e0cd1e61495222872797e96bbf18b7a624f3c4bc 100644
--- a/ffmpeg.c
+++ b/ffmpeg.c
 #include <errno.h>
 #include <limits.h>
 #if HAVE_ISATTY
+#if HAVE_IO_H
+#include <io.h>
+#endif
+#if HAVE_UNISTD_H
 #include <unistd.h>
 #endif
+#endif
 #include "libavformat/avformat.h"
 #include "libavdevice/avdevice.h"
 #include "libswscale/swscale.h"
@@ -193,16 +198,18 @@ static void sub2video_push_ref(InputStream *ist, int64_t pts)
         av_buffersrc_add_ref(ist->filters[i]->filter,
                              avfilter_ref_buffer(ref, ~0),
                              AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
-                             AV_BUFFERSRC_FLAG_NO_COPY);
+                             AV_BUFFERSRC_FLAG_NO_COPY |
+                             AV_BUFFERSRC_FLAG_PUSH);
 }
 
-static void sub2video_update(InputStream *ist, AVSubtitle *sub, int64_t pts)
+static void sub2video_update(InputStream *ist, AVSubtitle *sub)
 {
     int w = ist->sub2video.w, h = ist->sub2video.h;
     AVFilterBufferRef *ref = ist->sub2video.ref;
     int8_t *dst;
     int     dst_linesize;
     int i;
+    int64_t pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ist->st->time_base);
 
     if (!ref)
         return;
@@ -410,10 +417,11 @@ void av_noreturn exit_program(int ret)
             bsfc = next;
         }
         output_streams[i]->bitstream_filters = NULL;
+        avcodec_free_frame(&output_streams[i]->filtered_frame);
 
         av_freep(&output_streams[i]->forced_keyframes);
         av_freep(&output_streams[i]->avfilter);
-        av_freep(&output_streams[i]->filtered_frame);
+        av_freep(&output_streams[i]->logfile_prefix);
         av_freep(&output_streams[i]);
     }
     for (i = 0; i < nb_input_files; i++) {
@@ -421,7 +429,7 @@ void av_noreturn exit_program(int ret)
         av_freep(&input_files[i]);
     }
     for (i = 0; i < nb_input_streams; i++) {
-        av_freep(&input_streams[i]->decoded_frame);
+        avcodec_free_frame(&input_streams[i]->decoded_frame);
         av_dict_free(&input_streams[i]->opts);
         free_buffer_pool(&input_streams[i]->buffer_pool);
         avfilter_unref_bufferp(&input_streams[i]->sub2video.ref);
@@ -505,11 +513,14 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
         (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
         pkt->pts = pkt->dts = AV_NOPTS_VALUE;
 
-    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
+    if ((avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) && pkt->dts != AV_NOPTS_VALUE) {
         int64_t max = ost->st->cur_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
         if (ost->st->cur_dts && ost->st->cur_dts != AV_NOPTS_VALUE &&  max > pkt->dts) {
-            av_log(s, max - pkt->dts > 2 ? AV_LOG_WARNING : AV_LOG_DEBUG, "Audio timestamp %"PRId64" < %"PRId64" invalid, cliping\n", pkt->dts, max);
-            pkt->pts = pkt->dts = max;
+            av_log(s, max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG,
+                   "st:%d PTS: %"PRId64" DTS: %"PRId64" < %"PRId64" invalid, clipping\n", pkt->stream_index, pkt->pts, pkt->dts, max);
+            if(pkt->pts >= pkt->dts)
+                pkt->pts = FFMAX(pkt->pts, max);
+            pkt->dts = max;
         }
     }
 
@@ -561,6 +572,17 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
     }
 
     pkt->stream_index = ost->index;
+
+    if (debug_ts) {
+        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
+                "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
+                av_get_media_type_string(ost->st->codec->codec_type),
+                av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
+                av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
+                pkt->size
+              );
+    }
+
     ret = av_interleaved_write_frame(s, pkt);
     if (ret < 0) {
         print_error("av_interleaved_write_frame()", ret);
@@ -568,6 +590,18 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
     }
 }
 
+static void close_output_stream(OutputStream *ost)
+{
+    OutputFile *of = output_files[ost->file_index];
+
+    ost->finished = 1;
+    if (of->shortest) {
+        int i;
+        for (i = 0; i < of->ctx->nb_streams; i++)
+            output_streams[of->ost_index + i]->finished = 1;
+    }
+}
+
 static int check_recording_time(OutputStream *ost)
 {
     OutputFile *of = output_files[ost->file_index];
@@ -575,7 +609,7 @@ static int check_recording_time(OutputStream *ost)
     if (of->recording_time != INT64_MAX &&
         av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
                       AV_TIME_BASE_Q) >= 0) {
-        ost->finished = 1;
+        close_output_stream(ost);
         return 0;
     }
     return 1;
@@ -622,9 +656,9 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
                    av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
         }
 
+        audio_size += pkt.size;
         write_frame(s, &pkt, ost);
 
-        audio_size += pkt.size;
         av_free_packet(&pkt);
     }
 }
@@ -671,15 +705,15 @@ static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void *
 static void do_subtitle_out(AVFormatContext *s,
                             OutputStream *ost,
                             InputStream *ist,
-                            AVSubtitle *sub,
-                            int64_t pts)
+                            AVSubtitle *sub)
 {
     int subtitle_out_max_size = 1024 * 1024;
     int subtitle_out_size, nb, i;
     AVCodecContext *enc;
     AVPacket pkt;
+    int64_t pts;
 
-    if (pts == AV_NOPTS_VALUE) {
+    if (sub->pts == AV_NOPTS_VALUE) {
         av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
         if (exit_on_error)
             exit_program(1);
@@ -701,8 +735,7 @@ static void do_subtitle_out(AVFormatContext *s,
         nb = 1;
 
     /* shift timestamp to honor -ss and make check_recording_time() work with -t */
-    pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q)
-        - output_files[ost->file_index]->start_time;
+    pts = sub->pts - output_files[ost->file_index]->start_time;
     for (i = 0; i < nb; i++) {
         ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
         if (!check_recording_time(ost))
@@ -735,8 +768,8 @@ static void do_subtitle_out(AVFormatContext *s,
             else
                 pkt.pts += 90 * sub->end_display_time;
         }
-        write_frame(s, &pkt, ost);
         subtitle_size += pkt.size;
+        write_frame(s, &pkt, ost);
     }
 }
 
@@ -799,7 +832,7 @@ static void do_video_out(AVFormatContext *s,
         return;
     } else if (nb_frames > 1) {
         if (nb_frames > dts_error_threshold * 30) {
-            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skiping\n", nb_frames - 1);
+            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
             nb_frames_drop++;
             return;
         }
@@ -830,8 +863,8 @@ static void do_video_out(AVFormatContext *s,
         pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
         pkt.flags |= AV_PKT_FLAG_KEY;
 
-        write_frame(s, &pkt, ost);
         video_size += pkt.size;
+        write_frame(s, &pkt, ost);
     } else {
         int got_packet;
         AVFrame big_picture;
@@ -881,9 +914,9 @@ static void do_video_out(AVFormatContext *s,
                     av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
             }
 
-            write_frame(s, &pkt, ost);
             frame_size = pkt.size;
             video_size += pkt.size;
+            write_frame(s, &pkt, ost);
             av_free_packet(&pkt);
 
             /* if two pass, output log */
@@ -947,106 +980,90 @@ static void do_video_stats(AVFormatContext *os, OutputStream *ost,
     }
 }
 
-/* check for new output on any of the filtergraphs */
-static int poll_filters(void)
+/**
+ * Get and encode new output from any of the filtergraphs, without causing
+ * activity.
+ *
+ * @return  0 for success, <0 for severe errors
+ */
+static int reap_filters(void)
 {
     AVFilterBufferRef *picref;
     AVFrame *filtered_frame = NULL;
-    int i, ret, ret_all;
-    unsigned nb_success = 1, av_uninit(nb_eof);
+    int i;
     int64_t frame_pts;
 
-    while (1) {
-        /* Reap all buffers present in the buffer sinks */
-        for (i = 0; i < nb_output_streams; i++) {
-            OutputStream *ost = output_streams[i];
-            OutputFile    *of = output_files[ost->file_index];
-            int ret = 0;
-
-            if (!ost->filter)
-                continue;
+    /* Reap all buffers present in the buffer sinks */
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream *ost = output_streams[i];
+        OutputFile    *of = output_files[ost->file_index];
+        int ret = 0;
 
-            if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
-                return AVERROR(ENOMEM);
-            } else
-                avcodec_get_frame_defaults(ost->filtered_frame);
-            filtered_frame = ost->filtered_frame;
+        if (!ost->filter)
+            continue;
 
-            while (1) {
-                ret = av_buffersink_get_buffer_ref(ost->filter->filter, &picref,
-                                                   AV_BUFFERSINK_FLAG_NO_REQUEST);
-                if (ret < 0) {
-                    if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
-                        char buf[256];
-                        av_strerror(ret, buf, sizeof(buf));
-                        av_log(NULL, AV_LOG_WARNING,
-                               "Error in av_buffersink_get_buffer_ref(): %s\n", buf);
-                    }
-                    break;
+        if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
+            return AVERROR(ENOMEM);
+        } else
+            avcodec_get_frame_defaults(ost->filtered_frame);
+        filtered_frame = ost->filtered_frame;
+
+        while (1) {
+            ret = av_buffersink_get_buffer_ref(ost->filter->filter, &picref,
+                                               AV_BUFFERSINK_FLAG_NO_REQUEST);
+            if (ret < 0) {
+                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
+                    char buf[256];
+                    av_strerror(ret, buf, sizeof(buf));
+                    av_log(NULL, AV_LOG_WARNING,
+                           "Error in av_buffersink_get_buffer_ref(): %s\n", buf);
                 }
-                frame_pts = AV_NOPTS_VALUE;
-                if (picref->pts != AV_NOPTS_VALUE) {
-                    filtered_frame->pts = frame_pts = av_rescale_q(picref->pts,
-                                                    ost->filter->filter->inputs[0]->time_base,
-                                                    ost->st->codec->time_base) -
-                                        av_rescale_q(of->start_time,
-                                                    AV_TIME_BASE_Q,
-                                                    ost->st->codec->time_base);
-
-                    if (of->start_time && filtered_frame->pts < 0) {
-                        avfilter_unref_buffer(picref);
-                        continue;
-                    }
+                break;
+            }
+            frame_pts = AV_NOPTS_VALUE;
+            if (picref->pts != AV_NOPTS_VALUE) {
+                filtered_frame->pts = frame_pts = av_rescale_q(picref->pts,
+                                                ost->filter->filter->inputs[0]->time_base,
+                                                ost->st->codec->time_base) -
+                                    av_rescale_q(of->start_time,
+                                                AV_TIME_BASE_Q,
+                                                ost->st->codec->time_base);
+
+                if (of->start_time && filtered_frame->pts < 0) {
+                    avfilter_unref_buffer(picref);
+                    continue;
                 }
-                //if (ost->source_index >= 0)
-                //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
-
-
-                switch (ost->filter->filter->inputs[0]->type) {
-                case AVMEDIA_TYPE_VIDEO:
-                    avfilter_copy_buf_props(filtered_frame, picref);
-                    filtered_frame->pts = frame_pts;
-                    if (!ost->frame_aspect_ratio)
-                        ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio;
+            }
+            //if (ost->source_index >= 0)
+            //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
 
-                    do_video_out(of->ctx, ost, filtered_frame,
-                                 same_quant ? ost->last_quality :
-                                              ost->st->codec->global_quality);
-                    break;
-                case AVMEDIA_TYPE_AUDIO:
-                    avfilter_copy_buf_props(filtered_frame, picref);
-                    filtered_frame->pts = frame_pts;
-                    do_audio_out(of->ctx, ost, filtered_frame);
-                    break;
-                default:
-                    // TODO support subtitle filters
-                    av_assert0(0);
-                }
 
-                avfilter_unref_buffer(picref);
-            }
-        }
-        if (!nb_success) /* from last round */
-            break;
-        /* Request frames through all the graphs */
-        ret_all = nb_success = nb_eof = 0;
-        for (i = 0; i < nb_filtergraphs; i++) {
-            ret = avfilter_graph_request_oldest(filtergraphs[i]->graph);
-            if (!ret) {
-                nb_success++;
-            } else if (ret == AVERROR_EOF) {
-                nb_eof++;
-            } else if (ret != AVERROR(EAGAIN)) {
-                char buf[256];
-                av_strerror(ret, buf, sizeof(buf));
-                av_log(NULL, AV_LOG_WARNING,
-                       "Error in request_frame(): %s\n", buf);
-                ret_all = ret;
+            switch (ost->filter->filter->inputs[0]->type) {
+            case AVMEDIA_TYPE_VIDEO:
+                avfilter_copy_buf_props(filtered_frame, picref);
+                filtered_frame->pts = frame_pts;
+                if (!ost->frame_aspect_ratio)
+                    ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio;
+
+                do_video_out(of->ctx, ost, filtered_frame,
+                             same_quant ? ost->last_quality :
+                                          ost->st->codec->global_quality);
+                break;
+            case AVMEDIA_TYPE_AUDIO:
+                avfilter_copy_buf_props(filtered_frame, picref);
+                filtered_frame->pts = frame_pts;
+                do_audio_out(of->ctx, ost, filtered_frame);
+                break;
+            default:
+                // TODO support subtitle filters
+                av_assert0(0);
             }
+
+            avfilter_unref_buffer(picref);
         }
-        /* Try again if anything succeeded */
     }
-    return nb_eof == nb_filtergraphs ? AVERROR_EOF : ret_all;
+
+    return 0;
 }
 
 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
@@ -1059,7 +1076,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
     AVCodecContext *enc;
     int frame_number, vid, i;
     double bitrate;
-    int64_t pts = INT64_MAX;
+    int64_t pts = INT64_MIN;
     static int64_t last_time = -1;
     static int qp_histogram[52];
     int hours, mins, secs, us;
@@ -1154,8 +1171,9 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
             vid = 1;
         }
         /* compute min output value */
-        pts = FFMIN(pts, av_rescale_q(ost->st->pts.val,
-                                      ost->st->time_base, AV_TIME_BASE_Q));
+        if ((is_last_report || !ost->finished) && ost->st->pts.val != AV_NOPTS_VALUE)
+            pts = FFMAX(pts, av_rescale_q(ost->st->pts.val,
+                                          ost->st->time_base, AV_TIME_BASE_Q));
     }
 
     secs = pts / AV_TIME_BASE;
@@ -1323,9 +1341,13 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
         !ost->copy_initial_nonkeyframes)
         return;
 
+    if (!ost->frame_number && ist->pts < of->start_time &&
+        !ost->copy_prior_start)
+        return;
+
     if (of->recording_time != INT64_MAX &&
         ist->pts >= of->recording_time + of->start_time) {
-        ost->finished = 1;
+        close_output_stream(ost);
         return;
     }
 
@@ -1426,14 +1448,13 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
 
     if (ret >= 0 && avctx->sample_rate <= 0) {
         av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
-        return AVERROR_INVALIDDATA;
+        ret = AVERROR_INVALIDDATA;
     }
 
     if (!*got_output || ret < 0) {
         if (!pkt->size) {
             for (i = 0; i < ist->nb_filters; i++)
-                av_buffersrc_add_ref(ist->filters[i]->filter, NULL,
-                                     AV_BUFFERSRC_FLAG_NO_COPY);
+                av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
         }
         return ret;
     }
@@ -1522,7 +1543,8 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
                                           decoded_frame_tb,
                                           (AVRational){1, ist->st->codec->sample_rate});
     for (i = 0; i < ist->nb_filters; i++)
-        av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, 0);
+        av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame,
+                               AV_BUFFERSRC_FLAG_PUSH);
 
     decoded_frame->pts = AV_NOPTS_VALUE;
 
@@ -1552,7 +1574,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
     if (!*got_output || ret < 0) {
         if (!pkt->size) {
             for (i = 0; i < ist->nb_filters; i++)
-                av_buffersrc_add_ref(ist->filters[i]->filter, NULL, AV_BUFFERSRC_FLAG_NO_COPY);
+                av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
         }
         return ret;
     }
@@ -1633,9 +1655,10 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
             buf->refcount++;
             av_buffersrc_add_ref(ist->filters[i]->filter, fb,
                                  AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
-                                 AV_BUFFERSRC_FLAG_NO_COPY);
+                                 AV_BUFFERSRC_FLAG_NO_COPY |
+                                 AV_BUFFERSRC_FLAG_PUSH);
         } else
-        if(av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, 0)<0) {
+        if(av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, AV_BUFFERSRC_FLAG_PUSH)<0) {
             av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n");
             exit_program(1);
         }
@@ -1649,7 +1672,6 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
 {
     AVSubtitle subtitle;
-    int64_t pts = pkt->pts;
     int i, ret = avcodec_decode_subtitle2(ist->st->codec,
                                           &subtitle, got_output, pkt);
     if (ret < 0 || !*got_output) {
@@ -1660,8 +1682,8 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
 
     if (ist->fix_sub_duration) {
         if (ist->prev_sub.got_output) {
-            int end = av_rescale_q(pts - ist->prev_sub.pts, ist->st->time_base,
-                                   (AVRational){ 1, 1000 });
+            int end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
+                                 1000, AV_TIME_BASE);
             if (end < ist->prev_sub.subtitle.end_display_time) {
                 av_log(ist->st->codec, AV_LOG_DEBUG,
                        "Subtitle duration reduced from %d to %d\n",
@@ -1669,26 +1691,25 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
                 ist->prev_sub.subtitle.end_display_time = end;
             }
         }
-        FFSWAP(int64_t,    pts,         ist->prev_sub.pts);
         FFSWAP(int,        *got_output, ist->prev_sub.got_output);
         FFSWAP(int,        ret,         ist->prev_sub.ret);
         FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
     }
 
+    sub2video_update(ist, &subtitle);
+
     if (!*got_output || !subtitle.num_rects)
         return ret;
 
     rate_emu_sleep(ist);
 
-    sub2video_update(ist, &subtitle, pkt->pts);
-
     for (i = 0; i < nb_output_streams; i++) {
         OutputStream *ost = output_streams[i];
 
         if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
             continue;
 
-        do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pts);
+        do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
     }
 
     avsubtitle_free(&subtitle);
@@ -2051,8 +2072,10 @@ static int transcode_init(void)
             } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
                       && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
                       && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
+                      && strcmp(oc->oformat->name, "f4v")
             ) {
-                if(   copy_tb<0 && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base)
+                if(   copy_tb<0 && icodec->time_base.den
+                                && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base)
                                 && av_q2d(ist->st->time_base) < 1.0/500
                    || copy_tb==0){
                     codec->time_base = icodec->time_base;
@@ -2119,7 +2142,7 @@ static int transcode_init(void)
             }
 
             if (ist)
-                ist->decoding_needed = 1;
+                ist->decoding_needed++;
             ost->encoding_needed = 1;
 
             if (!ost->filter &&
@@ -2136,6 +2159,8 @@ static int transcode_init(void)
             if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                 if (ost->filter && !ost->frame_rate.num)
                     ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
+                if (ist && !ost->frame_rate.num)
+                    ost->frame_rate = ist->framerate;
                 if (ist && !ost->frame_rate.num)
                     ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
 //                    ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
@@ -2203,7 +2228,8 @@ static int transcode_init(void)
                 FILE *f;
 
                 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
-                         pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
+                         ost->logfile_prefix ? ost->logfile_prefix :
+                                               DEFAULT_PASS_LOGFILENAME_PREFIX,
                          i);
                 if (!strcmp(ost->enc->name, "libx264")) {
                     av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
@@ -2409,7 +2435,7 @@ static int need_output(void)
         if (ost->frame_number >= ost->max_frames) {
             int j;
             for (j = 0; j < of->ctx->nb_streams; j++)
-                output_streams[of->ost_index + j]->finished = 1;
+                close_output_stream(output_streams[of->ost_index + j]);
             continue;
         }
 
@@ -2419,84 +2445,27 @@ static int need_output(void)
     return 0;
 }
 
-static int input_acceptable(InputStream *ist)
-{
-    av_assert1(!ist->discard);
-    return !input_files[ist->file_index]->eagain &&
-           !input_files[ist->file_index]->eof_reached;
-}
-
-static int find_graph_input(FilterGraph *graph)
-{
-    int i, nb_req_max = 0, file_index = -1;
-
-    for (i = 0; i < graph->nb_inputs; i++) {
-        int nb_req = av_buffersrc_get_nb_failed_requests(graph->inputs[i]->filter);
-        if (nb_req > nb_req_max) {
-            InputStream *ist = graph->inputs[i]->ist;
-            if (input_acceptable(ist)) {
-                nb_req_max = nb_req;
-                file_index = ist->file_index;
-            }
-        }
-    }
-
-    return file_index;
-}
-
 /**
- * Select the input file to read from.
+ * Select the output stream to process.
  *
- * @return  >=0 index of the input file to use;
- *          -1  if no file is acceptable;
- *          -2  to read from filters without reading from a file
+ * @return  selected output stream, or NULL if none available
  */
-static int select_input_file(void)
+static OutputStream *choose_output(void)
 {
-    int i, ret, nb_active_out = nb_output_streams, ost_index = -1;
-    int64_t opts_min;
-    OutputStream *ost;
-    AVFilterBufferRef *dummy;
+    int i;
+    int64_t opts_min = INT64_MAX;
+    OutputStream *ost_min = NULL;
 
-    for (i = 0; i < nb_output_streams; i++)
-        nb_active_out -= output_streams[i]->unavailable =
-            output_streams[i]->finished;
-    while (nb_active_out) {
-        opts_min = INT64_MAX;
-        ost_index = -1;
-        for (i = 0; i < nb_output_streams; i++) {
-            OutputStream *ost = output_streams[i];
-            int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
-                                        AV_TIME_BASE_Q);
-            if (!ost->unavailable && opts < opts_min) {
-                opts_min  = opts;
-                ost_index = i;
-            }
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream *ost = output_streams[i];
+        int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
+                                    AV_TIME_BASE_Q);
+        if (!ost->unavailable && !ost->finished && opts < opts_min) {
+            opts_min = opts;
+            ost_min  = ost;
         }
-        if (ost_index < 0)
-            return -1;
-
-        ost = output_streams[ost_index];
-        if (ost->source_index >= 0) {
-            /* ost is directly connected to an input */
-            InputStream *ist = input_streams[ost->source_index];
-            if (input_acceptable(ist))
-                return ist->file_index;
-        } else {
-            /* ost is connected to a complex filtergraph */
-            av_assert1(ost->filter);
-            ret = av_buffersink_get_buffer_ref(ost->filter->filter, &dummy,
-                                               AV_BUFFERSINK_FLAG_PEEK);
-            if (ret >= 0)
-                return -2;
-            ret = find_graph_input(ost->filter->graph);
-            if (ret >= 0)
-                return ret;
-        }
-        ost->unavailable = 1;
-        nb_active_out--;
     }
-    return -1;
+    return ost_min;
 }
 
 static int check_keyboard_interaction(int64_t cur_time)
@@ -2714,8 +2683,8 @@ static int get_input_packet(InputFile *f, AVPacket *pkt)
 static int got_eagain(void)
 {
     int i;
-    for (i = 0; i < nb_input_files; i++)
-        if (input_files[i]->eagain)
+    for (i = 0; i < nb_output_streams; i++)
+        if (output_streams[i]->unavailable)
             return 1;
     return 0;
 }
@@ -2725,6 +2694,8 @@ static void reset_eagain(void)
     int i;
     for (i = 0; i < nb_input_files; i++)
         input_files[i]->eagain = 0;
+    for (i = 0; i < nb_output_streams; i++)
+        output_streams[i]->unavailable = 0;
 }
 
 /**
@@ -2734,32 +2705,13 @@ static void reset_eagain(void)
  *   this function should be called again
  * - AVERROR_EOF -- this function should not be called again
  */
-static int process_input(void)
+static int process_input(int file_index)
 {
-    InputFile *ifile;
+    InputFile *ifile = input_files[file_index];
     AVFormatContext *is;
     InputStream *ist;
     AVPacket pkt;
     int ret, i, j;
-    int file_index;
-
-    /* select the stream that we must read now */
-    file_index = select_input_file();
-    /* if none, if is finished */
-    if (file_index == -2) {
-        poll_filters() ;
-        return AVERROR(EAGAIN);
-    }
-    if (file_index < 0) {
-        if (got_eagain()) {
-            reset_eagain();
-            av_usleep(10000);
-            return AVERROR(EAGAIN);
-        }
-        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
-        return AVERROR_EOF;
-    }
-    ifile = input_files[file_index];
 
     is  = ifile->ctx;
     ret = get_input_packet(ifile, &pkt);
@@ -2780,19 +2732,14 @@ static int process_input(void)
             ist = input_streams[ifile->ist_index + i];
             if (ist->decoding_needed)
                 output_packet(ist, NULL);
-            poll_filters();
-        }
 
-        for (i = 0; i < nb_output_streams; i++) {
-            OutputStream *ost    = output_streams[i];
-            OutputFile *of       = output_files[ost->file_index];
-            AVFormatContext *os  = output_files[ost->file_index]->ctx;
+            /* mark all outputs that don't go through lavfi as finished */
+            for (j = 0; j < nb_output_streams; j++) {
+                OutputStream *ost = output_streams[j];
 
-            if (of->shortest) {
-                int j;
-                for (j = 0; j < of->ctx->nb_streams; j++)
-                    output_streams[of->ost_index + j]->finished = 1;
-                continue;
+                if (ost->source_index == ifile->ist_index + i &&
+                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
+                    close_output_stream(ost);
             }
         }
 
@@ -2817,15 +2764,16 @@ static int process_input(void)
         goto discard_packet;
 
     if(!ist->wrap_correction_done && input_files[file_index]->ctx->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
-        uint64_t stime = av_rescale_q(input_files[file_index]->ctx->start_time, AV_TIME_BASE_Q, ist->st->time_base);
-        uint64_t stime2= stime + (1LL<<ist->st->pts_wrap_bits);
+        int64_t stime = av_rescale_q(input_files[file_index]->ctx->start_time, AV_TIME_BASE_Q, ist->st->time_base);
+        int64_t stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
         ist->wrap_correction_done = 1;
-        if(pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime && pkt.dts - stime > stime2 - pkt.dts) {
-            pkt.dts -= 1LL<<ist->st->pts_wrap_bits;
+
+        if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
+            pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
             ist->wrap_correction_done = 0;
         }
-        if(pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime && pkt.pts - stime > stime2 - pkt.pts) {
-            pkt.pts -= 1LL<<ist->st->pts_wrap_bits;
+        if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
+            pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
             ist->wrap_correction_done = 0;
         }
     }
@@ -2890,16 +2838,14 @@ static int process_input(void)
 
     sub2video_heartbeat(ist, pkt.pts);
 
-    if ((ret = output_packet(ist, &pkt)) < 0 ||
-        ((ret = poll_filters()) < 0 && ret != AVERROR_EOF)) {
+    ret = output_packet(ist, &pkt);
+    if (ret < 0) {
         char buf[128];
         av_strerror(ret, buf, sizeof(buf));
         av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                 ist->file_index, ist->st->index, buf);
         if (exit_on_error)
             exit_program(1);
-        av_free_packet(&pkt);
-        return AVERROR(EAGAIN);
     }
 
 discard_packet:
@@ -2908,6 +2854,98 @@ discard_packet:
     return 0;
 }
 
+/**
+ * Perform a step of transcoding for the specified filter graph.
+ *
+ * @param[in]  graph     filter graph to consider
+ * @param[out] best_ist  input stream where a frame would allow to continue
+ * @return  0 for success, <0 for error
+ */
+static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
+{
+    int i, ret;
+    int nb_requests, nb_requests_max = 0;
+    InputFilter *ifilter;
+    InputStream *ist;
+
+    *best_ist = NULL;
+    ret = avfilter_graph_request_oldest(graph->graph);
+    if (ret >= 0)
+        return reap_filters();
+
+    if (ret == AVERROR_EOF) {
+        ret = reap_filters();
+        for (i = 0; i < graph->nb_outputs; i++)
+            close_output_stream(graph->outputs[i]->ost);
+        return ret;
+    }
+    if (ret != AVERROR(EAGAIN))
+        return ret;
+
+    for (i = 0; i < graph->nb_inputs; i++) {
+        ifilter = graph->inputs[i];
+        ist = ifilter->ist;
+        if (input_files[ist->file_index]->eagain ||
+            input_files[ist->file_index]->eof_reached)
+            continue;
+        nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
+        if (nb_requests > nb_requests_max) {
+            nb_requests_max = nb_requests;
+            *best_ist = ist;
+        }
+    }
+
+    if (!*best_ist)
+        for (i = 0; i < graph->nb_outputs; i++)
+            graph->outputs[i]->ost->unavailable = 1;
+
+    return 0;
+}
+
+/**
+ * Run a single step of transcoding.
+ *
+ * @return  0 for success, <0 for error
+ */
+static int transcode_step(void)
+{
+    OutputStream *ost;
+    InputStream  *ist;
+    int ret;
+
+    ost = choose_output();
+    if (!ost) {
+        if (got_eagain()) {
+            reset_eagain();
+            av_usleep(10000);
+            return 0;
+        }
+        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
+        return AVERROR_EOF;
+    }
+
+    if (ost->filter) {
+        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
+            return ret;
+        if (!ist)
+            return 0;
+    } else {
+        av_assert0(ost->source_index >= 0);
+        ist = input_streams[ost->source_index];
+    }
+
+    ret = process_input(ist->file_index);
+    if (ret == AVERROR(EAGAIN)) {
+        if (input_files[ist->file_index]->eagain)
+            ost->unavailable = 1;
+        return 0;
+    }
+    if (ret < 0)
+        return ret == AVERROR_EOF ? 0 : ret;
+
+    return reap_filters();
+}
+
 /*
  * The following code is the main loop of the file converter
  */
@@ -2948,12 +2986,11 @@ static int transcode(void)
             break;
         }
 
-        ret = process_input();
+        ret = transcode_step();
         if (ret < 0) {
-            if (ret == AVERROR(EAGAIN))
+            if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
                 continue;
-            if (ret == AVERROR_EOF)
-                break;
+
             av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
             break;
         }
@@ -2972,7 +3009,6 @@ static int transcode(void)
             output_packet(ist, NULL);
         }
     }
-    poll_filters();
     flush_encoders();
 
     term_exit();
@@ -3075,7 +3111,7 @@ static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
 {
     int idx = locate_option(argc, argv, options, "cpuflags");
     if (idx && argv[idx + 1])
-        opt_cpuflags("cpuflags", argv[idx + 1]);
+        opt_cpuflags(NULL, "cpuflags", argv[idx + 1]);
 }
 
 int main(int argc, char **argv)
@@ -3085,6 +3121,8 @@ int main(int argc, char **argv)
 
     reset_options(&o, 0);
 
+    setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
+
     av_log_set_flags(AV_LOG_SKIP_REPEATED);
     parse_loglevel(argc, argv, options);