avconv: rename 'icodec' to 'dec_ctx'
index ad4a975a1cebe97132944b261cd0c0cfd57da965..1dad8900c7c6c4f8003b057cec6318a7fa37c140 100644
--- a/avconv.c
+++ b/avconv.c
 #include <errno.h>
 #include <signal.h>
 #include <limits.h>
+#include <stdint.h>
+
 #include "libavformat/avformat.h"
 #include "libavdevice/avdevice.h"
 #include "libswscale/swscale.h"
 #include "libavresample/avresample.h"
 #include "libavutil/opt.h"
-#include "libavutil/audioconvert.h"
+#include "libavutil/channel_layout.h"
 #include "libavutil/parseutils.h"
 #include "libavutil/samplefmt.h"
-#include "libavutil/colorspace.h"
 #include "libavutil/fifo.h"
 #include "libavutil/intreadwrite.h"
 #include "libavutil/dict.h"
 #include "libavformat/os_support.h"
 
 # include "libavfilter/avfilter.h"
-# include "libavfilter/avfiltergraph.h"
 # include "libavfilter/buffersrc.h"
 # include "libavfilter/buffersink.h"
 
 #if HAVE_SYS_RESOURCE_H
+#include <sys/time.h>
 #include <sys/types.h>
 #include <sys/resource.h>
 #elif HAVE_GETPROCESSTIMES
@@ -83,10 +84,6 @@ const int program_birth_year = 2000;
 
 static FILE *vstats_file;
 
-static int64_t video_size = 0;
-static int64_t audio_size = 0;
-static int64_t extra_size = 0;
-static int nb_frames_dup = 0;
 static int nb_frames_drop = 0;
 
 
@@ -111,535 +108,6 @@ int         nb_output_files   = 0;
 FilterGraph **filtergraphs;
 int        nb_filtergraphs;
 
-/**
- * Define a function for building a string containing a list of
- * allowed formats,
- */
-#define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator) \
-static char *choose_ ## var ## s(OutputStream *ost)                             \
-{                                                                               \
-    if (ost->st->codec->var != none) {                                          \
-        get_name(ost->st->codec->var);                                          \
-        return av_strdup(name);                                                 \
-    } else if (ost->enc->supported_list) {                                      \
-        const type *p;                                                          \
-        AVIOContext *s = NULL;                                                  \
-        uint8_t *ret;                                                           \
-        int len;                                                                \
-                                                                                \
-        if (avio_open_dyn_buf(&s) < 0)                                          \
-            exit_program(1);                                                    \
-                                                                                \
-        for (p = ost->enc->supported_list; *p != none; p++) {                   \
-            get_name(*p);                                                       \
-            avio_printf(s, "%s" separator, name);                               \
-        }                                                                       \
-        len = avio_close_dyn_buf(s, &ret);                                      \
-        ret[len - 1] = 0;                                                       \
-        return ret;                                                             \
-    } else                                                                      \
-        return NULL;                                                            \
-}
-
-#define GET_PIX_FMT_NAME(pix_fmt)\
-    const char *name = av_get_pix_fmt_name(pix_fmt);
-
-DEF_CHOOSE_FORMAT(enum PixelFormat, pix_fmt, pix_fmts, PIX_FMT_NONE,
-                  GET_PIX_FMT_NAME, ":")
-
-#define GET_SAMPLE_FMT_NAME(sample_fmt)\
-    const char *name = av_get_sample_fmt_name(sample_fmt)
-
-DEF_CHOOSE_FORMAT(enum AVSampleFormat, sample_fmt, sample_fmts,
-                  AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME, ",")
-
-#define GET_SAMPLE_RATE_NAME(rate)\
-    char name[16];\
-    snprintf(name, sizeof(name), "%d", rate);
-
-DEF_CHOOSE_FORMAT(int, sample_rate, supported_samplerates, 0,
-                  GET_SAMPLE_RATE_NAME, ",")
-
-#define GET_CH_LAYOUT_NAME(ch_layout)\
-    char name[16];\
-    snprintf(name, sizeof(name), "0x%"PRIx64, ch_layout);
-
-DEF_CHOOSE_FORMAT(uint64_t, channel_layout, channel_layouts, 0,
-                  GET_CH_LAYOUT_NAME, ",")
-
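For orientation while reading the removed block: each DEF_CHOOSE_FORMAT instance above generates a small chooser function. Hand-expanded for the pixel-format case it looks roughly like this (an illustrative sketch, not code taken from the patch):

    static char *choose_pix_fmts(OutputStream *ost)
    {
        if (ost->st->codec->pix_fmt != PIX_FMT_NONE) {
            /* the output stream already has a fixed format: return its name */
            const char *name = av_get_pix_fmt_name(ost->st->codec->pix_fmt);
            return av_strdup(name);
        } else if (ost->enc->pix_fmts) {
            /* otherwise join every format the encoder supports with ':' */
            const enum PixelFormat *p;
            AVIOContext *s = NULL;
            uint8_t *ret;
            int len;

            if (avio_open_dyn_buf(&s) < 0)
                exit_program(1);
            for (p = ost->enc->pix_fmts; *p != PIX_FMT_NONE; p++)
                avio_printf(s, "%s:", av_get_pix_fmt_name(*p));
            len = avio_close_dyn_buf(s, &ret);
            ret[len - 1] = 0;            /* drop the trailing separator */
            return (char *)ret;
        } else
            return NULL;
    }

The other three instances differ only in the field inspected, the terminator value and the separator character.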
-static FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
-{
-    FilterGraph *fg = av_mallocz(sizeof(*fg));
-
-    if (!fg)
-        exit_program(1);
-    fg->index = nb_filtergraphs;
-
-    fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs), &fg->nb_outputs,
-                             fg->nb_outputs + 1);
-    if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
-        exit_program(1);
-    fg->outputs[0]->ost   = ost;
-    fg->outputs[0]->graph = fg;
-
-    ost->filter = fg->outputs[0];
-
-    fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs,
-                            fg->nb_inputs + 1);
-    if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
-        exit_program(1);
-    fg->inputs[0]->ist   = ist;
-    fg->inputs[0]->graph = fg;
-
-    ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
-                              &ist->nb_filters, ist->nb_filters + 1);
-    ist->filters[ist->nb_filters - 1] = fg->inputs[0];
-
-    filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
-                              &nb_filtergraphs, nb_filtergraphs + 1);
-    filtergraphs[nb_filtergraphs - 1] = fg;
-
-    return fg;
-}
-
-static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
-{
-    InputStream *ist = NULL;
-    enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
-    int i;
-
-    // TODO: support other filter types
-    if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
-        av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
-               "currently.\n");
-        exit_program(1);
-    }
-
-    if (in->name) {
-        AVFormatContext *s;
-        AVStream       *st = NULL;
-        char *p;
-        int file_idx = strtol(in->name, &p, 0);
-
-        if (file_idx < 0 || file_idx >= nb_input_files) {
-            av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtegraph description %s.\n",
-                   file_idx, fg->graph_desc);
-            exit_program(1);
-        }
-        s = input_files[file_idx]->ctx;
-
-        for (i = 0; i < s->nb_streams; i++) {
-            if (s->streams[i]->codec->codec_type != type)
-                continue;
-            if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
-                st = s->streams[i];
-                break;
-            }
-        }
-        if (!st) {
-            av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
-                   "matches no streams.\n", p, fg->graph_desc);
-            exit_program(1);
-        }
-        ist = input_streams[input_files[file_idx]->ist_index + st->index];
-    } else {
-        /* find the first unused stream of corresponding type */
-        for (i = 0; i < nb_input_streams; i++) {
-            ist = input_streams[i];
-            if (ist->st->codec->codec_type == type && ist->discard)
-                break;
-        }
-        if (i == nb_input_streams) {
-            av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
-                   "unlabeled input pad %d on filter %s", in->pad_idx,
-                   in->filter_ctx->name);
-            exit_program(1);
-        }
-    }
-    av_assert0(ist);
-
-    ist->discard         = 0;
-    ist->decoding_needed = 1;
-    ist->st->discard = AVDISCARD_NONE;
-
-    fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs),
-                            &fg->nb_inputs, fg->nb_inputs + 1);
-    if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
-        exit_program(1);
-    fg->inputs[fg->nb_inputs - 1]->ist   = ist;
-    fg->inputs[fg->nb_inputs - 1]->graph = fg;
-
-    ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
-                              &ist->nb_filters, ist->nb_filters + 1);
-    ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
-}
-
-static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
-{
-    char *pix_fmts;
-    OutputStream *ost = ofilter->ost;
-    AVCodecContext *codec = ost->st->codec;
-    AVFilterContext *last_filter = out->filter_ctx;
-    int pad_idx = out->pad_idx;
-    int ret;
-    char name[255];
-
-    snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
-    ret = avfilter_graph_create_filter(&ofilter->filter,
-                                       avfilter_get_by_name("buffersink"),
-                                       name, NULL, pix_fmts, fg->graph);
-    if (ret < 0)
-        return ret;
-
-    if (codec->width || codec->height) {
-        char args[255];
-        AVFilterContext *filter;
-
-        snprintf(args, sizeof(args), "%d:%d:flags=0x%X",
-                 codec->width,
-                 codec->height,
-                 (unsigned)ost->sws_flags);
-        snprintf(name, sizeof(name), "scaler for output stream %d:%d",
-                 ost->file_index, ost->index);
-        if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
-                                                name, args, NULL, fg->graph)) < 0)
-            return ret;
-        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
-            return ret;
-
-        last_filter = filter;
-        pad_idx = 0;
-    }
-
-    if ((pix_fmts = choose_pix_fmts(ost))) {
-        AVFilterContext *filter;
-        snprintf(name, sizeof(name), "pixel format for output stream %d:%d",
-                 ost->file_index, ost->index);
-        if ((ret = avfilter_graph_create_filter(&filter,
-                                                avfilter_get_by_name("format"),
-                                                "format", pix_fmts, NULL,
-                                                fg->graph)) < 0)
-            return ret;
-        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
-            return ret;
-
-        last_filter = filter;
-        pad_idx     = 0;
-        av_freep(&pix_fmts);
-    }
-
-    if (ost->frame_rate.num) {
-        AVFilterContext *fps;
-        char args[255];
-
-        snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
-                 ost->frame_rate.den);
-        snprintf(name, sizeof(name), "fps for output stream %d:%d",
-                 ost->file_index, ost->index);
-        ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
-                                           name, args, NULL, fg->graph);
-        if (ret < 0)
-            return ret;
-
-        ret = avfilter_link(last_filter, pad_idx, fps, 0);
-        if (ret < 0)
-            return ret;
-        last_filter = fps;
-        pad_idx = 0;
-    }
-
-    if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
-        return ret;
-
-    return 0;
-}
-
-static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
-{
-    OutputStream *ost = ofilter->ost;
-    AVCodecContext *codec  = ost->st->codec;
-    AVFilterContext *last_filter = out->filter_ctx;
-    int pad_idx = out->pad_idx;
-    char *sample_fmts, *sample_rates, *channel_layouts;
-    char name[255];
-    int ret;
-
-
-    snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
-    ret = avfilter_graph_create_filter(&ofilter->filter,
-                                       avfilter_get_by_name("abuffersink"),
-                                       name, NULL, NULL, fg->graph);
-    if (ret < 0)
-        return ret;
-
-    if (codec->channels && !codec->channel_layout)
-        codec->channel_layout = av_get_default_channel_layout(codec->channels);
-
-    sample_fmts     = choose_sample_fmts(ost);
-    sample_rates    = choose_sample_rates(ost);
-    channel_layouts = choose_channel_layouts(ost);
-    if (sample_fmts || sample_rates || channel_layouts) {
-        AVFilterContext *format;
-        char args[256];
-        int len = 0;
-
-        if (sample_fmts)
-            len += snprintf(args + len, sizeof(args) - len, "sample_fmts=%s:",
-                            sample_fmts);
-        if (sample_rates)
-            len += snprintf(args + len, sizeof(args) - len, "sample_rates=%s:",
-                            sample_rates);
-        if (channel_layouts)
-            len += snprintf(args + len, sizeof(args) - len, "channel_layouts=%s:",
-                            channel_layouts);
-        args[len - 1] = 0;
-
-        av_freep(&sample_fmts);
-        av_freep(&sample_rates);
-        av_freep(&channel_layouts);
-
-        snprintf(name, sizeof(name), "audio format for output stream %d:%d",
-                 ost->file_index, ost->index);
-        ret = avfilter_graph_create_filter(&format,
-                                           avfilter_get_by_name("aformat"),
-                                           name, args, NULL, fg->graph);
-        if (ret < 0)
-            return ret;
-
-        ret = avfilter_link(last_filter, pad_idx, format, 0);
-        if (ret < 0)
-            return ret;
-
-        last_filter = format;
-        pad_idx = 0;
-    }
-
-    if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
-        return ret;
-
-    return 0;
-}
-
-#define DESCRIBE_FILTER_LINK(f, inout, in)                         \
-{                                                                  \
-    AVFilterContext *ctx = inout->filter_ctx;                      \
-    AVFilterPad *pads = in ? ctx->input_pads  : ctx->output_pads;  \
-    int       nb_pads = in ? ctx->input_count : ctx->output_count; \
-    AVIOContext *pb;                                               \
-                                                                   \
-    if (avio_open_dyn_buf(&pb) < 0)                                \
-        exit_program(1);                                           \
-                                                                   \
-    avio_printf(pb, "%s", ctx->filter->name);                      \
-    if (nb_pads > 1)                                               \
-        avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));\
-    avio_w8(pb, 0);                                                \
-    avio_close_dyn_buf(pb, &f->name);                              \
-}
-
-int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
-{
-    av_freep(&ofilter->name);
-    DESCRIBE_FILTER_LINK(ofilter, out, 0);
-
-    switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
-    case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
-    case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
-    default: av_assert0(0);
-    }
-}
-
-static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
-                                        AVFilterInOut *in)
-{
-    AVFilterContext *first_filter = in->filter_ctx;
-    AVFilter *filter = avfilter_get_by_name("buffer");
-    InputStream *ist = ifilter->ist;
-    AVRational tb = ist->framerate.num ? av_inv_q(ist->framerate) :
-                                         ist->st->time_base;
-    AVRational sar;
-    char args[255], name[255];
-    int pad_idx = in->pad_idx;
-    int ret;
-
-    sar = ist->st->sample_aspect_ratio.num ?
-          ist->st->sample_aspect_ratio :
-          ist->st->codec->sample_aspect_ratio;
-    snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
-             ist->st->codec->height, ist->st->codec->pix_fmt,
-             tb.num, tb.den, sar.num, sar.den);
-    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
-             ist->file_index, ist->st->index);
-
-    if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter, name,
-                                            args, NULL, fg->graph)) < 0)
-        return ret;
-
-    if (ist->framerate.num) {
-        AVFilterContext *setpts;
-
-        snprintf(name, sizeof(name), "force CFR for input from stream %d:%d",
-                 ist->file_index, ist->st->index);
-        if ((ret = avfilter_graph_create_filter(&setpts,
-                                                avfilter_get_by_name("setpts"),
-                                                name, "N", NULL,
-                                                fg->graph)) < 0)
-            return ret;
-
-        if ((ret = avfilter_link(setpts, 0, first_filter, pad_idx)) < 0)
-            return ret;
-
-        first_filter = setpts;
-        pad_idx = 0;
-    }
-
-    if ((ret = avfilter_link(ifilter->filter, 0, first_filter, pad_idx)) < 0)
-        return ret;
-    return 0;
-}
-
-static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
-                                        AVFilterInOut *in)
-{
-    AVFilterContext *first_filter = in->filter_ctx;
-    AVFilter *filter = avfilter_get_by_name("abuffer");
-    InputStream *ist = ifilter->ist;
-    int pad_idx = in->pad_idx;
-    char args[255], name[255];
-    int ret;
-
-    snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s"
-             ":channel_layout=0x%"PRIx64,
-             1, ist->st->codec->sample_rate,
-             ist->st->codec->sample_rate,
-             av_get_sample_fmt_name(ist->st->codec->sample_fmt),
-             ist->st->codec->channel_layout);
-    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
-             ist->file_index, ist->st->index);
-
-    if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter,
-                                            name, args, NULL,
-                                            fg->graph)) < 0)
-        return ret;
-
-    if (audio_sync_method > 0) {
-        AVFilterContext *async;
-        char args[256];
-        int  len = 0;
-
-        av_log(NULL, AV_LOG_WARNING, "-async has been deprecated. Used the "
-               "asyncts audio filter instead.\n");
-
-        if (audio_sync_method > 1)
-            len += snprintf(args + len, sizeof(args) - len, "compensate=1:"
-                            "max_comp=%d:", audio_sync_method);
-        snprintf(args + len, sizeof(args) - len, "min_delta=%f",
-                 audio_drift_threshold);
-
-        snprintf(name, sizeof(name), "graph %d audio sync for input stream %d:%d",
-                 fg->index, ist->file_index, ist->st->index);
-        ret = avfilter_graph_create_filter(&async,
-                                           avfilter_get_by_name("asyncts"),
-                                           name, args, NULL, fg->graph);
-        if (ret < 0)
-            return ret;
-
-        ret = avfilter_link(async, 0, first_filter, pad_idx);
-        if (ret < 0)
-            return ret;
-
-        first_filter = async;
-        pad_idx = 0;
-    }
-    if ((ret = avfilter_link(ifilter->filter, 0, first_filter, pad_idx)) < 0)
-        return ret;
-
-    return 0;
-}
-
-static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
-                                  AVFilterInOut *in)
-{
-    av_freep(&ifilter->name);
-    DESCRIBE_FILTER_LINK(ifilter, in, 1);
-
-    switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
-    case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
-    case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
-    default: av_assert0(0);
-    }
-}
-
-int configure_filtergraph(FilterGraph *fg)
-{
-    AVFilterInOut *inputs, *outputs, *cur;
-    int ret, i, init = !fg->graph, simple = !fg->graph_desc;
-    const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
-                                      fg->graph_desc;
-
-    avfilter_graph_free(&fg->graph);
-    if (!(fg->graph = avfilter_graph_alloc()))
-        return AVERROR(ENOMEM);
-
-    if (simple) {
-        OutputStream *ost = fg->outputs[0]->ost;
-        char args[255];
-        snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
-        fg->graph->scale_sws_opts = av_strdup(args);
-    }
-
-    if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
-        return ret;
-
-    if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
-        av_log(NULL, AV_LOG_ERROR, "Simple filtergraph '%s' does not have "
-               "exactly one input and output.\n", graph_desc);
-        return AVERROR(EINVAL);
-    }
-
-    for (cur = inputs; !simple && init && cur; cur = cur->next)
-        init_input_filter(fg, cur);
-
-    for (cur = inputs, i = 0; cur; cur = cur->next, i++)
-        if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0)
-            return ret;
-    avfilter_inout_free(&inputs);
-
-    if (!init || simple) {
-        /* we already know the mappings between lavfi outputs and output streams,
-         * so we can finish the setup */
-        for (cur = outputs, i = 0; cur; cur = cur->next, i++)
-            configure_output_filter(fg, fg->outputs[i], cur);
-        avfilter_inout_free(&outputs);
-
-        if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
-            return ret;
-    } else {
-        /* wait until output mappings are processed */
-        for (cur = outputs; cur;) {
-            fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs),
-                                     &fg->nb_outputs, fg->nb_outputs + 1);
-            if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
-                exit_program(1);
-            fg->outputs[fg->nb_outputs - 1]->graph   = fg;
-            fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
-            cur = cur->next;
-            fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
-        }
-    }
-
-    return 0;
-}
-
-static int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
-{
-    int i;
-    for (i = 0; i < fg->nb_inputs; i++)
-        if (fg->inputs[i]->ist == ist)
-            return 1;
-    return 0;
-}
-
 static void term_exit(void)
 {
     av_log(NULL, AV_LOG_QUIET, "");
@@ -672,47 +140,57 @@ static int decode_interrupt_cb(void *ctx)
 
 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
 
-void exit_program(int ret)
+static void avconv_cleanup(int ret)
 {
     int i, j;
 
     for (i = 0; i < nb_filtergraphs; i++) {
-        avfilter_graph_free(&filtergraphs[i]->graph);
-        for (j = 0; j < filtergraphs[i]->nb_inputs; j++) {
-            av_freep(&filtergraphs[i]->inputs[j]->name);
-            av_freep(&filtergraphs[i]->inputs[j]);
+        FilterGraph *fg = filtergraphs[i];
+        avfilter_graph_free(&fg->graph);
+        for (j = 0; j < fg->nb_inputs; j++) {
+            av_freep(&fg->inputs[j]->name);
+            av_freep(&fg->inputs[j]);
         }
-        av_freep(&filtergraphs[i]->inputs);
-        for (j = 0; j < filtergraphs[i]->nb_outputs; j++) {
-            av_freep(&filtergraphs[i]->outputs[j]->name);
-            av_freep(&filtergraphs[i]->outputs[j]);
+        av_freep(&fg->inputs);
+        for (j = 0; j < fg->nb_outputs; j++) {
+            av_freep(&fg->outputs[j]->name);
+            av_freep(&fg->outputs[j]);
         }
-        av_freep(&filtergraphs[i]->outputs);
+        av_freep(&fg->outputs);
+        av_freep(&fg->graph_desc);
+
         av_freep(&filtergraphs[i]);
     }
     av_freep(&filtergraphs);
 
     /* close files */
     for (i = 0; i < nb_output_files; i++) {
-        AVFormatContext *s = output_files[i]->ctx;
-        if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
+        OutputFile *of = output_files[i];
+        AVFormatContext *s = of->ctx;
+        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE) && s->pb)
             avio_close(s->pb);
         avformat_free_context(s);
-        av_dict_free(&output_files[i]->opts);
+        av_dict_free(&of->opts);
+
         av_freep(&output_files[i]);
     }
     for (i = 0; i < nb_output_streams; i++) {
-        AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
+        OutputStream *ost = output_streams[i];
+        AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
         while (bsfc) {
             AVBitStreamFilterContext *next = bsfc->next;
             av_bitstream_filter_close(bsfc);
             bsfc = next;
         }
-        output_streams[i]->bitstream_filters = NULL;
+        ost->bitstream_filters = NULL;
+        av_frame_free(&ost->filtered_frame);
+
+        av_parser_close(ost->parser);
+
+        av_freep(&ost->forced_keyframes);
+        av_freep(&ost->avfilter);
+        av_freep(&ost->logfile_prefix);
 
-        av_freep(&output_streams[i]->forced_keyframes);
-        av_freep(&output_streams[i]->avfilter);
-        av_freep(&output_streams[i]->filtered_frame);
         av_freep(&output_streams[i]);
     }
     for (i = 0; i < nb_input_files; i++) {
@@ -720,10 +198,14 @@ void exit_program(int ret)
         av_freep(&input_files[i]);
     }
     for (i = 0; i < nb_input_streams; i++) {
-        av_freep(&input_streams[i]->decoded_frame);
-        av_dict_free(&input_streams[i]->opts);
-        free_buffer_pool(&input_streams[i]->buffer_pool);
-        av_freep(&input_streams[i]->filters);
+        InputStream *ist = input_streams[i];
+
+        av_frame_free(&ist->decoded_frame);
+        av_frame_free(&ist->filter_frame);
+        av_dict_free(&ist->decoder_opts);
+        av_freep(&ist->filters);
+        av_freep(&ist->hwaccel_device);
+
         av_freep(&input_streams[i]);
     }
 
@@ -738,7 +220,6 @@ void exit_program(int ret)
 
     uninit_opts();
 
-    avfilter_uninit();
     avformat_network_deinit();
 
     if (received_sigterm) {
@@ -746,8 +227,6 @@ void exit_program(int ret)
                (int) received_sigterm);
         exit (255);
     }
-
-    exit(ret);
 }
 
 void assert_avoptions(AVDictionary *m)
@@ -759,24 +238,21 @@ void assert_avoptions(AVDictionary *m)
     }
 }
 
-static void assert_codec_experimental(AVCodecContext *c, int encoder)
+static void abort_codec_experimental(AVCodec *c, int encoder)
 {
     const char *codec_string = encoder ? "encoder" : "decoder";
     AVCodec *codec;
-    if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
-        c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
-        av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
-                "results.\nAdd '-strict experimental' if you want to use it.\n",
-                codec_string, c->codec->name);
-        codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
-        if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
-            av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
-                   codec_string, codec->name);
-        exit_program(1);
-    }
+    av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
+            "results.\nAdd '-strict experimental' if you want to use it.\n",
+            codec_string, c->name);
+    codec = encoder ? avcodec_find_encoder(c->id) : avcodec_find_decoder(c->id);
+    if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
+        av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
+               codec_string, codec->name);
+    exit_program(1);
 }
 
-/**
+/*
  * Update the requested input sample format based on the output sample format.
  * This is currently only used to request float output from decoders which
  * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
@@ -797,22 +273,41 @@ static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
         dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
         dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
         const enum AVSampleFormat *p;
-        int min_dec = -1, min_inc = -1;
+        int min_dec = INT_MAX, min_inc = INT_MAX;
+        enum AVSampleFormat dec_fmt = AV_SAMPLE_FMT_NONE;
+        enum AVSampleFormat inc_fmt = AV_SAMPLE_FMT_NONE;
 
         /* find a matching sample format in the encoder */
         for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
             if (*p == enc->sample_fmt) {
                 dec->request_sample_fmt = *p;
                 return;
-            } else if (*p > enc->sample_fmt) {
-                min_inc = FFMIN(min_inc, *p - enc->sample_fmt);
-            } else
-                min_dec = FFMIN(min_dec, enc->sample_fmt - *p);
+            } else {
+                enum AVSampleFormat dfmt = av_get_packed_sample_fmt(*p);
+                enum AVSampleFormat efmt = av_get_packed_sample_fmt(enc->sample_fmt);
+                int fmt_diff = 32 * abs(dfmt - efmt);
+                if (av_sample_fmt_is_planar(*p) !=
+                    av_sample_fmt_is_planar(enc->sample_fmt))
+                    fmt_diff++;
+                if (dfmt == efmt) {
+                    min_inc = fmt_diff;
+                    inc_fmt = *p;
+                } else if (dfmt > efmt) {
+                    if (fmt_diff < min_inc) {
+                        min_inc = fmt_diff;
+                        inc_fmt = *p;
+                    }
+                } else {
+                    if (fmt_diff < min_dec) {
+                        min_dec = fmt_diff;
+                        dec_fmt = *p;
+                    }
+                }
+            }
         }
 
         /* if none match, provide the one that matches quality closest */
-        dec->request_sample_fmt = min_inc > 0 ? enc->sample_fmt + min_inc :
-                                  enc->sample_fmt - min_dec;
+        dec->request_sample_fmt = min_inc != INT_MAX ? inc_fmt : dec_fmt;
     }
 }
 
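The replacement logic in this hunk is easier to see as a distance metric between sample formats. A self-contained sketch of the same ranking (the helper name is ours; only public libavutil calls are used):

    #include <stdlib.h>
    #include "libavutil/samplefmt.h"

    /* Same ranking update_sample_fmt() now uses: 32 units per step between the
     * packed base formats, plus 1 if only the planar/packed layout differs.
     * Smaller means a closer match. */
    static int sample_fmt_distance(enum AVSampleFormat cand,
                                   enum AVSampleFormat target)
    {
        enum AVSampleFormat c = av_get_packed_sample_fmt(cand);
        enum AVSampleFormat t = av_get_packed_sample_fmt(target);
        int diff = 32 * abs(c - t);

        if (av_sample_fmt_is_planar(cand) != av_sample_fmt_is_planar(target))
            diff++;
        return diff;
    }

With this metric a planar variant of the requested format, e.g. AV_SAMPLE_FMT_FLTP when the encoder wants AV_SAMPLE_FMT_FLT, scores 1 and therefore beats any format with a different base type, which is exactly the tie-breaking the new loop implements.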
@@ -845,7 +340,10 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
                                            pkt->flags & AV_PKT_FLAG_KEY);
         if (a > 0) {
             av_free_packet(pkt);
-            new_pkt.destruct = av_destruct_packet;
+            new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
+                                           av_buffer_default_free, NULL, 0);
+            if (!new_pkt.buf)
+                exit_program(1);
         } else if (a < 0) {
             av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
                    bsfc->filter->name, pkt->stream_index,
@@ -859,6 +357,28 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
         bsfc = bsfc->next;
     }
 
+    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
+        ost->last_mux_dts != AV_NOPTS_VALUE &&
+        pkt->dts < ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT)) {
+        av_log(NULL, AV_LOG_WARNING, "Non-monotonous DTS in output stream "
+               "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
+               ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
+        if (exit_on_error) {
+            av_log(NULL, AV_LOG_FATAL, "aborting.\n");
+            exit_program(1);
+        }
+        av_log(NULL, AV_LOG_WARNING, "changing to %"PRId64". This may result "
+               "in incorrect timestamps in the output file.\n",
+               ost->last_mux_dts + 1);
+        pkt->dts = ost->last_mux_dts + 1;
+        if (pkt->pts != AV_NOPTS_VALUE)
+            pkt->pts = FFMAX(pkt->pts, pkt->dts);
+    }
+    ost->last_mux_dts = pkt->dts;
+
+    ost->data_size += pkt->size;
+    ost->packets_written++;
+
     pkt->stream_index = ost->index;
     ret = av_interleaved_write_frame(s, pkt);
     if (ret < 0) {
@@ -874,7 +394,7 @@ static int check_recording_time(OutputStream *ost)
     if (of->recording_time != INT64_MAX &&
         av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
                       AV_TIME_BASE_Q) >= 0) {
-        ost->is_past_recording_time = 1;
+        ost->finished = 1;
         return 0;
     }
     return 1;
@@ -891,13 +411,13 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
     pkt.data = NULL;
     pkt.size = 0;
 
-    if (!check_recording_time(ost))
-        return;
-
     if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
         frame->pts = ost->sync_opts;
     ost->sync_opts = frame->pts + frame->nb_samples;
 
+    ost->samples_encoded += frame->nb_samples;
+    ost->frames_encoded++;
+
     if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
         av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
         exit_program(1);
@@ -912,48 +432,7 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
             pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
 
         write_frame(s, &pkt, ost);
-
-        audio_size += pkt.size;
-    }
-}
-
-static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
-{
-    AVCodecContext *dec;
-    AVPicture *picture2;
-    AVPicture picture_tmp;
-    uint8_t *buf = 0;
-
-    dec = ist->st->codec;
-
-    /* deinterlace : must be done before any resize */
-    if (do_deinterlace) {
-        int size;
-
-        /* create temporary picture */
-        size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
-        buf  = av_malloc(size);
-        if (!buf)
-            return;
-
-        picture2 = &picture_tmp;
-        avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
-
-        if (avpicture_deinterlace(picture2, picture,
-                                 dec->pix_fmt, dec->width, dec->height) < 0) {
-            /* if error, do not deinterlace */
-            av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
-            av_free(buf);
-            buf = NULL;
-            picture2 = picture;
-        }
-    } else {
-        picture2 = picture;
     }
-
-    if (picture != picture2)
-        *picture = *picture2;
-    *bufp = buf;
 }
 
 static void do_subtitle_out(AVFormatContext *s,
@@ -999,6 +478,9 @@ static void do_subtitle_out(AVFormatContext *s,
         sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
         sub->end_display_time  -= sub->start_display_time;
         sub->start_display_time = 0;
+
+        ost->frames_encoded++;
+
         subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                     subtitle_out_max_size, sub);
         if (subtitle_out_size < 0) {
@@ -1025,7 +507,7 @@ static void do_subtitle_out(AVFormatContext *s,
 static void do_video_out(AVFormatContext *s,
                          OutputStream *ost,
                          AVFrame *in_picture,
-                         int *frame_size, float quality)
+                         int *frame_size)
 {
     int ret, format_video_sync;
     AVPacket pkt;
@@ -1042,7 +524,9 @@ static void do_video_out(AVFormatContext *s,
         in_picture->pts != AV_NOPTS_VALUE &&
         in_picture->pts < ost->sync_opts) {
         nb_frames_drop++;
-        av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
+        av_log(NULL, AV_LOG_WARNING,
+               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
+               ost->frame_number, ost->st->index, in_picture->pts);
         return;
     }
 
@@ -1058,8 +542,7 @@ static void do_video_out(AVFormatContext *s,
     pkt.data = NULL;
     pkt.size = 0;
 
-    if (!check_recording_time(ost) ||
-        ost->frame_number >= ost->max_frames)
+    if (ost->frame_number >= ost->max_frames)
         return;
 
     if (s->oformat->flags & AVFMT_RAWPICTURE &&
@@ -1077,30 +560,23 @@ static void do_video_out(AVFormatContext *s,
         write_frame(s, &pkt, ost);
     } else {
         int got_packet;
-        AVFrame big_picture;
-
-        big_picture = *in_picture;
-        /* better than nothing: use input picture interlaced
-           settings */
-        big_picture.interlaced_frame = in_picture->interlaced_frame;
-        if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
-            if (ost->top_field_first == -1)
-                big_picture.top_field_first = in_picture->top_field_first;
-            else
-                big_picture.top_field_first = !!ost->top_field_first;
-        }
 
-        /* handles same_quant here. This is not correct because it may
-           not be a global option */
-        big_picture.quality = quality;
+        if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
+            ost->top_field_first >= 0)
+            in_picture->top_field_first = !!ost->top_field_first;
+
+        in_picture->quality = ost->st->codec->global_quality;
         if (!enc->me_threshold)
-            big_picture.pict_type = 0;
+            in_picture->pict_type = 0;
         if (ost->forced_kf_index < ost->forced_kf_count &&
-            big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
-            big_picture.pict_type = AV_PICTURE_TYPE_I;
+            in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
+            in_picture->pict_type = AV_PICTURE_TYPE_I;
             ost->forced_kf_index++;
         }
-        ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
+
+        ost->frames_encoded++;
+
+        ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
         if (ret < 0) {
             av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
             exit_program(1);
@@ -1114,7 +590,6 @@ static void do_video_out(AVFormatContext *s,
 
             write_frame(s, &pkt, ost);
             *frame_size = pkt.size;
-            video_size += pkt.size;
 
             /* if two pass, output log */
             if (ost->logfile && enc->stats_out) {
@@ -1136,8 +611,7 @@ static double psnr(double d)
     return -10.0 * log(d) / log(10.0);
 }
 
-static void do_video_stats(AVFormatContext *os, OutputStream *ost,
-                           int frame_size)
+static void do_video_stats(OutputStream *ost, int frame_size)
 {
     AVCodecContext *enc;
     int frame_number;
@@ -1166,85 +640,224 @@ static void do_video_stats(AVFormatContext *os, OutputStream *ost,
             ti1 = 0.01;
 
         bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
-        avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
+        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
         fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
-               (double)video_size / 1024, ti1, bitrate, avg_bitrate);
+               (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
         fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
     }
 }
 
-/* check for new output on any of the filtergraphs */
+/*
+ * Read one frame of lavfi output for ost and encode it.
+ */
+static int poll_filter(OutputStream *ost)
+{
+    OutputFile    *of = output_files[ost->file_index];
+    AVFrame *filtered_frame = NULL;
+    int frame_size, ret;
+
+    if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
+        return AVERROR(ENOMEM);
+    }
+    filtered_frame = ost->filtered_frame;
+
+    if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
+        !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
+        ret = av_buffersink_get_samples(ost->filter->filter, filtered_frame,
+                                         ost->st->codec->frame_size);
+    else
+        ret = av_buffersink_get_frame(ost->filter->filter, filtered_frame);
+
+    if (ret < 0)
+        return ret;
+
+    if (filtered_frame->pts != AV_NOPTS_VALUE) {
+        int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
+        filtered_frame->pts = av_rescale_q(filtered_frame->pts,
+                                           ost->filter->filter->inputs[0]->time_base,
+                                           ost->st->codec->time_base) -
+                              av_rescale_q(start_time,
+                                           AV_TIME_BASE_Q,
+                                           ost->st->codec->time_base);
+    }
+
+    switch (ost->filter->filter->inputs[0]->type) {
+    case AVMEDIA_TYPE_VIDEO:
+        if (!ost->frame_aspect_ratio)
+            ost->st->codec->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
+
+        do_video_out(of->ctx, ost, filtered_frame, &frame_size);
+        if (vstats_filename && frame_size)
+            do_video_stats(ost, frame_size);
+        break;
+    case AVMEDIA_TYPE_AUDIO:
+        do_audio_out(of->ctx, ost, filtered_frame);
+        break;
+    default:
+        // TODO support subtitle filters
+        av_assert0(0);
+    }
+
+    av_frame_unref(filtered_frame);
+
+    return 0;
+}
+
+/*
+ * Read as many frames as possible from lavfi and encode them.
+ *
+ * Always read from the active stream with the lowest timestamp. If no frames
+ * are available for it then return EAGAIN and wait for more input. This way we
+ * can use lavfi sources that generate an unlimited number of frames without
+ * memory usage exploding.
+ */
 static int poll_filters(void)
 {
-    AVFilterBufferRef *picref;
-    AVFrame *filtered_frame = NULL;
-    int i, frame_size;
+    int i, j, ret = 0;
+
+    while (ret >= 0 && !received_sigterm) {
+        OutputStream *ost = NULL;
+        int64_t min_pts = INT64_MAX;
+
+        /* choose output stream with the lowest timestamp */
+        for (i = 0; i < nb_output_streams; i++) {
+            int64_t pts = output_streams[i]->sync_opts;
+
+            if (!output_streams[i]->filter || output_streams[i]->finished)
+                continue;
+
+            pts = av_rescale_q(pts, output_streams[i]->st->codec->time_base,
+                               AV_TIME_BASE_Q);
+            if (pts < min_pts) {
+                min_pts = pts;
+                ost = output_streams[i];
+            }
+        }
+
+        if (!ost)
+            break;
+
+        ret = poll_filter(ost);
+
+        if (ret == AVERROR_EOF) {
+            OutputFile *of = output_files[ost->file_index];
+
+            ost->finished = 1;
+
+            if (of->shortest) {
+                for (j = 0; j < of->ctx->nb_streams; j++)
+                    output_streams[of->ost_index + j]->finished = 1;
+            }
+
+            ret = 0;
+        } else if (ret == AVERROR(EAGAIN))
+            return 0;
+    }
+
+    return ret;
+}
+
+static void print_final_stats(int64_t total_size)
+{
+    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
+    uint64_t data_size = 0;
+    float percent = -1.0;
+    int i, j;
 
     for (i = 0; i < nb_output_streams; i++) {
         OutputStream *ost = output_streams[i];
-        OutputFile    *of = output_files[ost->file_index];
-        int ret = 0;
+        switch (ost->st->codec->codec_type) {
+            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
+            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
+            default:                 other_size += ost->data_size; break;
+        }
+        extra_size += ost->st->codec->extradata_size;
+        data_size  += ost->data_size;
+    }
+
+    if (data_size && total_size >= data_size)
+        percent = 100.0 * (total_size - data_size) / data_size;
+
+    av_log(NULL, AV_LOG_INFO, "\n");
+    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
+           video_size / 1024.0,
+           audio_size / 1024.0,
+           other_size / 1024.0,
+           extra_size / 1024.0);
+    if (percent >= 0.0)
+        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
+    else
+        av_log(NULL, AV_LOG_INFO, "unknown");
+    av_log(NULL, AV_LOG_INFO, "\n");
 
-        if (!ost->filter)
-            continue;
+    /* print verbose per-stream stats */
+    for (i = 0; i < nb_input_files; i++) {
+        InputFile *f = input_files[i];
+        uint64_t total_packets = 0, total_size = 0;
 
-        if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
-            return AVERROR(ENOMEM);
-        } else
-            avcodec_get_frame_defaults(ost->filtered_frame);
-        filtered_frame = ost->filtered_frame;
-
-        while (ret >= 0 && !ost->is_past_recording_time) {
-            if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
-                !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
-                ret = av_buffersink_read_samples(ost->filter->filter, &picref,
-                                                 ost->st->codec->frame_size);
-            else
-                ret = av_buffersink_read(ost->filter->filter, &picref);
+        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
+               i, f->ctx->filename);
 
-            if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
-                break;
-            else if (ret < 0)
-                return ret;
-
-            avfilter_copy_buf_props(filtered_frame, picref);
-            if (picref->pts != AV_NOPTS_VALUE) {
-                filtered_frame->pts = av_rescale_q(picref->pts,
-                                                   ost->filter->filter->inputs[0]->time_base,
-                                                   ost->st->codec->time_base) -
-                                      av_rescale_q(of->start_time,
-                                                   AV_TIME_BASE_Q,
-                                                   ost->st->codec->time_base);
-
-                if (of->start_time && filtered_frame->pts < 0) {
-                    avfilter_unref_buffer(picref);
-                    continue;
-                }
+        for (j = 0; j < f->nb_streams; j++) {
+            InputStream *ist = input_streams[f->ist_index + j];
+            enum AVMediaType type = ist->st->codec->codec_type;
+
+            total_size    += ist->data_size;
+            total_packets += ist->nb_packets;
+
+            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
+                   i, j, media_type_string(type));
+            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
+                   ist->nb_packets, ist->data_size);
+
+            if (ist->decoding_needed) {
+                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
+                       ist->frames_decoded);
+                if (type == AVMEDIA_TYPE_AUDIO)
+                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
+                av_log(NULL, AV_LOG_VERBOSE, "; ");
             }
 
-            switch (ost->filter->filter->inputs[0]->type) {
-            case AVMEDIA_TYPE_VIDEO:
-                if (!ost->frame_aspect_ratio)
-                    ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;
-
-                do_video_out(of->ctx, ost, filtered_frame, &frame_size,
-                             same_quant ? ost->last_quality :
-                                          ost->st->codec->global_quality);
-                if (vstats_filename && frame_size)
-                    do_video_stats(of->ctx, ost, frame_size);
-                break;
-            case AVMEDIA_TYPE_AUDIO:
-                do_audio_out(of->ctx, ost, filtered_frame);
-                break;
-            default:
-                // TODO support subtitle filters
-                av_assert0(0);
+            av_log(NULL, AV_LOG_VERBOSE, "\n");
+        }
+
+        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
+               total_packets, total_size);
+    }
+
+    for (i = 0; i < nb_output_files; i++) {
+        OutputFile *of = output_files[i];
+        uint64_t total_packets = 0, total_size = 0;
+
+        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
+               i, of->ctx->filename);
+
+        for (j = 0; j < of->ctx->nb_streams; j++) {
+            OutputStream *ost = output_streams[of->ost_index + j];
+            enum AVMediaType type = ost->st->codec->codec_type;
+
+            total_size    += ost->data_size;
+            total_packets += ost->packets_written;
+
+            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
+                   i, j, media_type_string(type));
+            if (ost->encoding_needed) {
+                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
+                       ost->frames_encoded);
+                if (type == AVMEDIA_TYPE_AUDIO)
+                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
+                av_log(NULL, AV_LOG_VERBOSE, "; ");
             }
 
-            avfilter_unref_buffer(picref);
+            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
+                   ost->packets_written, ost->data_size);
+
+            av_log(NULL, AV_LOG_VERBOSE, "\n");
         }
+
+        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
+               total_packets, total_size);
     }
-    return 0;
 }
 
 static void print_report(int is_last_report, int64_t timer_start)
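As a worked example of the muxing-overhead figure print_final_stats() computes: with, say, 1000 kB of encoded stream payload (data_size) and a 1040 kB output file (total_size), it reports 100 * (1040 - 1000) / 1000 = 4%; when the file size is unknown or smaller than the payload, it prints "unknown" instead.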
@@ -1279,8 +892,15 @@ static void print_report(int is_last_report, int64_t timer_start)
     oc = output_files[0]->ctx;
 
     total_size = avio_size(oc->pb);
-    if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
+    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
         total_size = avio_tell(oc->pb);
+    if (total_size < 0) {
+        char errbuf[128];
+        av_strerror(total_size, errbuf, sizeof(errbuf));
+        av_log(NULL, AV_LOG_VERBOSE, "Bitrate not available, "
+               "avio_tell() failed: %s\n", errbuf);
+        total_size = 0;
+    }
 
     buf[0] = '\0';
     ti1 = 1e10;
@@ -1308,7 +928,7 @@ static void print_report(int is_last_report, int64_t timer_start)
                 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                     qp_histogram[qp]++;
                 for (j = 0; j < 32; j++)
-                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
+                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
             }
             if (enc->flags&CODEC_FLAG_PSNR) {
                 int j;
@@ -1348,24 +968,17 @@ static void print_report(int is_last_report, int64_t timer_start)
             "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
             (double)total_size / 1024, ti1, bitrate);
 
-    if (nb_frames_dup || nb_frames_drop)
-        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
-                nb_frames_dup, nb_frames_drop);
+    if (nb_frames_drop)
+        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " drop=%d",
+                 nb_frames_drop);
 
     av_log(NULL, AV_LOG_INFO, "%s    \r", buf);
 
     fflush(stderr);
 
-    if (is_last_report) {
-        int64_t raw= audio_size + video_size + extra_size;
-        av_log(NULL, AV_LOG_INFO, "\n");
-        av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
-               video_size / 1024.0,
-               audio_size / 1024.0,
-               extra_size / 1024.0,
-               100.0 * (total_size - raw) / raw
-        );
-    }
+    if (is_last_report)
+        print_final_stats(total_size);
+
 }
 
 static void flush_encoders(void)
@@ -1389,18 +1002,15 @@ static void flush_encoders(void)
         for (;;) {
             int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
             const char *desc;
-            int64_t *size;
 
             switch (ost->st->codec->codec_type) {
             case AVMEDIA_TYPE_AUDIO:
                 encode = avcodec_encode_audio2;
                 desc   = "Audio";
-                size   = &audio_size;
                 break;
             case AVMEDIA_TYPE_VIDEO:
                 encode = avcodec_encode_video2;
                 desc   = "Video";
-                size   = &video_size;
                 break;
             default:
                 stop_encoding = 1;
@@ -1418,7 +1028,6 @@ static void flush_encoders(void)
                     av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
                     exit_program(1);
                 }
-                *size += ret;
                 if (ost->logfile && enc->stats_out) {
                     fprintf(ost->logfile, "%s", enc->stats_out);
                 }
@@ -1430,6 +1039,8 @@ static void flush_encoders(void)
                     pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
                 if (pkt.dts != AV_NOPTS_VALUE)
                     pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
+                if (pkt.duration > 0)
+                    pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
                 write_frame(os, &pkt, ost);
             }
 
@@ -1450,7 +1061,7 @@ static int check_output_constraints(InputStream *ist, OutputStream *ost)
     if (ost->source_index != ist_index)
         return 0;
 
-    if (of->start_time && ist->last_dts < of->start_time)
+    if (of->start_time != AV_NOPTS_VALUE && ist->last_dts < of->start_time)
         return 0;
 
     return 1;
@@ -1459,7 +1070,9 @@ static int check_output_constraints(InputStream *ist, OutputStream *ost)
 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
 {
     OutputFile *of = output_files[ost->file_index];
-    int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
+    InputFile   *f = input_files [ist->file_index];
+    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
+    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
     AVPacket opkt;
 
     av_init_packet(&opkt);
@@ -1469,18 +1082,24 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
         return;
 
     if (of->recording_time != INT64_MAX &&
-        ist->last_dts >= of->recording_time + of->start_time) {
-        ost->is_past_recording_time = 1;
+        ist->last_dts >= of->recording_time + start_time) {
+        ost->finished = 1;
         return;
     }
 
+    if (f->recording_time != INT64_MAX) {
+        start_time = f->ctx->start_time;
+        if (f->start_time != AV_NOPTS_VALUE)
+            start_time += f->start_time;
+        if (ist->last_dts >= f->recording_time + start_time) {
+            ost->finished = 1;
+            return;
+        }
+    }
+
     /* force the input stream PTS */
-    if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
-        audio_size += pkt->size;
-    else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
-        video_size += pkt->size;
+    if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
         ost->sync_opts++;
-    }
 
     if (pkt->pts != AV_NOPTS_VALUE)
         opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
@@ -1502,8 +1121,14 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
        && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
        && ost->st->codec->codec_id != AV_CODEC_ID_VC1
        ) {
-        if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
-            opkt.destruct = av_destruct_packet;
+        if (av_parser_change(ost->parser, ost->st->codec,
+                             &opkt.data, &opkt.size,
+                             pkt->data, pkt->size,
+                             pkt->flags & AV_PKT_FLAG_KEY)) {
+            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
+            if (!opkt.buf)
+                exit_program(1);
+        }
     } else {
         opkt.data = pkt->data;
         opkt.size = pkt->size;
@@ -1511,17 +1136,6 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
 
     write_frame(of->ctx, &opkt, ost);
     ost->st->codec->frame_number++;
-    av_free_packet(&opkt);
-}
-
-static void rate_emu_sleep(InputStream *ist)
-{
-    if (input_files[ist->file_index]->rate_emu) {
-        int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
-        int64_t now = av_gettime() - ist->start;
-        if (pts > now)
-            av_usleep(pts - now);
-    }
 }
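The streamcopy path above now treats an unset start time (AV_NOPTS_VALUE) as zero and rescales it from AV_TIME_BASE units (microseconds) into the output stream's time base before subtracting it from the copied timestamps. A standalone sketch of that conversion with hypothetical numbers (not part of avconv.c):

    #include <inttypes.h>
    #include <stdio.h>
    #include <libavutil/avutil.h>
    #include <libavutil/mathematics.h>

    int main(void)
    {
        int64_t start_time = (int64_t)(1.5 * AV_TIME_BASE);  /* 1.5 s in microseconds  */
        AVRational ost_tb  = (AVRational){ 1, 90000 };        /* e.g. an MPEG-TS stream */

        /* av_rescale_q(a, bq, cq) computes a * bq / cq without intermediate overflow */
        int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost_tb);

        printf("%"PRId64"\n", ost_tb_start_time);             /* prints 135000 */
        return 0;
    }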
 
 int guess_input_channel_layout(InputStream *ist)
@@ -1544,98 +1158,35 @@ int guess_input_channel_layout(InputStream *ist)
 
 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
 {
-    AVFrame *decoded_frame;
+    AVFrame *decoded_frame, *f;
     AVCodecContext *avctx = ist->st->codec;
-    int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
-    int i, ret, resample_changed;
+    int i, ret, err = 0, resample_changed;
 
-    if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
+    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
+        return AVERROR(ENOMEM);
+    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
         return AVERROR(ENOMEM);
-    else
-        avcodec_get_frame_defaults(ist->decoded_frame);
     decoded_frame = ist->decoded_frame;
 
     ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
-    if (ret < 0) {
-        return ret;
-    }
-
-    if (!*got_output) {
-        /* no audio frame */
-        if (!pkt->size)
+    if (!*got_output || ret < 0) {
+        if (!pkt->size) {
             for (i = 0; i < ist->nb_filters; i++)
-                av_buffersrc_buffer(ist->filters[i]->filter, NULL);
+                av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
+        }
         return ret;
     }
 
+    ist->samples_decoded += decoded_frame->nb_samples;
+    ist->frames_decoded++;
+
     /* if the decoder provides a pts, use it instead of the last packet pts.
        the decoder could be delaying output by a packet or more. */
     if (decoded_frame->pts != AV_NOPTS_VALUE)
         ist->next_dts = decoded_frame->pts;
-    else if (pkt->pts != AV_NOPTS_VALUE) {
+    else if (pkt->pts != AV_NOPTS_VALUE)
         decoded_frame->pts = pkt->pts;
-        pkt->pts           = AV_NOPTS_VALUE;
-    }
-
-    // preprocess audio (volume)
-    if (audio_volume != 256) {
-        int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
-        void *samples = decoded_frame->data[0];
-        switch (avctx->sample_fmt) {
-        case AV_SAMPLE_FMT_U8:
-        {
-            uint8_t *volp = samples;
-            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
-                int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
-                *volp++ = av_clip_uint8(v);
-            }
-            break;
-        }
-        case AV_SAMPLE_FMT_S16:
-        {
-            int16_t *volp = samples;
-            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
-                int v = ((*volp) * audio_volume + 128) >> 8;
-                *volp++ = av_clip_int16(v);
-            }
-            break;
-        }
-        case AV_SAMPLE_FMT_S32:
-        {
-            int32_t *volp = samples;
-            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
-                int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
-                *volp++ = av_clipl_int32(v);
-            }
-            break;
-        }
-        case AV_SAMPLE_FMT_FLT:
-        {
-            float *volp = samples;
-            float scale = audio_volume / 256.f;
-            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
-                *volp++ *= scale;
-            }
-            break;
-        }
-        case AV_SAMPLE_FMT_DBL:
-        {
-            double *volp = samples;
-            double scale = audio_volume / 256.;
-            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
-                *volp++ *= scale;
-            }
-            break;
-        }
-        default:
-            av_log(NULL, AV_LOG_FATAL,
-                   "Audio volume adjustment on sample format %s is not supported.\n",
-                   av_get_sample_fmt_name(ist->st->codec->sample_fmt));
-            exit_program(1);
-        }
-    }
-
-    rate_emu_sleep(ist);
+    pkt->pts           = AV_NOPTS_VALUE;
 
     resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
                        ist->resample_channels       != avctx->channels               ||
@@ -1682,44 +1233,58 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
         decoded_frame->pts = av_rescale_q(decoded_frame->pts,
                                           ist->st->time_base,
                                           (AVRational){1, ist->st->codec->sample_rate});
-    for (i = 0; i < ist->nb_filters; i++)
-        av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
+    for (i = 0; i < ist->nb_filters; i++) {
+        if (i < ist->nb_filters - 1) {
+            f = ist->filter_frame;
+            err = av_frame_ref(f, decoded_frame);
+            if (err < 0)
+                break;
+        } else
+            f = decoded_frame;
 
-    return ret;
+        err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
+        if (err < 0)
+            break;
+    }
+
+    av_frame_unref(ist->filter_frame);
+    av_frame_unref(decoded_frame);
+    return err < 0 ? err : ret;
 }
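With refcounted frames, decode_audio() above can feed one decoded frame into several filtergraph inputs without copying the samples: every buffer source except the last gets a fresh reference made with av_frame_ref(), and the last one consumes the decoded frame itself, since av_buffersrc_add_frame() takes ownership of the reference it is handed. A reduced sketch of the same pattern; filters, nb_filters, decoded and tmp stand in for ist->filters, ist->nb_filters, ist->decoded_frame and ist->filter_frame:

    #include <libavfilter/buffersrc.h>
    #include <libavutil/frame.h>

    static int send_frame_to_filters(AVFilterContext **filters, int nb_filters,
                                     AVFrame *decoded, AVFrame *tmp)
    {
        int i, err = 0;

        for (i = 0; i < nb_filters; i++) {
            AVFrame *f;

            if (i < nb_filters - 1) {
                f   = tmp;
                err = av_frame_ref(f, decoded);    /* extra reference, no data copy */
                if (err < 0)
                    break;
            } else
                f = decoded;                       /* last consumer gets the original */

            err = av_buffersrc_add_frame(filters[i], f);
            if (err < 0)
                break;
        }

        av_frame_unref(tmp);
        av_frame_unref(decoded);
        return err;
    }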
 
 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
 {
-    AVFrame *decoded_frame;
-    void *buffer_to_free = NULL;
-    int i, ret = 0, resample_changed;
-    float quality;
+    AVFrame *decoded_frame, *f;
+    int i, ret = 0, err = 0, resample_changed;
 
-    if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
+    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
+        return AVERROR(ENOMEM);
+    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
         return AVERROR(ENOMEM);
-    else
-        avcodec_get_frame_defaults(ist->decoded_frame);
     decoded_frame = ist->decoded_frame;
 
     ret = avcodec_decode_video2(ist->st->codec,
                                 decoded_frame, got_output, pkt);
-    if (ret < 0)
-        return ret;
-
-    quality = same_quant ? decoded_frame->quality : 0;
-    if (!*got_output) {
-        /* no picture yet */
-        if (!pkt->size)
+    if (!*got_output || ret < 0) {
+        if (!pkt->size) {
             for (i = 0; i < ist->nb_filters; i++)
-                av_buffersrc_buffer(ist->filters[i]->filter, NULL);
+                av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
+        }
         return ret;
     }
+
+    ist->frames_decoded++;
+
+    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
+        err = ist->hwaccel_retrieve_data(ist->st->codec, decoded_frame);
+        if (err < 0)
+            goto fail;
+    }
+    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
+
     decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
                                            decoded_frame->pkt_dts);
     pkt->size = 0;
-    pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
-
-    rate_emu_sleep(ist);
 
     if (ist->st->sample_aspect_ratio.num)
         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
@@ -1734,6 +1299,10 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
                ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
                decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
 
+        ret = poll_filters();
+        if (ret < 0 && (ret != AVERROR_EOF && ret != AVERROR(EAGAIN)))
+            av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
+
         ist->resample_width   = decoded_frame->width;
         ist->resample_height  = decoded_frame->height;
         ist->resample_pix_fmt = decoded_frame->format;
@@ -1747,30 +1316,23 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
     }
 
     for (i = 0; i < ist->nb_filters; i++) {
-        // XXX what an ugly hack
-        if (ist->filters[i]->graph->nb_outputs == 1)
-            ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
-
-        if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
-            FrameBuffer      *buf = decoded_frame->opaque;
-            AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
-                                        decoded_frame->data, decoded_frame->linesize,
-                                        AV_PERM_READ | AV_PERM_PRESERVE,
-                                        ist->st->codec->width, ist->st->codec->height,
-                                        ist->st->codec->pix_fmt);
-
-            avfilter_copy_frame_props(fb, decoded_frame);
-            fb->buf->priv           = buf;
-            fb->buf->free           = filter_release_buffer;
-
-            buf->refcount++;
-            av_buffersrc_buffer(ist->filters[i]->filter, fb);
+        if (i < ist->nb_filters - 1) {
+            f = ist->filter_frame;
+            err = av_frame_ref(f, decoded_frame);
+            if (err < 0)
+                break;
         } else
-            av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
+            f = decoded_frame;
+
+        err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
+        if (err < 0)
+            break;
     }
 
-    av_free(buffer_to_free);
-    return ret;
+fail:
+    av_frame_unref(ist->filter_frame);
+    av_frame_unref(decoded_frame);
+    return err < 0 ? err : ret;
 }
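decode_video() above only calls hwaccel_retrieve_data() when the decoder returned a frame in the negotiated hardware pixel format; such frames reference GPU-side surfaces and have to be copied back to system memory before they can enter a filtergraph. A small sketch of how a frame can be recognized as hardware-backed (hypothetical helper, not part of avconv.c):

    #include <libavutil/frame.h>
    #include <libavutil/pixdesc.h>

    /* Return 1 if the frame is backed by a hardware surface rather than by
     * ordinary system-memory planes. */
    static int frame_is_hwaccel(const AVFrame *frame)
    {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
        return desc && (desc->flags & AV_PIX_FMT_FLAG_HWACCEL);
    }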
 
 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
@@ -1783,7 +1345,7 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
     if (!*got_output)
         return ret;
 
-    rate_emu_sleep(ist);
+    ist->frames_decoded++;
 
     for (i = 0; i < nb_output_streams; i++) {
         OutputStream *ost = output_streams[i];
@@ -1828,7 +1390,8 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
 
         ist->last_dts = ist->next_dts;
 
-        if (avpkt.size && avpkt.size != pkt->size) {
+        if (avpkt.size && avpkt.size != pkt->size &&
+            !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
             av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                    "Multiple frames in a packet from stream %d\n", pkt->stream_index);
             ist->showed_multi_packet_warning = 1;
@@ -1872,7 +1435,6 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
 
     /* handle stream copy */
     if (!ist->decoding_needed) {
-        rate_emu_sleep(ist);
         ist->last_dts = ist->next_dts;
         switch (ist->st->codec->codec_type) {
         case AVMEDIA_TYPE_AUDIO:
@@ -1903,7 +1465,7 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
 
 static void print_sdp(void)
 {
-    char sdp[2048];
+    char sdp[16384];
     int i;
     AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
 
@@ -1918,9 +1480,66 @@ static void print_sdp(void)
     av_freep(&avc);
 }
 
-static int init_input_stream(int ist_index, char *error, int error_len)
+static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
 {
     int i;
+    for (i = 0; hwaccels[i].name; i++)
+        if (hwaccels[i].pix_fmt == pix_fmt)
+            return &hwaccels[i];
+    return NULL;
+}
+
+static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
+{
+    InputStream *ist = s->opaque;
+    const enum AVPixelFormat *p;
+    int ret;
+
+    for (p = pix_fmts; *p != -1; p++) {
+        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
+        const HWAccel *hwaccel;
+
+        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
+            break;
+
+        hwaccel = get_hwaccel(*p);
+        if (!hwaccel ||
+            (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
+            (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
+            continue;
+
+        ret = hwaccel->init(s);
+        if (ret < 0) {
+            if (ist->hwaccel_id == hwaccel->id) {
+                av_log(NULL, AV_LOG_FATAL,
+                       "%s hwaccel requested for input stream #%d:%d, "
+                       "but cannot be initialized.\n", hwaccel->name,
+                       ist->file_index, ist->st->index);
+                exit_program(1);
+            }
+            continue;
+        }
+        ist->active_hwaccel_id = hwaccel->id;
+        ist->hwaccel_pix_fmt   = *p;
+        break;
+    }
+
+    return *p;
+}
+
+static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
+{
+    InputStream *ist = s->opaque;
+
+    if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
+        return ist->hwaccel_get_buffer(s, frame, flags);
+
+    return avcodec_default_get_buffer2(s, frame, flags);
+}
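get_format() and get_buffer() above are the two decoder callbacks that make hardware acceleration work: libavcodec calls get_format() during format negotiation with a preference-ordered list of candidate pixel formats terminated by AV_PIX_FMT_NONE, and whatever the callback returns is what the decoder will output; get_buffer() then routes frame allocation to the hwaccel once a hardware format has been chosen. A minimal sketch of such a negotiation callback; TARGET_HW_FMT is a hypothetical stand-in for the one format the application can handle:

    #include <libavcodec/avcodec.h>
    #include <libavutil/pixdesc.h>

    /* Hypothetical: the only hardware format this sketch knows how to use. */
    #define TARGET_HW_FMT AV_PIX_FMT_VDPAU

    static enum AVPixelFormat pick_format(AVCodecContext *s,
                                          const enum AVPixelFormat *pix_fmts)
    {
        const enum AVPixelFormat *p;

        for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);

            if (*p == TARGET_HW_FMT)                       /* preferred hw format  */
                return *p;
            if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))  /* first sw format wins */
                return *p;
        }

        return pix_fmts[0];  /* nothing usable; keep the decoder's first choice */
    }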
+
+static int init_input_stream(int ist_index, char *error, int error_len)
+{
+    int i, ret;
     InputStream *ist = input_streams[ist_index];
     if (ist->decoding_needed) {
         AVCodec *codec = ist->dec;
@@ -1940,27 +1559,34 @@ static int init_input_stream(int ist_index, char *error, int error_len)
             }
         }
 
-        if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
-            ist->st->codec->get_buffer     = codec_get_buffer;
-            ist->st->codec->release_buffer = codec_release_buffer;
-            ist->st->codec->opaque         = &ist->buffer_pool;
-        }
+        ist->st->codec->opaque      = ist;
+        ist->st->codec->get_format  = get_format;
+        ist->st->codec->get_buffer2 = get_buffer;
+        ist->st->codec->thread_safe_callbacks = 1;
 
-        if (!av_dict_get(ist->opts, "threads", NULL, 0))
-            av_dict_set(&ist->opts, "threads", "auto", 0);
-        if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
-            snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
-                    ist->file_index, ist->st->index);
-            return AVERROR(EINVAL);
+        av_opt_set_int(ist->st->codec, "refcounted_frames", 1, 0);
+
+        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
+            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
+        if ((ret = avcodec_open2(ist->st->codec, codec, &ist->decoder_opts)) < 0) {
+            char errbuf[128];
+            if (ret == AVERROR_EXPERIMENTAL)
+                abort_codec_experimental(codec, 0);
+
+            av_strerror(ret, errbuf, sizeof(errbuf));
+
+            snprintf(error, error_len,
+                     "Error while opening decoder for input stream "
+                     "#%d:%d : %s",
+                     ist->file_index, ist->st->index, errbuf);
+            return ret;
         }
-        assert_codec_experimental(ist->st->codec, 0);
-        assert_avoptions(ist->opts);
+        assert_avoptions(ist->decoder_opts);
     }
 
     ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
     ist->next_dts = AV_NOPTS_VALUE;
     init_pts_correction(&ist->pts_ctx);
-    ist->is_start = 1;
 
     return 0;
 }
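The decoder-open error path above now keeps the exact avcodec_open2() return value and turns it into a readable message with av_strerror() instead of collapsing every failure into EINVAL. A tiny standalone sketch of that reporting idiom:

    #include <stdio.h>
    #include <libavutil/error.h>

    /* Print a human-readable message for a libav* error code. */
    static void report_error(const char *what, int err)
    {
        char errbuf[128];

        if (av_strerror(err, errbuf, sizeof(errbuf)) < 0)
            snprintf(errbuf, sizeof(errbuf), "unknown error %d", err);
        fprintf(stderr, "%s: %s\n", what, errbuf);
    }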
@@ -2017,7 +1643,7 @@ static int transcode_init(void)
 {
     int ret = 0, i, j, k;
     AVFormatContext *oc;
-    AVCodecContext *codec, *icodec;
+    AVCodecContext *codec;
     OutputStream *ost;
     InputStream *ist;
     char error[1024];
@@ -2048,6 +1674,7 @@ static int transcode_init(void)
 
     /* for each output stream, we compute the right encoding parameters */
     for (i = 0; i < nb_output_streams; i++) {
+        AVCodecContext *dec_ctx = NULL;
         ost = output_streams[i];
         oc  = output_files[ost->file_index]->ctx;
         ist = get_input_stream(ost);
@@ -2058,82 +1685,85 @@ static int transcode_init(void)
         codec  = ost->st->codec;
 
         if (ist) {
-            icodec = ist->st->codec;
+            dec_ctx = ist->st->codec;
 
             ost->st->disposition          = ist->st->disposition;
-            codec->bits_per_raw_sample    = icodec->bits_per_raw_sample;
-            codec->chroma_sample_location = icodec->chroma_sample_location;
+            codec->bits_per_raw_sample    = dec_ctx->bits_per_raw_sample;
+            codec->chroma_sample_location = dec_ctx->chroma_sample_location;
         }
 
         if (ost->stream_copy) {
+            AVRational sar;
             uint64_t extra_size;
 
             av_assert0(ist && !ost->filter);
 
-            extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
+            extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
 
             if (extra_size > INT_MAX) {
                 return AVERROR(EINVAL);
             }
 
             /* if stream_copy is selected, no need to decode or encode */
-            codec->codec_id   = icodec->codec_id;
-            codec->codec_type = icodec->codec_type;
+            codec->codec_id   = dec_ctx->codec_id;
+            codec->codec_type = dec_ctx->codec_type;
 
             if (!codec->codec_tag) {
                 if (!oc->oformat->codec_tag ||
-                     av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
-                     av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
-                    codec->codec_tag = icodec->codec_tag;
+                     av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == codec->codec_id ||
+                     av_codec_get_tag(oc->oformat->codec_tag, dec_ctx->codec_id) <= 0)
+                    codec->codec_tag = dec_ctx->codec_tag;
             }
 
-            codec->bit_rate       = icodec->bit_rate;
-            codec->rc_max_rate    = icodec->rc_max_rate;
-            codec->rc_buffer_size = icodec->rc_buffer_size;
-            codec->field_order    = icodec->field_order;
+            codec->bit_rate       = dec_ctx->bit_rate;
+            codec->rc_max_rate    = dec_ctx->rc_max_rate;
+            codec->rc_buffer_size = dec_ctx->rc_buffer_size;
+            codec->field_order    = dec_ctx->field_order;
             codec->extradata      = av_mallocz(extra_size);
             if (!codec->extradata) {
                 return AVERROR(ENOMEM);
             }
-            memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
-            codec->extradata_size = icodec->extradata_size;
+            memcpy(codec->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
+            codec->extradata_size = dec_ctx->extradata_size;
             if (!copy_tb) {
-                codec->time_base      = icodec->time_base;
-                codec->time_base.num *= icodec->ticks_per_frame;
+                codec->time_base      = dec_ctx->time_base;
+                codec->time_base.num *= dec_ctx->ticks_per_frame;
                 av_reduce(&codec->time_base.num, &codec->time_base.den,
                           codec->time_base.num, codec->time_base.den, INT_MAX);
             } else
                 codec->time_base = ist->st->time_base;
 
+            ost->parser = av_parser_init(codec->codec_id);
+
             switch (codec->codec_type) {
             case AVMEDIA_TYPE_AUDIO:
                 if (audio_volume != 256) {
                     av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
                     exit_program(1);
                 }
-                codec->channel_layout     = icodec->channel_layout;
-                codec->sample_rate        = icodec->sample_rate;
-                codec->channels           = icodec->channels;
-                codec->frame_size         = icodec->frame_size;
-                codec->audio_service_type = icodec->audio_service_type;
-                codec->block_align        = icodec->block_align;
+                codec->channel_layout     = dec_ctx->channel_layout;
+                codec->sample_rate        = dec_ctx->sample_rate;
+                codec->channels           = dec_ctx->channels;
+                codec->frame_size         = dec_ctx->frame_size;
+                codec->audio_service_type = dec_ctx->audio_service_type;
+                codec->block_align        = dec_ctx->block_align;
                 break;
             case AVMEDIA_TYPE_VIDEO:
-                codec->pix_fmt            = icodec->pix_fmt;
-                codec->width              = icodec->width;
-                codec->height             = icodec->height;
-                codec->has_b_frames       = icodec->has_b_frames;
-                if (!codec->sample_aspect_ratio.num) {
-                    codec->sample_aspect_ratio   =
-                    ost->st->sample_aspect_ratio =
-                        ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
-                        ist->st->codec->sample_aspect_ratio.num ?
-                        ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
-                }
+                codec->pix_fmt            = dec_ctx->pix_fmt;
+                codec->width              = dec_ctx->width;
+                codec->height             = dec_ctx->height;
+                codec->has_b_frames       = dec_ctx->has_b_frames;
+                if (ost->frame_aspect_ratio)
+                    sar = av_d2q(ost->frame_aspect_ratio * codec->height / codec->width, 255);
+                else if (ist->st->sample_aspect_ratio.num)
+                    sar = ist->st->sample_aspect_ratio;
+                else
+                    sar = dec_ctx->sample_aspect_ratio;
+                ost->st->sample_aspect_ratio = codec->sample_aspect_ratio = sar;
                 break;
             case AVMEDIA_TYPE_SUBTITLE:
-                codec->width  = icodec->width;
-                codec->height = icodec->height;
+                codec->width  = dec_ctx->width;
+                codec->height = dec_ctx->height;
                 break;
             case AVMEDIA_TYPE_DATA:
             case AVMEDIA_TYPE_ATTACHMENT:
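When an aspect ratio is forced on a copied video stream, the code above derives the sample aspect ratio from the requested display aspect ratio as SAR = DAR * height / width and quantizes it with av_d2q(). A standalone sketch with hypothetical numbers (not part of avconv.c):

    #include <stdio.h>
    #include <libavutil/rational.h>

    int main(void)
    {
        double dar = 16.0 / 9.0;           /* forced display aspect ratio */
        int width  = 720, height = 576;

        /* SAR = DAR * height / width, approximated with num/den <= 255 */
        AVRational sar = av_d2q(dar * height / width, 255);

        printf("%d:%d\n", sar.num, sar.den); /* 64:45 for these inputs */
        return 0;
    }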
@@ -2171,7 +1801,20 @@ static int transcode_init(void)
                 (video_sync_method ==  VSYNC_CFR ||
                  (video_sync_method ==  VSYNC_AUTO &&
                   !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
-                ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
+                if (ist->framerate.num)
+                    ost->frame_rate = ist->framerate;
+                else if (ist->st->avg_frame_rate.num)
+                    ost->frame_rate = ist->st->avg_frame_rate;
+                else {
+                    av_log(NULL, AV_LOG_WARNING, "Constant framerate requested "
+                           "for the output stream #%d:%d, but no information "
+                           "about the input framerate is available. Falling "
+                           "back to a default value of 25fps. Use the -r option "
+                           "if you want a different framerate.\n",
+                           ost->file_index, ost->index);
+                    ost->frame_rate = (AVRational){ 25, 1 };
+                }
+
                 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
                     int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
                     ost->frame_rate = ost->enc->supported_framerates[idx];
@@ -2185,7 +1828,7 @@ static int transcode_init(void)
                     fg = init_simple_filtergraph(ist, ost);
                     if (configure_filtergraph(fg)) {
                         av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
-                        exit(1);
+                        exit_program(1);
                     }
             }
 
@@ -2208,9 +1851,10 @@ static int transcode_init(void)
                     ost->filter->filter->inputs[0]->sample_aspect_ratio;
                 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
 
-                if (codec->width   != icodec->width  ||
-                    codec->height  != icodec->height ||
-                    codec->pix_fmt != icodec->pix_fmt) {
+                if (dec_ctx &&
+                    (codec->width   != dec_ctx->width  ||
+                     codec->height  != dec_ctx->height ||
+                     codec->pix_fmt != dec_ctx->pix_fmt)) {
                     codec->bits_per_raw_sample = 0;
                 }
 
@@ -2231,7 +1875,8 @@ static int transcode_init(void)
                 FILE *f;
 
                 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
-                         pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
+                         ost->logfile_prefix ? ost->logfile_prefix :
+                                               DEFAULT_PASS_LOGFILENAME_PREFIX,
                          i);
                 if (!strcmp(ost->enc->name, "libx264")) {
                     av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
@@ -2279,21 +1924,19 @@ static int transcode_init(void)
             }
             if (!av_dict_get(ost->opts, "threads", NULL, 0))
                 av_dict_set(&ost->opts, "threads", "auto", 0);
-            if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
+            if ((ret = avcodec_open2(ost->st->codec, codec, &ost->opts)) < 0) {
+                if (ret == AVERROR_EXPERIMENTAL)
+                    abort_codec_experimental(codec, 1);
                 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
                         ost->file_index, ost->index);
-                ret = AVERROR(EINVAL);
                 goto dump_format;
             }
-            assert_codec_experimental(ost->st->codec, 1);
             assert_avoptions(ost->opts);
             if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
                 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low. "
                                              "It takes bits/s as argument, not kbits/s\n");
-            extra_size += ost->st->codec->extradata_size;
-
-            if (ost->st->codec->me_threshold)
-                input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
+        } else {
+            av_opt_set_dict(ost->st->codec, &ost->opts);
         }
     }
 
@@ -2324,10 +1967,11 @@ static int transcode_init(void)
         oc->interrupt_callback = int_cb;
         if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
             char errbuf[128];
-            const char *errbuf_ptr = errbuf;
-            if (av_strerror(ret, errbuf, sizeof(errbuf)) < 0)
-                errbuf_ptr = strerror(AVUNERROR(ret));
-            snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?): %s", i, errbuf_ptr);
+            av_strerror(ret, errbuf, sizeof(errbuf));
+            snprintf(error, sizeof(error),
+                     "Could not write header for output file #%d "
+                     "(incorrect codec parameters?): %s",
+                     i, errbuf);
             ret = AVERROR(EINVAL);
             goto dump_format;
         }
@@ -2412,10 +2056,7 @@ static int transcode_init(void)
     return 0;
 }
 
-/**
- * @return 1 if there are still streams where more output is wanted,
- *         0 otherwise
- */
+/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
 static int need_output(void)
 {
     int i;
@@ -2425,13 +2066,13 @@ static int need_output(void)
         OutputFile *of       = output_files[ost->file_index];
         AVFormatContext *os  = output_files[ost->file_index]->ctx;
 
-        if (ost->is_past_recording_time ||
+        if (ost->finished ||
             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
             continue;
         if (ost->frame_number >= ost->max_frames) {
             int j;
             for (j = 0; j < of->ctx->nb_streams; j++)
-                output_streams[of->ost_index + j]->is_past_recording_time = 1;
+                output_streams[of->ost_index + j]->finished = 1;
             continue;
         }
 
@@ -2441,26 +2082,27 @@ static int need_output(void)
     return 0;
 }
 
-static int select_input_file(uint8_t *no_packet)
+static InputFile *select_input_file(void)
 {
+    InputFile *ifile = NULL;
     int64_t ipts_min = INT64_MAX;
-    int i, file_index = -1;
+    int i;
 
     for (i = 0; i < nb_input_streams; i++) {
         InputStream *ist = input_streams[i];
         int64_t ipts     = ist->last_dts;
 
-        if (ist->discard || no_packet[ist->file_index])
+        if (ist->discard || input_files[ist->file_index]->eagain)
             continue;
         if (!input_files[ist->file_index]->eof_reached) {
             if (ipts < ipts_min) {
                 ipts_min = ipts;
-                file_index = ist->file_index;
+                ifile    = input_files[ist->file_index];
             }
         }
     }
 
-    return file_index;
+    return ifile;
 }
 
 #if HAVE_PTHREADS
@@ -2575,6 +2217,17 @@ static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
 
 static int get_input_packet(InputFile *f, AVPacket *pkt)
 {
+    if (f->rate_emu) {
+        int i;
+        for (i = 0; i < f->nb_streams; i++) {
+            InputStream *ist = input_streams[f->ist_index + i];
+            int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
+            int64_t now = av_gettime() - ist->start;
+            if (pts > now)
+                return AVERROR(EAGAIN);
+        }
+    }
+
 #if HAVE_PTHREADS
     if (nb_input_files > 1)
         return get_input_packet_mt(f, pkt);
@@ -2582,22 +2235,174 @@ static int get_input_packet(InputFile *f, AVPacket *pkt)
     return av_read_frame(f->ctx, pkt);
 }
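Rate emulation (-re) has moved from a per-stream sleep into the packet-reading path above: rather than sleeping, get_input_packet() reports EAGAIN while any stream of the file is still ahead of real time, and the main loop simply retries later. A self-contained sketch of the per-stream check; last_dts is assumed to be in AV_TIME_BASE units and start_wallclock to be the av_gettime() value recorded when the input was opened:

    #include <libavutil/avutil.h>
    #include <libavutil/error.h>
    #include <libavutil/mathematics.h>
    #include <libavutil/time.h>

    static int rate_emu_ready(int64_t last_dts, int64_t start_wallclock)
    {
        int64_t pts = av_rescale(last_dts, 1000000, AV_TIME_BASE); /* DTS in microseconds */
        int64_t now = av_gettime() - start_wallclock;               /* elapsed wall clock  */

        return pts > now ? AVERROR(EAGAIN) : 0;  /* too early: the caller should retry */
    }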
 
+static int got_eagain(void)
+{
+    int i;
+    for (i = 0; i < nb_input_files; i++)
+        if (input_files[i]->eagain)
+            return 1;
+    return 0;
+}
+
+static void reset_eagain(void)
+{
+    int i;
+    for (i = 0; i < nb_input_files; i++)
+        input_files[i]->eagain = 0;
+}
+
+/*
+ * Read one packet from an input file and send it for
+ * - decoding -> lavfi (audio/video)
+ * - decoding -> encoding -> muxing (subtitles)
+ * - muxing (streamcopy)
+ *
+ * Return
+ * - 0 -- one packet was read and processed
+ * - AVERROR(EAGAIN) -- no packets were available for selected file,
+ *   this function should be called again
+ * - AVERROR_EOF -- this function should not be called again
+ */
+static int process_input(void)
+{
+    InputFile *ifile;
+    AVFormatContext *is;
+    InputStream *ist;
+    AVPacket pkt;
+    int ret, i, j;
+
+    /* select the input file to read from now */
+    ifile = select_input_file();
+    /* if none, the input is finished */
+    if (!ifile) {
+        if (got_eagain()) {
+            reset_eagain();
+            av_usleep(10000);
+            return AVERROR(EAGAIN);
+        }
+        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from.\n");
+        return AVERROR_EOF;
+    }
+
+    is  = ifile->ctx;
+    ret = get_input_packet(ifile, &pkt);
+
+    if (ret == AVERROR(EAGAIN)) {
+        ifile->eagain = 1;
+        return ret;
+    }
+    if (ret < 0) {
+        if (ret != AVERROR_EOF) {
+            print_error(is->filename, ret);
+            if (exit_on_error)
+                exit_program(1);
+        }
+        ifile->eof_reached = 1;
+
+        for (i = 0; i < ifile->nb_streams; i++) {
+            ist = input_streams[ifile->ist_index + i];
+            if (ist->decoding_needed)
+                output_packet(ist, NULL);
+
+            /* mark all outputs that don't go through lavfi as finished */
+            for (j = 0; j < nb_output_streams; j++) {
+                OutputStream *ost = output_streams[j];
+
+                if (ost->source_index == ifile->ist_index + i &&
+                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
+                    ost->finished = 1;
+            }
+        }
+
+        return AVERROR(EAGAIN);
+    }
+
+    reset_eagain();
+
+    if (do_pkt_dump) {
+        av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
+                         is->streams[pkt.stream_index]);
+    }
+    /* the following test is needed in case new streams appear
+       dynamically in the stream: we ignore them */
+    if (pkt.stream_index >= ifile->nb_streams)
+        goto discard_packet;
+
+    ist = input_streams[ifile->ist_index + pkt.stream_index];
+
+    ist->data_size += pkt.size;
+    ist->nb_packets++;
+
+    if (ist->discard)
+        goto discard_packet;
+
+    /* add the stream-global side data to the first packet */
+    if (ist->nb_packets == 1)
+        for (i = 0; i < ist->st->nb_side_data; i++) {
+            AVPacketSideData *src_sd = &ist->st->side_data[i];
+            uint8_t *dst_data;
+
+            if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
+                continue;
+
+            dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
+            if (!dst_data)
+                exit_program(1);
+
+            memcpy(dst_data, src_sd->data, src_sd->size);
+        }
+
+    if (pkt.dts != AV_NOPTS_VALUE)
+        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
+    if (pkt.pts != AV_NOPTS_VALUE)
+        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
+
+    if (pkt.pts != AV_NOPTS_VALUE)
+        pkt.pts *= ist->ts_scale;
+    if (pkt.dts != AV_NOPTS_VALUE)
+        pkt.dts *= ist->ts_scale;
+
+    if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
+        (is->iformat->flags & AVFMT_TS_DISCONT)) {
+        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
+        int64_t delta   = pkt_dts - ist->next_dts;
+
+        if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
+            ifile->ts_offset -= delta;
+            av_log(NULL, AV_LOG_DEBUG,
+                   "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
+                   delta, ifile->ts_offset);
+            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+            if (pkt.pts != AV_NOPTS_VALUE)
+                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+        }
+    }
+
+    ret = output_packet(ist, &pkt);
+    if (ret < 0) {
+        av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
+               ist->file_index, ist->st->index);
+        if (exit_on_error)
+            exit_program(1);
+    }
+
+discard_packet:
+    av_free_packet(&pkt);
+
+    return 0;
+}
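The return contract documented above keeps the caller simple: call process_input() repeatedly, treat EAGAIN as "come back later" and EOF as "stop reading". A reduced sketch of such a driver; the real loop in transcode() below additionally drains the filtergraphs with poll_filters() and prints a progress report after each iteration:

    /* Reduced sketch of a driver for process_input(); error handling trimmed. */
    static int drain_inputs(void)
    {
        for (;;) {
            int ret = process_input();

            if (ret == AVERROR(EAGAIN))   /* nothing available right now, retry   */
                continue;
            if (ret == AVERROR_EOF)       /* every input has been fully consumed  */
                return 0;
            /* ret == 0: one packet was read and routed; keep going */
        }
    }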
+
 /*
  * The following code is the main loop of the file converter
  */
 static int transcode(void)
 {
-    int ret, i;
-    AVFormatContext *is, *os;
+    int ret, i, need_input = 1;
+    AVFormatContext *os;
     OutputStream *ost;
     InputStream *ist;
-    uint8_t *no_packet;
-    int no_packet_count = 0;
     int64_t timer_start;
 
-    if (!(no_packet = av_mallocz(nb_input_files)))
-        exit_program(1);
-
     ret = transcode_init();
     if (ret < 0)
         goto fail;
@@ -2612,116 +2417,29 @@ static int transcode(void)
         goto fail;
 #endif
 
-    for (; received_sigterm == 0;) {
-        int file_index, ist_index;
-        AVPacket pkt;
-
+    while (!received_sigterm) {
         /* check if there's any stream where output is still needed */
         if (!need_output()) {
             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
             break;
         }
 
-        /* select the stream that we must read now */
-        file_index = select_input_file(no_packet);
-        /* if none, if is finished */
-        if (file_index < 0) {
-            if (no_packet_count) {
-                no_packet_count = 0;
-                memset(no_packet, 0, nb_input_files);
-                av_usleep(10000);
-                continue;
-            }
-            av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
-            break;
+        /* read and process one input packet if needed */
+        if (need_input) {
+            ret = process_input();
+            if (ret == AVERROR_EOF)
+                need_input = 0;
         }
 
-        is  = input_files[file_index]->ctx;
-        ret = get_input_packet(input_files[file_index], &pkt);
-
-        if (ret == AVERROR(EAGAIN)) {
-            no_packet[file_index] = 1;
-            no_packet_count++;
-            continue;
-        }
+        ret = poll_filters();
         if (ret < 0) {
-            if (ret != AVERROR_EOF) {
-                print_error(is->filename, ret);
-                if (exit_on_error)
-                    exit_program(1);
-            }
-            input_files[file_index]->eof_reached = 1;
-
-            for (i = 0; i < input_files[file_index]->nb_streams; i++) {
-                ist = input_streams[input_files[file_index]->ist_index + i];
-                if (ist->decoding_needed)
-                    output_packet(ist, NULL);
-            }
-
-            if (opt_shortest)
-                break;
-            else
+            if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
                 continue;
-        }
-
-        no_packet_count = 0;
-        memset(no_packet, 0, nb_input_files);
-
-        if (do_pkt_dump) {
-            av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
-                             is->streams[pkt.stream_index]);
-        }
-        /* the following test is needed in case new streams appear
-           dynamically in stream : we ignore them */
-        if (pkt.stream_index >= input_files[file_index]->nb_streams)
-            goto discard_packet;
-        ist_index = input_files[file_index]->ist_index + pkt.stream_index;
-        ist = input_streams[ist_index];
-        if (ist->discard)
-            goto discard_packet;
-
-        if (pkt.dts != AV_NOPTS_VALUE)
-            pkt.dts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
-        if (pkt.pts != AV_NOPTS_VALUE)
-            pkt.pts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
-
-        if (pkt.pts != AV_NOPTS_VALUE)
-            pkt.pts *= ist->ts_scale;
-        if (pkt.dts != AV_NOPTS_VALUE)
-            pkt.dts *= ist->ts_scale;
-
-        //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
-        //        ist->next_dts,
-        //        pkt.dts, input_files[ist->file_index].ts_offset,
-        //        ist->st->codec->codec_type);
-        if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE
-            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
-            int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
-            int64_t delta   = pkt_dts - ist->next_dts;
-            if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
-                input_files[ist->file_index]->ts_offset -= delta;
-                av_log(NULL, AV_LOG_DEBUG,
-                       "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
-                       delta, input_files[ist->file_index]->ts_offset);
-                pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
-                if (pkt.pts != AV_NOPTS_VALUE)
-                    pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
-            }
-        }
 
-        // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
-        if (output_packet(ist, &pkt) < 0 || poll_filters() < 0) {
-            av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
-                   ist->file_index, ist->st->index);
-            if (exit_on_error)
-                exit_program(1);
-            av_free_packet(&pkt);
-            continue;
+            av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
+            break;
         }
 
-    discard_packet:
-        av_free_packet(&pkt);
-
         /* dump report by using the output first video and audio streams */
         print_report(0, timer_start);
     }
@@ -2764,6 +2482,8 @@ static int transcode(void)
         ist = input_streams[i];
         if (ist->decoding_needed) {
             avcodec_close(ist->st->codec);
+            if (ist->hwaccel_uninit)
+                ist->hwaccel_uninit(ist->st->codec);
         }
     }
 
@@ -2771,7 +2491,6 @@ static int transcode(void)
     ret = 0;
 
  fail:
-    av_freep(&no_packet);
 #if HAVE_PTHREADS
     free_input_threads();
 #endif
@@ -2789,6 +2508,7 @@ static int transcode(void)
                 av_freep(&ost->st->codec->subtitle_header);
                 av_free(ost->forced_kf_pts);
                 av_dict_free(&ost->opts);
+                av_dict_free(&ost->resample_opts);
             }
         }
     }
@@ -2831,19 +2551,12 @@ static int64_t getmaxrss(void)
 #endif
 }
 
-static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
-{
-    int idx = locate_option(argc, argv, options, "cpuflags");
-    if (idx && argv[idx + 1])
-        opt_cpuflags("cpuflags", argv[idx + 1]);
-}
-
 int main(int argc, char **argv)
 {
-    OptionsContext o = { 0 };
+    int ret;
     int64_t ti;
 
-    reset_options(&o);
+    register_exit(avconv_cleanup);
 
     av_log_set_flags(AV_LOG_SKIP_REPEATED);
     parse_loglevel(argc, argv, options);
@@ -2858,10 +2571,10 @@ int main(int argc, char **argv)
 
     show_banner();
 
-    parse_cpuflags(argc, argv, options);
-
-    /* parse options */
-    parse_options(&o, argc, argv, options, opt_output_file);
+    /* parse options and open all input/output files */
+    ret = avconv_parse_options(argc, argv);
+    if (ret < 0)
+        exit_program(1);
 
     if (nb_output_files <= 0 && nb_input_files == 0) {
         show_usage();
@@ -2875,11 +2588,6 @@ int main(int argc, char **argv)
         exit_program(1);
     }
 
-    if (nb_input_files == 0) {
-        av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
-        exit_program(1);
-    }
-
     ti = getutime();
     if (transcode() < 0)
         exit_program(1);