index 3b61ee26c6cc49030f103f07020a6ba2af084518..5c31332812b743b7f3907808c4b0ab06348d98d2 100644 (file)
--- a/avconv.c
+++ b/avconv.c
@@ -38,6 +38,7 @@
 #include "libavutil/parseutils.h"
 #include "libavutil/samplefmt.h"
 #include "libavutil/fifo.h"
+#include "libavutil/hwcontext.h"
 #include "libavutil/internal.h"
 #include "libavutil/intreadwrite.h"
 #include "libavutil/dict.h"
@@ -87,7 +88,7 @@ static FILE *vstats_file;
 
 static int nb_frames_drop = 0;
 
-
+static int want_sdp = 1;
 
 #if HAVE_PTHREADS
 /* signal to input threads that they should exit; set by the main thread */
@@ -147,12 +148,23 @@ static void avconv_cleanup(int ret)
         FilterGraph *fg = filtergraphs[i];
         avfilter_graph_free(&fg->graph);
         for (j = 0; j < fg->nb_inputs; j++) {
+            while (av_fifo_size(fg->inputs[j]->frame_queue)) {
+                AVFrame *frame;
+                av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
+                                     sizeof(frame), NULL);
+                av_frame_free(&frame);
+            }
+            av_fifo_free(fg->inputs[j]->frame_queue);
+            av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
             av_freep(&fg->inputs[j]->name);
             av_freep(&fg->inputs[j]);
         }
         av_freep(&fg->inputs);
         for (j = 0; j < fg->nb_outputs; j++) {
             av_freep(&fg->outputs[j]->name);
+            av_freep(&fg->outputs[j]->formats);
+            av_freep(&fg->outputs[j]->channel_layouts);
+            av_freep(&fg->outputs[j]->sample_rates);
             av_freep(&fg->outputs[j]);
         }
         av_freep(&fg->outputs);
@@ -179,11 +191,11 @@ static void avconv_cleanup(int ret)
         for (j = 0; j < ost->nb_bitstream_filters; j++)
             av_bsf_free(&ost->bsf_ctx[j]);
         av_freep(&ost->bsf_ctx);
-        av_freep(&ost->bitstream_filters);
 
         av_frame_free(&ost->filtered_frame);
 
         av_parser_close(ost->parser);
+        avcodec_free_context(&ost->parser_avctx);
 
         av_freep(&ost->forced_keyframes);
         av_freep(&ost->avfilter);
@@ -191,6 +203,14 @@ static void avconv_cleanup(int ret)
 
         avcodec_free_context(&ost->enc_ctx);
 
+        if (ost->muxing_queue) {
+            while (av_fifo_size(ost->muxing_queue)) {
+                AVPacket pkt;
+                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
+                av_packet_unref(&pkt);
+            }
+            av_fifo_free(ost->muxing_queue);
+        }
         av_freep(&output_streams[i]);
     }
     for (i = 0; i < nb_input_files; i++) {
@@ -254,11 +274,33 @@ static void abort_codec_experimental(AVCodec *c, int encoder)
     exit_program(1);
 }
 
-static void write_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
+static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
 {
+    AVFormatContext *s = of->ctx;
     AVStream *st = ost->st;
     int ret;
 
+    if (!of->header_written) {
+        AVPacket tmp_pkt;
+        /* the muxer is not initialized yet, buffer the packet */
+        if (!av_fifo_space(ost->muxing_queue)) {
+            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
+                                 ost->max_muxing_queue_size);
+            if (new_size <= av_fifo_size(ost->muxing_queue)) {
+                av_log(NULL, AV_LOG_ERROR,
+                       "Too many packets buffered for output stream %d:%d.\n",
+                       ost->file_index, ost->st->index);
+                exit_program(1);
+            }
+            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
+            if (ret < 0)
+                exit_program(1);
+        }
+        av_packet_move_ref(&tmp_pkt, pkt);
+        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
+        return;
+    }
+
     /*
      * Audio encoders may split the packets --  #frames in != #packets out.
      * But there is no reordering, so we can limit the number of output packets
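The hunk above buffers packets in an AVFifoBuffer until the muxer header has been written, doubling the queue as needed but never past ost->max_muxing_queue_size. A minimal sketch of that bounded-growth pattern, assuming the AVFifoBuffer API used in this patch; queue_packet() and max_queue_size are hypothetical names, not avconv fields:

/* Minimal sketch of the bounded packet-buffering pattern shown above.
 * queue_packet() and 'max_queue_size' are hypothetical names. */
#include <libavcodec/avcodec.h>
#include <libavutil/fifo.h>

static int queue_packet(AVFifoBuffer *fifo, AVPacket *pkt, int max_queue_size)
{
    AVPacket tmp;
    int ret;

    if (!av_fifo_space(fifo)) {
        /* double the queue, but never beyond the configured cap */
        int new_size = FFMIN(2 * av_fifo_size(fifo), max_queue_size);
        if (new_size <= av_fifo_size(fifo))
            return AVERROR(ENOSPC);            /* queue full, give up */
        ret = av_fifo_realloc2(fifo, new_size);
        if (ret < 0)
            return ret;
    }

    /* the FIFO stores AVPacket structs by value; move the reference in */
    av_packet_move_ref(&tmp, pkt);
    av_fifo_generic_write(fifo, &tmp, sizeof(tmp), NULL);
    return 0;
}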
@@ -280,7 +322,7 @@ static void write_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
 
         if (ost->frame_rate.num) {
             pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
-                                         ost->st->time_base);
+                                         ost->mux_timebase);
         }
     }
 
@@ -307,6 +349,8 @@ static void write_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
     ost->packets_written++;
 
     pkt->stream_index = ost->index;
+    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
+
     ret = av_interleaved_write_frame(s, pkt);
     if (ret < 0) {
         print_error("av_interleaved_write_frame()", ret);
@@ -314,7 +358,7 @@ static void write_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
     }
 }
 
-static void output_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
+static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
 {
     int ret = 0;
 
@@ -344,10 +388,10 @@ static void output_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
                     goto finish;
                 idx++;
             } else
-                write_packet(s, pkt, ost);
+                write_packet(of, pkt, ost);
         }
     } else
-        write_packet(s, pkt, ost);
+        write_packet(of, pkt, ost);
 
 finish:
     if (ret < 0 && ret != AVERROR_EOF) {
@@ -370,7 +414,7 @@ static int check_recording_time(OutputStream *ost)
     return 1;
 }
 
-static void do_audio_out(AVFormatContext *s, OutputStream *ost,
+static void do_audio_out(OutputFile *of, OutputStream *ost,
                          AVFrame *frame)
 {
     AVCodecContext *enc = ost->enc_ctx;
@@ -399,8 +443,7 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
         if (ret < 0)
             goto error;
 
-        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
-        output_packet(s, &pkt, ost);
+        output_packet(of, &pkt, ost);
     }
 
     return;
@@ -409,7 +452,7 @@ error:
     exit_program(1);
 }
 
-static void do_subtitle_out(AVFormatContext *s,
+static void do_subtitle_out(OutputFile *of,
                             OutputStream *ost,
                             InputStream *ist,
                             AVSubtitle *sub,
@@ -465,7 +508,7 @@ static void do_subtitle_out(AVFormatContext *s,
         av_init_packet(&pkt);
         pkt.data = subtitle_out;
         pkt.size = subtitle_out_size;
-        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
+        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
         if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
             /* XXX: the pts correction is handled here. Maybe handling
                it in the codec would be better */
@@ -474,11 +517,11 @@ static void do_subtitle_out(AVFormatContext *s,
             else
                 pkt.pts += 90 * sub->end_display_time;
         }
-        output_packet(s, &pkt, ost);
+        output_packet(of, &pkt, ost);
     }
 }
 
-static void do_video_out(AVFormatContext *s,
+static void do_video_out(OutputFile *of,
                          OutputStream *ost,
                          AVFrame *in_picture,
                          int *frame_size)
@@ -491,8 +534,8 @@ static void do_video_out(AVFormatContext *s,
 
     format_video_sync = video_sync_method;
     if (format_video_sync == VSYNC_AUTO)
-        format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
-                            (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
+        format_video_sync = (of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
+                            (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
     if (format_video_sync != VSYNC_PASSTHROUGH &&
         ost->frame_number &&
         in_picture->pts != AV_NOPTS_VALUE &&
@@ -550,8 +593,7 @@ static void do_video_out(AVFormatContext *s,
         if (ret < 0)
             goto error;
 
-        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
-        output_packet(s, &pkt, ost);
+        output_packet(of, &pkt, ost);
         *frame_size = pkt.size;
 
         /* if two pass, output log */
@@ -569,10 +611,12 @@ error:
     exit_program(1);
 }
 
+#if FF_API_CODED_FRAME && FF_API_ERROR_FRAME
 static double psnr(double d)
 {
     return -10.0 * log(d) / log(10.0);
 }
+#endif
 
 static void do_video_stats(OutputStream *ost, int frame_size)
 {
@@ -595,7 +639,7 @@ static void do_video_stats(OutputStream *ost, int frame_size)
         fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                 ost->quality / (float)FF_QP2LAMBDA);
 
-#if FF_API_CODED_FRAME
+#if FF_API_CODED_FRAME && FF_API_ERROR_FRAME
 FF_DISABLE_DEPRECATION_WARNINGS
         if (enc->flags & AV_CODEC_FLAG_PSNR)
             fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
@@ -620,6 +664,8 @@ FF_ENABLE_DEPRECATION_WARNINGS
     }
 }
 
+static int init_output_stream(OutputStream *ost, char *error, int error_len);
+
 /*
  * Read one frame for lavfi output for ost and encode it.
  */
@@ -634,6 +680,16 @@ static int poll_filter(OutputStream *ost)
     }
     filtered_frame = ost->filtered_frame;
 
+    if (!ost->initialized) {
+        char error[1024];
+        ret = init_output_stream(ost, error, sizeof(error));
+        if (ret < 0) {
+            av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
+                   ost->file_index, ost->index, error);
+            exit_program(1);
+        }
+    }
+
     if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
         !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
         ret = av_buffersink_get_samples(ost->filter->filter, filtered_frame,
@@ -659,12 +715,12 @@ static int poll_filter(OutputStream *ost)
         if (!ost->frame_aspect_ratio)
             ost->enc_ctx->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
 
-        do_video_out(of->ctx, ost, filtered_frame, &frame_size);
+        do_video_out(of, ost, filtered_frame, &frame_size);
         if (vstats_filename && frame_size)
             do_video_stats(ost, frame_size);
         break;
     case AVMEDIA_TYPE_AUDIO:
-        do_audio_out(of->ctx, ost, filtered_frame);
+        do_audio_out(of, ost, filtered_frame);
         break;
     default:
         // TODO support subtitle filters
@@ -709,7 +765,17 @@ static int poll_filters(void)
         for (i = 0; i < nb_output_streams; i++) {
             int64_t pts = output_streams[i]->sync_opts;
 
-            if (!output_streams[i]->filter || output_streams[i]->finished)
+            if (output_streams[i]->filter && !output_streams[i]->filter->graph->graph &&
+                !output_streams[i]->filter->graph->nb_inputs) {
+                ret = configure_filtergraph(output_streams[i]->filter->graph);
+                if (ret < 0) {
+                    av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
+                    return ret;
+                }
+            }
+
+            if (!output_streams[i]->filter || output_streams[i]->finished ||
+                !output_streams[i]->filter->graph->graph)
                 continue;
 
             pts = av_rescale_q(pts, output_streams[i]->enc_ctx->time_base,
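poll_filters() now configures a filtergraph lazily, once every input has seen a frame; until then ifilter_send_frame() (further down in this diff) parks incoming frames in a per-input FIFO. A hedged sketch of the matching drain step that would run after configure_filtergraph() succeeds; drain_frame_queue() is a hypothetical helper, not code from this patch:

/* Hedged sketch: pop queued frames and push them into the now-existing
 * buffersrc. av_buffersrc_add_frame() consumes the frame's references,
 * so only the AVFrame shell remains to be freed. */
#include <libavfilter/buffersrc.h>
#include <libavutil/fifo.h>
#include <libavutil/frame.h>

static int drain_frame_queue(AVFifoBuffer *queue, AVFilterContext *buffersrc)
{
    while (av_fifo_size(queue)) {
        AVFrame *frame;
        int ret;

        av_fifo_generic_read(queue, &frame, sizeof(frame), NULL);
        ret = av_buffersrc_add_frame(buffersrc, frame);
        av_frame_free(&frame);
        if (ret < 0)
            return ret;
    }
    return 0;
}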
@@ -910,7 +976,7 @@ static void print_report(int is_last_report, int64_t timer_start)
                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
             }
 
-#if FF_API_CODED_FRAME
+#if FF_API_CODED_FRAME && FF_API_ERROR_FRAME
 FF_DISABLE_DEPRECATION_WARNINGS
             if (enc->flags & AV_CODEC_FLAG_PSNR) {
                 int j;
@@ -939,7 +1005,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
             vid = 1;
         }
         /* compute min output value */
-        pts = (double)ost->last_mux_dts * av_q2d(ost->st->time_base);
+        pts = (double)ost->last_mux_dts * av_q2d(ost->mux_timebase);
         if ((pts < ti1) && (pts > 0))
             ti1 = pts;
     }
@@ -972,7 +1038,7 @@ static void flush_encoders(void)
     for (i = 0; i < nb_output_streams; i++) {
         OutputStream   *ost = output_streams[i];
         AVCodecContext *enc = ost->enc_ctx;
-        AVFormatContext *os = output_files[ost->file_index]->ctx;
+        OutputFile      *of = output_files[ost->file_index];
         int stop_encoding = 0;
 
         if (!ost->encoding_needed)
@@ -1018,8 +1084,7 @@ static void flush_encoders(void)
                     stop_encoding = 1;
                     break;
                 }
-                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
-                output_packet(os, &pkt, ost);
+                output_packet(of, &pkt, ost);
             }
 
             if (stop_encoding)
@@ -1050,7 +1115,7 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
     OutputFile *of = output_files[ost->file_index];
     InputFile   *f = input_files [ist->file_index];
     int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
-    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
+    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
     AVPacket opkt;
 
     av_init_packet(&opkt);
@@ -1080,17 +1145,17 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
         ost->sync_opts++;
 
     if (pkt->pts != AV_NOPTS_VALUE)
-        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
+        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
     else
         opkt.pts = AV_NOPTS_VALUE;
 
     if (pkt->dts == AV_NOPTS_VALUE)
-        opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
+        opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->mux_timebase);
     else
-        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
+        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
     opkt.dts -= ost_tb_start_time;
 
-    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
+    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
     opkt.flags    = pkt->flags;
 
     // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
@@ -1099,7 +1164,7 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
        && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
        && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
        ) {
-        if (av_parser_change(ost->parser, ost->st->codec,
+        if (av_parser_change(ost->parser, ost->parser_avctx,
                              &opkt.data, &opkt.size,
                              pkt->data, pkt->size,
                              pkt->flags & AV_PKT_FLAG_KEY)) {
@@ -1112,7 +1177,107 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
         opkt.size = pkt->size;
     }
 
-    output_packet(of->ctx, &opkt, ost);
+    output_packet(of, &opkt, ost);
+}
+
+static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
+{
+    FilterGraph *fg = ifilter->graph;
+    int need_reinit, ret, i;
+
+    /* determine if the parameters for this input changed */
+    need_reinit = ifilter->format != frame->format;
+    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
+        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
+        need_reinit = 1;
+
+    switch (ifilter->ist->st->codecpar->codec_type) {
+    case AVMEDIA_TYPE_AUDIO:
+        need_reinit |= ifilter->sample_rate    != frame->sample_rate ||
+                       ifilter->channel_layout != frame->channel_layout;
+        break;
+    case AVMEDIA_TYPE_VIDEO:
+        need_reinit |= ifilter->width  != frame->width ||
+                       ifilter->height != frame->height;
+        break;
+    }
+
+    if (need_reinit) {
+        ret = ifilter_parameters_from_frame(ifilter, frame);
+        if (ret < 0)
+            return ret;
+    }
+
+    /* (re)init the graph if possible, otherwise buffer the frame and return */
+    if (need_reinit || !fg->graph) {
+        for (i = 0; i < fg->nb_inputs; i++) {
+            if (fg->inputs[i]->format < 0) {
+                AVFrame *tmp = av_frame_clone(frame);
+                if (!tmp)
+                    return AVERROR(ENOMEM);
+                av_frame_unref(frame);
+
+                if (!av_fifo_space(ifilter->frame_queue)) {
+                    ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
+                    if (ret < 0)
+                        return ret;
+                }
+                av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
+                return 0;
+            }
+        }
+
+        ret = poll_filters();
+        if (ret < 0 && ret != AVERROR_EOF) {
+            char errbuf[128];
+            av_strerror(ret, errbuf, sizeof(errbuf));
+
+            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
+            return ret;
+        }
+
+        ret = configure_filtergraph(fg);
+        if (ret < 0) {
+            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
+            return ret;
+        }
+    }
+
+    ret = av_buffersrc_add_frame(ifilter->filter, frame);
+    if (ret < 0) {
+        av_log(NULL, AV_LOG_ERROR, "Error while filtering\n");
+        return ret;
+    }
+
+    return 0;
+}
+
+static int ifilter_send_eof(InputFilter *ifilter)
+{
+    int i, j, ret;
+
+    ifilter->eof = 1;
+
+    if (ifilter->filter) {
+        ret = av_buffersrc_add_frame(ifilter->filter, NULL);
+        if (ret < 0)
+            return ret;
+    } else {
+        // the filtergraph was never configured
+        FilterGraph *fg = ifilter->graph;
+        for (i = 0; i < fg->nb_inputs; i++)
+            if (!fg->inputs[i]->eof)
+                break;
+        if (i == fg->nb_inputs) {
+            // All the input streams have finished without the filtergraph
+            // ever being configured.
+            // Mark the output streams as finished.
+            for (j = 0; j < fg->nb_outputs; j++)
+                finish_output_stream(fg->outputs[j]->ost);
+        }
+    }
+
+    return 0;
 }
 
 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
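The decode() helper this comment refers to wraps the avcodec_send_packet()/avcodec_receive_frame() API behind the old got_output-style interface. A hedged sketch of that wrapper pattern; it follows the generic send/receive contract and is not necessarily byte-for-byte what avconv ships:

/* Hedged sketch of a send/receive decode wrapper; decode_one() is a
 * hypothetical name. */
#include <libavcodec/avcodec.h>

static int decode_one(AVCodecContext *avctx, AVFrame *frame,
                      int *got_frame, AVPacket *pkt)
{
    int ret;

    *got_frame = 0;

    if (pkt) {
        ret = avcodec_send_packet(avctx, pkt);
        /* AVERROR_EOF only means the decoder has already been flushed */
        if (ret < 0)
            return ret == AVERROR_EOF ? 0 : ret;
    }

    ret = avcodec_receive_frame(avctx, frame);
    if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
        return ret;
    if (ret >= 0)
        *got_frame = 1;

    return 0;
}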
@@ -1160,11 +1325,12 @@ int guess_input_channel_layout(InputStream *ist)
     return 1;
 }
 
-static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
+static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
+                        int *decode_failed)
 {
     AVFrame *decoded_frame, *f;
     AVCodecContext *avctx = ist->dec_ctx;
-    int i, ret, err = 0, resample_changed;
+    int i, ret, err = 0;
 
     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
         return AVERROR(ENOMEM);
@@ -1173,6 +1339,8 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
     decoded_frame = ist->decoded_frame;
 
     ret = decode(avctx, decoded_frame, got_output, pkt);
+    if (ret < 0)
+        *decode_failed = 1;
     if (!*got_output || ret < 0)
         return ret;
 
@@ -1182,52 +1350,11 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
     /* if the decoder provides a pts, use it instead of the last packet pts.
        the decoder could be delaying output by a packet or more. */
     if (decoded_frame->pts != AV_NOPTS_VALUE)
-        ist->next_dts = decoded_frame->pts;
+        ist->next_dts = av_rescale_q(decoded_frame->pts, ist->st->time_base, AV_TIME_BASE_Q);
     else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
         decoded_frame->pts = pkt->pts;
     }
 
-    resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
-                       ist->resample_channels       != avctx->channels               ||
-                       ist->resample_channel_layout != decoded_frame->channel_layout ||
-                       ist->resample_sample_rate    != decoded_frame->sample_rate;
-    if (resample_changed) {
-        char layout1[64], layout2[64];
-
-        if (!guess_input_channel_layout(ist)) {
-            av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
-                   "layout for Input Stream #%d.%d\n", ist->file_index,
-                   ist->st->index);
-            exit_program(1);
-        }
-        decoded_frame->channel_layout = avctx->channel_layout;
-
-        av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
-                                     ist->resample_channel_layout);
-        av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
-                                     decoded_frame->channel_layout);
-
-        av_log(NULL, AV_LOG_INFO,
-               "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
-               ist->file_index, ist->st->index,
-               ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
-               ist->resample_channels, layout1,
-               decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
-               avctx->channels, layout2);
-
-        ist->resample_sample_fmt     = decoded_frame->format;
-        ist->resample_sample_rate    = decoded_frame->sample_rate;
-        ist->resample_channel_layout = decoded_frame->channel_layout;
-        ist->resample_channels       = avctx->channels;
-
-        for (i = 0; i < nb_filtergraphs; i++)
-            if (ist_in_filtergraph(filtergraphs[i], ist) &&
-                configure_filtergraph(filtergraphs[i]) < 0) {
-                av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
-                exit_program(1);
-            }
-    }
-
     if (decoded_frame->pts != AV_NOPTS_VALUE)
         decoded_frame->pts = av_rescale_q(decoded_frame->pts,
                                           ist->st->time_base,
@@ -1242,7 +1369,7 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
         } else
             f = decoded_frame;
 
-        err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
+        err = ifilter_send_frame(ist->filters[i], f);
         if (err < 0)
             break;
     }
@@ -1252,10 +1379,11 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
     return err < 0 ? err : ret;
 }
 
-static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
+static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output,
+                        int *decode_failed)
 {
     AVFrame *decoded_frame, *f;
-    int i, ret = 0, err = 0, resample_changed;
+    int i, ret = 0, err = 0;
 
     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
         return AVERROR(ENOMEM);
@@ -1264,6 +1392,8 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
     decoded_frame = ist->decoded_frame;
 
     ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt);
+    if (ret < 0)
+        *decode_failed = 1;
     if (!*got_output || ret < 0)
         return ret;
 
@@ -1276,42 +1406,14 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
     }
     ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
 
-    decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
+    decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pts,
                                            decoded_frame->pkt_dts);
+    if (ist->framerate.num)
+        decoded_frame->pts = ist->cfr_next_pts++;
 
     if (ist->st->sample_aspect_ratio.num)
         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
 
-    resample_changed = ist->resample_width   != decoded_frame->width  ||
-                       ist->resample_height  != decoded_frame->height ||
-                       ist->resample_pix_fmt != decoded_frame->format;
-    if (resample_changed) {
-        av_log(NULL, AV_LOG_INFO,
-               "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
-               ist->file_index, ist->st->index,
-               ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
-               decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
-
-        ret = poll_filters();
-        if (ret < 0 && ret != AVERROR_EOF) {
-            char errbuf[128];
-            av_strerror(ret, errbuf, sizeof(errbuf));
-
-            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
-        }
-
-        ist->resample_width   = decoded_frame->width;
-        ist->resample_height  = decoded_frame->height;
-        ist->resample_pix_fmt = decoded_frame->format;
-
-        for (i = 0; i < nb_filtergraphs; i++)
-            if (ist_in_filtergraph(filtergraphs[i], ist) &&
-                configure_filtergraph(filtergraphs[i]) < 0) {
-                av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
-                exit_program(1);
-            }
-    }
-
     for (i = 0; i < ist->nb_filters; i++) {
         if (i < ist->nb_filters - 1) {
             f = ist->filter_frame;
@@ -1321,7 +1423,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
         } else
             f = decoded_frame;
 
-        err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
+        err = ifilter_send_frame(ist->filters[i], f);
         if (err < 0)
             break;
     }
@@ -1332,13 +1434,16 @@ fail:
     return err < 0 ? err : ret;
 }
 
-static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
+static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
+                               int *decode_failed)
 {
     AVSubtitle subtitle;
     int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                           &subtitle, got_output, pkt);
-    if (ret < 0)
+    if (ret < 0) {
+        *decode_failed = 1;
         return ret;
+    }
     if (!*got_output)
         return ret;
 
@@ -1350,7 +1455,7 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
         if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
             continue;
 
-        do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
+        do_subtitle_out(output_files[ost->file_index], ost, ist, &subtitle, pkt->pts);
     }
 
     avsubtitle_free(&subtitle);
@@ -1361,7 +1466,7 @@ static int send_filter_eof(InputStream *ist)
 {
     int i, ret;
     for (i = 0; i < ist->nb_filters; i++) {
-        ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
+        ret = ifilter_send_eof(ist->filters[i]);
         if (ret < 0)
             return ret;
     }
@@ -1394,16 +1499,19 @@ static void process_input_packet(InputStream *ist, const AVPacket *pkt, int no_e
     while (ist->decoding_needed && (!pkt || avpkt.size > 0)) {
         int ret = 0;
         int got_output = 0;
+        int decode_failed = 0;
 
         if (!repeating)
             ist->last_dts = ist->next_dts;
 
         switch (ist->dec_ctx->codec_type) {
         case AVMEDIA_TYPE_AUDIO:
-            ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output);
+            ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output,
+                                   &decode_failed);
             break;
         case AVMEDIA_TYPE_VIDEO:
-            ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output);
+            ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output,
+                                   &decode_failed);
             if (repeating && !got_output)
                 ;
             else if (pkt && pkt->duration)
@@ -1420,16 +1528,21 @@ static void process_input_packet(InputStream *ist, const AVPacket *pkt, int no_e
         case AVMEDIA_TYPE_SUBTITLE:
             if (repeating)
                 break;
-            ret = transcode_subtitles(ist, &avpkt, &got_output);
+            ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
             break;
         default:
             return;
         }
 
         if (ret < 0) {
-            av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
-                   ist->file_index, ist->st->index);
-            if (exit_on_error)
+            if (decode_failed) {
+                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
+                       ist->file_index, ist->st->index);
+            } else {
+                av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
+                       "data for stream #%d:%d\n", ist->file_index, ist->st->index);
+            }
+            if (!decode_failed || exit_on_error)
                 exit_program(1);
             break;
         }
@@ -1484,8 +1597,14 @@ static void print_sdp(void)
 {
     char sdp[16384];
     int i;
-    AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
+    AVFormatContext **avc;
+
+    for (i = 0; i < nb_output_files; i++) {
+        if (!output_files[i]->header_written)
+            return;
+    }
 
+    avc = av_malloc(sizeof(*avc) * nb_output_files);
     if (!avc)
         exit_program(1);
     for (i = 0; i < nb_output_files; i++)
@@ -1536,6 +1655,13 @@ static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat
             }
             continue;
         }
+
+        if (ist->hw_frames_ctx) {
+            s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
+            if (!s->hw_frames_ctx)
+                return AV_PIX_FMT_NONE;
+        }
+
         ist->active_hwaccel_id = hwaccel->id;
         ist->hwaccel_pix_fmt   = *p;
         break;
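get_format() above attaches the input stream's hw_frames_ctx to the decoder whenever a hwaccel format is selected. For context, a hedged sketch of a minimal get_format callback in the same spirit; want_hw, hw_pix_fmt, frames_ref and my_get_format() are hypothetical application state, not avconv fields:

/* Hedged sketch of a get_format callback: take a hardware format if we
 * want one (attaching a frames context), else fall back to the first
 * software format the decoder offers. All names here are hypothetical. */
#include <libavcodec/avcodec.h>
#include <libavutil/pixdesc.h>

static enum AVPixelFormat hw_pix_fmt = AV_PIX_FMT_VAAPI;
static AVBufferRef       *frames_ref = NULL;
static int                want_hw    = 1;

static enum AVPixelFormat my_get_format(AVCodecContext *s,
                                        const enum AVPixelFormat *fmts)
{
    const enum AVPixelFormat *p;

    for (p = fmts; *p != AV_PIX_FMT_NONE; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);

        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;                      /* first software format: acceptable fallback */

        if (want_hw && *p == hw_pix_fmt) {
            if (frames_ref) {
                s->hw_frames_ctx = av_buffer_ref(frames_ref);
                if (!s->hw_frames_ctx)
                    return AV_PIX_FMT_NONE;
            }
            break;                      /* take the hardware format */
        }
    }
    return *p;
}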
@@ -1558,6 +1684,7 @@ static int init_input_stream(int ist_index, char *error, int error_len)
 {
     int ret;
     InputStream *ist = input_streams[ist_index];
+
     if (ist->decoding_needed) {
         AVCodec *codec = ist->dec;
         if (!codec) {
@@ -1615,6 +1742,53 @@ static InputStream *get_input_stream(OutputStream *ost)
     return NULL;
 }
 
+/* open the muxer when all the streams are initialized */
+static int check_init_output_file(OutputFile *of, int file_index)
+{
+    int ret, i;
+
+    for (i = 0; i < of->ctx->nb_streams; i++) {
+        OutputStream *ost = output_streams[of->ost_index + i];
+        if (!ost->initialized)
+            return 0;
+    }
+
+    of->ctx->interrupt_callback = int_cb;
+
+    ret = avformat_write_header(of->ctx, &of->opts);
+    if (ret < 0) {
+        char errbuf[128];
+
+        av_strerror(ret, errbuf, sizeof(errbuf));
+
+        av_log(NULL, AV_LOG_ERROR,
+               "Could not write header for output file #%d "
+               "(incorrect codec parameters ?): %s",
+               file_index, errbuf);
+        return ret;
+    }
+    assert_avoptions(of->opts);
+    of->header_written = 1;
+
+    av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
+
+    if (want_sdp)
+        print_sdp();
+
+    /* flush the muxing queues */
+    for (i = 0; i < of->ctx->nb_streams; i++) {
+        OutputStream *ost = output_streams[of->ost_index + i];
+
+        while (av_fifo_size(ost->muxing_queue)) {
+            AVPacket pkt;
+            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
+            write_packet(of, &pkt, ost);
+        }
+    }
+
+    return 0;
+}
+
 static int init_output_bsfs(OutputStream *ost)
 {
     AVBSFContext *ctx;
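check_init_output_file() above writes the header with the per-file options dictionary and then calls assert_avoptions() to reject anything the muxer did not consume. A hedged sketch of that pair as a standalone helper; write_header_checked() is a hypothetical name:

/* Hedged sketch: write the header, then treat any option left in the
 * dictionary as unrecognized. */
#include <libavformat/avformat.h>
#include <libavutil/dict.h>
#include <libavutil/log.h>

static int write_header_checked(AVFormatContext *oc, AVDictionary **opts)
{
    const AVDictionaryEntry *e = NULL;
    int ret;

    ret = avformat_write_header(oc, opts);
    if (ret < 0)
        return ret;

    /* options left in the dictionary were not consumed by the muxer */
    if ((e = av_dict_get(*opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(oc, AV_LOG_ERROR, "Option %s not found.\n", e->key);
        return AVERROR_OPTION_NOT_FOUND;
    }
    return 0;
}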
@@ -1623,17 +1797,8 @@ static int init_output_bsfs(OutputStream *ost)
     if (!ost->nb_bitstream_filters)
         return 0;
 
-    ost->bsf_ctx = av_mallocz_array(ost->nb_bitstream_filters, sizeof(*ost->bsf_ctx));
-    if (!ost->bsf_ctx)
-        return AVERROR(ENOMEM);
-
     for (i = 0; i < ost->nb_bitstream_filters; i++) {
-        ret = av_bsf_alloc(ost->bitstream_filters[i], &ctx);
-        if (ret < 0) {
-            av_log(NULL, AV_LOG_ERROR, "Error allocating a bitstream filter context\n");
-            return ret;
-        }
-        ost->bsf_ctx[i] = ctx;
+        ctx = ost->bsf_ctx[i];
 
         ret = avcodec_parameters_copy(ctx->par_in,
                                       i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
@@ -1645,12 +1810,11 @@ static int init_output_bsfs(OutputStream *ost)
         ret = av_bsf_init(ctx);
         if (ret < 0) {
             av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
-                   ost->bitstream_filters[i]->name);
+                   ctx->filter->name);
             return ret;
         }
     }
 
-    ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
     ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
     if (ret < 0)
         return ret;
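init_output_bsfs() now only copies parameters down the chain and calls av_bsf_init(); allocation of the AVBSFContexts happens during option parsing. A hedged sketch of bringing up a single bitstream filter with the same AVBSF API; "h264_mp4toannexb" is just an example filter name and open_one_bsf() a hypothetical helper:

/* Hedged sketch of opening one bitstream filter with the new API. */
#include <libavcodec/avcodec.h>

static int open_one_bsf(const AVCodecParameters *par_in, AVRational time_base_in,
                        AVBSFContext **out)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name("h264_mp4toannexb");
    AVBSFContext *ctx = NULL;
    int ret;

    if (!f)
        return AVERROR_BSF_NOT_FOUND;

    ret = av_bsf_alloc(f, &ctx);
    if (ret < 0)
        return ret;

    /* the filter needs the stream parameters before av_bsf_init() */
    ret = avcodec_parameters_copy(ctx->par_in, par_in);
    if (ret < 0)
        goto fail;
    ctx->time_base_in = time_base_in;

    ret = av_bsf_init(ctx);
    if (ret < 0)
        goto fail;

    *out = ctx;
    return 0;

fail:
    av_bsf_free(&ctx);
    return ret;
}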
@@ -1660,6 +1824,192 @@ static int init_output_bsfs(OutputStream *ost)
     return 0;
 }
 
+static int init_output_stream_streamcopy(OutputStream *ost)
+{
+    OutputFile *of = output_files[ost->file_index];
+    InputStream *ist = get_input_stream(ost);
+    AVCodecParameters *par_dst = ost->st->codecpar;
+    AVCodecParameters *par_src = ist->st->codecpar;
+    AVRational sar;
+    uint32_t codec_tag = par_dst->codec_tag;
+    int i, ret;
+
+    if (!codec_tag) {
+        if (!of->ctx->oformat->codec_tag ||
+             av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
+             av_codec_get_tag(of->ctx->oformat->codec_tag, par_src->codec_id) <= 0)
+            codec_tag = par_src->codec_tag;
+    }
+
+    ret = avcodec_parameters_copy(par_dst, par_src);
+    if (ret < 0)
+        return ret;
+
+    par_dst->codec_tag = codec_tag;
+
+    ost->st->disposition = ist->st->disposition;
+
+    ost->st->time_base = ist->st->time_base;
+
+    if (ist->st->nb_side_data) {
+        ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
+                                              sizeof(*ist->st->side_data));
+        if (!ost->st->side_data)
+            return AVERROR(ENOMEM);
+
+        for (i = 0; i < ist->st->nb_side_data; i++) {
+            const AVPacketSideData *sd_src = &ist->st->side_data[i];
+            AVPacketSideData *sd_dst = &ost->st->side_data[i];
+
+            sd_dst->data = av_malloc(sd_src->size);
+            if (!sd_dst->data)
+                return AVERROR(ENOMEM);
+            memcpy(sd_dst->data, sd_src->data, sd_src->size);
+            sd_dst->size = sd_src->size;
+            sd_dst->type = sd_src->type;
+            ost->st->nb_side_data++;
+        }
+    }
+
+    ost->parser = av_parser_init(par_dst->codec_id);
+    ost->parser_avctx = avcodec_alloc_context3(NULL);
+    if (!ost->parser_avctx)
+        return AVERROR(ENOMEM);
+
+    if (par_dst->codec_type == AVMEDIA_TYPE_VIDEO) {
+        if (ost->frame_aspect_ratio)
+            sar = av_d2q(ost->frame_aspect_ratio * par_dst->height / par_dst->width, 255);
+        else if (ist->st->sample_aspect_ratio.num)
+            sar = ist->st->sample_aspect_ratio;
+        else
+            sar = par_src->sample_aspect_ratio;
+        ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
+    }
+
+    return 0;
+}
+
+static void set_encoder_id(OutputFile *of, OutputStream *ost)
+{
+    AVDictionaryEntry *e;
+
+    uint8_t *encoder_string;
+    int encoder_string_len;
+    int format_flags = 0;
+
+    e = av_dict_get(of->opts, "fflags", NULL, 0);
+    if (e) {
+        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
+        if (!o)
+            return;
+        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
+    }
+
+    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
+    encoder_string     = av_mallocz(encoder_string_len);
+    if (!encoder_string)
+        exit_program(1);
+
+    if (!(format_flags & AVFMT_FLAG_BITEXACT))
+        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
+    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
+    av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
+                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
+}
+
+static void parse_forced_key_frames(char *kf, OutputStream *ost,
+                                    AVCodecContext *avctx)
+{
+    char *p;
+    int n = 1, i;
+    int64_t t;
+
+    for (p = kf; *p; p++)
+        if (*p == ',')
+            n++;
+    ost->forced_kf_count = n;
+    ost->forced_kf_pts   = av_malloc(sizeof(*ost->forced_kf_pts) * n);
+    if (!ost->forced_kf_pts) {
+        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
+        exit_program(1);
+    }
+
+    p = kf;
+    for (i = 0; i < n; i++) {
+        char *next = strchr(p, ',');
+
+        if (next)
+            *next++ = 0;
+
+        t = parse_time_or_die("force_key_frames", p, 1);
+        ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
+
+        p = next;
+    }
+}
+
+static int init_output_stream_encode(OutputStream *ost)
+{
+    InputStream *ist = get_input_stream(ost);
+    AVCodecContext *enc_ctx = ost->enc_ctx;
+    AVCodecContext *dec_ctx = NULL;
+
+    set_encoder_id(output_files[ost->file_index], ost);
+
+    if (ist) {
+        ost->st->disposition = ist->st->disposition;
+
+        dec_ctx = ist->dec_ctx;
+
+        enc_ctx->bits_per_raw_sample    = dec_ctx->bits_per_raw_sample;
+        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
+    }
+
+    switch (enc_ctx->codec_type) {
+    case AVMEDIA_TYPE_AUDIO:
+        enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
+        enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
+        enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
+        enc_ctx->channels       = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
+        enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
+        break;
+    case AVMEDIA_TYPE_VIDEO:
+        enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
+
+        enc_ctx->width  = ost->filter->filter->inputs[0]->w;
+        enc_ctx->height = ost->filter->filter->inputs[0]->h;
+        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
+            ost->frame_aspect_ratio ? // overridden by the -aspect cli option
+            av_d2q(ost->frame_aspect_ratio * enc_ctx->height/enc_ctx->width, 255) :
+            ost->filter->filter->inputs[0]->sample_aspect_ratio;
+        enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
+
+        enc_ctx->framerate = ost->frame_rate;
+
+        ost->st->avg_frame_rate = ost->frame_rate;
+
+        if (dec_ctx &&
+            (enc_ctx->width   != dec_ctx->width  ||
+             enc_ctx->height  != dec_ctx->height ||
+             enc_ctx->pix_fmt != dec_ctx->pix_fmt)) {
+            enc_ctx->bits_per_raw_sample = 0;
+        }
+
+        if (ost->forced_keyframes)
+            parse_forced_key_frames(ost->forced_keyframes, ost,
+                                    ost->enc_ctx);
+        break;
+    case AVMEDIA_TYPE_SUBTITLE:
+        enc_ctx->time_base = (AVRational){1, 1000};
+        break;
+    default:
+        abort();
+        break;
+    }
+
+    return 0;
+}
+
 static int init_output_stream(OutputStream *ost, char *error, int error_len)
 {
     int ret = 0;
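parse_forced_key_frames() above splits a comma-separated list of timestamps and rescales each one into the encoder time base. A standalone sketch of the same idea using av_parse_time() directly instead of avconv's parse_time_or_die(); the input string and time base are hypothetical:

/* Standalone sketch of forced-keyframe parsing with made-up inputs. */
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>
#include <libavutil/parseutils.h>

int main(void)
{
    char spec[] = "0,00:00:05,12.5";          /* -force_key_frames style list */
    AVRational enc_tb = { 1, 25 };            /* hypothetical encoder time base */
    char *p = spec, *next;

    while (p) {
        int64_t t;

        next = strchr(p, ',');
        if (next)
            *next++ = 0;

        if (av_parse_time(&t, p, 1) < 0) {    /* 1: parse as a duration */
            fprintf(stderr, "Invalid time '%s'\n", p);
            return 1;
        }
        /* av_parse_time() returns microseconds (AV_TIME_BASE units) */
        printf("keyframe at pts %"PRId64"\n",
               av_rescale_q(t, AV_TIME_BASE_Q, enc_tb));
        p = next;
    }
    return 0;
}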
@@ -1669,6 +2019,10 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
         AVCodecContext *dec = NULL;
         InputStream *ist;
 
+        ret = init_output_stream_encode(ost);
+        if (ret < 0)
+            return ret;
+
         if ((ist = get_input_stream(ost)))
             dec = ist->dec_ctx;
         if (dec && dec->subtitle_header) {
@@ -1681,7 +2035,9 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
         if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
             av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
 
-        if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
+        if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx &&
+            ((AVHWFramesContext*)ost->filter->filter->inputs[0]->hw_frames_ctx->data)->format ==
+            ost->filter->filter->inputs[0]->format) {
             ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
             if (!ost->enc_ctx->hw_frames_ctx)
                 return AVERROR(ENOMEM);
@@ -1707,14 +2063,6 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
                    "Error initializing the output stream codec context.\n");
             exit_program(1);
         }
-        /*
-         * FIXME: this is only so that the bitstream filters and parsers (that still
-         * work with a codec context) get the parameter values.
-         * This should go away with the new BSF/parser API.
-         */
-        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
-        if (ret < 0)
-            return ret;
 
         if (ost->enc_ctx->nb_coded_side_data) {
             int i;
@@ -1739,17 +2087,16 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
         }
 
         ost->st->time_base = ost->enc_ctx->time_base;
-    } else {
-        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
+    } else if (ost->stream_copy) {
+        ret = init_output_stream_streamcopy(ost);
         if (ret < 0)
             return ret;
 
         /*
-         * FIXME: this is only so that the bitstream filters and parsers (that still
-         * work with a codec context) get the parameter values.
-         * This should go away with the new BSF/parser API.
+         * FIXME: this is needed only so that the parser used during streamcopy
+         * gets the parameter values. This should go away with the new parser API.
          */
-        ret = avcodec_parameters_to_context(ost->st->codec, ost->st->codecpar);
+        ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
         if (ret < 0)
             return ret;
     }
@@ -1761,76 +2108,23 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
     if (ret < 0)
         return ret;
 
-    return ret;
-}
-
-static void parse_forced_key_frames(char *kf, OutputStream *ost,
-                                    AVCodecContext *avctx)
-{
-    char *p;
-    int n = 1, i;
-    int64_t t;
-
-    for (p = kf; *p; p++)
-        if (*p == ',')
-            n++;
-    ost->forced_kf_count = n;
-    ost->forced_kf_pts   = av_malloc(sizeof(*ost->forced_kf_pts) * n);
-    if (!ost->forced_kf_pts) {
-        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
-        exit_program(1);
-    }
-
-    p = kf;
-    for (i = 0; i < n; i++) {
-        char *next = strchr(p, ',');
-
-        if (next)
-            *next++ = 0;
-
-        t = parse_time_or_die("force_key_frames", p, 1);
-        ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
-
-        p = next;
-    }
-}
-
-static void set_encoder_id(OutputFile *of, OutputStream *ost)
-{
-    AVDictionaryEntry *e;
-
-    uint8_t *encoder_string;
-    int encoder_string_len;
-    int format_flags = 0;
+    ost->mux_timebase = ost->st->time_base;
 
-    e = av_dict_get(of->opts, "fflags", NULL, 0);
-    if (e) {
-        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
-        if (!o)
-            return;
-        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
-    }
+    ost->initialized = 1;
 
-    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
-    encoder_string     = av_mallocz(encoder_string_len);
-    if (!encoder_string)
-        exit_program(1);
+    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
+    if (ret < 0)
+        return ret;
 
-    if (!(format_flags & AVFMT_FLAG_BITEXACT))
-        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
-    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
-    av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
-                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
+    return ret;
 }
 
 static int transcode_init(void)
 {
     int ret = 0, i, j, k;
-    AVFormatContext *oc;
     OutputStream *ost;
     InputStream *ist;
     char error[1024];
-    int want_sdp = 1;
 
     /* init framerate emulation */
     for (i = 0; i < nb_input_files; i++) {
@@ -1840,227 +2134,6 @@ static int transcode_init(void)
                 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
     }
 
-    /* for each output stream, we compute the right encoding parameters */
-    for (i = 0; i < nb_output_streams; i++) {
-        ost = output_streams[i];
-        oc  = output_files[ost->file_index]->ctx;
-        ist = get_input_stream(ost);
-
-        if (ost->attachment_filename)
-            continue;
-
-        if (ist) {
-            ost->st->disposition          = ist->st->disposition;
-        }
-
-        if (ost->stream_copy) {
-            AVCodecParameters *par_dst = ost->st->codecpar;
-            AVCodecParameters *par_src = ist->st->codecpar;
-            AVRational sar;
-            uint64_t extra_size;
-
-            av_assert0(ist && !ost->filter);
-
-            extra_size = (uint64_t)par_src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
-
-            if (extra_size > INT_MAX) {
-                return AVERROR(EINVAL);
-            }
-
-            /* if stream_copy is selected, no need to decode or encode */
-            par_dst->codec_id   = par_src->codec_id;
-            par_dst->codec_type = par_src->codec_type;
-
-            if (!par_dst->codec_tag) {
-                if (!oc->oformat->codec_tag ||
-                     av_codec_get_id (oc->oformat->codec_tag, par_src->codec_tag) == par_dst->codec_id ||
-                     av_codec_get_tag(oc->oformat->codec_tag, par_src->codec_id) <= 0)
-                    par_dst->codec_tag = par_src->codec_tag;
-            }
-
-            par_dst->bit_rate        = par_src->bit_rate;
-            par_dst->field_order     = par_src->field_order;
-            par_dst->chroma_location = par_src->chroma_location;
-            par_dst->extradata       = av_mallocz(extra_size);
-            if (!par_dst->extradata) {
-                return AVERROR(ENOMEM);
-            }
-            memcpy(par_dst->extradata, par_src->extradata, par_src->extradata_size);
-            par_dst->extradata_size = par_src->extradata_size;
-
-            ost->st->time_base = ist->st->time_base;
-
-            if (ist->st->nb_side_data) {
-                ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
-                                                      sizeof(*ist->st->side_data));
-                if (!ost->st->side_data)
-                    return AVERROR(ENOMEM);
-
-                for (j = 0; j < ist->st->nb_side_data; j++) {
-                    const AVPacketSideData *sd_src = &ist->st->side_data[j];
-                    AVPacketSideData *sd_dst = &ost->st->side_data[j];
-
-                    sd_dst->data = av_malloc(sd_src->size);
-                    if (!sd_dst->data)
-                        return AVERROR(ENOMEM);
-                    memcpy(sd_dst->data, sd_src->data, sd_src->size);
-                    sd_dst->size = sd_src->size;
-                    sd_dst->type = sd_src->type;
-                    ost->st->nb_side_data++;
-                }
-            }
-
-            ost->parser = av_parser_init(par_dst->codec_id);
-
-            switch (par_dst->codec_type) {
-            case AVMEDIA_TYPE_AUDIO:
-                if (audio_volume != 256) {
-                    av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
-                    exit_program(1);
-                }
-                par_dst->channel_layout     = par_src->channel_layout;
-                par_dst->sample_rate        = par_src->sample_rate;
-                par_dst->channels           = par_src->channels;
-                par_dst->block_align        = par_src->block_align;
-                break;
-            case AVMEDIA_TYPE_VIDEO:
-                par_dst->format             = par_src->format;
-                par_dst->width              = par_src->width;
-                par_dst->height             = par_src->height;
-                if (ost->frame_aspect_ratio)
-                    sar = av_d2q(ost->frame_aspect_ratio * par_dst->height / par_dst->width, 255);
-                else if (ist->st->sample_aspect_ratio.num)
-                    sar = ist->st->sample_aspect_ratio;
-                else
-                    sar = par_src->sample_aspect_ratio;
-                ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
-                break;
-            case AVMEDIA_TYPE_SUBTITLE:
-                par_dst->width  = par_src->width;
-                par_dst->height = par_src->height;
-                break;
-            case AVMEDIA_TYPE_DATA:
-            case AVMEDIA_TYPE_ATTACHMENT:
-                break;
-            default:
-                abort();
-            }
-        } else {
-            AVCodecContext *enc_ctx = ost->enc_ctx;
-            AVCodecContext *dec_ctx = NULL;
-
-            if (!ost->enc) {
-                /* should only happen when a default codec is not present. */
-                snprintf(error, sizeof(error), "Automatic encoder selection "
-                         "failed for output stream #%d:%d. Default encoder for "
-                         "format %s is probably disabled. Please choose an "
-                         "encoder manually.\n", ost->file_index, ost->index,
-                         oc->oformat->name);
-                ret = AVERROR(EINVAL);
-                goto dump_format;
-            }
-
-            set_encoder_id(output_files[ost->file_index], ost);
-
-            if (ist) {
-                dec_ctx = ist->dec_ctx;
-
-                enc_ctx->bits_per_raw_sample    = dec_ctx->bits_per_raw_sample;
-                enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
-            }
-
-            /*
-             * We want CFR output if and only if one of those is true:
-             * 1) user specified output framerate with -r
-             * 2) user specified -vsync cfr
-             * 3) output format is CFR and the user didn't force vsync to
-             *    something else than CFR
-             *
-             * in such a case, set ost->frame_rate
-             */
-            if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO &&
-                !ost->frame_rate.num && ist &&
-                (video_sync_method ==  VSYNC_CFR ||
-                 (video_sync_method ==  VSYNC_AUTO &&
-                  !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
-                if (ist->framerate.num)
-                    ost->frame_rate = ist->framerate;
-                else if (ist->st->avg_frame_rate.num)
-                    ost->frame_rate = ist->st->avg_frame_rate;
-                else {
-                    av_log(NULL, AV_LOG_WARNING, "Constant framerate requested "
-                           "for the output stream #%d:%d, but no information "
-                           "about the input framerate is available. Falling "
-                           "back to a default value of 25fps. Use the -r option "
-                           "if you want a different framerate.\n",
-                           ost->file_index, ost->index);
-                    ost->frame_rate = (AVRational){ 25, 1 };
-                }
-
-                if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
-                    int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
-                    ost->frame_rate = ost->enc->supported_framerates[idx];
-                }
-            }
-
-#if CONFIG_LIBMFX
-            if (qsv_transcode_init(ost))
-                exit_program(1);
-#endif
-
-            if (!ost->filter &&
-                (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
-                 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
-                    FilterGraph *fg;
-                    fg = init_simple_filtergraph(ist, ost);
-                    if (configure_filtergraph(fg)) {
-                        av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
-                        exit_program(1);
-                    }
-            }
-
-            switch (enc_ctx->codec_type) {
-            case AVMEDIA_TYPE_AUDIO:
-                enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
-                enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
-                enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
-                enc_ctx->channels       = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
-                enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
-                break;
-            case AVMEDIA_TYPE_VIDEO:
-                enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
-
-                enc_ctx->width  = ost->filter->filter->inputs[0]->w;
-                enc_ctx->height = ost->filter->filter->inputs[0]->h;
-                enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
-                    ost->frame_aspect_ratio ? // overridden by the -aspect cli option
-                    av_d2q(ost->frame_aspect_ratio * enc_ctx->height/enc_ctx->width, 255) :
-                    ost->filter->filter->inputs[0]->sample_aspect_ratio;
-                enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
-
-                ost->st->avg_frame_rate = ost->frame_rate;
-
-                if (dec_ctx &&
-                    (enc_ctx->width   != dec_ctx->width  ||
-                     enc_ctx->height  != dec_ctx->height ||
-                     enc_ctx->pix_fmt != dec_ctx->pix_fmt)) {
-                    enc_ctx->bits_per_raw_sample = 0;
-                }
-
-                if (ost->forced_keyframes)
-                    parse_forced_key_frames(ost->forced_keyframes, ost,
-                                            ost->enc_ctx);
-                break;
-            case AVMEDIA_TYPE_SUBTITLE:
-                enc_ctx->time_base = (AVRational){1, 1000};
-                break;
-            default:
-                abort();
-                break;
-            }
-        }
-    }
-
     /* init input streams */
     for (i = 0; i < nb_input_streams; i++)
         if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
@@ -2068,6 +2141,10 @@ static int transcode_init(void)
 
     /* open each encoder */
     for (i = 0; i < nb_output_streams; i++) {
+        // skip streams fed from filtergraphs until we have a frame for them
+        if (output_streams[i]->filter)
+            continue;
+
         ret = init_output_stream(output_streams[i], error, sizeof(error));
         if (ret < 0)
             goto dump_format;
@@ -2090,40 +2167,14 @@ static int transcode_init(void)
         }
     }
 
-    /* open files and write file headers */
-    for (i = 0; i < nb_output_files; i++) {
-        oc = output_files[i]->ctx;
-        oc->interrupt_callback = int_cb;
-        if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
-            char errbuf[128];
-            av_strerror(ret, errbuf, sizeof(errbuf));
-            snprintf(error, sizeof(error),
-                     "Could not write header for output file #%d "
-                     "(incorrect codec parameters ?): %s",
-                     i, errbuf);
-            ret = AVERROR(EINVAL);
-            goto dump_format;
-        }
-        assert_avoptions(output_files[i]->opts);
-        if (strcmp(oc->oformat->name, "rtp")) {
-            want_sdp = 0;
-        }
-    }
-
  dump_format:
-    /* dump the file output parameters - cannot be done before in case
-       of stream copy */
-    for (i = 0; i < nb_output_files; i++) {
-        av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
-    }
-
     /* dump the stream mapping */
     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
     for (i = 0; i < nb_input_streams; i++) {
         ist = input_streams[i];
 
         for (j = 0; j < ist->nb_filters; j++) {
-            if (ist->filters[j]->graph->graph_desc) {
+            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                        ist->filters[j]->name);
@@ -2144,7 +2195,7 @@ static int transcode_init(void)
             continue;
         }
 
-        if (ost->filter && ost->filter->graph->graph_desc) {
+        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
             /* output from a complex graph */
             av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
             if (nb_filtergraphs > 1)
@@ -2205,10 +2256,6 @@ static int transcode_init(void)
         return ret;
     }
 
-    if (want_sdp) {
-        print_sdp();
-    }
-
     return 0;
 }
 
@@ -2711,6 +2758,13 @@ static int transcode(void)
     /* write the trailer if needed and close file */
     for (i = 0; i < nb_output_files; i++) {
         os = output_files[i]->ctx;
+        if (!output_files[i]->header_written) {
+            av_log(NULL, AV_LOG_ERROR,
+                   "Nothing was written into output file %d (%s), because "
+                   "at least one of its streams received no packets.\n",
+                   i, os->filename);
+            continue;
+        }
         av_write_trailer(os);
     }
 
@@ -2800,7 +2854,7 @@ static int64_t getmaxrss(void)
 
 int main(int argc, char **argv)
 {
-    int ret;
+    int i, ret;
     int64_t ti;
 
     register_exit(avconv_cleanup);
@@ -2835,6 +2889,11 @@ int main(int argc, char **argv)
         exit_program(1);
     }
 
+    for (i = 0; i < nb_output_files; i++) {
+        if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
+            want_sdp = 0;
+    }
+
     ti = getutime();
     if (transcode() < 0)
         exit_program(1);
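main() now decides want_sdp up front: an SDP is printed only when every output file uses the RTP muxer, and print_sdp() (earlier in this diff) additionally waits until all headers have been written. For reference, a hedged sketch of the libavformat call behind it; dump_sdp() and its arguments are hypothetical:

/* Hedged sketch of the av_sdp_create() call behind print_sdp(); the
 * output contexts are assumed to be set up elsewhere. */
#include <stdio.h>
#include <libavformat/avformat.h>

static void dump_sdp(AVFormatContext **rtp_outputs, int n)
{
    char sdp[16384];

    if (av_sdp_create(rtp_outputs, n, sdp, sizeof(sdp)) < 0) {
        fprintf(stderr, "Failed to create SDP\n");
        return;
    }
    printf("SDP:\n%s\n", sdp);
}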