lavfi/drawutils: make ff_draw_color() accept a const rgba map
index 428b18f99db48d7218fb9ca6d0fc6e22cd999cad..cc1b32fc7995e0962d43270578003878cb06e68e 100644 (file)
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -31,7 +31,9 @@
 #include <errno.h>
 #include <signal.h>
 #include <limits.h>
+#if HAVE_ISATTY
 #include <unistd.h>
+#endif
 #include "libavformat/avformat.h"
 #include "libavdevice/avdevice.h"
 #include "libswscale/swscale.h"
@@ -51,6 +53,7 @@
 #include "libavutil/imgutils.h"
 #include "libavutil/timestamp.h"
 #include "libavutil/bprint.h"
+#include "libavutil/time.h"
 #include "libavformat/os_support.h"
 
 #include "libavformat/ffm.h" // not public API
 #define VSYNC_VFR         2
 #define VSYNC_DROP        0xff
 
-#define SINKA
-
 const char program_name[] = "ffmpeg";
 const int program_birth_year = 2000;
 
@@ -121,7 +122,7 @@ typedef struct {
     int ofile_idx, ostream_idx;               // output
 } AudioChannelMap;
 
-static const OptionDef options[];
+static const OptionDef *options;
 
 #define MAX_STREAMS 1024    /* arbitrary sanity check value */
 
@@ -157,7 +158,7 @@ static FILE *vstats_file;
 static int audio_volume = 256;
 
 static int exit_on_error = 0;
-static int using_stdin = 0;
+static int stdin_interaction = 1;
 static int run_as_daemon  = 0;
 static volatile int received_nb_signals = 0;
 static int64_t video_size = 0;
@@ -174,6 +175,9 @@ static float dts_error_threshold = 3600*30;
 static int print_stats = 1;
 static int debug_ts = 0;
 static int current_time;
+static AVIOContext *progress_avio = NULL;
+
+static uint8_t *subtitle_out;
 
 #if HAVE_PTHREADS
 /* signal to input threads that they should exit; set by the main thread */
@@ -221,13 +225,13 @@ typedef struct InputStream {
 
     int64_t       start;     /* time when read started */
     /* predicted dts of the next packet read for this stream or (when there are
-     * several frames in a packet) of the next frame in current packet */
+     * several frames in a packet) of the next frame in current packet (in AV_TIME_BASE units) */
     int64_t       next_dts;
-    /* dts of the last packet read for this stream */
-    int64_t       dts;
+    int64_t       dts;       ///< dts of the last packet read for this stream (in AV_TIME_BASE units)
 
-    int64_t       next_pts;  /* synthetic pts for the next decode frame */
-    int64_t       pts;       /* current pts of the decoded frame */
+    int64_t       next_pts;  ///< synthetic pts for the next decode frame (in AV_TIME_BASE units)
+    int64_t       pts;       ///< current pts of the decoded frame  (in AV_TIME_BASE units)
+    int           wrap_correction_done;
     double ts_scale;
     int is_start;            /* is 1 at the start and after a discontinuity */
     int saw_first_ts;
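/*
 * A minimal sketch (not part of this diff): "in AV_TIME_BASE units" for the
 * fields above means the stream's own timestamps are rescaled to the global
 * 1/AV_TIME_BASE (microsecond) time base. The time base and timestamp below
 * are made-up values.
 */
#include <inttypes.h>
#include <stdio.h>
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational stream_tb = { 1, 90000 };  /* hypothetical MPEG-TS time base */
    int64_t pkt_dts      = 180000;        /* 2 seconds in that time base    */

    /* this is the kind of value stored in InputStream.dts / next_dts */
    int64_t dts = av_rescale_q(pkt_dts, stream_tb, AV_TIME_BASE_Q);
    printf("dts = %"PRId64" us (%.3f s)\n", dts, dts / (double)AV_TIME_BASE);
    return 0;
}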
@@ -258,11 +262,12 @@ typedef struct InputStream {
 typedef struct InputFile {
     AVFormatContext *ctx;
     int eof_reached;      /* true if eof reached */
+    int unavailable;      /* true if the file is unavailable (possibly temporarily) */
     int ist_index;        /* index of first stream in input_streams */
-    int buffer_size;      /* current total buffer size */
     int64_t ts_offset;
     int nb_streams;       /* number of stream that ffmpeg is aware of; may be different
                              from ctx.nb_streams if new streams appear during av_read_frame() */
+    int nb_streams_warn;  /* number of streams that the user was warned of */
     int rate_emu;
 
 #if HAVE_PTHREADS
@@ -306,6 +311,7 @@ typedef struct OutputStream {
     int64_t *forced_kf_pts;
     int forced_kf_count;
     int forced_kf_index;
+    char *forced_keyframes;
 
     /* audio only */
     int audio_channels_map[SWR_CH_MAX];  /* list of the channels id to pick from the source stream */
@@ -321,6 +327,7 @@ typedef struct OutputStream {
     double swr_dither_scale;
     AVDictionary *opts;
     int is_past_recording_time;
+    int unavailable;  /* true if the stream is unavailable (possibly temporarily) */
     int stream_copy;
     const char *attachment_filename;
     int copy_initial_nonkeyframes;
@@ -340,8 +347,8 @@ typedef struct OutputFile {
     AVFormatContext *ctx;
     AVDictionary *opts;
     int ost_index;       /* index of the first stream in output_streams */
-    int64_t recording_time; /* desired length of the resulting file in microseconds */
-    int64_t start_time;     /* start time in microseconds */
+    int64_t recording_time;  ///< desired length of the resulting file in microseconds == AV_TIME_BASE units
+    int64_t start_time;      ///< start time in microseconds == AV_TIME_BASE units
     uint64_t limit_filesize; /* filesize limit expressed in bytes */
 } OutputFile;
 
@@ -530,8 +537,14 @@ static void reset_options(OptionsContext *o, int is_input)
 
     memset(o, 0, sizeof(*o));
 
-    if(is_input) o->recording_time = bak.recording_time;
-    else         o->recording_time = INT64_MAX;
+    if (is_input) {
+        o->recording_time = bak.recording_time;
+        if (o->recording_time != INT64_MAX)
+            av_log(NULL, AV_LOG_WARNING,
+                   "-t is not an input option, keeping it for the next output;"
+                   " consider fixing your command line.\n");
+    } else
+        o->recording_time = INT64_MAX;
     o->mux_max_delay  = 0.7;
     o->limit_filesize = UINT64_MAX;
     o->chapters_input_file = INT_MAX;
@@ -759,6 +772,8 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
             exit_program(1);
         }
     }
+    av_assert0(ist);
+
     ist->discard         = 0;
     ist->decoding_needed = 1;
     ist->st->discard = AVDISCARD_NONE;
@@ -783,17 +798,13 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
     AVFilterContext *last_filter = out->filter_ctx;
     int pad_idx = out->pad_idx;
     int ret;
+    char name[255];
     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
 
-#if FF_API_OLD_VSINK_API
+    snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
     ret = avfilter_graph_create_filter(&ofilter->filter,
                                        avfilter_get_by_name("buffersink"),
-                                       "ffmpeg_buffersink", NULL, NULL, fg->graph);
-#else
-    ret = avfilter_graph_create_filter(&ofilter->filter,
-                                       avfilter_get_by_name("buffersink"),
-                                       "ffmpeg_buffersink", NULL, buffersink_params, fg->graph);
-#endif
+                                       name, NULL, NULL/*buffersink_params*/, fg->graph);
     av_freep(&buffersink_params);
 
     if (ret < 0)
@@ -807,8 +818,10 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
                  codec->width,
                  codec->height,
                  (unsigned)ost->sws_flags);
+        snprintf(name, sizeof(name), "scaler for output stream %d:%d",
+                 ost->file_index, ost->index);
         if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
-                                                NULL, args, NULL, fg->graph)) < 0)
+                                                name, args, NULL, fg->graph)) < 0)
             return ret;
         if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
             return ret;
@@ -819,6 +832,8 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
 
     if ((pix_fmts = choose_pix_fmts(ost))) {
         AVFilterContext *filter;
+        snprintf(name, sizeof(name), "pixel format for output stream %d:%d",
+                 ost->file_index, ost->index);
         if ((ret = avfilter_graph_create_filter(&filter,
                                                 avfilter_get_by_name("format"),
                                                 "format", pix_fmts, NULL,
@@ -838,8 +853,10 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
 
         snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
                  ost->frame_rate.den);
+        snprintf(name, sizeof(name), "fps for output stream %d:%d",
+                 ost->file_index, ost->index);
         ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
-                                           "fps", args, NULL, fg->graph);
+                                           name, args, NULL, fg->graph);
         if (ret < 0)
             return ret;
 
@@ -863,11 +880,14 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
     AVFilterContext *last_filter = out->filter_ctx;
     int pad_idx = out->pad_idx;
     char *sample_fmts, *sample_rates, *channel_layouts;
+    char name[255];
     int ret;
 
+
+    snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
     ret = avfilter_graph_create_filter(&ofilter->filter,
-                                       avfilter_get_by_name("abuffersink_old"),
-                                       "ffmpeg_abuffersink_old", NULL, NULL, fg->graph);
+                                       avfilter_get_by_name("abuffersink"),
+                                       name, NULL, NULL, fg->graph);
     if (ret < 0)
         return ret;
 
@@ -930,9 +950,11 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
         av_freep(&sample_rates);
         av_freep(&channel_layouts);
 
+        snprintf(name, sizeof(name), "audio format for output stream %d:%d",
+                 ost->file_index, ost->index);
         ret = avfilter_graph_create_filter(&format,
                                            avfilter_get_by_name("aformat"),
-                                           "aformat", args, NULL, fg->graph);
+                                           name, args, NULL, fg->graph);
         if (ret < 0)
             return ret;
 
@@ -944,15 +966,6 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
         pad_idx = 0;
     }
 
-    if (audio_sync_method > 0 && 0) {
-        char args[256] = {0};
-
-        av_strlcatf(args, sizeof(args), "min_comp=0.001:min_hard_comp=%f", audio_drift_threshold);
-        if (audio_sync_method > 1)
-            av_strlcatf(args, sizeof(args), ":max_soft_comp=%d", -audio_sync_method);
-        AUTO_INSERT_FILTER("-async", "aresample", args);
-    }
-
     if (audio_volume != 256 && 0) {
         char args[256];
 
@@ -1008,6 +1021,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
                                          ist->st->r_frame_rate;
     AVRational sar;
     AVBPrint args;
+    char name[255];
     int pad_idx = in->pad_idx;
     int ret;
 
@@ -1025,17 +1039,21 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
              SWS_BILINEAR + ((ist->st->codec->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
     if (fr.num && fr.den)
         av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
+    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
+             ist->file_index, ist->st->index);
 
-    if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter, in->name,
+    if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter, name,
                                             args.str, NULL, fg->graph)) < 0)
         return ret;
 
     if (ist->framerate.num) {
         AVFilterContext *setpts;
 
+        snprintf(name, sizeof(name), "force CFR for input from stream %d:%d",
+                 ist->file_index, ist->st->index);
         if ((ret = avfilter_graph_create_filter(&setpts,
                                                 avfilter_get_by_name("setpts"),
-                                                "setpts", "N", NULL,
+                                                name, "N", NULL,
                                                 fg->graph)) < 0)
             return ret;
 
@@ -1058,18 +1076,20 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
     AVFilter *filter = avfilter_get_by_name("abuffer");
     InputStream *ist = ifilter->ist;
     int pad_idx = in->pad_idx;
-    char args[255];
+    char args[255], name[255];
     int ret;
 
     snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s"
              ":channel_layout=0x%"PRIx64,
-             ist->st->time_base.num, ist->st->time_base.den,
+             1, ist->st->codec->sample_rate,
              ist->st->codec->sample_rate,
              av_get_sample_fmt_name(ist->st->codec->sample_fmt),
              ist->st->codec->channel_layout);
+    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
+             ist->file_index, ist->st->index);
 
     if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter,
-                                            in->name, args, NULL,
+                                            name, args, NULL,
                                             fg->graph)) < 0)
         return ret;
 
@@ -1079,9 +1099,11 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
     av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi "            \
            "similarly to -af " filter_name "=%s.\n", arg);                  \
                                                                             \
+    snprintf(name, sizeof(name), "graph %d %s for input stream %d:%d",      \
+                fg->index, filter_name, ist->file_index, ist->st->index);   \
     ret = avfilter_graph_create_filter(&filt_ctx,                           \
                                        avfilter_get_by_name(filter_name),   \
-                                       filter_name, arg, NULL, fg->graph);  \
+                                       name, arg, NULL, fg->graph);         \
     if (ret < 0)                                                            \
         return ret;                                                         \
                                                                             \
@@ -1358,6 +1380,8 @@ void av_noreturn exit_program(int ret)
     }
     av_freep(&filtergraphs);
 
+    av_freep(&subtitle_out);
+
     /* close files */
     for (i = 0; i < nb_output_files; i++) {
         AVFormatContext *s = output_files[i]->ctx;
@@ -1376,6 +1400,7 @@ void av_noreturn exit_program(int ret)
         }
         output_streams[i]->bitstream_filters = NULL;
 
+        av_freep(&output_streams[i]->forced_keyframes);
         av_freep(&output_streams[i]->filtered_frame);
         av_freep(&output_streams[i]->avfilter);
         av_freep(&output_streams[i]);
@@ -1549,10 +1574,10 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
     av_init_packet(&pkt);
     pkt.data = NULL;
     pkt.size = 0;
-#if 0
+
     if (!check_recording_time(ost))
         return;
-#endif
+
     if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
         frame->pts = ost->sync_opts;
     ost->sync_opts = frame->pts + frame->nb_samples;
@@ -1632,7 +1657,6 @@ static void do_subtitle_out(AVFormatContext *s,
                             AVSubtitle *sub,
                             int64_t pts)
 {
-    static uint8_t *subtitle_out = NULL;
     int subtitle_out_max_size = 1024 * 1024;
     int subtitle_out_size, nb, i;
     AVCodecContext *enc;
@@ -1659,10 +1683,15 @@ static void do_subtitle_out(AVFormatContext *s,
     else
         nb = 1;
 
+    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
+    pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q)
+        - output_files[ost->file_index]->start_time;
     for (i = 0; i < nb; i++) {
-        ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
+        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
+        if (!check_recording_time(ost))
+            return;
 
-        sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
+        sub->pts = pts;
         // start_display_time is required to be 0
         sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
         sub->end_display_time  -= sub->start_display_time;
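/*
 * A minimal sketch (not part of this diff): after the rescale above the
 * subtitle pts is in AV_TIME_BASE units relative to the output start, so a
 * -t limit reduces to a comparison across time bases. check_recording_time()
 * is defined elsewhere in ffmpeg.c; this only illustrates the kind of test
 * involved, with made-up values.
 */
#include <stdio.h>
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

int main(void)
{
    int64_t recording_time = 5 * AV_TIME_BASE;  /* hypothetical -t 5 */
    AVRational enc_tb      = { 1, 1000 };       /* subtitle encoder time base */
    int64_t sync_opts      = 6500;              /* 6.5 s in enc_tb */

    /* >= 0 means the stream has reached the requested duration */
    if (av_compare_ts(sync_opts, enc_tb, recording_time, AV_TIME_BASE_Q) >= 0)
        printf("past recording time, stop encoding this stream\n");
    else
        printf("still within -t, keep encoding\n");
    return 0;
}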
@@ -1700,7 +1729,7 @@ static void do_video_out(AVFormatContext *s,
     int ret, format_video_sync;
     AVPacket pkt;
     AVCodecContext *enc = ost->st->codec;
-    int nb_frames;
+    int nb_frames, i;
     double sync_ipts, delta;
     double duration = 0;
     int frame_size = 0;
@@ -1750,18 +1779,26 @@ static void do_video_out(AVFormatContext *s,
         av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
         return;
     } else if (nb_frames > 1) {
+        if (nb_frames > dts_error_threshold * 30) {
+            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
+            nb_frames_drop++;
+            return;
+        }
         nb_frames_dup += nb_frames - 1;
         av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
     }
 
-
-duplicate_frame:
+  /* duplicate the frame if needed */
+  for (i = 0; i < nb_frames; i++) {
     av_init_packet(&pkt);
     pkt.data = NULL;
     pkt.size = 0;
 
     in_picture->pts = ost->sync_opts;
 
+    if (!check_recording_time(ost))
+        return;
+
     if (s->oformat->flags & AVFMT_RAWPICTURE &&
         enc->codec->id == CODEC_ID_RAWVIDEO) {
         /* raw pictures are written as AVPicture structure to
@@ -1843,9 +1880,7 @@ duplicate_frame:
      * flush, we need to limit them here, before they go into encoder.
      */
     ost->frame_number++;
-
-    if(--nb_frames)
-        goto duplicate_frame;
+  }
 
     if (vstats_filename && frame_size)
         do_video_stats(output_files[ost->file_index]->ctx, ost, frame_size);
@@ -1899,7 +1934,7 @@ static int poll_filters(void)
     AVFilterBufferRef *picref;
     AVFrame *filtered_frame = NULL;
     int i, ret, ret_all;
-    unsigned nb_success, nb_eof;
+    unsigned nb_success = 1, av_uninit(nb_eof);
     int64_t frame_pts;
 
     while (1) {
@@ -1918,18 +1953,9 @@ static int poll_filters(void)
                 avcodec_get_frame_defaults(ost->filtered_frame);
             filtered_frame = ost->filtered_frame;
 
-            while (!ost->is_past_recording_time) {
-                if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
-                    !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
-                    ret = av_buffersink_read_samples(ost->filter->filter, &picref,
-                                                    ost->st->codec->frame_size);
-                else
-#ifdef SINKA
-                    ret = av_buffersink_read(ost->filter->filter, &picref);
-#else
-                    ret = av_buffersink_get_buffer_ref(ost->filter->filter, &picref,
-                                                       AV_BUFFERSINK_FLAG_NO_REQUEST);
-#endif
+            while (1) {
+                ret = av_buffersink_get_buffer_ref(ost->filter->filter, &picref,
+                                                   AV_BUFFERSINK_FLAG_NO_REQUEST);
                 if (ret < 0) {
                     if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                         char buf[256];
@@ -1959,7 +1985,7 @@ static int poll_filters(void)
 
                 switch (ost->filter->filter->inputs[0]->type) {
                 case AVMEDIA_TYPE_VIDEO:
-                    avfilter_fill_frame_from_video_buffer_ref(filtered_frame, picref);
+                    avfilter_copy_buf_props(filtered_frame, picref);
                     filtered_frame->pts = frame_pts;
                     if (!ost->frame_aspect_ratio)
                         ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio;
@@ -1981,6 +2007,8 @@ static int poll_filters(void)
                 avfilter_unref_buffer(picref);
             }
         }
+        if (!nb_success) /* from last round */
+            break;
         /* Request frames through all the graphs */
         ret_all = nb_success = nb_eof = 0;
         for (i = 0; i < nb_filtergraphs; i++) {
@@ -1997,8 +2025,6 @@ static int poll_filters(void)
                 ret_all = ret;
             }
         }
-        if (!nb_success)
-            break;
         /* Try again if anything succeeded */
     }
     return nb_eof == nb_filtergraphs ? AVERROR_EOF : ret_all;
@@ -2007,6 +2033,7 @@ static int poll_filters(void)
 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
 {
     char buf[1024];
+    AVBPrint buf_script;
     OutputStream *ost;
     AVFormatContext *oc;
     int64_t total_size;
@@ -2018,7 +2045,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
     static int qp_histogram[52];
     int hours, mins, secs, us;
 
-    if (!print_stats && !is_last_report)
+    if (!print_stats && !is_last_report && !progress_avio)
         return;
 
     if (!is_last_report) {
@@ -2043,6 +2070,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
 
     buf[0] = '\0';
     vid = 0;
+    av_bprint_init(&buf_script, 0, 1);
     for (i = 0; i < nb_output_streams; i++) {
         float q = -1;
         ost = output_streams[i];
@@ -2051,6 +2079,8 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
             q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
         if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
+            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
+                       ost->file_index, ost->index, q);
         }
         if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
             float fps, t = (cur_time-timer_start) / 1000000.0;
@@ -2059,6 +2089,10 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
             fps = t > 1 ? frame_number / t : 0;
             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                      frame_number, fps < 9.95, fps, q);
+            av_bprintf(&buf_script, "frame=%d\n", frame_number);
+            av_bprintf(&buf_script, "fps=%.1f\n", fps);
+            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
+                       ost->file_index, ost->index, q);
             if (is_last_report)
                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
             if (qp_hist) {
@@ -2073,6 +2107,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
                 int j;
                 double error, error_sum = 0;
                 double scale, scale_sum = 0;
+                double p;
                 char type[3] = { 'Y','U','V' };
                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                 for (j = 0; j < 3; j++) {
@@ -2087,9 +2122,15 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
                         scale /= 4;
                     error_sum += error;
                     scale_sum += scale;
-                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
+                    p = psnr(error / scale);
+                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
+                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
+                               ost->file_index, ost->index, type[j] | 32, p);
                 }
+                p = psnr(error_sum / scale_sum);
                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
+                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
+                           ost->file_index, ost->index, p);
             }
             vid = 1;
         }
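/*
 * A minimal sketch (not part of this diff) of the per-plane PSNR values
 * reported above, assuming the usual psnr() helper in ffmpeg.c,
 * psnr(d) = -10 * log10(d): the accumulated squared error is divided by
 * plane size, squared peak value and frame count (chroma planes of 4:2:0
 * material are a quarter of the size, hence the /4). Numbers are made up.
 */
#include <math.h>
#include <stdio.h>

static double psnr(double d)
{
    return -10.0 * log10(d);
}

int main(void)
{
    int    width = 1280, height = 720, frames = 250;
    int    plane = 0;                  /* 0 = Y, 1 = U, 2 = V */
    double error = 1.2e9;              /* hypothetical summed squared error */
    double scale = (double)width * height * 255.0 * 255.0 * frames;

    if (plane)
        scale /= 4;
    printf("PSNR %c: %2.2f dB\n", "YUV"[plane], psnr(error / scale));
    return 0;
}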
@@ -2114,14 +2155,35 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
              (100 * us) / AV_TIME_BASE);
     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
              "bitrate=%6.1fkbits/s", bitrate);
+    av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
+    av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
+    av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
+               hours, mins, secs, us);
 
     if (nb_frames_dup || nb_frames_drop)
         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                 nb_frames_dup, nb_frames_drop);
+    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
+    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
 
+    if (print_stats || is_last_report) {
     av_log(NULL, AV_LOG_INFO, "%s    \r", buf);
 
     fflush(stderr);
+    }
+
+    if (progress_avio) {
+        av_bprintf(&buf_script, "progress=%s\n",
+                   is_last_report ? "end" : "continue");
+        avio_write(progress_avio, buf_script.str,
+                   FFMIN(buf_script.len, buf_script.size - 1));
+        avio_flush(progress_avio);
+        av_bprint_finalize(&buf_script, NULL);
+        if (is_last_report) {
+            avio_close(progress_avio);
+            progress_avio = NULL;
+        }
+    }
 
     if (is_last_report) {
         int64_t raw= audio_size + video_size + subtitle_size + extra_size;
@@ -2226,13 +2288,6 @@ static int check_output_constraints(InputStream *ist, OutputStream *ost)
     if (of->start_time && ist->pts < of->start_time)
         return 0;
 
-    if (of->recording_time != INT64_MAX &&
-        av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time,
-                      (AVRational){ 1, 1000000 }) >= 0) {
-        ost->is_past_recording_time = 1;
-        return 0;
-    }
-
     return 1;
 }
 
@@ -2249,6 +2304,12 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
         !ost->copy_initial_nonkeyframes)
         return;
 
+    if (of->recording_time != INT64_MAX &&
+        ist->pts >= of->recording_time + of->start_time) {
+        ost->is_past_recording_time = 1;
+        return;
+    }
+
     /* force the input stream PTS */
     if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
         audio_size += pkt->size;
@@ -2285,7 +2346,8 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
         opkt.data = pkt->data;
         opkt.size = pkt->size;
     }
-    if (of->ctx->oformat->flags & AVFMT_RAWPICTURE) {
+
+    if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
         /* store AVPicture in AVPacket, as expected by the output format */
         avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
         opkt.data = (uint8_t *)&pict;
@@ -2304,7 +2366,7 @@ static void rate_emu_sleep(InputStream *ist)
         int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
         int64_t now = av_gettime() - ist->start;
         if (pts > now)
-            usleep(pts - now);
+            av_usleep(pts - now);
     }
 }
 
@@ -2331,6 +2393,7 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
     AVFrame *decoded_frame;
     AVCodecContext *avctx = ist->st->codec;
     int i, ret, resample_changed;
+    AVRational decoded_frame_tb;
 
     if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
         return AVERROR(ENOMEM);
@@ -2358,17 +2421,6 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
         return ret;
     }
 
-    /* if the decoder provides a pts, use it instead of the last packet pts.
-       the decoder could be delaying output by a packet or more. */
-    if (decoded_frame->pts != AV_NOPTS_VALUE)
-        ist->dts = ist->next_dts = ist->pts = ist->next_pts = decoded_frame->pts;
-    else if (pkt->pts != AV_NOPTS_VALUE) {
-        decoded_frame->pts = pkt->pts;
-        pkt->pts           = AV_NOPTS_VALUE;
-    }else
-        decoded_frame->pts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
-
-
 #if 1
     /* increment next_dts to use for the case where the input stream does not
        have timestamps or there are multiple frames in the packet */
@@ -2414,16 +2466,49 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
         ist->resample_channels       = avctx->channels;
 
         for (i = 0; i < nb_filtergraphs; i++)
-            if (ist_in_filtergraph(filtergraphs[i], ist) &&
-                configure_filtergraph(filtergraphs[i]) < 0) {
-                av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
-                exit_program(1);
+            if (ist_in_filtergraph(filtergraphs[i], ist)) {
+                FilterGraph *fg = filtergraphs[i];
+                int j;
+                if (configure_filtergraph(fg) < 0) {
+                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
+                    exit_program(1);
+                }
+                for (j = 0; j < fg->nb_outputs; j++) {
+                    OutputStream *ost = fg->outputs[j]->ost;
+                    if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
+                        !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
+                        av_buffersink_set_frame_size(ost->filter->filter,
+                                                     ost->st->codec->frame_size);
+                }
             }
     }
 
+    /* if the decoder provides a pts, use it instead of the last packet pts.
+       the decoder could be delaying output by a packet or more. */
+    if (decoded_frame->pts != AV_NOPTS_VALUE) {
+        ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
+        decoded_frame_tb   = avctx->time_base;
+    } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
+        decoded_frame->pts = decoded_frame->pkt_pts;
+        pkt->pts           = AV_NOPTS_VALUE;
+        decoded_frame_tb   = ist->st->time_base;
+    } else if (pkt->pts != AV_NOPTS_VALUE) {
+        decoded_frame->pts = pkt->pts;
+        pkt->pts           = AV_NOPTS_VALUE;
+        decoded_frame_tb   = ist->st->time_base;
+    }else {
+        decoded_frame->pts = ist->dts;
+        decoded_frame_tb   = AV_TIME_BASE_Q;
+    }
+    if (decoded_frame->pts != AV_NOPTS_VALUE)
+        decoded_frame->pts = av_rescale_q(decoded_frame->pts,
+                                          decoded_frame_tb,
+                                          (AVRational){1, ist->st->codec->sample_rate});
     for (i = 0; i < ist->nb_filters; i++)
         av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, 0);
 
+    decoded_frame->pts = AV_NOPTS_VALUE;
+
     return ret;
 }
 
@@ -2466,6 +2551,16 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
     if(best_effort_timestamp != AV_NOPTS_VALUE)
         ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
 
+    if (debug_ts) {
+        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
+                "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d \n",
+                ist->st->index, av_ts2str(decoded_frame->pts),
+                av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
+                best_effort_timestamp,
+                av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
+                decoded_frame->key_frame, decoded_frame->pict_type);
+    }
+
     pkt->size = 0;
     pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
 
@@ -2758,6 +2853,52 @@ static InputStream *get_input_stream(OutputStream *ost)
     return NULL;
 }
 
+static void parse_forced_key_frames(char *kf, OutputStream *ost,
+                                    AVCodecContext *avctx)
+{
+    char *p;
+    int n = 1, i;
+    int64_t t;
+
+    for (p = kf; *p; p++)
+        if (*p == ',')
+            n++;
+    ost->forced_kf_count = n;
+    ost->forced_kf_pts   = av_malloc(sizeof(*ost->forced_kf_pts) * n);
+    if (!ost->forced_kf_pts) {
+        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
+        exit_program(1);
+    }
+
+    p = kf;
+    for (i = 0; i < n; i++) {
+        char *next = strchr(p, ',');
+
+        if (next)
+            *next++ = 0;
+
+        t = parse_time_or_die("force_key_frames", p, 1);
+        ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
+
+        p = next;
+    }
+}
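/*
 * A minimal sketch (not part of this diff): the array built above is a sorted
 * list of keyframe timestamps in the encoder time base. A consumer can walk it
 * with a single index (cf. forced_kf_index in OutputStream) and force a
 * keyframe whenever the current frame pts reaches the next entry. Values are
 * made up.
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    int64_t forced_kf_pts[] = { 0, 250, 500 };  /* hypothetical, encoder time base */
    int forced_kf_count = 3, forced_kf_index = 0;
    int64_t pts;

    for (pts = 0; pts < 600; pts += 100) {
        int force_key_frame = 0;
        if (forced_kf_index < forced_kf_count &&
            pts >= forced_kf_pts[forced_kf_index]) {
            forced_kf_index++;
            force_key_frame = 1;
        }
        printf("pts=%"PRId64" keyframe=%d\n", pts, force_key_frame);
    }
    return 0;
}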
+
+static void report_new_stream(int input_index, AVPacket *pkt)
+{
+    InputFile *file = input_files[input_index];
+    AVStream *st = file->ctx->streams[pkt->stream_index];
+
+    if (pkt->stream_index < file->nb_streams_warn)
+        return;
+    av_log(file->ctx, AV_LOG_WARNING,
+           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
+           av_get_media_type_string(st->codec->codec_type),
+           input_index, pkt->stream_index,
+           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
+    file->nb_streams_warn = pkt->stream_index + 1;
+}
+
 static int transcode_init(void)
 {
     int ret = 0, i, j, k;
@@ -2851,7 +2992,15 @@ static int transcode_init(void)
              * overhead
              */
             if(!strcmp(oc->oformat->name, "avi")) {
-                if (   copy_tb<0 && av_q2d(icodec->time_base)*icodec->ticks_per_frame > 2*av_q2d(ist->st->time_base)
+                if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
+                               && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
+                               && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(icodec->time_base)
+                               && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(icodec->time_base) < 1.0/500
+                     || copy_tb==2){
+                    codec->time_base.num = ist->st->r_frame_rate.den;
+                    codec->time_base.den = 2*ist->st->r_frame_rate.num;
+                    codec->ticks_per_frame = 2;
+                } else if (   copy_tb<0 && av_q2d(icodec->time_base)*icodec->ticks_per_frame > 2*av_q2d(ist->st->time_base)
                                  && av_q2d(ist->st->time_base) < 1.0/500
                     || copy_tb==0){
                     codec->time_base = icodec->time_base;
@@ -2889,7 +3038,7 @@ static int transcode_init(void)
                 codec->frame_size         = icodec->frame_size;
                 codec->audio_service_type = icodec->audio_service_type;
                 codec->block_align        = icodec->block_align;
-                if(codec->block_align == 1 && codec->codec_id == CODEC_ID_MP3)
+                if((codec->block_align == 1 || codec->block_align == 1152) && codec->codec_id == CODEC_ID_MP3)
                     codec->block_align= 0;
                 if(codec->codec_id == CODEC_ID_AC3)
                     codec->block_align= 0;
@@ -2933,17 +3082,6 @@ static int transcode_init(void)
                 ist->decoding_needed = 1;
             ost->encoding_needed = 1;
 
-            if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
-                if (ost->filter && !ost->frame_rate.num)
-                    ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
-                if (ist && !ost->frame_rate.num)
-                    ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
-                if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
-                    int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
-                    ost->frame_rate = ost->enc->supported_framerates[idx];
-                }
-            }
-
             if (!ost->filter &&
                 (codec->codec_type == AVMEDIA_TYPE_VIDEO ||
                  codec->codec_type == AVMEDIA_TYPE_AUDIO)) {
@@ -2955,6 +3093,18 @@ static int transcode_init(void)
                     }
             }
 
+            if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+                if (ost->filter && !ost->frame_rate.num)
+                    ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
+                if (ist && !ost->frame_rate.num)
+                    ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
+//                    ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
+                if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
+                    int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
+                    ost->frame_rate = ost->enc->supported_framerates[idx];
+                }
+            }
+
             switch (codec->codec_type) {
             case AVMEDIA_TYPE_AUDIO:
                 codec->sample_fmt     = ost->filter->filter->inputs[0]->format;
@@ -2992,6 +3142,9 @@ static int transcode_init(void)
                     codec->bits_per_raw_sample = frame_bits_per_raw_sample;
                 }
 
+                if (ost->forced_keyframes)
+                    parse_forced_key_frames(ost->forced_keyframes, ost,
+                                            ost->st->codec);
                 break;
             case AVMEDIA_TYPE_SUBTITLE:
                 codec->time_base = (AVRational){1, 1000};
@@ -3061,6 +3214,10 @@ static int transcode_init(void)
                 ret = AVERROR(EINVAL);
                 goto dump_format;
             }
+            if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
+                !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
+                av_buffersink_set_frame_size(ost->filter->filter,
+                                             ost->st->codec->frame_size);
             assert_codec_experimental(ost->st->codec, 1);
             assert_avoptions(ost->opts);
             if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
@@ -3217,20 +3374,23 @@ static int need_output(void)
     return 0;
 }
 
-static int select_input_file(uint8_t *no_packet)
+static int input_acceptable(InputStream *ist)
 {
-    int64_t ipts_min = INT64_MAX;
-    int i, file_index = -1;
-
-    for (i = 0; i < nb_input_streams; i++) {
-        InputStream *ist = input_streams[i];
-        int64_t ipts     = ist->pts;
+    av_assert1(!ist->discard);
+    return !input_files[ist->file_index]->unavailable &&
+           !input_files[ist->file_index]->eof_reached;
+}
 
-        if (ist->discard || no_packet[ist->file_index])
-            continue;
-        if (!input_files[ist->file_index]->eof_reached) {
-            if (ipts < ipts_min) {
-                ipts_min = ipts;
+static int find_graph_input(FilterGraph *graph)
+{
+    int i, nb_req_max = 0, file_index = -1;
+
+    for (i = 0; i < graph->nb_inputs; i++) {
+        int nb_req = av_buffersrc_get_nb_failed_requests(graph->inputs[i]->filter);
+        if (nb_req > nb_req_max) {
+            InputStream *ist = graph->inputs[i]->ist;
+            if (input_acceptable(ist)) {
+                nb_req_max = nb_req;
                 file_index = ist->file_index;
             }
         }
@@ -3239,6 +3399,61 @@ static int select_input_file(uint8_t *no_packet)
     return file_index;
 }
 
+/**
+ * Select the input file to read from.
+ *
+ * @return  >=0 index of the input file to use;
+ *          -1  if no file is acceptable;
+ *          -2  to read from filters without reading from a file
+ */
+static int select_input_file(void)
+{
+    int i, ret, nb_active_out = nb_output_streams, ost_index = -1;
+    int64_t opts_min;
+    OutputStream *ost;
+    AVFilterBufferRef *dummy;
+
+    for (i = 0; i < nb_output_streams; i++)
+        nb_active_out -= output_streams[i]->unavailable =
+            output_streams[i]->is_past_recording_time;
+    while (nb_active_out) {
+        opts_min = INT64_MAX;
+        ost_index = -1;
+        for (i = 0; i < nb_output_streams; i++) {
+            OutputStream *ost = output_streams[i];
+            int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
+                                        AV_TIME_BASE_Q);
+            if (!ost->unavailable && opts < opts_min) {
+                opts_min  = opts;
+                ost_index = i;
+            }
+        }
+        if (ost_index < 0)
+            return -1;
+
+        ost = output_streams[ost_index];
+        if (ost->source_index >= 0) {
+            /* ost is directly connected to an input */
+            InputStream *ist = input_streams[ost->source_index];
+            if (input_acceptable(ist))
+                return ist->file_index;
+        } else {
+            /* ost is connected to a complex filtergraph */
+            av_assert1(ost->filter);
+            ret = av_buffersink_get_buffer_ref(ost->filter->filter, &dummy,
+                                               AV_BUFFERSINK_FLAG_PEEK);
+            if (ret >= 0)
+                return -2;
+            ret = find_graph_input(ost->filter->graph);
+            if (ret >= 0)
+                return ret;
+        }
+        ost->unavailable = 1;
+        nb_active_out--;
+    }
+    return -1;
+}
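/*
 * A minimal sketch (not part of this diff) of the selection heuristic above:
 * among the output streams that are still available, pick the one whose last
 * muxed dts, rescaled to a common time base, is smallest, and feed its input
 * next. The time bases and dts values below are made up.
 */
#include <inttypes.h>
#include <stdio.h>
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational tb[2]   = { { 1, 90000 }, { 1, 48000 } }; /* per-stream time bases    */
    int64_t cur_dts[2] = { 270000, 96000 };              /* 3.0 s video, 2.0 s audio */
    int64_t opts_min   = INT64_MAX;
    int i, ost_index = -1;

    for (i = 0; i < 2; i++) {
        int64_t opts = av_rescale_q(cur_dts[i], tb[i], AV_TIME_BASE_Q);
        if (opts < opts_min) {
            opts_min  = opts;
            ost_index = i;
        }
    }
    printf("feed the input of output stream %d next (dts %.2f s)\n",
           ost_index, opts_min / (double)AV_TIME_BASE);
    return 0;
}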
+
 static int check_keyboard_interaction(int64_t cur_time)
 {
     int i, ret, key;
@@ -3343,7 +3558,7 @@ static void *input_thread(void *arg)
         ret = av_read_frame(f->ctx, &pkt);
 
         if (ret == AVERROR(EAGAIN)) {
-            usleep(10000);
+            av_usleep(10000);
             ret = 0;
             continue;
         } else if (ret < 0)
@@ -3460,18 +3675,14 @@ static int transcode(void)
     AVFormatContext *is, *os;
     OutputStream *ost;
     InputStream *ist;
-    uint8_t *no_packet;
     int no_packet_count = 0;
     int64_t timer_start;
 
-    if (!(no_packet = av_mallocz(nb_input_files)))
-        exit_program(1);
-
     ret = transcode_init();
     if (ret < 0)
         goto fail;
 
-    if (!using_stdin) {
+    if (stdin_interaction) {
         av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
     }
 
@@ -3488,7 +3699,7 @@ static int transcode(void)
         int64_t cur_time= av_gettime();
 
         /* if 'q' pressed, exits */
-        if (!using_stdin)
+        if (stdin_interaction)
             if (check_keyboard_interaction(cur_time) < 0)
                 break;
 
@@ -3499,13 +3710,18 @@ static int transcode(void)
         }
 
         /* select the stream that we must read now */
-        file_index = select_input_file(no_packet);
+        file_index = select_input_file();
         /* if none, if is finished */
+        if (file_index == -2) {
+            poll_filters();
+            continue;
+        }
         if (file_index < 0) {
             if (no_packet_count) {
                 no_packet_count = 0;
-                memset(no_packet, 0, nb_input_files);
-                usleep(10000);
+                for (i = 0; i < nb_input_files; i++)
+                    input_files[i]->unavailable = 0;
+                av_usleep(10000);
                 continue;
             }
             av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
@@ -3516,17 +3732,23 @@ static int transcode(void)
         ret = get_input_packet(input_files[file_index], &pkt);
 
         if (ret == AVERROR(EAGAIN)) {
-            no_packet[file_index] = 1;
+            input_files[file_index]->unavailable = 1;
             no_packet_count++;
             continue;
         }
         if (ret < 0) {
+            if (ret != AVERROR_EOF) {
+                print_error(is->filename, ret);
+                if (exit_on_error)
+                    exit_program(1);
+            }
             input_files[file_index]->eof_reached = 1;
 
             for (i = 0; i < input_files[file_index]->nb_streams; i++) {
                 ist = input_streams[input_files[file_index]->ist_index + i];
                 if (ist->decoding_needed)
                     output_packet(ist, NULL);
+                poll_filters();
             }
 
             if (opt_shortest)
@@ -3536,7 +3758,8 @@ static int transcode(void)
         }
 
         no_packet_count = 0;
-        memset(no_packet, 0, nb_input_files);
+        for (i = 0; i < nb_input_files; i++)
+            input_files[i]->unavailable = 0;
 
         if (do_pkt_dump) {
             av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
@@ -3544,13 +3767,29 @@ static int transcode(void)
         }
         /* the following test is needed in case new streams appear
            dynamically in stream : we ignore them */
-        if (pkt.stream_index >= input_files[file_index]->nb_streams)
+        if (pkt.stream_index >= input_files[file_index]->nb_streams) {
+            report_new_stream(file_index, &pkt);
             goto discard_packet;
+        }
         ist_index = input_files[file_index]->ist_index + pkt.stream_index;
         ist = input_streams[ist_index];
         if (ist->discard)
             goto discard_packet;
 
+        if(!ist->wrap_correction_done && input_files[file_index]->ctx->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
+            uint64_t stime = av_rescale_q(input_files[file_index]->ctx->start_time, AV_TIME_BASE_Q, ist->st->time_base);
+            uint64_t stime2= stime + (1LL<<ist->st->pts_wrap_bits);
+            ist->wrap_correction_done = 1;
+            if(pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime && pkt.dts - stime > stime2 - pkt.dts) {
+                pkt.dts -= 1LL<<ist->st->pts_wrap_bits;
+                ist->wrap_correction_done = 0;
+            }
+            if(pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime && pkt.pts - stime > stime2 - pkt.pts) {
+                pkt.pts -= 1LL<<ist->st->pts_wrap_bits;
+                ist->wrap_correction_done = 0;
+            }
+        }
+
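/*
 * A minimal sketch (not part of this diff) of the wrap test above: with
 * pts_wrap_bits bits of timestamp, a packet whose timestamp lies more than
 * half a wrap period above the container start time is taken to be a
 * pre-wrap value and is shifted down by one full period. The values below
 * are made-up 33-bit MPEG-TS-style timestamps.
 */
#include <inttypes.h>
#include <stdio.h>

static int64_t unwrap(int64_t ts, int64_t stime, int pts_wrap_bits)
{
    int64_t stime2 = stime + (1LL << pts_wrap_bits);

    if (ts > stime && ts - stime > stime2 - ts)
        ts -= 1LL << pts_wrap_bits;   /* closer to the wrap point: pre-wrap value */
    return ts;
}

int main(void)
{
    int64_t stime = 1000;                /* start time just after a wrap   */
    int64_t dts   = (1LL << 33) - 90000; /* stray large pre-wrap timestamp */

    printf("corrected dts = %"PRId64"\n", unwrap(dts, stime, 33));
    return 0;
}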
         if (pkt.dts != AV_NOPTS_VALUE)
             pkt.dts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
         if (pkt.pts != AV_NOPTS_VALUE)
@@ -3565,8 +3804,8 @@ static int transcode(void)
             av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
                     "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s  pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%"PRId64"\n",
                     ist_index, av_get_media_type_string(ist->st->codec->codec_type),
-                    av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &ist->st->time_base),
-                    av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &ist->st->time_base),
+                    av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
+                    av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
                     av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
                     av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
                     input_files[ist->file_index]->ts_offset);
@@ -3673,7 +3912,6 @@ static int transcode(void)
     ret = 0;
 
  fail:
-    av_freep(&no_packet);
 #if HAVE_PTHREADS
     free_input_threads();
 #endif
@@ -3990,6 +4228,7 @@ static int copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFor
             METADATA_CHECK_INDEX(index, context->nb_programs, "program")\
             meta = &context->programs[index]->metadata;\
             break;\
+        default: av_assert0(0);\
         }\
 
     SET_DICT(type_in, meta_in, ic, idx_in);
@@ -4094,7 +4333,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
         ist->file_index = nb_input_files;
         ist->discard = 1;
         st->discard  = AVDISCARD_ALL;
-        ist->opts = filter_codec_opts(codec_opts, choose_decoder(o, ic, st), ic, st);
+        ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st, choose_decoder(o, ic, st));
 
         ist->ts_scale = 1.0;
         MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
@@ -4162,7 +4401,7 @@ static void assert_file_overwrite(const char *filename)
         (strchr(filename, ':') == NULL || filename[1] == ':' ||
          av_strstart(filename, "file:", NULL))) {
         if (avio_check(filename, 0) == 0) {
-            if (!using_stdin && (!no_file_overwrite || file_overwrite)) {
+            if (stdin_interaction && (!no_file_overwrite || file_overwrite)) {
                 fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
                 fflush(stderr);
                 term_exit();
@@ -4233,8 +4472,8 @@ static int opt_input_file(OptionsContext *o, const char *opt, const char *filena
     if (!strcmp(filename, "-"))
         filename = "pipe:";
 
-    using_stdin |= !strncmp(filename, "pipe:", 5) ||
-                    !strcmp(filename, "/dev/stdin");
+    stdin_interaction &= strncmp(filename, "pipe:", 5) &&
+                         strcmp(filename, "/dev/stdin");
 
     /* get default parameters from command line */
     ic = avformat_alloc_context();
@@ -4357,29 +4596,6 @@ static int opt_input_file(OptionsContext *o, const char *opt, const char *filena
     return 0;
 }
 
-static void parse_forced_key_frames(char *kf, OutputStream *ost)
-{
-    char *p;
-    int n = 1, i;
-
-    for (p = kf; *p; p++)
-        if (*p == ',')
-            n++;
-    ost->forced_kf_count = n;
-    ost->forced_kf_pts   = av_malloc(sizeof(*ost->forced_kf_pts) * n);
-    if (!ost->forced_kf_pts) {
-        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
-        exit_program(1);
-    }
-    p = kf;
-    for (i = 0; i < n; i++) {
-        char *next = strchr(p, ',');
-        if (next) *next++ = 0;
-        ost->forced_kf_pts[i] = parse_time_or_die("force_key_frames", p, 1);
-        p = next;
-    }
-}
-
 static uint8_t *get_line(AVIOContext *s)
 {
     AVIOContext *line;
@@ -4473,7 +4689,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
     st->codec->codec_type = type;
     choose_encoder(o, oc, ost);
     if (ost->enc) {
-        ost->opts  = filter_codec_opts(codec_opts, ost->enc, oc, st);
+        ost->opts  = filter_codec_opts(codec_opts, ost->enc->id, oc, st, ost->enc);
     }
 
     avcodec_get_context_defaults3(st->codec, ost->enc);
@@ -4591,7 +4807,7 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
 
     if (!ost->stream_copy) {
         const char *p = NULL;
-        char *forced_key_frames = NULL, *frame_size = NULL;
+        char *frame_size = NULL;
         char *frame_aspect_ratio = NULL, *frame_pix_fmt = NULL;
         char *intra_matrix = NULL, *inter_matrix = NULL;
         const char *filters = "null";
@@ -4689,9 +4905,9 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
             }
         }
 
-        MATCH_PER_STREAM_OPT(forced_key_frames, str, forced_key_frames, oc, st);
-        if (forced_key_frames)
-            parse_forced_key_frames(forced_key_frames, ost);
+        MATCH_PER_STREAM_OPT(forced_key_frames, str, ost->forced_keyframes, oc, st);
+        if (ost->forced_keyframes)
+            ost->forced_keyframes = av_strdup(ost->forced_keyframes);
 
         MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);
 
@@ -4924,6 +5140,9 @@ static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
                "cannot be used together.\n", ost->file_index, ost->index);
         exit_program(1);
     }
+    if (o->recording_time != INT64_MAX)
+        av_log(NULL, AV_LOG_WARNING,
+               "-t does not work with -filter_complex (yet).\n");
 
     if (configure_output_filter(ofilter->graph, ofilter, ofilter->out_tmp) < 0) {
         av_log(NULL, AV_LOG_FATAL, "Error configuring filter.\n");
@@ -5229,6 +5448,7 @@ loop_end:
                      AV_DICT_DONT_OVERWRITE);
         if(o->recording_time != INT64_MAX)
             av_dict_set(&oc->metadata, "duration", NULL, 0);
+        av_dict_set(&oc->metadata, "creation_time", NULL, 0);
     }
     if (!o->metadata_streams_manual)
         for (i = output_files[nb_output_files - 1]->ost_index; i < nb_output_streams; i++) {
@@ -5262,7 +5482,6 @@ loop_end:
                 } else if (ret < 0)
                     exit_program(1);
             }
-            printf("ret %d, stream_spec %s\n", ret, stream_spec);
         }
         else {
             switch (type) {
@@ -5356,6 +5575,7 @@ static int opt_help(const char *opt, const char *arg)
     show_help_children(avcodec_get_class(), flags);
     show_help_children(avformat_get_class(), flags);
     show_help_children(sws_get_class(), flags);
+    show_help_children(swr_get_class(), AV_OPT_FLAG_AUDIO_PARAM);
 
     return 0;
 }
@@ -5722,8 +5942,25 @@ static int opt_filter_complex(const char *opt, const char *arg)
     return 0;
 }
 
+static int opt_progress(const char *opt, const char *arg)
+{
+    AVIOContext *avio = NULL;
+    int ret;
+
+    if (!strcmp(arg, "-"))
+        arg = "pipe:";
+    ret = avio_open2(&avio, arg, AVIO_FLAG_WRITE, &int_cb, NULL);
+    if (ret < 0) {
+        av_log(0, AV_LOG_ERROR, "Failed to open progress URL \"%s\": %s\n",
+               arg, av_err2str(ret));
+        return ret;
+    }
+    progress_avio = avio;
+    return 0;
+}
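/*
 * Usage sketch (not part of this diff): with a command line along the lines of
 *     ffmpeg -i input.mkv -progress progress.txt output.mkv
 * print_report() above periodically appends a block of key=value lines to the
 * given URL, for example (values made up):
 *     frame=120
 *     fps=24.0
 *     stream_0_0_q=28.0
 *     total_size=1048576
 *     out_time_ms=5000000
 *     out_time=00:00:05.000000
 *     dup_frames=0
 *     drop_frames=0
 *     progress=continue
 * The final block ends with "progress=end"; "-" is mapped to pipe: by the
 * handler above.
 */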
+
 #define OFFSET(x) offsetof(OptionsContext, x)
-static const OptionDef options[] = {
+static const OptionDef real_options[] = {
     /* main options */
 #include "cmdutils_common_opts.h"
     { "f", HAS_ARG | OPT_STRING | OPT_OFFSET, {.off = OFFSET(format)}, "force format", "fmt" },
@@ -5750,6 +5987,10 @@ static const OptionDef options[] = {
       "add timings for benchmarking" },
     { "benchmark_all", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark_all},
       "add timings for each task" },
+    { "progress", HAS_ARG | OPT_EXPERT, {(void*)opt_progress},
+      "write program-readable progress information", "url" },
+    { "stdin", OPT_BOOL | OPT_EXPERT, {(void*)&stdin_interaction},
+      "enable or disable interaction on standard input" },
     { "timelimit", HAS_ARG, {(void*)opt_timelimit}, "set max runtime in seconds", "limit" },
     { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump},
       "dump each input packet" },
@@ -5871,6 +6112,7 @@ int main(int argc, char **argv)
     OptionsContext o = { 0 };
     int64_t ti;
 
+    options = real_options;
     reset_options(&o, 0);
 
     av_log_set_flags(AV_LOG_SKIP_REPEATED);