avconv: call flush_encoders() from transcode() directly.
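In outline, the end-of-stream path in transcode() after this change reads roughly as sketched below (a condensed sketch pieced together from the hunks further down; declarations and error handling omitted). Decoders are drained by feeding output_packet() a NULL packet, and any samples or frames still buffered in the encoders are then flushed once, for all output streams, by the new flush_encoders() helper rather than by output_packet()'s former EOF branch:

    /* at the end of stream, flush the decoder buffers (NULL packet = EOF) */
    for (i = 0; i < nb_input_streams; i++) {
        ist = &input_streams[i];
        if (ist->decoding_needed)
            output_packet(ist, i, output_streams, nb_output_streams, NULL);
    }
    /* then drain whatever the encoders still hold, once per output stream */
    flush_encoders(output_streams, nb_output_streams);
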
index fbdd89960219eab6d5e92cfa9a3507ed377895d3..11d77419eb916db2f2c99b2f3fcf850c898be0da 100644 (file)
--- a/avconv.c
+++ b/avconv.c
@@ -97,15 +97,8 @@ typedef struct MetadataMap {
 
 static const OptionDef options[];
 
-#define MAX_FILES 100
-
 static const char *last_asked_format = NULL;
-static double *ts_scale;
-static int  nb_ts_scale;
-
-static AVFormatContext *output_files[MAX_FILES];
-static AVDictionary *output_opts[MAX_FILES];
-static int nb_output_files = 0;
+static AVDictionary *ts_scale;
 
 static StreamMap *stream_maps = NULL;
 static int nb_stream_maps;
@@ -150,7 +143,6 @@ static int qp_hist = 0;
 static char *vfilters = NULL;
 #endif
 
-static int intra_only = 0;
 static int audio_sample_rate = 0;
 #define QSCALE_NONE -99999
 static float audio_qscale = QSCALE_NONE;
@@ -205,14 +197,12 @@ static int64_t extra_size = 0;
 static int nb_frames_dup = 0;
 static int nb_frames_drop = 0;
 static int input_sync;
-static uint64_t limit_filesize = 0;
+static uint64_t limit_filesize = UINT64_MAX;
 static int force_fps = 0;
 static char *forced_key_frames = NULL;
 
 static float dts_delta_threshold = 10;
 
-static int64_t timer_start;
-
 static uint8_t *audio_buf;
 static uint8_t *audio_out;
 static unsigned int allocated_audio_out_size, allocated_audio_buf_size;
@@ -225,7 +215,31 @@ static AVBitStreamFilterContext *subtitle_bitstream_filters=NULL;
 
 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
 
-struct InputStream;
+typedef struct InputStream {
+    int file_index;
+    AVStream *st;
+    int discard;             /* true if stream data should be discarded */
+    int decoding_needed;     /* true if the packets must be decoded in 'raw_fifo' */
+    AVCodec *dec;
+
+    int64_t       start;     /* time when read started */
+    int64_t       next_pts;  /* synthetic pts for cases where pkt.pts
+                                is not defined */
+    int64_t       pts;       /* current pts */
+    PtsCorrectionContext pts_ctx;
+    double ts_scale;
+    int is_start;            /* is 1 at the start and after a discontinuity */
+    int showed_multi_packet_warning;
+    AVDictionary *opts;
+} InputStream;
+
+typedef struct InputFile {
+    AVFormatContext *ctx;
+    int eof_reached;      /* true if eof reached */
+    int ist_index;        /* index of first stream in ist_table */
+    int buffer_size;      /* current total buffer size */
+    int64_t ts_offset;
+} InputFile;
 
 typedef struct OutputStream {
     int file_index;          /* file index */
@@ -279,43 +293,29 @@ typedef struct OutputStream {
 
    int sws_flags;
    AVDictionary *opts;
+   int is_past_recording_time;
 } OutputStream;
 
-static OutputStream **output_streams_for_file[MAX_FILES] = { NULL };
-static int nb_output_streams_for_file[MAX_FILES] = { 0 };
-
-typedef struct InputStream {
-    int file_index;
-    AVStream *st;
-    int discard;             /* true if stream data should be discarded */
-    int decoding_needed;     /* true if the packets must be decoded in 'raw_fifo' */
-    AVCodec *dec;
 
-    int64_t       start;     /* time when read started */
-    int64_t       next_pts;  /* synthetic pts for cases where pkt.pts
-                                is not defined */
-    int64_t       pts;       /* current pts */
-    PtsCorrectionContext pts_ctx;
-    double ts_scale;
-    int is_start;            /* is 1 at the start and after a discontinuity */
-    int showed_multi_packet_warning;
-    int is_past_recording_time;
-    AVDictionary *opts;
-} InputStream;
-
-typedef struct InputFile {
+typedef struct OutputFile {
     AVFormatContext *ctx;
-    int eof_reached;      /* true if eof reached */
-    int ist_index;        /* index of first stream in ist_table */
-    int buffer_size;      /* current total buffer size */
-    int64_t ts_offset;
-} InputFile;
+    AVDictionary *opts;
+    int ost_index;       /* index of the first stream in output_streams */
+    int64_t recording_time; /* desired length of the resulting file in microseconds */
+    int64_t start_time;     /* start time in microseconds */
+    uint64_t limit_filesize;
+} OutputFile;
 
 static InputStream *input_streams = NULL;
 static int         nb_input_streams = 0;
 static InputFile   *input_files   = NULL;
 static int         nb_input_files   = 0;
 
+static OutputStream *output_streams = NULL;
+static int        nb_output_streams = 0;
+static OutputFile   *output_files   = NULL;
+static int        nb_output_files   = 0;
+
 #if CONFIG_AVFILTER
 
 static int configure_video_filters(InputStream *ist, OutputStream *ost)
@@ -394,7 +394,7 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost)
     codec->width  = ost->output_video_filter->inputs[0]->w;
     codec->height = ost->output_video_filter->inputs[0]->h;
     codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
-        ost->frame_aspect_ratio ? // overriden by the -aspect cli option
+        ost->frame_aspect_ratio ? // overridden by the -aspect cli option
         av_d2q(ost->frame_aspect_ratio*codec->height/codec->width, 255) :
         ost->output_video_filter->inputs[0]->sample_aspect_ratio;
 
@@ -438,12 +438,11 @@ static int exit_program(int ret)
 
     /* close files */
     for(i=0;i<nb_output_files;i++) {
-        AVFormatContext *s = output_files[i];
+        AVFormatContext *s = output_files[i].ctx;
         if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
             avio_close(s->pb);
         avformat_free_context(s);
-        av_free(output_streams_for_file[i]);
-        av_dict_free(&output_opts[i]);
+        av_dict_free(&output_files[i].opts);
     }
     for(i=0;i<nb_input_files;i++) {
         av_close_input_file(input_files[i].ctx);
@@ -458,11 +457,12 @@ static int exit_program(int ret)
         fclose(vstats_file);
     av_free(vstats_filename);
 
-    av_free(streamid_map);
     av_free(meta_data_maps);
 
     av_freep(&input_streams);
     av_freep(&input_files);
+    av_freep(&output_streams);
+    av_freep(&output_files);
 
     uninit_opts();
     av_free(audio_buf);
@@ -641,7 +641,8 @@ static double
 get_sync_ipts(const OutputStream *ost)
 {
     const InputStream *ist = ost->sync_ist;
-    return (double)(ist->pts - start_time)/AV_TIME_BASE;
+    OutputFile *of = &output_files[ost->file_index];
+    return (double)(ist->pts - of->start_time)/AV_TIME_BASE;
 }
 
 static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc){
@@ -676,8 +677,6 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx
     }
 }
 
-#define MAX_AUDIO_PACKET_SIZE (128 * 1024)
-
 static void do_audio_out(AVFormatContext *s,
                          OutputStream *ost,
                          InputStream *ist,
@@ -961,9 +960,6 @@ static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void *
     *bufp = buf;
 }
 
-/* we begin to correct av delay at this threshold */
-#define AV_DELAY_MAX 0.100
-
 static void do_subtitle_out(AVFormatContext *s,
                             OutputStream *ost,
                             InputStream *ist,
@@ -1079,7 +1075,7 @@ static void do_video_resample(OutputStream *ost,
     if (resample_changed) {
         avfilter_graph_free(&ost->graph);
         if (configure_video_filters(ist, ost)) {
-            fprintf(stderr, "Error reinitialising filters!\n");
+            fprintf(stderr, "Error reinitializing filters!\n");
             exit_program(1);
         }
     }
@@ -1156,7 +1152,7 @@ static void do_video_out(AVFormatContext *s,
 
         if (s->oformat->flags & AVFMT_RAWPICTURE) {
             /* raw pictures are written as AVPicture structure to
-               avoid any copies. We support temorarily the older
+               avoid any copies. We support temporarily the older
                method. */
             AVFrame* old_frame = enc->coded_frame;
             enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack
@@ -1271,9 +1267,9 @@ static void do_video_stats(AVFormatContext *os, OutputStream *ost,
     }
 }
 
-static void print_report(AVFormatContext **output_files,
-                         OutputStream **ost_table, int nb_ostreams,
-                         int is_last_report)
+static void print_report(OutputFile *output_files,
+                         OutputStream *ost_table, int nb_ostreams,
+                         int is_last_report, int64_t timer_start)
 {
     char buf[1024];
     OutputStream *ost;
@@ -1299,7 +1295,7 @@ static void print_report(AVFormatContext **output_files,
     }
 
 
-    oc = output_files[0];
+    oc = output_files[0].ctx;
 
     total_size = avio_size(oc->pb);
     if(total_size<0) // FIXME improve avio_size() so it works with non seekable output too
@@ -1310,7 +1306,7 @@ static void print_report(AVFormatContext **output_files,
     vid = 0;
     for(i=0;i<nb_ostreams;i++) {
         float q = -1;
-        ost = ost_table[i];
+        ost = &ost_table[i];
         enc = ost->st->codec;
         if (!ost->st->stream_copy && enc->coded_frame)
             q = enc->coded_frame->quality/(float)FF_QP2LAMBDA;
@@ -1401,9 +1397,94 @@ static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_
     memset(buf, fill_char, size);
 }
 
+static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
+{
+    int i, ret;
+
+    for (i = 0; i < nb_ostreams; i++) {
+        OutputStream   *ost = &ost_table[i];
+        AVCodecContext *enc = ost->st->codec;
+        AVFormatContext *os = output_files[ost->file_index].ctx;
+
+        if (!ost->encoding_needed)
+            continue;
+
+        if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1)
+            continue;
+        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
+            continue;
+
+        for(;;) {
+            AVPacket pkt;
+            int fifo_bytes;
+            av_init_packet(&pkt);
+            pkt.stream_index= ost->index;
+
+            switch (ost->st->codec->codec_type) {
+            case AVMEDIA_TYPE_AUDIO:
+                fifo_bytes = av_fifo_size(ost->fifo);
+                ret = 0;
+                /* encode any samples remaining in fifo */
+                if (fifo_bytes > 0) {
+                    int osize = av_get_bytes_per_sample(enc->sample_fmt);
+                    int fs_tmp = enc->frame_size;
+
+                    av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
+                    if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
+                        enc->frame_size = fifo_bytes / (osize * enc->channels);
+                    } else { /* pad */
+                        int frame_bytes = enc->frame_size*osize*enc->channels;
+                        if (allocated_audio_buf_size < frame_bytes)
+                            exit_program(1);
+                        generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
+                    }
+
+                    ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, (short *)audio_buf);
+                    pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den,
+                                              ost->st->time_base.num, enc->sample_rate);
+                    enc->frame_size = fs_tmp;
+                }
+                if (ret <= 0) {
+                    ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
+                }
+                if (ret < 0) {
+                    fprintf(stderr, "Audio encoding failed\n");
+                    exit_program(1);
+                }
+                audio_size += ret;
+                pkt.flags |= AV_PKT_FLAG_KEY;
+                break;
+            case AVMEDIA_TYPE_VIDEO:
+                ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
+                if (ret < 0) {
+                    fprintf(stderr, "Video encoding failed\n");
+                    exit_program(1);
+                }
+                video_size += ret;
+                if(enc->coded_frame && enc->coded_frame->key_frame)
+                    pkt.flags |= AV_PKT_FLAG_KEY;
+                if (ost->logfile && enc->stats_out) {
+                    fprintf(ost->logfile, "%s", enc->stats_out);
+                }
+                break;
+            default:
+                ret=-1;
+            }
+
+            if (ret <= 0)
+                break;
+            pkt.data = bit_buffer;
+            pkt.size = ret;
+            if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
+                pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
+            write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters);
+        }
+    }
+}
+
 /* pkt = NULL means EOF (needed to flush decoder buffers) */
 static int output_packet(InputStream *ist, int ist_index,
-                         OutputStream **ost_table, int nb_ostreams,
+                         OutputStream *ost_table, int nb_ostreams,
                          const AVPacket *pkt)
 {
     AVFormatContext *os;
@@ -1550,25 +1631,6 @@ static int output_packet(InputStream *ist, int ist_index,
             avpkt.size = 0;
         }
 
-#if CONFIG_AVFILTER
-        if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
-            for (i = 0; i < nb_ostreams; i++) {
-                ost = ost_table[i];
-                if (ost->input_video_filter && ost->source_index == ist_index) {
-                    AVRational sar;
-                    if (ist->st->sample_aspect_ratio.num)
-                        sar = ist->st->sample_aspect_ratio;
-                    else
-                        sar = ist->st->codec->sample_aspect_ratio;
-                    // add it to be filtered
-                    av_vsrc_buffer_add_frame(ost->input_video_filter, &picture,
-                                             ist->pts,
-                                             sar);
-                }
-            }
-        }
-#endif
-
         // preprocess audio (volume)
         if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
             if (audio_volume != 256) {
@@ -1592,119 +1654,139 @@ static int output_packet(InputStream *ist, int ist_index,
         }
         /* if output time reached then transcode raw format,
            encode packets and output them */
-        if (start_time == 0 || ist->pts >= start_time)
-            for(i=0;i<nb_ostreams;i++) {
-                int frame_size;
+        for (i = 0; i < nb_ostreams; i++) {
+            OutputFile *of = &output_files[ost_table[i].file_index];
+            int frame_size;
+
+            ost = &ost_table[i];
+            if (ost->source_index != ist_index)
+                continue;
+
+            if (of->start_time && ist->pts < of->start_time)
+                continue;
+
+            if (of->recording_time != INT64_MAX &&
+                av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time,
+                              (AVRational){1, 1000000}) >= 0) {
+                ost->is_past_recording_time = 1;
+                continue;
+            }
 
-                ost = ost_table[i];
-                if (ost->source_index == ist_index) {
 #if CONFIG_AVFILTER
-                frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
-                    !ost->output_video_filter || avfilter_poll_frame(ost->output_video_filter->inputs[0]);
-                while (frame_available) {
-                    AVRational ist_pts_tb;
-                    if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter)
-                        get_filtered_video_frame(ost->output_video_filter, &picture, &ost->picref, &ist_pts_tb);
-                    if (ost->picref)
-                        ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
+            if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
+                ost->input_video_filter) {
+                AVRational sar;
+                if (ist->st->sample_aspect_ratio.num)
+                    sar = ist->st->sample_aspect_ratio;
+                else
+                    sar = ist->st->codec->sample_aspect_ratio;
+                av_vsrc_buffer_add_frame(ost->input_video_filter, &picture, ist->pts, sar);
+            }
+            frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
+                !ost->output_video_filter || avfilter_poll_frame(ost->output_video_filter->inputs[0]);
+            while (frame_available) {
+                AVRational ist_pts_tb;
+                if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter)
+                    get_filtered_video_frame(ost->output_video_filter, &picture, &ost->picref, &ist_pts_tb);
+                if (ost->picref)
+                    ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
 #endif
-                    os = output_files[ost->file_index];
+                os = output_files[ost->file_index].ctx;
 
-                    /* set the input output pts pairs */
-                    //ost->sync_ipts = (double)(ist->pts + input_files[ist->file_index].ts_offset - start_time)/ AV_TIME_BASE;
+                /* set the input output pts pairs */
+                //ost->sync_ipts = (double)(ist->pts + input_files[ist->file_index].ts_offset - start_time)/ AV_TIME_BASE;
 
-                    if (ost->encoding_needed) {
-                        av_assert0(ist->decoding_needed);
-                        switch(ost->st->codec->codec_type) {
-                        case AVMEDIA_TYPE_AUDIO:
-                            do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size);
-                            break;
-                        case AVMEDIA_TYPE_VIDEO:
+                if (ost->encoding_needed) {
+                    av_assert0(ist->decoding_needed);
+                    switch(ost->st->codec->codec_type) {
+                    case AVMEDIA_TYPE_AUDIO:
+                        do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size);
+                        break;
+                    case AVMEDIA_TYPE_VIDEO:
 #if CONFIG_AVFILTER
-                            if (ost->picref->video && !ost->frame_aspect_ratio)
-                                ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
+                        if (ost->picref->video && !ost->frame_aspect_ratio)
+                            ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
 #endif
-                            do_video_out(os, ost, ist, &picture, &frame_size,
-                                         same_quant ? quality : ost->st->codec->global_quality);
-                            if (vstats_filename && frame_size)
-                                do_video_stats(os, ost, frame_size);
-                            break;
-                        case AVMEDIA_TYPE_SUBTITLE:
-                            do_subtitle_out(os, ost, ist, &subtitle,
-                                            pkt->pts);
-                            break;
-                        default:
-                            abort();
-                        }
-                    } else {
-                        AVFrame avframe; //FIXME/XXX remove this
-                        AVPacket opkt;
-                        int64_t ost_tb_start_time= av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
+                        do_video_out(os, ost, ist, &picture, &frame_size,
+                                     same_quant ? quality : ost->st->codec->global_quality);
+                        if (vstats_filename && frame_size)
+                            do_video_stats(os, ost, frame_size);
+                        break;
+                    case AVMEDIA_TYPE_SUBTITLE:
+                        do_subtitle_out(os, ost, ist, &subtitle,
+                                        pkt->pts);
+                        break;
+                    default:
+                        abort();
+                    }
+                } else {
+                    AVFrame avframe; //FIXME/XXX remove this
+                    AVPacket opkt;
+                    int64_t ost_tb_start_time= av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
 
-                        av_init_packet(&opkt);
+                    av_init_packet(&opkt);
 
-                        if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) && !copy_initial_nonkeyframes)
+                    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) && !copy_initial_nonkeyframes)
 #if !CONFIG_AVFILTER
-                            continue;
+                        continue;
 #else
-                            goto cont;
+                        goto cont;
 #endif
 
-                        /* no reencoding needed : output the packet directly */
-                        /* force the input stream PTS */
+                    /* no reencoding needed : output the packet directly */
+                    /* force the input stream PTS */
 
-                        avcodec_get_frame_defaults(&avframe);
-                        ost->st->codec->coded_frame= &avframe;
-                        avframe.key_frame = pkt->flags & AV_PKT_FLAG_KEY;
+                    avcodec_get_frame_defaults(&avframe);
+                    ost->st->codec->coded_frame= &avframe;
+                    avframe.key_frame = pkt->flags & AV_PKT_FLAG_KEY;
 
-                        if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
-                            audio_size += data_size;
-                        else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
-                            video_size += data_size;
-                            ost->sync_opts++;
-                        }
+                    if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
+                        audio_size += data_size;
+                    else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+                        video_size += data_size;
+                        ost->sync_opts++;
+                    }
 
-                        opkt.stream_index= ost->index;
-                        if(pkt->pts != AV_NOPTS_VALUE)
-                            opkt.pts= av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
-                        else
-                            opkt.pts= AV_NOPTS_VALUE;
-
-                        if (pkt->dts == AV_NOPTS_VALUE)
-                            opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
-                        else
-                            opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
-                        opkt.dts -= ost_tb_start_time;
-
-                        opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
-                        opkt.flags= pkt->flags;
-
-                        //FIXME remove the following 2 lines they shall be replaced by the bitstream filters
-                        if(   ost->st->codec->codec_id != CODEC_ID_H264
-                           && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
-                           && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
-                           ) {
-                            if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & AV_PKT_FLAG_KEY))
-                                opkt.destruct= av_destruct_packet;
-                        } else {
-                            opkt.data = data_buf;
-                            opkt.size = data_size;
-                        }
+                    opkt.stream_index= ost->index;
+                    if(pkt->pts != AV_NOPTS_VALUE)
+                        opkt.pts= av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
+                    else
+                        opkt.pts= AV_NOPTS_VALUE;
 
-                        write_frame(os, &opkt, ost->st->codec, ost->bitstream_filters);
-                        ost->st->codec->frame_number++;
-                        ost->frame_number++;
-                        av_free_packet(&opkt);
+                    if (pkt->dts == AV_NOPTS_VALUE)
+                        opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
+                    else
+                        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
+                    opkt.dts -= ost_tb_start_time;
+
+                    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
+                    opkt.flags= pkt->flags;
+
+                    //FIXME remove the following 2 lines they shall be replaced by the bitstream filters
+                    if(   ost->st->codec->codec_id != CODEC_ID_H264
+                       && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
+                       && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
+                       ) {
+                        if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & AV_PKT_FLAG_KEY))
+                            opkt.destruct= av_destruct_packet;
+                    } else {
+                        opkt.data = data_buf;
+                        opkt.size = data_size;
                     }
-#if CONFIG_AVFILTER
-                    cont:
-                    frame_available = (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
-                                       ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
-                    if (ost->picref)
-                        avfilter_unref_buffer(ost->picref);
+
+                    write_frame(os, &opkt, ost->st->codec, ost->bitstream_filters);
+                    ost->st->codec->frame_number++;
+                    ost->frame_number++;
+                    av_free_packet(&opkt);
                 }
+#if CONFIG_AVFILTER
+                cont:
+                frame_available = (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
+                                   ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
+                if (ost->picref)
+                    avfilter_unref_buffer(ost->picref);
+            }
 #endif
-                }
             }
 
         av_free(buffer_to_free);
@@ -1715,152 +1797,110 @@ static int output_packet(InputStream *ist, int ist_index,
         }
     }
  discard_packet:
-    if (pkt == NULL) {
-        /* EOF handling */
-
-        for(i=0;i<nb_ostreams;i++) {
-            ost = ost_table[i];
-            if (ost->source_index == ist_index) {
-                AVCodecContext *enc= ost->st->codec;
-                os = output_files[ost->file_index];
-
-                if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1)
-                    continue;
-                if(ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
-                    continue;
-
-                if (ost->encoding_needed) {
-                    for(;;) {
-                        AVPacket pkt;
-                        int fifo_bytes;
-                        av_init_packet(&pkt);
-                        pkt.stream_index= ost->index;
-
-                        switch(ost->st->codec->codec_type) {
-                        case AVMEDIA_TYPE_AUDIO:
-                            fifo_bytes = av_fifo_size(ost->fifo);
-                            ret = 0;
-                            /* encode any samples remaining in fifo */
-                            if (fifo_bytes > 0) {
-                                int osize = av_get_bytes_per_sample(enc->sample_fmt);
-                                int fs_tmp = enc->frame_size;
-
-                                av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
-                                if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
-                                    enc->frame_size = fifo_bytes / (osize * enc->channels);
-                                } else { /* pad */
-                                    int frame_bytes = enc->frame_size*osize*enc->channels;
-                                    if (allocated_audio_buf_size < frame_bytes)
-                                        exit_program(1);
-                                    generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
-                                }
-
-                                ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, (short *)audio_buf);
-                                pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den,
-                                                          ost->st->time_base.num, enc->sample_rate);
-                                enc->frame_size = fs_tmp;
-                            }
-                            if(ret <= 0) {
-                                ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
-                            }
-                            if (ret < 0) {
-                                fprintf(stderr, "Audio encoding failed\n");
-                                exit_program(1);
-                            }
-                            audio_size += ret;
-                            pkt.flags |= AV_PKT_FLAG_KEY;
-                            break;
-                        case AVMEDIA_TYPE_VIDEO:
-                            ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
-                            if (ret < 0) {
-                                fprintf(stderr, "Video encoding failed\n");
-                                exit_program(1);
-                            }
-                            video_size += ret;
-                            if(enc->coded_frame && enc->coded_frame->key_frame)
-                                pkt.flags |= AV_PKT_FLAG_KEY;
-                            if (ost->logfile && enc->stats_out) {
-                                fprintf(ost->logfile, "%s", enc->stats_out);
-                            }
-                            break;
-                        default:
-                            ret=-1;
-                        }
-
-                        if(ret<=0)
-                            break;
-                        pkt.data= bit_buffer;
-                        pkt.size= ret;
-                        if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
-                            pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
-                        write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters);
-                    }
-                }
-            }
-        }
-    }
 
     return 0;
 }
 
-static void print_sdp(AVFormatContext **avc, int n)
+static void print_sdp(OutputFile *output_files, int n)
 {
     char sdp[2048];
+    int i;
+    AVFormatContext **avc = av_malloc(sizeof(*avc)*n);
+
+    if (!avc)
+        exit_program(1);
+    for (i = 0; i < n; i++)
+        avc[i] = output_files[i].ctx;
 
     av_sdp_create(avc, n, sdp, sizeof(sdp));
     printf("SDP:\n%s\n", sdp);
     fflush(stdout);
+    av_freep(&avc);
+}
+
+static int init_input_stream(int ist_index, OutputStream *output_streams, int nb_output_streams,
+                             char *error, int error_len)
+{
+    int i;
+    InputStream *ist = &input_streams[ist_index];
+    if (ist->decoding_needed) {
+        AVCodec *codec = ist->dec;
+        if (!codec)
+            codec = avcodec_find_decoder(ist->st->codec->codec_id);
+        if (!codec) {
+            snprintf(error, sizeof(error), "Decoder (codec id %d) not found for input stream #%d.%d",
+                    ist->st->codec->codec_id, ist->file_index, ist->st->index);
+            return AVERROR(EINVAL);
+        }
+
+        /* update requested sample format for the decoder based on the
+           corresponding encoder sample format */
+        for (i = 0; i < nb_output_streams; i++) {
+            OutputStream *ost = &output_streams[i];
+            if (ost->source_index == ist_index) {
+                update_sample_fmt(ist->st->codec, codec, ost->st->codec);
+                break;
+            }
+        }
+
+        if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
+            snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d",
+                    ist->file_index, ist->st->index);
+            return AVERROR(EINVAL);
+        }
+        assert_codec_experimental(ist->st->codec, 0);
+        assert_avoptions(ist->opts);
+    }
+
+    ist->pts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames*AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
+    ist->next_pts = AV_NOPTS_VALUE;
+    init_pts_correction(&ist->pts_ctx);
+    ist->is_start = 1;
+
+    return 0;
 }
 
 /*
  * The following code is the main loop of the file converter
  */
-static int transcode(AVFormatContext **output_files,
+static int transcode(OutputFile *output_files,
                      int nb_output_files,
                      InputFile *input_files,
                      int nb_input_files)
 {
-    int ret = 0, i, j, k, n, nb_ostreams = 0;
+    int ret = 0, i;
     AVFormatContext *is, *os;
     AVCodecContext *codec, *icodec;
-    OutputStream *ost, **ost_table = NULL;
+    OutputStream *ost;
     InputStream *ist;
     char error[1024];
     int want_sdp = 1;
-    uint8_t no_packet[MAX_FILES]={0};
+    uint8_t *no_packet;
     int no_packet_count=0;
+    int64_t timer_start;
+
+    if (!(no_packet = av_mallocz(nb_input_files)))
+        exit_program(1);
 
     if (rate_emu)
         for (i = 0; i < nb_input_streams; i++)
             input_streams[i].start = av_gettime();
 
     /* output stream init */
-    nb_ostreams = 0;
     for(i=0;i<nb_output_files;i++) {
-        os = output_files[i];
+        os = output_files[i].ctx;
         if (!os->nb_streams && !(os->oformat->flags & AVFMT_NOSTREAMS)) {
-            av_dump_format(output_files[i], i, output_files[i]->filename, 1);
+            av_dump_format(os, i, os->filename, 1);
             fprintf(stderr, "Output file #%d does not contain any stream\n", i);
             ret = AVERROR(EINVAL);
             goto fail;
         }
-        nb_ostreams += os->nb_streams;
-    }
-
-    ost_table = av_mallocz(sizeof(OutputStream *) * nb_ostreams);
-    if (!ost_table)
-        goto fail;
-    n = 0;
-    for(k=0;k<nb_output_files;k++) {
-        os = output_files[k];
-        for (i = 0; i < os->nb_streams; i++, n++)
-            ost_table[n] = output_streams_for_file[k][i];
     }
 
     /* for each output stream, we compute the right encoding parameters */
-    for(i=0;i<nb_ostreams;i++) {
-        ost = ost_table[i];
-        os = output_files[ost->file_index];
+    for (i = 0; i < nb_output_streams; i++) {
+        ost = &output_streams[i];
+        os = output_files[ost->file_index].ctx;
         ist = &input_streams[ost->source_index];
 
         codec = ost->st->codec;
@@ -1873,8 +1913,10 @@ static int transcode(AVFormatContext **output_files,
         if (ost->st->stream_copy) {
             uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
 
-            if (extra_size > INT_MAX)
+            if (extra_size > INT_MAX) {
+                ret = AVERROR(EINVAL);
                 goto fail;
+            }
 
             /* if stream_copy is selected, no need to decode or encode */
             codec->codec_id = icodec->codec_id;
@@ -1891,8 +1933,10 @@ static int transcode(AVFormatContext **output_files,
             codec->rc_max_rate    = icodec->rc_max_rate;
             codec->rc_buffer_size = icodec->rc_buffer_size;
             codec->extradata= av_mallocz(extra_size);
-            if (!codec->extradata)
+            if (!codec->extradata) {
+                ret = AVERROR(ENOMEM);
                 goto fail;
+            }
             memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
             codec->extradata_size= icodec->extradata_size;
             if(!copy_tb && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/500){
@@ -1947,8 +1991,10 @@ static int transcode(AVFormatContext **output_files,
             switch(codec->codec_type) {
             case AVMEDIA_TYPE_AUDIO:
                 ost->fifo= av_fifo_alloc(1024);
-                if(!ost->fifo)
+                if (!ost->fifo) {
+                    ret = AVERROR(ENOMEM);
                     goto fail;
+                }
                 ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE,AV_SAMPLE_FMT_NONE);
                 if (!codec->sample_rate) {
                     codec->sample_rate = icodec->sample_rate;
@@ -2087,8 +2133,8 @@ static int transcode(AVFormatContext **output_files,
     }
 
     /* open each encoder */
-    for(i=0;i<nb_ostreams;i++) {
-        ost = ost_table[i];
+    for (i = 0; i < nb_output_streams; i++) {
+        ost = &output_streams[i];
         if (ost->encoding_needed) {
             AVCodec *codec = ost->enc;
             AVCodecContext *dec = input_streams[ost->source_index].st->codec;
@@ -2122,62 +2168,21 @@ static int transcode(AVFormatContext **output_files,
         }
     }
 
-    /* open each decoder */
-    for (i = 0; i < nb_input_streams; i++) {
-        ist = &input_streams[i];
-        if (ist->decoding_needed) {
-            AVCodec *codec = ist->dec;
-            if (!codec)
-                codec = avcodec_find_decoder(ist->st->codec->codec_id);
-            if (!codec) {
-                snprintf(error, sizeof(error), "Decoder (codec id %d) not found for input stream #%d.%d",
-                        ist->st->codec->codec_id, ist->file_index, ist->st->index);
-                ret = AVERROR(EINVAL);
-                goto dump_format;
-            }
-
-            /* update requested sample format for the decoder based on the
-               corresponding encoder sample format */
-            for (j = 0; j < nb_ostreams; j++) {
-                ost = ost_table[j];
-                if (ost->source_index == i) {
-                    update_sample_fmt(ist->st->codec, codec, ost->st->codec);
-                    break;
-                }
-            }
-
-            if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
-                snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d",
-                        ist->file_index, ist->st->index);
-                ret = AVERROR(EINVAL);
-                goto dump_format;
-            }
-            assert_codec_experimental(ist->st->codec, 0);
-            assert_avoptions(ost->opts);
-        }
-    }
-
-    /* init pts */
-    for (i = 0; i < nb_input_streams; i++) {
-        AVStream *st;
-        ist = &input_streams[i];
-        st= ist->st;
-        ist->pts = st->avg_frame_rate.num ? - st->codec->has_b_frames*AV_TIME_BASE / av_q2d(st->avg_frame_rate) : 0;
-        ist->next_pts = AV_NOPTS_VALUE;
-        init_pts_correction(&ist->pts_ctx);
-        ist->is_start = 1;
-    }
+    /* init input streams */
+    for (i = 0; i < nb_input_streams; i++)
+        if ((ret = init_input_stream(i, output_streams, nb_output_streams, error, sizeof(error))) < 0)
+            goto dump_format;
 
     /* open files and write file headers */
-    for(i=0;i<nb_output_files;i++) {
-        os = output_files[i];
-        if (avformat_write_header(os, &output_opts[i]) < 0) {
+    for (i = 0; i < nb_output_files; i++) {
+        os = output_files[i].ctx;
+        if (avformat_write_header(os, &output_files[i].opts) < 0) {
             snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
             ret = AVERROR(EINVAL);
             goto dump_format;
         }
-        assert_avoptions(output_opts[i]);
-        if (strcmp(output_files[i]->oformat->name, "rtp")) {
+        assert_avoptions(output_files[i].opts);
+        if (strcmp(os->oformat->name, "rtp")) {
             want_sdp = 0;
         }
     }
@@ -2186,14 +2191,14 @@ static int transcode(AVFormatContext **output_files,
     /* dump the file output parameters - cannot be done before in case
        of stream copy */
     for(i=0;i<nb_output_files;i++) {
-        av_dump_format(output_files[i], i, output_files[i]->filename, 1);
+        av_dump_format(output_files[i].ctx, i, output_files[i].ctx->filename, 1);
     }
 
     /* dump the stream mapping */
     if (verbose >= 0) {
         fprintf(stderr, "Stream mapping:\n");
-        for(i=0;i<nb_ostreams;i++) {
-            ost = ost_table[i];
+        for (i = 0; i < nb_output_streams; i++) {
+            ost = &output_streams[i];
             fprintf(stderr, "  Stream #%d.%d -> #%d.%d",
                     input_streams[ost->source_index].file_index,
                     input_streams[ost->source_index].st->index,
@@ -2237,15 +2242,18 @@ static int transcode(AVFormatContext **output_files,
         /* select the stream that we must read now by looking at the
            smallest output pts */
         file_index = -1;
-        for(i=0;i<nb_ostreams;i++) {
+        for (i = 0; i < nb_output_streams; i++) {
+            OutputFile *of;
             int64_t ipts;
             double  opts;
-            ost = ost_table[i];
-            os = output_files[ost->file_index];
+            ost = &output_streams[i];
+            of = &output_files[ost->file_index];
+            os = output_files[ost->file_index].ctx;
             ist = &input_streams[ost->source_index];
-            if(ist->is_past_recording_time || no_packet[ist->file_index])
+            if (ost->is_past_recording_time || no_packet[ist->file_index] ||
+                (os->pb && avio_tell(os->pb) >= of->limit_filesize))
                 continue;
-                opts = ost->st->pts.val * av_q2d(ost->st->time_base);
+            opts = ost->st->pts.val * av_q2d(ost->st->time_base);
             ipts = ist->pts;
             if (!input_files[ist->file_index].eof_reached){
                 if(ipts < ipts_min) {
@@ -2266,17 +2274,13 @@ static int transcode(AVFormatContext **output_files,
         if (file_index < 0) {
             if(no_packet_count){
                 no_packet_count=0;
-                memset(no_packet, 0, sizeof(no_packet));
+                memset(no_packet, 0, nb_input_files);
                 usleep(10000);
                 continue;
             }
             break;
         }
 
-        /* finish if limit size exhausted */
-        if (limit_filesize != 0 && limit_filesize <= avio_tell(output_files[0]->pb))
-            break;
-
         /* read a frame from it and output it in the fifo */
         is = input_files[file_index].ctx;
         ret= av_read_frame(is, &pkt);
@@ -2294,7 +2298,7 @@ static int transcode(AVFormatContext **output_files,
         }
 
         no_packet_count=0;
-        memset(no_packet, 0, sizeof(no_packet));
+        memset(no_packet, 0, nb_input_files);
 
         if (do_pkt_dump) {
             av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
@@ -2337,15 +2341,8 @@ static int transcode(AVFormatContext **output_files,
             }
         }
 
-        /* finish if recording time exhausted */
-        if (recording_time != INT64_MAX &&
-            av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000}) >= 0) {
-            ist->is_past_recording_time = 1;
-            goto discard_packet;
-        }
-
         //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
-        if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) {
+        if (output_packet(ist, ist_index, output_streams, nb_output_streams, &pkt) < 0) {
 
             if (verbose >= 0)
                 fprintf(stderr, "Error while decoding stream #%d.%d\n",
@@ -2360,31 +2357,32 @@ static int transcode(AVFormatContext **output_files,
         av_free_packet(&pkt);
 
         /* dump report by using the output first video and audio streams */
-        print_report(output_files, ost_table, nb_ostreams, 0);
+        print_report(output_files, output_streams, nb_output_streams, 0, timer_start);
     }
 
     /* at the end of stream, we must flush the decoder buffers */
     for (i = 0; i < nb_input_streams; i++) {
         ist = &input_streams[i];
         if (ist->decoding_needed) {
-            output_packet(ist, i, ost_table, nb_ostreams, NULL);
+            output_packet(ist, i, output_streams, nb_output_streams, NULL);
         }
     }
+    flush_encoders(output_streams, nb_output_streams);
 
     term_exit();
 
     /* write the trailer if needed and close file */
     for(i=0;i<nb_output_files;i++) {
-        os = output_files[i];
+        os = output_files[i].ctx;
         av_write_trailer(os);
     }
 
     /* dump report by using the first video and audio streams */
-    print_report(output_files, ost_table, nb_ostreams, 1);
+    print_report(output_files, output_streams, nb_output_streams, 1, timer_start);
 
     /* close each encoder */
-    for(i=0;i<nb_ostreams;i++) {
-        ost = ost_table[i];
+    for (i = 0; i < nb_output_streams; i++) {
+        ost = &output_streams[i];
         if (ost->encoding_needed) {
             av_freep(&ost->st->codec->stats_in);
             avcodec_close(ost->st->codec);
@@ -2407,10 +2405,11 @@ static int transcode(AVFormatContext **output_files,
 
  fail:
     av_freep(&bit_buffer);
+    av_freep(&no_packet);
 
-    if (ost_table) {
-        for(i=0;i<nb_ostreams;i++) {
-            ost = ost_table[i];
+    if (output_streams) {
+        for (i = 0; i < nb_output_streams; i++) {
+            ost = &output_streams[i];
             if (ost) {
                 if (ost->st->stream_copy)
                     av_freep(&ost->st->codec->extradata);
@@ -2430,10 +2429,8 @@ static int transcode(AVFormatContext **output_files,
                 if (ost->reformat_ctx)
                     av_audio_convert_free(ost->reformat_ctx);
                 av_dict_free(&ost->opts);
-                av_free(ost);
             }
         }
-        av_free(ost_table);
     }
     return ret;
 }
@@ -2777,18 +2774,7 @@ static int opt_map_metadata(const char *opt, const char *arg)
 
 static int opt_input_ts_scale(const char *opt, const char *arg)
 {
-    unsigned int stream;
-    double scale;
-    char *p;
-
-    stream = strtol(arg, &p, 0);
-    if (*p)
-        p++;
-    scale= strtod(p, &p);
-
-    ts_scale = grow_array(ts_scale, sizeof(*ts_scale), &nb_ts_scale, stream + 1);
-    ts_scale[stream] = scale;
-    return 0;
+    return av_dict_set(&ts_scale, opt, arg, 0);
 }
 
 static int opt_recording_time(const char *opt, const char *arg)
@@ -2867,12 +2853,14 @@ static AVCodec *choose_codec(AVFormatContext *s, AVStream *st, enum AVMediaType
  */
 static void add_input_streams(AVFormatContext *ic)
 {
-    int i, rfps, rfps_base;
+    int i, rfps, rfps_base, ret;
 
     for (i = 0; i < ic->nb_streams; i++) {
         AVStream *st = ic->streams[i];
         AVCodecContext *dec = st->codec;
+        AVDictionaryEntry *e = NULL;
         InputStream *ist;
+        char *scale = NULL;
 
         dec->thread_count = thread_count;
 
@@ -2883,8 +2871,16 @@ static void add_input_streams(AVFormatContext *ic)
         ist->discard = 1;
         ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st);
 
-        if (i < nb_ts_scale)
-            ist->ts_scale = ts_scale[i];
+        while ((e = av_dict_get(ts_scale, "", e, AV_DICT_IGNORE_SUFFIX))) {
+            char *p = strchr(e->key, ':');
+
+            if ((ret = check_stream_specifier(ic, st, p ? p + 1 : "")) > 0)
+                scale = e->value;
+            else if (ret < 0)
+                exit_program(1);
+        }
+        if (scale)
+            ist->ts_scale = strtod(scale, NULL);
 
         ist->dec = choose_codec(ic, st, dec->codec_type, codec_names);
 
@@ -3068,8 +3064,7 @@ static int opt_input_file(const char *opt, const char *filename)
     audio_sample_rate = 0;
     audio_channels    = 0;
     audio_sample_fmt  = AV_SAMPLE_FMT_NONE;
-    av_freep(&ts_scale);
-    nb_ts_scale = 0;
+    av_dict_free(&ts_scale);
 
     for (i = 0; i < orig_nb_streams; i++)
         av_dict_free(&opts[i]);
@@ -3114,17 +3109,9 @@ static OutputStream *new_output_stream(AVFormatContext *oc, enum AVMediaType typ
         exit_program(1);
     }
 
-    output_streams_for_file[nb_output_files] =
-        grow_array(output_streams_for_file[nb_output_files],
-                   sizeof(*output_streams_for_file[nb_output_files]),
-                   &nb_output_streams_for_file[nb_output_files],
-                   oc->nb_streams);
-    ost = output_streams_for_file[nb_output_files][idx] =
-        av_mallocz(sizeof(OutputStream));
-    if (!ost) {
-        fprintf(stderr, "Could not alloc output stream\n");
-        exit_program(1);
-    }
+    output_streams = grow_array(output_streams, sizeof(*output_streams), &nb_output_streams,
+                                nb_output_streams + 1);
+    ost = &output_streams[nb_output_streams - 1];
     ost->file_index = nb_output_files;
     ost->index = idx;
     ost->st    = st;
@@ -3187,8 +3174,6 @@ static OutputStream *new_video_stream(AVFormatContext *oc)
         video_enc->pix_fmt = frame_pix_fmt;
         st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
 
-        if (intra_only)
-            video_enc->gop_size = 0;
         if (video_qscale || same_quant) {
             video_enc->flags |= CODEC_FLAG_QSCALE;
             video_enc->global_quality = FF_QP2LAMBDA * video_qscale;
@@ -3383,7 +3368,7 @@ static int opt_streamid(const char *opt, const char *arg)
 static int copy_chapters(int infile, int outfile)
 {
     AVFormatContext *is = input_files[infile].ctx;
-    AVFormatContext *os = output_files[outfile];
+    AVFormatContext *os = output_files[outfile].ctx;
     int i;
 
     for (i = 0; i < is->nb_chapters; i++) {
@@ -3574,8 +3559,14 @@ static void opt_output_file(const char *filename)
     av_dict_copy(&oc->metadata, metadata, 0);
     av_dict_free(&metadata);
 
-    av_dict_copy(&output_opts[nb_output_files], format_opts, 0);
-    output_files[nb_output_files++] = oc;
+
+    output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1);
+    output_files[nb_output_files - 1].ctx       = oc;
+    output_files[nb_output_files - 1].ost_index = nb_output_streams - oc->nb_streams;
+    output_files[nb_output_files - 1].recording_time = recording_time;
+    output_files[nb_output_files - 1].start_time     = start_time;
+    output_files[nb_output_files - 1].limit_filesize = limit_filesize;
+    av_dict_copy(&output_files[nb_output_files - 1].opts, format_opts, 0);
 
     /* check filename in case of an image number is expected */
     if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
@@ -3688,9 +3679,9 @@ static void opt_output_file(const char *filename)
         av_dict_copy(&oc->metadata, input_files[0].ctx->metadata,
                      AV_DICT_DONT_OVERWRITE);
     if (metadata_streams_autocopy)
-        for (i = 0; i < oc->nb_streams; i++) {
-            InputStream *ist = &input_streams[output_streams_for_file[nb_output_files-1][i]->source_index];
-            av_dict_copy(&oc->streams[i]->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
+        for (i = output_files[nb_output_files - 1].ost_index; i < nb_output_streams; i++) {
+            InputStream *ist = &input_streams[output_streams[i].source_index];
+            av_dict_copy(&output_streams[i].st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
         }
 
     frame_rate    = (AVRational){0, 0};
@@ -3700,6 +3691,9 @@ static void opt_output_file(const char *filename)
     audio_channels    = 0;
     audio_sample_fmt  = AV_SAMPLE_FMT_NONE;
     chapters_input_file = INT_MAX;
+    recording_time = INT64_MAX;
+    start_time     = 0;
+    limit_filesize = UINT64_MAX;
 
     av_freep(&meta_data_maps);
     nb_meta_data_maps = 0;
@@ -3708,6 +3702,8 @@ static void opt_output_file(const char *filename)
     metadata_chapters_autocopy = 1;
     av_freep(&stream_maps);
     nb_stream_maps = 0;
+    av_freep(&streamid_map);
+    nb_streamid_map = 0;
 
     av_dict_free(&codec_names);
 
@@ -4061,7 +4057,7 @@ static const OptionDef options[] = {
     { "fs", HAS_ARG | OPT_INT64, {(void*)&limit_filesize}, "set the limit file size in bytes", "limit_size" }, //
     { "ss", HAS_ARG, {(void*)opt_start_time}, "set the start time offset", "time_off" },
     { "itsoffset", HAS_ARG, {(void*)opt_input_ts_offset}, "set the input ts offset", "time_off" },
-    { "itsscale", HAS_ARG, {(void*)opt_input_ts_scale}, "set the input ts scale", "stream:scale" },
+    { "itsscale", HAS_ARG, {(void*)opt_input_ts_scale}, "set the input ts scale", "scale" },
     { "metadata", HAS_ARG, {(void*)opt_metadata}, "add metadata", "string=string" },
     { "dframes", OPT_INT | HAS_ARG, {(void*)&max_frames[AVMEDIA_TYPE_DATA]}, "set the number of data frames to record", "number" },
     { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark},
@@ -4101,13 +4097,12 @@ static const OptionDef options[] = {
     { "padleft", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "Removed, use the pad filter instead", "size" },
     { "padright", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "Removed, use the pad filter instead", "size" },
     { "padcolor", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "Removed, use the pad filter instead", "color" },
-    { "intra", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_only}, "use only intra frames"},
     { "vn", OPT_BOOL | OPT_VIDEO, {(void*)&video_disable}, "disable video" },
     { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" },
     { "qscale", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_qscale}, "use fixed video quantizer scale (VBR)", "q" },
     { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_video_rc_override_string}, "rate control override for specific intervals", "override" },
     { "vcodec", HAS_ARG | OPT_VIDEO, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" },
-    { "me_threshold", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_me_threshold}, "motion estimaton threshold",  "threshold" },
+    { "me_threshold", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_me_threshold}, "motion estimation threshold",  "threshold" },
     { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant},
       "use same quantizer as source (implies VBR)" },
     { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },