git.sesse.net Git - ffmpeg/blobdiff - libavformat/utils.c
cosmetics: indentation
index c2beeaee7a1fffc2a8ffe095c0557173e6858361..658da8f951003b02b417cd54d761406bcc84f5f6 100644 (file)
 #include "avio_internal.h"
 #include "internal.h"
 #include "libavcodec/internal.h"
+#include "libavcodec/bytestream.h"
 #include "libavutil/opt.h"
 #include "libavutil/dict.h"
 #include "libavutil/pixdesc.h"
 #include "metadata.h"
 #include "id3v2.h"
+#include "libavutil/avassert.h"
 #include "libavutil/avstring.h"
 #include "libavutil/mathematics.h"
+#include "libavutil/parseutils.h"
 #include "riff.h"
 #include "audiointerleave.h"
 #include "url.h"
@@ -397,93 +400,6 @@ static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeDa
 /************************************************************/
 /* input media file */
 
-#if FF_API_FORMAT_PARAMETERS
-static AVDictionary *convert_format_parameters(AVFormatParameters *ap)
-{
-    char buf[1024];
-    AVDictionary *opts = NULL;
-
-    if (!ap)
-        return NULL;
-
-    if (ap->time_base.num) {
-        snprintf(buf, sizeof(buf), "%d/%d", ap->time_base.den, ap->time_base.num);
-        av_dict_set(&opts, "framerate", buf, 0);
-    }
-    if (ap->sample_rate) {
-        snprintf(buf, sizeof(buf), "%d", ap->sample_rate);
-        av_dict_set(&opts, "sample_rate", buf, 0);
-    }
-    if (ap->channels) {
-        snprintf(buf, sizeof(buf), "%d", ap->channels);
-        av_dict_set(&opts, "channels", buf, 0);
-    }
-    if (ap->width || ap->height) {
-        snprintf(buf, sizeof(buf), "%dx%d", ap->width, ap->height);
-        av_dict_set(&opts, "video_size", buf, 0);
-    }
-    if (ap->pix_fmt != PIX_FMT_NONE) {
-        av_dict_set(&opts, "pixel_format", av_get_pix_fmt_name(ap->pix_fmt), 0);
-    }
-    if (ap->channel) {
-        snprintf(buf, sizeof(buf), "%d", ap->channel);
-        av_dict_set(&opts, "channel", buf, 0);
-    }
-    if (ap->standard) {
-        av_dict_set(&opts, "standard", ap->standard, 0);
-    }
-    if (ap->mpeg2ts_compute_pcr) {
-        av_dict_set(&opts, "mpeg2ts_compute_pcr", "1", 0);
-    }
-    if (ap->initial_pause) {
-        av_dict_set(&opts, "initial_pause", "1", 0);
-    }
-    return opts;
-}
-
-/**
- * Open a media file from an IO stream. 'fmt' must be specified.
- */
-int av_open_input_stream(AVFormatContext **ic_ptr,
-                         AVIOContext *pb, const char *filename,
-                         AVInputFormat *fmt, AVFormatParameters *ap)
-{
-    int err;
-    AVDictionary *opts;
-    AVFormatContext *ic;
-    AVFormatParameters default_ap;
-
-    if(!ap){
-        ap=&default_ap;
-        memset(ap, 0, sizeof(default_ap));
-    }
-    opts = convert_format_parameters(ap);
-
-    if(!ap->prealloced_context)
-        ic = avformat_alloc_context();
-    else
-        ic = *ic_ptr;
-    if (!ic) {
-        err = AVERROR(ENOMEM);
-        goto fail;
-    }
-    if (pb && fmt && fmt->flags & AVFMT_NOFILE)
-        av_log(ic, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
-                                   "will be ignored with AVFMT_NOFILE format.\n");
-    else
-        ic->pb = pb;
-
-    if ((err = avformat_open_input(&ic, filename, fmt, &opts)) < 0)
-        goto fail;
-    ic->pb = ic->pb ? ic->pb : pb; // don't leak custom pb if it wasn't set above
-
-fail:
-    *ic_ptr = ic;
-    av_dict_free(&opts);
-    return err;
-}
-#endif
-
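
The removed av_open_input_stream() path maps onto avformat_open_input() with a caller-supplied AVIOContext. A minimal sketch under that assumption ('my_pb' is a hypothetical, already-created AVIOContext; the usual libavformat headers are assumed):

    int err;
    AVFormatContext *ic = avformat_alloc_context();
    if (!ic)
        return AVERROR(ENOMEM);
    ic->pb = my_pb;                       /* custom I/O, replaces the old 'pb' argument */
    if ((err = avformat_open_input(&ic, "stream", NULL, NULL)) < 0)
        return err;                       /* the context has already been freed on failure */
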
 /** size of probe buffer, for guessing file type from file contents */
 #define PROBE_BUF_MIN 2048
 #define PROBE_BUF_MAX (1<<20)
@@ -555,27 +471,8 @@ int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
     return ret;
 }
 
-#if FF_API_FORMAT_PARAMETERS
-int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
-                       AVInputFormat *fmt,
-                       int buf_size,
-                       AVFormatParameters *ap)
-{
-    int err;
-    AVDictionary *opts = convert_format_parameters(ap);
-
-    if (!ap || !ap->prealloced_context)
-        *ic_ptr = NULL;
-
-    err = avformat_open_input(ic_ptr, filename, fmt, &opts);
-
-    av_dict_free(&opts);
-    return err;
-}
-#endif
-
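
The removed av_open_input_file() becomes avformat_open_input(); the old AVFormatParameters fields travel as dictionary entries, using the option names convert_format_parameters() used to emit. A sketch with illustrative values and file name:

    AVFormatContext *ic = NULL;
    AVDictionary *opts = NULL;
    int err;
    av_dict_set(&opts, "video_size",   "640x480", 0);
    av_dict_set(&opts, "pixel_format", "yuv420p", 0);
    av_dict_set(&opts, "framerate",    "25/1",    0);
    err = avformat_open_input(&ic, "input.yuv", NULL, &opts);
    av_dict_free(&opts);                  /* drops whatever the demuxer did not consume */
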
 /* open input file and probe the format if necessary */
-static int init_input(AVFormatContext *s, const char *filename)
+static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
 {
     int ret;
     AVProbeData pd = {filename, NULL, 0};
@@ -583,7 +480,7 @@ static int init_input(AVFormatContext *s, const char *filename)
     if (s->pb) {
         s->flags |= AVFMT_FLAG_CUSTOM_IO;
         if (!s->iformat)
-            return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
+            return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
         else if (s->iformat->flags & AVFMT_NOFILE)
             return AVERROR(EINVAL);
         return 0;
@@ -593,19 +490,49 @@ static int init_input(AVFormatContext *s, const char *filename)
         (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
         return 0;
 
-    if ((ret = avio_open(&s->pb, filename, AVIO_FLAG_READ)) < 0)
+    if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
+                          &s->interrupt_callback, options)) < 0)
         return ret;
     if (s->iformat)
         return 0;
-    return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
+    return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
+}
+
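
init_input() now threads s->interrupt_callback and the caller's options into avio_open2() and honours s->probesize. A sketch of wiring an abort flag through that callback ('abort_requested' is a hypothetical application flag):

    static volatile int abort_requested;

    static int interrupt_cb(void *opaque)
    {
        return abort_requested;           /* non-zero aborts blocking I/O */
    }

    /* ... later, when opening the input: */
    AVFormatContext *ic = avformat_alloc_context();
    ic->interrupt_callback.callback = interrupt_cb;
    int err = avformat_open_input(&ic, "http://example.invalid/live", NULL, NULL);
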
+static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
+                               AVPacketList **plast_pktl){
+    AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
+    if (!pktl)
+        return NULL;
+
+    if (*packet_buffer)
+        (*plast_pktl)->next = pktl;
+    else
+        *packet_buffer = pktl;
+
+    /* add the packet in the buffered packet list */
+    *plast_pktl = pktl;
+    pktl->pkt= *pkt;
+    return &pktl->pkt;
+}
+
+static void queue_attached_pictures(AVFormatContext *s)
+{
+    int i;
+    for (i = 0; i < s->nb_streams; i++)
+        if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
+            s->streams[i]->discard < AVDISCARD_ALL) {
+            AVPacket copy = s->streams[i]->attached_pic;
+            copy.destruct = NULL;
+            add_to_pktbuf(&s->raw_packet_buffer, &copy, &s->raw_packet_buffer_end);
+        }
 }
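
queue_attached_pictures() makes streams flagged AV_DISPOSITION_ATTACHED_PIC (cover art and the like) deliver their picture once through the normal packet path; the same data also stays reachable directly. A sketch ('fmt_ctx' is a hypothetical opened context):

    unsigned i;
    for (i = 0; i < fmt_ctx->nb_streams; i++) {
        AVStream *st = fmt_ctx->streams[i];
        if (st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
            AVPacket pic = st->attached_pic;   /* owned by the stream, do not free */
            /* pic.data / pic.size hold one encoded image (e.g. MJPEG/PNG cover art) */
        }
    }
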
 
 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
 {
     AVFormatContext *s = *ps;
     int ret = 0;
-    AVFormatParameters ap = { { 0 } };
     AVDictionary *tmp = NULL;
+    ID3v2ExtraMeta *id3v2_extra_meta = NULL;
 
     if (!s && !(s = avformat_alloc_context()))
         return AVERROR(ENOMEM);
@@ -618,7 +545,7 @@ int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputForma
     if ((ret = av_opt_set_dict(s, &tmp)) < 0)
         goto fail;
 
-    if ((ret = init_input(s, filename)) < 0)
+    if ((ret = init_input(s, filename, &tmp)) < 0)
         goto fail;
 
     /* check filename in case an image number is expected */
@@ -648,12 +575,19 @@ int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputForma
 
     /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
     if (s->pb)
-        ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC);
+        ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
 
     if (s->iformat->read_header)
-        if ((ret = s->iformat->read_header(s, &ap)) < 0)
+        if ((ret = s->iformat->read_header(s)) < 0)
             goto fail;
 
+    if (id3v2_extra_meta &&
+        (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
+        goto fail;
+    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
+
+    queue_attached_pictures(s);
+
     if (s->pb && !s->data_offset)
         s->data_offset = avio_tell(s->pb);
 
@@ -667,6 +601,7 @@ int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputForma
     return 0;
 
 fail:
+    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
     av_dict_free(&tmp);
     if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
         avio_close(s->pb);
@@ -677,24 +612,7 @@ fail:
 
 /*******************************************************/
 
-static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
-                               AVPacketList **plast_pktl){
-    AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
-    if (!pktl)
-        return NULL;
-
-    if (*packet_buffer)
-        (*plast_pktl)->next = pktl;
-    else
-        *packet_buffer = pktl;
-
-    /* add the packet in the buffered packet list */
-    *plast_pktl = pktl;
-    pktl->pkt= *pkt;
-    return &pktl->pkt;
-}
-
-int av_read_packet(AVFormatContext *s, AVPacket *pkt)
+int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
 {
     int ret, i;
     AVStream *st;
@@ -768,7 +686,7 @@ int av_read_packet(AVFormatContext *s, AVPacket *pkt)
             memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
 
             if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
-                //FIXME we dont reduce score to 0 for the case of running out of buffer space in bytes
+                //FIXME we do not reduce score to 0 for the case of running out of buffer space in bytes
                 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
                 if(st->codec->codec_id != CODEC_ID_PROBE){
                     pd->buf_size=0;
@@ -780,35 +698,35 @@ int av_read_packet(AVFormatContext *s, AVPacket *pkt)
     }
 }
 
+#if FF_API_READ_PACKET
+int av_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+    return ff_read_packet(s, pkt);
+}
+#endif
+
+
 /**********************************************************/
 
 /**
  * Get the number of samples of an audio frame. Return -1 on error.
  */
-static int get_audio_frame_size(AVCodecContext *enc, int size)
+static int get_audio_frame_size(AVCodecContext *enc, int size, int mux)
 {
     int frame_size;
 
-    if(enc->codec_id == CODEC_ID_VORBIS)
-        return -1;
+    /* give frame_size priority if demuxing */
+    if (!mux && enc->frame_size > 1)
+        return enc->frame_size;
 
-    if (enc->frame_size <= 1) {
-        int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
+    if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
+        return frame_size;
 
-        if (bits_per_sample) {
-            if (enc->channels == 0)
-                return -1;
-            frame_size = (size << 3) / (bits_per_sample * enc->channels);
-        } else {
-            /* used for example by ADPCM codecs */
-            if (enc->bit_rate == 0)
-                return -1;
-            frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
-        }
-    } else {
-        frame_size = enc->frame_size;
-    }
-    return frame_size;
+    /* fallback to using frame_size if muxing */
+    if (enc->frame_size > 1)
+        return enc->frame_size;
+
+    return -1;
 }
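
For fixed-size PCM the duration helper reduces to the arithmetic the removed branch used to do inline, samples = size * 8 / (bits_per_sample * channels). For example:

    /* 4096 bytes of 16-bit stereo PCM */
    int samples = (4096 * 8) / (16 * 2);   /* == 1024 samples */
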
 
 
@@ -824,7 +742,10 @@ static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
     *pden = 0;
     switch(st->codec->codec_type) {
     case AVMEDIA_TYPE_VIDEO:
-        if(st->time_base.num*1000LL > st->time_base.den){
+        if (st->r_frame_rate.num) {
+            *pnum = st->r_frame_rate.den;
+            *pden = st->r_frame_rate.num;
+        } else if(st->time_base.num*1000LL > st->time_base.den) {
             *pnum = st->time_base.num;
             *pden = st->time_base.den;
         }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
@@ -841,7 +762,7 @@ static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
         }
         break;
     case AVMEDIA_TYPE_AUDIO:
-        frame_size = get_audio_frame_size(st->codec, pkt->size);
+        frame_size = get_audio_frame_size(st->codec, pkt->size, 0);
         if (frame_size <= 0 || st->codec->sample_rate <= 0)
             break;
         *pnum = frame_size;
@@ -906,7 +827,8 @@ static void update_initial_timestamps(AVFormatContext *s, int stream_index,
         st->start_time = pts;
 }
 
-static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
+static void update_initial_durations(AVFormatContext *s, AVStream *st,
+                                     int stream_index, int duration)
 {
     AVPacketList *pktl= s->packet_buffer;
     int64_t cur_dts= 0;
@@ -914,10 +836,10 @@ static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket
     if(st->first_dts != AV_NOPTS_VALUE){
         cur_dts= st->first_dts;
         for(; pktl; pktl= pktl->next){
-            if(pktl->pkt.stream_index == pkt->stream_index){
+            if(pktl->pkt.stream_index == stream_index){
                 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
                     break;
-                cur_dts -= pkt->duration;
+                cur_dts -= duration;
             }
         }
         pktl= s->packet_buffer;
@@ -926,15 +848,16 @@ static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket
         return;
 
     for(; pktl; pktl= pktl->next){
-        if(pktl->pkt.stream_index != pkt->stream_index)
+        if(pktl->pkt.stream_index != stream_index)
             continue;
         if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
            && !pktl->pkt.duration){
             pktl->pkt.dts= cur_dts;
             if(!st->codec->has_b_frames)
                 pktl->pkt.pts= cur_dts;
-            cur_dts += pkt->duration;
-            pktl->pkt.duration= pkt->duration;
+            cur_dts += duration;
+            if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
+                pktl->pkt.duration = duration;
         }else
             break;
     }
@@ -954,19 +877,10 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
     if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
         pkt->dts= AV_NOPTS_VALUE;
 
-    if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
-        //FIXME Set low_delay = 0 when has_b_frames = 1
-        st->codec->has_b_frames = 1;
-
     /* do we have a video B-frame ? */
     delay= st->codec->has_b_frames;
     presentation_delayed = 0;
 
-    // ignore delay caused by frame threading so that the mpeg2-without-dts
-    // warning will not trigger
-    if (delay && st->codec->active_thread_type&FF_THREAD_FRAME)
-        delay -= st->codec->thread_count-1;
-
     /* XXX: need has_b_frame, but cannot get it if the codec is
         not initialized */
     if (delay &&
@@ -986,13 +900,13 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
         pkt->dts= pkt->pts= AV_NOPTS_VALUE;
     }
 
-    if (pkt->duration == 0) {
+    if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
         compute_frame_duration(&num, &den, st, pc, pkt);
         if (den && num) {
             pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
 
             if(pkt->duration != 0 && s->packet_buffer)
-                update_initial_durations(s, st, pkt);
+                update_initial_durations(s, st, pkt->stream_index, pkt->duration);
         }
     }
 
@@ -1053,25 +967,37 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
             st->last_IP_pts= pkt->pts;
             /* cannot compute PTS if not present (we can compute it only
            by knowing the future) */
-        } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
-            if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
-                int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
-                int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
-                if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
-                    pkt->pts += pkt->duration;
-    //                av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
+        } else if (pkt->pts != AV_NOPTS_VALUE ||
+                   pkt->dts != AV_NOPTS_VALUE ||
+                   pkt->duration              ||
+                   st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
+            int duration = pkt->duration;
+            if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
+                compute_frame_duration(&num, &den, st, pc, pkt);
+                if (den && num) {
+                    duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den,
+                                                 den * (int64_t)st->time_base.num,
+                                                 AV_ROUND_DOWN);
+                    if (duration != 0 && s->packet_buffer) {
+                        update_initial_durations(s, st, pkt->stream_index,
+                                                 duration);
+                    }
                 }
             }
 
-            /* presentation is not delayed : PTS and DTS are the same */
-            if(pkt->pts == AV_NOPTS_VALUE)
-                pkt->pts = pkt->dts;
-            update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
-            if(pkt->pts == AV_NOPTS_VALUE)
-                pkt->pts = st->cur_dts;
-            pkt->dts = pkt->pts;
-            if(pkt->pts != AV_NOPTS_VALUE)
-                st->cur_dts = pkt->pts + pkt->duration;
+            if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
+                duration) {
+                /* presentation is not delayed : PTS and DTS are the same */
+                if (pkt->pts == AV_NOPTS_VALUE)
+                    pkt->pts = pkt->dts;
+                update_initial_timestamps(s, pkt->stream_index, pkt->pts,
+                                          pkt->pts);
+                if (pkt->pts == AV_NOPTS_VALUE)
+                    pkt->pts = st->cur_dts;
+                pkt->dts = pkt->pts;
+                if (pkt->pts != AV_NOPTS_VALUE)
+                    st->cur_dts = pkt->pts + duration;
+            }
         }
     }
 
@@ -1081,7 +1007,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
             FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
         if(pkt->dts == AV_NOPTS_VALUE)
             pkt->dts= st->pts_buffer[0];
-        if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here
+        if(st->codec->codec_id == CODEC_ID_H264){ // we skipped it above so we try here
             update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
         }
         if(pkt->dts > st->cur_dts)
@@ -1093,148 +1019,215 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
     /* update flags */
     if(is_intra_only(st->codec))
         pkt->flags |= AV_PKT_FLAG_KEY;
-    else if (pc) {
-        pkt->flags = 0;
-        /* keyframe computation */
-        if (pc->key_frame == 1)
-            pkt->flags |= AV_PKT_FLAG_KEY;
-        else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I)
-            pkt->flags |= AV_PKT_FLAG_KEY;
-    }
     if (pc)
         pkt->convergence_duration = pc->convergence_duration;
 }
 
+static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
+{
+    while (*pkt_buf) {
+        AVPacketList *pktl = *pkt_buf;
+        *pkt_buf = pktl->next;
+        av_free_packet(&pktl->pkt);
+        av_freep(&pktl);
+    }
+    *pkt_buf_end = NULL;
+}
 
-static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
+/**
+ * Parse a packet, add all split parts to parse_queue
+ *
+ * @param pkt packet to parse, NULL when flushing the parser at end of stream
+ */
+static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
 {
-    AVStream *st;
-    int len, ret, i;
+    AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
+    AVStream     *st = s->streams[stream_index];
+    uint8_t    *data = pkt ? pkt->data : NULL;
+    int         size = pkt ? pkt->size : 0;
+    int ret = 0, got_output = 0;
 
-    av_init_packet(pkt);
+    if (!pkt) {
+        av_init_packet(&flush_pkt);
+        pkt = &flush_pkt;
+        got_output = 1;
+    }
 
-    for(;;) {
-        /* select current input stream component */
-        st = s->cur_st;
-        if (st) {
-            if (!st->need_parsing || !st->parser) {
-                /* no parsing needed: we just output the packet as is */
-                /* raw data support */
-                *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
-                compute_pkt_fields(s, st, NULL, pkt);
-                s->cur_st = NULL;
-                if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
-                    (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
-                    ff_reduce_index(s, st->index);
-                    av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
-                }
-                break;
-            } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
-                len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
-                                       st->cur_ptr, st->cur_len,
-                                       st->cur_pkt.pts, st->cur_pkt.dts,
-                                       st->cur_pkt.pos);
-                st->cur_pkt.pts = AV_NOPTS_VALUE;
-                st->cur_pkt.dts = AV_NOPTS_VALUE;
-                /* increment read pointer */
-                st->cur_ptr += len;
-                st->cur_len -= len;
-
-                /* return packet if any */
-                if (pkt->size) {
-                got_packet:
-                    pkt->duration = 0;
-                    pkt->stream_index = st->index;
-                    pkt->pts = st->parser->pts;
-                    pkt->dts = st->parser->dts;
-                    pkt->pos = st->parser->pos;
-                    if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
-                        s->cur_st = NULL;
-                        pkt->destruct= st->cur_pkt.destruct;
-                        st->cur_pkt.destruct= NULL;
-                        st->cur_pkt.data    = NULL;
-                        assert(st->cur_len == 0);
-                    }else{
-                        pkt->destruct = NULL;
-                    }
-                    compute_pkt_fields(s, st, st->parser, pkt);
+    while (size > 0 || (pkt == &flush_pkt && got_output)) {
+        int len;
 
-                    if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
-                        ff_reduce_index(s, st->index);
-                        av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
-                                           0, 0, AVINDEX_KEYFRAME);
-                    }
+        av_init_packet(&out_pkt);
+        len = av_parser_parse2(st->parser,  st->codec,
+                               &out_pkt.data, &out_pkt.size, data, size,
+                               pkt->pts, pkt->dts, pkt->pos);
 
-                    break;
-                }
-            } else {
-                /* free packet */
-                av_free_packet(&st->cur_pkt);
-                s->cur_st = NULL;
+        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
+        /* increment read pointer */
+        data += len;
+        size -= len;
+
+        got_output = !!out_pkt.size;
+
+        if (!out_pkt.size)
+            continue;
+
+        /* set the duration */
+        out_pkt.duration = 0;
+        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
+            if (st->codec->sample_rate > 0) {
+                out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
+                                                    (AVRational){ 1, st->codec->sample_rate },
+                                                    st->time_base,
+                                                    AV_ROUND_DOWN);
             }
-        } else {
-            AVPacket cur_pkt;
-            /* read next packet */
-            ret = av_read_packet(s, &cur_pkt);
-            if (ret < 0) {
-                if (ret == AVERROR(EAGAIN))
-                    return ret;
-                /* return the last frames, if any */
-                for(i = 0; i < s->nb_streams; i++) {
-                    st = s->streams[i];
-                    if (st->parser && st->need_parsing) {
-                        av_parser_parse2(st->parser, st->codec,
-                                        &pkt->data, &pkt->size,
-                                        NULL, 0,
-                                        AV_NOPTS_VALUE, AV_NOPTS_VALUE,
-                                        AV_NOPTS_VALUE);
-                        if (pkt->size)
-                            goto got_packet;
-                    }
-                }
-                /* no more packets: really terminate parsing */
+        } else if (st->codec->time_base.num != 0 &&
+                   st->codec->time_base.den != 0) {
+            out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
+                                                st->codec->time_base,
+                                                st->time_base,
+                                                AV_ROUND_DOWN);
+        }
+
+        out_pkt.stream_index = st->index;
+        out_pkt.pts = st->parser->pts;
+        out_pkt.dts = st->parser->dts;
+        out_pkt.pos = st->parser->pos;
+
+        if (st->parser->key_frame == 1 ||
+            (st->parser->key_frame == -1 &&
+             st->parser->pict_type == AV_PICTURE_TYPE_I))
+            out_pkt.flags |= AV_PKT_FLAG_KEY;
+
+        compute_pkt_fields(s, st, st->parser, &out_pkt);
+
+        if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
+            out_pkt.flags & AV_PKT_FLAG_KEY) {
+            ff_reduce_index(s, st->index);
+            av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
+                               0, 0, AVINDEX_KEYFRAME);
+        }
+
+        if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
+            out_pkt.destruct = pkt->destruct;
+            pkt->destruct = NULL;
+        }
+        if ((ret = av_dup_packet(&out_pkt)) < 0)
+            goto fail;
+
+        if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
+            av_free_packet(&out_pkt);
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+    }
+
+
+    /* end of the stream => close and free the parser */
+    if (pkt == &flush_pkt) {
+        av_parser_close(st->parser);
+        st->parser = NULL;
+    }
+
+fail:
+    av_free_packet(pkt);
+    return ret;
+}
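
parse_packet() is a buffered variant of the usual av_parser_parse2() loop. A standalone sketch of that loop, assuming 'parser'/'avctx' are initialized and 'data'/'size' describe one demuxed packet:

    while (size > 0) {
        uint8_t *out_data = NULL;
        int      out_size = 0;
        int len = av_parser_parse2(parser, avctx, &out_data, &out_size,
                                   data, size,
                                   AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        data += len;
        size -= len;
        if (out_size) {
            /* out_data / out_size describe one complete frame */
        }
    }
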
+
+static int read_from_packet_buffer(AVPacketList **pkt_buffer,
+                                   AVPacketList **pkt_buffer_end,
+                                   AVPacket      *pkt)
+{
+    AVPacketList *pktl;
+    av_assert0(*pkt_buffer);
+    pktl = *pkt_buffer;
+    *pkt = pktl->pkt;
+    *pkt_buffer = pktl->next;
+    if (!pktl->next)
+        *pkt_buffer_end = NULL;
+    av_freep(&pktl);
+    return 0;
+}
+
+static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
+{
+    int ret = 0, i, got_packet = 0;
+
+    av_init_packet(pkt);
+
+    while (!got_packet && !s->parse_queue) {
+        AVStream *st;
+        AVPacket cur_pkt;
+
+        /* read next packet */
+        ret = ff_read_packet(s, &cur_pkt);
+        if (ret < 0) {
+            if (ret == AVERROR(EAGAIN))
                 return ret;
+            /* flush the parsers */
+            for(i = 0; i < s->nb_streams; i++) {
+                st = s->streams[i];
+                if (st->parser && st->need_parsing)
+                    parse_packet(s, NULL, st->index);
             }
-            st = s->streams[cur_pkt.stream_index];
-            st->cur_pkt= cur_pkt;
-
-            if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
-               st->cur_pkt.dts != AV_NOPTS_VALUE &&
-               st->cur_pkt.pts < st->cur_pkt.dts){
-                av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
-                    st->cur_pkt.stream_index,
-                    st->cur_pkt.pts,
-                    st->cur_pkt.dts,
-                    st->cur_pkt.size);
-//                av_free_packet(&st->cur_pkt);
-//                return -1;
+            /* all remaining packets are now in parse_queue =>
+             * really terminate parsing */
+            break;
+        }
+        ret = 0;
+        st  = s->streams[cur_pkt.stream_index];
+
+        if (cur_pkt.pts != AV_NOPTS_VALUE &&
+            cur_pkt.dts != AV_NOPTS_VALUE &&
+            cur_pkt.pts < cur_pkt.dts) {
+            av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
+                   cur_pkt.stream_index,
+                   cur_pkt.pts,
+                   cur_pkt.dts,
+                   cur_pkt.size);
+        }
+        if (s->debug & FF_FDEBUG_TS)
+            av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
+                   cur_pkt.stream_index,
+                   cur_pkt.pts,
+                   cur_pkt.dts,
+                   cur_pkt.size,
+                   cur_pkt.duration,
+                   cur_pkt.flags);
+
+        if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
+            st->parser = av_parser_init(st->codec->codec_id);
+            if (!st->parser) {
+                /* no parser available: just output the raw packets */
+                st->need_parsing = AVSTREAM_PARSE_NONE;
+            } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
+                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
+            } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
+                st->parser->flags |= PARSER_FLAG_ONCE;
             }
+        }
 
-            if(s->debug & FF_FDEBUG_TS)
-                av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
-                    st->cur_pkt.stream_index,
-                    st->cur_pkt.pts,
-                    st->cur_pkt.dts,
-                    st->cur_pkt.size,
-                    st->cur_pkt.duration,
-                    st->cur_pkt.flags);
-
-            s->cur_st = st;
-            st->cur_ptr = st->cur_pkt.data;
-            st->cur_len = st->cur_pkt.size;
-            if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
-                st->parser = av_parser_init(st->codec->codec_id);
-                if (!st->parser) {
-                    /* no parser available: just output the raw packets */
-                    st->need_parsing = AVSTREAM_PARSE_NONE;
-                }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
-                    st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
-                }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
-                    st->parser->flags |= PARSER_FLAG_ONCE;
-                }
+        if (!st->need_parsing || !st->parser) {
+            /* no parsing needed: we just output the packet as is */
+            *pkt = cur_pkt;
+            compute_pkt_fields(s, st, NULL, pkt);
+            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
+                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
+                ff_reduce_index(s, st->index);
+                av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
             }
+            got_packet = 1;
+        } else if (st->discard < AVDISCARD_ALL) {
+            if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
+                return ret;
+        } else {
+            /* free packet */
+            av_free_packet(&cur_pkt);
         }
     }
+
+    if (!got_packet && s->parse_queue)
+        ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
+
     if(s->debug & FF_FDEBUG_TS)
         av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
             pkt->stream_index,
@@ -1244,84 +1237,69 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
             pkt->duration,
             pkt->flags);
 
-    return 0;
+    return ret;
 }
 
 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
 {
-    AVPacketList *pktl;
-    int eof=0;
-    const int genpts= s->flags & AVFMT_FLAG_GENPTS;
+    const int genpts = s->flags & AVFMT_FLAG_GENPTS;
+    int          eof = 0;
+
+    if (!genpts)
+        return s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
+                                                          &s->packet_buffer_end,
+                                                          pkt) :
+                                  read_frame_internal(s, pkt);
+
+    for (;;) {
+        int ret;
+        AVPacketList *pktl = s->packet_buffer;
 
-    for(;;){
-        pktl = s->packet_buffer;
         if (pktl) {
-            AVPacket *next_pkt= &pktl->pkt;
+            AVPacket *next_pkt = &pktl->pkt;
 
-            if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
+            if (next_pkt->dts != AV_NOPTS_VALUE) {
                 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
-                while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
-                    if(   pktl->pkt.stream_index == next_pkt->stream_index
-                       && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
-                       && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
-                        next_pkt->pts= pktl->pkt.dts;
+                while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
+                    if (pktl->pkt.stream_index == next_pkt->stream_index &&
+                        (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
+                         av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
+                        next_pkt->pts = pktl->pkt.dts;
                     }
-                    pktl= pktl->next;
+                    pktl = pktl->next;
                 }
                 pktl = s->packet_buffer;
             }
 
-            if(   next_pkt->pts != AV_NOPTS_VALUE
-               || next_pkt->dts == AV_NOPTS_VALUE
-               || !genpts || eof){
-                /* read packet from packet buffer, if there is data */
-                *pkt = *next_pkt;
-                s->packet_buffer = pktl->next;
-                av_free(pktl);
-                return 0;
-            }
+            /* read packet from packet buffer, if there is data */
+            if (!(next_pkt->pts == AV_NOPTS_VALUE &&
+                  next_pkt->dts != AV_NOPTS_VALUE && !eof))
+                return read_from_packet_buffer(&s->packet_buffer,
+                                               &s->packet_buffer_end, pkt);
         }
-        if(genpts){
-            int ret= read_frame_internal(s, pkt);
-            if(ret<0){
-                if(pktl && ret != AVERROR(EAGAIN)){
-                    eof=1;
-                    continue;
-                }else
-                    return ret;
-            }
 
-            if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
-                                           &s->packet_buffer_end)) < 0)
-                return AVERROR(ENOMEM);
-        }else{
-            assert(!s->packet_buffer);
-            return read_frame_internal(s, pkt);
+        ret = read_frame_internal(s, pkt);
+        if (ret < 0) {
+            if (pktl && ret != AVERROR(EAGAIN)) {
+                eof = 1;
+                continue;
+            } else
+                return ret;
         }
+
+        if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
+                          &s->packet_buffer_end)) < 0)
+            return AVERROR(ENOMEM);
     }
 }
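
The rewritten av_read_frame() keeps its calling convention, so the usual demuxing loop is unchanged ('fmt_ctx' is a hypothetical opened context); setting AVFMT_FLAG_GENPTS opts into the buffering path above:

    AVPacket pkt;
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        /* timestamps are in fmt_ctx->streams[pkt.stream_index]->time_base */
        av_free_packet(&pkt);
    }
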
 
 /* XXX: suppress the packet queue */
 static void flush_packet_queue(AVFormatContext *s)
 {
-    AVPacketList *pktl;
+    free_packet_buffer(&s->parse_queue,       &s->parse_queue_end);
+    free_packet_buffer(&s->packet_buffer,     &s->packet_buffer_end);
+    free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
 
-    for(;;) {
-        pktl = s->packet_buffer;
-        if (!pktl)
-            break;
-        s->packet_buffer = pktl->next;
-        av_free_packet(&pktl->pkt);
-        av_free(pktl);
-    }
-    while(s->raw_packet_buffer){
-        pktl = s->raw_packet_buffer;
-        s->raw_packet_buffer = pktl->next;
-        av_free_packet(&pktl->pkt);
-        av_free(pktl);
-    }
-    s->packet_buffer_end=
-    s->raw_packet_buffer_end= NULL;
     s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
 }
 
@@ -1338,7 +1316,8 @@ int av_find_default_stream_index(AVFormatContext *s)
         return -1;
     for(i = 0; i < s->nb_streams; i++) {
         st = s->streams[i];
-        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
+            !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
             return i;
         }
         if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
@@ -1357,8 +1336,6 @@ void ff_read_frame_flush(AVFormatContext *s)
 
     flush_packet_queue(s);
 
-    s->cur_st = NULL;
-
     /* for each stream, reset read state */
     for(i = 0; i < s->nb_streams; i++) {
         st = s->streams[i];
@@ -1366,14 +1343,10 @@ void ff_read_frame_flush(AVFormatContext *s)
         if (st->parser) {
             av_parser_close(st->parser);
             st->parser = NULL;
-            av_free_packet(&st->cur_pkt);
         }
         st->last_IP_pts = AV_NOPTS_VALUE;
         st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
         st->reference_dts = AV_NOPTS_VALUE;
-        /* fail safe */
-        st->cur_ptr = NULL;
-        st->cur_len = 0;
 
         st->probe_packets = MAX_PROBE_PACKETS;
 
@@ -1382,13 +1355,6 @@ void ff_read_frame_flush(AVFormatContext *s)
     }
 }
 
-#if FF_API_SEEK_PUBLIC
-void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
-{
-    ff_update_cur_dts(s, ref_st, timestamp);
-}
-#endif
-
 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
 {
     int i;
@@ -1510,12 +1476,6 @@ int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
                                      wanted_timestamp, flags);
 }
 
-#if FF_API_SEEK_PUBLIC
-int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
-    return ff_seek_frame_binary(s, stream_index, target_ts, flags);
-}
-#endif
-
 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
 {
     AVInputFormat *avif= s->iformat;
@@ -1577,18 +1537,6 @@ int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts
     return 0;
 }
 
-#if FF_API_SEEK_PUBLIC
-int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
-                      int64_t pos_min, int64_t pos_max, int64_t pos_limit,
-                      int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
-                      int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
-{
-    return ff_gen_search(s, stream_index, target_ts, pos_min, pos_max,
-                         pos_limit, ts_min, ts_max, flags, ts_ret,
-                         read_timestamp);
-}
-#endif
-
 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
                       int64_t pos_min, int64_t pos_max, int64_t pos_limit,
                       int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
@@ -1701,14 +1649,6 @@ int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
 
 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
     int64_t pos_min, pos_max;
-#if 0
-    AVStream *st;
-
-    if (stream_index < 0)
-        return -1;
-
-    st= s->streams[stream_index];
-#endif
 
     pos_min = s->data_offset;
     pos_max = avio_size(s->pb) - 1;
@@ -1718,9 +1658,6 @@ static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, in
 
     avio_seek(s->pb, pos, SEEK_SET);
 
-#if 0
-    av_update_cur_dts(s, st, ts);
-#endif
     return 0;
 }
 
@@ -1783,7 +1720,8 @@ static int seek_frame_generic(AVFormatContext *s,
     return 0;
 }
 
-int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
+static int seek_frame_internal(AVFormatContext *s, int stream_index,
+                               int64_t timestamp, int flags)
 {
     int ret;
     AVStream *st;
@@ -1826,14 +1764,29 @@ int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int f
         return -1;
 }
 
+int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
+{
+    int ret = seek_frame_internal(s, stream_index, timestamp, flags);
+
+    if (ret >= 0)
+        queue_attached_pictures(s);
+
+    return ret;
+}
+
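
av_seek_frame() is now a thin wrapper that re-queues attached pictures after a successful seek. A sketch of seeking stream 0 to the 10 second mark ('fmt_ctx' is hypothetical):

    int64_t ts = av_rescale_q(10 * AV_TIME_BASE, AV_TIME_BASE_Q,
                              fmt_ctx->streams[0]->time_base);
    if (av_seek_frame(fmt_ctx, 0, ts, AVSEEK_FLAG_BACKWARD) >= 0) {
        /* demuxing resumes at or before the requested timestamp,
         * and any attached pictures are queued again */
    }
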
 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
 {
     if(min_ts > ts || max_ts < ts)
         return -1;
 
     if (s->iformat->read_seek2) {
+        int ret;
         ff_read_frame_flush(s);
-        return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
+        ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
+
+        if (ret >= 0)
+            queue_attached_pictures(s);
+        return ret;
     }
 
     if(s->iformat->read_timestamp){
@@ -1865,6 +1818,8 @@ static int has_duration(AVFormatContext *ic)
         if (st->duration != AV_NOPTS_VALUE)
             return 1;
     }
+    if (ic->duration != AV_NOPTS_VALUE)
+        return 1;
     return 0;
 }
 
@@ -1976,8 +1931,6 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
     int64_t filesize, offset, duration;
     int retry=0;
 
-    ic->cur_st = NULL;
-
     /* flush packet queue */
     flush_packet_queue(ic);
 
@@ -1989,7 +1942,6 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
         if (st->parser) {
             av_parser_close(st->parser);
             st->parser= NULL;
-            av_free_packet(&st->cur_pkt);
         }
     }
 
@@ -2009,7 +1961,7 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
                 break;
 
             do {
-                ret = av_read_packet(ic, pkt);
+                ret = ff_read_packet(ic, pkt);
             } while(ret == AVERROR(EAGAIN));
             if (ret != 0)
                 break;
@@ -2091,23 +2043,20 @@ static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
     }
 }
 
-static int has_codec_parameters(AVCodecContext *avctx)
+static int has_codec_parameters(AVStream *st)
 {
+    AVCodecContext *avctx = st->codec;
     int val;
     switch (avctx->codec_type) {
     case AVMEDIA_TYPE_AUDIO:
-        val = avctx->sample_rate && avctx->channels && avctx->sample_fmt != AV_SAMPLE_FMT_NONE;
-        if (!avctx->frame_size &&
-            (avctx->codec_id == CODEC_ID_VORBIS ||
-             avctx->codec_id == CODEC_ID_AAC ||
-             avctx->codec_id == CODEC_ID_MP1 ||
-             avctx->codec_id == CODEC_ID_MP2 ||
-             avctx->codec_id == CODEC_ID_MP3 ||
-             avctx->codec_id == CODEC_ID_CELT))
+        val = avctx->sample_rate && avctx->channels;
+        if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
             return 0;
         break;
     case AVMEDIA_TYPE_VIDEO:
-        val = avctx->width && avctx->pix_fmt != PIX_FMT_NONE;
+        val = avctx->width;
+        if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
+            return 0;
         break;
     default:
         val = 1;
@@ -2122,46 +2071,68 @@ static int has_decode_delay_been_guessed(AVStream *st)
         st->info->nb_decoded_frames >= 6;
 }
 
+/* returns 1 if decoded data was returned, 0 if not, or a negative error */
 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
 {
-    int16_t *samples;
     AVCodec *codec;
-    int got_picture, data_size, ret=0;
+    int got_picture = 1, ret = 0;
     AVFrame picture;
+    AVPacket pkt = *avpkt;
+
+    if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
+        AVDictionary *thread_opt = NULL;
+
+        codec = st->codec->codec ? st->codec->codec :
+                                   avcodec_find_decoder(st->codec->codec_id);
 
-    if(!st->codec->codec){
-        codec = avcodec_find_decoder(st->codec->codec_id);
-        if (!codec)
+        if (!codec) {
+            st->info->found_decoder = -1;
             return -1;
-        ret = avcodec_open2(st->codec, codec, options);
-        if (ret < 0)
+        }
+
+        /* force thread count to 1 since the h264 decoder will not extract SPS
+         *  and PPS to extradata during multi-threaded decoding */
+        av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
+        ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
+        if (!options)
+            av_dict_free(&thread_opt);
+        if (ret < 0) {
+            st->info->found_decoder = -1;
             return ret;
-    }
+        }
+        st->info->found_decoder = 1;
+    } else if (!st->info->found_decoder)
+        st->info->found_decoder = 1;
 
-    if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st) ||
-       (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF)) {
+    if (st->info->found_decoder < 0)
+        return -1;
+
+    while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
+           ret >= 0 &&
+           (!has_codec_parameters(st)         ||
+           !has_decode_delay_been_guessed(st) ||
+           (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
+        got_picture = 0;
+        avcodec_get_frame_defaults(&picture);
         switch(st->codec->codec_type) {
         case AVMEDIA_TYPE_VIDEO:
-            avcodec_get_frame_defaults(&picture);
             ret = avcodec_decode_video2(st->codec, &picture,
-                                        &got_picture, avpkt);
-            if (got_picture)
-                st->info->nb_decoded_frames++;
+                                        &got_picture, &pkt);
             break;
         case AVMEDIA_TYPE_AUDIO:
-            data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
-            samples = av_malloc(data_size);
-            if (!samples)
-                goto fail;
-            ret = avcodec_decode_audio3(st->codec, samples,
-                                        &data_size, avpkt);
-            av_free(samples);
+            ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
             break;
         default:
             break;
         }
+        if (ret >= 0) {
+            if (got_picture)
+                st->info->nb_decoded_frames++;
+            pkt.data += ret;
+            pkt.size -= ret;
+            ret       = got_picture;
+        }
     }
- fail:
     return ret;
 }
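
try_decode_frame() now uses the AVFrame-based audio decode call and advances through the packet by the number of bytes each call consumed. The bare pattern, assuming an opened 'avctx' and a filled 'pkt':

    AVFrame frame;
    int got_frame = 0, used;
    avcodec_get_frame_defaults(&frame);
    used = avcodec_decode_audio4(avctx, &frame, &got_frame, &pkt);
    if (used >= 0 && got_frame) {
        /* frame.nb_samples decoded samples are now available */
    }
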
 
@@ -2255,13 +2226,6 @@ static int tb_unreliable(AVCodecContext *c){
     return 0;
 }
 
-#if FF_API_FORMAT_PARAMETERS
-int av_find_stream_info(AVFormatContext *ic)
-{
-    return avformat_find_stream_info(ic, NULL);
-}
-#endif
-
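
The removed av_find_stream_info() wrapper is replaced by avformat_find_stream_info(), which takes one options dictionary per stream (or NULL). A sketch:

    unsigned i;
    int err;
    AVDictionary **opts = av_mallocz(ic->nb_streams * sizeof(*opts));
    if (!opts)
        return AVERROR(ENOMEM);
    err = avformat_find_stream_info(ic, opts);
    for (i = 0; i < ic->nb_streams; i++)
        av_dict_free(&opts[i]);           /* entries not consumed remain here */
    av_freep(&opts);
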
 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
 {
     int i, count, ret, read_size, j;
@@ -2272,15 +2236,9 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
 
     for(i=0;i<ic->nb_streams;i++) {
         AVCodec *codec;
+        AVDictionary *thread_opt = NULL;
         st = ic->streams[i];
 
-        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
-            st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
-/*            if(!st->time_base.num)
-                st->time_base= */
-            if(!st->codec->time_base.num)
-                st->codec->time_base= st->time_base;
-        }
         //only for the split stuff
         if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
             st->parser = av_parser_init(st->codec->codec_id);
@@ -2288,19 +2246,27 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
                 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
             }
         }
-        assert(!st->codec->codec);
-        codec = avcodec_find_decoder(st->codec->codec_id);
+        codec = st->codec->codec ? st->codec->codec :
+                                   avcodec_find_decoder(st->codec->codec_id);
+
+        /* force thread count to 1 since the h264 decoder will not extract SPS
+         *  and PPS to extradata during multi-threaded decoding */
+        av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
 
         /* Ensure that subtitle_header is properly set. */
         if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
             && codec && !st->codec->codec)
-            avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
+            avcodec_open2(st->codec, codec, options ? &options[i]
+                              : &thread_opt);
 
         //try to just open decoders, in case this is enough to get parameters
-        if(!has_codec_parameters(st->codec)){
+        if (!has_codec_parameters(st)) {
             if (codec && !st->codec->codec)
-                avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
+                avcodec_open2(st->codec, codec, options ? &options[i]
+                              : &thread_opt);
         }
+        if (!options)
+            av_dict_free(&thread_opt);
     }
 
     for (i=0; i<ic->nb_streams; i++) {
@@ -2310,7 +2276,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
     count = 0;
     read_size = 0;
     for(;;) {
-        if(url_interrupt_cb()){
+        if (ff_check_interrupt(&ic->interrupt_callback)){
             ret= AVERROR_EXIT;
             av_log(ic, AV_LOG_DEBUG, "interrupted\n");
             break;
@@ -2321,7 +2287,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
             int fps_analyze_framecount = 20;
 
             st = ic->streams[i];
-            if (!has_codec_parameters(st->codec))
+            if (!has_codec_parameters(st))
                 break;
             /* if the timebase is coarse (like the usual millisecond precision
                of mkv), we need to analyze more frames to reliably arrive at
@@ -2337,7 +2303,9 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
                 break;
             if(st->parser && st->parser->parser->split && !st->codec->extradata)
                 break;
-            if(st->first_dts == AV_NOPTS_VALUE)
+            if (st->first_dts == AV_NOPTS_VALUE &&
+                (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
+                 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
                 break;
         }
         if (i == ic->nb_streams) {
@@ -2365,14 +2333,32 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
             continue;
 
         if (ret < 0) {
-            /* EOF or error */
+            /* EOF or error*/
+            AVPacket empty_pkt = { 0 };
+            int err = 0;
+            av_init_packet(&empty_pkt);
+
             ret = -1; /* we could not have all the codec parameters before EOF */
             for(i=0;i<ic->nb_streams;i++) {
                 st = ic->streams[i];
-                if (!has_codec_parameters(st->codec)){
+
+                /* flush the decoders */
+                if (st->info->found_decoder == 1) {
+                    do {
+                        err = try_decode_frame(st, &empty_pkt,
+                                               (options && i < orig_nb_streams) ?
+                                               &options[i] : NULL);
+                    } while (err > 0 && !has_codec_parameters(st));
+                }
+
+                if (err < 0) {
+                    av_log(ic, AV_LOG_WARNING,
+                           "decoding for stream %d failed\n", st->index);
+                } else if (!has_codec_parameters(st)) {
                     char buf[256];
                     avcodec_string(buf, sizeof(buf), st->codec, 0);
-                    av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
+                    av_log(ic, AV_LOG_WARNING,
+                           "Could not find codec parameters (%s)\n", buf);
                 } else {
                     ret = 0;
                 }
@@ -2388,7 +2374,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
 
         st = ic->streams[pkt->stream_index];
         if (st->codec_info_nb_frames>1) {
-            if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
+            if (av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
                 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
                 break;
             }
@@ -2408,7 +2394,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
                 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) {
                     int framerate= get_std_framerate(i);
                     int ticks= lrintf(dur*framerate/(1001*12));
-                    double error= dur - ticks*1001*12/(double)framerate;
+                    double error = dur - (double)ticks*1001*12 / framerate;
                     st->info->duration_error[i] += error*error;
                 }
                 st->info->duration_count++;
@@ -2421,9 +2407,11 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
         }
         if(st->parser && st->parser->parser->split && !st->codec->extradata){
             int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
-            if(i){
+            if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
                 st->codec->extradata_size= i;
                 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+                if (!st->codec->extradata)
+                    return AVERROR(ENOMEM);
                 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
                 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
             }
@@ -2438,7 +2426,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
            least one frame of codec data, this makes sure the codec initializes
            the channel configuration and does not only trust the values from the container.
         */
-        try_decode_frame(st, pkt, (options && i < orig_nb_streams )? &options[i] : NULL);
+        try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
 
         st->codec_info_nb_frames++;
         count++;
@@ -2447,16 +2435,15 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
     // close codecs which were opened in try_decode_frame()
     for(i=0;i<ic->nb_streams;i++) {
         st = ic->streams[i];
-        if(st->codec->codec)
-            avcodec_close(st->codec);
+        avcodec_close(st->codec);
     }
     for(i=0;i<ic->nb_streams;i++) {
         st = ic->streams[i];
-        if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
-            av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
-                     (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
-                      st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
         if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+            if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
+                av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
+                          (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
+                          st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
             // the check for tb_unreliable() is not completely correct, since this is not about handling
              // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
             // ipmovie.c produces.
@@ -2483,17 +2470,6 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
                 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
                     av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
             }
-
-            if (!st->r_frame_rate.num){
-                if(    st->codec->time_base.den * (int64_t)st->time_base.num
-                    <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
-                    st->r_frame_rate.num = st->codec->time_base.den;
-                    st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
-                }else{
-                    st->r_frame_rate.num = st->time_base.den;
-                    st->r_frame_rate.den = st->time_base.num;
-                }
-            }
         }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
             if(!st->codec->bits_per_coded_sample)
                 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
@@ -2517,34 +2493,12 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
 
     compute_chapters_end(ic);
 
-#if 0
-    /* correct DTS for B-frame streams with no timestamps */
-    for(i=0;i<ic->nb_streams;i++) {
-        st = ic->streams[i];
-        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
-            if(b-frames){
-                ppktl = &ic->packet_buffer;
-                while(ppkt1){
-                    if(ppkt1->stream_index != i)
-                        continue;
-                    if(ppkt1->pkt->dts < 0)
-                        break;
-                    if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
-                        break;
-                    ppkt1->pkt->dts -= delta;
-                    ppkt1= ppkt1->next;
-                }
-                if(ppkt1)
-                    continue;
-                st->cur_dts -= delta;
-            }
-        }
-    }
-#endif
-
  find_stream_info_err:
-    for (i=0; i < ic->nb_streams; i++)
+    for (i=0; i < ic->nb_streams; i++) {
+        if (ic->streams[i]->codec)
+            ic->streams[i]->codec->thread_count = 0;
         av_freep(&ic->streams[i]->info);
+    }
     return ret;
 }
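With the function complete, here is a minimal sketch of the probe-then-free call sequence it is meant for (the function name, file name and variable names are illustrative; avformat_close_input() is the replacement introduced further down in this diff):

    static int probe_file(const char *filename)
    {
        AVFormatContext *ic = NULL;
        int ret;

        if ((ret = avformat_open_input(&ic, filename, NULL, NULL)) < 0)
            return ret;
        /* optionally pass one AVDictionary* of codec options per stream instead of NULL */
        if ((ret = avformat_find_stream_info(ic, NULL)) < 0) {
            avformat_close_input(&ic);   /* frees everything and sets ic to NULL */
            return ret;
        }
        av_dump_format(ic, 0, filename, 0);
        avformat_close_input(&ic);
        return 0;
    }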
 
@@ -2632,14 +2586,6 @@ int av_read_pause(AVFormatContext *s)
     return AVERROR(ENOSYS);
 }
 
-void av_close_input_stream(AVFormatContext *s)
-{
-    flush_packet_queue(s);
-    if (s->iformat->read_close)
-        s->iformat->read_close(s);
-    avformat_free_context(s);
-}
-
 void avformat_free_context(AVFormatContext *s)
 {
     int i;
@@ -2654,8 +2600,9 @@ void avformat_free_context(AVFormatContext *s)
         st = s->streams[i];
         if (st->parser) {
             av_parser_close(st->parser);
-            av_free_packet(&st->cur_pkt);
         }
+        if (st->attached_pic.data)
+            av_free_packet(&st->attached_pic);
         av_dict_free(&st->metadata);
         av_free(st->index_entries);
         av_free(st->codec->extradata);
@@ -2682,25 +2629,27 @@ void avformat_free_context(AVFormatContext *s)
     av_free(s);
 }
 
+#if FF_API_CLOSE_INPUT_FILE
 void av_close_input_file(AVFormatContext *s)
 {
+    avformat_close_input(&s);
+}
+#endif
+
+void avformat_close_input(AVFormatContext **ps)
+{
+    AVFormatContext *s = *ps;
     AVIOContext *pb = (s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
                        NULL : s->pb;
-    av_close_input_stream(s);
+    flush_packet_queue(s);
+    if (s->iformat->read_close)
+        s->iformat->read_close(s);
+    avformat_free_context(s);
+    *ps = NULL;
     if (pb)
         avio_close(pb);
 }
 
-#if FF_API_NEW_STREAM
-AVStream *av_new_stream(AVFormatContext *s, int id)
-{
-    AVStream *st = avformat_new_stream(s, NULL);
-    if (st)
-        st->id = id;
-    return st;
-}
-#endif
-
 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
 {
     AVStream *st;
@@ -2739,7 +2688,7 @@ AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
     st->probe_packets = MAX_PROBE_PACKETS;
 
     /* default pts setting is MPEG-like */
-    av_set_pts_info(st, 33, 1, 90000);
+    avpriv_set_pts_info(st, 33, 1, 90000);
     st->last_IP_pts = AV_NOPTS_VALUE;
     for(i=0; i<MAX_REORDER_DELAY+1; i++)
         st->pts_buffer[i]= AV_NOPTS_VALUE;
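avformat_new_stream() therefore starts every stream with the MPEG-style 33-bit, 90 kHz timestamp space; a demuxer that knows the container's real time base overrides it with the renamed helper. A sketch (the 1/1000 base is only an example of a millisecond-precision container):

    /* illustrative: give the new stream a 1/1000 time base with full 64-bit timestamps */
    avpriv_set_pts_info(st, 64, 1, 1000);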
@@ -2801,31 +2750,6 @@ AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
 /************************************************************/
 /* output media file */
 
-#if FF_API_FORMAT_PARAMETERS
-int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
-{
-    int ret;
-
-    if (s->oformat->priv_data_size > 0) {
-        s->priv_data = av_mallocz(s->oformat->priv_data_size);
-        if (!s->priv_data)
-            return AVERROR(ENOMEM);
-        if (s->oformat->priv_class) {
-            *(const AVClass**)s->priv_data= s->oformat->priv_class;
-            av_opt_set_defaults(s->priv_data);
-        }
-    } else
-        s->priv_data = NULL;
-
-    if (s->oformat->set_parameters) {
-        ret = s->oformat->set_parameters(s, ap);
-        if (ret < 0)
-            return ret;
-    }
-    return 0;
-}
-#endif
-
 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
 {
     const AVCodecTag *avctag;
@@ -2859,13 +2783,6 @@ static int validate_codec_tag(AVFormatContext *s, AVStream *st)
     return 1;
 }
 
-#if FF_API_FORMAT_PARAMETERS
-int av_write_header(AVFormatContext *s)
-{
-    return avformat_write_header(s, NULL);
-}
-#endif
-
 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
 {
     int ret = 0, i;
@@ -2910,7 +2827,11 @@ int avformat_write_header(AVFormatContext *s, AVDictionary **options)
                 goto fail;
             }
             if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
-                av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
+                av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
+                       "(%d/%d) and encoder layer (%d/%d)\n",
+                       st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
+                       st->codec->sample_aspect_ratio.num,
+                       st->codec->sample_aspect_ratio.den);
                 ret = AVERROR(EINVAL);
                 goto fail;
             }
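Since the mismatch is now a hard error, muxing code has to keep the two fields coherent before calling avformat_write_header(). A one-line caller-side sketch (st standing for the output AVStream is an assumption of this example):

    /* illustrative: copy the encoder's aspect ratio into the stream before writing the header */
    st->sample_aspect_ratio = st->codec->sample_aspect_ratio;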
@@ -3058,7 +2979,7 @@ static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
     /* update pts */
     switch (st->codec->codec_type) {
     case AVMEDIA_TYPE_AUDIO:
-        frame_size = get_audio_frame_size(st->codec, pkt->size);
+        frame_size = get_audio_frame_size(st->codec, pkt->size, 1);
 
         /* HACK/FIXME, we skip the initial 0 size packets as they are most
            likely equal to the encoder delay, but it would be better if we
@@ -3078,7 +2999,15 @@ static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
 
 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
 {
-    int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
+    int ret;
+
+    if (!pkt) {
+        if (s->oformat->flags & AVFMT_ALLOW_FLUSH)
+            return s->oformat->write_packet(s, pkt);
+        return 1;
+    }
+
+    ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
 
     if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
         return ret;
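The NULL-packet case gives callers an explicit flush request. A small sketch of how that might be used, assuming oc is the output AVFormatContext (an illustrative name):

    /* illustrative: ask the muxer to flush its internal buffers, if it supports that */
    static int flush_muxer(AVFormatContext *oc)
    {
        if (!(oc->oformat->flags & AVFMT_ALLOW_FLUSH))
            return 0;                      /* av_write_frame(oc, NULL) would just return 1 */
        return av_write_frame(oc, NULL);   /* NULL packet = flush request */
    }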
@@ -3138,7 +3067,9 @@ static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacke
     return comp > 0;
 }
 
-int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
+int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
+                                 AVPacket *pkt, int flush)
+{
     AVPacketList *pktl;
     int stream_count=0;
     int i;
@@ -3168,6 +3099,14 @@ int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pk
     }
 }
 
+#if FF_API_INTERLEAVE_PACKET
+int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
+                                 AVPacket *pkt, int flush)
+{
+    return ff_interleave_packet_per_dts(s, out, pkt, flush);
+}
+#endif
+
 /**
  * Interleave an AVPacket correctly so it can be muxed.
  * @param out the interleaved packet will be output here
@@ -3178,31 +3117,40 @@ int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pk
  *         < 0 if an error occurred
  */
 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
-    if(s->oformat->interleave_packet)
-        return s->oformat->interleave_packet(s, out, in, flush);
-    else
-        return av_interleave_packet_per_dts(s, out, in, flush);
+    if (s->oformat->interleave_packet) {
+        int ret = s->oformat->interleave_packet(s, out, in, flush);
+        if (in)
+            av_free_packet(in);
+        return ret;
+    } else
+        return ff_interleave_packet_per_dts(s, out, in, flush);
 }
 
 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
-    AVStream *st= s->streams[ pkt->stream_index];
-    int ret;
+    int ret, flush = 0;
 
-    //FIXME/XXX/HACK drop zero sized packets
-    if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
-        return 0;
+    if (pkt) {
+        AVStream *st= s->streams[ pkt->stream_index];
 
-    av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
-            pkt->size, pkt->dts, pkt->pts);
-    if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
-        return ret;
+        //FIXME/XXX/HACK drop zero sized packets
+        if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
+            return 0;
 
-    if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
-        return AVERROR(EINVAL);
+        av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
+                pkt->size, pkt->dts, pkt->pts);
+        if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
+            return ret;
+
+        if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
+            return AVERROR(EINVAL);
+    } else {
+        av_dlog(s, "av_interleaved_write_frame FLUSH\n");
+        flush = 1;
+    }
 
     for(;;){
         AVPacket opkt;
-        int ret= interleave_packet(s, &opkt, pkt, 0);
+        int ret= interleave_packet(s, &opkt, pkt, flush);
         if(ret<=0) //FIXME cleanup needed for ret<0 ?
             return ret;
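On the interleaved path the same convention applies: a NULL packet sets flush and drains whatever interleave_packet() is still holding. A sketch of the end-of-stream sequence (oc is an assumed output context; av_write_trailer() performs a final flush of its own as well):

    /* illustrative: drain the interleaving queue at EOF, then finish the file */
    av_interleaved_write_frame(oc, NULL);
    av_write_trailer(oc);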
 
@@ -3247,7 +3195,7 @@ fail:
         av_freep(&s->streams[i]->priv_data);
         av_freep(&s->streams[i]->index_entries);
     }
-    if (s->iformat && s->iformat->priv_class)
+    if (s->oformat->priv_class)
         av_opt_free(s->priv_data);
     av_freep(&s->priv_data);
     return ret;
@@ -3364,16 +3312,6 @@ static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_out
     dump_metadata(NULL, st->metadata, "    ");
 }
 
-#if FF_API_DUMP_FORMAT
-void dump_format(AVFormatContext *ic,
-                 int index,
-                 const char *url,
-                 int is_output)
-{
-    av_dump_format(ic, index, url, is_output);
-}
-#endif
-
 void av_dump_format(AVFormatContext *ic,
                     int index,
                     const char *url,
@@ -3465,26 +3403,6 @@ uint64_t ff_ntp_time(void)
   return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
 }
 
-#if FF_API_PARSE_DATE
-#include "libavutil/parseutils.h"
-
-int64_t parse_date(const char *timestr, int duration)
-{
-    int64_t timeval;
-    av_parse_time(&timeval, timestr, duration);
-    return timeval;
-}
-#endif
-
-#if FF_API_FIND_INFO_TAG
-#include "libavutil/parseutils.h"
-
-int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
-{
-    return av_find_info_tag(arg, arg_size, tag1, info);
-}
-#endif
-
 int av_get_frame_filename(char *buf, int buf_size,
                           const char *path, int number)
 {
@@ -3605,27 +3523,11 @@ static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int
         av_hex_dump(f, pkt->data, pkt->size);
 }
 
-#if FF_API_PKT_DUMP
-void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
-{
-    AVRational tb = { 1, AV_TIME_BASE };
-    pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
-}
-#endif
-
 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
 {
     pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
 }
 
-#if FF_API_PKT_DUMP
-void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
-{
-    AVRational tb = { 1, AV_TIME_BASE };
-    pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
-}
-#endif
-
 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
                       AVStream *st)
 {
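The surviving dump helpers take the stream so that timestamps are printed in that stream's own time base rather than AV_TIME_BASE. A one-line sketch (pkt and ic are assumed to be a demuxed packet and its AVFormatContext):

    /* illustrative: log packet header fields, no payload hexdump */
    av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, 0, ic->streams[pkt.stream_index]);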
@@ -3742,8 +3644,8 @@ int ff_hex_to_data(uint8_t *data, const char *p)
     return len;
 }
 
-void av_set_pts_info(AVStream *s, int pts_wrap_bits,
-                     unsigned int pts_num, unsigned int pts_den)
+void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
+                         unsigned int pts_num, unsigned int pts_den)
 {
     AVRational new_tb;
     if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
@@ -3765,7 +3667,7 @@ int ff_url_join(char *str, int size, const char *proto,
                 int port, const char *fmt, ...)
 {
 #if CONFIG_NETWORK
-    struct addrinfo hints, *ai;
+    struct addrinfo hints = { 0 }, *ai;
 #endif
 
     str[0] = '\0';
@@ -3776,7 +3678,6 @@ int ff_url_join(char *str, int size, const char *proto,
 #if CONFIG_NETWORK && defined(AF_INET6)
     /* Determine if hostname is a numerical IPv6 address and,
      * if so, properly escape it within []. */
-    memset(&hints, 0, sizeof(hints));
     hints.ai_flags = AI_NUMERICHOST;
     if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
         if (ai->ai_family == AF_INET6) {
@@ -3941,9 +3842,14 @@ void ff_make_absolute_url(char *buf, int size, const char *base,
 int64_t ff_iso8601_to_unix_time(const char *datestr)
 {
 #if HAVE_STRPTIME
-    struct tm time = {0};
-    strptime(datestr, "%Y - %m - %dT%T", &time);
-    return mktime(&time);
+    struct tm time1 = {0}, time2 = {0};
+    char *ret1, *ret2;
+    ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
+    ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
+    if (ret2 && !ret1)
+        return av_timegm(&time2);
+    else
+        return av_timegm(&time1);
 #else
     av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert "
                                  "the date string.\n");
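The rewritten helper accepts both the space-separated and the "T"-separated layout and, via av_timegm(), treats the value as UTC. A small sketch with two equivalent inputs (the dates are arbitrary examples):

    /* illustrative: both spellings parse to the same UTC timestamp */
    int64_t t1 = ff_iso8601_to_unix_time("2012-01-01 00:00:00");
    int64_t t2 = ff_iso8601_to_unix_time("2012-01-01T00:00:00");
    /* t1 == t2 */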
@@ -3985,3 +3891,54 @@ int avformat_network_deinit(void)
 #endif
     return 0;
 }
+
+int ff_add_param_change(AVPacket *pkt, int32_t channels,
+                        uint64_t channel_layout, int32_t sample_rate,
+                        int32_t width, int32_t height)
+{
+    uint32_t flags = 0;
+    int size = 4;
+    uint8_t *data;
+    if (!pkt)
+        return AVERROR(EINVAL);
+    if (channels) {
+        size += 4;
+        flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
+    }
+    if (channel_layout) {
+        size += 8;
+        flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
+    }
+    if (sample_rate) {
+        size += 4;
+        flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
+    }
+    if (width || height) {
+        size += 8;
+        flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
+    }
+    data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
+    if (!data)
+        return AVERROR(ENOMEM);
+    bytestream_put_le32(&data, flags);
+    if (channels)
+        bytestream_put_le32(&data, channels);
+    if (channel_layout)
+        bytestream_put_le64(&data, channel_layout);
+    if (sample_rate)
+        bytestream_put_le32(&data, sample_rate);
+    if (width || height) {
+        bytestream_put_le32(&data, width);
+        bytestream_put_le32(&data, height);
+    }
+    return 0;
+}
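ff_add_param_change() serialises the flags word first and then each optional field in the order tested above, all little-endian. A reader-side sketch in the same spirit (the function name read_param_change and the bare locals are illustrative, not an API added by this diff):

    static void read_param_change(AVPacket *pkt)
    {
        int size;
        const uint8_t *p = av_packet_get_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, &size);
        uint32_t flags;
        int32_t channels = 0, sample_rate = 0, width = 0, height = 0;
        uint64_t channel_layout = 0;

        if (!p || size < 4)
            return;
        flags = bytestream_get_le32(&p);               /* written first, see above */
        if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
            channels       = bytestream_get_le32(&p);
        if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
            channel_layout = bytestream_get_le64(&p);
        if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
            sample_rate    = bytestream_get_le32(&p);
        if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
            width  = bytestream_get_le32(&p);
            height = bytestream_get_le32(&p);
        }
        /* ... hand the new parameters to the decoder ... */
    }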
+
+const struct AVCodecTag *avformat_get_riff_video_tags(void)
+{
+    return ff_codec_bmp_tags;
+}
+const struct AVCodecTag *avformat_get_riff_audio_tags(void)
+{
+    return ff_codec_wav_tags;
+}
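The two accessors expose the RIFF fourcc tables so that applications can map between codec IDs and AVI-style tags with the existing helpers. A sketch, assuming the era's av_codec_get_tag()/av_codec_get_id() signatures and CODEC_ID_* names:

    /* illustrative: NULL-terminated list of tag tables, as those helpers expect */
    const struct AVCodecTag *const tables[] = { avformat_get_riff_video_tags(), NULL };
    unsigned int tag = av_codec_get_tag(tables, CODEC_ID_H264);              /* codec id -> fourcc */
    enum CodecID  id = av_codec_get_id(tables, MKTAG('m', 'p', '4', 'v'));   /* fourcc -> codec id */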