/*
 * various utility functions for use within FFmpeg
 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
25 #include "avio_internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavcodec/raw.h"
29 #include "libavcodec/bytestream.h"
30 #include "libavutil/avassert.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/dict.h"
33 #include "libavutil/pixdesc.h"
36 #include "libavutil/avassert.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/timestamp.h"
42 #include "audiointerleave.h"
54 * various utility functions for use within FFmpeg
57 unsigned avformat_version(void)
59 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
60 return LIBAVFORMAT_VERSION_INT;
63 const char *avformat_configuration(void)
65 return FFMPEG_CONFIGURATION;
68 const char *avformat_license(void)
70 #define LICENSE_PREFIX "libavformat license: "
71 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Base value for "relative" timestamps: packets queued before the true
 * start time is known get timestamps near INT64_MAX so they can later be
 * shifted once the first real DTS is seen. */
#define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))

/**
 * Return 1 if ts is a relative (placeholder) timestamp, 0 otherwise.
 * A margin of 1<<48 below RELATIVE_TS_BASE still counts as relative so
 * that small negative offsets applied to relative values stay relative.
 */
static int is_relative(int64_t ts) {
    return ts > (RELATIVE_TS_BASE - (1LL<<48));
}
/*
 * NOTE(review): damaged extraction — the original file's line numbers are
 * fused into every line below and most of the AVFrac helper bodies
 * (frac_init/frac_add) are missing; text is preserved byte-for-byte.
 * Presumably these normalize/accumulate fractional timestamps for the
 * muxer — confirm against upstream FFmpeg before any rewrite.
 */
80 /* fraction handling */
83  * f = val + (num / den) + 0.5.
85  * 'num' is normalized so that it is such as 0 <= num < den.
87  * @param f fractional number
88  * @param val integer value
89  * @param num must be >= 0
90  * @param den must be >= 1
92 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
105  * Fractional addition to f: f = f + (incr / f->den).
107  * @param f fractional number
108  * @param incr increment, can be positive or negative
110 static void frac_add(AVFrac *f, int64_t incr)
123     } else if (num >= den) {
130 /** head of registered input format linked list */
131 static AVInputFormat *first_iformat = NULL;
132 /** head of registered output format linked list */
133 static AVOutputFormat *first_oformat = NULL;
135 AVInputFormat *av_iformat_next(AVInputFormat *f)
137 if(f) return f->next;
138 else return first_iformat;
141 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
143 if(f) return f->next;
144 else return first_oformat;
147 void av_register_input_format(AVInputFormat *format)
151 while (*p != NULL) p = &(*p)->next;
156 void av_register_output_format(AVOutputFormat *format)
160 while (*p != NULL) p = &(*p)->next;
/**
 * Check whether filename's extension (the text after the final '.')
 * case-insensitively matches one of the comma-separated extensions.
 * @return 1 on match, 0 otherwise (including NULL filename or no '.')
 */
int av_match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if (!filename)
        return 0;
    ext = strrchr(filename, '.');
    if (ext) {
        ext++;
        p = extensions;
        for (;;) {
            /* copy one comma-separated token, truncated to the buffer */
            q = ext1;
            while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
                *q++ = *p++;
            *q = '\0';
            if (!av_strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++;
        }
    }
    return 0;
}
/**
 * Check whether name case-insensitively matches one entry of the
 * comma-separated list names.
 * @return 1 on match, 0 otherwise (including NULL arguments)
 */
static int match_format(const char *name, const char *names)
{
    const char *p;
    int len, namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        /* compare the longer of the two lengths so "mp4" does not
         * match the "mp4a" entry and vice versa */
        len = FFMAX(p - names, namelen);
        if (!av_strncasecmp(name, names, len))
            return 1;
        names = p + 1;
    }
    return !av_strcasecmp(name, names);
}
/*
 * av_guess_format(): pick the best registered muxer for the given short
 * name, filename and/or MIME type, scoring each candidate.
 * NOTE(review): damaged extraction — original line numbers are fused into
 * every line and several lines (score assignments, fmt_found update,
 * closing braces, final return) are missing; preserved byte-for-byte.
 */
210 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
211 const char *mime_type)
213 AVOutputFormat *fmt = NULL, *fmt_found;
214 int score_max, score;
216 /* specific test for image sequences */
217 #if CONFIG_IMAGE2_MUXER
218 if (!short_name && filename &&
219 av_filename_number_test(filename) &&
220 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
221 return av_guess_format("image2", NULL, NULL);
224 /* Find the proper file type. */
227 while ((fmt = av_oformat_next(fmt))) {
229 if (fmt->name && short_name && match_format(short_name, fmt->name))
231 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
233 if (filename && fmt->extensions &&
234 av_match_ext(filename, fmt->extensions)) {
237 if (score > score_max) {
245 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
246 const char *filename, const char *mime_type, enum AVMediaType type){
247 if(type == AVMEDIA_TYPE_VIDEO){
248 enum CodecID codec_id= CODEC_ID_NONE;
250 #if CONFIG_IMAGE2_MUXER
251 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
252 codec_id= ff_guess_image2_codec(filename);
255 if(codec_id == CODEC_ID_NONE)
256 codec_id= fmt->video_codec;
258 }else if(type == AVMEDIA_TYPE_AUDIO)
259 return fmt->audio_codec;
260 else if (type == AVMEDIA_TYPE_SUBTITLE)
261 return fmt->subtitle_codec;
263 return CODEC_ID_NONE;
266 AVInputFormat *av_find_input_format(const char *short_name)
268 AVInputFormat *fmt = NULL;
269 while ((fmt = av_iformat_next(fmt))) {
270 if (match_format(short_name, fmt->name))
276 int ffio_limit(AVIOContext *s, int size)
279 int64_t remaining= s->maxsize - avio_tell(s);
280 if(remaining < size){
281 int64_t newsize= avio_size(s);
282 if(!s->maxsize || s->maxsize<newsize)
283 s->maxsize= newsize - !newsize;
284 remaining= s->maxsize - avio_tell(s);
285 remaining= FFMAX(remaining, 0);
288 if(s->maxsize>=0 && remaining+1 < size){
289 av_log(0, AV_LOG_ERROR, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
296 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
299 int orig_size = size;
300 size= ffio_limit(s, size);
302 ret= av_new_packet(pkt, size);
307 pkt->pos= avio_tell(s);
309 ret= avio_read(s, pkt->data, size);
313 av_shrink_packet(pkt, ret);
314 if (pkt->size < orig_size)
315 pkt->flags |= AV_PKT_FLAG_CORRUPT;
320 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
325 return av_get_packet(s, pkt, size);
326 old_size = pkt->size;
327 ret = av_grow_packet(pkt, size);
330 ret = avio_read(s, pkt->data + old_size, size);
331 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
/**
 * Check whether filename contains a valid frame-number pattern by
 * attempting to expand it for frame 1.
 * @return 1 if expansion succeeds, 0 otherwise (including NULL input)
 */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
}
/*
 * av_probe_input_format3(): run every registered demuxer's probe over the
 * buffer (with any leading ID3v2 tag skipped) and report the best score.
 * NOTE(review): damaged extraction — original line numbers are fused into
 * every line and many lines (loop bodies, score bookkeeping, returns) are
 * missing; preserved byte-for-byte. Do not rewrite without upstream.
 */
342 AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
344 AVProbeData lpd = *pd;
345 AVInputFormat *fmt1 = NULL, *fmt;
346 int score, nodat = 0, score_max=0;
348 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
349 int id3len = ff_id3v2_tag_len(lpd.buf);
350 if (lpd.buf_size > id3len + 16) {
352 lpd.buf_size -= id3len;
358 while ((fmt1 = av_iformat_next(fmt1))) {
359 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
362 if (fmt1->read_probe) {
363 score = fmt1->read_probe(&lpd);
364 if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
365 score = FFMAX(score, nodat ? AVPROBE_SCORE_MAX/4-1 : 1);
366 } else if (fmt1->extensions) {
367 if (av_match_ext(lpd.filename, fmt1->extensions)) {
371 if (score > score_max) {
374 }else if (score == score_max)
377 *score_ret= score_max;
382 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
385 AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
386 if(score_ret > *score_max){
387 *score_max= score_ret;
393 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
395 return av_probe_input_format2(pd, is_opened, &score);
/*
 * set_codec_from_probe_data(): probe the accumulated stream bytes and, if
 * the detected container maps to a known elementary format (aac, ac3,
 * h264, ...), set the stream's codec id/type accordingly.
 * NOTE(review): damaged extraction — original line numbers are fused into
 * every line and several lines (table terminator, declarations, returns)
 * are missing; preserved byte-for-byte.
 */
398 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
400 static const struct {
401 const char *name; enum CodecID id; enum AVMediaType type;
403 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
404 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
405 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
406 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
407 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
408 { "loas" , CODEC_ID_AAC_LATM , AVMEDIA_TYPE_AUDIO },
409 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
410 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
411 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
415 AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
419 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
420 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
421 for (i = 0; fmt_id_type[i].name; i++) {
422 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
423 st->codec->codec_id = fmt_id_type[i].id;
424 st->codec->codec_type = fmt_id_type[i].type;
432 /************************************************************/
433 /* input media file */
435 int av_demuxer_open(AVFormatContext *ic){
438 if (ic->iformat->read_header) {
439 err = ic->iformat->read_header(ic);
444 if (ic->pb && !ic->data_offset)
445 ic->data_offset = avio_tell(ic->pb);
/** size of probe buffer, for guessing file type from file contents */
#define PROBE_BUF_MIN 2048
#define PROBE_BUF_MAX (1<<20)
/*
 * av_probe_input_buffer(): probe the input format by reading successively
 * larger chunks (doubling from PROBE_BUF_MIN up to max_probe_size) until a
 * demuxer matches, then rewind the I/O context reusing the probe buffer.
 * NOTE(review): damaged extraction — original line numbers are fused into
 * every line and many lines (buftmp declaration, error cleanup paths,
 * closing braces, final return) are missing; preserved byte-for-byte.
 */
455 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
456 const char *filename, void *logctx,
457 unsigned int offset, unsigned int max_probe_size)
459 AVProbeData pd = { filename ? filename : "", NULL, -offset };
460 unsigned char *buf = NULL;
461 int ret = 0, probe_size;
463 if (!max_probe_size) {
464 max_probe_size = PROBE_BUF_MAX;
465 } else if (max_probe_size > PROBE_BUF_MAX) {
466 max_probe_size = PROBE_BUF_MAX;
467 } else if (max_probe_size < PROBE_BUF_MIN) {
468 return AVERROR(EINVAL);
471 if (offset >= max_probe_size) {
472 return AVERROR(EINVAL);
475 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
476 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
477 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
478 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
481 if (probe_size < offset) {
485 /* read probe data */
486 buftmp = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
489 return AVERROR(ENOMEM);
492 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
493 /* fail if error was not end of file, otherwise, lower score */
494 if (ret != AVERROR_EOF) {
499 ret = 0; /* error was end of file, nothing read */
502 pd.buf = &buf[offset];
504 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
506 /* guess file format */
507 *fmt = av_probe_input_format2(&pd, 1, &score);
509 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
510 av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
512 av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
518 return AVERROR_INVALIDDATA;
521 /* rewind. reuse probe buffer to avoid seeking */
522 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
528 /* open input file and probe the format if necessary */
529 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
532 AVProbeData pd = {filename, NULL, 0};
535 s->flags |= AVFMT_FLAG_CUSTOM_IO;
537 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
538 else if (s->iformat->flags & AVFMT_NOFILE)
539 av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
540 "will be ignored with AVFMT_NOFILE format.\n");
544 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
545 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
548 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
549 &s->interrupt_callback, options)) < 0)
553 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
556 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
557 AVPacketList **plast_pktl){
558 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
563 (*plast_pktl)->next = pktl;
565 *packet_buffer = pktl;
567 /* add the packet in the buffered packet list */
573 static void queue_attached_pictures(AVFormatContext *s)
576 for (i = 0; i < s->nb_streams; i++)
577 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
578 s->streams[i]->discard < AVDISCARD_ALL) {
579 AVPacket copy = s->streams[i]->attached_pic;
580 copy.destruct = NULL;
581 add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end);
/*
 * avformat_open_input(): allocate/validate the context, open and probe the
 * input, apply options, read the demuxer header and ID3v2 metadata, and
 * queue attached pictures.
 * NOTE(review): damaged extraction — original line numbers are fused into
 * every line and many lines (option validation, goto fail paths, success
 * return, closing braces) are missing; preserved byte-for-byte.
 */
585 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
587 AVFormatContext *s = *ps;
589 AVDictionary *tmp = NULL;
590 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
592 if (!s && !(s = avformat_alloc_context()))
593 return AVERROR(ENOMEM);
595 av_log(0, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
596 return AVERROR(EINVAL);
602 av_dict_copy(&tmp, *options, 0);
604 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
607 if ((ret = init_input(s, filename, &tmp)) < 0)
610 /* check filename in case an image number is expected */
611 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
612 if (!av_filename_number_test(filename)) {
613 ret = AVERROR(EINVAL);
618 s->duration = s->start_time = AV_NOPTS_VALUE;
619 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
621 /* allocate private data */
622 if (s->iformat->priv_data_size > 0) {
623 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
624 ret = AVERROR(ENOMEM);
627 if (s->iformat->priv_class) {
628 *(const AVClass**)s->priv_data = s->iformat->priv_class;
629 av_opt_set_defaults(s->priv_data);
630 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
635 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
637 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
639 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
640 if ((ret = s->iformat->read_header(s)) < 0)
643 if (id3v2_extra_meta &&
644 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
646 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
648 queue_attached_pictures(s);
650 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
651 s->data_offset = avio_tell(s->pb);
653 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
656 av_dict_free(options);
663 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
665 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
667 avformat_free_context(s);
672 /*******************************************************/
/*
 * probe_codec(): accumulate packet bytes for a stream whose codec is not
 * yet identified and retry codec detection at power-of-two buffer growth
 * points or when probing must end.
 * NOTE(review): damaged extraction — original line numbers are fused into
 * every line and several lines (the !pkt flush branch, end declaration,
 * closing braces) are missing; preserved byte-for-byte.
 */
674 static void probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
676 if(st->request_probe>0){
677 AVProbeData *pd = &st->probe_data;
679 av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
683 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
684 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
685 pd->buf_size += pkt->size;
686 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
688 st->probe_packets = 0;
691 end= s->raw_packet_buffer_remaining_size <= 0
692 || st->probe_packets<=0;
694 if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
695 int score= set_codec_from_probe_data(s, st, pd);
696 if( (st->codec->codec_id != CODEC_ID_NONE && score > AVPROBE_SCORE_MAX/4)
700 st->request_probe= -1;
701 if(st->codec->codec_id != CODEC_ID_NONE){
702 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
704 av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
/*
 * ff_read_packet(): return the next raw packet, either from the raw
 * packet buffer (streams already probed) or freshly from the demuxer,
 * applying codec-id overrides, side-data merging and corrupt-packet
 * dropping, and feeding still-unprobed streams through probe_codec().
 * NOTE(review): damaged extraction — original line numbers are fused into
 * every line and many lines (declarations, loop/retry labels, returns,
 * braces) are missing; preserved byte-for-byte.
 */
710 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
716 AVPacketList *pktl = s->raw_packet_buffer;
720 st = s->streams[pkt->stream_index];
721 if(st->request_probe <= 0){
722 s->raw_packet_buffer = pktl->next;
723 s->raw_packet_buffer_remaining_size += pkt->size;
730 ret= s->iformat->read_packet(s, pkt);
732 if (!pktl || ret == AVERROR(EAGAIN))
734 for (i = 0; i < s->nb_streams; i++) {
736 if (st->probe_packets) {
737 probe_codec(s, st, NULL);
739 av_assert0(st->request_probe <= 0);
744 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
745 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
746 av_log(s, AV_LOG_WARNING,
747 "Dropped corrupted packet (stream = %d)\n",
753 if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
754 av_packet_merge_side_data(pkt);
756 if(pkt->stream_index >= (unsigned)s->nb_streams){
757 av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
761 st= s->streams[pkt->stream_index];
763 switch(st->codec->codec_type){
764 case AVMEDIA_TYPE_VIDEO:
765 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
767 case AVMEDIA_TYPE_AUDIO:
768 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
770 case AVMEDIA_TYPE_SUBTITLE:
771 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
775 if(!pktl && st->request_probe <= 0)
778 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
779 s->raw_packet_buffer_remaining_size -= pkt->size;
781 probe_codec(s, st, pkt);
#if FF_API_READ_PACKET
/**
 * Deprecated public wrapper kept for API compatibility;
 * forwards to ff_read_packet().
 */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    return ff_read_packet(s, pkt);
}
#endif
793 /**********************************************************/
795 static int determinable_frame_size(AVCodecContext *avctx)
797 if (/*avctx->codec_id == CODEC_ID_AAC ||*/
798 avctx->codec_id == CODEC_ID_MP1 ||
799 avctx->codec_id == CODEC_ID_MP2 ||
800 avctx->codec_id == CODEC_ID_MP3/* ||
801 avctx->codec_id == CODEC_ID_CELT*/)
807 * Get the number of samples of an audio frame. Return -1 on error.
809 static int get_audio_frame_size(AVCodecContext *enc, int size, int mux)
813 /* give frame_size priority if demuxing */
814 if (!mux && enc->frame_size > 1)
815 return enc->frame_size;
817 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
820 /* fallback to using frame_size if muxing */
821 if (enc->frame_size > 1)
822 return enc->frame_size;
/*
 * compute_frame_duration(): compute one packet's duration as the fraction
 * *pnum / *pden seconds, from the frame rate or time base for video and
 * from the sample count / sample rate for audio.
 * NOTE(review): damaged extraction — original line numbers are fused into
 * every line and several lines (declarations, *pnum/*pden zeroing, breaks,
 * closing braces) are missing; preserved byte-for-byte.
 */
829 * Return the frame duration in seconds. Return 0 if not available.
831 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
832 AVCodecParserContext *pc, AVPacket *pkt)
838 switch(st->codec->codec_type) {
839 case AVMEDIA_TYPE_VIDEO:
840 if (st->r_frame_rate.num && !pc) {
841 *pnum = st->r_frame_rate.den;
842 *pden = st->r_frame_rate.num;
843 } else if(st->time_base.num*1000LL > st->time_base.den) {
844 *pnum = st->time_base.num;
845 *pden = st->time_base.den;
846 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
847 *pnum = st->codec->time_base.num;
848 *pden = st->codec->time_base.den;
849 if (pc && pc->repeat_pict) {
850 *pnum = (*pnum) * (1 + pc->repeat_pict);
852 //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
853 //Thus if we have no parser in such case leave duration undefined.
854 if(st->codec->ticks_per_frame>1 && !pc){
859 case AVMEDIA_TYPE_AUDIO:
860 frame_size = get_audio_frame_size(st->codec, pkt->size, 0);
861 if (frame_size <= 0 || st->codec->sample_rate <= 0)
864 *pden = st->codec->sample_rate;
/*
 * is_intra_only(): return nonzero when every frame of the codec is a
 * keyframe (all audio; a whitelist of intra-only video codecs).
 * NOTE(review): damaged extraction — original line numbers are fused into
 * every line and roughly half the case labels plus the returns/braces are
 * missing; the full upstream list is longer. Preserved byte-for-byte.
 */
871 static int is_intra_only(AVCodecContext *enc){
872 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
874 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
875 switch(enc->codec_id){
877 case CODEC_ID_MJPEGB:
879 case CODEC_ID_PRORES:
880 case CODEC_ID_RAWVIDEO:
882 case CODEC_ID_DVVIDEO:
883 case CODEC_ID_HUFFYUV:
884 case CODEC_ID_FFVHUFF:
889 case CODEC_ID_JPEG2000:
891 case CODEC_ID_UTVIDEO:
899 static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
903 if (pktl == s->parse_queue_end)
904 return s->packet_buffer;
/*
 * update_initial_timestamps(): once the first real DTS of a stream is
 * known, derive first_dts and rewrite all buffered relative (placeholder)
 * timestamps into absolute ones; also set the stream start_time.
 * NOTE(review): damaged extraction — original line numbers are fused into
 * every line and several lines (early return, closing braces) are
 * missing; preserved byte-for-byte.
 */
908 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
909 int64_t dts, int64_t pts)
911 AVStream *st= s->streams[stream_index];
912 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
914 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))
917 st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
920 if (is_relative(pts))
921 pts += st->first_dts - RELATIVE_TS_BASE;
923 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
924 if(pktl->pkt.stream_index != stream_index)
926 if(is_relative(pktl->pkt.pts))
927 pktl->pkt.pts += st->first_dts - RELATIVE_TS_BASE;
929 if(is_relative(pktl->pkt.dts))
930 pktl->pkt.dts += st->first_dts - RELATIVE_TS_BASE;
932 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
933 st->start_time= pktl->pkt.pts;
935 if (st->start_time == AV_NOPTS_VALUE)
936 st->start_time = pts;
/*
 * update_initial_durations(): back-fill dts/pts/duration on already
 * buffered packets of a stream once a per-packet duration is known,
 * starting from first_dts (or the relative base).
 * NOTE(review): damaged extraction — original line numbers are fused into
 * every line and several lines (loop breaks, returns, closing braces) are
 * missing; preserved byte-for-byte.
 */
939 static void update_initial_durations(AVFormatContext *s, AVStream *st,
940 int stream_index, int duration)
942 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
943 int64_t cur_dts= RELATIVE_TS_BASE;
945 if(st->first_dts != AV_NOPTS_VALUE){
946 cur_dts= st->first_dts;
947 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
948 if(pktl->pkt.stream_index == stream_index){
949 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
954 if(pktl && pktl->pkt.dts != st->first_dts) {
955 av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s in que\n", av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts));
959 av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in ques\n", av_ts2str(st->first_dts));
962 pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
963 st->first_dts = cur_dts;
964 }else if(st->cur_dts != RELATIVE_TS_BASE)
967 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
968 if(pktl->pkt.stream_index != stream_index)
970 if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
971 && !pktl->pkt.duration){
972 pktl->pkt.dts= cur_dts;
973 if(!st->codec->has_b_frames)
974 pktl->pkt.pts= cur_dts;
975 // if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
976 pktl->pkt.duration = duration;
979 cur_dts = pktl->pkt.dts + pktl->pkt.duration;
982 st->cur_dts= cur_dts;
/*
 * compute_pkt_fields(): fill in missing pts/dts/duration on a demuxed
 * packet using the parser state, stream time base, B-frame delay and the
 * stream's running cur_dts; also maintains the pts reorder buffer and
 * marks packets of intra-only codecs as keyframes.
 * NOTE(review): damaged extraction — original line numbers are fused into
 * every line and many lines (declarations, else/brace lines, several
 * statements) are missing; this block is too order-sensitive to
 * reconstruct safely, so the text is preserved byte-for-byte. Consult
 * upstream FFmpeg libavformat/utils.c before modifying.
 */
985 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
986 AVCodecParserContext *pc, AVPacket *pkt)
988 int num, den, presentation_delayed, delay, i;
991 if (s->flags & AVFMT_FLAG_NOFILLIN)
994 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
995 pkt->dts= AV_NOPTS_VALUE;
997 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
998 //FIXME Set low_delay = 0 when has_b_frames = 1
999 st->codec->has_b_frames = 1;
1001 /* do we have a video B-frame ? */
1002 delay= st->codec->has_b_frames;
1003 presentation_delayed = 0;
1005 /* XXX: need has_b_frame, but cannot get it if the codec is
1008 pc && pc->pict_type != AV_PICTURE_TYPE_B)
1009 presentation_delayed = 1;
1011 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > pkt->pts && st->pts_wrap_bits<63){
1012 pkt->dts -= 1LL<<st->pts_wrap_bits;
1015 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
1016 // we take the conservative approach and discard both
1017 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
1018 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
1019 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1020 pkt->dts= AV_NOPTS_VALUE;
1023 if (pkt->duration == 0) {
1024 compute_frame_duration(&num, &den, st, pc, pkt);
1026 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
1029 if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
1030 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1032 /* correct timestamps with byte offset if demuxers only have timestamps
1033 on packet boundaries */
1034 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1035 /* this will estimate bitrate based on this frame's duration and size */
1036 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1037 if(pkt->pts != AV_NOPTS_VALUE)
1039 if(pkt->dts != AV_NOPTS_VALUE)
1043 if (pc && pc->dts_sync_point >= 0) {
1044 // we have synchronization info from the parser
1045 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1047 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1048 if (pkt->dts != AV_NOPTS_VALUE) {
1049 // got DTS from the stream, update reference timestamp
1050 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1051 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1052 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1053 // compute DTS based on reference timestamp
1054 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1055 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1057 if (pc->dts_sync_point > 0)
1058 st->reference_dts = pkt->dts; // new reference
1062 /* This may be redundant, but it should not hurt. */
1063 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1064 presentation_delayed = 1;
1066 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
1067 // presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
1068 /* interpolate PTS and DTS if they are not present */
1069 //We skip H264 currently because delay and has_b_frames are not reliably set
1070 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1071 if (presentation_delayed) {
1072 /* DTS = decompression timestamp */
1073 /* PTS = presentation timestamp */
1074 if (pkt->dts == AV_NOPTS_VALUE)
1075 pkt->dts = st->last_IP_pts;
1076 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1077 if (pkt->dts == AV_NOPTS_VALUE)
1078 pkt->dts = st->cur_dts;
1080 /* this is tricky: the dts must be incremented by the duration
1081 of the frame we are displaying, i.e. the last I- or P-frame */
1082 if (st->last_IP_duration == 0)
1083 st->last_IP_duration = pkt->duration;
1084 if(pkt->dts != AV_NOPTS_VALUE)
1085 st->cur_dts = pkt->dts + st->last_IP_duration;
1086 st->last_IP_duration = pkt->duration;
1087 st->last_IP_pts= pkt->pts;
1088 /* cannot compute PTS if not present (we can compute it only
1089 by knowing the future */
1090 } else if (pkt->pts != AV_NOPTS_VALUE ||
1091 pkt->dts != AV_NOPTS_VALUE ||
1093 int duration = pkt->duration;
1095 if(pkt->pts != AV_NOPTS_VALUE && duration){
1096 int64_t old_diff= FFABS(st->cur_dts - duration - pkt->pts);
1097 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1098 if( old_diff < new_diff && old_diff < (duration>>3)
1099 && (!strcmp(s->iformat->name, "mpeg") ||
1100 !strcmp(s->iformat->name, "mpegts"))){
1101 pkt->pts += duration;
1102 av_log(s, AV_LOG_WARNING, "Adjusting PTS forward\n");
1103 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%s size:%d\n",
1104 // pkt->stream_index, old_diff, new_diff, pkt->duration, av_ts2str(st->cur_dts), pkt->size);
1108 /* presentation is not delayed : PTS and DTS are the same */
1109 if (pkt->pts == AV_NOPTS_VALUE)
1110 pkt->pts = pkt->dts;
1111 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
1113 if (pkt->pts == AV_NOPTS_VALUE)
1114 pkt->pts = st->cur_dts;
1115 pkt->dts = pkt->pts;
1116 if (pkt->pts != AV_NOPTS_VALUE)
1117 st->cur_dts = pkt->pts + duration;
1121 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1122 st->pts_buffer[0]= pkt->pts;
1123 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1124 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1125 if(pkt->dts == AV_NOPTS_VALUE)
1126 pkt->dts= st->pts_buffer[0];
1127 if(st->codec->codec_id == CODEC_ID_H264){ // we skipped it above so we try here
1128 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1130 if(pkt->dts > st->cur_dts)
1131 st->cur_dts = pkt->dts;
1134 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1135 // presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1138 if(is_intra_only(st->codec))
1139 pkt->flags |= AV_PKT_FLAG_KEY;
1141 pkt->convergence_duration = pc->convergence_duration;
1144 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1147 AVPacketList *pktl = *pkt_buf;
1148 *pkt_buf = pktl->next;
1149 av_free_packet(&pktl->pkt);
1152 *pkt_buf_end = NULL;
/*
 * parse_packet(): feed one packet (or a flush packet at EOF) through the
 * stream's parser, splitting it into frames; each output frame gets its
 * timestamps/duration/key flag computed and is appended to parse_queue.
 * NOTE(review): damaged extraction — original line numbers are fused into
 * every line and many lines (flush setup, pointer advancement, error
 * paths, closing braces, final return) are missing; the parsing loop is
 * too order-sensitive to reconstruct safely, so the text is preserved
 * byte-for-byte.
 */
1156  * Parse a packet, add all split parts to parse_queue
1158  * @param pkt packet to parse, NULL when flushing the parser at end of stream
1160 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1162 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1163 AVStream *st = s->streams[stream_index];
1164 uint8_t *data = pkt ? pkt->data : NULL;
1165 int size = pkt ? pkt->size : 0;
1166 int ret = 0, got_output = 0;
1169 av_init_packet(&flush_pkt);
1172 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1173 // preserve 0-size sync packets
1174 compute_pkt_fields(s, st, st->parser, pkt);
1177 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1180 av_init_packet(&out_pkt);
1181 len = av_parser_parse2(st->parser, st->codec,
1182 &out_pkt.data, &out_pkt.size, data, size,
1183 pkt->pts, pkt->dts, pkt->pos);
1185 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1186 /* increment read pointer */
1190 got_output = !!out_pkt.size;
1195 /* set the duration */
1196 out_pkt.duration = 0;
1197 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1198 if (st->codec->sample_rate > 0) {
1199 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1200 (AVRational){ 1, st->codec->sample_rate },
1204 } else if (st->codec->time_base.num != 0 &&
1205 st->codec->time_base.den != 0) {
1206 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1207 st->codec->time_base,
1212 out_pkt.stream_index = st->index;
1213 out_pkt.pts = st->parser->pts;
1214 out_pkt.dts = st->parser->dts;
1215 out_pkt.pos = st->parser->pos;
1217 if (st->parser->key_frame == 1 ||
1218 (st->parser->key_frame == -1 &&
1219 st->parser->pict_type == AV_PICTURE_TYPE_I))
1220 out_pkt.flags |= AV_PKT_FLAG_KEY;
1222 if(st->parser->key_frame == -1 && st->parser->pict_type==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
1223 out_pkt.flags |= AV_PKT_FLAG_KEY;
1225 compute_pkt_fields(s, st, st->parser, &out_pkt);
1227 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1228 out_pkt.flags & AV_PKT_FLAG_KEY) {
1229 int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? out_pkt.pos : st->parser->frame_offset;
1230 ff_reduce_index(s, st->index);
1231 av_add_index_entry(st, pos, out_pkt.dts,
1232 0, 0, AVINDEX_KEYFRAME);
1235 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1236 out_pkt.destruct = pkt->destruct;
1237 pkt->destruct = NULL;
1239 if ((ret = av_dup_packet(&out_pkt)) < 0)
1242 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1243 av_free_packet(&out_pkt);
1244 ret = AVERROR(ENOMEM);
1250 /* end of the stream => close and free the parser */
1251 if (pkt == &flush_pkt) {
1252 av_parser_close(st->parser);
1257 av_free_packet(pkt);
1261 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1262 AVPacketList **pkt_buffer_end,
1266 av_assert0(*pkt_buffer);
1269 *pkt_buffer = pktl->next;
1271 *pkt_buffer_end = NULL;
/*
 * read_frame_internal(): pull raw packets via ff_read_packet(), lazily
 * create a parser per stream when needed, run packets through
 * parse_packet() (flushing all parsers at EOF), and hand back either a
 * raw or a parsed packet with its fields computed.
 * NOTE(review): damaged extraction — original line numbers are fused into
 * every line and many lines (declarations, EOF/error handling, got_packet
 * assignments, closing braces, final return) are missing; preserved
 * byte-for-byte. Consult upstream FFmpeg before modifying.
 */
1276 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1278 int ret = 0, i, got_packet = 0;
1280 av_init_packet(pkt);
1282 while (!got_packet && !s->parse_queue) {
1286 /* read next packet */
1287 ret = ff_read_packet(s, &cur_pkt);
1289 if (ret == AVERROR(EAGAIN))
1291 /* flush the parsers */
1292 for(i = 0; i < s->nb_streams; i++) {
1294 if (st->parser && st->need_parsing)
1295 parse_packet(s, NULL, st->index);
1297 /* all remaining packets are now in parse_queue =>
1298  * really terminate parsing */
1302 st = s->streams[cur_pkt.stream_index];
1304 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1305 cur_pkt.dts != AV_NOPTS_VALUE &&
1306 cur_pkt.pts < cur_pkt.dts) {
1307 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1308 cur_pkt.stream_index,
1309 av_ts2str(cur_pkt.pts),
1310 av_ts2str(cur_pkt.dts),
1313 if (s->debug & FF_FDEBUG_TS)
1314 av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1315 cur_pkt.stream_index,
1316 av_ts2str(cur_pkt.pts),
1317 av_ts2str(cur_pkt.dts),
1322 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1323 st->parser = av_parser_init(st->codec->codec_id);
1325 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1326 "%s, packets or times may be invalid.\n",
1327 avcodec_get_name(st->codec->codec_id));
1328 /* no parser available: just output the raw packets */
1329 st->need_parsing = AVSTREAM_PARSE_NONE;
1330 } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1331 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1332 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1333 st->parser->flags |= PARSER_FLAG_ONCE;
1334 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
1335 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
1339 if (!st->need_parsing || !st->parser) {
1340 /* no parsing needed: we just output the packet as is */
1342 compute_pkt_fields(s, st, NULL, pkt);
1343 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1344 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1345 ff_reduce_index(s, st->index);
1346 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1349 } else if (st->discard < AVDISCARD_ALL) {
1350 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1354 av_free_packet(&cur_pkt);
1356 if (pkt->flags & AV_PKT_FLAG_KEY)
1357 st->skip_to_keyframe = 0;
1358 if (st->skip_to_keyframe) {
1359 av_free_packet(&cur_pkt);
1364 if (!got_packet && s->parse_queue)
1365 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1367 if(s->debug & FF_FDEBUG_TS)
1368 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1370 av_ts2str(pkt->pts),
1371 av_ts2str(pkt->dts),
/* Public packet-reading entry point. Without AVFMT_FLAG_GENPTS it simply
 * drains s->packet_buffer or calls read_frame_internal(). With GENPTS it
 * buffers packets and scans ahead to synthesize missing pts values from the
 * dts of later packets of the same stream before releasing a packet.
 * Finally, internally relative timestamps are rebased to real values.
 * NOTE(review): loop-control and error-path lines are elided in this view. */
1379 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1381 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1386 ret = s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
1387 &s->packet_buffer_end,
1389 read_frame_internal(s, pkt);
1394 AVPacketList *pktl = s->packet_buffer;
1397 AVPacket *next_pkt = &pktl->pkt;
1399 if (next_pkt->dts != AV_NOPTS_VALUE) {
1400 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1401 // last dts seen for this stream. if any of packets following
1402 // current one had no dts, we will set this to AV_NOPTS_VALUE.
1403 int64_t last_dts = next_pkt->dts;
/* Scan forward through buffered packets of the same stream; the first
 * later dts (modulo timestamp wrap) of a non-B frame becomes our pts. */
1404 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1405 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1406 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1407 if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1408 next_pkt->pts = pktl->pkt.dts;
1410 if (last_dts != AV_NOPTS_VALUE) {
1411 // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1412 last_dts = pktl->pkt.dts;
1417 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1418 // Fixing the last reference frame had none pts issue (For MXF etc).
1419 // We only do this when
1421 // 2. we are not able to resolve a pts value for current packet.
1422 // 3. the packets for this stream at the end of the files had valid dts.
1423 next_pkt->pts = last_dts + next_pkt->duration;
1425 pktl = s->packet_buffer;
1428 /* read packet from packet buffer, if there is data */
1429 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1430 next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1431 ret = read_from_packet_buffer(&s->packet_buffer,
1432 &s->packet_buffer_end, pkt);
1437 ret = read_frame_internal(s, pkt);
1439 if (pktl && ret != AVERROR(EAGAIN)) {
/* Keep the freshly read packet buffered so the look-ahead above can
 * inspect it; duplicate so its data outlives the demuxer's buffers. */
1446 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1447 &s->packet_buffer_end)) < 0)
1448 return AVERROR(ENOMEM);
/* Strip the internal RELATIVE_TS_BASE offset before the packet leaves
 * the library, so callers see plain timestamps. */
1452 if (is_relative(pkt->dts))
1453 pkt->dts -= RELATIVE_TS_BASE;
1454 if (is_relative(pkt->pts))
1455 pkt->pts -= RELATIVE_TS_BASE;
1459 /* XXX: suppress the packet queue */
/* Free all three internal packet queues (parse, buffered, raw) and reset
 * the raw-packet probing budget to its initial size. */
1460 static void flush_packet_queue(AVFormatContext *s)
1462     free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1463     free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1464     free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1466     s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1469 /*******************************************************/
/* Pick a default stream for seeking: the first video stream that is not an
 * attached picture; otherwise the first audio stream; otherwise stream 0. */
1472 int av_find_default_stream_index(AVFormatContext *s)
1474     int first_audio_index = -1;
1478     if (s->nb_streams <= 0)
1480     for(i = 0; i < s->nb_streams; i++) {
1482         if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1483             !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1486         if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1487             first_audio_index = i;
1489     return first_audio_index >= 0 ? first_audio_index : 0;
1493  * Flush the frame reader.
/* Discard all queued packets and reset per-stream parsing/timestamp state,
 * typically after a seek: close parsers, invalidate cached dts/pts values
 * and the pts reorder buffer, and re-arm the probing counters. */
1495 void ff_read_frame_flush(AVFormatContext *s)
1500     flush_packet_queue(s);
1502     /* for each stream, reset read state */
1503     for(i = 0; i < s->nb_streams; i++) {
1507             av_parser_close(st->parser);
1510         st->last_IP_pts = AV_NOPTS_VALUE;
/* Streams whose first_dts is still unknown keep timestamps on the internal
 * relative base; otherwise the origin becomes unspecified until re-synced. */
1511         if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1512         else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1513         st->reference_dts = AV_NOPTS_VALUE;
1515         st->probe_packets = MAX_PROBE_PACKETS;
1517         for(j=0; j<MAX_REORDER_DELAY+1; j++)
1518             st->pts_buffer[j]= AV_NOPTS_VALUE;
/* After a seek, propagate the reference stream's timestamp to every stream,
 * rescaling it from ref_st's time base into each stream's own time base. */
1522 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1526     for(i = 0; i < s->nb_streams; i++) {
1527         AVStream *st = s->streams[i];
1529         st->cur_dts = av_rescale(timestamp,
1530                                  st->time_base.den * (int64_t)ref_st->time_base.num,
1531                                  st->time_base.num * (int64_t)ref_st->time_base.den);
/* Keep a stream's index within s->max_index_size bytes: when the entry count
 * hits the limit, halve it by keeping only every second entry in place. */
1535 void ff_reduce_index(AVFormatContext *s, int stream_index)
1537     AVStream *st= s->streams[stream_index];
1538     unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1540     if((unsigned)st->nb_index_entries >= max_entries){
1542         for(i=0; 2*i<st->nb_index_entries; i++)
1543             st->index_entries[i]= st->index_entries[2*i];
1544         st->nb_index_entries= i;
/* Insert or update an index entry, keeping the array sorted by timestamp.
 * Grows the array with av_fast_realloc(), locates the insertion point via
 * ff_index_search_timestamp(), shifts later entries when inserting in the
 * middle, and merges with an existing entry at the same timestamp.
 * NOTE(review): some lines (overflow early-return, final field stores and
 * the return of the index) are elided in this view. */
1548 int ff_add_index_entry(AVIndexEntry **index_entries,
1549                        int *nb_index_entries,
1550                        unsigned int *index_entries_allocated_size,
1551                        int64_t pos, int64_t timestamp, int size, int distance, int flags)
1553     AVIndexEntry *entries, *ie;
/* Refuse to grow past the point where the allocation size would overflow. */
1556     if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1559     if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1560         timestamp -= RELATIVE_TS_BASE;
1562     entries = av_fast_realloc(*index_entries,
1563                               index_entries_allocated_size,
1564                               (*nb_index_entries + 1) *
1565                               sizeof(AVIndexEntry));
1569     *index_entries= entries;
1571     index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
/* No entry with ts <= timestamp yet: append at the end. */
1574         index= (*nb_index_entries)++;
1575         ie= &entries[index];
1576         assert(index==0 || ie[-1].timestamp < timestamp);
1578         ie= &entries[index];
1579         if(ie->timestamp != timestamp){
1580             if(ie->timestamp <= timestamp)
/* Insert before 'index': shift the tail of the array up by one slot. */
1582             memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1583             (*nb_index_entries)++;
1584         }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1585             distance= ie->min_distance;
1589     ie->timestamp = timestamp;
1590     ie->min_distance= distance;
/* Public wrapper: add an index entry to a stream's own index arrays. */
1597 int av_add_index_entry(AVStream *st,
1598                        int64_t pos, int64_t timestamp, int size, int distance, int flags)
1600     return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1601                               &st->index_entries_allocated_size, pos,
1602                               timestamp, size, distance, flags);
/* Binary-search a sorted index for wanted_timestamp. Direction is chosen by
 * AVSEEK_FLAG_BACKWARD; unless AVSEEK_FLAG_ANY is set, the result is then
 * walked to the nearest keyframe entry in that direction.
 * NOTE(review): the binary-search loop bounds/updates are partly elided in
 * this view. */
1605 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1606                               int64_t wanted_timestamp, int flags)
1614     //optimize appending index entries at the end
1615     if(b && entries[b-1].timestamp < wanted_timestamp)
1620         timestamp = entries[m].timestamp;
1621         if(timestamp >= wanted_timestamp)
1623         if(timestamp <= wanted_timestamp)
1626     m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1628     if(!(flags & AVSEEK_FLAG_ANY)){
/* Step over non-keyframe entries until a keyframe (or array edge) is hit. */
1629         while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1630             m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Public wrapper: search a stream's own index for wanted_timestamp. */
1639 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1642     return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1643                                      wanted_timestamp, flags);
/* Seek using the format's read_timestamp() via binary search. Any cached
 * index entries around target_ts seed pos_min/ts_min and pos_max/ts_max to
 * narrow the search window before ff_gen_search() runs, then the file is
 * repositioned and the reader state flushed.
 * NOTE(review): error-path and initialization lines are elided in this view. */
1646 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1648     AVInputFormat *avif= s->iformat;
1649     int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1650     int64_t ts_min, ts_max, ts;
1655     if (stream_index < 0)
1658     av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1661     ts_min= AV_NOPTS_VALUE;
1662     pos_limit= -1; //gcc falsely says it may be uninitialized
1664     st= s->streams[stream_index];
1665     if(st->index_entries){
/* Lower bound: the last index entry at or before target_ts. */
1668         index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1669         index= FFMAX(index, 0);
1670         e= &st->index_entries[index];
1672         if(e->timestamp <= target_ts || e->pos == e->min_distance){
1674             ts_min= e->timestamp;
1675             av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1676                     pos_min, av_ts2str(ts_min));
/* Upper bound: the first index entry at or after target_ts. */
1681         index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1682         assert(index < st->nb_index_entries);
1684             e= &st->index_entries[index];
1685             assert(e->timestamp >= target_ts);
1687             ts_max= e->timestamp;
1688             pos_limit= pos_max - e->min_distance;
1689             av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
1690                     pos_max, pos_limit, av_ts2str(ts_max));
1694     pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1699     if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1702     ff_read_frame_flush(s);
1703     ff_update_cur_dts(s, st, ts);
/* Generic timestamp search over a byte range: establishes ts_min/ts_max by
 * probing the start and end of the file if the caller did not supply them,
 * then narrows [pos_min, pos_limit] by interpolation, bisection and finally
 * linear probing until pos_min meets pos_limit. Returns the chosen position
 * and stores its timestamp via *ts_ret (return statement not visible here).
 * NOTE(review): several loop-control and error-return lines are elided in
 * this view. */
1708 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1709                       int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1710                       int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1711                       int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1714     int64_t start_pos, filesize;
1717     av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
/* No lower bound supplied: take the first timestamp after the data offset. */
1719     if(ts_min == AV_NOPTS_VALUE){
1720         pos_min = s->data_offset;
1721         ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1722         if (ts_min == AV_NOPTS_VALUE)
1726     if(ts_min >= target_ts){
/* No upper bound supplied: probe backwards from the end of the file in
 * growing steps until a timestamp is found. */
1731     if(ts_max == AV_NOPTS_VALUE){
1733         filesize = avio_size(s->pb);
1734         pos_max = filesize - 1;
1737             ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1739         }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1740         if (ts_max == AV_NOPTS_VALUE)
/* Extend ts_max as long as later frames exist past pos_max. */
1744             int64_t tmp_pos= pos_max + 1;
1745             int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1746             if(tmp_ts == AV_NOPTS_VALUE)
1750             if(tmp_pos >= filesize)
1756     if(ts_max <= target_ts){
1761     if(ts_min > ts_max){
1763     }else if(ts_min == ts_max){
1768     while (pos_min < pos_limit) {
1769         av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1770                 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1771         assert(pos_limit <= pos_max);
1774             int64_t approximate_keyframe_distance= pos_max - pos_limit;
1775             // interpolate position (better than dichotomy)
1776             pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1777                 + pos_min - approximate_keyframe_distance;
1778         }else if(no_change==1){
1779             // bisection, if interpolation failed to change min or max pos last time
1780             pos = (pos_min + pos_limit)>>1;
1782             /* linear search if bisection failed, can only happen if there
1783                are very few or no keyframes between min/max */
1788         else if(pos > pos_limit)
1792         ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1797         av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1798                 pos_min, pos, pos_max,
1799                 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1800                 pos_limit, start_pos, no_change);
1801         if(ts == AV_NOPTS_VALUE){
1802             av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1805         assert(ts != AV_NOPTS_VALUE);
/* Shrink the window from whichever side the probe landed on. */
1806         if (target_ts <= ts) {
1807             pos_limit = start_pos - 1;
1811         if (target_ts >= ts) {
1817     pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1818     ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min  : ts_max;
1821     ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1823     ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1824     av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1825             pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
/* Byte-position seek: clamp pos to [data_offset, filesize-1] and reposition
 * the I/O context there. */
1831 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1832     int64_t pos_min, pos_max;
1834     pos_min = s->data_offset;
1835     pos_max = avio_size(s->pb) - 1;
1837     if     (pos < pos_min) pos= pos_min;
1838     else if(pos > pos_max) pos= pos_max;
1840     avio_seek(s->pb, pos, SEEK_SET);
/* Index-based generic seek. If the target lies beyond the last index entry,
 * read frames forward from the end of the index (building new entries via
 * the generic-index path) until the timestamp is covered, then retry the
 * index lookup and finally seek to the chosen entry's position.
 * NOTE(review): some declarations and return statements are elided in this
 * view. */
1845 static int seek_frame_generic(AVFormatContext *s,
1846                               int stream_index, int64_t timestamp, int flags)
1853     st = s->streams[stream_index];
1855     index = av_index_search_timestamp(st, timestamp, flags);
1857     if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1860     if(index < 0 || index==st->nb_index_entries-1){
/* Position after the last indexed frame (or at data start when the index
 * is empty) and scan forward. */
1864         if(st->nb_index_entries){
1865             assert(st->index_entries);
1866             ie= &st->index_entries[st->nb_index_entries-1];
1867             if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1869             ff_update_cur_dts(s, st, ie->timestamp);
1871             if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1877                 read_status = av_read_frame(s, &pkt);
1878             } while (read_status == AVERROR(EAGAIN));
1879             if (read_status < 0)
1881             av_free_packet(&pkt);
1882             if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1883                 if(pkt.flags & AV_PKT_FLAG_KEY)
/* Guard against scanning forever in keyframe-less streams; CD graphics
 * are exempted because they legitimately have no keyframes. */
1885                 if(nonkey++ > 1000 && st->codec->codec_id != CODEC_ID_CDGRAPHICS){
1886                     av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1891         index = av_index_search_timestamp(st, timestamp, flags);
1896     ff_read_frame_flush(s);
1897     AV_NOWARN_DEPRECATED(
1898     if (s->iformat->read_seek){
1899         if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1903     ie = &st->index_entries[index];
1904     if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1906     ff_update_cur_dts(s, st, ie->timestamp);
/* Dispatch a seek request to the most capable available method, in order:
 * byte seek (AVSEEK_FLAG_BYTE), the demuxer's read_seek(), binary search via
 * read_timestamp(), and finally the generic index-based seek. A negative
 * stream_index selects a default stream and rescales the AV_TIME_BASE
 * timestamp into that stream's time base. */
1911 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1912                                int64_t timestamp, int flags)
1917     if (flags & AVSEEK_FLAG_BYTE) {
1918         if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1920         ff_read_frame_flush(s);
1921         return seek_frame_byte(s, stream_index, timestamp, flags);
1924     if(stream_index < 0){
1925         stream_index= av_find_default_stream_index(s);
1926         if(stream_index < 0)
1929         st= s->streams[stream_index];
1930         /* timestamp for default must be expressed in AV_TIME_BASE units */
1931         timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1934     /* first, we try the format specific seek */
1935     AV_NOWARN_DEPRECATED(
1936     if (s->iformat->read_seek) {
1937         ff_read_frame_flush(s);
1938         ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1946     if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1947         ff_read_frame_flush(s);
1948         return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1949     } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1950         ff_read_frame_flush(s);
1951         return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public seek entry point: perform the seek, then re-queue attached
 * pictures so they are delivered again after the position change. */
1957 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1959     int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1962         queue_attached_pictures(s);
/* New-style seek API with a [min_ts, max_ts] tolerance window around ts.
 * Prefers the demuxer's read_seek2(); otherwise emulates the window by
 * calling av_seek_frame() with a direction flag chosen from which side of
 * the window ts is closer to, retrying in the opposite direction on failure.
 * NOTE(review): some early-return and fallback lines are elided here. */
1967 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1969     if(min_ts > ts || max_ts < ts)
1972     if (s->iformat->read_seek2) {
1974         ff_read_frame_flush(s);
1975         ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1978             queue_attached_pictures(s);
1982     if(s->iformat->read_timestamp){
1983         //try to seek via read_timestamp()
1986     //Fallback to old API if new is not implemented but old is
1987     //Note the old has somewat different sematics
1988     AV_NOWARN_DEPRECATED(
1989     if (s->iformat->read_seek || 1) {
1990         int dir = (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0);
1991         int ret = av_seek_frame(s, stream_index, ts, flags | dir);
1992         if (ret<0 && ts != min_ts && max_ts != ts) {
1993             ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
1995                 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
2001     // try some generic seek like seek_frame_generic() but with new ts semantics
2004 /*******************************************************/
2007  * Return TRUE if the stream has accurate duration in any stream.
2009  * @return TRUE if the stream has accurate duration for at least one component.
/* True when any individual stream, or the container itself, carries a
 * known duration. */
2011 static int has_duration(AVFormatContext *ic)
2016     for(i = 0;i < ic->nb_streams; i++) {
2017         st = ic->streams[i];
2018         if (st->duration != AV_NOPTS_VALUE)
2021     if (ic->duration != AV_NOPTS_VALUE)
2027  * Estimate the stream timings from the one of each components.
2029  * Also computes the global bitrate if possible.
/* Derive container-level start_time/duration from the per-stream values
 * (all rescaled to AV_TIME_BASE). Subtitle/data streams are tracked
 * separately so an early subtitle cannot drag the start time unreasonably
 * far back; the bitrate is computed from filesize/duration when possible. */
2031 static void update_stream_timings(AVFormatContext *ic)
2033     int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2034     int64_t duration, duration1, filesize;
2038     start_time = INT64_MAX;
2039     start_time_text = INT64_MAX;
2040     end_time = INT64_MIN;
2041     duration = INT64_MIN;
2042     for(i = 0;i < ic->nb_streams; i++) {
2043         st = ic->streams[i];
2044         if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2045             start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
2046             if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
2047                 if (start_time1 < start_time_text)
2048                     start_time_text = start_time1;
2050                 start_time = FFMIN(start_time, start_time1);
2051             if (st->duration != AV_NOPTS_VALUE) {
2052                 end_time1 = start_time1
2053                           + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2054                 end_time = FFMAX(end_time, end_time1);
2057         if (st->duration != AV_NOPTS_VALUE) {
2058             duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2059             duration = FFMAX(duration, duration1);
/* Accept the text-stream start time only when it is missing from A/V
 * streams or within one second of them; otherwise treat it as an outlier. */
2062     if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2063         start_time = start_time_text;
2064     else if(start_time > start_time_text)
2065         av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);
2067     if (start_time != INT64_MAX) {
2068         ic->start_time = start_time;
2069         if (end_time != INT64_MIN)
2070             duration = FFMAX(duration, end_time - start_time);
2072     if (duration != INT64_MIN && ic->duration == AV_NOPTS_VALUE) {
2073         ic->duration = duration;
2075     if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2076         /* compute the bitrate */
2077         ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
2078             (double)ic->duration;
/* Back-fill per-stream start_time/duration from the container-level values
 * (rescaled into each stream's time base) for streams that lack them. */
2082 static void fill_all_stream_timings(AVFormatContext *ic)
2087     update_stream_timings(ic);
2088     for(i = 0;i < ic->nb_streams; i++) {
2089         st = ic->streams[i];
2090         if (st->start_time == AV_NOPTS_VALUE) {
2091             if(ic->start_time != AV_NOPTS_VALUE)
2092                 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2093             if(ic->duration != AV_NOPTS_VALUE)
2094                 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Last-resort duration estimate: sum the per-stream codec bitrates when the
 * container bitrate is unset, then derive duration = 8*filesize/bitrate and
 * assign it to streams that have no duration of their own. */
2099 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2101     int64_t filesize, duration;
2105     /* if bit_rate is already set, we believe it */
2106     if (ic->bit_rate <= 0) {
2108         for(i=0;i<ic->nb_streams;i++) {
2109             st = ic->streams[i];
2110             if (st->codec->bit_rate > 0)
2111             bit_rate += st->codec->bit_rate;
2113         ic->bit_rate = bit_rate;
2116     /* if duration is already set, we believe it */
2117     if (ic->duration == AV_NOPTS_VALUE &&
2118         ic->bit_rate != 0) {
2119         filesize = ic->pb ? avio_size(ic->pb) : 0;
2121             for(i = 0; i < ic->nb_streams; i++) {
2122                 st = ic->streams[i];
2123                 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2124                 if (st->duration == AV_NOPTS_VALUE)
2125                     st->duration = duration;
/* Read budget per attempt, and how many times to widen it when the tail of
 * the file yields no usable pts. */
2131 #define DURATION_MAX_READ_SIZE 250000
2132 #define DURATION_MAX_RETRY 3
2134 /* only usable for MPEG-PS streams */
/* Estimate durations by reading packets near the end of the file: the last
 * pts seen per stream, minus that stream's start time (or first_dts),
 * becomes its duration. Retries with an exponentially larger tail window,
 * then restores the original file position and resets per-stream dts state.
 * NOTE(review): some loop-control and wrap-handling context lines are
 * elided in this view. */
2135 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2137     AVPacket pkt1, *pkt = &pkt1;
2139     int read_size, i, ret;
2141     int64_t filesize, offset, duration;
2144     /* flush packet queue */
2145     flush_packet_queue(ic);
2147     for (i=0; i<ic->nb_streams; i++) {
2148         st = ic->streams[i];
2149         if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2150             av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2153             av_parser_close(st->parser);
2158     /* estimate the end time (duration) */
2159     /* XXX: may need to support wrapping */
2160     filesize = ic->pb ? avio_size(ic->pb) : 0;
2161     end_time = AV_NOPTS_VALUE;
2163         offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2167         avio_seek(ic->pb, offset, SEEK_SET);
2170             if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2174                 ret = ff_read_packet(ic, pkt);
2175             } while(ret == AVERROR(EAGAIN));
2178             read_size += pkt->size;
2179             st = ic->streams[pkt->stream_index];
2180             if (pkt->pts != AV_NOPTS_VALUE &&
2181                 (st->start_time != AV_NOPTS_VALUE ||
2182                  st->first_dts  != AV_NOPTS_VALUE)) {
2183                 duration = end_time = pkt->pts;
2184                 if (st->start_time != AV_NOPTS_VALUE)
2185                     duration -= st->start_time;
2187                     duration -= st->first_dts;
/* A negative result indicates a timestamp wrap; unwrap by one period. */
2189                     duration += 1LL<<st->pts_wrap_bits;
2191                     if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
2192                         st->duration = duration;
2195             av_free_packet(pkt);
2197     }while(   end_time==AV_NOPTS_VALUE
2198            && filesize > (DURATION_MAX_READ_SIZE<<retry)
2199            && ++retry <= DURATION_MAX_RETRY);
2201     fill_all_stream_timings(ic);
/* Put the file position back where avformat_find_stream_info left it. */
2203     avio_seek(ic->pb, old_offset, SEEK_SET);
2204     for (i=0; i<ic->nb_streams; i++) {
2206         st->cur_dts= st->first_dts;
2207         st->last_IP_pts = AV_NOPTS_VALUE;
2208         st->reference_dts = AV_NOPTS_VALUE;
/* Choose a duration-estimation strategy and record which one was used in
 * ic->duration_estimation_method: pts scanning for seekable MPEG-PS/TS,
 * per-stream timings when any stream has them, else the bitrate heuristic. */
2212 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2216     /* get the file size, if possible */
2217     if (ic->iformat->flags & AVFMT_NOFILE) {
2220         file_size = avio_size(ic->pb);
2221         file_size = FFMAX(0, file_size);
2224     if ((!strcmp(ic->iformat->name, "mpeg") ||
2225          !strcmp(ic->iformat->name, "mpegts")) &&
2226         file_size && ic->pb->seekable) {
2227         /* get accurate estimate from the PTSes */
2228         estimate_timings_from_pts(ic, old_offset);
2229         ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
2230     } else if (has_duration(ic)) {
2231         /* at least one component has timings - we use them for all
2233         fill_all_stream_timings(ic);
2234         ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
2236         av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2237         /* less precise: use bitrate info */
2238         estimate_timings_from_bit_rate(ic);
2239         ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE;
2241     update_stream_timings(ic);
/* Debug dump of the final per-stream and container timings. */
2245         AVStream av_unused *st;
2246     for(i = 0;i < ic->nb_streams; i++) {
2247         st = ic->streams[i];
2248         av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2249                 (double) st->start_time / AV_TIME_BASE,
2250                 (double) st->duration / AV_TIME_BASE);
2252     av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2253             (double) ic->start_time / AV_TIME_BASE,
2254             (double) ic->duration / AV_TIME_BASE,
2255             ic->bit_rate / 1000);
/* Return nonzero when enough codec parameters are known for this stream
 * (per media type: sample rate/channels/sample_fmt for audio, pix_fmt for
 * video, etc.) to consider probing of it complete.
 * NOTE(review): parts of the per-type checks are elided in this view. */
2259 static int has_codec_parameters(AVStream *st)
2261     AVCodecContext *avctx = st->codec;
2263     switch (avctx->codec_type) {
2264     case AVMEDIA_TYPE_AUDIO:
2265         val = avctx->sample_rate && avctx->channels;
2266         if (!avctx->frame_size && determinable_frame_size(avctx))
/* Once a decoder was tried, an undetermined sample_fmt means "not ready". */
2268         if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2271     case AVMEDIA_TYPE_VIDEO:
2273         if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
2276     case AVMEDIA_TYPE_DATA:
2277         if(avctx->codec_id == CODEC_ID_NONE) return 1;
2282     return avctx->codec_id != CODEC_ID_NONE && val != 0;
/* Only H.264 needs its B-frame reorder delay guessed; require a number of
 * decoded frames that grows with has_b_frames before trusting the guess
 * (or trust it immediately when the decoder reports a matching value). */
2285 static int has_decode_delay_been_guessed(AVStream *st)
2287     if(st->codec->codec_id != CODEC_ID_H264) return 1;
2288 #if CONFIG_H264_DECODER
2289     if(st->codec->has_b_frames &&
2290        avpriv_h264_has_num_reorder_frames(st->codec) == st->codec->has_b_frames)
2293     if(st->codec->has_b_frames<3)
2294         return st->info->nb_decoded_frames >= 6;
2295     else if(st->codec->has_b_frames<4)
2296         return st->info->nb_decoded_frames >= 18;
2298         return st->info->nb_decoded_frames >= 20;
2301 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/* Decode a packet during probing to fill in parameters the headers did not
 * provide (pix_fmt/sample_fmt, reorder delay, ...). Opens the decoder on
 * first use — forced to a single thread so H.264 can extract SPS/PPS — and
 * keeps decoding until the needed parameters are known or input runs out.
 * NOTE(review): error returns and parts of the loop body are elided here. */
2302 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2305     int got_picture = 1, ret = 0;
2307     AVPacket pkt = *avpkt;
2309     if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2310         AVDictionary *thread_opt = NULL;
2312         codec = st->codec->codec ? st->codec->codec :
2313                                    avcodec_find_decoder(st->codec->codec_id);
/* found_decoder stays -1 when no decoder exists/opens, so later probing
 * knows decoding is impossible for this stream. */
2316             st->info->found_decoder = -1;
2320         /* force thread count to 1 since the h264 decoder will not extract SPS
2321          * and PPS to extradata during multi-threaded decoding */
2322         av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2323         ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2325             av_dict_free(&thread_opt);
2327             st->info->found_decoder = -1;
2330         st->info->found_decoder = 1;
2331     } else if (!st->info->found_decoder)
2332         st->info->found_decoder = 1;
2334     if (st->info->found_decoder < 0)
2337     while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2339            (!has_codec_parameters(st) ||
2340             !has_decode_delay_been_guessed(st) ||
2341             (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2343         avcodec_get_frame_defaults(&picture);
2344         switch(st->codec->codec_type) {
2345         case AVMEDIA_TYPE_VIDEO:
2346             ret = avcodec_decode_video2(st->codec, &picture,
2347                                         &got_picture, &pkt);
2349         case AVMEDIA_TYPE_AUDIO:
2350             ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
2357                 st->info->nb_decoded_frames++;
/* NULL data with no output means the decoder is fully drained. */
2363         if(!pkt.data && !got_picture)
/* Look up the container tag for a codec id in a tag table terminated by
 * CODEC_ID_NONE. NOTE(review): the loop body and return are elided here. */
2368 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2370     while (tags->id != CODEC_ID_NONE) {
/* Map a container tag to a codec id: first pass requires an exact tag
 * match, second pass retries case-insensitively via avpriv_toupper4(). */
2378 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2381     for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2382         if(tag == tags[i].tag)
2385     for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2386         if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2389     return CODEC_ID_NONE;
/* Search a NULL-terminated array of tag tables for the tag of a codec id. */
2392 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2395     for(i=0; tags && tags[i]; i++){
2396         int tag= ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated array of tag tables for the codec id of a tag. */
2402 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2405     for(i=0; tags && tags[i]; i++){
2406         enum CodecID id= ff_codec_get_id(tags[i], tag);
2407         if(id!=CODEC_ID_NONE) return id;
2409     return CODEC_ID_NONE;
/* Fill in missing chapter end times: each open-ended chapter ends at the
 * start of the next chapter that begins after it, capped by the overall
 * end of the file (start_time + duration, rescaled to the chapter's
 * time base); a chapter with no successor and no file end collapses to
 * zero length (end = start). */
2412 static void compute_chapters_end(AVFormatContext *s)
2415     int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2417     for (i = 0; i < s->nb_chapters; i++)
2418         if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2419             AVChapter *ch = s->chapters[i];
2420             int64_t   end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2423             for (j = 0; j < s->nb_chapters; j++) {
2424                 AVChapter *ch1 = s->chapters[j];
2425                 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2426                 if (j != i && next_start > ch->start && next_start < end)
2429             ch->end = (end == INT64_MAX) ? ch->start : end;
/* Enumerate candidate standard frame rates (scaled by 12*1001) for fps
 * detection: indices below 60*12 cover 0.08..60 fps in 1/12 fps steps,
 * the rest map to a short table of common exact rates. */
2433 static int get_std_framerate(int i){
2434     if(i<60*12) return (i+1)*1001;
2435     else        return ((const int[]){24,30,60,12,15,48})[i-60*12]*1000*12;
2439  * Is the time base unreliable.
2440  * This is a heuristic to balance between quick acceptance of the values in
2441  * the headers vs. some extra checks.
2442  * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2443  * MPEG-2 commonly misuses field repeat flags to store different framerates.
2444  * And there are "variable" fps files this needs to detect as well.
/* Nonzero when the codec time base should not be trusted as a frame rate:
 * implausibly slow (den >= 101*num) or fast (den < 5*num) bases, or codecs
 * known to misreport (MPEG-2, H.264). */
2446 static int tb_unreliable(AVCodecContext *c){
2447     if(   c->time_base.den >= 101L*c->time_base.num
2448        || c->time_base.den <    5L*c->time_base.num
2449 /*       || c->codec_tag == AV_RL32("DIVX")
2450        || c->codec_tag == AV_RL32("XVID")*/
2451        || c->codec_id == CODEC_ID_MPEG2VIDEO
2452        || c->codec_id == CODEC_ID_H264
2458 #if FF_API_FORMAT_PARAMETERS
/* Deprecated compatibility wrapper for avformat_find_stream_info(). */
2459 int av_find_stream_info(AVFormatContext *ic)
2461     return avformat_find_stream_info(ic, NULL);
2465 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2467 int i, count, ret, read_size, j;
2469 AVPacket pkt1, *pkt;
2470 int64_t old_offset = avio_tell(ic->pb);
2471 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2472 int flush_codecs = 1;
2475 av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
2477 for(i=0;i<ic->nb_streams;i++) {
2479 AVDictionary *thread_opt = NULL;
2480 st = ic->streams[i];
2482 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2483 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2484 /* if(!st->time_base.num)
2486 if(!st->codec->time_base.num)
2487 st->codec->time_base= st->time_base;
2489 //only for the split stuff
2490 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2491 st->parser = av_parser_init(st->codec->codec_id);
2493 if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
2494 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2495 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
2496 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
2498 } else if (st->need_parsing) {
2499 av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
2500 "%s, packets or times may be invalid.\n",
2501 avcodec_get_name(st->codec->codec_id));
2504 codec = st->codec->codec ? st->codec->codec :
2505 avcodec_find_decoder(st->codec->codec_id);
2507 /* force thread count to 1 since the h264 decoder will not extract SPS
2508 * and PPS to extradata during multi-threaded decoding */
2509 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2511 /* Ensure that subtitle_header is properly set. */
2512 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2513 && codec && !st->codec->codec)
2514 avcodec_open2(st->codec, codec, options ? &options[i]
2517 //try to just open decoders, in case this is enough to get parameters
2518 if (!has_codec_parameters(st)) {
2519 if (codec && !st->codec->codec)
2520 avcodec_open2(st->codec, codec, options ? &options[i]
2524 av_dict_free(&thread_opt);
2527 for (i=0; i<ic->nb_streams; i++) {
2528 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2534 if (ff_check_interrupt(&ic->interrupt_callback)){
2536 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2540 /* check if one codec still needs to be handled */
2541 for(i=0;i<ic->nb_streams;i++) {
2542 int fps_analyze_framecount = 20;
2544 st = ic->streams[i];
2545 if (!has_codec_parameters(st))
2547 /* if the timebase is coarse (like the usual millisecond precision
2548 of mkv), we need to analyze more frames to reliably arrive at
2550 if (av_q2d(st->time_base) > 0.0005)
2551 fps_analyze_framecount *= 2;
2552 if (ic->fps_probe_size >= 0)
2553 fps_analyze_framecount = ic->fps_probe_size;
2554 /* variable fps and no guess at the real fps */
2555 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2556 && st->info->duration_count < fps_analyze_framecount
2557 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2559 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2561 if (st->first_dts == AV_NOPTS_VALUE &&
2562 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2563 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2566 if (i == ic->nb_streams) {
2567 /* NOTE: if the format has no header, then we need to read
2568 some packets to get most of the streams, so we cannot
2570 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2571 /* if we found the info for all the codecs, we can stop */
2573 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2578 /* we did not get all the codec info, but we read too much data */
2579 if (read_size >= ic->probesize) {
2581 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2582 for (i = 0; i < ic->nb_streams; i++)
2583 if (!ic->streams[i]->r_frame_rate.num &&
2584 ic->streams[i]->info->duration_count <= 1)
2585 av_log(ic, AV_LOG_WARNING,
2586 "Stream #%d: not enough frames to estimate rate; "
2587 "consider increasing probesize\n", i);
2591 /* NOTE: a new stream can be added there if no header in file
2592 (AVFMTCTX_NOHEADER) */
2593 ret = read_frame_internal(ic, &pkt1);
2594 if (ret == AVERROR(EAGAIN))
2602 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2603 if ((ret = av_dup_packet(pkt)) < 0)
2604 goto find_stream_info_err;
2606 read_size += pkt->size;
2608 st = ic->streams[pkt->stream_index];
2609 if (st->codec_info_nb_frames>1) {
2611 if (st->time_base.den > 0)
2612 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
2613 if (st->avg_frame_rate.num > 0)
2614 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, (AVRational){st->avg_frame_rate.den, st->avg_frame_rate.num}, AV_TIME_BASE_Q));
2616 if (t >= ic->max_analyze_duration) {
2617 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached at %"PRId64"\n", ic->max_analyze_duration, t);
2620 st->info->codec_info_duration += pkt->duration;
2623 int64_t last = st->info->last_dts;
2625 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2626 double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2627 int64_t duration= pkt->dts - last;
2629 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2630 // av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2631 for (i=0; i<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); i++) {
2632 int framerate= get_std_framerate(i);
2633 double sdts= dts*framerate/(1001*12);
2635 int ticks= lrintf(sdts+j*0.5);
2636 double error= sdts - ticks + j*0.5;
2637 st->info->duration_error[j][0][i] += error;
2638 st->info->duration_error[j][1][i] += error*error;
2641 st->info->duration_count++;
2642 // ignore the first 4 values, they might have some random jitter
2643 if (st->info->duration_count > 3)
2644 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2646 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2647 st->info->last_dts = pkt->dts;
2649 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2650 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2651 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2652 st->codec->extradata_size= i;
2653 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2654 if (!st->codec->extradata)
2655 return AVERROR(ENOMEM);
2656 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2657 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2661 /* if still no information, we try to open the codec and to
2662 decompress the frame. We try to avoid that in most cases as
2663 it takes longer and uses more memory. For MPEG-4, we need to
2664 decompress for QuickTime.
2666 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2667 least one frame of codec data, this makes sure the codec initializes
2668 the channel configuration and does not only trust the values from the container.
2670 try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2672 st->codec_info_nb_frames++;
2677 AVPacket empty_pkt = { 0 };
2679 av_init_packet(&empty_pkt);
2681 ret = -1; /* we could not have all the codec parameters before EOF */
2682 for(i=0;i<ic->nb_streams;i++) {
2683 st = ic->streams[i];
2685 /* flush the decoders */
2686 if (st->info->found_decoder == 1) {
2688 err = try_decode_frame(st, &empty_pkt,
2689 (options && i < orig_nb_streams) ?
2690 &options[i] : NULL);
2691 } while (err > 0 && !has_codec_parameters(st));
2694 av_log(ic, AV_LOG_INFO,
2695 "decoding for stream %d failed\n", st->index);
2699 if (!has_codec_parameters(st)){
2701 avcodec_string(buf, sizeof(buf), st->codec, 0);
2702 av_log(ic, AV_LOG_WARNING,
2703 "Could not find codec parameters (%s)\n", buf);
2710 // close codecs which were opened in try_decode_frame()
2711 for(i=0;i<ic->nb_streams;i++) {
2712 st = ic->streams[i];
2713 avcodec_close(st->codec);
2715 for(i=0;i<ic->nb_streams;i++) {
2716 st = ic->streams[i];
2717 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2718 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
2719 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2720 if(ff_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
2721 st->codec->codec_tag= tag;
2724 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2725 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2726 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2727 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2728 // the check for tb_unreliable() is not completely correct, since this is not about handling
2729 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2730 // ipmovie.c produces.
2731 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
2732 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2733 if (st->info->duration_count && !st->r_frame_rate.num
2734 && tb_unreliable(st->codec) /*&&
2735 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2736 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2738 double best_error= 0.01;
2740 for (j=0; j<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); j++) {
2743 if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
2745 if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
2748 int n= st->info->duration_count;
2749 double a= st->info->duration_error[k][0][j] / n;
2750 double error= st->info->duration_error[k][1][j]/n - a*a;
2752 if(error < best_error && best_error> 0.000000001){
2754 num = get_std_framerate(j);
2757 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
2760 // do not increase frame rate by more than 1 % in order to match a standard rate.
2761 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2762 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2765 if (!st->r_frame_rate.num){
2766 if( st->codec->time_base.den * (int64_t)st->time_base.num
2767 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2768 st->r_frame_rate.num = st->codec->time_base.den;
2769 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2771 st->r_frame_rate.num = st->time_base.den;
2772 st->r_frame_rate.den = st->time_base.num;
2775 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2776 if(!st->codec->bits_per_coded_sample)
2777 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2778 // set stream disposition based on audio service type
2779 switch (st->codec->audio_service_type) {
2780 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2781 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2782 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2783 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2784 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2785 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2786 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2787 st->disposition = AV_DISPOSITION_COMMENT; break;
2788 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2789 st->disposition = AV_DISPOSITION_KARAOKE; break;
2794 estimate_timings(ic, old_offset);
2796 compute_chapters_end(ic);
2798 find_stream_info_err:
2799 for (i=0; i < ic->nb_streams; i++) {
2800 if (ic->streams[i]->codec)
2801 ic->streams[i]->codec->thread_count = 0;
2802 av_freep(&ic->streams[i]->info);
2805 av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
/*
 * Find the program to which stream index 's' belongs, resuming the search
 * after 'last' (pass NULL to start from the first program).
 * NOTE(review): several interior lines (declarations, the "seen last" logic,
 * the final return) are elided in this excerpt.
 */
2809 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
2813     for (i = 0; i < ic->nb_programs; i++) {
2814         if (ic->programs[i] == last) {
2818         for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2819             if (ic->programs[i]->stream_index[j] == s)
2820                 return ic->programs[i]; /* program contains stream s */
/*
 * Select the "best" stream of the given media type.
 * Preference order visible here: honor wanted_stream_nb if >= 0, restrict the
 * search to the program related to 'related_stream' when given, skip
 * hearing/visually-impaired dispositions, prefer the stream with the most
 * probed frames (codec_info_nb_frames), and optionally report a decoder.
 * Returns the chosen stream index, AVERROR_STREAM_NOT_FOUND, or
 * AVERROR_DECODER_NOT_FOUND when a decoder was requested but none exists.
 * NOTE(review): some guard/continue lines are elided in this excerpt.
 */
2826 int av_find_best_stream(AVFormatContext *ic,
2827                         enum AVMediaType type,
2828                         int wanted_stream_nb,
2830                         AVCodec **decoder_ret,
2833     int i, nb_streams = ic->nb_streams;
2834     int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2835     unsigned *program = NULL;
2836     AVCodec *decoder = NULL, *best_decoder = NULL;
     /* restrict search to the related stream's program, if one exists */
2838     if (related_stream >= 0 && wanted_stream_nb < 0) {
2839         AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
2841             program = p->stream_index;
2842             nb_streams = p->nb_stream_indexes;
2845     for (i = 0; i < nb_streams; i++) {
2846         int real_stream_index = program ? program[i] : i;
2847         AVStream *st = ic->streams[real_stream_index];
2848         AVCodecContext *avctx = st->codec;
2849         if (avctx->codec_type != type)
2851         if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
     /* accessibility renditions are never picked as the default stream */
2853         if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2856             decoder = avcodec_find_decoder(st->codec->codec_id);
2859                 ret = AVERROR_DECODER_NOT_FOUND;
     /* keep the candidate with the highest probed frame count */
2863         if (best_count >= st->codec_info_nb_frames)
2865         best_count = st->codec_info_nb_frames;
2866         ret = real_stream_index;
2867         best_decoder = decoder;
     /* nothing found in the related program: restart over all streams */
2868         if (program && i == nb_streams - 1 && ret < 0) {
2870             nb_streams = ic->nb_streams;
2871             i = 0; /* no related stream found, try again with everything */
2875         *decoder_ret = best_decoder;
2879 /*******************************************************/
/*
 * Start/resume playing: delegate to the demuxer's read_play() if it has one,
 * otherwise un-pause the underlying AVIOContext.
 * NOTE(review): the avio_pause() branch is presumably guarded by a check on
 * s->pb (that line is elided in this excerpt) — confirm against the full file.
 */
2881 int av_read_play(AVFormatContext *s)
2883     if (s->iformat->read_play)
2884         return s->iformat->read_play(s);
2886         return avio_pause(s->pb, 0);
2887     return AVERROR(ENOSYS); /* neither demuxer nor IO layer supports it */
/*
 * Pause playing: mirror of av_read_play() — demuxer read_pause() first,
 * then avio_pause(pb, 1), else ENOSYS.
 * NOTE(review): the s->pb guard line is elided in this excerpt.
 */
2890 int av_read_pause(AVFormatContext *s)
2892     if (s->iformat->read_pause)
2893         return s->iformat->read_pause(s);
2895         return avio_pause(s->pb, 1);
2896     return AVERROR(ENOSYS);
/*
 * Free an AVFormatContext and everything it owns: per-stream data (parser,
 * attached picture, metadata, index, codec context), programs, chapters,
 * private data and the context metadata. Does NOT close any AVIOContext.
 * NOTE(review): the final free of 's' itself and some loop braces are elided
 * in this excerpt.
 */
2899 void avformat_free_context(AVFormatContext *s)
     /* free demuxer private options set via the priv_class */
2905     if (s->iformat && s->iformat->priv_class && s->priv_data)
2906         av_opt_free(s->priv_data);
2908     for(i=0;i<s->nb_streams;i++) {
2909         /* free all data in a stream component */
2912         av_parser_close(st->parser);
2914         if (st->attached_pic.data)
2915             av_free_packet(&st->attached_pic);
2916         av_dict_free(&st->metadata);
2917         av_freep(&st->index_entries);
2918         av_freep(&st->codec->extradata);
2919         av_freep(&st->codec->subtitle_header);
2920         av_freep(&st->codec);
2921         av_freep(&st->priv_data);
2922         av_freep(&st->info);
2925     for(i=s->nb_programs-1; i>=0; i--) {
2926         av_dict_free(&s->programs[i]->metadata);
2927         av_freep(&s->programs[i]->stream_index);
2928         av_freep(&s->programs[i]);
2930     av_freep(&s->programs);
2931     av_freep(&s->priv_data);
2932     while(s->nb_chapters--) {
2933         av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2934         av_freep(&s->chapters[s->nb_chapters]);
2936     av_freep(&s->chapters);
2937     av_dict_free(&s->metadata);
2938     av_freep(&s->streams);
2942 #if FF_API_CLOSE_INPUT_FILE
/* Deprecated compatibility wrapper around avformat_close_input(). */
2943 void av_close_input_file(AVFormatContext *s)
2945     avformat_close_input(&s);
/*
 * Close an input context: flush buffered packets, call the demuxer's
 * read_close(), free the context, and NULL the caller's pointer.
 * The IO context is remembered first so it is not touched when the format is
 * AVFMT_NOFILE or the app supplied custom IO (AVFMT_FLAG_CUSTOM_IO).
 * NOTE(review): the avio_close(pb) / *ps = NULL tail is elided in this excerpt.
 */
2949 void avformat_close_input(AVFormatContext **ps)
2951     AVFormatContext *s = *ps;
2952     AVIOContext *pb = (s->iformat && (s->iformat->flags & AVFMT_NOFILE)) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2954     flush_packet_queue(s);
2955     if (s->iformat && (s->iformat->read_close))
2956         s->iformat->read_close(s);
2957     avformat_free_context(s);
2963 #if FF_API_NEW_STREAM
/* Deprecated wrapper: create a stream via avformat_new_stream() and
 * (in elided lines) assign the caller-supplied id. */
2964 AVStream *av_new_stream(AVFormatContext *s, int id)
2966     AVStream *st = avformat_new_stream(s, NULL);
/*
 * Append a new AVStream to 's', optionally pre-configured for codec 'c'.
 * Grows the streams array, allocates the stream, its info struct and codec
 * context, and initializes timing fields to "unknown" defaults.
 * Returns the new stream (owned by 's'); error paths are elided here.
 */
2973 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
     /* guard the realloc size computation against overflow */
2979     if (s->nb_streams >= INT_MAX/sizeof(*streams))
2981     streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2984     s->streams = streams;
2986     st = av_mallocz(sizeof(AVStream));
2989     if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2993     st->info->last_dts = AV_NOPTS_VALUE;
2995     st->codec = avcodec_alloc_context3(c);
2997     /* no default bitrate if decoding */
2998     st->codec->bit_rate = 0;
3000     st->index = s->nb_streams;
3001     st->start_time = AV_NOPTS_VALUE;
3002     st->duration = AV_NOPTS_VALUE;
3003     /* we set the current DTS to 0 so that formats without any timestamps
3004        but durations get some timestamps, formats with some unknown
3005        timestamps have their first few packets buffered and the
3006        timestamps corrected before they are returned to the user */
3007     st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
3008     st->first_dts = AV_NOPTS_VALUE;
3009     st->probe_packets = MAX_PROBE_PACKETS;
3011     /* default pts setting is MPEG-like */
3012     avpriv_set_pts_info(st, 33, 1, 90000);
3013     st->last_IP_pts = AV_NOPTS_VALUE;
3014     for(i=0; i<MAX_REORDER_DELAY+1; i++)
3015         st->pts_buffer[i]= AV_NOPTS_VALUE;
3016     st->reference_dts = AV_NOPTS_VALUE;
3018     st->sample_aspect_ratio = (AVRational){0,1};
3020     s->streams[s->nb_streams++] = st;
/*
 * Return the program with the given id, creating and registering a new one
 * (discard = AVDISCARD_NONE) if it does not exist yet.
 * NOTE(review): the id assignment / return tail is elided in this excerpt.
 */
3024 AVProgram *av_new_program(AVFormatContext *ac, int id)
3026     AVProgram *program=NULL;
3029     av_dlog(ac, "new_program: id=0x%04x\n", id);
     /* reuse an existing program with the same id */
3031     for(i=0; i<ac->nb_programs; i++)
3032         if(ac->programs[i]->id == id)
3033             program = ac->programs[i];
3036         program = av_mallocz(sizeof(AVProgram));
3039         dynarray_add(&ac->programs, &ac->nb_programs, program);
3040         program->discard = AVDISCARD_NONE;
/*
 * Return the chapter with the given id, creating it if needed, then update
 * its title metadata, time base, start (and, in elided lines, end).
 */
3047 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
3049     AVChapter *chapter = NULL;
     /* reuse an existing chapter with the same id */
3052     for(i=0; i<s->nb_chapters; i++)
3053         if(s->chapters[i]->id == id)
3054             chapter = s->chapters[i];
3057         chapter= av_mallocz(sizeof(AVChapter));
3060         dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3062     av_dict_set(&chapter->metadata, "title", title, 0);
3064     chapter->time_base= time_base;
3065     chapter->start = start;
3071 /************************************************************/
3072 /* output media file */
/*
 * Allocate an output AVFormatContext. The muxer is chosen by explicit
 * 'oformat', else guessed from 'format' name, else guessed from 'filename'.
 * Allocates and default-initializes muxer private data when the muxer has
 * any, and copies the filename into the context.
 * On failure jumps to the error path which frees the context.
 * NOTE(review): several guard lines (NULL checks, goto labels, final
 * assignment to *avctx) are elided in this excerpt.
 */
3074 int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
3075                                    const char *format, const char *filename)
3077     AVFormatContext *s = avformat_alloc_context();
     /* explicit format name takes precedence over filename guessing */
3086         oformat = av_guess_format(format, NULL, NULL);
3088             av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
3089             ret = AVERROR(EINVAL);
3093         oformat = av_guess_format(NULL, filename, NULL);
3095             ret = AVERROR(EINVAL);
3096             av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
3103     s->oformat = oformat;
3104     if (s->oformat->priv_data_size > 0) {
3105         s->priv_data = av_mallocz(s->oformat->priv_data_size);
3108         if (s->oformat->priv_class) {
3109             *(const AVClass**)s->priv_data= s->oformat->priv_class;
3110             av_opt_set_defaults(s->priv_data);
3113         s->priv_data = NULL;
3116         av_strlcpy(s->filename, filename, sizeof(s->filename));
3120     av_log(s, AV_LOG_ERROR, "Out of memory\n");
3121     ret = AVERROR(ENOMEM);
3123     avformat_free_context(s);
3127 #if FF_API_ALLOC_OUTPUT_CONTEXT
/* Deprecated wrapper: same as avformat_alloc_output_context2() but returns
 * the context directly (NULL on any error, losing the error code). */
3128 AVFormatContext *avformat_alloc_output_context(const char *format,
3129                                                AVOutputFormat *oformat, const char *filename)
3131     AVFormatContext *avctx;
3132     int ret = avformat_alloc_output_context2(&avctx, oformat, format, filename);
3133     return ret < 0 ? NULL : avctx;
/*
 * Check the stream's codec_tag against the muxer's codec_tag tables.
 * Policy (from the original comment below): unknown tag+id pairs pass;
 * a tag bound to a different id fails; an id bound to a different tag fails
 * unless strict_std_compliance is below normal.
 * NOTE(review): the return statements and parts of the matching logic are
 * elided in this excerpt.
 */
3137 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
3139     const AVCodecTag *avctag;
3141     enum CodecID id = CODEC_ID_NONE;
3142     unsigned int tag = 0;
3145      * Check that tag + id is in the table
3146      * If neither is in the table -> OK
3147      * If tag is in the table with another id -> FAIL
3148      * If id is in the table with another tag -> FAIL unless strict < normal
3150     for (n = 0; s->oformat->codec_tag[n]; n++) {
3151         avctag = s->oformat->codec_tag[n];
3152         while (avctag->id != CODEC_ID_NONE) {
     /* case-insensitive fourcc comparison via toupper4 */
3153             if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
3155                 if (id == st->codec->codec_id)
3158             if (avctag->id == st->codec->codec_id)
3163     if (id != CODEC_ID_NONE)
3165     if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
/*
 * Validate stream parameters, apply options, write the muxer header and
 * initialize per-stream PTS generation.
 * Visible steps: copy caller options into a temp dict and apply them to the
 * context and muxer private data; sanity-check each stream (sample rate,
 * time base, dimensions, aspect-ratio consistency); reconcile codec_tag with
 * the muxer's tag tables; lazily allocate muxer private data; set the
 * "encoder" metadata tag unless bitexact; call write_header(); seed frac-based
 * PTS state per stream.
 * NOTE(review): many error-path lines (goto fail, braces, returns) are elided
 * in this excerpt.
 */
3170 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
3174     AVDictionary *tmp = NULL;
3177         av_dict_copy(&tmp, *options, 0);
3178     if ((ret = av_opt_set_dict(s, &tmp)) < 0)
3180     if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
3181         (ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3184     // some sanity checks
3185     if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
3186         av_log(s, AV_LOG_ERROR, "no streams\n");
3187         ret = AVERROR(EINVAL);
3191     for(i=0;i<s->nb_streams;i++) {
3194         switch (st->codec->codec_type) {
3195         case AVMEDIA_TYPE_AUDIO:
3196             if(st->codec->sample_rate<=0){
3197                 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
3198                 ret = AVERROR(EINVAL);
     /* derive block_align for fixed-size-sample codecs when unset */
3201             if(!st->codec->block_align)
3202                 st->codec->block_align = st->codec->channels *
3203                     av_get_bits_per_sample(st->codec->codec_id) >> 3;
3205         case AVMEDIA_TYPE_VIDEO:
3206             if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
3207                 av_log(s, AV_LOG_ERROR, "time base not set\n");
3208                 ret = AVERROR(EINVAL);
3211             if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
3212                 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
3213                 ret = AVERROR(EINVAL);
     /* stream and codec SAR must agree within a small tolerance */
3216             if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)
3217                && FFABS(av_q2d(st->sample_aspect_ratio) - av_q2d(st->codec->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
3219                 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
3220                        "(%d/%d) and encoder layer (%d/%d)\n",
3221                        st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3222                        st->codec->sample_aspect_ratio.num,
3223                        st->codec->sample_aspect_ratio.den);
3224                 ret = AVERROR(EINVAL);
3230         if(s->oformat->codec_tag){
3231             if(   st->codec->codec_tag
3232                && st->codec->codec_id == CODEC_ID_RAWVIDEO
3233                && (av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 || av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) ==MKTAG('r', 'a', 'w', ' '))
3234                && !validate_codec_tag(s, st)){
3235                 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi/mov, we override it here
3236                 st->codec->codec_tag= 0;
3238             if(st->codec->codec_tag){
3239                 if (!validate_codec_tag(s, st)) {
3240                     char tagbuf[32], cortag[32];
3241                     av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
3242                     av_get_codec_tag_string(cortag, sizeof(cortag), av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id));
3243                     av_log(s, AV_LOG_ERROR,
3244                            "Tag %s/0x%08x incompatible with output codec id '%d' (%s)\n",
3245                            tagbuf, st->codec->codec_tag, st->codec->codec_id, cortag);
3246                     ret = AVERROR_INVALIDDATA;
     /* no tag set: pick the muxer's default for this codec id */
3250                 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
3253         if(s->oformat->flags & AVFMT_GLOBALHEADER &&
3254             !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
3255           av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
3258     if (!s->priv_data && s->oformat->priv_data_size > 0) {
3259         s->priv_data = av_mallocz(s->oformat->priv_data_size);
3260         if (!s->priv_data) {
3261             ret = AVERROR(ENOMEM);
3264         if (s->oformat->priv_class) {
3265             *(const AVClass**)s->priv_data= s->oformat->priv_class;
3266             av_opt_set_defaults(s->priv_data);
3267             if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3272     /* set muxer identification string */
3273     if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
3274         av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
3277     if(s->oformat->write_header){
3278         ret = s->oformat->write_header(s);
3283     /* init PTS generation */
3284     for(i=0;i<s->nb_streams;i++) {
3285         int64_t den = AV_NOPTS_VALUE;
3288         switch (st->codec->codec_type) {
3289         case AVMEDIA_TYPE_AUDIO:
3290             den = (int64_t)st->time_base.num * st->codec->sample_rate;
3292         case AVMEDIA_TYPE_VIDEO:
3293             den = (int64_t)st->time_base.num * st->codec->time_base.den;
3298         if (den != AV_NOPTS_VALUE) {
3300                 ret = AVERROR_INVALIDDATA;
3303             frac_init(&st->pts, 0, 0, den);
3308         av_dict_free(options);
3317 //FIXME merge with compute_pkt_fields
/*
 * Muxing-side timestamp fixup: fill in missing duration/pts/dts on a packet,
 * validate monotonicity of dts and pts >= dts, then advance the stream's
 * cur_dts and frac-based pts accumulator.
 * 'delay' is the codec reorder delay (B-frames); dts is reconstructed from
 * pts through a small reorder buffer when the encoder supplied only pts.
 * Returns 0 on success or AVERROR(EINVAL) on invalid timestamps.
 * NOTE(review): several branch/brace lines are elided in this excerpt.
 */
3318 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
3319     int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
3320     int num, den, frame_size, i;
3322     av_dlog(s, "compute_pkt_fields2: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
3323             av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), delay, pkt->size, pkt->stream_index);
3325     /* duration field */
3326     if (pkt->duration == 0) {
3327         compute_frame_duration(&num, &den, st, NULL, pkt);
3329             pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
     /* with no reordering, dts alone determines pts */
3333     if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
3336     //XXX/FIXME this is a temporary hack until all encoders output pts
3337     if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
3340             av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
3344 //        pkt->pts= st->cur_dts;
3345         pkt->pts= st->pts.val;
3348     //calculate dts from pts
3349     if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
3350         st->pts_buffer[0]= pkt->pts;
3351         for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
3352             st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
     /* bubble the smallest buffered pts to slot 0; it becomes this dts */
3353         for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
3354             FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
3356         pkt->dts= st->pts_buffer[0];
     /* strict muxers require strictly increasing dts; others allow equal */
3359     if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE &&
3360         ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
3361           st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
3362         av_log(s, AV_LOG_ERROR,
3363                "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
3364                st->index, av_ts2str(st->cur_dts), av_ts2str(pkt->dts));
3365         return AVERROR(EINVAL);
3367     if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3368         av_log(s, AV_LOG_ERROR, "pts (%s) < dts (%s) in stream %d\n",
3369                av_ts2str(pkt->pts), av_ts2str(pkt->dts), st->index);
3370         return AVERROR(EINVAL);
3373 //    av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%s dts2:%s\n", av_ts2str(pkt->pts), av_ts2str(pkt->dts));
3374     st->cur_dts= pkt->dts;
3375     st->pts.val= pkt->dts;
     /* advance the fractional pts accumulator by one frame's duration */
3378     switch (st->codec->codec_type) {
3379     case AVMEDIA_TYPE_AUDIO:
3380         frame_size = get_audio_frame_size(st->codec, pkt->size, 1);
3382         /* HACK/FIXME, we skip the initial 0 size packets as they are most
3383            likely equal to the encoder delay, but it would be better if we
3384            had the real timestamps from the encoder */
3385         if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3386             frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3389     case AVMEDIA_TYPE_VIDEO:
3390         frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/*
 * Write a packet directly (no interleaving). A NULL-packet flush is passed
 * straight through when the muxer supports AVFMT_ALLOW_FLUSH; otherwise the
 * packet's timestamps are fixed up first, the error being ignored for
 * AVFMT_NOTIMESTAMPS muxers, and nb_frames is bumped on success.
 * NOTE(review): guard/return lines around the visible ones are elided here.
 */
3398 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3403         if (s->oformat->flags & AVFMT_ALLOW_FLUSH)
3404             return s->oformat->write_packet(s, pkt);
3408     ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
3410     if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3413     ret= s->oformat->write_packet(s, pkt);
3416         s->streams[pkt->stream_index]->nb_frames++;
/* Packet flag used below to mark the first packet of an interleaving chunk. */
3420 #define CHUNK_START 0x1000
/*
 * Insert a packet into the interleaving queue, kept sorted by 'compare'.
 * The packet is duplicated so the caller's copy is not freed. When chunked
 * interleaving is enabled (max_chunk_size/duration), packets are accumulated
 * per stream and chunk boundaries are tagged with CHUNK_START so comparison
 * only happens at chunk starts.
 * NOTE(review): several brace/else lines of the insertion logic are elided
 * in this excerpt.
 */
3422 int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3423                              int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3425     AVPacketList **next_point, *this_pktl;
3426     AVStream *st= s->streams[pkt->stream_index];
3427     int chunked= s->max_chunk_size || s->max_chunk_duration;
3429     this_pktl = av_mallocz(sizeof(AVPacketList));
3431         return AVERROR(ENOMEM);
3432     this_pktl->pkt= *pkt;
3433     pkt->destruct= NULL;             // do not free original but only the copy
3434     av_dup_packet(&this_pktl->pkt);  // duplicate the packet if it uses non-alloced memory
     /* start the search after this stream's last queued packet if any */
3436     if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3437         next_point = &(st->last_in_packet_buffer->next);
3439         next_point = &s->packet_buffer;
3444             uint64_t max= av_rescale_q(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base);
     /* still room in the current chunk? (-1U guards the 0 == unlimited case) */
3445             if(   st->interleaver_chunk_size     + pkt->size     <= s->max_chunk_size-1U
3446                && st->interleaver_chunk_duration + pkt->duration <= max-1U){
3447                 st->interleaver_chunk_size     += pkt->size;
3448                 st->interleaver_chunk_duration += pkt->duration;
3451                 st->interleaver_chunk_size     =
3452                 st->interleaver_chunk_duration = 0;
3453                 this_pktl->pkt.flags |= CHUNK_START;
     /* walk forward to the sorted insertion point */
3457         if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3459                   && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
3460                       || !compare(s, &(*next_point)->pkt, pkt))){
3461                 next_point= &(*next_point)->next;
3466             next_point = &(s->packet_buffer_end->next);
3469     assert(!*next_point);
3471     s->packet_buffer_end= this_pktl;
3474     this_pktl->next= *next_point;
3476     s->streams[pkt->stream_index]->last_in_packet_buffer=
3477     *next_point= this_pktl;
/*
 * Ordering predicate for dts-based interleaving: compare 'next' against 'pkt'
 * in their own time bases; when audio_preload is set and exactly one of the
 * two streams is audio, bias the audio stream's timestamp earlier by the
 * preload amount (exact integer path in elided lines, rescaled approximation
 * here). Stream index breaks exact ties.
 * NOTE(review): the branch structure between the two ts computations is
 * elided in this excerpt — confirm against the full file before relying on it.
 */
3481 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3483     AVStream *st = s->streams[ pkt ->stream_index];
3484     AVStream *st2= s->streams[ next->stream_index];
3485     int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3487     if(s->audio_preload && ((st->codec->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codec->codec_type == AVMEDIA_TYPE_AUDIO))){
3488         int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3489         int64_t ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3491             ts= ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
3492                -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
3495             comp= (ts>ts2) - (ts<ts2);
3499         return pkt->stream_index < next->stream_index;
/*
 * dts-ordered interleaver: queue 'pkt' (if any), then decide whether the
 * head of the queue may be emitted. Emission happens when every stream has a
 * queued packet, or when only subtitle streams are missing and the buffered
 * dts spread exceeds 20 seconds, or when 'flush' is set. Returns 1 with a
 * packet in 'out', 0 when more input is needed.
 * NOTE(review): flush-decision and return lines are elided in this excerpt.
 */
3503 int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3504                                  AVPacket *pkt, int flush)
3507     int stream_count=0, noninterleaved_count=0;
3508     int64_t delta_dts_max = 0;
3512         ret = ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
     /* count streams with queued data; subtitles may legitimately be sparse */
3517     for(i=0; i < s->nb_streams; i++) {
3518         if (s->streams[i]->last_in_packet_buffer) {
3520         } else if(s->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
3521             ++noninterleaved_count;
3525     if (s->nb_streams == stream_count) {
3528         for(i=0; i < s->nb_streams; i++) {
3529             if (s->streams[i]->last_in_packet_buffer) {
3531                     av_rescale_q(s->streams[i]->last_in_packet_buffer->pkt.dts,
3532                                 s->streams[i]->time_base,
3534                     av_rescale_q(s->packet_buffer->pkt.dts,
3535                                 s->streams[s->packet_buffer->pkt.stream_index]->time_base,
3537                 delta_dts_max= FFMAX(delta_dts_max, delta_dts);
     /* only sparse (subtitle) streams missing and buffer spans > 20s: flush */
3540         if(s->nb_streams == stream_count+noninterleaved_count &&
3541            delta_dts_max > 20*AV_TIME_BASE) {
3542             av_log(s, AV_LOG_DEBUG, "flushing with %d noninterleaved\n", noninterleaved_count);
3546     if(stream_count && flush){
3547         pktl= s->packet_buffer;
3550         s->packet_buffer= pktl->next;
3551         if(!s->packet_buffer)
3552             s->packet_buffer_end= NULL;
3554         if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3555             s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3559         av_init_packet(out);
3564 #if FF_API_INTERLEAVE_PACKET
/* Deprecated public wrapper around ff_interleave_packet_per_dts(). */
3565 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3566                                  AVPacket *pkt, int flush)
3568     return ff_interleave_packet_per_dts(s, out, pkt, flush);
 * Interleave an AVPacket correctly so it can be muxed.
 * @param out the interleaved packet will be output here
 * @param in the input packet
 * @param flush 1 if no further packets are available as input and all
 *              remaining packets should be output
 * @return 1 if a packet was output, 0 if no packet could be output,
 *         < 0 if an error occurred
/* Dispatch: use the muxer's own interleave_packet() when provided, otherwise
 * fall back to the generic dts-based interleaver. */
3581 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3582     if (s->oformat->interleave_packet) {
3583         int ret = s->oformat->interleave_packet(s, out, in, flush);
3588         return ff_interleave_packet_per_dts(s, out, in, flush);
/*
 * Interleaved write: fix up the packet's timestamps, push it through the
 * interleaver, and write out every packet the interleaver releases, bumping
 * nb_frames per written packet. Zero-sized audio packets are dropped (noted
 * as a hack below). A NULL pkt means flush.
 * NOTE(review): loop braces and some guard lines are elided in this excerpt.
 */
3591 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3595         AVStream *st= s->streams[ pkt->stream_index];
3597         //FIXME/XXX/HACK drop zero sized packets
3598         if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3601         av_dlog(s, "av_interleaved_write_frame size:%d dts:%s pts:%s\n",
3602                 pkt->size, av_ts2str(pkt->dts), av_ts2str(pkt->pts));
3603         if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
     /* muxers that need timestamps cannot accept a packet without dts */
3606         if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3607             return AVERROR(EINVAL);
3609         av_dlog(s, "av_interleaved_write_frame FLUSH\n");
3615         int ret= interleave_packet(s, &opkt, pkt, flush);
3616         if(ret<=0) //FIXME cleanup needed for ret<0 ?
3619         ret= s->oformat->write_packet(s, &opkt);
3621             s->streams[opkt.stream_index]->nb_frames++;
3623         av_free_packet(&opkt);
     /* surface deferred IO errors from the underlying AVIOContext */
3628         if(s->pb && s->pb->error)
3629             return s->pb->error;
/*
 * Finish muxing: drain the interleaver (flush=1), write each remaining
 * packet, call the muxer's write_trailer(), then free per-stream and muxer
 * private data. Returns the first error encountered, else the IO error state.
 * NOTE(review): loop braces and error-exit lines are elided in this excerpt.
 */
3633 int av_write_trailer(AVFormatContext *s)
3639         ret= interleave_packet(s, &pkt, NULL, 1);
3640         if(ret<0) //FIXME cleanup needed for ret<0 ?
3645         ret= s->oformat->write_packet(s, &pkt);
3647             s->streams[pkt.stream_index]->nb_frames++;
3649         av_free_packet(&pkt);
3653         if(s->pb && s->pb->error)
3657     if(s->oformat->write_trailer)
3658         ret = s->oformat->write_trailer(s);
3663         ret = s->pb ? s->pb->error : 0;
3664     for(i=0;i<s->nb_streams;i++) {
3665         av_freep(&s->streams[i]->priv_data);
3666         av_freep(&s->streams[i]->index_entries);
3668     if (s->oformat->priv_class)
3669         av_opt_free(s->priv_data);
3670     av_freep(&s->priv_data);
/*
 * Query the muxer for the last output dts and the corresponding wall-clock
 * time; ENOSYS when the muxer does not implement get_output_timestamp().
 */
3674 int av_get_output_timestamp(struct AVFormatContext *s, int stream,
3675                             int64_t *dts, int64_t *wall)
3677     if (!s->oformat || !s->oformat->get_output_timestamp)
3678         return AVERROR(ENOSYS);
3679     s->oformat->get_output_timestamp(s, stream, dts, wall);
/*
 * Add stream 'idx' to the program with id 'progid', ignoring out-of-range
 * indices and duplicates. The stream_index array is grown by one slot.
 * NOTE(review): the realloc-failure guard line is elided in this excerpt.
 */
3683 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3686     AVProgram *program=NULL;
3689     if (idx >= ac->nb_streams) {
3690         av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3694     for(i=0; i<ac->nb_programs; i++){
3695         if(ac->programs[i]->id != progid)
3697         program = ac->programs[i];
     /* skip if the stream is already part of this program */
3698         for(j=0; j<program->nb_stream_indexes; j++)
3699             if(program->stream_index[j] == idx)
3702         tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3705         program->stream_index = tmp;
3706         program->stream_index[program->nb_stream_indexes++] = idx;
/* Log a frame/sample rate with adaptive precision: two decimals when not a
 * whole number, integer when whole, and a "k" suffix for whole multiples of
 * 1000 (v holds the rate in hundredths for the whole-number tests). */
3711 static void print_fps(double d, const char *postfix){
3712     uint64_t v= lrintf(d*100);
3713     if (v% 100      ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3714     else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3715     else                  av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/* Log every entry of a metadata dictionary as aligned "key : value" lines.
 * A dictionary whose only entry is "language" is skipped entirely (the
 * language tag is printed next to the stream description instead).
 * NOTE(review): a few interior lines (value loop header, tmp buffer
 * declaration, pointer advance) are elided in this view. */
3718 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3720 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3721 AVDictionaryEntry *tag=NULL;
3723 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
/* empty key + IGNORE_SUFFIX iterates over all entries in order */
3724 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3725 if(strcmp("language", tag->key)){
3726 const char *p = tag->value;
3727 av_log(ctx, AV_LOG_INFO, "%s %-16s: ", indent, tag->key);
/* copy the value only up to the next CR (0x0d) or LF (0x0a) */
3730 size_t len = strcspn(p, "\xd\xa");
3731 av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3732 av_log(ctx, AV_LOG_INFO, "%s", tmp);
/* CR is rendered as a space; LF starts an aligned continuation line */
3734 if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3735 if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s %-16s: ", indent, "");
3738 av_log(ctx, AV_LOG_INFO, "\n");
3744 /* "user interface" functions */
/* Print a one-line human-readable description of stream i of ic
 * ("Stream #index:i[0xid](lang): codec, SAR/DAR, rates, dispositions"),
 * followed by the stream's metadata.
 * NOTE(review): interior lines (buf declaration, closing braces, the
 * av_reduce bound argument) are elided in this view. */
3745 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3748 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3749 AVStream *st = ic->streams[i];
/* reduce time_base for the debug-level print below */
3750 int g = av_gcd(st->time_base.num, st->time_base.den);
3751 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
/* codec description rendered by libavcodec into buf */
3752 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3753 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);
3754 /* the pid is an important information, so we display it */
3755 /* XXX: add a generic system */
3756 if (flags & AVFMT_SHOW_IDS)
3757 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3759 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3760 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3761 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* only show aspect info when the stream SAR is set and disagrees with
 * the codec-level SAR */
3762 if (st->sample_aspect_ratio.num && // default
3763 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3764 AVRational display_aspect_ratio;
/* DAR = coded dimensions scaled by the sample aspect ratio */
3765 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3766 st->codec->width*st->sample_aspect_ratio.num,
3767 st->codec->height*st->sample_aspect_ratio.den,
3769 av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3770 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3771 display_aspect_ratio.num, display_aspect_ratio.den);
/* frame-rate family: fps (average), tbr (real base), tbn (container
 * time base), tbc (codec time base) */
3773 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3774 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3775 print_fps(av_q2d(st->avg_frame_rate), "fps");
3776 if(st->r_frame_rate.den && st->r_frame_rate.num)
3777 print_fps(av_q2d(st->r_frame_rate), "tbr");
3778 if(st->time_base.den && st->time_base.num)
3779 print_fps(1/av_q2d(st->time_base), "tbn");
3780 if(st->codec->time_base.den && st->codec->time_base.num)
3781 print_fps(1/av_q2d(st->codec->time_base), "tbc");
/* disposition flags appended as parenthesized labels */
3783 if (st->disposition & AV_DISPOSITION_DEFAULT)
3784 av_log(NULL, AV_LOG_INFO, " (default)");
3785 if (st->disposition & AV_DISPOSITION_DUB)
3786 av_log(NULL, AV_LOG_INFO, " (dub)");
3787 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3788 av_log(NULL, AV_LOG_INFO, " (original)");
3789 if (st->disposition & AV_DISPOSITION_COMMENT)
3790 av_log(NULL, AV_LOG_INFO, " (comment)");
3791 if (st->disposition & AV_DISPOSITION_LYRICS)
3792 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3793 if (st->disposition & AV_DISPOSITION_KARAOKE)
3794 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3795 if (st->disposition & AV_DISPOSITION_FORCED)
3796 av_log(NULL, AV_LOG_INFO, " (forced)");
3797 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3798 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3799 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3800 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3801 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3802 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3803 av_log(NULL, AV_LOG_INFO, "\n");
3804 dump_metadata(NULL, st->metadata, " ");
/* Public pretty-printer: dump the whole container description (format,
 * metadata, duration, start time, bitrate, chapters, programs, streams)
 * to the log.  Streams that belong to a program are printed under that
 * program; the remaining ones are printed afterwards via the printed[]
 * bookkeeping array.
 * NOTE(review): interior lines (signature tail, loop braces, the
 * duration hour/min/sec breakdown, free of printed[]) are elided. */
3807 void av_dump_format(AVFormatContext *ic,
/* one flag per stream so program members are not printed twice */
3813 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3814 if (ic->nb_streams && !printed)
3817 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3818 is_output ? "Output" : "Input",
3820 is_output ? ic->oformat->name : ic->iformat->name,
3821 is_output ? "to" : "from", url);
3822 dump_metadata(NULL, ic->metadata, " ");
3824 av_log(NULL, AV_LOG_INFO, " Duration: ");
3825 if (ic->duration != AV_NOPTS_VALUE) {
3826 int hours, mins, secs, us;
3827 secs = ic->duration / AV_TIME_BASE;
3828 us = ic->duration % AV_TIME_BASE;
/* duration shown with two fractional digits */
3833 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3834 (100 * us) / AV_TIME_BASE);
3836 av_log(NULL, AV_LOG_INFO, "N/A");
3838 if (ic->start_time != AV_NOPTS_VALUE) {
3840 av_log(NULL, AV_LOG_INFO, ", start: ");
3841 secs = ic->start_time / AV_TIME_BASE;
/* abs(): keep the fractional part positive for negative start times */
3842 us = abs(ic->start_time % AV_TIME_BASE);
3843 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3844 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3846 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3848 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3850 av_log(NULL, AV_LOG_INFO, "N/A");
3852 av_log(NULL, AV_LOG_INFO, "\n");
/* chapter boundaries converted from the chapter's own time base */
3854 for (i = 0; i < ic->nb_chapters; i++) {
3855 AVChapter *ch = ic->chapters[i];
3856 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3857 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3858 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3860 dump_metadata(NULL, ch->metadata, " ");
3862 if(ic->nb_programs) {
3863 int j, k, total = 0;
3864 for(j=0; j<ic->nb_programs; j++) {
3865 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3867 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3868 name ? name->value : "");
3869 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3870 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3871 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
/* remember which streams were covered by a program */
3872 printed[ic->programs[j]->stream_index[k]] = 1;
3874 total += ic->programs[j]->nb_stream_indexes;
3876 if (total < ic->nb_streams)
3877 av_log(NULL, AV_LOG_INFO, " No Program\n");
/* print the streams not claimed by any program */
3879 for(i=0;i<ic->nb_streams;i++)
3881 dump_stream_format(ic, i, index, is_output);
/* ABI-compat shim: when building a shared library with symbol versioning,
 * re-export av_gettime under the old LIBAVFORMAT_54 version node so
 * binaries linked against the previous soname keep working. */
3886 #if FF_API_AV_GETTIME && CONFIG_SHARED && HAVE_SYMVER
3887 FF_SYMVER(int64_t, av_gettime, (void), "LIBAVFORMAT_54")
3889 return av_gettime();
3893 uint64_t ff_ntp_time(void)
3895 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/**
 * Expand a frame-number pattern into a file name.
 *
 * Replaces the first "%d" (optionally zero-padded, e.g. "%05d") in path
 * with number; "%%" produces a literal '%'.
 *
 * @param buf      output buffer, always NUL-terminated on return
 * @param buf_size size of buf in bytes
 * @param path     pattern, must contain exactly one "%d"-style directive
 * @param number   frame number substituted for the directive
 * @return 0 on success, -1 if the pattern is invalid (no %d, more than
 *         one %d, an unknown conversion) or the result does not fit
 */
int av_get_frame_filename(char *buf, int buf_size,
                          const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q = buf;
    p = path;
    percentd_found = 0;
    for (;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            do {
                nd = 0;
                /* cast to unsigned char: isdigit() on a possibly-negative
                 * plain char is undefined behavior per the C standard */
                while (isdigit((unsigned char)*p)) {
                    nd = nd * 10 + *p++ - '0';
                }
                c = *p++;
            } while (isdigit((unsigned char)c));

            switch (c) {
            case '%':
                goto addchar;
            case 'd':
                /* only a single %d directive is allowed */
                if (percentd_found)
                    goto fail;
                percentd_found = 1;
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;
            }
        } else {
addchar:
            /* copy a literal character, silently dropping overflow */
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;
fail:
    *q = '\0';
    return -1;
}
/* Shared worker for av_hex_dump / av_hex_dump_log: dump size bytes of buf
 * in 16-byte rows (hex columns plus a printable-ASCII gutter), either to
 * FILE *f or, when f is NULL, to av_log with the given context/level.
 * NOTE(review): interior lines (len clamp, offset print, padding branch,
 * final newline) are elided in this view. */
3953 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
/* route every print either to the log callback or to the stdio stream */
3957 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3959 for(i=0;i<size;i+=16) {
3966 PRINT(" %02x", buf[i+j]);
/* ASCII gutter: non-printable bytes are replaced before printing */
3971 for(j=0;j<len;j++) {
3973 if (c < ' ' || c > '~')
/* Public wrapper: hex-dump to a stdio stream; no log context or level is
 * involved, hence the NULL/0 arguments. */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
{
    hex_dump_internal(NULL, f, 0, buf, size);
}
3987 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3989 hex_dump_internal(avcl, NULL, level, buf, size);
/* Shared worker for the av_pkt_dump* family: print a packet's stream
 * index, keyframe flag, duration, dts/pts (rescaled via time_base) and
 * size, to FILE *f or, when f is NULL, to av_log.  Optionally hex-dumps
 * the payload.  NOTE(review): some label/newline print lines are elided
 * in this view. */
3992 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
/* same dual-sink dispatch as hex_dump_internal */
3995 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3996 PRINT("stream #%d:\n", pkt->stream_index);
3997 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
/* timestamps/durations converted to seconds via the supplied time base */
3998 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3999 /* DTS is _always_ valid after av_read_frame() */
4001 if (pkt->dts == AV_NOPTS_VALUE)
4004 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
4005 /* PTS may not be known if B-frames are present. */
4007 if (pkt->pts == AV_NOPTS_VALUE)
4010 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
4012 PRINT(" size=%d\n", pkt->size);
/* payload dump goes to the stream path only (f may be NULL here —
 * presumably handled by av_hex_dump; confirm in full source) */
4015 av_hex_dump(f, pkt->data, pkt->size);
4019 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
4021 AVRational tb = { 1, AV_TIME_BASE };
4022 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
4026 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
4028 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
4032 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
4034 AVRational tb = { 1, AV_TIME_BASE };
4035 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
4039 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
4042 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/* Split a URL of the form proto://user:pass@host:port/path into its
 * components.  Any output buffer may be sized 0 to skip that component;
 * *port_ptr is -1 when no port is present.
 * NOTE(review): a few interior lines (parameter list tail, else branches,
 * closing braces) are elided in this view. */
4045 void av_url_split(char *proto, int proto_size,
4046 char *authorization, int authorization_size,
4047 char *hostname, int hostname_size,
4049 char *path, int path_size,
4052 const char *p, *ls, *at, *col, *brk;
/* initialize every output to "absent" before parsing */
4054 if (port_ptr) *port_ptr = -1;
4055 if (proto_size > 0) proto[0] = 0;
4056 if (authorization_size > 0) authorization[0] = 0;
4057 if (hostname_size > 0) hostname[0] = 0;
4058 if (path_size > 0) path[0] = 0;
4060 /* parse protocol */
4061 if ((p = strchr(url, ':'))) {
/* copy everything before the ':' (av_strlcpy bounds include the NUL) */
4062 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
4067 /* no protocol means plain filename */
4068 av_strlcpy(path, url, path_size);
4072 /* separate path from hostname */
4073 ls = strchr(p, '/');
/* a query string with no path also terminates the authority part */
4075 ls = strchr(p, '?');
4077 av_strlcpy(path, ls, path_size);
/* no path at all: authority extends to the end of the string */
4079 ls = &p[strlen(p)]; // XXX
4081 /* the rest is hostname, use that to parse auth/port */
4083 /* authorization (user[:pass]@hostname) */
4084 if ((at = strchr(p, '@')) && at < ls) {
4085 av_strlcpy(authorization, p,
4086 FFMIN(authorization_size, at + 1 - p))
4087 p = at + 1; /* skip '@' */
/* bracketed numeric IPv6 literal, e.g. [::1]:8080 */
4090 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
4092 av_strlcpy(hostname, p + 1,
4093 FFMIN(hostname_size, brk - p));
4094 if (brk[1] == ':' && port_ptr)
4095 *port_ptr = atoi(brk + 2);
/* plain host:port */
4096 } else if ((col = strchr(p, ':')) && col < ls) {
4097 av_strlcpy(hostname, p,
4098 FFMIN(col + 1 - p, hostname_size));
4099 if (port_ptr) *port_ptr = atoi(col + 1);
/* bare hostname, no port */
4101 av_strlcpy(hostname, p,
4102 FFMIN(ls + 1 - p, hostname_size));
/**
 * Render s bytes of src as 2*s hexadecimal characters into buff.
 *
 * No NUL terminator is written; the caller must terminate buff itself.
 *
 * @param buff      destination, at least 2*s bytes
 * @param src       bytes to encode
 * @param s         number of input bytes
 * @param lowercase nonzero to emit a-f instead of A-F
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    /* exactly 16 chars: the literal fills the array without a NUL */
    static const char hex_digits_uc[16] = "0123456789ABCDEF";
    static const char hex_digits_lc[16] = "0123456789abcdef";
    const char *digits = lowercase ? hex_digits_lc : hex_digits_uc;
    int i;

    for (i = 0; i < s; i++) {
        buff[2 * i]     = digits[src[i] >> 4];
        buff[2 * i + 1] = digits[src[i] & 0xF];
    }

    return buff;
}
/* Decode a whitespace-tolerant hex string into bytes.  When data is NULL
 * only the decoded length is computed.  Returns the number of bytes
 * produced; decoding stops at the first non-hex character.
 * NOTE(review): interior lines (accumulator setup, digit merge, byte
 * emission, return) are elided in this view. */
4127 int ff_hex_to_data(uint8_t *data, const char *p)
/* skip any run of whitespace between digits */
4134 p += strspn(p, SPACE_CHARS);
/* unsigned char cast keeps toupper() well-defined for 8-bit input */
4137 c = toupper((unsigned char) *p++);
4138 if (c >= '0' && c <= '9')
4140 else if (c >= 'A' && c <= 'F')
/* Deprecated public entry point kept for ABI compatibility while
 * FF_API_SET_PTS_INFO is enabled; simply forwards to the private
 * avpriv_set_pts_info(). */
4155 #if FF_API_SET_PTS_INFO
4156 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
4157 unsigned int pts_num, unsigned int pts_den)
4159 avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
4163 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
4164 unsigned int pts_num, unsigned int pts_den)
4167 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
4168 if(new_tb.num != pts_num)
4169 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
4171 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
4173 if(new_tb.num <= 0 || new_tb.den <= 0) {
4174 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase %d/%d for st:%d\n", new_tb.num, new_tb.den, s->index);
4177 s->time_base = new_tb;
4178 s->pts_wrap_bits = pts_wrap_bits;
/* Assemble a URL "proto://[auth@]host[:port]<fmt...>" into str.  Numeric
 * IPv6 hosts are detected via getaddrinfo(AI_NUMERICHOST) and wrapped in
 * brackets when the network layer is compiled in.  Returns the total
 * string length.  NOTE(review): interior lines (str[0] init, va_list
 * handling, freeaddrinfo, #else/#endif, return) are elided in this view. */
4181 int ff_url_join(char *str, int size, const char *proto,
4182 const char *authorization, const char *hostname,
4183 int port, const char *fmt, ...)
4186 struct addrinfo hints = { 0 }, *ai;
4191 av_strlcatf(str, size, "%s://", proto);
4192 if (authorization && authorization[0])
4193 av_strlcatf(str, size, "%s@", authorization);
4194 #if CONFIG_NETWORK && defined(AF_INET6)
4195 /* Determine if hostname is a numerical IPv6 address,
4196 * properly escape it within [] in that case. */
4197 hints.ai_flags = AI_NUMERICHOST;
4198 if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
4199 if (ai->ai_family == AF_INET6) {
4200 av_strlcat(str, "[", size);
4201 av_strlcat(str, hostname, size);
4202 av_strlcat(str, "]", size);
4204 av_strlcat(str, hostname, size);
4209 /* Not an IPv6 address, just output the plain string. */
4210 av_strlcat(str, hostname, size);
/* port is optional — presumably guarded by a port > 0 check on an elided line */
4213 av_strlcatf(str, size, ":%d", port);
/* append the caller's printf-style tail after the authority part */
4216 int len = strlen(str);
4219 vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
4225 int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
4226 AVFormatContext *src)
4231 local_pkt.stream_index = dst_stream;
4232 if (pkt->pts != AV_NOPTS_VALUE)
4233 local_pkt.pts = av_rescale_q(pkt->pts,
4234 src->streams[pkt->stream_index]->time_base,
4235 dst->streams[dst_stream]->time_base);
4236 if (pkt->dts != AV_NOPTS_VALUE)
4237 local_pkt.dts = av_rescale_q(pkt->dts,
4238 src->streams[pkt->stream_index]->time_base,
4239 dst->streams[dst_stream]->time_base);
4240 return av_write_frame(dst, &local_pkt);
/* Parse a comma/whitespace-separated list of key=value pairs, invoking
 * callback_get_buf for each key so the caller can supply a destination
 * buffer for the value.  Values may be double-quoted; a backslash inside
 * quotes escapes the next character.
 * NOTE(review): interior lines (quote entry, escape handling, NUL
 * termination of dest) are elided in this view. */
4243 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
4246 const char *ptr = str;
4248 /* Parse key=value pairs. */
4251 char *dest = NULL, *dest_end;
4252 int key_len, dest_len = 0;
4254 /* Skip whitespace and potential commas. */
4255 while (*ptr && (isspace(*ptr) || *ptr == ','))
/* a pair without '=' terminates parsing */
4262 if (!(ptr = strchr(key, '=')))
4265 key_len = ptr - key;
/* the callback maps the key to a destination buffer (may decline with NULL) */
4267 callback_get_buf(context, key, key_len, &dest, &dest_len);
/* reserve one byte for the terminating NUL */
4268 dest_end = dest + dest_len - 1;
4272 while (*ptr && *ptr != '\"') {
/* quoted path: copy chars, honoring backslash escapes */
4276 if (dest && dest < dest_end)
4280 if (dest && dest < dest_end)
/* unquoted path: value runs until whitespace or comma */
4288 for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
4289 if (dest && dest < dest_end)
4297 int ff_find_stream_index(AVFormatContext *s, int id)
4300 for (i = 0; i < s->nb_streams; i++) {
4301 if (s->streams[i]->id == id)
/* Resolve rel against base into buf: handles absolute URLs, server-root
 * paths ("/..."), and relative paths with "../" popping of base path
 * components.  NOTE(review): interior lines (sep declaration, early
 * returns, sep advancement, rel += 3, trailing-slash trimming) are
 * elided in this view. */
4307 void ff_make_absolute_url(char *buf, int size, const char *base,
4311 /* Absolute path, relative to the current server */
4312 if (base && strstr(base, "://") && rel[0] == '/') {
4314 av_strlcpy(buf, base, size);
4315 sep = strstr(buf, "://");
/* truncate base after the authority, then append the rooted path */
4318 sep = strchr(sep, '/');
4322 av_strlcat(buf, rel, size);
4325 /* If rel actually is an absolute url, just copy it */
4326 if (!base || strstr(rel, "://") || rel[0] == '/') {
4327 av_strlcpy(buf, rel, size);
4331 av_strlcpy(buf, base, size);
4332 /* Remove the file name from the base url */
4333 sep = strrchr(buf, '/');
/* pop one base directory per leading "../" in rel */
4338 while (av_strstart(rel, "../", NULL) && sep) {
4339 /* Remove the path delimiter at the end */
4341 sep = strrchr(buf, '/');
4342 /* If the next directory name to pop off is "..", break here */
4343 if (!strcmp(sep ? &sep[1] : buf, "..")) {
4344 /* Readd the slash we just removed */
4345 av_strlcat(buf, "/", size);
4348 /* Cut off the directory name */
4355 av_strlcat(buf, rel, size);
/* Convert an ISO-8601 date string to Unix time (UTC, via av_timegm).
 * Two strptime patterns are tried: date+space+time and date+'T'+time;
 * the whitespace in the format strings lets strptime accept the "-"
 * separators flexibly.  NOTE(review): the HAVE_STRPTIME preprocessor
 * split, ret1/ret2 declarations and return statements are elided in
 * this view. */
4358 int64_t ff_iso8601_to_unix_time(const char *datestr)
4361 struct tm time1 = {0}, time2 = {0};
4363 ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
4364 ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
/* prefer the 'T'-separated form when only it matched */
4366 return av_timegm(&time2);
4368 return av_timegm(&time1);
/* fallback when the platform lacks strptime(): warn and give up */
4370 av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert "
4371 "the date string.\n");
4376 int avformat_query_codec(AVOutputFormat *ofmt, enum CodecID codec_id, int std_compliance)
4379 if (ofmt->query_codec)
4380 return ofmt->query_codec(codec_id, std_compliance);
4381 else if (ofmt->codec_tag)
4382 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4383 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
4384 codec_id == ofmt->subtitle_codec)
4387 return AVERROR_PATCHWELCOME;
/* Globally initialize the network layer (TLS/winsock setup as needed).
 * Must be called before network use if the process wants libavformat to
 * manage network init.  NOTE(review): the CONFIG_NETWORK guard, ret
 * declaration and return paths are elided in this view. */
4390 int avformat_network_init(void)
/* mark that the user opted into global network management */
4394 ff_network_inited_globally = 1;
4395 if ((ret = ff_network_init()) < 0)
/* Undo avformat_network_init(); body elided in this view. */
4402 int avformat_network_deinit(void)
/* Attach an AV_PKT_DATA_PARAM_CHANGE side-data blob to pkt describing
 * the parameter changes that are nonzero among channels, channel_layout,
 * sample_rate and width/height.  The blob is a flags word followed by
 * the changed values, all little-endian.
 * NOTE(review): the size accumulation lines and the final return are
 * elided in this view. */
4411 int ff_add_param_change(AVPacket *pkt, int32_t channels,
4412 uint64_t channel_layout, int32_t sample_rate,
4413 int32_t width, int32_t height)
/* presumably rejects a NULL pkt or no changes at all — confirm in full source */
4419 return AVERROR(EINVAL);
4422 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4424 if (channel_layout) {
4426 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4430 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4432 if (width || height) {
4434 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
/* allocate the side data sized for exactly the selected fields */
4436 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4438 return AVERROR(ENOMEM);
/* serialize: flags first, then each present value, little-endian */
4439 bytestream_put_le32(&data, flags);
4441 bytestream_put_le32(&data, channels);
4443 bytestream_put_le64(&data, channel_layout);
4445 bytestream_put_le32(&data, sample_rate);
4446 if (width || height) {
4447 bytestream_put_le32(&data, width);
4448 bytestream_put_le32(&data, height);
4453 const struct AVCodecTag *avformat_get_riff_video_tags(void)
4455 return ff_codec_bmp_tags;
4457 const struct AVCodecTag *avformat_get_riff_audio_tags(void)
4459 return ff_codec_wav_tags;
/* Pick the most trustworthy sample aspect ratio among stream, codec and
 * frame SARs: a valid stream-level SAR wins, otherwise the frame-level
 * SAR (which itself defaults to the codec-level SAR).  Invalid or
 * unreducible ratios collapse to 0/1 ("undefined").
 * NOTE(review): the function's closing lines run past this view. */
4462 AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4464 AVRational undef = {0, 1};
4465 AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4466 AVRational codec_sample_aspect_ratio = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
/* frame SAR falls back to the codec SAR when no frame is supplied */
4467 AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
/* normalize, then discard non-positive (invalid) ratios */
4469 av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4470 stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4471 if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4472 stream_sample_aspect_ratio = undef;
4474 av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4475 frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4476 if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4477 frame_sample_aspect_ratio = undef;
/* container-level (stream) SAR takes precedence when defined */
4479 if (stream_sample_aspect_ratio.num)
4480 return stream_sample_aspect_ratio;
4482 return frame_sample_aspect_ratio;