2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "avio_internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavcodec/raw.h"
29 #include "libavcodec/bytestream.h"
30 #include "libavutil/avassert.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/dict.h"
33 #include "libavutil/pixdesc.h"
36 #include "libavutil/avassert.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/timestamp.h"
42 #include "audiointerleave.h"
56 * various utility functions for use within FFmpeg
59 unsigned avformat_version(void)
61 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
62 return LIBAVFORMAT_VERSION_INT;
65 const char *avformat_configuration(void)
67 return FFMPEG_CONFIGURATION;
70 const char *avformat_license(void)
72 #define LICENSE_PREFIX "libavformat license: "
73 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Base for "relative" timestamps: packets whose stream has no anchored
 * first_dts yet get timestamps counted from this value; they are shifted
 * down later in update_initial_timestamps(). */
#define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))

/**
 * Check whether a timestamp lies in the relative-timestamp window,
 * i.e. ts > RELATIVE_TS_BASE - 2^48.
 */
static int is_relative(int64_t ts) {
    return ts > (RELATIVE_TS_BASE - (1LL<<48));
}
82 /* fraction handling */
85 * f = val + (num / den) + 0.5.
87 * 'num' is normalized so that it is such as 0 <= num < den.
89 * @param f fractional number
90 * @param val integer value
91 * @param num must be >= 0
92 * @param den must be >= 1
94 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
107 * Fractional addition to f: f = f + (incr / f->den).
109 * @param f fractional number
110 * @param incr increment, can be positive or negative
112 static void frac_add(AVFrac *f, int64_t incr)
125 } else if (num >= den) {
132 /** head of registered input format linked list */
133 static AVInputFormat *first_iformat = NULL;
134 /** head of registered output format linked list */
135 static AVOutputFormat *first_oformat = NULL;
137 AVInputFormat *av_iformat_next(AVInputFormat *f)
139 if(f) return f->next;
140 else return first_iformat;
143 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
145 if(f) return f->next;
146 else return first_oformat;
149 void av_register_input_format(AVInputFormat *format)
153 while (*p != NULL) p = &(*p)->next;
158 void av_register_output_format(AVOutputFormat *format)
162 while (*p != NULL) p = &(*p)->next;
/**
 * Check whether filename's extension matches one of the given extensions.
 *
 * @param filename   file name to check; may be NULL
 * @param extensions comma-separated list of extensions (without dots)
 * @return 1 on match, 0 otherwise
 */
int av_match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if (!filename)
        return 0;

    ext = strrchr(filename, '.');
    if (ext) {
        ext++; /* skip the dot */
        p = extensions;
        for (;;) {
            /* copy one comma-separated entry, truncated to the local buffer */
            q = ext1;
            while (*p != '\0' && *p != ',' && q - ext1 < sizeof(ext1) - 1)
                *q++ = *p++;
            *q = '\0';
            if (!av_strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++;
        }
    }
    return 0;
}
/**
 * Check whether "name" matches an entry of the comma-separated list "names".
 * Comparison is case-insensitive.
 *
 * @return 1 on match, 0 otherwise
 */
static int match_format(const char *name, const char *names)
{
    const char *p;
    int len, namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        /* compare against the full entry, not just a common prefix */
        len = FFMAX(p - names, namelen);
        if (!av_strncasecmp(name, names, len))
            return 1;
        names = p + 1;
    }
    /* last (or only) entry in the list */
    return !av_strcasecmp(name, names);
}
212 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
213 const char *mime_type)
215 AVOutputFormat *fmt = NULL, *fmt_found;
216 int score_max, score;
218 /* specific test for image sequences */
219 #if CONFIG_IMAGE2_MUXER
220 if (!short_name && filename &&
221 av_filename_number_test(filename) &&
222 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
223 return av_guess_format("image2", NULL, NULL);
226 /* Find the proper file type. */
229 while ((fmt = av_oformat_next(fmt))) {
231 if (fmt->name && short_name && !av_strcasecmp(fmt->name, short_name))
233 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
235 if (filename && fmt->extensions &&
236 av_match_ext(filename, fmt->extensions)) {
239 if (score > score_max) {
247 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
248 const char *filename, const char *mime_type, enum AVMediaType type){
249 if(type == AVMEDIA_TYPE_VIDEO){
250 enum CodecID codec_id= CODEC_ID_NONE;
252 #if CONFIG_IMAGE2_MUXER
253 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
254 codec_id= ff_guess_image2_codec(filename);
257 if(codec_id == CODEC_ID_NONE)
258 codec_id= fmt->video_codec;
260 }else if(type == AVMEDIA_TYPE_AUDIO)
261 return fmt->audio_codec;
262 else if (type == AVMEDIA_TYPE_SUBTITLE)
263 return fmt->subtitle_codec;
265 return CODEC_ID_NONE;
268 AVInputFormat *av_find_input_format(const char *short_name)
270 AVInputFormat *fmt = NULL;
271 while ((fmt = av_iformat_next(fmt))) {
272 if (match_format(short_name, fmt->name))
278 int ffio_limit(AVIOContext *s, int size)
281 int64_t remaining= s->maxsize - avio_tell(s);
282 if(remaining < size){
283 int64_t newsize= avio_size(s);
284 if(!s->maxsize || s->maxsize<newsize)
285 s->maxsize= newsize - !newsize;
286 remaining= s->maxsize - avio_tell(s);
287 remaining= FFMAX(remaining, 0);
290 if(s->maxsize>=0 && remaining+1 < size){
291 av_log(0, AV_LOG_ERROR, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
298 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
301 int orig_size = size;
302 size= ffio_limit(s, size);
304 ret= av_new_packet(pkt, size);
309 pkt->pos= avio_tell(s);
311 ret= avio_read(s, pkt->data, size);
315 av_shrink_packet(pkt, ret);
316 if (pkt->size < orig_size)
317 pkt->flags |= AV_PKT_FLAG_CORRUPT;
322 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
327 return av_get_packet(s, pkt, size);
328 old_size = pkt->size;
329 ret = av_grow_packet(pkt, size);
332 ret = avio_read(s, pkt->data + old_size, size);
333 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
/**
 * Check whether filename contains a valid frame-number pattern
 * (e.g. "%d" / "%03d") usable by image sequence (de)muxers.
 *
 * @return 1 if a number can be substituted into the name, 0 otherwise
 */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
}
344 AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
346 AVProbeData lpd = *pd;
347 AVInputFormat *fmt1 = NULL, *fmt;
348 int score, nodat = 0, score_max=0;
350 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
351 int id3len = ff_id3v2_tag_len(lpd.buf);
352 if (lpd.buf_size > id3len + 16) {
354 lpd.buf_size -= id3len;
360 while ((fmt1 = av_iformat_next(fmt1))) {
361 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
364 if (fmt1->read_probe) {
365 score = fmt1->read_probe(&lpd);
366 if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
367 score = FFMAX(score, nodat ? AVPROBE_SCORE_MAX/4-1 : 1);
368 } else if (fmt1->extensions) {
369 if (av_match_ext(lpd.filename, fmt1->extensions)) {
373 if (score > score_max) {
376 }else if (score == score_max)
379 *score_ret= score_max;
384 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
387 AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
388 if(score_ret > *score_max){
389 *score_max= score_ret;
395 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
397 return av_probe_input_format2(pd, is_opened, &score);
400 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
402 static const struct {
403 const char *name; enum CodecID id; enum AVMediaType type;
405 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
406 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
407 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
408 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
409 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
410 { "loas" , CODEC_ID_AAC_LATM , AVMEDIA_TYPE_AUDIO },
411 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
412 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
413 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
417 AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
421 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
422 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
423 for (i = 0; fmt_id_type[i].name; i++) {
424 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
425 st->codec->codec_id = fmt_id_type[i].id;
426 st->codec->codec_type = fmt_id_type[i].type;
434 /************************************************************/
435 /* input media file */
437 int av_demuxer_open(AVFormatContext *ic){
440 if (ic->iformat->read_header) {
441 err = ic->iformat->read_header(ic);
446 if (ic->pb && !ic->data_offset)
447 ic->data_offset = avio_tell(ic->pb);
453 /** size of probe buffer, for guessing file type from file contents */
454 #define PROBE_BUF_MIN 2048
455 #define PROBE_BUF_MAX (1<<20)
457 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
458 const char *filename, void *logctx,
459 unsigned int offset, unsigned int max_probe_size)
461 AVProbeData pd = { filename ? filename : "", NULL, -offset };
462 unsigned char *buf = NULL;
463 int ret = 0, probe_size;
465 if (!max_probe_size) {
466 max_probe_size = PROBE_BUF_MAX;
467 } else if (max_probe_size > PROBE_BUF_MAX) {
468 max_probe_size = PROBE_BUF_MAX;
469 } else if (max_probe_size < PROBE_BUF_MIN) {
470 return AVERROR(EINVAL);
473 if (offset >= max_probe_size) {
474 return AVERROR(EINVAL);
477 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
478 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
479 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
480 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
483 if (probe_size < offset) {
487 /* read probe data */
488 buftmp = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
491 return AVERROR(ENOMEM);
494 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
495 /* fail if error was not end of file, otherwise, lower score */
496 if (ret != AVERROR_EOF) {
501 ret = 0; /* error was end of file, nothing read */
504 pd.buf = &buf[offset];
506 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
508 /* guess file format */
509 *fmt = av_probe_input_format2(&pd, 1, &score);
511 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
512 av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
514 av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
520 return AVERROR_INVALIDDATA;
523 /* rewind. reuse probe buffer to avoid seeking */
524 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
530 /* open input file and probe the format if necessary */
531 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
534 AVProbeData pd = {filename, NULL, 0};
537 s->flags |= AVFMT_FLAG_CUSTOM_IO;
539 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
540 else if (s->iformat->flags & AVFMT_NOFILE)
541 av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
542 "will be ignored with AVFMT_NOFILE format.\n");
546 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
547 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
550 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
551 &s->interrupt_callback, options)) < 0)
555 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
558 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
559 AVPacketList **plast_pktl){
560 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
565 (*plast_pktl)->next = pktl;
567 *packet_buffer = pktl;
569 /* add the packet in the buffered packet list */
575 static void queue_attached_pictures(AVFormatContext *s)
578 for (i = 0; i < s->nb_streams; i++)
579 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
580 s->streams[i]->discard < AVDISCARD_ALL) {
581 AVPacket copy = s->streams[i]->attached_pic;
582 copy.destruct = NULL;
583 add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end);
587 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
589 AVFormatContext *s = *ps;
591 AVDictionary *tmp = NULL;
592 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
594 if (!s && !(s = avformat_alloc_context()))
595 return AVERROR(ENOMEM);
597 av_log(0, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
598 return AVERROR(EINVAL);
604 av_dict_copy(&tmp, *options, 0);
606 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
609 if ((ret = init_input(s, filename, &tmp)) < 0)
612 /* check filename in case an image number is expected */
613 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
614 if (!av_filename_number_test(filename)) {
615 ret = AVERROR(EINVAL);
620 s->duration = s->start_time = AV_NOPTS_VALUE;
621 av_strlcpy(s->filename, filename, sizeof(s->filename));
623 /* allocate private data */
624 if (s->iformat->priv_data_size > 0) {
625 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
626 ret = AVERROR(ENOMEM);
629 if (s->iformat->priv_class) {
630 *(const AVClass**)s->priv_data = s->iformat->priv_class;
631 av_opt_set_defaults(s->priv_data);
632 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
637 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
639 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
641 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
642 if ((ret = s->iformat->read_header(s)) < 0)
645 if (id3v2_extra_meta &&
646 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
648 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
650 queue_attached_pictures(s);
652 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
653 s->data_offset = avio_tell(s->pb);
655 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
658 av_dict_free(options);
665 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
667 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
669 avformat_free_context(s);
674 /*******************************************************/
676 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
682 AVPacketList *pktl = s->raw_packet_buffer;
686 if(s->streams[pkt->stream_index]->request_probe <= 0){
687 s->raw_packet_buffer = pktl->next;
688 s->raw_packet_buffer_remaining_size += pkt->size;
695 ret= s->iformat->read_packet(s, pkt);
697 if (!pktl || ret == AVERROR(EAGAIN))
699 for (i = 0; i < s->nb_streams; i++)
700 if(s->streams[i]->request_probe > 0)
701 s->streams[i]->request_probe = -1;
705 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
706 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
707 av_log(s, AV_LOG_WARNING,
708 "Dropped corrupted packet (stream = %d)\n",
714 if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
715 av_packet_merge_side_data(pkt);
717 if(pkt->stream_index >= (unsigned)s->nb_streams){
718 av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
722 st= s->streams[pkt->stream_index];
724 switch(st->codec->codec_type){
725 case AVMEDIA_TYPE_VIDEO:
726 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
728 case AVMEDIA_TYPE_AUDIO:
729 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
731 case AVMEDIA_TYPE_SUBTITLE:
732 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
736 if(!pktl && st->request_probe <= 0)
739 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
740 s->raw_packet_buffer_remaining_size -= pkt->size;
742 if(st->request_probe>0){
743 AVProbeData *pd = &st->probe_data;
745 av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
748 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
749 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
750 pd->buf_size += pkt->size;
751 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
753 end= s->raw_packet_buffer_remaining_size <= 0
754 || st->probe_packets<=0;
756 if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
757 int score= set_codec_from_probe_data(s, st, pd);
758 if( (st->codec->codec_id != CODEC_ID_NONE && score > AVPROBE_SCORE_MAX/4)
762 st->request_probe= -1;
763 if(st->codec->codec_id != CODEC_ID_NONE){
764 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
766 av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
#if FF_API_READ_PACKET
/**
 * Deprecated public wrapper around ff_read_packet().
 */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    return ff_read_packet(s, pkt);
}
#endif
781 /**********************************************************/
783 static int determinable_frame_size(AVCodecContext *avctx)
785 if (/*avctx->codec_id == CODEC_ID_AAC ||*/
786 avctx->codec_id == CODEC_ID_MP1 ||
787 avctx->codec_id == CODEC_ID_MP2 ||
788 avctx->codec_id == CODEC_ID_MP3/* ||
789 avctx->codec_id == CODEC_ID_CELT*/)
795 * Get the number of samples of an audio frame. Return -1 on error.
797 static int get_audio_frame_size(AVCodecContext *enc, int size, int mux)
801 /* give frame_size priority if demuxing */
802 if (!mux && enc->frame_size > 1)
803 return enc->frame_size;
805 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
808 /* fallback to using frame_size if muxing */
809 if (enc->frame_size > 1)
810 return enc->frame_size;
817 * Return the frame duration in seconds. Return 0 if not available.
819 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
820 AVCodecParserContext *pc, AVPacket *pkt)
826 switch(st->codec->codec_type) {
827 case AVMEDIA_TYPE_VIDEO:
828 if (st->r_frame_rate.num && !pc) {
829 *pnum = st->r_frame_rate.den;
830 *pden = st->r_frame_rate.num;
831 } else if(st->time_base.num*1000LL > st->time_base.den) {
832 *pnum = st->time_base.num;
833 *pden = st->time_base.den;
834 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
835 *pnum = st->codec->time_base.num;
836 *pden = st->codec->time_base.den;
837 if (pc && pc->repeat_pict) {
838 *pnum = (*pnum) * (1 + pc->repeat_pict);
840 //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
841 //Thus if we have no parser in such case leave duration undefined.
842 if(st->codec->ticks_per_frame>1 && !pc){
847 case AVMEDIA_TYPE_AUDIO:
848 frame_size = get_audio_frame_size(st->codec, pkt->size, 0);
849 if (frame_size <= 0 || st->codec->sample_rate <= 0)
852 *pden = st->codec->sample_rate;
859 static int is_intra_only(AVCodecContext *enc){
860 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
862 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
863 switch(enc->codec_id){
865 case CODEC_ID_MJPEGB:
867 case CODEC_ID_PRORES:
868 case CODEC_ID_RAWVIDEO:
870 case CODEC_ID_DVVIDEO:
871 case CODEC_ID_HUFFYUV:
872 case CODEC_ID_FFVHUFF:
877 case CODEC_ID_JPEG2000:
879 case CODEC_ID_UTVIDEO:
887 static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
891 if (pktl == s->parse_queue_end)
892 return s->packet_buffer;
896 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
897 int64_t dts, int64_t pts)
899 AVStream *st= s->streams[stream_index];
900 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
902 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))
905 st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
908 if (is_relative(pts))
909 pts += st->first_dts - RELATIVE_TS_BASE;
911 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
912 if(pktl->pkt.stream_index != stream_index)
914 if(is_relative(pktl->pkt.pts))
915 pktl->pkt.pts += st->first_dts - RELATIVE_TS_BASE;
917 if(is_relative(pktl->pkt.dts))
918 pktl->pkt.dts += st->first_dts - RELATIVE_TS_BASE;
920 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
921 st->start_time= pktl->pkt.pts;
923 if (st->start_time == AV_NOPTS_VALUE)
924 st->start_time = pts;
927 static void update_initial_durations(AVFormatContext *s, AVStream *st,
928 int stream_index, int duration)
930 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
931 int64_t cur_dts= RELATIVE_TS_BASE;
933 if(st->first_dts != AV_NOPTS_VALUE){
934 cur_dts= st->first_dts;
935 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
936 if(pktl->pkt.stream_index == stream_index){
937 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
942 if(pktl && pktl->pkt.dts != st->first_dts) {
943 av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s in que\n", av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts));
947 av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in ques\n", av_ts2str(st->first_dts));
950 pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
951 st->first_dts = cur_dts;
952 }else if(st->cur_dts != RELATIVE_TS_BASE)
955 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
956 if(pktl->pkt.stream_index != stream_index)
958 if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
959 && !pktl->pkt.duration){
960 pktl->pkt.dts= cur_dts;
961 if(!st->codec->has_b_frames)
962 pktl->pkt.pts= cur_dts;
963 // if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
964 pktl->pkt.duration = duration;
967 cur_dts = pktl->pkt.dts + pktl->pkt.duration;
970 st->cur_dts= cur_dts;
973 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
974 AVCodecParserContext *pc, AVPacket *pkt)
976 int num, den, presentation_delayed, delay, i;
979 if (s->flags & AVFMT_FLAG_NOFILLIN)
982 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
983 pkt->dts= AV_NOPTS_VALUE;
985 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
986 //FIXME Set low_delay = 0 when has_b_frames = 1
987 st->codec->has_b_frames = 1;
989 /* do we have a video B-frame ? */
990 delay= st->codec->has_b_frames;
991 presentation_delayed = 0;
993 /* XXX: need has_b_frame, but cannot get it if the codec is
996 pc && pc->pict_type != AV_PICTURE_TYPE_B)
997 presentation_delayed = 1;
999 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > pkt->pts && st->pts_wrap_bits<63){
1000 pkt->dts -= 1LL<<st->pts_wrap_bits;
1003 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
1004 // we take the conservative approach and discard both
1005 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
1006 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
1007 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1008 pkt->dts= AV_NOPTS_VALUE;
1011 if (pkt->duration == 0) {
1012 compute_frame_duration(&num, &den, st, pc, pkt);
1014 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
1017 if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
1018 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1020 /* correct timestamps with byte offset if demuxers only have timestamps
1021 on packet boundaries */
1022 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1023 /* this will estimate bitrate based on this frame's duration and size */
1024 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1025 if(pkt->pts != AV_NOPTS_VALUE)
1027 if(pkt->dts != AV_NOPTS_VALUE)
1031 if (pc && pc->dts_sync_point >= 0) {
1032 // we have synchronization info from the parser
1033 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1035 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1036 if (pkt->dts != AV_NOPTS_VALUE) {
1037 // got DTS from the stream, update reference timestamp
1038 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1039 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1040 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1041 // compute DTS based on reference timestamp
1042 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1043 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1045 if (pc->dts_sync_point > 0)
1046 st->reference_dts = pkt->dts; // new reference
1050 /* This may be redundant, but it should not hurt. */
1051 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1052 presentation_delayed = 1;
1054 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
1055 // presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
1056 /* interpolate PTS and DTS if they are not present */
1057 //We skip H264 currently because delay and has_b_frames are not reliably set
1058 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1059 if (presentation_delayed) {
1060 /* DTS = decompression timestamp */
1061 /* PTS = presentation timestamp */
1062 if (pkt->dts == AV_NOPTS_VALUE)
1063 pkt->dts = st->last_IP_pts;
1064 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1065 if (pkt->dts == AV_NOPTS_VALUE)
1066 pkt->dts = st->cur_dts;
1068 /* this is tricky: the dts must be incremented by the duration
1069 of the frame we are displaying, i.e. the last I- or P-frame */
1070 if (st->last_IP_duration == 0)
1071 st->last_IP_duration = pkt->duration;
1072 if(pkt->dts != AV_NOPTS_VALUE)
1073 st->cur_dts = pkt->dts + st->last_IP_duration;
1074 st->last_IP_duration = pkt->duration;
1075 st->last_IP_pts= pkt->pts;
1076 /* cannot compute PTS if not present (we can compute it only
1077 by knowing the future */
1078 } else if (pkt->pts != AV_NOPTS_VALUE ||
1079 pkt->dts != AV_NOPTS_VALUE ||
1081 int duration = pkt->duration;
1083 if(pkt->pts != AV_NOPTS_VALUE && duration){
1084 int64_t old_diff= FFABS(st->cur_dts - duration - pkt->pts);
1085 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1086 if( old_diff < new_diff && old_diff < (duration>>3)
1087 && (!strcmp(s->iformat->name, "mpeg") ||
1088 !strcmp(s->iformat->name, "mpegts"))){
1089 pkt->pts += duration;
1090 av_log(s, AV_LOG_WARNING, "Adjusting PTS forward\n");
1091 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%s size:%d\n",
1092 // pkt->stream_index, old_diff, new_diff, pkt->duration, av_ts2str(st->cur_dts), pkt->size);
1096 /* presentation is not delayed : PTS and DTS are the same */
1097 if (pkt->pts == AV_NOPTS_VALUE)
1098 pkt->pts = pkt->dts;
1099 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
1101 if (pkt->pts == AV_NOPTS_VALUE)
1102 pkt->pts = st->cur_dts;
1103 pkt->dts = pkt->pts;
1104 if (pkt->pts != AV_NOPTS_VALUE)
1105 st->cur_dts = pkt->pts + duration;
1109 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1110 st->pts_buffer[0]= pkt->pts;
1111 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1112 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1113 if(pkt->dts == AV_NOPTS_VALUE)
1114 pkt->dts= st->pts_buffer[0];
1115 if(st->codec->codec_id == CODEC_ID_H264){ // we skipped it above so we try here
1116 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1118 if(pkt->dts > st->cur_dts)
1119 st->cur_dts = pkt->dts;
1122 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1123 // presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1126 if(is_intra_only(st->codec))
1127 pkt->flags |= AV_PKT_FLAG_KEY;
1129 pkt->convergence_duration = pc->convergence_duration;
1132 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1135 AVPacketList *pktl = *pkt_buf;
1136 *pkt_buf = pktl->next;
1137 av_free_packet(&pktl->pkt);
1140 *pkt_buf_end = NULL;
1144 * Parse a packet, add all split parts to parse_queue
1146 * @param pkt packet to parse, NULL when flushing the parser at end of stream
1148 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1150 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1151 AVStream *st = s->streams[stream_index];
1152 uint8_t *data = pkt ? pkt->data : NULL;
1153 int size = pkt ? pkt->size : 0;
1154 int ret = 0, got_output = 0;
1157 av_init_packet(&flush_pkt);
1160 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1161 // preserve 0-size sync packets
1162 compute_pkt_fields(s, st, st->parser, pkt);
1165 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1168 av_init_packet(&out_pkt);
1169 len = av_parser_parse2(st->parser, st->codec,
1170 &out_pkt.data, &out_pkt.size, data, size,
1171 pkt->pts, pkt->dts, pkt->pos);
1173 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1174 /* increment read pointer */
1178 got_output = !!out_pkt.size;
1183 /* set the duration */
1184 out_pkt.duration = 0;
1185 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1186 if (st->codec->sample_rate > 0) {
1187 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1188 (AVRational){ 1, st->codec->sample_rate },
1192 } else if (st->codec->time_base.num != 0 &&
1193 st->codec->time_base.den != 0) {
1194 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1195 st->codec->time_base,
1200 out_pkt.stream_index = st->index;
1201 out_pkt.pts = st->parser->pts;
1202 out_pkt.dts = st->parser->dts;
1203 out_pkt.pos = st->parser->pos;
1205 if (st->parser->key_frame == 1 ||
1206 (st->parser->key_frame == -1 &&
1207 st->parser->pict_type == AV_PICTURE_TYPE_I))
1208 out_pkt.flags |= AV_PKT_FLAG_KEY;
1210 compute_pkt_fields(s, st, st->parser, &out_pkt);
1212 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1213 out_pkt.flags & AV_PKT_FLAG_KEY) {
1214 int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? out_pkt.pos : st->parser->frame_offset;
1215 ff_reduce_index(s, st->index);
1216 av_add_index_entry(st, pos, out_pkt.dts,
1217 0, 0, AVINDEX_KEYFRAME);
1220 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1221 out_pkt.destruct = pkt->destruct;
1222 pkt->destruct = NULL;
1224 if ((ret = av_dup_packet(&out_pkt)) < 0)
1227 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1228 av_free_packet(&out_pkt);
1229 ret = AVERROR(ENOMEM);
1235 /* end of the stream => close and free the parser */
1236 if (pkt == &flush_pkt) {
1237 av_parser_close(st->parser);
1242 av_free_packet(pkt);
1246 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1247 AVPacketList **pkt_buffer_end,
1251 av_assert0(*pkt_buffer);
1254 *pkt_buffer = pktl->next;
1256 *pkt_buffer_end = NULL;
/**
 * Read the next demuxed packet, running it through the stream's parser
 * when needed, and leave parsed output in s->parse_queue.
 * NOTE(review): many interior lines are elided in this excerpt; comments
 * below describe only what the visible lines establish.
 */
1261 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1263 int ret = 0, i, got_packet = 0;
1265 av_init_packet(pkt);
/* loop until a packet is produced or the parse queue has data */
1267 while (!got_packet && !s->parse_queue) {
1271 /* read next packet */
1272 ret = ff_read_packet(s, &cur_pkt);
1274 if (ret == AVERROR(EAGAIN))
1276 /* flush the parsers */
1277 for(i = 0; i < s->nb_streams; i++) {
1279 if (st->parser && st->need_parsing)
/* NULL packet == flush signal for parse_packet() */
1280 parse_packet(s, NULL, st->index);
1282 /* all remaining packets are now in parse_queue =>
1283 * really terminate parsing */
1287 st = s->streams[cur_pkt.stream_index];
/* warn on pts < dts, which is never valid */
1289 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1290 cur_pkt.dts != AV_NOPTS_VALUE &&
1291 cur_pkt.pts < cur_pkt.dts) {
1292 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1293 cur_pkt.stream_index,
1294 av_ts2str(cur_pkt.pts),
1295 av_ts2str(cur_pkt.dts),
1298 if (s->debug & FF_FDEBUG_TS)
1299 av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1300 cur_pkt.stream_index,
1301 av_ts2str(cur_pkt.pts),
1302 av_ts2str(cur_pkt.dts),
/* lazily create a parser unless parsing is disabled via AVFMT_FLAG_NOPARSE */
1307 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1308 st->parser = av_parser_init(st->codec->codec_id);
1310 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1311 "%s, packets or times may be invalid.\n",
1312 avcodec_get_name(st->codec->codec_id));
1313 /* no parser available: just output the raw packets */
1314 st->need_parsing = AVSTREAM_PARSE_NONE;
1315 } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1316 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1317 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1318 st->parser->flags |= PARSER_FLAG_ONCE;
1319 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
1320 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
1324 if (!st->need_parsing || !st->parser) {
1325 /* no parsing needed: we just output the packet as is */
1327 compute_pkt_fields(s, st, NULL, pkt);
/* index keyframes for formats relying on the generic index */
1328 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1329 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1330 ff_reduce_index(s, st->index);
1331 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1334 } else if (st->discard < AVDISCARD_ALL) {
1335 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1339 av_free_packet(&cur_pkt);
/* once a keyframe arrives, stop skipping */
1341 if (pkt->flags & AV_PKT_FLAG_KEY)
1342 st->skip_to_keyframe = 0;
1343 if (st->skip_to_keyframe) {
1344 av_free_packet(&cur_pkt);
/* fall back to the parse queue if the loop did not yield a packet */
1349 if (!got_packet && s->parse_queue)
1350 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1352 if(s->debug & FF_FDEBUG_TS)
1353 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1355 av_ts2str(pkt->pts),
1356 av_ts2str(pkt->dts),
/**
 * Public API: return the next frame of the stream.
 * Without AVFMT_FLAG_GENPTS this is a thin wrapper over the packet buffer /
 * read_frame_internal(); with GENPTS it scans buffered packets to fill in
 * missing pts values from later dts values.
 * NOTE(review): several interior lines are elided in this excerpt.
 */
1364 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1366 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
/* fast path: serve from the buffer if present, else demux directly */
1371 ret = s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
1372 &s->packet_buffer_end,
1374 read_frame_internal(s, pkt);
1379 AVPacketList *pktl = s->packet_buffer;
1382 AVPacket *next_pkt = &pktl->pkt;
1384 if (next_pkt->dts != AV_NOPTS_VALUE) {
1385 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1386 // last dts seen for this stream. if any of packets following
1387 // current one had no dts, we will set this to AV_NOPTS_VALUE.
1388 int64_t last_dts = next_pkt->dts;
/* walk forward looking for a later packet whose dts resolves our pts */
1389 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1390 if (pktl->pkt.stream_index == next_pkt->stream_index &&
/* comparisons are done modulo 2^wrap_bits to survive timestamp wrap */
1391 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1392 if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1393 next_pkt->pts = pktl->pkt.dts;
1395 if (last_dts != AV_NOPTS_VALUE) {
1396 // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1397 last_dts = pktl->pkt.dts;
1402 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1403 // Fixing the last reference frame had none pts issue (For MXF etc).
1404 // We only do this when
1406 // 2. we are not able to resolve a pts value for current packet.
1407 // 3. the packets for this stream at the end of the files had valid dts.
1408 next_pkt->pts = last_dts + next_pkt->duration;
1410 pktl = s->packet_buffer;
1413 /* read packet from packet buffer, if there is data */
1414 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1415 next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1416 ret = read_from_packet_buffer(&s->packet_buffer,
1417 &s->packet_buffer_end, pkt);
1422 ret = read_frame_internal(s, pkt);
1424 if (pktl && ret != AVERROR(EAGAIN)) {
/* buffered packets must own their data; duplicate if needed */
1431 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1432 &s->packet_buffer_end)) < 0)
1433 return AVERROR(ENOMEM);
/* strip the internal RELATIVE_TS_BASE offset before returning to the user */
1437 if (is_relative(pkt->dts))
1438 pkt->dts -= RELATIVE_TS_BASE;
1439 if (is_relative(pkt->pts))
1440 pkt->pts -= RELATIVE_TS_BASE;
1444 /* XXX: suppress the packet queue */
/**
 * Free all queued packets (parse queue, decoded packet buffer and raw
 * packet buffer) and reset the raw packet buffer size budget.
 */
1445 static void flush_packet_queue(AVFormatContext *s)
1447 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1448 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1449 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
/* restore the probing budget consumed by raw packets */
1451 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1454 /*******************************************************/
/**
 * Pick a default stream: the first video stream that is not an attached
 * picture; failing that, the first audio stream; failing that, stream 0.
 * NOTE(review): the early-return for the video case is elided in this
 * excerpt (original lines 1469-1470 missing).
 */
1457 int av_find_default_stream_index(AVFormatContext *s)
1459 int first_audio_index = -1;
1463 if (s->nb_streams <= 0)
1465 for(i = 0; i < s->nb_streams; i++) {
/* attached pictures (cover art) are not usable as a default stream */
1467 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1468 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1471 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1472 first_audio_index = i;
1474 return first_audio_index >= 0 ? first_audio_index : 0;
1478 * Flush the frame reader.
/**
 * Flush the frame reader: drop all queued packets and reset per-stream
 * parsing/timestamp state (used around seeks).
 */
1480 void ff_read_frame_flush(AVFormatContext *s)
1485 flush_packet_queue(s);
1487 /* for each stream, reset read state */
1488 for(i = 0; i < s->nb_streams; i++) {
1492 av_parser_close(st->parser);
1495 st->last_IP_pts = AV_NOPTS_VALUE;
/* streams whose first dts is unknown keep the relative-ts origin */
1496 if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1497 else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1498 st->reference_dts = AV_NOPTS_VALUE;
1500 st->probe_packets = MAX_PROBE_PACKETS;
/* invalidate the pts reordering buffer */
1502 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1503 st->pts_buffer[j]= AV_NOPTS_VALUE;
/**
 * After a seek, set cur_dts of every stream to @p timestamp (which is
 * expressed in ref_st's time base), rescaled into each stream's own
 * time base.
 */
1507 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1511 for(i = 0; i < s->nb_streams; i++) {
1512 AVStream *st = s->streams[i];
/* timestamp * (st->tb.den * ref->tb.num) / (st->tb.num * ref->tb.den) */
1514 st->cur_dts = av_rescale(timestamp,
1515 st->time_base.den * (int64_t)ref_st->time_base.num,
1516 st->time_base.num * (int64_t)ref_st->time_base.den);
/**
 * Keep the stream's index within s->max_index_size bytes: when the entry
 * count reaches the limit, discard every other entry (halving resolution).
 */
1520 void ff_reduce_index(AVFormatContext *s, int stream_index)
1522 AVStream *st= s->streams[stream_index];
1523 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1525 if((unsigned)st->nb_index_entries >= max_entries){
/* compact in place: keep entries 0, 2, 4, ... */
1527 for(i=0; 2*i<st->nb_index_entries; i++)
1528 st->index_entries[i]= st->index_entries[2*i];
1529 st->nb_index_entries= i;
/**
 * Insert an entry into a timestamp-sorted index array, growing the array
 * as needed. Replaces an existing entry with the same timestamp instead
 * of duplicating it.
 * NOTE(review): some interior lines (returns, closing braces, field
 * assignments) are elided in this excerpt.
 */
1533 int ff_add_index_entry(AVIndexEntry **index_entries,
1534 int *nb_index_entries,
1535 unsigned int *index_entries_allocated_size,
1536 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1538 AVIndexEntry *entries, *ie;
/* guard against overflow of the (count+1)*sizeof computation below */
1541 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1544 if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1545 timestamp -= RELATIVE_TS_BASE;
1547 entries = av_fast_realloc(*index_entries,
1548 index_entries_allocated_size,
1549 (*nb_index_entries + 1) *
1550 sizeof(AVIndexEntry))
1554 *index_entries= entries;
/* find the insertion position (exact or nearest) */
1556 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
/* not found: append at the end */
1559 index= (*nb_index_entries)++;
1560 ie= &entries[index];
1561 assert(index==0 || ie[-1].timestamp < timestamp);
1563 ie= &entries[index];
1564 if(ie->timestamp != timestamp){
1565 if(ie->timestamp <= timestamp)
/* shift the tail to open a slot at 'index' */
1567 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1568 (*nb_index_entries)++;
1569 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1570 distance= ie->min_distance;
1574 ie->timestamp = timestamp;
1575 ie->min_distance= distance;
/**
 * Public wrapper: add an index entry to @p st using the generic
 * ff_add_index_entry() on the stream's own index arrays.
 */
1582 int av_add_index_entry(AVStream *st,
1583 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1585 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1586 &st->index_entries_allocated_size, pos,
1587 timestamp, size, distance, flags);
/**
 * Binary-search a sorted index for @p wanted_timestamp, honoring
 * AVSEEK_FLAG_BACKWARD (round down vs up) and AVSEEK_FLAG_ANY (accept
 * non-keyframe entries).
 * NOTE(review): the binary-search loop bounds/updates are partly elided
 * in this excerpt.
 */
1590 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1591 int64_t wanted_timestamp, int flags)
1599 //optimize appending index entries at the end
1600 if(b && entries[b-1].timestamp < wanted_timestamp)
1605 timestamp = entries[m].timestamp;
1606 if(timestamp >= wanted_timestamp)
1608 if(timestamp <= wanted_timestamp)
/* BACKWARD picks the entry at or before the target, otherwise at or after */
1611 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1613 if(!(flags & AVSEEK_FLAG_ANY)){
/* step away from non-keyframe entries in the seek direction */
1614 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1615 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/**
 * Public wrapper: search @p st's index for @p wanted_timestamp via the
 * generic ff_index_search_timestamp().
 */
1624 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1627 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1628 wanted_timestamp, flags);
/**
 * Seek to @p target_ts using binary search over read_timestamp(), seeded
 * with bounds from the stream's index when available, then reposition the
 * IO context and reset decoding state.
 * NOTE(review): several interior lines are elided in this excerpt.
 */
1631 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1633 AVInputFormat *avif= s->iformat;
1634 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1635 int64_t ts_min, ts_max, ts;
1640 if (stream_index < 0)
1643 av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1646 ts_min= AV_NOPTS_VALUE;
1647 pos_limit= -1; //gcc falsely says it may be uninitialized
/* seed the lower search bound from the index, if we have one */
1649 st= s->streams[stream_index];
1650 if(st->index_entries){
1653 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1654 index= FFMAX(index, 0);
1655 e= &st->index_entries[index];
1657 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1659 ts_min= e->timestamp;
1660 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1661 pos_min, av_ts2str(ts_min));
/* and the upper bound from the first entry at/after the target */
1666 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1667 assert(index < st->nb_index_entries);
1669 e= &st->index_entries[index];
1670 assert(e->timestamp >= target_ts);
1672 ts_max= e->timestamp;
1673 pos_limit= pos_max - e->min_distance;
1674 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
1675 pos_max, pos_limit, av_ts2str(ts_max));
1679 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
/* do the actual file repositioning, then flush and resync timestamps */
1684 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1687 ff_read_frame_flush(s);
1688 ff_update_cur_dts(s, st, ts);
/**
 * Generic timestamp search: locate the file position whose timestamp
 * brackets @p target_ts, using interpolation, then bisection, then linear
 * search, with read_timestamp() as the probe. Returns the chosen position
 * and stores the matching timestamp in *ts_ret.
 * NOTE(review): a number of interior lines are elided in this excerpt.
 */
1693 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1694 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1695 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1696 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1699 int64_t start_pos, filesize;
1702 av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
/* establish the lower bound if the caller did not supply one */
1704 if(ts_min == AV_NOPTS_VALUE){
1705 pos_min = s->data_offset;
1706 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1707 if (ts_min == AV_NOPTS_VALUE)
1711 if(ts_min >= target_ts){
/* establish the upper bound by probing backwards from end of file */
1716 if(ts_max == AV_NOPTS_VALUE){
1718 filesize = avio_size(s->pb);
1719 pos_max = filesize - 1;
1722 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1724 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1725 if (ts_max == AV_NOPTS_VALUE)
/* extend ts_max to the last timestamp actually present in the file */
1729 int64_t tmp_pos= pos_max + 1;
1730 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1731 if(tmp_ts == AV_NOPTS_VALUE)
1735 if(tmp_pos >= filesize)
1741 if(ts_max <= target_ts){
1746 if(ts_min > ts_max){
1748 }else if(ts_min == ts_max){
/* main refinement loop: shrink [pos_min, pos_limit] around the target */
1753 while (pos_min < pos_limit) {
1754 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1755 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1756 assert(pos_limit <= pos_max);
1759 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1760 // interpolate position (better than dichotomy)
1761 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1762 + pos_min - approximate_keyframe_distance;
1763 }else if(no_change==1){
1764 // bisection, if interpolation failed to change min or max pos last time
1765 pos = (pos_min + pos_limit)>>1;
1767 /* linear search if bisection failed, can only happen if there
1768 are very few or no keyframes between min/max */
1773 else if(pos > pos_limit)
1777 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1782 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1783 pos_min, pos, pos_max,
1784 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1785 pos_limit, start_pos, no_change);
1786 if(ts == AV_NOPTS_VALUE){
1787 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1790 assert(ts != AV_NOPTS_VALUE);
/* narrow the interval according to which side of the target we landed on */
1791 if (target_ts <= ts) {
1792 pos_limit = start_pos - 1;
1796 if (target_ts >= ts) {
/* pick the bound dictated by the seek direction */
1802 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1803 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1806 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
/* NOTE(review): probes &pos_min here rather than &pos_max — looks odd,
 * but matches the upstream source of this era; verify against full file */
1808 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1809 av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1810 pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
/**
 * Byte-position seek: clamp @p pos to [data_offset, filesize-1] and move
 * the IO context there.
 */
1816 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1817 int64_t pos_min, pos_max;
1819 pos_min = s->data_offset;
1820 pos_max = avio_size(s->pb) - 1;
/* clamp into the valid byte range */
1822 if (pos < pos_min) pos= pos_min;
1823 else if(pos > pos_max) pos= pos_max;
1825 avio_seek(s->pb, pos, SEEK_SET);
/**
 * Index-based generic seek: look up @p timestamp in the stream index; if
 * the target lies beyond the indexed region, read frames forward (letting
 * the generic index grow) until the index covers it, then jump to the
 * matching entry.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1830 static int seek_frame_generic(AVFormatContext *s,
1831 int stream_index, int64_t timestamp, int flags)
1838 st = s->streams[stream_index];
1840 index = av_index_search_timestamp(st, timestamp, flags);
/* target precedes all indexed entries */
1842 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1845 if(index < 0 || index==st->nb_index_entries-1){
/* resume reading from the last indexed position (or data start) */
1849 if(st->nb_index_entries){
1850 assert(st->index_entries);
1851 ie= &st->index_entries[st->nb_index_entries-1];
1852 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1854 ff_update_cur_dts(s, st, ie->timestamp);
1856 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
/* read packets until past the target so the index fills in */
1862 read_status = av_read_frame(s, &pkt);
1863 } while (read_status == AVERROR(EAGAIN));
1864 if (read_status < 0)
1866 av_free_packet(&pkt);
1867 if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1868 if(pkt.flags & AV_PKT_FLAG_KEY)
/* give up if no keyframe shows up within a reasonable number of frames
 * (CDG intentionally has none, hence the codec exception) */
1870 if(nonkey++ > 1000 && st->codec->codec_id != CODEC_ID_CDGRAPHICS){
1871 av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1876 index = av_index_search_timestamp(st, timestamp, flags);
1881 ff_read_frame_flush(s);
1882 AV_NOWARN_DEPRECATED(
/* prefer the demuxer's own seek callback if it exists */
1883 if (s->iformat->read_seek){
1884 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1888 ie = &st->index_entries[index];
1889 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1891 ff_update_cur_dts(s, st, ie->timestamp);
/**
 * Dispatch a seek request: byte seek, demuxer read_seek(), binary search
 * via read_timestamp(), or the generic index-based seek — in that order
 * of preference, subject to the demuxer's capability flags.
 */
1896 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1897 int64_t timestamp, int flags)
1902 if (flags & AVSEEK_FLAG_BYTE) {
/* demuxer explicitly forbids byte seeking */
1903 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1905 ff_read_frame_flush(s);
1906 return seek_frame_byte(s, stream_index, timestamp, flags);
/* no stream given: pick a default and convert from AV_TIME_BASE */
1909 if(stream_index < 0){
1910 stream_index= av_find_default_stream_index(s);
1911 if(stream_index < 0)
1914 st= s->streams[stream_index];
1915 /* timestamp for default must be expressed in AV_TIME_BASE units */
1916 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1919 /* first, we try the format specific seek */
1920 AV_NOWARN_DEPRECATED(
1921 if (s->iformat->read_seek) {
1922 ff_read_frame_flush(s);
1923 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
/* fall back to binary search, then to the generic index seek */
1931 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1932 ff_read_frame_flush(s);
1933 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1934 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1935 ff_read_frame_flush(s);
1936 return seek_frame_generic(s, stream_index, timestamp, flags);
/**
 * Public seek API: delegate to seek_frame_internal() and, on success,
 * re-queue any attached pictures so they are delivered again.
 */
1942 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1944 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1947 queue_attached_pictures(s);
/**
 * Ranged seek API (min_ts <= ts <= max_ts): use the demuxer's read_seek2()
 * when available, otherwise emulate with av_seek_frame(), choosing the
 * seek direction from which bound is closer and retrying toward the other
 * bound on failure.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1952 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
/* reject an inconsistent range */
1954 if(min_ts > ts || max_ts < ts)
1957 if (s->iformat->read_seek2) {
1959 ff_read_frame_flush(s);
1960 ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1963 queue_attached_pictures(s);
1967 if(s->iformat->read_timestamp){
1968 //try to seek via read_timestamp()
1971 //Fallback to old API if new is not implemented but old is
1972 //Note the old has somewat different sematics
1973 AV_NOWARN_DEPRECATED(
1974 if (s->iformat->read_seek || 1) {
/* unsigned compare keeps this correct across the full int64 range */
1975 int dir = (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0);
1976 int ret = av_seek_frame(s, stream_index, ts, flags | dir);
1977 if (ret<0 && ts != min_ts && max_ts != ts) {
/* retry against the far bound, then once more in the opposite direction */
1978 ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
1980 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
1989 /*******************************************************/
1992 * Return TRUE if the stream has accurate duration in any stream.
1994 * @return TRUE if the stream has accurate duration for at least one component.
/**
 * Return nonzero if any stream, or the container itself, carries a known
 * duration.
 */
1996 static int has_duration(AVFormatContext *ic)
2001 for(i = 0;i < ic->nb_streams; i++) {
2002 st = ic->streams[i];
2003 if (st->duration != AV_NOPTS_VALUE)
2006 if (ic->duration != AV_NOPTS_VALUE)
2012 * Estimate the stream timings from the one of each components.
2014 * Also computes the global bitrate if possible.
/**
 * Derive the container-level start_time, duration and (if the file size
 * is known) bit_rate from the per-stream values, all in AV_TIME_BASE
 * units. Subtitle start times are tracked separately so a lone early
 * subtitle does not skew the start time.
 */
2016 static void update_stream_timings(AVFormatContext *ic)
2018 int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2019 int64_t duration, duration1, filesize;
2023 start_time = INT64_MAX;
2024 start_time_text = INT64_MAX;
2025 end_time = INT64_MIN;
2026 duration = INT64_MIN;
2027 for(i = 0;i < ic->nb_streams; i++) {
2028 st = ic->streams[i];
2029 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2030 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
/* subtitles go into their own minimum; see merge logic below */
2031 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2032 if (start_time1 < start_time_text)
2033 start_time_text = start_time1;
2035 start_time = FFMIN(start_time, start_time1);
2036 if (st->duration != AV_NOPTS_VALUE) {
2037 end_time1 = start_time1
2038 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2039 end_time = FFMAX(end_time, end_time1);
2042 if (st->duration != AV_NOPTS_VALUE) {
2043 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2044 duration = FFMAX(duration, duration1);
/* only adopt the subtitle start time when no other stream has one, or
 * when it is less than one second earlier than the A/V start */
2047 if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2048 start_time = start_time_text;
2049 if (start_time != INT64_MAX) {
2050 ic->start_time = start_time;
2051 if (end_time != INT64_MIN)
2052 duration = FFMAX(duration, end_time - start_time);
2054 if (duration != INT64_MIN && ic->duration == AV_NOPTS_VALUE) {
2055 ic->duration = duration;
2057 if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2058 /* compute the bitrate */
2059 ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
2060 (double)ic->duration;
/**
 * Propagate container-level start_time/duration down to any stream that
 * is missing its own start_time, rescaling into the stream time base.
 */
2064 static void fill_all_stream_timings(AVFormatContext *ic)
2069 update_stream_timings(ic);
2070 for(i = 0;i < ic->nb_streams; i++) {
2071 st = ic->streams[i];
2072 if (st->start_time == AV_NOPTS_VALUE) {
2073 if(ic->start_time != AV_NOPTS_VALUE)
2074 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2075 if(ic->duration != AV_NOPTS_VALUE)
2076 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/**
 * Last-resort timing estimate: sum the per-stream bitrates if the
 * container bitrate is unset, then derive per-stream durations from
 * filesize * 8 / bit_rate.
 */
2081 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2083 int64_t filesize, duration;
2087 /* if bit_rate is already set, we believe it */
2088 if (ic->bit_rate <= 0) {
2090 for(i=0;i<ic->nb_streams;i++) {
2091 st = ic->streams[i];
2092 if (st->codec->bit_rate > 0)
2093 bit_rate += st->codec->bit_rate;
2095 ic->bit_rate = bit_rate;
2098 /* if duration is already set, we believe it */
2099 if (ic->duration == AV_NOPTS_VALUE &&
2100 ic->bit_rate != 0) {
2101 filesize = ic->pb ? avio_size(ic->pb) : 0;
2103 for(i = 0; i < ic->nb_streams; i++) {
2104 st = ic->streams[i];
/* duration = filesize_bits / bit_rate, expressed in the stream time base */
2105 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2106 if (st->duration == AV_NOPTS_VALUE)
2107 st->duration = duration;
2113 #define DURATION_MAX_READ_SIZE 250000
2114 #define DURATION_MAX_RETRY 3
2116 /* only usable for MPEG-PS streams */
/**
 * Estimate durations by reading packets near the end of the file and
 * taking the last pts per stream (only usable for MPEG-PS/TS). Retries
 * with progressively larger tail windows, then restores the original
 * file position and per-stream dts state.
 * NOTE(review): interior lines are elided in this excerpt.
 */
2117 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2119 AVPacket pkt1, *pkt = &pkt1;
2121 int read_size, i, ret;
2123 int64_t filesize, offset, duration;
2126 /* flush packet queue */
2127 flush_packet_queue(ic);
2129 for (i=0; i<ic->nb_streams; i++) {
2130 st = ic->streams[i];
2131 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2132 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
/* parsers are re-created later; close them before raw tail reads */
2135 av_parser_close(st->parser);
2140 /* estimate the end time (duration) */
2141 /* XXX: may need to support wrapping */
2142 filesize = ic->pb ? avio_size(ic->pb) : 0;
2143 end_time = AV_NOPTS_VALUE;
/* read the last DURATION_MAX_READ_SIZE<<retry bytes of the file */
2145 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2149 avio_seek(ic->pb, offset, SEEK_SET);
2152 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2156 ret = ff_read_packet(ic, pkt);
2157 } while(ret == AVERROR(EAGAIN));
2160 read_size += pkt->size;
2161 st = ic->streams[pkt->stream_index];
2162 if (pkt->pts != AV_NOPTS_VALUE &&
2163 (st->start_time != AV_NOPTS_VALUE ||
2164 st->first_dts != AV_NOPTS_VALUE)) {
2165 duration = end_time = pkt->pts;
2166 if (st->start_time != AV_NOPTS_VALUE)
2167 duration -= st->start_time;
2169 duration -= st->first_dts;
/* compensate a wrapped timestamp by one full wrap period */
2171 duration += 1LL<<st->pts_wrap_bits;
2173 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
2174 st->duration = duration;
2177 av_free_packet(pkt);
/* widen the tail window and retry while nothing usable was found */
2179 }while( end_time==AV_NOPTS_VALUE
2180 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2181 && ++retry <= DURATION_MAX_RETRY);
2183 fill_all_stream_timings(ic);
/* restore the demuxing position and timestamp state we disturbed */
2185 avio_seek(ic->pb, old_offset, SEEK_SET);
2186 for (i=0; i<ic->nb_streams; i++) {
2188 st->cur_dts= st->first_dts;
2189 st->last_IP_pts = AV_NOPTS_VALUE;
2190 st->reference_dts = AV_NOPTS_VALUE;
/**
 * Choose a timing-estimation strategy: PTS scan of the file tail for
 * seekable MPEG-PS/TS, per-stream values when any stream has a duration,
 * otherwise the bitrate heuristic. Ends by refreshing container timings
 * and (in debug builds) dumping the result.
 */
2194 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2198 /* get the file size, if possible */
2199 if (ic->iformat->flags & AVFMT_NOFILE) {
2202 file_size = avio_size(ic->pb);
2203 file_size = FFMAX(0, file_size);
/* the PTS-tail method is only trusted for MPEG-PS/TS on seekable input */
2206 if ((!strcmp(ic->iformat->name, "mpeg") ||
2207 !strcmp(ic->iformat->name, "mpegts")) &&
2208 file_size && ic->pb->seekable) {
2209 /* get accurate estimate from the PTSes */
2210 estimate_timings_from_pts(ic, old_offset);
2211 } else if (has_duration(ic)) {
2212 /* at least one component has timings - we use them for all
2214 fill_all_stream_timings(ic);
2216 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2217 /* less precise: use bitrate info */
2218 estimate_timings_from_bit_rate(ic);
2220 update_stream_timings(ic);
2224 AVStream av_unused *st;
2225 for(i = 0;i < ic->nb_streams; i++) {
2226 st = ic->streams[i];
2227 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2228 (double) st->start_time / AV_TIME_BASE,
2229 (double) st->duration / AV_TIME_BASE);
2231 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2232 (double) ic->start_time / AV_TIME_BASE,
2233 (double) ic->duration / AV_TIME_BASE,
2234 ic->bit_rate / 1000);
/**
 * Return nonzero when the stream's codec context holds enough parameters
 * (per media type) for the stream to be considered fully probed.
 * NOTE(review): the video val assignment and some branches are elided in
 * this excerpt.
 */
2238 static int has_codec_parameters(AVStream *st)
2240 AVCodecContext *avctx = st->codec;
2242 switch (avctx->codec_type) {
2243 case AVMEDIA_TYPE_AUDIO:
2244 val = avctx->sample_rate && avctx->channels;
2245 if (!avctx->frame_size && determinable_frame_size(avctx))
/* a decoder was tried but never produced a sample format */
2247 if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2250 case AVMEDIA_TYPE_VIDEO:
2252 if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
2255 case AVMEDIA_TYPE_DATA:
/* data streams with no codec id need no further parameters */
2256 if(avctx->codec_id == CODEC_ID_NONE) return 1;
2261 return avctx->codec_id != CODEC_ID_NONE && val != 0;
/**
 * H.264 can have a multi-frame decode delay; require at least 6 decoded
 * frames before trusting the guessed delay. Other codecs need no guess.
 */
2264 static int has_decode_delay_been_guessed(AVStream *st)
2266 return st->codec->codec_id != CODEC_ID_H264 ||
2267 st->info->nb_decoded_frames >= 6;
2270 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/**
 * Probe helper: open the stream's decoder if necessary (single-threaded,
 * so H.264 extradata extraction works) and decode from @p avpkt until the
 * stream parameters are known or the packet is exhausted.
 * NOTE(review): interior lines are elided in this excerpt.
 */
2271 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2274 int got_picture = 1, ret = 0;
2276 AVPacket pkt = *avpkt;
2278 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2279 AVDictionary *thread_opt = NULL;
2281 codec = st->codec->codec ? st->codec->codec :
2282 avcodec_find_decoder(st->codec->codec_id);
/* found_decoder == -1 records "no decoder available / open failed" */
2285 st->info->found_decoder = -1;
2289 /* force thread count to 1 since the h264 decoder will not extract SPS
2290 * and PPS to extradata during multi-threaded decoding */
2291 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2292 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2294 av_dict_free(&thread_opt);
2296 st->info->found_decoder = -1;
2299 st->info->found_decoder = 1;
2300 } else if (!st->info->found_decoder)
2301 st->info->found_decoder = 1;
2303 if (st->info->found_decoder < 0)
/* keep decoding while data remains (or the codec still flushes frames)
 * and we still lack parameters / delay estimate / channel config */
2306 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2308 (!has_codec_parameters(st) ||
2309 !has_decode_delay_been_guessed(st) ||
2310 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2312 avcodec_get_frame_defaults(&picture);
2313 switch(st->codec->codec_type) {
2314 case AVMEDIA_TYPE_VIDEO:
2315 ret = avcodec_decode_video2(st->codec, &picture,
2316 &got_picture, &pkt);
2318 case AVMEDIA_TYPE_AUDIO:
2319 ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
2326 st->info->nb_decoded_frames++;
2332 if(!pkt.data && !got_picture)
/**
 * Look up the container tag for a codec id in a CODEC_ID_NONE-terminated
 * tag table.
 * NOTE(review): the loop body and return are elided in this excerpt.
 */
2337 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2339 while (tags->id != CODEC_ID_NONE) {
/**
 * Reverse lookup: map a container tag to a codec id. First pass is an
 * exact match; second pass is case-insensitive (toupper4 on both sides).
 */
2347 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2350 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2351 if(tag == tags[i].tag)
/* second pass: tolerate case differences in FourCC tags */
2354 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2355 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2358 return CODEC_ID_NONE;
/**
 * Search a NULL-terminated list of tag tables for the tag matching @p id.
 */
2361 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2364 for(i=0; tags && tags[i]; i++){
2365 int tag= ff_codec_get_tag(tags[i], id);
/**
 * Search a NULL-terminated list of tag tables for the codec id matching
 * @p tag; CODEC_ID_NONE if no table resolves it.
 */
2371 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2374 for(i=0; tags && tags[i]; i++){
2375 enum CodecID id= ff_codec_get_id(tags[i], tag);
2376 if(id!=CODEC_ID_NONE) return id;
2378 return CODEC_ID_NONE;
/**
 * Fill in missing chapter end times: each open-ended chapter ends at the
 * earliest start of any later-starting chapter, capped by the total
 * duration; with no such bound it degenerates to its own start.
 */
2381 static void compute_chapters_end(AVFormatContext *s)
2384 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2386 for (i = 0; i < s->nb_chapters; i++)
2387 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2388 AVChapter *ch = s->chapters[i];
/* upper bound: total duration rescaled to the chapter's time base */
2389 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2392 for (j = 0; j < s->nb_chapters; j++) {
2393 AVChapter *ch1 = s->chapters[j];
2394 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
/* shrink the end down to the next chapter's start */
2395 if (j != i && next_start > ch->start && next_start < end)
2398 ch->end = (end == INT64_MAX) ? ch->start : end;
/**
 * Map an index to a standard framerate numerator, in units of 1/(1001*12)
 * Hz: the first 60*12 indices cover i/1.001 fps steps; the rest are the
 * listed whole-number rates (24, 30, 60, 12, 15 fps).
 */
2402 static int get_std_framerate(int i){
2403 if(i<60*12) return i*1001;
2404 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2408 * Is the time base unreliable.
2409 * This is a heuristic to balance between quick acceptance of the values in
2410 * the headers vs. some extra checks.
2411 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2412 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2413 * And there are "variable" fps files this needs to detect as well.
/**
 * Heuristic: nonzero if the codec time base should not be trusted as the
 * framerate — absurdly small/large den/num ratios, or codecs (MPEG-2,
 * H.264, ...) known to misuse the time base.
 * NOTE(review): the tail of the condition list is elided in this excerpt.
 */
2415 static int tb_unreliable(AVCodecContext *c){
2416 if( c->time_base.den >= 101L*c->time_base.num
2417 || c->time_base.den < 5L*c->time_base.num
2418 /* || c->codec_tag == AV_RL32("DIVX")
2419 || c->codec_tag == AV_RL32("XVID")*/
2420 || c->codec_id == CODEC_ID_MPEG2VIDEO
2421 || c->codec_id == CODEC_ID_H264
2427 #if FF_API_FORMAT_PARAMETERS
/**
 * Deprecated wrapper (kept under FF_API_FORMAT_PARAMETERS) that forwards
 * to avformat_find_stream_info() with no per-stream options.
 */
2428 int av_find_stream_info(AVFormatContext *ic)
2430 return avformat_find_stream_info(ic, NULL);
2434 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2436 int i, count, ret, read_size, j;
2438 AVPacket pkt1, *pkt;
2439 int64_t old_offset = avio_tell(ic->pb);
2440 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2441 int flush_codecs = 1;
2444 av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
2446 for(i=0;i<ic->nb_streams;i++) {
2448 AVDictionary *thread_opt = NULL;
2449 st = ic->streams[i];
2451 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2452 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2453 /* if(!st->time_base.num)
2455 if(!st->codec->time_base.num)
2456 st->codec->time_base= st->time_base;
2458 //only for the split stuff
2459 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2460 st->parser = av_parser_init(st->codec->codec_id);
2462 if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
2463 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2464 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
2465 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
2469 codec = st->codec->codec ? st->codec->codec :
2470 avcodec_find_decoder(st->codec->codec_id);
2472 /* force thread count to 1 since the h264 decoder will not extract SPS
2473 * and PPS to extradata during multi-threaded decoding */
2474 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2476 /* Ensure that subtitle_header is properly set. */
2477 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2478 && codec && !st->codec->codec)
2479 avcodec_open2(st->codec, codec, options ? &options[i]
2482 //try to just open decoders, in case this is enough to get parameters
2483 if (!has_codec_parameters(st)) {
2484 if (codec && !st->codec->codec)
2485 avcodec_open2(st->codec, codec, options ? &options[i]
2489 av_dict_free(&thread_opt);
2492 for (i=0; i<ic->nb_streams; i++) {
2493 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2499 if (ff_check_interrupt(&ic->interrupt_callback)){
2501 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2505 /* check if one codec still needs to be handled */
2506 for(i=0;i<ic->nb_streams;i++) {
2507 int fps_analyze_framecount = 20;
2509 st = ic->streams[i];
2510 if (!has_codec_parameters(st))
2512 /* if the timebase is coarse (like the usual millisecond precision
2513 of mkv), we need to analyze more frames to reliably arrive at
2515 if (av_q2d(st->time_base) > 0.0005)
2516 fps_analyze_framecount *= 2;
2517 if (ic->fps_probe_size >= 0)
2518 fps_analyze_framecount = ic->fps_probe_size;
2519 /* variable fps and no guess at the real fps */
2520 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2521 && st->info->duration_count < fps_analyze_framecount
2522 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2524 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2526 if (st->first_dts == AV_NOPTS_VALUE &&
2527 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2528 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2531 if (i == ic->nb_streams) {
2532 /* NOTE: if the format has no header, then we need to read
2533 some packets to get most of the streams, so we cannot
2535 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2536 /* if we found the info for all the codecs, we can stop */
2538 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2543 /* we did not get all the codec info, but we read too much data */
2544 if (read_size >= ic->probesize) {
2546 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2547 for (i = 0; i < ic->nb_streams; i++)
2548 if (!ic->streams[i]->r_frame_rate.num &&
2549 ic->streams[i]->info->duration_count <= 1)
2550 av_log(ic, AV_LOG_WARNING,
2551 "Stream #%d: not enough frames to estimate rate; "
2552 "consider increasing probesize\n", i);
2556 /* NOTE: a new stream can be added there if no header in file
2557 (AVFMTCTX_NOHEADER) */
2558 ret = read_frame_internal(ic, &pkt1);
2559 if (ret == AVERROR(EAGAIN))
2567 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2568 if ((ret = av_dup_packet(pkt)) < 0)
2569 goto find_stream_info_err;
2571 read_size += pkt->size;
2573 st = ic->streams[pkt->stream_index];
2574 if (st->codec_info_nb_frames>1) {
2576 if (st->time_base.den > 0)
2577 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
2578 if (st->avg_frame_rate.num > 0)
2579 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, (AVRational){st->avg_frame_rate.den, st->avg_frame_rate.num}, AV_TIME_BASE_Q));
2581 if (t >= ic->max_analyze_duration) {
2582 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached at %"PRId64"\n", ic->max_analyze_duration, t);
2585 st->info->codec_info_duration += pkt->duration;
2588 int64_t last = st->info->last_dts;
2590 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2591 double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2592 int64_t duration= pkt->dts - last;
2594 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2595 // av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2596 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); i++) {
2597 int framerate= get_std_framerate(i);
2598 double sdts= dts*framerate/(1001*12);
2600 int ticks= lrintf(sdts+j*0.5);
2601 double error= sdts - ticks + j*0.5;
2602 st->info->duration_error[j][0][i] += error;
2603 st->info->duration_error[j][1][i] += error*error;
2606 st->info->duration_count++;
2607 // ignore the first 4 values, they might have some random jitter
2608 if (st->info->duration_count > 3)
2609 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2611 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2612 st->info->last_dts = pkt->dts;
2614 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2615 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2616 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2617 st->codec->extradata_size= i;
2618 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2619 if (!st->codec->extradata)
2620 return AVERROR(ENOMEM);
2621 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2622 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2626 /* if still no information, we try to open the codec and to
2627 decompress the frame. We try to avoid that in most cases as
2628 it takes longer and uses more memory. For MPEG-4, we need to
2629 decompress for QuickTime.
2631 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2632 least one frame of codec data, this makes sure the codec initializes
2633 the channel configuration and does not only trust the values from the container.
2635 try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2637 st->codec_info_nb_frames++;
2642 AVPacket empty_pkt = { 0 };
2644 av_init_packet(&empty_pkt);
2646 ret = -1; /* we could not have all the codec parameters before EOF */
2647 for(i=0;i<ic->nb_streams;i++) {
2648 st = ic->streams[i];
2650 /* flush the decoders */
2651 if (st->info->found_decoder == 1) {
2653 err = try_decode_frame(st, &empty_pkt,
2654 (options && i < orig_nb_streams) ?
2655 &options[i] : NULL);
2656 } while (err > 0 && !has_codec_parameters(st));
2659 av_log(ic, AV_LOG_INFO,
2660 "decoding for stream %d failed\n", st->index);
2664 if (!has_codec_parameters(st)){
2666 avcodec_string(buf, sizeof(buf), st->codec, 0);
2667 av_log(ic, AV_LOG_WARNING,
2668 "Could not find codec parameters (%s)\n", buf);
2675 // close codecs which were opened in try_decode_frame()
2676 for(i=0;i<ic->nb_streams;i++) {
2677 st = ic->streams[i];
2678 avcodec_close(st->codec);
2680 for(i=0;i<ic->nb_streams;i++) {
2681 st = ic->streams[i];
2682 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2683 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
2684 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2685 if(ff_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
2686 st->codec->codec_tag= tag;
2689 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2690 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2691 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2692 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2693 // the check for tb_unreliable() is not completely correct, since this is not about handling
2694 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2695 // ipmovie.c produces.
2696 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
2697 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2698 if (st->info->duration_count && !st->r_frame_rate.num
2699 && tb_unreliable(st->codec) /*&&
2700 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2701 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2703 double best_error= 0.01;
2705 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); j++) {
2708 if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
2710 if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
2713 int n= st->info->duration_count;
2714 double a= st->info->duration_error[k][0][j] / n;
2715 double error= st->info->duration_error[k][1][j]/n - a*a;
2717 if(error < best_error && best_error> 0.000000001){
2719 num = get_std_framerate(j);
2722 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
2725 // do not increase frame rate by more than 1 % in order to match a standard rate.
2726 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2727 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2730 if (!st->r_frame_rate.num){
2731 if( st->codec->time_base.den * (int64_t)st->time_base.num
2732 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2733 st->r_frame_rate.num = st->codec->time_base.den;
2734 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2736 st->r_frame_rate.num = st->time_base.den;
2737 st->r_frame_rate.den = st->time_base.num;
2740 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2741 if(!st->codec->bits_per_coded_sample)
2742 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2743 // set stream disposition based on audio service type
2744 switch (st->codec->audio_service_type) {
2745 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2746 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2747 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2748 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2749 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2750 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2751 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2752 st->disposition = AV_DISPOSITION_COMMENT; break;
2753 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2754 st->disposition = AV_DISPOSITION_KARAOKE; break;
2759 estimate_timings(ic, old_offset);
2761 compute_chapters_end(ic);
2763 find_stream_info_err:
2764 for (i=0; i < ic->nb_streams; i++) {
2765 if (ic->streams[i]->codec)
2766 ic->streams[i]->codec->thread_count = 0;
2767 av_freep(&ic->streams[i]->info);
2770 av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
/* Return a program containing stream index s, scanning programs after
 * 'last' (pass last == NULL to start from the first program).
 * NOTE(review): this extract is line-sampled — the function braces, the
 * state handling for 'last' and the final return are elided; verify
 * against the full file before relying on exact matching semantics. */
2774 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
2778 for (i = 0; i < ic->nb_programs; i++) {
2779 if (ic->programs[i] == last) {
2783 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2784 if (ic->programs[i]->stream_index[j] == s)
2785 return ic->programs[i];
/* Pick the "best" stream of the given media type.
 * Selection visible here: restrict to a related program when
 * related_stream >= 0, honor wanted_stream_nb, deprioritize
 * hearing/visually-impaired dispositions, and among candidates keep the
 * one with the highest codec_info_nb_frames. Returns the stream index,
 * AVERROR_STREAM_NOT_FOUND, or AVERROR_DECODER_NOT_FOUND when a decoder
 * was requested but none exists.
 * NOTE(review): extract is line-sampled — the parameter list (flags,
 * related_stream) and several `continue`/brace lines are elided. */
2791 int av_find_best_stream(AVFormatContext *ic,
2792 enum AVMediaType type,
2793 int wanted_stream_nb,
2795 AVCodec **decoder_ret,
2798 int i, nb_streams = ic->nb_streams;
2799 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2800 unsigned *program = NULL;
2801 AVCodec *decoder = NULL, *best_decoder = NULL;
2803 if (related_stream >= 0 && wanted_stream_nb < 0) {
     /* limit the search to streams of the program that contains
        related_stream */
2804 AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
2806 program = p->stream_index;
2807 nb_streams = p->nb_stream_indexes;
2810 for (i = 0; i < nb_streams; i++) {
2811 int real_stream_index = program ? program[i] : i;
2812 AVStream *st = ic->streams[real_stream_index];
2813 AVCodecContext *avctx = st->codec;
2814 if (avctx->codec_type != type)
2816 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
     /* accessibility renderings are never picked as the default */
2818 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2821 decoder = avcodec_find_decoder(st->codec->codec_id);
2824 ret = AVERROR_DECODER_NOT_FOUND;
     /* keep the candidate seen with the most demuxed frames */
2828 if (best_count >= st->codec_info_nb_frames)
2830 best_count = st->codec_info_nb_frames;
2831 ret = real_stream_index;
2832 best_decoder = decoder;
2833 if (program && i == nb_streams - 1 && ret < 0) {
2835 nb_streams = ic->nb_streams;
2836 i = 0; /* no related stream found, try again with everything */
2840 *decoder_ret = best_decoder;
2844 /*******************************************************/
/* Start/resume playing a (network) stream: prefer the demuxer's
 * read_play callback, otherwise un-pause the underlying AVIOContext.
 * NOTE(review): the `if (s->pb)` guard before avio_pause appears elided
 * in this extract. */
2846 int av_read_play(AVFormatContext *s)
2848 if (s->iformat->read_play)
2849 return s->iformat->read_play(s);
2851 return avio_pause(s->pb, 0);
2852 return AVERROR(ENOSYS); /* neither mechanism available */
/* Pause a (network) stream: mirror of av_read_play() with pause=1.
 * NOTE(review): the `if (s->pb)` guard appears elided in this extract. */
2855 int av_read_pause(AVFormatContext *s)
2857 if (s->iformat->read_pause)
2858 return s->iformat->read_pause(s);
2860 return avio_pause(s->pb, 1);
2861 return AVERROR(ENOSYS); /* neither mechanism available */
/* Free an AVFormatContext and everything it owns: per-format private
 * options, every stream (parser, attached picture, metadata, index,
 * codec context extradata/subtitle header, private/info data), all
 * programs, all chapters, the context metadata and the streams array.
 * Does NOT close s->pb — the I/O context belongs to the caller.
 * NOTE(review): extract is line-sampled (loop braces, `st` assignment
 * and the final free of `s` itself are elided). */
2864 void avformat_free_context(AVFormatContext *s)
2870 if (s->iformat && s->iformat->priv_class && s->priv_data)
2871 av_opt_free(s->priv_data);
2873 for(i=0;i<s->nb_streams;i++) {
2874 /* free all data in a stream component */
2877 av_parser_close(st->parser);
2879 if (st->attached_pic.data)
2880 av_free_packet(&st->attached_pic);
2881 av_dict_free(&st->metadata);
2882 av_freep(&st->index_entries);
2883 av_freep(&st->codec->extradata);
2884 av_freep(&st->codec->subtitle_header);
2885 av_freep(&st->codec);
2886 av_freep(&st->priv_data);
2887 av_freep(&st->info);
2890 for(i=s->nb_programs-1; i>=0; i--) {
2891 av_dict_free(&s->programs[i]->metadata);
2892 av_freep(&s->programs[i]->stream_index);
2893 av_freep(&s->programs[i]);
2895 av_freep(&s->programs);
2896 av_freep(&s->priv_data);
2897 while(s->nb_chapters--) {
2898 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2899 av_freep(&s->chapters[s->nb_chapters]);
2901 av_freep(&s->chapters);
2902 av_dict_free(&s->metadata);
2903 av_freep(&s->streams);
#if FF_API_CLOSE_INPUT_FILE
/* Deprecated compatibility wrapper around avformat_close_input(). */
2908 void av_close_input_file(AVFormatContext *s)
2910 avformat_close_input(&s);
/* Close an input context: flush queued packets, call the demuxer's
 * read_close, free the context, and NULL the caller's pointer.
 * The AVIOContext is only closed when it is owned by the library —
 * AVFMT_NOFILE formats and AVFMT_FLAG_CUSTOM_IO leave pb alone.
 * NOTE(review): extract is line-sampled (the *ps = NULL / avio_close
 * tail is elided). */
2914 void avformat_close_input(AVFormatContext **ps)
2916 AVFormatContext *s = *ps;
2917 AVIOContext *pb = (s->iformat && (s->iformat->flags & AVFMT_NOFILE)) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2919 flush_packet_queue(s);
2920 if (s->iformat && (s->iformat->read_close))
2921 s->iformat->read_close(s);
2922 avformat_free_context(s);
#if FF_API_NEW_STREAM
/* Deprecated wrapper: create a stream via avformat_new_stream() and
 * (in the elided tail) assign the caller-provided id. */
2929 AVStream *av_new_stream(AVFormatContext *s, int id)
2931 AVStream *st = avformat_new_stream(s, NULL);
/* Append a new AVStream to s and return it (NULL on allocation
 * failure). Grows s->streams, allocates the stream and its info block,
 * allocates a codec context (with defaults for codec c, if given), and
 * initializes timestamps/indices to their "unknown" sentinels.
 * NOTE(review): extract is line-sampled — several allocation-failure
 * branches and braces are elided. */
2938 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
2944 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2946 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2949 s->streams = streams;
2951 st = av_mallocz(sizeof(AVStream));
2954 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2958 st->info->last_dts = AV_NOPTS_VALUE;
2960 st->codec = avcodec_alloc_context3(c);
2962 /* no default bitrate if decoding */
2963 st->codec->bit_rate = 0;
2965 st->index = s->nb_streams;
2966 st->start_time = AV_NOPTS_VALUE;
2967 st->duration = AV_NOPTS_VALUE;
2968 /* we set the current DTS to 0 so that formats without any timestamps
2969 but durations get some timestamps, formats with some unknown
2970 timestamps have their first few packets buffered and the
2971 timestamps corrected before they are returned to the user */
2972 st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
2973 st->first_dts = AV_NOPTS_VALUE;
2974 st->probe_packets = MAX_PROBE_PACKETS;
2976 /* default pts setting is MPEG-like */
2977 avpriv_set_pts_info(st, 33, 1, 90000);
2978 st->last_IP_pts = AV_NOPTS_VALUE;
2979 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2980 st->pts_buffer[i]= AV_NOPTS_VALUE;
2981 st->reference_dts = AV_NOPTS_VALUE;
2983 st->sample_aspect_ratio = (AVRational){0,1};
2985 s->streams[s->nb_streams++] = st;
/* Return the program with the given id, creating and registering a new
 * one (discard = AVDISCARD_NONE) when none exists yet.
 * NOTE(review): extract is line-sampled — the NULL checks, id
 * assignment and return are elided. */
2989 AVProgram *av_new_program(AVFormatContext *ac, int id)
2991 AVProgram *program=NULL;
2994 av_dlog(ac, "new_program: id=0x%04x\n", id);
2996 for(i=0; i<ac->nb_programs; i++)
2997 if(ac->programs[i]->id == id)
2998 program = ac->programs[i];
3001 program = av_mallocz(sizeof(AVProgram));
3004 dynarray_add(&ac->programs, &ac->nb_programs, program);
3005 program->discard = AVDISCARD_NONE;
/* Find-or-create a chapter with the given id, then (re)set its title
 * metadata, time base, start and (elided here) end. Returns the
 * chapter, or presumably NULL on allocation failure — lines elided.
 * NOTE(review): extract is line-sampled. */
3012 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
3014 AVChapter *chapter = NULL;
3017 for(i=0; i<s->nb_chapters; i++)
3018 if(s->chapters[i]->id == id)
3019 chapter = s->chapters[i];
3022 chapter= av_mallocz(sizeof(AVChapter));
3025 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3027 av_dict_set(&chapter->metadata, "title", title, 0);
3029 chapter->time_base= time_base;
3030 chapter->start = start;
3036 /************************************************************/
3037 /* output media file */
/* Allocate an output AVFormatContext. The muxer is chosen in priority
 * order: explicit oformat, then by format name, then guessed from the
 * filename. Allocates and default-initializes the muxer's private data
 * when it has any, and copies filename into the context. On failure the
 * context is freed and a negative AVERROR is returned (error path at
 * the bottom).
 * NOTE(review): extract is line-sampled — NULL checks, goto labels and
 * the success return are elided. */
3039 int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
3040 const char *format, const char *filename)
3042 AVFormatContext *s = avformat_alloc_context();
3051 oformat = av_guess_format(format, NULL, NULL);
3053 av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
3054 ret = AVERROR(EINVAL);
3058 oformat = av_guess_format(NULL, filename, NULL);
3060 ret = AVERROR(EINVAL);
3061 av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
3068 s->oformat = oformat;
3069 if (s->oformat->priv_data_size > 0) {
3070 s->priv_data = av_mallocz(s->oformat->priv_data_size);
3073 if (s->oformat->priv_class) {
3074 *(const AVClass**)s->priv_data= s->oformat->priv_class;
3075 av_opt_set_defaults(s->priv_data);
3078 s->priv_data = NULL;
3081 av_strlcpy(s->filename, filename, sizeof(s->filename));
3085 av_log(s, AV_LOG_ERROR, "Out of memory\n");
3086 ret = AVERROR(ENOMEM);
3088 avformat_free_context(s);
#if FF_API_ALLOC_OUTPUT_CONTEXT
/* Deprecated wrapper: same as avformat_alloc_output_context2() but
 * returns NULL instead of an error code. */
3093 AVFormatContext *avformat_alloc_output_context(const char *format,
3094 AVOutputFormat *oformat, const char *filename)
3096 AVFormatContext *avctx;
3097 int ret = avformat_alloc_output_context2(&avctx, oformat, format, filename);
3098 return ret < 0 ? NULL : avctx;
/* Check whether the stream's (codec_tag, codec_id) pair is acceptable
 * for the output format's codec_tag tables. Contract (from the original
 * comment): unknown tag+id passes; a tag registered for another id
 * fails; an id registered for another tag fails unless strictness is
 * below normal.
 * NOTE(review): extract is line-sampled — the avctag++ advance, brace
 * lines and the final returns are elided. */
3102 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
3104 const AVCodecTag *avctag;
3106 enum CodecID id = CODEC_ID_NONE;
3107 unsigned int tag = 0;
3110 * Check that tag + id is in the table
3111 * If neither is in the table -> OK
3112 * If tag is in the table with another id -> FAIL
3113 * If id is in the table with another tag -> FAIL unless strict < normal
3115 for (n = 0; s->oformat->codec_tag[n]; n++) {
3116 avctag = s->oformat->codec_tag[n];
3117 while (avctag->id != CODEC_ID_NONE) {
      /* case-insensitive 4CC comparison */
3118 if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
3120 if (id == st->codec->codec_id)
3123 if (avctag->id == st->codec->codec_id)
3128 if (id != CODEC_ID_NONE)
3130 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
/* Initialize the muxer and write the container header.
 * Visible phases: apply caller options to the context and the muxer's
 * private data; sanity-check every stream (sample rate / block_align
 * for audio, time base, dimensions and aspect-ratio consistency for
 * video); reconcile codec_tag against the muxer's tag tables; allocate
 * muxer private data if still missing; stamp an "encoder" metadata tag
 * unless bitexact; call the muxer's write_header; and seed per-stream
 * fractional PTS generation. Returns 0 on success, negative AVERROR on
 * failure (options not consumed are returned through *options).
 * NOTE(review): extract is line-sampled — `st` assignments, break/goto
 * lines and most closing braces are elided. */
3135 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
3139 AVDictionary *tmp = NULL;
3142 av_dict_copy(&tmp, *options, 0);
3143 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
3145 if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
3146 (ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3149 // some sanity checks
3150 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
3151 av_log(s, AV_LOG_ERROR, "no streams\n");
3152 ret = AVERROR(EINVAL);
3156 for(i=0;i<s->nb_streams;i++) {
3159 switch (st->codec->codec_type) {
3160 case AVMEDIA_TYPE_AUDIO:
3161 if(st->codec->sample_rate<=0){
3162 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
3163 ret = AVERROR(EINVAL);
      /* derive block_align for fixed-size-sample codecs */
3166 if(!st->codec->block_align)
3167 st->codec->block_align = st->codec->channels *
3168 av_get_bits_per_sample(st->codec->codec_id) >> 3;
3170 case AVMEDIA_TYPE_VIDEO:
3171 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
3172 av_log(s, AV_LOG_ERROR, "time base not set\n");
3173 ret = AVERROR(EINVAL);
3176 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
3177 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
3178 ret = AVERROR(EINVAL);
      /* stream-level and codec-level SAR must agree within ~0.4% */
3181 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)
3182 && FFABS(av_q2d(st->sample_aspect_ratio) - av_q2d(st->codec->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
3184 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
3185 "(%d/%d) and encoder layer (%d/%d)\n",
3186 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3187 st->codec->sample_aspect_ratio.num,
3188 st->codec->sample_aspect_ratio.den);
3189 ret = AVERROR(EINVAL);
3195 if(s->oformat->codec_tag){
3196 if( st->codec->codec_tag
3197 && st->codec->codec_id == CODEC_ID_RAWVIDEO
3198 && (av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 || av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) ==MKTAG('r', 'a', 'w', ' '))
3199 && !validate_codec_tag(s, st)){
3200 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi/mov, we override it here
3201 st->codec->codec_tag= 0;
3203 if(st->codec->codec_tag){
3204 if (!validate_codec_tag(s, st)) {
3205 char tagbuf[32], cortag[32];
3206 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
3207 av_get_codec_tag_string(cortag, sizeof(cortag), av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id));
3208 av_log(s, AV_LOG_ERROR,
3209 "Tag %s/0x%08x incompatible with output codec id '%d' (%s)\n",
3210 tagbuf, st->codec->codec_tag, st->codec->codec_id, cortag);
3211 ret = AVERROR_INVALIDDATA;
      /* no tag yet: look one up from the muxer's tables */
3215 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
3218 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
3219 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
3220 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
3223 if (!s->priv_data && s->oformat->priv_data_size > 0) {
3224 s->priv_data = av_mallocz(s->oformat->priv_data_size);
3225 if (!s->priv_data) {
3226 ret = AVERROR(ENOMEM);
3229 if (s->oformat->priv_class) {
3230 *(const AVClass**)s->priv_data= s->oformat->priv_class;
3231 av_opt_set_defaults(s->priv_data);
3232 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3237 /* set muxer identification string */
3238 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
3239 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
3242 if(s->oformat->write_header){
3243 ret = s->oformat->write_header(s);
3248 /* init PTS generation */
3249 for(i=0;i<s->nb_streams;i++) {
3250 int64_t den = AV_NOPTS_VALUE;
3253 switch (st->codec->codec_type) {
3254 case AVMEDIA_TYPE_AUDIO:
3255 den = (int64_t)st->time_base.num * st->codec->sample_rate;
3257 case AVMEDIA_TYPE_VIDEO:
3258 den = (int64_t)st->time_base.num * st->codec->time_base.den;
3263 if (den != AV_NOPTS_VALUE) {
3265 ret = AVERROR_INVALIDDATA;
3268 frac_init(&st->pts, 0, 0, den);
3273 av_dict_free(options);
3282 //FIXME merge with compute_pkt_fields
/* Fill in / validate timestamp fields of a packet before muxing:
 * derive a duration when missing, synthesize pts for delay-free
 * streams, reconstruct dts from pts through the reorder buffer,
 * enforce monotonically increasing dts (strictly, unless the muxer
 * sets AVFMT_TS_NONSTRICT) and pts >= dts, then advance the stream's
 * fractional pts generator. Returns 0 or AVERROR(EINVAL).
 * NOTE(review): extract is line-sampled — some closing braces,
 * `break`s and a pkt->dts assignment are elided. */
3283 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
3284 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
3285 int num, den, frame_size, i;
3287 av_dlog(s, "compute_pkt_fields2: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
3288 av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), delay, pkt->size, pkt->stream_index);
3290 /* duration field */
3291 if (pkt->duration == 0) {
3292 compute_frame_duration(&num, &den, st, NULL, pkt);
3294 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
  /* no B-frame delay: dts can serve as pts */
3298 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
3301 //XXX/FIXME this is a temporary hack until all encoders output pts
3302 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
3305 av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
3309 //        pkt->pts= st->cur_dts;
3310 pkt->pts= st->pts.val;
3313 //calculate dts from pts
3314 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
3315 st->pts_buffer[0]= pkt->pts;
3316 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
3317 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
   /* one bubble-pass keeps the buffer sorted; smallest pts = next dts */
3318 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
3319 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
3321 pkt->dts= st->pts_buffer[0];
3324 if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE &&
3325 ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
3326 st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
3327 av_log(s, AV_LOG_ERROR,
3328 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
3329 st->index, av_ts2str(st->cur_dts), av_ts2str(pkt->dts));
3330 return AVERROR(EINVAL);
3332 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3333 av_log(s, AV_LOG_ERROR, "pts (%s) < dts (%s) in stream %d\n",
3334 av_ts2str(pkt->pts), av_ts2str(pkt->dts), st->index);
3335 return AVERROR(EINVAL);
3338 //    av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%s dts2:%s\n", av_ts2str(pkt->pts), av_ts2str(pkt->dts));
3339 st->cur_dts= pkt->dts;
3340 st->pts.val= pkt->dts;
3343 switch (st->codec->codec_type) {
3344 case AVMEDIA_TYPE_AUDIO:
3345 frame_size = get_audio_frame_size(st->codec, pkt->size, 1);
3347 /* HACK/FIXME, we skip the initial 0 size packets as they are most
3348 likely equal to the encoder delay, but it would be better if we
3349 had the real timestamps from the encoder */
3350 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3351 frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3354 case AVMEDIA_TYPE_VIDEO:
3355 frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Write a packet directly (no interleaving). A NULL pkt flushes muxers
 * that declare AVFMT_ALLOW_FLUSH (visible at the top). Timestamp fields
 * are validated via compute_pkt_fields2(); its errors are fatal only
 * for timestamp-aware muxers.
 * NOTE(review): extract is line-sampled — the NULL-pkt guard, return
 * of ret and closing braces are elided. */
3363 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3368 if (s->oformat->flags & AVFMT_ALLOW_FLUSH)
3369 return s->oformat->write_packet(s, pkt);
3373 ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
3375 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3378 ret= s->oformat->write_packet(s, pkt);
3381 s->streams[pkt->stream_index]->nb_frames++;
3385 #define CHUNK_START 0x1000
/* Insert a packet into the interleaving queue, keeping it ordered
 * according to 'compare'. Ownership of the packet data moves to the
 * queue (pkt->destruct is cleared and the copy duplicated). When
 * chunked interleaving is enabled (max_chunk_size/duration), packets
 * are additionally grouped into chunks marked with CHUNK_START.
 * NOTE(review): extract is line-sampled — NULL checks, the fast-path
 * append condition and several braces are elided. */
3387 int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3388 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3390 AVPacketList **next_point, *this_pktl;
3391 AVStream *st= s->streams[pkt->stream_index];
3392 int chunked= s->max_chunk_size || s->max_chunk_duration;
3394 this_pktl = av_mallocz(sizeof(AVPacketList));
3396 return AVERROR(ENOMEM);
3397 this_pktl->pkt= *pkt;
3398 pkt->destruct= NULL;             // do not free original but only the copy
3399 av_dup_packet(&this_pktl->pkt);  // duplicate the packet if it uses non-alloced memory
3401 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3402 next_point = &(st->last_in_packet_buffer->next);
3404 next_point = &s->packet_buffer;
  /* chunked mode: close the current chunk when size/duration budgets
     would be exceeded (the -1U trick also catches the "no limit" 0) */
3409 uint64_t max= av_rescale_q(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base);
3410 if(   st->interleaver_chunk_size     + pkt->size     <= s->max_chunk_size-1U
3411 && st->interleaver_chunk_duration + pkt->duration <= max-1U){
3412 st->interleaver_chunk_size     += pkt->size;
3413 st->interleaver_chunk_duration += pkt->duration;
3416 st->interleaver_chunk_size     =
3417 st->interleaver_chunk_duration = 0;
3418 this_pktl->pkt.flags |= CHUNK_START;
  /* otherwise walk the list to find the sorted insertion point */
3422 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3424 && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
3425 || !compare(s, &(*next_point)->pkt, pkt))){
3426 next_point= &(*next_point)->next;
3431 next_point = &(s->packet_buffer_end->next);
3434 assert(!*next_point);
3436 s->packet_buffer_end= this_pktl;
3439 this_pktl->next= *next_point;
3441 s->streams[pkt->stream_index]->last_in_packet_buffer=
3442 *next_point= this_pktl;
/* Ordering predicate for dts-based interleaving: compare next vs pkt
 * by dts in their respective time bases. When audio_preload is set,
 * audio packets are biased earlier by that amount (with an exact
 * cross-multiplied fallback, presumably guarding av_rescale_q overflow
 * — the surrounding condition is elided). Ties break on stream index.
 * NOTE(review): extract is line-sampled. */
3446 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3448 AVStream *st  = s->streams[ pkt ->stream_index];
3449 AVStream *st2 = s->streams[ next->stream_index];
3450 int comp      = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3452 if(s->audio_preload && ((st->codec->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codec->codec_type == AVMEDIA_TYPE_AUDIO))){
3453 int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3454 int64_t ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3456 ts= ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
3457 -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
3460 comp= (ts>ts2) - (ts<ts2);
3464 return pkt->stream_index < next->stream_index;
/* dts-based interleaver: queue pkt (if any), then emit the head packet
 * once every stream has a buffered packet, or on explicit flush, or —
 * as a safety valve — when only non-interleavable (subtitle) streams
 * are missing and the buffered dts spread exceeds 20 seconds.
 * Returns via 'out'; the outer return statements are elided here.
 * NOTE(review): extract is line-sampled — error handling for
 * ff_interleave_add_packet, the stream_count increment and the
 * "nothing to output" path are not visible. */
3468 int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3469 AVPacket *pkt, int flush)
3472 int stream_count=0, noninterleaved_count=0;
3473 int64_t delta_dts_max = 0;
3477 ret = ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
3482 for(i=0; i < s->nb_streams; i++) {
3483 if (s->streams[i]->last_in_packet_buffer) {
3485 } else if(s->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
3486 ++noninterleaved_count;
3490 if (s->nb_streams == stream_count) {
  /* compute the max dts distance between each stream's newest buffered
     packet and the queue head, in AV_TIME_BASE units */
3493 for(i=0; i < s->nb_streams; i++) {
3494 if (s->streams[i]->last_in_packet_buffer) {
3496 av_rescale_q(s->streams[i]->last_in_packet_buffer->pkt.dts,
3497 s->streams[i]->time_base,
3499 av_rescale_q(s->packet_buffer->pkt.dts,
3500 s->streams[s->packet_buffer->pkt.stream_index]->time_base,
3502 delta_dts_max= FFMAX(delta_dts_max, delta_dts);
3505 if(s->nb_streams == stream_count+noninterleaved_count &&
3506 delta_dts_max > 20*AV_TIME_BASE) {
3507 av_log(s, AV_LOG_DEBUG, "flushing with %d noninterleaved\n", noninterleaved_count);
3511 if(stream_count && flush){
  /* pop the queue head and clear the per-stream tail pointer */
3512 pktl= s->packet_buffer;
3515 s->packet_buffer= pktl->next;
3516 if(!s->packet_buffer)
3517 s->packet_buffer_end= NULL;
3519 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3520 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3524 av_init_packet(out);
#if FF_API_INTERLEAVE_PACKET
/* Deprecated public wrapper around ff_interleave_packet_per_dts(). */
3530 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3531 AVPacket *pkt, int flush)
3533 return ff_interleave_packet_per_dts(s, out, pkt, flush);
3538 * Interleave an AVPacket correctly so it can be muxed.
3539 * @param out the interleaved packet will be output here
3540 * @param in the input packet
3541 * @param flush 1 if no further packets are available as input and all
3542 * remaining packets should be output
3543 * @return 1 if a packet was output, 0 if no packet could be output,
3544 * < 0 if an error occurred
/* Dispatch to the muxer's own interleave_packet callback when present,
 * otherwise fall back to the generic dts-based interleaver. See the
 * elided contract comment above: 1 = packet output, 0 = none, <0 error.
 * NOTE(review): a line between the callback return and the fallback is
 * elided in this extract. */
3546 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3547 if (s->oformat->interleave_packet) {
3548 int ret = s->oformat->interleave_packet(s, out, in, flush);
3553 return ff_interleave_packet_per_dts(s, out, in, flush);
/* Interleaved write: fix up packet timestamps, push the packet through
 * interleave_packet(), and write out every packet the interleaver
 * releases. A NULL pkt (elided flush setup) drains the queue.
 * Zero-size audio packets are dropped (marked FIXME/HACK). Returns the
 * pb error state, if any, on the visible exit path.
 * NOTE(review): extract is line-sampled — the stream_index validation,
 * flush variable and loop construct are not fully visible. */
3556 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3560 AVStream *st= s->streams[ pkt->stream_index];
3562 //FIXME/XXX/HACK drop zero sized packets
3563 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3566 av_dlog(s, "av_interleaved_write_frame size:%d dts:%s pts:%s\n",
3567 pkt->size, av_ts2str(pkt->dts), av_ts2str(pkt->pts));
3568 if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3571 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3572 return AVERROR(EINVAL);
3574 av_dlog(s, "av_interleaved_write_frame FLUSH\n");
3580 int ret= interleave_packet(s, &opkt, pkt, flush);
3581 if(ret<=0) //FIXME cleanup needed for ret<0 ?
3584 ret= s->oformat->write_packet(s, &opkt);
3586 s->streams[opkt.stream_index]->nb_frames++;
3588 av_free_packet(&opkt);
3593 if(s->pb && s->pb->error)
3594 return s->pb->error;
/* Finish muxing: drain the interleaving queue (flush=1), write the
 * container trailer if the muxer has one, report any pb error, and free
 * per-stream private data plus the muxer's private options.
 * NOTE(review): extract is line-sampled — the drain loop construct,
 * break conditions and final return are not visible. */
3598 int av_write_trailer(AVFormatContext *s)
3604 ret= interleave_packet(s, &pkt, NULL, 1);
3605 if(ret<0) //FIXME cleanup needed for ret<0 ?
3610 ret= s->oformat->write_packet(s, &pkt);
3612 s->streams[pkt.stream_index]->nb_frames++;
3614 av_free_packet(&pkt);
3618 if(s->pb && s->pb->error)
3622 if(s->oformat->write_trailer)
3623 ret = s->oformat->write_trailer(s);
3628 ret = s->pb ? s->pb->error : 0;
3629 for(i=0;i<s->nb_streams;i++) {
3630 av_freep(&s->streams[i]->priv_data);
3631 av_freep(&s->streams[i]->index_entries);
3633 if (s->oformat->priv_class)
3634 av_opt_free(s->priv_data);
3635 av_freep(&s->priv_data);
/* Query the muxer for the last output dts / wall-clock pair of a
 * stream; AVERROR(ENOSYS) when the muxer does not support it. */
3639 int av_get_output_timestamp(struct AVFormatContext *s, int stream,
3640 int64_t *dts, int64_t *wall)
3642 if (!s->oformat || !s->oformat->get_output_timestamp)
3643 return AVERROR(ENOSYS);
3644 s->oformat->get_output_timestamp(s, stream, dts, wall);
/* Add stream index idx to the program with id progid; no-op when the
 * index is already present or out of range. Grows the program's
 * stream_index array via av_realloc.
 * NOTE(review): extract is line-sampled — `continue`, the duplicate-
 * found return and the realloc-failure branch are not visible. */
3648 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3651 AVProgram *program=NULL;
3654 if (idx >= ac->nb_streams) {
3655 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3659 for(i=0; i<ac->nb_programs; i++){
3660 if(ac->programs[i]->id != progid)
3662 program = ac->programs[i];
   /* skip if the index is already registered */
3663 for(j=0; j<program->nb_stream_indexes; j++)
3664 if(program->stream_index[j] == idx)
3667 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3670 program->stream_index = tmp;
3671 program->stream_index[program->nb_stream_indexes++] = idx;
/* Log a frame rate with adaptive precision: two decimals when it is
 * not an integer, integer form otherwise, and "k" units for exact
 * multiples of 1000 (v holds the rate in hundredths). */
3676 static void print_fps(double d, const char *postfix){
3677 uint64_t v= lrintf(d*100);
3678 if    (v% 100      ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3679 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3680 else                av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/* Pretty-print a metadata dictionary under a "Metadata:" heading,
 * skipping the "language" key (shown elsewhere) and suppressing the
 * heading entirely when language is the only entry. Values are printed
 * in CR/LF-delimited segments: CR becomes a space, LF starts a fresh
 * indented continuation line.
 * NOTE(review): extract is line-sampled — the tmp buffer declaration,
 * the p-advance loop and closing braces are not visible. */
3683 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3685 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3686 AVDictionaryEntry *tag=NULL;
3688 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3689 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3690 if(strcmp("language", tag->key)){
3691 const char *p = tag->value;
3692 av_log(ctx, AV_LOG_INFO, "%s  %-16s: ", indent, tag->key);
     /* print up to the next CR/LF, then translate the delimiter */
3695 size_t len = strcspn(p, "\xd\xa");
3696 av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3697 av_log(ctx, AV_LOG_INFO, "%s", tmp);
3699 if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3700 if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s  %-16s: ", indent, "");
3703 av_log(ctx, AV_LOG_INFO, "\n");
3709 /* "user interface" functions */
/* Dump a one-line (plus metadata) description of stream 'i' of 'ic':
 * "Stream #index:i[0xid](lang): <codec string>, SAR/DAR, rates, dispositions".
 * 'index' is the container index shown before the stream number. */
3710 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3713     int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3714     AVStream *st = ic->streams[i];
3715     int g = av_gcd(st->time_base.num, st->time_base.den);
3716     AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3717     avcodec_string(buf, sizeof(buf), st->codec, is_output);
3718     av_log(NULL, AV_LOG_INFO, "    Stream #%d:%d", index, i);
3719     /* the pid is an important information, so we display it */
3720     /* XXX: add a generic system */
3721     if (flags & AVFMT_SHOW_IDS)
3722         av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3724         av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
/* frame count and reduced time base only at debug verbosity */
3725     av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3726     av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* show SAR/DAR only when the stream-level SAR differs from the codec-level one */
3727     if (st->sample_aspect_ratio.num && // default
3728         av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3729         AVRational display_aspect_ratio;
3730         av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3731                   st->codec->width*st->sample_aspect_ratio.num,
3732                   st->codec->height*st->sample_aspect_ratio.den,
3734         av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3735                  st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3736                  display_aspect_ratio.num, display_aspect_ratio.den);
/* video rates: fps = average frame rate, tbr = real base frame rate,
 * tbn = stream time base, tbc = codec time base */
3738     if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3739         if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3740             print_fps(av_q2d(st->avg_frame_rate), "fps");
3741         if(st->r_frame_rate.den && st->r_frame_rate.num)
3742             print_fps(av_q2d(st->r_frame_rate), "tbr");
3743         if(st->time_base.den && st->time_base.num)
3744             print_fps(1/av_q2d(st->time_base), "tbn");
3745         if(st->codec->time_base.den && st->codec->time_base.num)
3746             print_fps(1/av_q2d(st->codec->time_base), "tbc");
/* annotate each disposition flag that is set */
3748     if (st->disposition & AV_DISPOSITION_DEFAULT)
3749         av_log(NULL, AV_LOG_INFO, " (default)");
3750     if (st->disposition & AV_DISPOSITION_DUB)
3751         av_log(NULL, AV_LOG_INFO, " (dub)");
3752     if (st->disposition & AV_DISPOSITION_ORIGINAL)
3753         av_log(NULL, AV_LOG_INFO, " (original)");
3754     if (st->disposition & AV_DISPOSITION_COMMENT)
3755         av_log(NULL, AV_LOG_INFO, " (comment)");
3756     if (st->disposition & AV_DISPOSITION_LYRICS)
3757         av_log(NULL, AV_LOG_INFO, " (lyrics)");
3758     if (st->disposition & AV_DISPOSITION_KARAOKE)
3759         av_log(NULL, AV_LOG_INFO, " (karaoke)");
3760     if (st->disposition & AV_DISPOSITION_FORCED)
3761         av_log(NULL, AV_LOG_INFO, " (forced)");
3762     if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3763         av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3764     if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3765         av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3766     if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3767         av_log(NULL, AV_LOG_INFO, " (clean effects)");
3768     av_log(NULL, AV_LOG_INFO, "\n");
3769     dump_metadata(NULL, st->metadata, "    ");
/* Public entry point: print a human-readable description of the format
 * context 'ic' — container, duration, start time, bitrate, chapters,
 * programs and all streams. Streams that belong to a program are printed
 * under it; 'printed' tracks which ones were already shown so the final
 * loop only dumps the leftovers. */
3772 void av_dump_format(AVFormatContext *ic,
3778     uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
/* allocation failure: error path elided in this listing */
3779     if (ic->nb_streams && !printed)
3782     av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3783            is_output ? "Output" : "Input",
3785            is_output ? ic->oformat->name : ic->iformat->name,
3786            is_output ? "to" : "from", url);
3787     dump_metadata(NULL, ic->metadata, "  ");
3789         av_log(NULL, AV_LOG_INFO, "  Duration: ");
3790         if (ic->duration != AV_NOPTS_VALUE) {
3791             int hours, mins, secs, us;
3792             secs = ic->duration / AV_TIME_BASE;
3793             us = ic->duration % AV_TIME_BASE;
/* hours/mins breakdown happens on elided lines; print HH:MM:SS.cc */
3798             av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3799                    (100 * us) / AV_TIME_BASE);
3801             av_log(NULL, AV_LOG_INFO, "N/A");
3803         if (ic->start_time != AV_NOPTS_VALUE) {
3805             av_log(NULL, AV_LOG_INFO, ", start: ");
3806             secs = ic->start_time / AV_TIME_BASE;
3807             us = abs(ic->start_time % AV_TIME_BASE);
3808             av_log(NULL, AV_LOG_INFO, "%d.%06d",
3809                    secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3811         av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3813             av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3815             av_log(NULL, AV_LOG_INFO, "N/A");
3817         av_log(NULL, AV_LOG_INFO, "\n");
/* chapters: start/end printed in seconds using each chapter's time base */
3819     for (i = 0; i < ic->nb_chapters; i++) {
3820         AVChapter *ch = ic->chapters[i];
3821         av_log(NULL, AV_LOG_INFO, "    Chapter #%d.%d: ", index, i);
3822         av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3823         av_log(NULL, AV_LOG_INFO, "end %f\n",   ch->end   * av_q2d(ch->time_base));
3825         dump_metadata(NULL, ch->metadata, "    ");
/* programs: dump each program's streams and mark them as printed */
3827     if(ic->nb_programs) {
3828         int j, k, total = 0;
3829         for(j=0; j<ic->nb_programs; j++) {
3830             AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3832             av_log(NULL, AV_LOG_INFO, "  Program %d %s\n", ic->programs[j]->id,
3833                    name ? name->value : "");
3834             dump_metadata(NULL, ic->programs[j]->metadata, "    ");
3835             for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3836                 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3837                 printed[ic->programs[j]->stream_index[k]] = 1;
3839             total += ic->programs[j]->nb_stream_indexes;
3841         if (total < ic->nb_streams)
3842             av_log(NULL, AV_LOG_INFO, "  No Program\n");
/* finally, dump streams not claimed by any program */
3844     for(i=0;i<ic->nb_streams;i++)
3846             dump_stream_format(ic, i, index, is_output);
/* Return the current wall-clock time in microseconds since the Unix epoch. */
3851 int64_t av_gettime(void)
3854     gettimeofday(&tv,NULL);
3855     return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/* Current time in NTP units: microseconds since the NTP epoch, with
 * millisecond granularity (the /1000*1000 truncates sub-millisecond digits). */
3858 uint64_t ff_ntp_time(void)
3860   return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/* Expand a single "%0Nd"-style pattern in 'path' with 'number' into 'buf'.
 * Fails (error paths elided here) on truncation or when no/multiple %d
 * specifiers are found.
 * NOTE(review): isdigit() is fed a plain char; should be cast to
 * unsigned char (or use av_isdigit) to avoid UB on negative chars — confirm
 * against the project's ctype policy. */
3863 int av_get_frame_filename(char *buf, int buf_size,
3864                           const char *path, int number)
3867     char *q, buf1[20], c;
3868     int nd, len, percentd_found;
/* parse the optional zero-padding width after '%' */
3880             while (isdigit(*p)) {
3881                 nd = nd * 10 + *p++ - '0';
3884             } while (isdigit(c));
/* format the number with the requested width and splice it in */
3893                 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3895                 if ((q - buf + len) > buf_size - 1)
3897                 memcpy(q, buf1, len);
3905             if ((q - buf) < buf_size - 1)
3909     if (!percentd_found)
/* Hex-dump 'size' bytes of 'buf', 16 per row, either to FILE 'f' or via
 * av_log at 'level' when f is NULL; each row ends with a printable-ASCII
 * column (non-printables shown as a substitute on an elided line). */
3918 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
/* route output to av_log or fprintf depending on whether f was given */
3922 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3924     for(i=0;i<size;i+=16) {
3931             PRINT(" %02x", buf[i+j]);
3936         for(j=0;j<len;j++) {
3938             if (c < ' ' || c > '~')
/* Public wrapper: hex-dump to a stdio FILE (level unused when f != NULL). */
3947 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3949     hex_dump_internal(NULL, f, 0, buf, size);
/* Public wrapper: hex-dump through av_log on context 'avcl' at 'level'. */
3952 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3954     hex_dump_internal(avcl, NULL, level, buf, size);
/* Print an AVPacket's fields (stream index, keyframe flag, duration,
 * dts/pts scaled by 'time_base', size) and optionally its payload as hex.
 * Output goes to FILE 'f', or to av_log at 'level' when f is NULL. */
3957 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3960 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3961     PRINT("stream #%d:\n", pkt->stream_index);
3962     PRINT("  keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3963     PRINT("  duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3964     /* DTS is _always_ valid after av_read_frame() */
3966     if (pkt->dts == AV_NOPTS_VALUE)
3969         PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3970     /* PTS may not be known if B-frames are present. */
3972     if (pkt->pts == AV_NOPTS_VALUE)
3975         PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3977     PRINT("  size=%d\n", pkt->size);
/* payload dump guarded by dump_payload on an elided line */
3980         av_hex_dump(f, pkt->data, pkt->size);
/* Deprecated-style wrapper: dump a packet to FILE using AV_TIME_BASE units. */
3984 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3986     AVRational tb = { 1, AV_TIME_BASE };
3987     pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
/* Dump a packet to FILE using the owning stream's time base for timestamps. */
3991 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3993     pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
/* Dump a packet through av_log using AV_TIME_BASE units. */
3997 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3999     AVRational tb = { 1, AV_TIME_BASE };
4000     pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
/* Dump a packet through av_log using the owning stream's time base. */
4004 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
4007     pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/* Split 'url' into proto, authorization (user[:pass]), hostname, port and
 * path components. Any output buffer may be sized 0 to skip a component;
 * *port_ptr is set to -1 when no port is present. Handles bracketed
 * numerical IPv6 hosts ("[::1]:port"). */
4010 void av_url_split(char *proto, int proto_size,
4011                   char *authorization, int authorization_size,
4012                   char *hostname, int hostname_size,
4014                   char *path, int path_size,
4017     const char *p, *ls, *at, *col, *brk;
/* initialize all outputs so early returns leave them well-defined */
4019     if (port_ptr)               *port_ptr = -1;
4020     if (proto_size > 0)         proto[0] = 0;
4021     if (authorization_size > 0) authorization[0] = 0;
4022     if (hostname_size > 0)      hostname[0] = 0;
4023     if (path_size > 0)          path[0] = 0;
4025     /* parse protocol */
4026     if ((p = strchr(url, ':'))) {
4027         av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
4032         /* no protocol means plain filename */
4033         av_strlcpy(path, url, path_size);
4037     /* separate path from hostname */
4038     ls = strchr(p, '/');
4040         ls = strchr(p, '?');
4042         av_strlcpy(path, ls, path_size);
4044         ls = &p[strlen(p)]; // XXX
4046     /* the rest is hostname, use that to parse auth/port */
4048         /* authorization (user[:pass]@hostname) */
4049         if ((at = strchr(p, '@')) && at < ls) {
4050             av_strlcpy(authorization, p,
4051                        FFMIN(authorization_size, at + 1 - p));
4052             p = at + 1; /* skip '@' */
/* bracketed IPv6 literal: host is between '[' and ']', port after "]:" */
4055         if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
4057             av_strlcpy(hostname, p + 1,
4058                        FFMIN(hostname_size, brk - p));
4059             if (brk[1] == ':' && port_ptr)
4060                 *port_ptr = atoi(brk + 2);
4061         } else if ((col = strchr(p, ':')) && col < ls) {
4062             av_strlcpy(hostname, p,
4063                        FFMIN(col + 1 - p, hostname_size));
4064             if (port_ptr) *port_ptr = atoi(col + 1);
/* no port: everything up to the path separator is the hostname */
4066             av_strlcpy(hostname, p,
4067                        FFMIN(ls + 1 - p, hostname_size));
/* Encode 's' bytes of 'src' as hex into 'buff' (upper- or lowercase).
 * Caller must provide at least 2*s bytes (plus terminator, added on an
 * elided line — confirm against the header's contract). */
4071 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
4074     static const char hex_table_uc[16] = { '0', '1', '2', '3',
4077                                            'C', 'D', 'E', 'F' };
4078     static const char hex_table_lc[16] = { '0', '1', '2', '3',
4081                                            'c', 'd', 'e', 'f' };
4082     const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
4084     for(i = 0; i < s; i++) {
4085         buff[i * 2]     = hex_table[src[i] >> 4];
4086         buff[i * 2 + 1] = hex_table[src[i] & 0xF];
/* Decode a whitespace-tolerant hex string 'p' into 'data'; pairs of hex
 * digits become bytes. Returns the number of bytes written (return and
 * accumulation logic partially elided in this listing). */
4092 int ff_hex_to_data(uint8_t *data, const char *p)
4099         p += strspn(p, SPACE_CHARS);
/* cast to unsigned char before toupper — required by the C ctype contract */
4102         c = toupper((unsigned char) *p++);
4103         if (c >= '0' && c <= '9')
4105         else if (c >= 'A' && c <= 'F')
/* Deprecated public shim, kept behind FF_API_SET_PTS_INFO:
 * forwards to the private avpriv_set_pts_info(). */
4120 #if FF_API_SET_PTS_INFO
4121 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
4122                      unsigned int pts_num, unsigned int pts_den)
4124     avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
/* Set a stream's time base to pts_num/pts_den (reduced to lowest terms,
 * capped at INT_MAX) and its timestamp wrap width. An invalid (non-positive)
 * reduced time base is rejected with an error log and leaves the stream
 * unchanged. */
4128 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
4129                          unsigned int pts_num, unsigned int pts_den)
/* av_reduce returns nonzero when the reduction was exact */
4132     if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
4133         if(new_tb.num != pts_num)
4134             av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
/* inexact reduction: the requested time base overflowed INT_MAX */
4136         av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
4138     if(new_tb.num <= 0 || new_tb.den <= 0) {
4139         av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase %d/%d for st:%d\n", new_tb.num, new_tb.den, s->index);
4142     s->time_base = new_tb;
4143     s->pts_wrap_bits = pts_wrap_bits;
/* Compose "proto://[auth@]host[:port]<fmt...>" into 'str' (size-bounded).
 * Numerical IPv6 hostnames are wrapped in brackets when network support is
 * compiled in. Returns the resulting string length (return elided here). */
4146 int ff_url_join(char *str, int size, const char *proto,
4147                 const char *authorization, const char *hostname,
4148                 int port, const char *fmt, ...)
4151     struct addrinfo hints = { 0 }, *ai;
4156         av_strlcatf(str, size, "%s://", proto);
4157     if (authorization && authorization[0])
4158         av_strlcatf(str, size, "%s@", authorization);
4159 #if CONFIG_NETWORK && defined(AF_INET6)
4160     /* Determine if hostname is a numerical IPv6 address,
4161      * properly escape it within [] in that case. */
4162     hints.ai_flags = AI_NUMERICHOST;
4163     if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
4164         if (ai->ai_family == AF_INET6) {
4165             av_strlcat(str, "[", size);
4166             av_strlcat(str, hostname, size);
4167             av_strlcat(str, "]", size);
4169             av_strlcat(str, hostname, size);
4174         /* Not an IPv6 address, just output the plain string. */
4175         av_strlcat(str, hostname, size);
/* port <= 0 means "omit the port"; guard is on an elided line */
4178         av_strlcatf(str, size, ":%d", port);
4181         int len = strlen(str);
/* append the printf-style tail without overrunning the buffer */
4184         vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
/* Forward a packet from muxer 'src' to chained muxer 'dst': retarget the
 * stream index and rescale pts/dts from the source stream's time base to
 * the destination stream's, then write the frame. */
4190 int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
4191                      AVFormatContext *src)
4196     local_pkt.stream_index = dst_stream;
4197     if (pkt->pts != AV_NOPTS_VALUE)
4198         local_pkt.pts = av_rescale_q(pkt->pts,
4199                                      src->streams[pkt->stream_index]->time_base,
4200                                      dst->streams[dst_stream]->time_base);
4201     if (pkt->dts != AV_NOPTS_VALUE)
4202         local_pkt.dts = av_rescale_q(pkt->dts,
4203                                      src->streams[pkt->stream_index]->time_base,
4204                                      dst->streams[dst_stream]->time_base);
4205     return av_write_frame(dst, &local_pkt);
/* Parse "key=value" pairs (comma/whitespace separated, values optionally
 * double-quoted) from 'str'. For each pair, callback_get_buf supplies the
 * destination buffer for the value; dest_end reserves one byte for the
 * terminating NUL.
 * NOTE(review): isspace() receives a plain char here; should be cast to
 * unsigned char — confirm against the project's ctype policy. */
4208 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
4211     const char *ptr = str;
4213     /* Parse key=value pairs. */
4216         char *dest = NULL, *dest_end;
4217         int key_len, dest_len = 0;
4219         /* Skip whitespace and potential commas. */
4220         while (*ptr && (isspace(*ptr) || *ptr == ','))
4227         if (!(ptr = strchr(key, '=')))
4230         key_len = ptr - key;
/* ask the caller for a value buffer keyed on (key, key_len) */
4232         callback_get_buf(context, key, key_len, &dest, &dest_len);
4233         dest_end = dest + dest_len - 1;
/* quoted value: copy until the closing quote (escape handling elided) */
4237             while (*ptr && *ptr != '\"') {
4241                     if (dest && dest < dest_end)
4245                 if (dest && dest < dest_end)
/* unquoted value: copy until whitespace or comma */
4253             for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
4254                 if (dest && dest < dest_end)
/* Return the index of the stream whose id matches 'id', or a not-found
 * value (the failure return is on an elided line). Linear scan. */
4262 int ff_find_stream_index(AVFormatContext *s, int id)
4265     for (i = 0; i < s->nb_streams; i++) {
4266         if (s->streams[i]->id == id)
/* Resolve relative URL 'rel' against 'base' into 'buf' (size-bounded):
 * keeps the server part for "/"-rooted rel, copies rel verbatim when it is
 * already absolute, and otherwise strips the base's file name and applies
 * "../" segments before appending rel. */
4272 void ff_make_absolute_url(char *buf, int size, const char *base,
4276     /* Absolute path, relative to the current server */
4277     if (base && strstr(base, "://") && rel[0] == '/') {
4279             av_strlcpy(buf, base, size);
4280         sep = strstr(buf, "://");
/* cut the copied base at the first '/' after the scheme+authority */
4283             sep = strchr(sep, '/');
4287         av_strlcat(buf, rel, size);
4290     /* If rel actually is an absolute url, just copy it */
4291     if (!base || strstr(rel, "://") || rel[0] == '/') {
4292         av_strlcpy(buf, rel, size);
4296         av_strlcpy(buf, base, size);
4297     /* Remove the file name from the base url */
4298     sep = strrchr(buf, '/');
/* consume leading "../" segments of rel by popping base directories */
4303     while (av_strstart(rel, "../", NULL) && sep) {
4304         /* Remove the path delimiter at the end */
4306         sep = strrchr(buf, '/');
4307         /* If the next directory name to pop off is "..", break here */
4308         if (!strcmp(sep ? &sep[1] : buf, "..")) {
4309             /* Readd the slash we just removed */
4310             av_strlcat(buf, "/", size);
4313         /* Cut off the directory name */
4320     av_strlcat(buf, rel, size);
/* Convert an ISO-8601-ish date string to a Unix timestamp (UTC) using
 * strptime; accepts both "YYYY-MM-DD hh:mm:ss" and "YYYY-MM-DDThh:mm:ss"
 * (the " - " in the format string matches optional whitespace around '-').
 * Falls back to a warning when strptime is unavailable (#if guard elided). */
4323 int64_t ff_iso8601_to_unix_time(const char *datestr)
4326     struct tm time1 = {0}, time2 = {0};
4328     ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
4329     ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
/* prefer whichever format consumed more of the input (selection elided) */
4331         return av_timegm(&time2);
4333         return av_timegm(&time1);
4335     av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert "
4336            "the date string.\n");
/* Query whether 'ofmt' can mux 'codec_id': delegate to the muxer's
 * query_codec callback if present, else check its codec tag table, else
 * compare against the muxer's default video/audio/subtitle codecs.
 * Returns AVERROR_PATCHWELCOME when the answer cannot be determined. */
4341 int avformat_query_codec(AVOutputFormat *ofmt, enum CodecID codec_id, int std_compliance)
4344         if (ofmt->query_codec)
4345             return ofmt->query_codec(codec_id, std_compliance);
4346         else if (ofmt->codec_tag)
4347             return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4348         else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
4349                  codec_id == ofmt->subtitle_codec)
4352     return AVERROR_PATCHWELCOME;
/* Global network initialization: mark networking as globally inited and
 * run the platform setup (e.g. WSAStartup); error propagation elided. */
4355 int avformat_network_init(void)
4359     ff_network_inited_globally = 1;
4360     if ((ret = ff_network_init()) < 0)
/* Counterpart of avformat_network_init(); body elided in this listing. */
4367 int avformat_network_deinit(void)
/* Attach an AV_PKT_DATA_PARAM_CHANGE side-data blob to 'pkt' describing
 * the non-zero parameters among channel count, channel layout, sample rate
 * and video dimensions. Layout: le32 flags, then each present field in
 * little-endian order. Returns 0 on success or a negative AVERROR. */
4376 int ff_add_param_change(AVPacket *pkt, int32_t channels,
4377                         uint64_t channel_layout, int32_t sample_rate,
4378                         int32_t width, int32_t height)
/* validation of the packet happens on elided lines */
4384         return AVERROR(EINVAL);
/* first pass: accumulate flags (and, on elided lines, the blob size) */
4387         flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4389     if (channel_layout) {
4391         flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4395         flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4397     if (width || height) {
4399         flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
4401     data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4403         return AVERROR(ENOMEM);
/* second pass: serialize flags followed by each present field */
4404     bytestream_put_le32(&data, flags);
4406         bytestream_put_le32(&data, channels);
4408         bytestream_put_le64(&data, channel_layout);
4410         bytestream_put_le32(&data, sample_rate);
4411     if (width || height) {
4412         bytestream_put_le32(&data, width);
4413         bytestream_put_le32(&data, height);
/* Expose the internal RIFF/BMP video codec-tag table to public API users. */
4418 const struct AVCodecTag *avformat_get_riff_video_tags(void)
4420     return ff_codec_bmp_tags;
/* Expose the internal RIFF/WAV audio codec-tag table to public API users. */
4422 const struct AVCodecTag *avformat_get_riff_audio_tags(void)
4424     return ff_codec_wav_tags;
4427 AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4429 AVRational undef = {0, 1};
4430 AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4431 AVRational codec_sample_aspect_ratio = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
4432 AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
4434 av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4435 stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4436 if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4437 stream_sample_aspect_ratio = undef;
4439 av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4440 frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4441 if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4442 frame_sample_aspect_ratio = undef;
4444 if (stream_sample_aspect_ratio.num)
4445 return stream_sample_aspect_ratio;
4447 return frame_sample_aspect_ratio;