2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "avio_internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavcodec/raw.h"
29 #include "libavcodec/bytestream.h"
30 #include "libavutil/avassert.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/dict.h"
33 #include "libavutil/pixdesc.h"
36 #include "libavutil/avassert.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/timestamp.h"
42 #include "audiointerleave.h"
54 * various utility functions for use within FFmpeg
57 unsigned avformat_version(void)
59 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
60 return LIBAVFORMAT_VERSION_INT;
63 const char *avformat_configuration(void)
65 return FFMPEG_CONFIGURATION;
68 const char *avformat_license(void)
70 #define LICENSE_PREFIX "libavformat license: "
71 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Base for "relative" timestamps handed out before the real first DTS is
 * known: a window just below INT64_MAX that real timestamps never reach. */
#define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))

/**
 * Check whether a timestamp lies in the internal "relative" range, i.e.
 * within 2^48 ticks below RELATIVE_TS_BASE or above it.
 * (Fix: the closing brace of this one-liner was lost in extraction.)
 */
static int is_relative(int64_t ts) {
    return ts > (RELATIVE_TS_BASE - (1LL<<48));
}
80 /* fraction handling */
83 * f = val + (num / den) + 0.5.
85 * 'num' is normalized so that it is such as 0 <= num < den.
87 * @param f fractional number
88 * @param val integer value
89 * @param num must be >= 0
90 * @param den must be >= 1
92 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
105 * Fractional addition to f: f = f + (incr / f->den).
107 * @param f fractional number
108 * @param incr increment, can be positive or negative
110 static void frac_add(AVFrac *f, int64_t incr)
123 } else if (num >= den) {
130 /** head of registered input format linked list */
131 static AVInputFormat *first_iformat = NULL;
132 /** head of registered output format linked list */
133 static AVOutputFormat *first_oformat = NULL;
135 AVInputFormat *av_iformat_next(AVInputFormat *f)
137 if(f) return f->next;
138 else return first_iformat;
141 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
143 if(f) return f->next;
144 else return first_oformat;
/*
 * Register a demuxer / muxer by appending it to the corresponding global
 * singly linked list.
 * NOTE(review): fragments — the pointer declaration and the final append
 * ("*p = format; format->next = NULL;" in upstream) were lost in extraction;
 * only the list-walk line survives in each function.
 */
void av_register_input_format(AVInputFormat *format)
    while (*p != NULL) p = &(*p)->next;

void av_register_output_format(AVOutputFormat *format)
    while (*p != NULL) p = &(*p)->next;
/*
 * Return nonzero if the extension of filename matches one of the entries in
 * the comma-separated list 'extensions' (case-insensitive).
 * NOTE(review): fragment — local declarations (ext1/q/p buffers), NULL
 * checks, loop closers and return statements were lost in extraction.
 */
int av_match_ext(const char *filename, const char *extensions)
    /* find the last '.' to isolate the extension */
    ext = strrchr(filename, '.');
    /* copy one comma-separated candidate into ext1, bounded by its size */
    while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
    if (!av_strcasecmp(ext1, ext))
/*
 * Return 1 if 'name' case-insensitively matches one entry of the
 * comma-separated list 'names', else 0.
 * NOTE(review): fragment — declarations of p/len/namelen, the NULL-argument
 * guard, "return 1;" and the "names = p + 1;" advance were lost in
 * extraction.
 */
static int match_format(const char *name, const char *names)
    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        /* compare against the longer of the two lengths so "mp3" does not
         * match "mp3x" and vice versa */
        len = FFMAX(p - names, namelen);
        if (!av_strncasecmp(name, names, len))
    /* last (or only) entry has no trailing comma */
    return !av_strcasecmp(name, names);
/*
 * Guess an output format from a short name, a filename and/or a mime type,
 * scoring every registered muxer and returning the best match.
 * NOTE(review): fragment — score accumulation lines, loop/branch closers and
 * the final "return fmt_found;" were lost in extraction.
 */
AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
                                const char *mime_type)
    AVOutputFormat *fmt = NULL, *fmt_found;
    int score_max, score;

    /* specific test for image sequences */
#if CONFIG_IMAGE2_MUXER
    if (!short_name && filename &&
        av_filename_number_test(filename) &&
        ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
        return av_guess_format("image2", NULL, NULL);

    /* Find the proper file type. */
    while ((fmt = av_oformat_next(fmt))) {
        if (fmt->name && short_name && match_format(short_name, fmt->name))
        if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
        if (filename && fmt->extensions &&
            av_match_ext(filename, fmt->extensions)) {
        if (score > score_max) {
/*
 * Pick the default codec id of the given media type from an output format's
 * defaults; for the image2 muxers the video codec is guessed from the
 * filename instead.
 * NOTE(review): fragment — "#endif", "return codec_id;", the final "else"
 * and closing braces were lost in extraction.
 */
enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
                            const char *filename, const char *mime_type, enum AVMediaType type){
    if(type == AVMEDIA_TYPE_VIDEO){
        enum CodecID codec_id= CODEC_ID_NONE;

#if CONFIG_IMAGE2_MUXER
        if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
            codec_id= ff_guess_image2_codec(filename);
        if(codec_id == CODEC_ID_NONE)
            codec_id= fmt->video_codec;
    }else if(type == AVMEDIA_TYPE_AUDIO)
        return fmt->audio_codec;
    else if (type == AVMEDIA_TYPE_SUBTITLE)
        return fmt->subtitle_codec;
    return CODEC_ID_NONE;
/*
 * Find a registered demuxer whose name matches short_name.
 * NOTE(review): fragment — the "return fmt;" on match and the final
 * "return NULL;" were lost in extraction.
 */
AVInputFormat *av_find_input_format(const char *short_name)
    AVInputFormat *fmt = NULL;
    while ((fmt = av_iformat_next(fmt))) {
        if (match_format(short_name, fmt->name))
/*
 * Clamp a requested read size so it does not run past s->maxsize
 * (lazily initialized from avio_size()). Logs and truncates when the
 * request would exceed the remaining bytes.
 * NOTE(review): fragment — the early "maxsize unset" path, the final
 * "return size;"/clamped-return lines and closing braces were lost in
 * extraction.
 */
int ffio_limit(AVIOContext *s, int size)
    int64_t remaining= s->maxsize - avio_tell(s);
    if(remaining < size){
        int64_t newsize= avio_size(s);
        if(!s->maxsize || s->maxsize<newsize)
            /* "- !newsize" turns a 0 size into -1 (unknown) */
            s->maxsize= newsize - !newsize;
        remaining= s->maxsize - avio_tell(s);
        remaining= FFMAX(remaining, 0);

    if(s->maxsize>=0 && remaining+1 < size){
        av_log(0, AV_LOG_ERROR, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
/*
 * Allocate a packet of at most 'size' bytes (limited by ffio_limit) and
 * fill it from the byte stream; a short read shrinks the packet and marks
 * it AV_PKT_FLAG_CORRUPT.
 * NOTE(review): fragment — error-return checks after av_new_packet()/
 * avio_read() and the final "return ret;" were lost in extraction.
 */
int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
    int orig_size = size;
    size= ffio_limit(s, size);

    ret= av_new_packet(pkt, size);

    pkt->pos= avio_tell(s);

    ret= avio_read(s, pkt->data, size);
        av_shrink_packet(pkt, ret);
    if (pkt->size < orig_size)
        pkt->flags |= AV_PKT_FLAG_CORRUPT;
/*
 * Append up to 'size' more bytes from the stream to an existing packet;
 * an empty packet degenerates to av_get_packet().
 * NOTE(review): fragment — declarations, the empty-packet test guarding the
 * early return, error checks and the final return were lost in extraction.
 */
int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
        return av_get_packet(s, pkt, size);
    old_size = pkt->size;
    ret = av_grow_packet(pkt, size);
    ret = avio_read(s, pkt->data + old_size, size);
    /* on short/failed read keep only what was actually appended */
    av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
/**
 * Check whether filename contains a pattern av_get_frame_filename() can
 * substitute a frame number into (e.g. "%d").
 * (Fix: the braces and the scratch-buffer declaration were lost in
 * extraction; the buffer is required by the sizeof(buf) usage below —
 * size restored from upstream, verify against repository history.)
 *
 * @return 1 if a number can be substituted, 0 otherwise
 */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
}
/*
 * Probe all registered demuxers against the data in pd and return the one
 * with the highest score; av_probe_input_format2/av_probe_input_format are
 * thin convenience wrappers.
 * NOTE(review): fragments — ID3 skipping details, score bookkeeping, loop
 * closers and return statements were lost in extraction.
 */
AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
    /* probe on a local copy so the caller's buffer pointers are untouched */
    AVProbeData lpd = *pd;
    AVInputFormat *fmt1 = NULL, *fmt;
    int score, nodat = 0, score_max=0;

    /* skip a leading ID3v2 tag unless it covers nearly the whole buffer */
    if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
        int id3len = ff_id3v2_tag_len(lpd.buf);
        if (lpd.buf_size > id3len + 16) {
            lpd.buf_size -= id3len;

    while ((fmt1 = av_iformat_next(fmt1))) {
        /* NOFILE demuxers are only probed before a file is opened */
        if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
        if (fmt1->read_probe) {
            score = fmt1->read_probe(&lpd);
            if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
                score = FFMAX(score, nodat ? AVPROBE_SCORE_MAX/4-1 : 1);
        } else if (fmt1->extensions) {
            if (av_match_ext(lpd.filename, fmt1->extensions)) {
        if (score > score_max) {
        }else if (score == score_max)
    *score_ret= score_max;

/* Wrapper: return the probed format only if it beats *score_max. */
AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
    AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
    if(score_ret > *score_max){
        *score_max= score_ret;

/* Wrapper: probe with no minimum score requirement. */
AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
    return av_probe_input_format2(pd, is_opened, &score);
/*
 * Re-probe a stream's accumulated probe_data and, if the detected container
 * maps to a known elementary-stream format, set the stream's codec id/type
 * from the table below.
 * NOTE(review): fragment — table terminator, declarations of score/i, the
 * NULL-fmt early return and "return score;" were lost in extraction.
 */
static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
    /* map of raw-ES demuxer names to codec id + media type */
    static const struct {
        const char *name; enum CodecID id; enum AVMediaType type;
        { "aac"      , CODEC_ID_AAC       , AVMEDIA_TYPE_AUDIO },
        { "ac3"      , CODEC_ID_AC3       , AVMEDIA_TYPE_AUDIO },
        { "dts"      , CODEC_ID_DTS       , AVMEDIA_TYPE_AUDIO },
        { "eac3"     , CODEC_ID_EAC3      , AVMEDIA_TYPE_AUDIO },
        { "h264"     , CODEC_ID_H264      , AVMEDIA_TYPE_VIDEO },
        { "loas"     , CODEC_ID_AAC_LATM  , AVMEDIA_TYPE_AUDIO },
        { "m4v"      , CODEC_ID_MPEG4     , AVMEDIA_TYPE_VIDEO },
        { "mp3"      , CODEC_ID_MP3       , AVMEDIA_TYPE_AUDIO },
        { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },

    AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);

    av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
           pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
    for (i = 0; fmt_id_type[i].name; i++) {
        if (!strcmp(fmt->name, fmt_id_type[i].name)) {
            st->codec->codec_id   = fmt_id_type[i].id;
            st->codec->codec_type = fmt_id_type[i].type;
432 /************************************************************/
433 /* input media file */
/*
 * Call the demuxer's read_header callback and record the data offset.
 * NOTE(review): fragment — error propagation after read_header and the
 * final "return 0;" were lost in extraction.
 */
int av_demuxer_open(AVFormatContext *ic){
    if (ic->iformat->read_header) {
        err = ic->iformat->read_header(ic);

    if (ic->pb && !ic->data_offset)
        ic->data_offset = avio_tell(ic->pb);
/** size of probe buffer, for guessing file type from file contents */
#define PROBE_BUF_MIN 2048
#define PROBE_BUF_MAX (1<<20)

/*
 * Guess the input format by reading a geometrically growing probe buffer
 * (PROBE_BUF_MIN up to max_probe_size) and then rewind the IO context by
 * reusing the probe buffer.
 * NOTE(review): fragment — error-path gotos, the av_free(buf) cleanup,
 * several loop/branch closers and the success return were lost in
 * extraction.
 */
int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
                          const char *filename, void *logctx,
                          unsigned int offset, unsigned int max_probe_size)
    /* negative buf_size encodes the bytes to skip before probing */
    AVProbeData pd = { filename ? filename : "", NULL, -offset };
    unsigned char *buf = NULL;
    int ret = 0, probe_size;

    if (!max_probe_size) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size > PROBE_BUF_MAX) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size < PROBE_BUF_MIN) {
        return AVERROR(EINVAL);

    if (offset >= max_probe_size) {
        return AVERROR(EINVAL);

    for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
        probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
        /* only the final iteration accepts arbitrarily low scores */
        int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
        /* first pass reads the full buffer, later passes only the new half */
        int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;

        if (probe_size < offset) {

        /* read probe data */
        buftmp = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
            return AVERROR(ENOMEM);
        if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
            /* fail if error was not end of file, otherwise, lower score */
            if (ret != AVERROR_EOF) {
            ret = 0;            /* error was end of file, nothing read */

        pd.buf = &buf[offset];

        memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);

        /* guess file format */
        *fmt = av_probe_input_format2(&pd, 1, &score);
            if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
                av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
                av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);

        return AVERROR_INVALIDDATA;

    /* rewind. reuse probe buffer to avoid seeking */
    if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
/* open input file and probe the format if necessary */
/*
 * NOTE(review): fragment — the conditions selecting between the
 * caller-supplied pb, AVFMT_NOFILE and avio_open2() paths, plus several
 * returns and closers, were lost in extraction.
 */
static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
    AVProbeData pd = {filename, NULL, 0};

        /* caller supplied its own AVIOContext */
        s->flags |= AVFMT_FLAG_CUSTOM_IO;
            return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
        else if (s->iformat->flags & AVFMT_NOFILE)
            av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
                                      "will be ignored with AVFMT_NOFILE format.\n");

    if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
        (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))

    if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
                          &s->interrupt_callback, options)) < 0)

    return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
/*
 * Append pkt to a packet list, updating both head and tail pointers; returns
 * a pointer to the stored packet (NULL on allocation failure in upstream).
 * NOTE(review): fragment — the NULL check on pktl, the copy of *pkt into the
 * node and the return statement were lost in extraction.
 */
static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
                               AVPacketList **plast_pktl){
    AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));

        (*plast_pktl)->next = pktl;
        *packet_buffer = pktl;

    /* add the packet in the buffered packet list */
573 static void queue_attached_pictures(AVFormatContext *s)
576 for (i = 0; i < s->nb_streams; i++)
577 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
578 s->streams[i]->discard < AVDISCARD_ALL) {
579 AVPacket copy = s->streams[i]->attached_pic;
580 copy.destruct = NULL;
581 add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end);
/*
 * Open an input stream: allocate/validate the context, apply options, open
 * the IO and probe the format, read the container header, and queue
 * attached pictures.
 * NOTE(review): fragment — the AVClass sanity check preceding the error log,
 * goto-cleanup labels, several closers and the success return were lost in
 * extraction.
 */
int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
    AVFormatContext *s = *ps;
    AVDictionary *tmp = NULL;
    ID3v2ExtraMeta *id3v2_extra_meta = NULL;

    if (!s && !(s = avformat_alloc_context()))
        return AVERROR(ENOMEM);
        av_log(0, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
        return AVERROR(EINVAL);

    /* work on a copy so the caller's dict reports unconsumed options */
        av_dict_copy(&tmp, *options, 0);

    if ((ret = av_opt_set_dict(s, &tmp)) < 0)

    if ((ret = init_input(s, filename, &tmp)) < 0)

    /* check filename in case an image number is expected */
    if (s->iformat->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            ret = AVERROR(EINVAL);

    s->duration = s->start_time = AV_NOPTS_VALUE;
    av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));

    /* allocate private data */
    if (s->iformat->priv_data_size > 0) {
        if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
            ret = AVERROR(ENOMEM);
        if (s->iformat->priv_class) {
            *(const AVClass**)s->priv_data = s->iformat->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)

    /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
        ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);

    if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
        if ((ret = s->iformat->read_header(s)) < 0)

    if (id3v2_extra_meta &&
        (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);

    queue_attached_pictures(s);

    if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
        s->data_offset = avio_tell(s->pb);

    s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;

        av_dict_free(options);

    /* error path cleanup */
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
    avformat_free_context(s);
672 /*******************************************************/
/*
 * Accumulate packet data for a stream still awaiting codec probing
 * (request_probe > 0) and re-run set_codec_from_probe_data() when the
 * buffer doubles or probing ends.
 * NOTE(review): fragment — the pkt==NULL flush path, closers and some
 * conditions were lost in extraction. Also note: the av_realloc() return
 * value below is used unchecked — OOM would dereference NULL; upstream
 * later hardened this, worth verifying.
 */
static void probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
    if(st->request_probe>0){
        AVProbeData *pd = &st->probe_data;
        av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);

            pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
            memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
            pd->buf_size += pkt->size;
            memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);

            st->probe_packets = 0;

        end= s->raw_packet_buffer_remaining_size <= 0
                || st->probe_packets<=0;

        /* re-probe whenever the buffer size crosses a power of two */
        if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
            int score= set_codec_from_probe_data(s, st, pd);
            if(    (st->codec->codec_id != CODEC_ID_NONE && score > AVPROBE_SCORE_MAX/4)
                st->request_probe= -1;
                if(st->codec->codec_id != CODEC_ID_NONE){
                    av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
                    av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
/*
 * Read the next raw packet: first drain the raw packet buffer of streams
 * whose probing finished, else pull from the demuxer, apply codec-id
 * overrides, handle corrupt-packet discarding and feed the codec prober.
 * NOTE(review): fragment — the outer for(;;) loop, returns, goto targets
 * and many closers were lost in extraction.
 */
int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
        AVPacketList *pktl = s->raw_packet_buffer;

            st = s->streams[pkt->stream_index];
            if(st->request_probe <= 0){
                s->raw_packet_buffer = pktl->next;
                s->raw_packet_buffer_remaining_size += pkt->size;

        ret= s->iformat->read_packet(s, pkt);
            if (!pktl || ret == AVERROR(EAGAIN))
            /* demuxer hit EOF/error: flush pending probes */
            for (i = 0; i < s->nb_streams; i++) {
                if (st->probe_packets) {
                    probe_codec(s, st, NULL);
                av_assert0(st->request_probe <= 0);

        if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
            (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
            av_log(s, AV_LOG_WARNING,
                   "Dropped corrupted packet (stream = %d)\n",

        if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
            av_packet_merge_side_data(pkt);

        if(pkt->stream_index >= (unsigned)s->nb_streams){
            av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);

        st= s->streams[pkt->stream_index];

        /* apply forced codec ids from the user/context */
        switch(st->codec->codec_type){
        case AVMEDIA_TYPE_VIDEO:
            if(s->video_codec_id)   st->codec->codec_id= s->video_codec_id;
        case AVMEDIA_TYPE_AUDIO:
            if(s->audio_codec_id)   st->codec->codec_id= s->audio_codec_id;
        case AVMEDIA_TYPE_SUBTITLE:
            if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;

        if(!pktl && st->request_probe <= 0)

        add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
        s->raw_packet_buffer_remaining_size -= pkt->size;

        probe_codec(s, st, pkt);
#if FF_API_READ_PACKET
/**
 * Deprecated public wrapper kept for API compatibility
 * (guarded by FF_API_READ_PACKET); simply forwards to ff_read_packet().
 * (Fix: braces and the matching #endif were lost in extraction.)
 */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    return ff_read_packet(s, pkt);
}
#endif
793 /**********************************************************/
795 static int determinable_frame_size(AVCodecContext *avctx)
797 if (/*avctx->codec_id == CODEC_ID_AAC ||*/
798 avctx->codec_id == CODEC_ID_MP1 ||
799 avctx->codec_id == CODEC_ID_MP2 ||
800 avctx->codec_id == CODEC_ID_MP3/* ||
801 avctx->codec_id == CODEC_ID_CELT*/)
/**
 * Get the number of samples of an audio frame. Return -1 on error.
 *
 * NOTE(review): fragment — the frame_size declaration, the
 * "return frame_size;" after av_get_audio_frame_duration() and the final
 * "return -1;" were lost in extraction.
 */
static int get_audio_frame_size(AVCodecContext *enc, int size, int mux)
    /* give frame_size priority if demuxing */
    if (!mux && enc->frame_size > 1)
        return enc->frame_size;

    if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)

    /* fallback to using frame_size if muxing */
    if (enc->frame_size > 1)
        return enc->frame_size;
/**
 * Return the frame duration in seconds. Return 0 if not available.
 *
 * Writes the duration as the rational *pnum / *pden. For video it prefers
 * r_frame_rate (without a parser), then stream/codec time bases, scaling by
 * repeat_pict; for audio it derives it from the decoded frame size.
 * NOTE(review): fragment — the initial *pnum/*pden zeroing, repeat_pict
 * closers, break statements and the *pnum assignment for audio were lost
 * in extraction.
 */
static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
                                   AVCodecParserContext *pc, AVPacket *pkt)
    switch(st->codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        if (st->r_frame_rate.num && !pc) {
            *pnum = st->r_frame_rate.den;
            *pden = st->r_frame_rate.num;
        } else if(st->time_base.num*1000LL > st->time_base.den) {
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
            *pnum = st->codec->time_base.num;
            *pden = st->codec->time_base.den;
            if (pc && pc->repeat_pict) {
                *pnum = (*pnum) * (1 + pc->repeat_pict);
            //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
            //Thus if we have no parser in such case leave duration undefined.
            if(st->codec->ticks_per_frame>1 && !pc){
    case AVMEDIA_TYPE_AUDIO:
        frame_size = get_audio_frame_size(st->codec, pkt->size, 0);
        if (frame_size <= 0 || st->codec->sample_rate <= 0)
        *pden = st->codec->sample_rate;
/*
 * Return 1 if every frame of this codec is independently decodable (audio
 * always; video for the intra-only codecs listed), else 0.
 * NOTE(review): fragment — several case labels, the "return 1;"/"return 0;"
 * statements and closers were lost in extraction; the visible labels are a
 * subset of the upstream list.
 */
static int is_intra_only(AVCodecContext *enc){
    if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
    }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
        switch(enc->codec_id){
        case CODEC_ID_MJPEGB:
        case CODEC_ID_PRORES:
        case CODEC_ID_RAWVIDEO:
        case CODEC_ID_DVVIDEO:
        case CODEC_ID_HUFFYUV:
        case CODEC_ID_FFVHUFF:
        case CODEC_ID_JPEG2000:
        case CODEC_ID_UTVIDEO:
/*
 * Return the packet-list node following pktl, crossing from the end of the
 * parse queue into the packet buffer.
 * NOTE(review): fragment — the "if (pktl->next) return pktl->next;" fast
 * path and the final "return NULL;" were lost in extraction.
 */
static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
    if (pktl == s->parse_queue_end)
        return s->packet_buffer;
/*
 * Once the first real DTS of a stream is known, convert all previously
 * buffered "relative" timestamps (see RELATIVE_TS_BASE) of that stream into
 * absolute ones and set first_dts/start_time.
 * NOTE(review): fragment — the early "return;", loop "continue", and some
 * closing braces were lost in extraction.
 */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts)
    AVStream *st= s->streams[stream_index];
    AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;

    /* nothing to do if already initialized, dts unusable, or dts itself relative */
    if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))

    st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);

    if (is_relative(pts))
        pts += st->first_dts - RELATIVE_TS_BASE;

    for(; pktl; pktl= get_next_pkt(s, st, pktl)){
        if(pktl->pkt.stream_index != stream_index)

        if(is_relative(pktl->pkt.pts))
            pktl->pkt.pts += st->first_dts - RELATIVE_TS_BASE;

        if(is_relative(pktl->pkt.dts))
            pktl->pkt.dts += st->first_dts - RELATIVE_TS_BASE;

        if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time= pktl->pkt.pts;

    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
/*
 * Fill in dts/pts/duration for buffered packets of a stream that arrived
 * without timestamps, extrapolating from first_dts (or RELATIVE_TS_BASE)
 * using the given per-packet duration.
 * NOTE(review): fragment — break/return statements, a "return;" after the
 * mismatch logs, and several closing braces were lost in extraction.
 */
static void update_initial_durations(AVFormatContext *s, AVStream *st,
                                     int stream_index, int duration)
    AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
    int64_t cur_dts= RELATIVE_TS_BASE;

    if(st->first_dts != AV_NOPTS_VALUE){
        /* walk backwards from first_dts over the leading untimed packets */
        cur_dts= st->first_dts;
        for(; pktl; pktl= get_next_pkt(s, st, pktl)){
            if(pktl->pkt.stream_index == stream_index){
                if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
        if(pktl && pktl->pkt.dts != st->first_dts) {
            av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s in que\n", av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts));
            av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in ques\n", av_ts2str(st->first_dts));
        pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
        st->first_dts = cur_dts;
    }else if(st->cur_dts != RELATIVE_TS_BASE)

    for(; pktl; pktl= get_next_pkt(s, st, pktl)){
        if(pktl->pkt.stream_index != stream_index)
        if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
           && !pktl->pkt.duration){
            pktl->pkt.dts= cur_dts;
            if(!st->codec->has_b_frames)
                pktl->pkt.pts= cur_dts;
//            if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
            pktl->pkt.duration = duration;
        cur_dts = pktl->pkt.dts + pktl->pkt.duration;
    st->cur_dts= cur_dts;
/*
 * Fill in missing pts/dts/duration of a demuxed packet: wrap correction,
 * duration estimation, parser-based DTS sync, and pts/dts interpolation for
 * streams with and without B-frame presentation delay.
 * NOTE(review): fragment — early returns, many branch closers, the
 * H.264/delayed conditions around L460-461 and several assignments were
 * lost in extraction; statement order here is timing-critical, so the code
 * is left untouched.
 */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
    int num, den, presentation_delayed, delay, i;

    if (s->flags & AVFMT_FLAG_NOFILLIN)

    if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
        pkt->dts= AV_NOPTS_VALUE;

    if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
        //FIXME Set low_delay = 0 when has_b_frames = 1
        st->codec->has_b_frames = 1;

    /* do we have a video B-frame ? */
    delay= st->codec->has_b_frames;
    presentation_delayed = 0;

    /* XXX: need has_b_frame, but cannot get it if the codec is
        pc && pc->pict_type != AV_PICTURE_TYPE_B)
        presentation_delayed = 1;

    /* undo a dts wrap-around relative to pts */
    if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > pkt->pts && st->pts_wrap_bits<63){
        pkt->dts -= 1LL<<st->pts_wrap_bits;

    // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
    // we take the conservative approach and discard both
    // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
    if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
        av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
        pkt->dts= AV_NOPTS_VALUE;

    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, pc, pkt);
            pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);

    if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
        update_initial_durations(s, st, pkt->stream_index, pkt->duration);

    /* correct timestamps with byte offset if demuxers only have timestamps
       on packet boundaries */
    if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if(pkt->pts != AV_NOPTS_VALUE)
        if(pkt->dts != AV_NOPTS_VALUE)

    if (pc && pc->dts_sync_point >= 0) {
        // we have synchronization info from the parser
        int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
        int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
            if (pkt->dts != AV_NOPTS_VALUE) {
                // got DTS from the stream, update reference timestamp
                st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
            } else if (st->reference_dts != AV_NOPTS_VALUE) {
                // compute DTS based on reference timestamp
                pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
            if (pc->dts_sync_point > 0)
                st->reference_dts = pkt->dts; // new reference

    /* This may be redundant, but it should not hurt. */
    if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
        presentation_delayed = 1;

//    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
//           presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
    /* interpolate PTS and DTS if they are not present */
    //We skip H264 currently because delay and has_b_frames are not reliably set
    if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->cur_dts;

            /* this is tricky: the dts must be incremented by the duration
               of the frame we are displaying, i.e. the last I- or P-frame */
            if (st->last_IP_duration == 0)
                st->last_IP_duration = pkt->duration;
            if(pkt->dts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts + st->last_IP_duration;
            st->last_IP_duration  = pkt->duration;
            st->last_IP_pts= pkt->pts;
            /* cannot compute PTS if not present (we can compute it only
               by knowing the future */
        } else if (pkt->pts != AV_NOPTS_VALUE ||
                   pkt->dts != AV_NOPTS_VALUE ||
            int duration = pkt->duration;

            if(pkt->pts != AV_NOPTS_VALUE && duration){
                int64_t old_diff= FFABS(st->cur_dts - duration - pkt->pts);
                int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
                if(   old_diff < new_diff && old_diff < (duration>>3)
                   && st->codec->codec_type == AVMEDIA_TYPE_VIDEO
                   && (!strcmp(s->iformat->name, "mpeg") ||
                       !strcmp(s->iformat->name, "mpegts"))){
                    pkt->pts += duration;
                    av_log(s, AV_LOG_WARNING, "Adjusting PTS forward\n");
//                    av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%s size:%d\n",
//                           pkt->stream_index, old_diff, new_diff, pkt->duration, av_ts2str(st->cur_dts), pkt->size);

            /* presentation is not delayed : PTS and DTS are the same */
            if (pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = pkt->dts;
            update_initial_timestamps(s, pkt->stream_index, pkt->pts,
            if (pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = st->cur_dts;
            pkt->dts = pkt->pts;
            if (pkt->pts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->pts + duration;

    if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
        /* keep a sorted window of recent pts to derive a monotone dts */
        st->pts_buffer[0]= pkt->pts;
        for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
        if(pkt->dts == AV_NOPTS_VALUE)
            pkt->dts= st->pts_buffer[0];
        if(st->codec->codec_id == CODEC_ID_H264){ // we skipped it above so we try here
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
        if(pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;

//    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
//           presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));

    /* update flags */
    if(is_intra_only(st->codec))
        pkt->flags |= AV_PKT_FLAG_KEY;
        pkt->convergence_duration = pc->convergence_duration;
/*
 * Free every node of a packet list and reset its head and tail pointers.
 * NOTE(review): fragment — the while loop header, the av_freep(&pktl) of
 * the node itself and the closers were lost in extraction.
 */
static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
        AVPacketList *pktl = *pkt_buf;
        *pkt_buf = pktl->next;
        av_free_packet(&pktl->pkt);
    *pkt_buf_end = NULL;
/**
 * Parse a packet, add all split parts to parse_queue
 *
 * @param pkt packet to parse, NULL when flushing the parser at end of stream
 *
 * NOTE(review): fragment — the flush-pkt substitution, loop/branch closers,
 * error gotos and the final return were lost in extraction; parser state
 * handling is order-sensitive, so the surviving code is left untouched.
 */
static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
    AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
    AVStream *st = s->streams[stream_index];
    uint8_t *data = pkt ? pkt->data : NULL;
    int size      = pkt ? pkt->size : 0;
    int ret = 0, got_output = 0;

        av_init_packet(&flush_pkt);
    } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
        // preserve 0-size sync packets
        compute_pkt_fields(s, st, st->parser, pkt);

    while (size > 0 || (pkt == &flush_pkt && got_output)) {

        av_init_packet(&out_pkt);
        len = av_parser_parse2(st->parser,  st->codec,
                               &out_pkt.data, &out_pkt.size, data, size,
                               pkt->pts, pkt->dts, pkt->pos);

        /* the parser consumed the timestamps; do not reuse them */
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
        /* increment read pointer */

        got_output = !!out_pkt.size;

        /* set the duration */
        out_pkt.duration = 0;
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (st->codec->sample_rate > 0) {
                out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
                                                    (AVRational){ 1, st->codec->sample_rate },
        } else if (st->codec->time_base.num != 0 &&
                   st->codec->time_base.den != 0) {
            out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
                                                st->codec->time_base,

        out_pkt.stream_index = st->index;
        out_pkt.pts = st->parser->pts;
        out_pkt.dts = st->parser->dts;
        out_pkt.pos = st->parser->pos;

        if (st->parser->key_frame == 1 ||
            (st->parser->key_frame == -1 &&
             st->parser->pict_type == AV_PICTURE_TYPE_I))
            out_pkt.flags |= AV_PKT_FLAG_KEY;

        if(st->parser->key_frame == -1 && st->parser->pict_type==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
            out_pkt.flags |= AV_PKT_FLAG_KEY;

        compute_pkt_fields(s, st, st->parser, &out_pkt);

        if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
            out_pkt.flags & AV_PKT_FLAG_KEY) {
            int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? out_pkt.pos : st->parser->frame_offset;
            ff_reduce_index(s, st->index);
            av_add_index_entry(st, pos, out_pkt.dts,
                               0, 0, AVINDEX_KEYFRAME);

        /* steal ownership when the parser returned the input buffer as-is */
        if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
            out_pkt.destruct = pkt->destruct;
            pkt->destruct = NULL;
        if ((ret = av_dup_packet(&out_pkt)) < 0)

        if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
            av_free_packet(&out_pkt);
            ret = AVERROR(ENOMEM);

    /* end of the stream => close and free the parser */
    if (pkt == &flush_pkt) {
        av_parser_close(st->parser);

    av_free_packet(pkt);
/*
 * Pop the head packet of a packet list into the output packet, clearing the
 * tail pointer when the list becomes empty.
 * NOTE(review): fragment — the final parameter of the signature (the output
 * AVPacket* in upstream), the *pkt copy, node free and return were lost in
 * extraction.
 */
static int read_from_packet_buffer(AVPacketList **pkt_buffer,
                                   AVPacketList **pkt_buffer_end,
    av_assert0(*pkt_buffer);
    *pkt_buffer = pktl->next;
        *pkt_buffer_end = NULL;
/*
 * Core of av_read_frame(): pull raw packets via ff_read_packet(), lazily
 * create parsers per stream, run packets through parse_packet() or emit
 * them directly (with compute_pkt_fields/index updates), honoring
 * skip_to_keyframe; drains parse_queue at EOF.
 * NOTE(review): fragment — cur_pkt/st declarations, returns, got_packet
 * assignments, "continue" statements and many closers were lost in
 * extraction.
 */
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
    int ret = 0, i, got_packet = 0;

    av_init_packet(pkt);

    while (!got_packet && !s->parse_queue) {

        /* read next packet */
        ret = ff_read_packet(s, &cur_pkt);
            if (ret == AVERROR(EAGAIN))
            /* flush the parsers */
            for(i = 0; i < s->nb_streams; i++) {
                if (st->parser && st->need_parsing)
                    parse_packet(s, NULL, st->index);
            /* all remaining packets are now in parse_queue =>
             * really terminate parsing */

        st = s->streams[cur_pkt.stream_index];

        if (cur_pkt.pts != AV_NOPTS_VALUE &&
            cur_pkt.dts != AV_NOPTS_VALUE &&
            cur_pkt.pts < cur_pkt.dts) {
            av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
                   cur_pkt.stream_index,
                   av_ts2str(cur_pkt.pts),
                   av_ts2str(cur_pkt.dts),
        if (s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
                   cur_pkt.stream_index,
                   av_ts2str(cur_pkt.pts),
                   av_ts2str(cur_pkt.dts),

        if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codec->codec_id);
                av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
                       "%s, packets or times may be invalid.\n",
                       avcodec_get_name(st->codec->codec_id));
                /* no parser available: just output the raw packets */
                st->need_parsing = AVSTREAM_PARSE_NONE;
            } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
                st->parser->flags |= PARSER_FLAG_ONCE;
            } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
                st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;

        if (!st->need_parsing || !st->parser) {
            /* no parsing needed: we just output the packet as is */
            compute_pkt_fields(s, st, NULL, pkt);
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
        } else if (st->discard < AVDISCARD_ALL) {
            if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
            av_free_packet(&cur_pkt);
        if (pkt->flags & AV_PKT_FLAG_KEY)
            st->skip_to_keyframe = 0;
        if (st->skip_to_keyframe) {
            av_free_packet(&cur_pkt);

    if (!got_packet && s->parse_queue)
        ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);

    if(s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
               av_ts2str(pkt->pts),
               av_ts2str(pkt->dts),
/**
 * Public frame reader.  Without AVFMT_FLAG_GENPTS it simply drains the
 * packet buffer or calls read_frame_internal().  With GENPTS it buffers
 * packets and tries to resolve missing pts values from the dts of later
 * packets on the same stream before returning anything.
 * NOTE(review): listing is fragmentary; loop structure and several error
 * paths are not visible here.
 */
1380 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1382 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
/* fast path (no pts generation): buffered packet first, else read one */
1387 ret = s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
1388 &s->packet_buffer_end,
1390 read_frame_internal(s, pkt);
1395 AVPacketList *pktl = s->packet_buffer;
1398 AVPacket *next_pkt = &pktl->pkt;
1400 if (next_pkt->dts != AV_NOPTS_VALUE) {
1401 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1402 // last dts seen for this stream. if any of packets following
1403 // current one had no dts, we will set this to AV_NOPTS_VALUE.
1404 int64_t last_dts = next_pkt->dts;
/* scan later buffered packets of the same stream: the dts of the next
 * packet (modulo timestamp wrap) gives the missing pts */
1405 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1406 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1407 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1408 if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1409 next_pkt->pts = pktl->pkt.dts;
1411 if (last_dts != AV_NOPTS_VALUE) {
1412 // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1413 last_dts = pktl->pkt.dts;
1418 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1419 // Fix the last reference frame having no pts (for MXF etc.).
1420 // We only do this when
1422 // 2. we are not able to resolve a pts value for current packet.
1423 // 3. the packets for this stream at the end of the files had valid dts.
1424 next_pkt->pts = last_dts + next_pkt->duration;
1426 pktl = s->packet_buffer;
1429 /* read packet from packet buffer, if there is data */
1430 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1431 next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1432 ret = read_from_packet_buffer(&s->packet_buffer,
1433 &s->packet_buffer_end, pkt);
1438 ret = read_frame_internal(s, pkt);
1440 if (pktl && ret != AVERROR(EAGAIN)) {
/* duplicate before buffering so the data outlives the demuxer's buffers */
1447 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1448 &s->packet_buffer_end)) < 0)
1449 return AVERROR(ENOMEM);
/* inject pending skip_samples as packet side data for the decoder */
1454 if(s->streams[pkt->stream_index]->skip_samples) {
/* NOTE(review): av_packet_new_side_data() can return NULL on allocation
 * failure; p is written unconditionally below — confirm against the full
 * source whether this is checked */
1455 uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
1456 AV_WL32(p, s->streams[pkt->stream_index]->skip_samples);
1457 av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d\n", s->streams[pkt->stream_index]->skip_samples);
1458 s->streams[pkt->stream_index]->skip_samples = 0;
/* strip the internal RELATIVE_TS_BASE offset before handing to the user */
1461 if (is_relative(pkt->dts))
1462 pkt->dts -= RELATIVE_TS_BASE;
1463 if (is_relative(pkt->pts))
1464 pkt->pts -= RELATIVE_TS_BASE;
1468 /* XXX: suppress the packet queue */
/* Free all three internal packet buffers (parse queue, resolved-packet
 * buffer, raw probe buffer) and reset the raw-buffer budget. */
1469 static void flush_packet_queue(AVFormatContext *s)
1471 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1472 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1473 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1475 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1478 /*******************************************************/
/**
 * Pick a default stream: the first video stream that is not an attached
 * picture; failing that, the first audio stream; failing that, stream 0.
 */
1481 int av_find_default_stream_index(AVFormatContext *s)
1483 int first_audio_index = -1;
1487 if (s->nb_streams <= 0)
1489 for(i = 0; i < s->nb_streams; i++) {
1491 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1492 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1495 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1496 first_audio_index = i;
1498 return first_audio_index >= 0 ? first_audio_index : 0;
1502 * Flush the frame reader.
/**
 * Flush all demuxer read state: drop queued packets, close per-stream
 * parsers and reset every stream's timestamp bookkeeping.  Called around
 * seeks so stale timing state cannot leak across the seek point.
 */
1504 void ff_read_frame_flush(AVFormatContext *s)
1509 flush_packet_queue(s);
1511 /* for each stream, reset read state */
1512 for(i = 0; i < s->nb_streams; i++) {
1516 av_parser_close(st->parser);
1519 st->last_IP_pts = AV_NOPTS_VALUE;
/* streams with no known first_dts restart on the relative-timestamp base */
1520 if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1521 else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1522 st->reference_dts = AV_NOPTS_VALUE;
1524 st->probe_packets = MAX_PROBE_PACKETS;
/* clear the pts reorder buffer */
1526 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1527 st->pts_buffer[j]= AV_NOPTS_VALUE;
/**
 * Propagate a seek target: set every stream's cur_dts to @timestamp
 * (given in @ref_st's time base), rescaled into each stream's own base.
 */
1531 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1535 for(i = 0; i < s->nb_streams; i++) {
1536 AVStream *st = s->streams[i];
/* rescale: timestamp * (st.den * ref.num) / (st.num * ref.den) */
1538 st->cur_dts = av_rescale(timestamp,
1539 st->time_base.den * (int64_t)ref_st->time_base.num,
1540 st->time_base.num * (int64_t)ref_st->time_base.den);
/**
 * Keep the seek index within s->max_index_size bytes: once the entry count
 * reaches the limit, halve it by keeping every second entry in place.
 */
1544 void ff_reduce_index(AVFormatContext *s, int stream_index)
1546 AVStream *st= s->streams[stream_index];
1547 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1549 if((unsigned)st->nb_index_entries >= max_entries){
/* compact in place: entry i takes the value of entry 2*i */
1551 for(i=0; 2*i<st->nb_index_entries; i++)
1552 st->index_entries[i]= st->index_entries[2*i];
1553 st->nb_index_entries= i;
/**
 * Insert one entry into a sorted seek index, growing the array as needed.
 * If an entry with the same timestamp exists it is updated in place;
 * otherwise the tail is shifted to keep the array sorted by timestamp.
 * NOTE(review): listing is fragmentary — the return value and some branch
 * bodies are not visible here.
 */
1557 int ff_add_index_entry(AVIndexEntry **index_entries,
1558 int *nb_index_entries,
1559 unsigned int *index_entries_allocated_size,
1560 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1562 AVIndexEntry *entries, *ie;
/* guard against overflow of the (count+1)*sizeof() allocation below */
1565 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1568 if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1569 timestamp -= RELATIVE_TS_BASE;
1571 entries = av_fast_realloc(*index_entries,
1572 index_entries_allocated_size,
1573 (*nb_index_entries + 1) *
1574 sizeof(AVIndexEntry))
1578 *index_entries= entries;
/* locate the insertion point; ANY disables the keyframe-only filter */
1580 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
/* append case: timestamp is larger than everything present */
1583 index= (*nb_index_entries)++;
1584 ie= &entries[index];
1585 assert(index==0 || ie[-1].timestamp < timestamp);
1587 ie= &entries[index];
1588 if(ie->timestamp != timestamp){
1589 if(ie->timestamp <= timestamp)
/* make room for the new entry at 'index' */
1591 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1592 (*nb_index_entries)++;
1593 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1594 distance= ie->min_distance;
1598 ie->timestamp = timestamp;
1599 ie->min_distance= distance;
/**
 * Public wrapper: add a seek-index entry to @st by delegating to
 * ff_add_index_entry() with the stream's own index storage.
 */
1606 int av_add_index_entry(AVStream *st,
1607 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1609 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1610 &st->index_entries_allocated_size, pos,
1611 timestamp, size, distance, flags);
/**
 * Binary-search a sorted index for @wanted_timestamp.  AVSEEK_FLAG_BACKWARD
 * selects the entry at or before the target, otherwise at or after; unless
 * AVSEEK_FLAG_ANY is set the result is then walked to the nearest keyframe
 * entry.  NOTE(review): listing is fragmentary — loop header and bounds
 * initialization are not visible here.
 */
1614 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1615 int64_t wanted_timestamp, int flags)
1623 //optimize appending index entries at the end
1624 if(b && entries[b-1].timestamp < wanted_timestamp)
1629 timestamp = entries[m].timestamp;
1630 if(timestamp >= wanted_timestamp)
1632 if(timestamp <= wanted_timestamp)
1635 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
/* without ANY: step toward a keyframe entry in the requested direction */
1637 if(!(flags & AVSEEK_FLAG_ANY)){
1638 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1639 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/**
 * Public wrapper: search @st's seek index for @wanted_timestamp via
 * ff_index_search_timestamp().
 */
1648 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1651 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1652 wanted_timestamp, flags);
/**
 * Seek using the demuxer's read_timestamp() callback and binary search.
 * Cached index entries around the target are used to narrow the initial
 * [pos_min, pos_max] window before ff_gen_search() runs, then the file is
 * repositioned and all stream cur_dts values updated.
 * NOTE(review): listing is fragmentary — several branch bodies and the
 * return paths are not visible here.
 */
1655 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1657 AVInputFormat *avif= s->iformat;
1658 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1659 int64_t ts_min, ts_max, ts;
1664 if (stream_index < 0)
1667 av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1670 ts_min= AV_NOPTS_VALUE;
1671 pos_limit= -1; //gcc falsely says it may be uninitialized
1673 st= s->streams[stream_index];
/* use cached index entries to seed the search window */
1674 if(st->index_entries){
1677 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1678 index= FFMAX(index, 0);
1679 e= &st->index_entries[index];
1681 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1683 ts_min= e->timestamp;
1684 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1685 pos_min, av_ts2str(ts_min));
/* and an entry past the target, if any, bounds the window from above */
1690 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1691 assert(index < st->nb_index_entries);
1693 e= &st->index_entries[index];
1694 assert(e->timestamp >= target_ts);
1696 ts_max= e->timestamp;
1697 pos_limit= pos_max - e->min_distance;
1698 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
1699 pos_max, pos_limit, av_ts2str(ts_max));
1703 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
/* reposition the file and reset read state at the found position */
1708 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1711 ff_read_frame_flush(s);
1712 ff_update_cur_dts(s, st, ts);
/**
 * Generic timestamp-based position search.  Establishes timestamps at the
 * start and end of the file when not supplied, then narrows in on
 * @target_ts by interpolation, falling back to bisection and finally
 * linear search, probing with the demuxer's read_timestamp() callback.
 * NOTE(review): listing is fragmentary — loop bodies, some assignments and
 * the return statement are not visible here.
 */
1717 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1718 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1719 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1720 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1723 int64_t start_pos, filesize;
1726 av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
/* establish the timestamp at the start of the data if unknown */
1728 if(ts_min == AV_NOPTS_VALUE){
1729 pos_min = s->data_offset;
1730 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1731 if (ts_min == AV_NOPTS_VALUE)
1735 if(ts_min >= target_ts){
/* establish the timestamp near the end, stepping back until one is found */
1740 if(ts_max == AV_NOPTS_VALUE){
1742 filesize = avio_size(s->pb);
1743 pos_max = filesize - 1;
1746 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1748 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1749 if (ts_max == AV_NOPTS_VALUE)
/* extend ts_max to the very last readable timestamp */
1753 int64_t tmp_pos= pos_max + 1;
1754 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1755 if(tmp_ts == AV_NOPTS_VALUE)
1759 if(tmp_pos >= filesize)
1765 if(ts_max <= target_ts){
/* degenerate windows: inverted or single-timestamp range */
1770 if(ts_min > ts_max){
1772 }else if(ts_min == ts_max){
1777 while (pos_min < pos_limit) {
1778 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1779 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1780 assert(pos_limit <= pos_max);
1783 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1784 // interpolate position (better than dichotomy)
1785 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1786 + pos_min - approximate_keyframe_distance;
1787 }else if(no_change==1){
1788 // bisection, if interpolation failed to change min or max pos last time
1789 pos = (pos_min + pos_limit)>>1;
1791 /* linear search if bisection failed, can only happen if there
1792 are very few or no keyframes between min/max */
1797 else if(pos > pos_limit)
1801 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1806 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1807 pos_min, pos, pos_max,
1808 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1809 pos_limit, start_pos, no_change);
1810 if(ts == AV_NOPTS_VALUE){
1811 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1814 assert(ts != AV_NOPTS_VALUE);
/* shrink the window toward the target */
1815 if (target_ts <= ts) {
1816 pos_limit = start_pos - 1;
1820 if (target_ts >= ts) {
/* pick the boundary matching the requested seek direction */
1826 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1827 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1830 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1832 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1833 av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1834 pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
/**
 * Byte-position seek: clamp @pos to [data_offset, file end] and reposition
 * the I/O context there.
 */
1840 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1841 int64_t pos_min, pos_max;
1843 pos_min = s->data_offset;
1844 pos_max = avio_size(s->pb) - 1;
1846 if (pos < pos_min) pos= pos_min;
1847 else if(pos > pos_max) pos= pos_max;
1849 avio_seek(s->pb, pos, SEEK_SET);
/**
 * Index-based generic seek.  If the target lies beyond the current index,
 * read frames forward (building the generic index as a side effect) until
 * it is covered, then jump to the matching index entry.
 * NOTE(review): listing is fragmentary — loop structure and several return
 * paths are not visible here.
 */
1854 static int seek_frame_generic(AVFormatContext *s,
1855 int stream_index, int64_t timestamp, int flags)
1862 st = s->streams[stream_index];
1864 index = av_index_search_timestamp(st, timestamp, flags);
/* target earlier than anything indexed: restart from the first entry */
1866 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
/* target beyond the index: read forward until it is reached */
1869 if(index < 0 || index==st->nb_index_entries-1){
1873 if(st->nb_index_entries){
1874 assert(st->index_entries);
1875 ie= &st->index_entries[st->nb_index_entries-1];
1876 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1878 ff_update_cur_dts(s, st, ie->timestamp);
1880 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1886 read_status = av_read_frame(s, &pkt);
1887 } while (read_status == AVERROR(EAGAIN));
1888 if (read_status < 0)
1890 av_free_packet(&pkt);
1891 if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1892 if(pkt.flags & AV_PKT_FLAG_KEY)
/* give up on streams that never yield a keyframe past the target */
1894 if(nonkey++ > 1000 && st->codec->codec_id != CODEC_ID_CDGRAPHICS){
1895 av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1900 index = av_index_search_timestamp(st, timestamp, flags);
1905 ff_read_frame_flush(s);
/* prefer the demuxer's own read_seek if it succeeds */
1906 AV_NOWARN_DEPRECATED(
1907 if (s->iformat->read_seek){
1908 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1912 ie = &st->index_entries[index];
1913 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1915 ff_update_cur_dts(s, st, ie->timestamp);
/**
 * Seek dispatcher: byte seek if requested (and allowed), otherwise the
 * demuxer's read_seek, then binary search via read_timestamp, then the
 * generic index-based seek — in that order of preference.
 * NOTE(review): listing is fragmentary — some fall-through returns are not
 * visible here.
 */
1920 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1921 int64_t timestamp, int flags)
1926 if (flags & AVSEEK_FLAG_BYTE) {
1927 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1929 ff_read_frame_flush(s);
1930 return seek_frame_byte(s, stream_index, timestamp, flags);
/* no stream given: pick a default and convert from AV_TIME_BASE units */
1933 if(stream_index < 0){
1934 stream_index= av_find_default_stream_index(s);
1935 if(stream_index < 0)
1938 st= s->streams[stream_index];
1939 /* timestamp for default must be expressed in AV_TIME_BASE units */
1940 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1943 /* first, we try the format specific seek */
1944 AV_NOWARN_DEPRECATED(
1945 if (s->iformat->read_seek) {
1946 ff_read_frame_flush(s);
1947 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
/* fallbacks: binary search, then generic index-based seek */
1955 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1956 ff_read_frame_flush(s);
1957 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1958 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1959 ff_read_frame_flush(s);
1960 return seek_frame_generic(s, stream_index, timestamp, flags);
/**
 * Public seek entry point: delegate to seek_frame_internal() and, on
 * success, re-queue attached pictures so they are delivered again.
 */
1966 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1968 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1971 queue_attached_pictures(s);
/**
 * Range-constrained seek (new API): use the demuxer's read_seek2 when
 * available; otherwise emulate via av_seek_frame(), choosing the direction
 * by whichever of min_ts/max_ts is closer to ts and retrying the other
 * direction on failure.
 * NOTE(review): listing is fragmentary — some return statements are not
 * visible here.
 */
1976 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
/* ts must lie inside [min_ts, max_ts] */
1978 if(min_ts > ts || max_ts < ts)
1981 if (s->iformat->read_seek2) {
1983 ff_read_frame_flush(s);
1984 ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1987 queue_attached_pictures(s);
1991 if(s->iformat->read_timestamp){
1992 //try to seek via read_timestamp()
1995 //Fallback to old API if new is not implemented but old is
1996 //Note the old API has somewhat different semantics
1997 AV_NOWARN_DEPRECATED(
1998 if (s->iformat->read_seek || 1) {
1999 int dir = (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0);
2000 int ret = av_seek_frame(s, stream_index, ts, flags | dir);
/* first attempt failed: retry toward the other range boundary */
2001 if (ret<0 && ts != min_ts && max_ts != ts) {
2002 ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
2004 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
2013 /*******************************************************/
2016 * Return TRUE if the stream has accurate duration in any stream.
2018 * @return TRUE if the stream has accurate duration for at least one component.
/* Return nonzero if any stream, or the container itself, carries a known
 * duration.  NOTE(review): return statements are not visible in this
 * fragmentary listing. */
2020 static int has_duration(AVFormatContext *ic)
2025 for(i = 0;i < ic->nb_streams; i++) {
2026 st = ic->streams[i];
2027 if (st->duration != AV_NOPTS_VALUE)
2030 if (ic->duration != AV_NOPTS_VALUE)
2036 * Estimate the stream timings from the one of each components.
2038 * Also computes the global bitrate if possible.
/**
 * Derive container-level start_time, duration and bit_rate from the
 * per-stream values.  Subtitle/data stream start times are tracked
 * separately and only adopted when no primary stream provides one (or the
 * text start is within one second before it).
 * NOTE(review): listing is fragmentary — some closing braces and
 * statements are not visible here.
 */
2040 static void update_stream_timings(AVFormatContext *ic)
2042 int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2043 int64_t duration, duration1, filesize;
2047 start_time = INT64_MAX;
2048 start_time_text = INT64_MAX;
2049 end_time = INT64_MIN;
2050 duration = INT64_MIN;
2051 for(i = 0;i < ic->nb_streams; i++) {
2052 st = ic->streams[i];
2053 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
/* work in AV_TIME_BASE units for cross-stream comparison */
2054 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
2055 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
2056 if (start_time1 < start_time_text)
2057 start_time_text = start_time1;
2059 start_time = FFMIN(start_time, start_time1);
2060 if (st->duration != AV_NOPTS_VALUE) {
2061 end_time1 = start_time1
2062 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2063 end_time = FFMAX(end_time, end_time1);
2066 if (st->duration != AV_NOPTS_VALUE) {
2067 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2068 duration = FFMAX(duration, duration1);
/* adopt the text-stream start only when no primary start exists, or the
 * text start precedes it by less than one second */
2071 if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2072 start_time = start_time_text;
2073 else if(start_time > start_time_text)
2074 av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);
2076 if (start_time != INT64_MAX) {
2077 ic->start_time = start_time;
2078 if (end_time != INT64_MIN)
2079 duration = FFMAX(duration, end_time - start_time);
2081 if (duration != INT64_MIN && ic->duration == AV_NOPTS_VALUE) {
2082 ic->duration = duration;
/* overall bitrate = file size / duration */
2084 if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2085 /* compute the bitrate */
2086 ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
2087 (double)ic->duration;
/**
 * Recompute container timings, then back-fill start_time/duration for any
 * stream still missing them, rescaled into the stream's own time base.
 */
2091 static void fill_all_stream_timings(AVFormatContext *ic)
2096 update_stream_timings(ic);
2097 for(i = 0;i < ic->nb_streams; i++) {
2098 st = ic->streams[i];
2099 if (st->start_time == AV_NOPTS_VALUE) {
2100 if(ic->start_time != AV_NOPTS_VALUE)
2101 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2102 if(ic->duration != AV_NOPTS_VALUE)
2103 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/**
 * Least precise estimator: sum per-stream codec bitrates when the
 * container bitrate is unknown, then derive stream durations from
 * filesize * 8 / bit_rate.
 * NOTE(review): listing is fragmentary — some guards and closing braces
 * are not visible here.
 */
2108 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2110 int64_t filesize, duration;
2114 /* if bit_rate is already set, we believe it */
2115 if (ic->bit_rate <= 0) {
2117 for(i=0;i<ic->nb_streams;i++) {
2118 st = ic->streams[i];
2119 if (st->codec->bit_rate > 0)
2120 bit_rate += st->codec->bit_rate;
2122 ic->bit_rate = bit_rate;
2125 /* if duration is already set, we believe it */
2126 if (ic->duration == AV_NOPTS_VALUE &&
2127 ic->bit_rate != 0) {
2128 filesize = ic->pb ? avio_size(ic->pb) : 0;
2130 for(i = 0; i < ic->nb_streams; i++) {
2131 st = ic->streams[i];
/* duration (stream tb) = 8 * filesize * tb.den / (bit_rate * tb.num) */
2132 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2133 if (st->duration == AV_NOPTS_VALUE)
2134 st->duration = duration;
2140 #define DURATION_MAX_READ_SIZE 250000
2141 #define DURATION_MAX_RETRY 3
2143 /* only usable for MPEG-PS streams */
/**
 * Estimate durations by reading packets near the end of the file and
 * taking the highest pts seen, retrying with a larger tail window up to
 * DURATION_MAX_RETRY times.  Only usable for MPEG-PS-style streams.
 * Restores the original file position before returning.
 * NOTE(review): listing is fragmentary — loop headers and several
 * statements are not visible here.
 */
2144 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2146 AVPacket pkt1, *pkt = &pkt1;
2148 int read_size, i, ret;
2150 int64_t filesize, offset, duration;
2153 /* flush packet queue */
2154 flush_packet_queue(ic);
2156 for (i=0; i<ic->nb_streams; i++) {
2157 st = ic->streams[i];
2158 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2159 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2162 av_parser_close(st->parser);
2167 /* estimate the end time (duration) */
2168 /* XXX: may need to support wrapping */
2169 filesize = ic->pb ? avio_size(ic->pb) : 0;
2170 end_time = AV_NOPTS_VALUE;
/* seek into the tail window; window doubles with each retry */
2172 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2176 avio_seek(ic->pb, offset, SEEK_SET);
2179 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2183 ret = ff_read_packet(ic, pkt);
2184 } while(ret == AVERROR(EAGAIN));
2187 read_size += pkt->size;
2188 st = ic->streams[pkt->stream_index];
2189 if (pkt->pts != AV_NOPTS_VALUE &&
2190 (st->start_time != AV_NOPTS_VALUE ||
2191 st->first_dts != AV_NOPTS_VALUE)) {
/* duration = last pts - stream start (or first dts) */
2192 duration = end_time = pkt->pts;
2193 if (st->start_time != AV_NOPTS_VALUE)
2194 duration -= st->start_time;
2196 duration -= st->first_dts;
/* negative duration => timestamp wrapped; compensate by one wrap period */
2198 duration += 1LL<<st->pts_wrap_bits;
2200 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
2201 st->duration = duration;
2204 av_free_packet(pkt);
2206 }while( end_time==AV_NOPTS_VALUE
2207 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2208 && ++retry <= DURATION_MAX_RETRY);
2210 fill_all_stream_timings(ic);
/* restore the original position and reset per-stream read state */
2212 avio_seek(ic->pb, old_offset, SEEK_SET);
2213 for (i=0; i<ic->nb_streams; i++) {
2215 st->cur_dts= st->first_dts;
2216 st->last_IP_pts = AV_NOPTS_VALUE;
2217 st->reference_dts = AV_NOPTS_VALUE;
/**
 * Duration-estimation dispatcher: accurate pts scan for seekable MPEG-PS /
 * MPEG-TS, per-stream timings when at least one stream has them, and a
 * bitrate-based guess as last resort.  Records which method was used in
 * ic->duration_estimation_method.
 */
2221 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2225 /* get the file size, if possible */
2226 if (ic->iformat->flags & AVFMT_NOFILE) {
2229 file_size = avio_size(ic->pb);
2230 file_size = FFMAX(0, file_size);
2233 if ((!strcmp(ic->iformat->name, "mpeg") ||
2234 !strcmp(ic->iformat->name, "mpegts")) &&
2235 file_size && ic->pb->seekable) {
2236 /* get accurate estimate from the PTSes */
2237 estimate_timings_from_pts(ic, old_offset);
2238 ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
2239 } else if (has_duration(ic)) {
2240 /* at least one component has timings - we use them for all
2242 fill_all_stream_timings(ic);
2243 ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
2245 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2246 /* less precise: use bitrate info */
2247 estimate_timings_from_bit_rate(ic);
2248 ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE;
2250 update_stream_timings(ic);
/* debug dump of the resulting per-stream and container timings */
2254 AVStream av_unused *st;
2255 for(i = 0;i < ic->nb_streams; i++) {
2256 st = ic->streams[i];
2257 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2258 (double) st->start_time / AV_TIME_BASE,
2259 (double) st->duration / AV_TIME_BASE);
2261 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2262 (double) ic->start_time / AV_TIME_BASE,
2263 (double) ic->duration / AV_TIME_BASE,
2264 ic->bit_rate / 1000);
/**
 * Return nonzero when the stream's codec context carries enough parameters
 * (per media type) for stream-info probing to consider it complete.
 * NOTE(review): listing is fragmentary — some case bodies are not visible
 * here.
 */
2268 static int has_codec_parameters(AVStream *st)
2270 AVCodecContext *avctx = st->codec;
2272 switch (avctx->codec_type) {
2273 case AVMEDIA_TYPE_AUDIO:
2274 val = avctx->sample_rate && avctx->channels;
2275 if (!avctx->frame_size && determinable_frame_size(avctx))
/* sample_fmt must be known once a decoder has been tried */
2277 if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2280 case AVMEDIA_TYPE_VIDEO:
2282 if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
2285 case AVMEDIA_TYPE_DATA:
2286 if(avctx->codec_id == CODEC_ID_NONE) return 1;
2291 return avctx->codec_id != CODEC_ID_NONE && val != 0;
/**
 * For H.264 only: decide whether enough frames were decoded to trust the
 * guessed reorder delay (has_b_frames).  With the H.264 decoder built in,
 * an exact num_reorder_frames match short-circuits the frame-count
 * heuristic.  All other codecs always return 1.
 */
2294 static int has_decode_delay_been_guessed(AVStream *st)
2296 if(st->codec->codec_id != CODEC_ID_H264) return 1;
2297 #if CONFIG_H264_DECODER
2298 if(st->codec->has_b_frames &&
2299 avpriv_h264_has_num_reorder_frames(st->codec) == st->codec->has_b_frames)
/* heuristic thresholds scale with the reported reorder depth */
2302 if(st->codec->has_b_frames<3)
2303 return st->info->nb_decoded_frames >= 6;
2304 else if(st->codec->has_b_frames<4)
2305 return st->info->nb_decoded_frames >= 18;
2307 return st->info->nb_decoded_frames >= 20;
2310 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/**
 * Probe helper: open the stream's decoder if necessary (single-threaded,
 * so H.264 SPS/PPS end up in extradata) and decode from @avpkt until the
 * missing codec parameters / reorder delay have been determined.
 * st->info->found_decoder records the open attempt: 1 ok, -1 failed.
 * NOTE(review): listing is fragmentary — some early returns and loop
 * details are not visible here.
 */
2311 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2314 int got_picture = 1, ret = 0;
2316 AVPacket pkt = *avpkt;
2318 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2319 AVDictionary *thread_opt = NULL;
2321 codec = st->codec->codec ? st->codec->codec :
2322 avcodec_find_decoder(st->codec->codec_id);
2325 st->info->found_decoder = -1;
2329 /* force thread count to 1 since the h264 decoder will not extract SPS
2330 * and PPS to extradata during multi-threaded decoding */
2331 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2332 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2334 av_dict_free(&thread_opt);
2336 st->info->found_decoder = -1;
2339 st->info->found_decoder = 1;
2340 } else if (!st->info->found_decoder)
2341 st->info->found_decoder = 1;
2343 if (st->info->found_decoder < 0)
/* decode until parameters and reorder delay are known (an empty packet
 * with got_picture still set drains the decoder's delay) */
2346 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2348 (!has_codec_parameters(st) ||
2349 !has_decode_delay_been_guessed(st) ||
2350 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2352 avcodec_get_frame_defaults(&picture);
2353 switch(st->codec->codec_type) {
2354 case AVMEDIA_TYPE_VIDEO:
2355 ret = avcodec_decode_video2(st->codec, &picture,
2356 &got_picture, &pkt);
2358 case AVMEDIA_TYPE_AUDIO:
2359 ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
2366 st->info->nb_decoded_frames++;
2372 if(!pkt.data && !got_picture)
/* Look up the container tag for a codec id in a tag table terminated by
 * CODEC_ID_NONE.  NOTE(review): only the loop header is visible in this
 * fragmentary listing. */
2377 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2379 while (tags->id != CODEC_ID_NONE) {
/**
 * Reverse lookup: map a container tag to a codec id.  A first pass matches
 * the tag exactly; a second pass retries case-insensitively via
 * avpriv_toupper4().  Returns CODEC_ID_NONE when nothing matches.
 */
2387 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2390 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2391 if(tag == tags[i].tag)
2394 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2395 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2398 return CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for @id; the per-table
 * lookup is delegated to ff_codec_get_tag().  NOTE(review): the return
 * statements are not visible in this fragmentary listing. */
2401 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2404 for(i=0; tags && tags[i]; i++){
2405 int tag= ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for @tag, returning the
 * first matching codec id, or CODEC_ID_NONE. */
2411 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2414 for(i=0; tags && tags[i]; i++){
2415 enum CodecID id= ff_codec_get_id(tags[i], tag);
2416 if(id!=CODEC_ID_NONE) return id;
2418 return CODEC_ID_NONE;
/**
 * Fill in missing chapter end times: each open-ended chapter ends at the
 * start of the next chapter that begins after it, or at the file's end
 * time (start_time + duration), or at its own start when nothing bounds it.
 */
2421 static void compute_chapters_end(AVFormatContext *s)
2424 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2426 for (i = 0; i < s->nb_chapters; i++)
2427 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2428 AVChapter *ch = s->chapters[i];
/* candidate end: overall file end, rescaled into the chapter time base */
2429 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
/* clip against the nearest later-starting chapter */
2432 for (j = 0; j < s->nb_chapters; j++) {
2433 AVChapter *ch1 = s->chapters[j];
2434 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2435 if (j != i && next_start > ch->start && next_start < end)
2438 ch->end = (end == INT64_MAX) ? ch->start : end;
/* Enumerate standard frame rates (scaled by 1001*12) for fps guessing:
 * indices below 60*12 cover n/1.001 rates; the remainder are the exact
 * integer rates listed in the table. */
2442 static int get_std_framerate(int i){
2443 if(i<60*12) return (i+1)*1001;
2444 else return ((const int[]){24,30,60,12,15,48})[i-60*12]*1000*12;
2448 * Is the time base unreliable.
2449 * This is a heuristic to balance between quick acceptance of the values in
2450 * the headers vs. some extra checks.
2451 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2452 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2453 * And there are "variable" fps files this needs to detect as well.
/* Heuristic: the codec time base is considered unreliable when it is
 * implausibly coarse (< 5 fps equivalent) or fine (>= 101x), or the codec
 * is one known to misreport it (MPEG-2 field repeat, H.264).
 * NOTE(review): the closing condition/return of the expression is not
 * visible in this fragmentary listing. */
2455 static int tb_unreliable(AVCodecContext *c){
2456 if( c->time_base.den >= 101L*c->time_base.num
2457 || c->time_base.den < 5L*c->time_base.num
2458 /* || c->codec_tag == AV_RL32("DIVX")
2459 || c->codec_tag == AV_RL32("XVID")*/
2460 || c->codec_id == CODEC_ID_MPEG2VIDEO
2461 || c->codec_id == CODEC_ID_H264
2467 #if FF_API_FORMAT_PARAMETERS
/* Deprecated public wrapper kept for ABI compatibility; forwards to
 * avformat_find_stream_info() with no options. */
2468 int av_find_stream_info(AVFormatContext *ic)
2470 return avformat_find_stream_info(ic, NULL);
2474 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2476 int i, count, ret, read_size, j;
2478 AVPacket pkt1, *pkt;
2479 int64_t old_offset = avio_tell(ic->pb);
2480 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2481 int flush_codecs = 1;
2484 av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
2486 for(i=0;i<ic->nb_streams;i++) {
2488 AVDictionary *thread_opt = NULL;
2489 st = ic->streams[i];
2491 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2492 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2493 /* if(!st->time_base.num)
2495 if(!st->codec->time_base.num)
2496 st->codec->time_base= st->time_base;
2498 //only for the split stuff
2499 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2500 st->parser = av_parser_init(st->codec->codec_id);
2502 if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
2503 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2504 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
2505 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
2507 } else if (st->need_parsing) {
2508 av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
2509 "%s, packets or times may be invalid.\n",
2510 avcodec_get_name(st->codec->codec_id));
2513 codec = st->codec->codec ? st->codec->codec :
2514 avcodec_find_decoder(st->codec->codec_id);
2516 /* force thread count to 1 since the h264 decoder will not extract SPS
2517 * and PPS to extradata during multi-threaded decoding */
2518 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2520 /* Ensure that subtitle_header is properly set. */
2521 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2522 && codec && !st->codec->codec)
2523 avcodec_open2(st->codec, codec, options ? &options[i]
2526 //try to just open decoders, in case this is enough to get parameters
2527 if (!has_codec_parameters(st)) {
2528 if (codec && !st->codec->codec)
2529 avcodec_open2(st->codec, codec, options ? &options[i]
2533 av_dict_free(&thread_opt);
2536 for (i=0; i<ic->nb_streams; i++) {
2537 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2543 if (ff_check_interrupt(&ic->interrupt_callback)){
2545 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2549 /* check if one codec still needs to be handled */
2550 for(i=0;i<ic->nb_streams;i++) {
2551 int fps_analyze_framecount = 20;
2553 st = ic->streams[i];
2554 if (!has_codec_parameters(st))
2556 /* if the timebase is coarse (like the usual millisecond precision
2557 of mkv), we need to analyze more frames to reliably arrive at
2559 if (av_q2d(st->time_base) > 0.0005)
2560 fps_analyze_framecount *= 2;
2561 if (ic->fps_probe_size >= 0)
2562 fps_analyze_framecount = ic->fps_probe_size;
2563 /* variable fps and no guess at the real fps */
2564 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2565 && st->info->duration_count < fps_analyze_framecount
2566 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2568 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2570 if (st->first_dts == AV_NOPTS_VALUE &&
2571 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2572 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2575 if (i == ic->nb_streams) {
2576 /* NOTE: if the format has no header, then we need to read
2577 some packets to get most of the streams, so we cannot
2579 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2580 /* if we found the info for all the codecs, we can stop */
2582 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2587 /* we did not get all the codec info, but we read too much data */
2588 if (read_size >= ic->probesize) {
2590 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2591 for (i = 0; i < ic->nb_streams; i++)
2592 if (!ic->streams[i]->r_frame_rate.num &&
2593 ic->streams[i]->info->duration_count <= 1)
2594 av_log(ic, AV_LOG_WARNING,
2595 "Stream #%d: not enough frames to estimate rate; "
2596 "consider increasing probesize\n", i);
2600 /* NOTE: a new stream can be added there if no header in file
2601 (AVFMTCTX_NOHEADER) */
2602 ret = read_frame_internal(ic, &pkt1);
2603 if (ret == AVERROR(EAGAIN))
2611 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2612 if ((ret = av_dup_packet(pkt)) < 0)
2613 goto find_stream_info_err;
2615 read_size += pkt->size;
2617 st = ic->streams[pkt->stream_index];
2618 if (st->codec_info_nb_frames>1) {
2620 if (st->time_base.den > 0)
2621 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
2622 if (st->avg_frame_rate.num > 0)
2623 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, (AVRational){st->avg_frame_rate.den, st->avg_frame_rate.num}, AV_TIME_BASE_Q));
2625 if (t >= ic->max_analyze_duration) {
2626 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached at %"PRId64"\n", ic->max_analyze_duration, t);
2629 st->info->codec_info_duration += pkt->duration;
2632 int64_t last = st->info->last_dts;
2634 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2635 double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2636 int64_t duration= pkt->dts - last;
2638 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2639 // av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2640 for (i=0; i<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); i++) {
2641 int framerate= get_std_framerate(i);
2642 double sdts= dts*framerate/(1001*12);
2644 int ticks= lrintf(sdts+j*0.5);
2645 double error= sdts - ticks + j*0.5;
2646 st->info->duration_error[j][0][i] += error;
2647 st->info->duration_error[j][1][i] += error*error;
2650 st->info->duration_count++;
2651 // ignore the first 4 values, they might have some random jitter
2652 if (st->info->duration_count > 3)
2653 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2655 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2656 st->info->last_dts = pkt->dts;
2658 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2659 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2660 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2661 st->codec->extradata_size= i;
2662 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2663 if (!st->codec->extradata)
2664 return AVERROR(ENOMEM);
2665 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2666 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2670 /* if still no information, we try to open the codec and to
2671 decompress the frame. We try to avoid that in most cases as
2672 it takes longer and uses more memory. For MPEG-4, we need to
2673 decompress for QuickTime.
2675 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2676 least one frame of codec data, this makes sure the codec initializes
2677 the channel configuration and does not only trust the values from the container.
2679 try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2681 st->codec_info_nb_frames++;
2686 AVPacket empty_pkt = { 0 };
2688 av_init_packet(&empty_pkt);
2690 ret = -1; /* we could not have all the codec parameters before EOF */
2691 for(i=0;i<ic->nb_streams;i++) {
2692 st = ic->streams[i];
2694 /* flush the decoders */
2695 if (st->info->found_decoder == 1) {
2697 err = try_decode_frame(st, &empty_pkt,
2698 (options && i < orig_nb_streams) ?
2699 &options[i] : NULL);
2700 } while (err > 0 && !has_codec_parameters(st));
2703 av_log(ic, AV_LOG_INFO,
2704 "decoding for stream %d failed\n", st->index);
2708 if (!has_codec_parameters(st)){
2710 avcodec_string(buf, sizeof(buf), st->codec, 0);
2711 av_log(ic, AV_LOG_WARNING,
2712 "Could not find codec parameters (%s)\n", buf);
2719 // close codecs which were opened in try_decode_frame()
2720 for(i=0;i<ic->nb_streams;i++) {
2721 st = ic->streams[i];
2722 avcodec_close(st->codec);
2724 for(i=0;i<ic->nb_streams;i++) {
2725 st = ic->streams[i];
2726 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2727 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
2728 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2729 if(ff_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
2730 st->codec->codec_tag= tag;
2733 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2734 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2735 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2736 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2737 // the check for tb_unreliable() is not completely correct, since this is not about handling
2738 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2739 // ipmovie.c produces.
2740 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
2741 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2742 if (st->info->duration_count && !st->r_frame_rate.num
2743 && tb_unreliable(st->codec) /*&&
2744 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2745 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2747 double best_error= 0.01;
2749 for (j=0; j<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); j++) {
2752 if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
2754 if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
2757 int n= st->info->duration_count;
2758 double a= st->info->duration_error[k][0][j] / n;
2759 double error= st->info->duration_error[k][1][j]/n - a*a;
2761 if(error < best_error && best_error> 0.000000001){
2763 num = get_std_framerate(j);
2766 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
2769 // do not increase frame rate by more than 1 % in order to match a standard rate.
2770 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2771 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2774 if (!st->r_frame_rate.num){
2775 if( st->codec->time_base.den * (int64_t)st->time_base.num
2776 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2777 st->r_frame_rate.num = st->codec->time_base.den;
2778 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2780 st->r_frame_rate.num = st->time_base.den;
2781 st->r_frame_rate.den = st->time_base.num;
2784 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2785 if(!st->codec->bits_per_coded_sample)
2786 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2787 // set stream disposition based on audio service type
2788 switch (st->codec->audio_service_type) {
2789 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2790 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2791 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2792 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2793 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2794 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2795 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2796 st->disposition = AV_DISPOSITION_COMMENT; break;
2797 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2798 st->disposition = AV_DISPOSITION_KARAOKE; break;
2803 estimate_timings(ic, old_offset);
2805 compute_chapters_end(ic);
2807 find_stream_info_err:
2808 for (i=0; i < ic->nb_streams; i++) {
2809 if (ic->streams[i]->codec)
2810 ic->streams[i]->codec->thread_count = 0;
2811 av_freep(&ic->streams[i]->info);
2814 av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
/* Find the program that contains stream index s, resuming the search after
 * 'last' (pass NULL to start from the first program).
 * NOTE(review): several lines of this function are elided in this view
 * (loop advance past 'last' and the no-match return path are not visible). */
2818 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
2822     for (i = 0; i < ic->nb_programs; i++) {
2823         if (ic->programs[i] == last) {
2827         for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2828             if (ic->programs[i]->stream_index[j] == s)
2829                 return ic->programs[i];
/* Select the "best" stream of the given media type.
 * Preference is given to the stream with the most frames seen during probing
 * (codec_info_nb_frames); hearing/visually-impaired streams are deprioritized.
 * If related_stream >= 0 and no specific stream is wanted, the search is first
 * restricted to the program containing that stream, then widened if nothing
 * matches.  Returns the stream index, AVERROR_STREAM_NOT_FOUND, or
 * AVERROR_DECODER_NOT_FOUND; *decoder_ret (if non-NULL) gets the decoder.
 * NOTE(review): some lines (flag checks, continue statements) are elided here. */
2835 int av_find_best_stream(AVFormatContext *ic,
2836                         enum AVMediaType type,
2837                         int wanted_stream_nb,
2839                         AVCodec **decoder_ret,
2842     int i, nb_streams = ic->nb_streams;
2843     int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2844     unsigned *program = NULL;
2845     AVCodec *decoder = NULL, *best_decoder = NULL;
2847     if (related_stream >= 0 && wanted_stream_nb < 0) {
         /* narrow the search to the program the related stream belongs to */
2848         AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
2850             program = p->stream_index;
2851             nb_streams = p->nb_stream_indexes;
2854     for (i = 0; i < nb_streams; i++) {
2855         int real_stream_index = program ? program[i] : i;
2856         AVStream *st = ic->streams[real_stream_index];
2857         AVCodecContext *avctx = st->codec;
2858         if (avctx->codec_type != type)
2860         if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2862         if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2865             decoder = avcodec_find_decoder(st->codec->codec_id);
2868                 ret = AVERROR_DECODER_NOT_FOUND;
         /* keep the candidate with the highest probed frame count */
2872         if (best_count >= st->codec_info_nb_frames)
2874         best_count = st->codec_info_nb_frames;
2875         ret = real_stream_index;
2876         best_decoder = decoder;
2877         if (program && i == nb_streams - 1 && ret < 0) {
2879             nb_streams = ic->nb_streams;
2880             i = 0; /* no related stream found, try again with everything */
2884         *decoder_ret = best_decoder;
2888 /*******************************************************/
/* Resume playback on a (network) stream: prefer the demuxer's own
 * read_play callback, otherwise un-pause the underlying AVIOContext.
 * Returns AVERROR(ENOSYS) when neither mechanism is available. */
2890 int av_read_play(AVFormatContext *s)
2892     if (s->iformat->read_play)
2893         return s->iformat->read_play(s);
2895         return avio_pause(s->pb, 0);
2896     return AVERROR(ENOSYS);
/* Pause playback on a (network) stream: mirror of av_read_play().
 * Prefers the demuxer's read_pause callback, falls back to pausing the
 * AVIOContext; AVERROR(ENOSYS) if neither exists. */
2899 int av_read_pause(AVFormatContext *s)
2901     if (s->iformat->read_pause)
2902         return s->iformat->read_pause(s);
2904         return avio_pause(s->pb, 1);
2905     return AVERROR(ENOSYS);
/* Free an AVFormatContext and everything it owns: per-stream data
 * (parser, attached picture, metadata, index entries, codec context and its
 * extradata/subtitle_header, priv_data, probe info), all programs, all
 * chapters, the context's own metadata/priv_data and the stream array. */
2908 void avformat_free_context(AVFormatContext *s)
2914     if (s->iformat && s->iformat->priv_class && s->priv_data)
2915         av_opt_free(s->priv_data);
2917     for(i=0;i<s->nb_streams;i++) {
2918         /* free all data in a stream component */
2921         av_parser_close(st->parser);
2923         if (st->attached_pic.data)
2924             av_free_packet(&st->attached_pic);
2925         av_dict_free(&st->metadata);
2926         av_freep(&st->index_entries);
2927         av_freep(&st->codec->extradata);
2928         av_freep(&st->codec->subtitle_header);
2929         av_freep(&st->codec);
2930         av_freep(&st->priv_data);
2931         av_freep(&st->info);
     /* programs are freed back-to-front; each owns metadata + index array */
2934     for(i=s->nb_programs-1; i>=0; i--) {
2935         av_dict_free(&s->programs[i]->metadata);
2936         av_freep(&s->programs[i]->stream_index);
2937         av_freep(&s->programs[i]);
2939     av_freep(&s->programs);
2940     av_freep(&s->priv_data);
2941     while(s->nb_chapters--) {
2942         av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2943         av_freep(&s->chapters[s->nb_chapters]);
2945     av_freep(&s->chapters);
2946     av_dict_free(&s->metadata);
2947     av_freep(&s->streams);
#if FF_API_CLOSE_INPUT_FILE
/* Deprecated wrapper kept for ABI compatibility; forwards to
 * avformat_close_input(). */
2952 void av_close_input_file(AVFormatContext *s)
2954     avformat_close_input(&s);
/* Close an input context: flush queued packets, call the demuxer's
 * read_close, free the context, and NULL the caller's pointer.  The
 * AVIOContext is only closed when it is owned by us (not AVFMT_NOFILE and
 * not AVFMT_FLAG_CUSTOM_IO).
 * NOTE(review): the *ps = NULL assignment and avio_close call are elided
 * in this view. */
2958 void avformat_close_input(AVFormatContext **ps)
2960     AVFormatContext *s = *ps;
2961     AVIOContext *pb = (s->iformat && (s->iformat->flags & AVFMT_NOFILE)) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2963     flush_packet_queue(s);
2964     if (s->iformat && (s->iformat->read_close))
2965         s->iformat->read_close(s);
2966     avformat_free_context(s);
#if FF_API_NEW_STREAM
/* Deprecated stream constructor; forwards to avformat_new_stream() with no
 * codec hint.  (The id assignment and return are elided in this view.) */
2973 AVStream *av_new_stream(AVFormatContext *s, int id)
2975     AVStream *st = avformat_new_stream(s, NULL);
/* Append a new stream to the context.  Allocates the AVStream, its probe
 * info and codec context (optionally pre-configured for codec c), and
 * initializes all timing fields to "unknown" (AV_NOPTS_VALUE) with a
 * default MPEG-like 90kHz time base.  Returns the stream or NULL on error. */
2982 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
2988     if (s->nb_streams >= INT_MAX/sizeof(*streams))
2990     streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2993     s->streams = streams;
2995     st = av_mallocz(sizeof(AVStream));
2998     if (!(st->info = av_mallocz(sizeof(*st->info)))) {
3002     st->info->last_dts = AV_NOPTS_VALUE;
3004     st->codec = avcodec_alloc_context3(c);
3006         /* no default bitrate if decoding */
3007         st->codec->bit_rate = 0;
3009     st->index = s->nb_streams;
3010     st->start_time = AV_NOPTS_VALUE;
3011     st->duration = AV_NOPTS_VALUE;
3012     /* we set the current DTS to 0 so that formats without any timestamps
3013        but durations get some timestamps, formats with some unknown
3014        timestamps have their first few packets buffered and the
3015        timestamps corrected before they are returned to the user */
3016     st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
3017     st->first_dts = AV_NOPTS_VALUE;
3018     st->probe_packets = MAX_PROBE_PACKETS;
3020     /* default pts setting is MPEG-like */
3021     avpriv_set_pts_info(st, 33, 1, 90000);
3022     st->last_IP_pts = AV_NOPTS_VALUE;
3023     for(i=0; i<MAX_REORDER_DELAY+1; i++)
3024         st->pts_buffer[i]= AV_NOPTS_VALUE;
3025     st->reference_dts = AV_NOPTS_VALUE;
3027     st->sample_aspect_ratio = (AVRational){0,1};
3029     s->streams[s->nb_streams++] = st;
/* Get or create the program with the given id.  An existing program with a
 * matching id is reused; otherwise a new one is allocated, added to the
 * context's program array and given AVDISCARD_NONE.
 * NOTE(review): the id assignment and return statement are elided here. */
3033 AVProgram *av_new_program(AVFormatContext *ac, int id)
3035     AVProgram *program=NULL;
3038     av_dlog(ac, "new_program: id=0x%04x\n", id);
3040     for(i=0; i<ac->nb_programs; i++)
3041         if(ac->programs[i]->id == id)
3042             program = ac->programs[i];
3045         program = av_mallocz(sizeof(AVProgram));
3048         dynarray_add(&ac->programs, &ac->nb_programs, program);
3049         program->discard = AVDISCARD_NONE;
/* Get or create the chapter with the given id and fill in its time base,
 * start/end and title (stored in metadata).  An existing chapter with the
 * same id is updated in place; a new one is otherwise allocated and added. */
3056 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
3058     AVChapter *chapter = NULL;
3061     for(i=0; i<s->nb_chapters; i++)
3062         if(s->chapters[i]->id == id)
3063             chapter = s->chapters[i];
3066         chapter= av_mallocz(sizeof(AVChapter));
3069         dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3071     av_dict_set(&chapter->metadata, "title", title, 0);
3073     chapter->time_base= time_base;
3074     chapter->start = start;
3080 /************************************************************/
3081 /* output media file */
/* Allocate an output AVFormatContext.  The muxer is chosen from, in order:
 * the explicit oformat argument, the short format name, or the filename
 * extension.  Allocates and option-initializes the muxer's priv_data and
 * copies the filename into the context.  On failure everything is freed and
 * a negative AVERROR code is returned via *avctx/ret. */
3083 int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
3084                                    const char *format, const char *filename)
3086     AVFormatContext *s = avformat_alloc_context();
3095             oformat = av_guess_format(format, NULL, NULL);
3097                 av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
3098                 ret = AVERROR(EINVAL);
3102             oformat = av_guess_format(NULL, filename, NULL);
3104                 ret = AVERROR(EINVAL);
3105                 av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
3112     s->oformat = oformat;
3113     if (s->oformat->priv_data_size > 0) {
3114         s->priv_data = av_mallocz(s->oformat->priv_data_size);
3117         if (s->oformat->priv_class) {
3118             *(const AVClass**)s->priv_data= s->oformat->priv_class;
3119             av_opt_set_defaults(s->priv_data);
3122         s->priv_data = NULL;
3125         av_strlcpy(s->filename, filename, sizeof(s->filename));
3129     av_log(s, AV_LOG_ERROR, "Out of memory\n");
3130     ret = AVERROR(ENOMEM);
3132     avformat_free_context(s);
#if FF_API_ALLOC_OUTPUT_CONTEXT
/* Deprecated variant of avformat_alloc_output_context2(); returns the
 * context directly (NULL on error) instead of an error code. */
3137 AVFormatContext *avformat_alloc_output_context(const char *format,
3138                                                AVOutputFormat *oformat, const char *filename)
3140     AVFormatContext *avctx;
3141     int ret = avformat_alloc_output_context2(&avctx, oformat, format, filename);
3142     return ret < 0 ? NULL : avctx;
/* Check that the stream's (codec_tag, codec_id) pair is consistent with the
 * muxer's codec_tag tables.  Returns non-zero when the combination is
 * acceptable.  See the rule summary in the comment below; for an id known
 * under a different tag, failure depends on strict_std_compliance. */
3146 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
3148     const AVCodecTag *avctag;
3150     enum CodecID id = CODEC_ID_NONE;
3151     unsigned int tag = 0;
3154      * Check that tag + id is in the table
3155      * If neither is in the table -> OK
3156      * If tag is in the table with another id -> FAIL
3157      * If id is in the table with another tag -> FAIL unless strict < normal
3159     for (n = 0; s->oformat->codec_tag[n]; n++) {
3160         avctag = s->oformat->codec_tag[n];
3161         while (avctag->id != CODEC_ID_NONE) {
3162             if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
3164                 if (id == st->codec->codec_id)
3167             if (avctag->id == st->codec->codec_id)
3172     if (id != CODEC_ID_NONE)
3174     if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
/* Write the container header.  Applies caller options to the context and
 * the muxer's priv_data, sanity-checks every stream (sample rate, block
 * align, time base, dimensions, aspect-ratio consistency, codec tags,
 * global-header flags), stamps the "encoder" metadata (unless bitexact),
 * invokes the muxer's write_header callback, and initializes per-stream PTS
 * generation via frac_init().  Returns 0 on success or a negative AVERROR. */
3179 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
3183     AVDictionary *tmp = NULL;
3186         av_dict_copy(&tmp, *options, 0);
3187     if ((ret = av_opt_set_dict(s, &tmp)) < 0)
3189     if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
3190         (ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3193     // some sanity checks
3194     if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
3195         av_log(s, AV_LOG_ERROR, "no streams\n");
3196         ret = AVERROR(EINVAL);
3200     for(i=0;i<s->nb_streams;i++) {
3203         switch (st->codec->codec_type) {
3204         case AVMEDIA_TYPE_AUDIO:
3205             if(st->codec->sample_rate<=0){
3206                 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
3207                 ret = AVERROR(EINVAL);
             /* derive block_align for PCM-style codecs from bits/sample */
3210             if(!st->codec->block_align)
3211                 st->codec->block_align = st->codec->channels *
3212                     av_get_bits_per_sample(st->codec->codec_id) >> 3;
3214         case AVMEDIA_TYPE_VIDEO:
3215             if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
3216                 av_log(s, AV_LOG_ERROR, "time base not set\n");
3217                 ret = AVERROR(EINVAL);
3220             if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
3221                 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
3222                 ret = AVERROR(EINVAL);
             /* stream-level and codec-level SAR must agree within ~0.4% */
3225             if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)
3226                && FFABS(av_q2d(st->sample_aspect_ratio) - av_q2d(st->codec->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
3228                 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
3229                        "(%d/%d) and encoder layer (%d/%d)\n",
3230                        st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3231                        st->codec->sample_aspect_ratio.num,
3232                        st->codec->sample_aspect_ratio.den);
3233                 ret = AVERROR(EINVAL);
3239         if(s->oformat->codec_tag){
3240             if(   st->codec->codec_tag
3241                && st->codec->codec_id == CODEC_ID_RAWVIDEO
3242                && (av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 || av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) ==MKTAG('r', 'a', 'w', ' '))
3243                && !validate_codec_tag(s, st)){
3244                 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi/mov, we override it here
3245                 st->codec->codec_tag= 0;
3247             if(st->codec->codec_tag){
3248                 if (!validate_codec_tag(s, st)) {
3249                     char tagbuf[32], cortag[32];
3250                     av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
3251                     av_get_codec_tag_string(cortag, sizeof(cortag), av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id));
3252                     av_log(s, AV_LOG_ERROR,
3253                            "Tag %s/0x%08x incompatible with output codec id '%d' (%s)\n",
3254                            tagbuf, st->codec->codec_tag, st->codec->codec_id, cortag);
3255                     ret = AVERROR_INVALIDDATA;
3259                 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
3262         if(s->oformat->flags & AVFMT_GLOBALHEADER &&
3263             !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
3264           av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
3267     if (!s->priv_data && s->oformat->priv_data_size > 0) {
3268         s->priv_data = av_mallocz(s->oformat->priv_data_size);
3269         if (!s->priv_data) {
3270             ret = AVERROR(ENOMEM);
3273         if (s->oformat->priv_class) {
3274             *(const AVClass**)s->priv_data= s->oformat->priv_class;
3275             av_opt_set_defaults(s->priv_data);
3276             if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3281     /* set muxer identification string */
3282     if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
3283         av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
3286     if(s->oformat->write_header){
3287         ret = s->oformat->write_header(s);
3292     /* init PTS generation */
3293     for(i=0;i<s->nb_streams;i++) {
3294         int64_t den = AV_NOPTS_VALUE;
3297         switch (st->codec->codec_type) {
3298         case AVMEDIA_TYPE_AUDIO:
3299             den = (int64_t)st->time_base.num * st->codec->sample_rate;
3301         case AVMEDIA_TYPE_VIDEO:
3302             den = (int64_t)st->time_base.num * st->codec->time_base.den;
3307         if (den != AV_NOPTS_VALUE) {
3309                 ret = AVERROR_INVALIDDATA;
3312             frac_init(&st->pts, 0, 0, den);
3317         av_dict_free(options);
//FIXME merge with compute_pkt_fields
/* Fill in / validate timestamp fields of an outgoing packet before muxing:
 * derive duration from the frame rate when missing, synthesize pts/dts for
 * encoders that don't set them, reorder pts through a small buffer to get a
 * monotonic dts when B-frame delay is present, reject non-monotonic dts and
 * pts < dts, and advance the stream's fractional pts counter. */
3327 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
3328     int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
3329     int num, den, frame_size, i;
3331     av_dlog(s, "compute_pkt_fields2: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
3332             av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), delay, pkt->size, pkt->stream_index);
3334     /* duration field */
3335     if (pkt->duration == 0) {
3336         compute_frame_duration(&num, &den, st, NULL, pkt);
3338             pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
3342     if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
3345     //XXX/FIXME this is a temporary hack until all encoders output pts
3346     if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
3349             av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
3353 //        pkt->pts= st->cur_dts;
3354         pkt->pts= st->pts.val;
3357     //calculate dts from pts
3358     if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
3359         st->pts_buffer[0]= pkt->pts;
3360         for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
3361             st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
3362         for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
3363             FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
3365         pkt->dts= st->pts_buffer[0];
     /* dts must be strictly increasing unless AVFMT_TS_NONSTRICT allows equal */
3368     if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE &&
3369         ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
3370           st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
3371         av_log(s, AV_LOG_ERROR,
3372                "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
3373                st->index, av_ts2str(st->cur_dts), av_ts2str(pkt->dts));
3374         return AVERROR(EINVAL);
3376     if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3377         av_log(s, AV_LOG_ERROR, "pts (%s) < dts (%s) in stream %d\n",
3378                av_ts2str(pkt->pts), av_ts2str(pkt->dts), st->index);
3379         return AVERROR(EINVAL);
3382 //    av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%s dts2:%s\n", av_ts2str(pkt->pts), av_ts2str(pkt->dts));
3383     st->cur_dts= pkt->dts;
3384     st->pts.val= pkt->dts;
3387     switch (st->codec->codec_type) {
3388     case AVMEDIA_TYPE_AUDIO:
3389         frame_size = get_audio_frame_size(st->codec, pkt->size, 1);
3391         /* HACK/FIXME, we skip the initial 0 size packets as they are most
3392            likely equal to the encoder delay, but it would be better if we
3393            had the real timestamps from the encoder */
3394         if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3395             frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3398     case AVMEDIA_TYPE_VIDEO:
3399         frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Write a packet directly (no interleaving).  Muxers with
 * AVFMT_ALLOW_FLUSH receive the packet (possibly NULL) untouched; otherwise
 * the packet's timestamp fields are first filled/validated by
 * compute_pkt_fields2() and the stream's frame counter is bumped. */
3407 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3412         if (s->oformat->flags & AVFMT_ALLOW_FLUSH)
3413             return s->oformat->write_packet(s, pkt);
3417     ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
3419     if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3422     ret= s->oformat->write_packet(s, pkt);
3425         s->streams[pkt->stream_index]->nb_frames++;
3429 #define CHUNK_START 0x1000
/* Insert a packet into the muxing interleave queue, keeping the queue
 * ordered according to 'compare'.  The packet is deep-copied (av_dup_packet)
 * so the caller's buffer is never freed here.  When chunked interleaving is
 * enabled (max_chunk_size/max_chunk_duration), packets are grouped into
 * chunks and the first packet of each chunk is flagged with CHUNK_START;
 * insertion then only happens at chunk boundaries.  Also updates the
 * stream's last_in_packet_buffer tail pointer. */
3431 int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3432                              int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3434     AVPacketList **next_point, *this_pktl;
3435     AVStream *st= s->streams[pkt->stream_index];
3436     int chunked= s->max_chunk_size || s->max_chunk_duration;
3438     this_pktl = av_mallocz(sizeof(AVPacketList));
3440         return AVERROR(ENOMEM);
3441     this_pktl->pkt= *pkt;
3442     pkt->destruct= NULL;             // do not free original but only the copy
3443     av_dup_packet(&this_pktl->pkt);  // duplicate the packet if it uses non-alloced memory
3445     if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3446         next_point = &(st->last_in_packet_buffer->next);
3448         next_point = &s->packet_buffer;
         /* chunk accounting: start a new chunk once size/duration budget is hit */
3453             uint64_t max= av_rescale_q(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base);
3454             if(   st->interleaver_chunk_size     + pkt->size     <= s->max_chunk_size-1U
3455                && st->interleaver_chunk_duration + pkt->duration <= max-1U){
3456                 st->interleaver_chunk_size     += pkt->size;
3457                 st->interleaver_chunk_duration += pkt->duration;
3460                 st->interleaver_chunk_size     =
3461                 st->interleaver_chunk_duration = 0;
3462                 this_pktl->pkt.flags |= CHUNK_START;
3466         if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3468                   && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
3469                       || !compare(s, &(*next_point)->pkt, pkt))){
3470                 next_point= &(*next_point)->next;
3475             next_point = &(s->packet_buffer_end->next);
3478     assert(!*next_point);
3480     s->packet_buffer_end= this_pktl;
3483     this_pktl->next= *next_point;
3485     s->streams[pkt->stream_index]->last_in_packet_buffer=
3486     *next_point= this_pktl;
/* Ordering predicate for dts-based interleaving: returns non-zero when
 * 'next' should be muxed before 'pkt'.  When audio_preload is set, audio
 * packets are biased earlier by the preload amount (computed in exact
 * integer arithmetic when the rescale could overflow).  Ties are broken by
 * stream index for a stable order. */
3490 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3492     AVStream *st = s->streams[ pkt ->stream_index];
3493     AVStream *st2= s->streams[ next->stream_index];
3494     int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3496     if(s->audio_preload && ((st->codec->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codec->codec_type == AVMEDIA_TYPE_AUDIO))){
3497         int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3498         int64_t ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3500             ts= ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
3501                -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
3504             comp= (ts>ts2) - (ts<ts2);
3508         return pkt->stream_index < next->stream_index;
/* Interleave packets by dts.  The new packet (if any) is queued, then one
 * packet is emitted into *out when either every stream has buffered data,
 * the caller requests a flush, or the buffered dts spread exceeds 20 s with
 * only non-interleaved (subtitle) streams missing.  Returns 1 when a packet
 * was output, 0 when more input is needed. */
3512 int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3513                                  AVPacket *pkt, int flush)
3516     int stream_count=0, noninterleaved_count=0;
3517     int64_t delta_dts_max = 0;
3521         ret = ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
3526     for(i=0; i < s->nb_streams; i++) {
3527         if (s->streams[i]->last_in_packet_buffer) {
3529         } else if(s->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
3530             ++noninterleaved_count;
3534     if (s->nb_streams == stream_count) {
         /* measure how far the buffered streams have drifted apart in dts */
3537         for(i=0; i < s->nb_streams; i++) {
3538             if (s->streams[i]->last_in_packet_buffer) {
3540                     av_rescale_q(s->streams[i]->last_in_packet_buffer->pkt.dts,
3541                                 s->streams[i]->time_base,
3543                     av_rescale_q(s->packet_buffer->pkt.dts,
3544                                 s->streams[s->packet_buffer->pkt.stream_index]->time_base,
3546                 delta_dts_max= FFMAX(delta_dts_max, delta_dts);
3549         if(s->nb_streams == stream_count+noninterleaved_count &&
3550            delta_dts_max > 20*AV_TIME_BASE) {
3551             av_log(s, AV_LOG_DEBUG, "flushing with %d noninterleaved\n", noninterleaved_count);
3555     if(stream_count && flush){
3556         pktl= s->packet_buffer;
3559         s->packet_buffer= pktl->next;
3560         if(!s->packet_buffer)
3561             s->packet_buffer_end= NULL;
3563         if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3564             s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3568         av_init_packet(out);
#if FF_API_INTERLEAVE_PACKET
/* Deprecated public wrapper around ff_interleave_packet_per_dts(). */
3574 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3575                                  AVPacket *pkt, int flush)
3577     return ff_interleave_packet_per_dts(s, out, pkt, flush);
/**
 * Interleave an AVPacket correctly so it can be muxed.
 * Dispatches to the muxer's own interleave_packet callback when it has one,
 * otherwise falls back to the generic dts-based interleaver.
 * @param out the interleaved packet will be output here
 * @param in the input packet
 * @param flush 1 if no further packets are available as input and all
 *              remaining packets should be output
 * @return 1 if a packet was output, 0 if no packet could be output,
 *         < 0 if an error occurred
 */
3590 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3591     if (s->oformat->interleave_packet) {
3592         int ret = s->oformat->interleave_packet(s, out, in, flush);
3597         return ff_interleave_packet_per_dts(s, out, in, flush);
/* Write a packet with interleaving.  Zero-sized audio packets are dropped
 * (encoder-delay hack), timestamps are filled/validated via
 * compute_pkt_fields2(), then packets are drained from the interleaver and
 * handed to the muxer one by one, bumping each stream's frame counter.
 * Propagates any pending AVIOContext error. */
3600 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3604         AVStream *st= s->streams[ pkt->stream_index];
3606         //FIXME/XXX/HACK drop zero sized packets
3607         if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3610         av_dlog(s, "av_interleaved_write_frame size:%d dts:%s pts:%s\n",
3611                 pkt->size, av_ts2str(pkt->dts), av_ts2str(pkt->pts));
3612         if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3615         if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3616             return AVERROR(EINVAL);
3618         av_dlog(s, "av_interleaved_write_frame FLUSH\n");
3624         int ret= interleave_packet(s, &opkt, pkt, flush);
3625         if(ret<=0) //FIXME cleanup needed for ret<0 ?
3628         ret= s->oformat->write_packet(s, &opkt);
3630             s->streams[opkt.stream_index]->nb_frames++;
3632         av_free_packet(&opkt);
3637     if(s->pb && s->pb->error)
3638         return s->pb->error;
/* Finish muxing: flush all remaining interleaved packets to the muxer,
 * call the muxer's write_trailer, free per-stream and muxer private data,
 * and return the first error encountered (or the AVIOContext error). */
3642 int av_write_trailer(AVFormatContext *s)
3648         ret= interleave_packet(s, &pkt, NULL, 1);
3649         if(ret<0) //FIXME cleanup needed for ret<0 ?
3654         ret= s->oformat->write_packet(s, &pkt);
3656             s->streams[pkt.stream_index]->nb_frames++;
3658         av_free_packet(&pkt);
3662         if(s->pb && s->pb->error)
3666     if(s->oformat->write_trailer)
3667         ret = s->oformat->write_trailer(s);
3672         ret = s->pb ? s->pb->error : 0;
3673     for(i=0;i<s->nb_streams;i++) {
3674         av_freep(&s->streams[i]->priv_data);
3675         av_freep(&s->streams[i]->index_entries);
3677     if (s->oformat->priv_class)
3678         av_opt_free(s->priv_data);
3679     av_freep(&s->priv_data);
/* Query the muxer for the last output timestamp of a stream, together with
 * the wall-clock time it was written.  AVERROR(ENOSYS) when the muxer does
 * not implement get_output_timestamp. */
3683 int av_get_output_timestamp(struct AVFormatContext *s, int stream,
3684                             int64_t *dts, int64_t *wall)
3686     if (!s->oformat || !s->oformat->get_output_timestamp)
3687         return AVERROR(ENOSYS);
3688     s->oformat->get_output_timestamp(s, stream, dts, wall);
/* Add stream index idx to the program identified by progid.  Validates the
 * index, skips duplicates, and grows the program's stream_index array by
 * one via av_realloc. */
3692 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3695     AVProgram *program=NULL;
3698     if (idx >= ac->nb_streams) {
3699         av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3703     for(i=0; i<ac->nb_programs; i++){
3704         if(ac->programs[i]->id != progid)
3706         program = ac->programs[i];
3707         for(j=0; j<program->nb_stream_indexes; j++)
3708             if(program->stream_index[j] == idx)
3711         tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3714         program->stream_index = tmp;
3715         program->stream_index[program->nb_stream_indexes++] = idx;
/*
 * Log a rate 'd' with unit 'postfix', choosing the shortest form:
 * two decimals if not an integer multiple of 1, integral if < 1000x,
 * otherwise divided by 1000 with a "k" suffix.
 * NOTE(review): lrintf() converts the double to float first; very large
 * rates (e.g. 1/timebase) may lose precision — confirm intended.
 */
3720 static void print_fps(double d, const char *postfix){
3721 uint64_t v= lrintf(d*100);
3722 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3723 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3724 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/*
 * Pretty-print a metadata dictionary, one "key : value" line per entry,
 * prefixed with 'indent'. The "language" tag is skipped (it is shown
 * inline with the stream instead), and a dictionary containing only
 * "language" produces no output at all. CR/LF in values are rendered
 * without breaking the column layout.
 */
3727 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3729 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3730 AVDictionaryEntry *tag=NULL;
3732 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
/* AV_DICT_IGNORE_SUFFIX with "" iterates every entry */
3733 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3734 if(strcmp("language", tag->key)){
3735 const char *p = tag->value;
3736 av_log(ctx, AV_LOG_INFO, "%s %-16s: ", indent, tag->key);
/* copy up to the next CR/LF into a bounded temp buffer */
3739 size_t len = strcspn(p, "\xd\xa");
3740 av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3741 av_log(ctx, AV_LOG_INFO, "%s", tmp);
/* CR becomes a space; LF restarts an aligned continuation line */
3743 if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3744 if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s %-16s: ", indent, "");
3747 av_log(ctx, AV_LOG_INFO, "\n");
3753 /* "user interface" functions */
/*
 * Print a one-line (plus metadata) description of stream 'i' of context
 * 'ic': "Stream #index:i[0xID](lang): <codec string>", followed by
 * SAR/DAR for anamorphic video, fps/tbr/tbn/tbc rates for video streams,
 * disposition flags, and the stream's metadata block.
 */
3754 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3757 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3758 AVStream *st = ic->streams[i];
/* reduce the time base for the debug-level num/den print below */
3759 int g = av_gcd(st->time_base.num, st->time_base.den);
3760 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3761 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3762 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);
3763 /* the pid is an important information, so we display it */
3764 /* XXX: add a generic system */
3765 if (flags & AVFMT_SHOW_IDS)
3766 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3768 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3769 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3770 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* only print SAR/DAR when the stream SAR differs from the codec SAR */
3771 if (st->sample_aspect_ratio.num && // default
3772 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3773 AVRational display_aspect_ratio;
3774 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3775 st->codec->width*st->sample_aspect_ratio.num,
3776 st->codec->height*st->sample_aspect_ratio.den,
3778 av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3779 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3780 display_aspect_ratio.num, display_aspect_ratio.den);
/* video-only rate summary: average fps, real frame rate, stream and
 * codec time base frequencies */
3782 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3783 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3784 print_fps(av_q2d(st->avg_frame_rate), "fps");
3785 if(st->r_frame_rate.den && st->r_frame_rate.num)
3786 print_fps(av_q2d(st->r_frame_rate), "tbr");
3787 if(st->time_base.den && st->time_base.num)
3788 print_fps(1/av_q2d(st->time_base), "tbn");
3789 if(st->codec->time_base.den && st->codec->time_base.num)
3790 print_fps(1/av_q2d(st->codec->time_base), "tbc");
/* disposition flags, printed in a fixed order */
3792 if (st->disposition & AV_DISPOSITION_DEFAULT)
3793 av_log(NULL, AV_LOG_INFO, " (default)");
3794 if (st->disposition & AV_DISPOSITION_DUB)
3795 av_log(NULL, AV_LOG_INFO, " (dub)");
3796 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3797 av_log(NULL, AV_LOG_INFO, " (original)");
3798 if (st->disposition & AV_DISPOSITION_COMMENT)
3799 av_log(NULL, AV_LOG_INFO, " (comment)");
3800 if (st->disposition & AV_DISPOSITION_LYRICS)
3801 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3802 if (st->disposition & AV_DISPOSITION_KARAOKE)
3803 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3804 if (st->disposition & AV_DISPOSITION_FORCED)
3805 av_log(NULL, AV_LOG_INFO, " (forced)");
3806 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3807 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3808 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3809 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3810 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3811 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3812 av_log(NULL, AV_LOG_INFO, "\n");
3813 dump_metadata(NULL, st->metadata, " ");
/*
 * Dump a human-readable description of an input or output context:
 * container name and URL, global metadata, duration, start time, bitrate,
 * chapters, programs (with their member streams), and finally any stream
 * not covered by a program. 'printed' tracks which streams were already
 * shown via a program so they are not listed twice.
 */
3816 void av_dump_format(AVFormatContext *ic,
/* one flag byte per stream; NULL is tolerated only when nb_streams==0 */
3822 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3823 if (ic->nb_streams && !printed)
3826 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3827 is_output ? "Output" : "Input",
3829 is_output ? ic->oformat->name : ic->iformat->name,
3830 is_output ? "to" : "from", url);
3831 dump_metadata(NULL, ic->metadata, " ");
3833 av_log(NULL, AV_LOG_INFO, " Duration: ");
3834 if (ic->duration != AV_NOPTS_VALUE) {
3835 int hours, mins, secs, us;
/* split AV_TIME_BASE-scaled duration into h:m:s.cs */
3836 secs = ic->duration / AV_TIME_BASE;
3837 us = ic->duration % AV_TIME_BASE;
3842 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3843 (100 * us) / AV_TIME_BASE);
3845 av_log(NULL, AV_LOG_INFO, "N/A");
3847 if (ic->start_time != AV_NOPTS_VALUE) {
3849 av_log(NULL, AV_LOG_INFO, ", start: ");
3850 secs = ic->start_time / AV_TIME_BASE;
/* abs() of the remainder keeps the fractional part positive for
 * negative start times */
3851 us = abs(ic->start_time % AV_TIME_BASE);
3852 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3853 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3855 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3857 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3859 av_log(NULL, AV_LOG_INFO, "N/A");
3861 av_log(NULL, AV_LOG_INFO, "\n");
/* chapters: start/end converted to seconds via the chapter time base */
3863 for (i = 0; i < ic->nb_chapters; i++) {
3864 AVChapter *ch = ic->chapters[i];
3865 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3866 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3867 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3869 dump_metadata(NULL, ch->metadata, " ");
/* programs first; mark their streams so the fallback loop skips them */
3871 if(ic->nb_programs) {
3872 int j, k, total = 0;
3873 for(j=0; j<ic->nb_programs; j++) {
3874 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3876 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3877 name ? name->value : "");
3878 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3879 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3880 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3881 printed[ic->programs[j]->stream_index[k]] = 1;
3883 total += ic->programs[j]->nb_stream_indexes;
3885 if (total < ic->nb_streams)
3886 av_log(NULL, AV_LOG_INFO, " No Program\n");
3888 for(i=0;i<ic->nb_streams;i++)
3890 dump_stream_format(ic, i, index, is_output);
/* ABI-compat shim: re-export av_gettime() under the LIBAVFORMAT_54 symbol
 * version for old binaries, only in shared builds with symver support. */
3895 #if FF_API_AV_GETTIME && CONFIG_SHARED && HAVE_SYMVER
3896 FF_SYMVER(int64_t, av_gettime, (void), "LIBAVFORMAT_54")
3898 return av_gettime();
/* Current time as NTP microseconds: wall clock truncated to millisecond
 * precision, shifted by the Unix->NTP epoch offset. */
3902 uint64_t ff_ntp_time(void)
3904 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/*
 * Expand a printf-like "%0Nd" (or "%d") pattern in 'path' with 'number'
 * into 'buf'. Exactly one %d-style directive is required; the visible
 * tail returns failure when none was found ('percentd_found').
 * NOTE(review): isdigit() on a plain char is UB for negative values;
 * upstream casts via macros — confirm against the full file.
 */
3907 int av_get_frame_filename(char *buf, int buf_size,
3908 const char *path, int number)
3911 char *q, buf1[20], c;
3912 int nd, len, percentd_found;
/* parse the optional zero-padded width between '%' and 'd' */
3924 while (isdigit(*p)) {
3925 nd = nd * 10 + *p++ - '0';
3928 } while (isdigit(c));
/* format the number with the requested width into a temp buffer */
3937 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
/* bounds check before copying the formatted number into buf */
3939 if ((q - buf + len) > buf_size - 1)
3941 memcpy(q, buf1, len);
3949 if ((q - buf) < buf_size - 1)
3953 if (!percentd_found)
/*
 * Hex-dump 'size' bytes of 'buf' 16 per row, with an ASCII column where
 * non-printable bytes are replaced. Output goes to FILE *f when non-NULL,
 * otherwise to av_log(avcl, level, ...) via the PRINT macro.
 */
3962 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3966 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3968 for(i=0;i<size;i+=16) {
3975 PRINT(" %02x", buf[i+j]);
/* ASCII column: printable range is ' '..'~' */
3980 for(j=0;j<len;j++) {
3982 if (c < ' ' || c > '~')
/* Public wrapper: hex dump to a stdio FILE at no log level. */
3991 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3993 hex_dump_internal(NULL, f, 0, buf, size);
/* Public wrapper: hex dump through av_log on context 'avcl' at 'level'. */
3996 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3998 hex_dump_internal(avcl, NULL, level, buf, size);
/*
 * Dump an AVPacket's fields (stream index, keyframe flag, duration, dts,
 * pts in seconds via 'time_base', size) and optionally its payload as hex.
 * Same FILE-vs-av_log dispatch as hex_dump_internal via PRINT.
 */
4001 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
4004 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
4005 PRINT("stream #%d:\n", pkt->stream_index);
4006 PRINT("  keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
4007 PRINT("  duration=%0.3f\n", pkt->duration * av_q2d(time_base));
4008 /* DTS is _always_ valid after av_read_frame() */
4010 if (pkt->dts == AV_NOPTS_VALUE)
4013 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
4014 /* PTS may not be known if B-frames are present. */
4016 if (pkt->pts == AV_NOPTS_VALUE)
4019 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
4021 PRINT("  size=%d\n", pkt->size);
/* NOTE(review): payload dump goes to av_hex_dump(f, ...) even when f is
 * NULL in the visible line — confirm the full file handles that path. */
4024 av_hex_dump(f, pkt->data, pkt->size);
/* Packet dump to a FILE, interpreting timestamps in 1/AV_TIME_BASE. */
4028 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
4030 AVRational tb = { 1, AV_TIME_BASE };
4031 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
/* Packet dump to a FILE using the owning stream's time base. */
4035 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
4037 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
/* Packet dump through av_log, timestamps in 1/AV_TIME_BASE. */
4041 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
4043 AVRational tb = { 1, AV_TIME_BASE };
4044 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
/* Packet dump through av_log using the owning stream's time base. */
4048 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
4051 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/*
 * Split a URL of the form proto://user:pass@host:port/path into its
 * components. Every output buffer is optional via a zero size; missing
 * components come back as empty strings, a missing port as -1. A URL
 * without "proto:" is treated as a plain filename (all of it is 'path').
 * IPv6 numeric hosts in [brackets] are unwrapped.
 */
4054 void av_url_split(char *proto, int proto_size,
4055 char *authorization, int authorization_size,
4056 char *hostname, int hostname_size,
4058 char *path, int path_size,
4061 const char *p, *ls, *at, *col, *brk;
/* initialise all outputs so early returns leave them well-defined */
4063 if (port_ptr) *port_ptr = -1;
4064 if (proto_size > 0) proto[0] = 0;
4065 if (authorization_size > 0) authorization[0] = 0;
4066 if (hostname_size > 0) hostname[0] = 0;
4067 if (path_size > 0) path[0] = 0;
4069 /* parse protocol */
4070 if ((p = strchr(url, ':'))) {
4071 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
4076 /* no protocol means plain filename */
4077 av_strlcpy(path, url, path_size);
4081 /* separate path from hostname */
4082 ls = strchr(p, '/');
/* a query string with no path also terminates the authority part */
4084 ls = strchr(p, '?');
4086 av_strlcpy(path, ls, path_size);
4088 ls = &p[strlen(p)]; // XXX
4090 /* the rest is hostname, use that to parse auth/port */
4092 /* authorization (user[:pass]@hostname) */
4093 if ((at = strchr(p, '@')) && at < ls) {
4094 av_strlcpy(authorization, p,
4095 FFMIN(authorization_size, at + 1 - p));
4096 p = at + 1; /* skip '@' */
/* [v6addr]:port — strip the brackets when copying the host */
4099 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
4101 av_strlcpy(hostname, p + 1,
4102 FFMIN(hostname_size, brk - p));
4103 if (brk[1] == ':' && port_ptr)
4104 *port_ptr = atoi(brk + 2);
4105 } else if ((col = strchr(p, ':')) && col < ls) {
4106 av_strlcpy(hostname, p,
4107 FFMIN(col + 1 - p, hostname_size));
4108 if (port_ptr) *port_ptr = atoi(col + 1);
4110 av_strlcpy(hostname, p,
4111 FFMIN(ls + 1 - p, hostname_size));
/*
 * Encode 's' bytes of 'src' as hex into 'buff' (2 chars per byte),
 * upper- or lowercase per 'lowercase'. 'buff' must hold at least 2*s
 * characters; termination of the buffer is not visible in this extract —
 * confirm whether callers NUL-terminate.
 */
4115 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
4118 static const char hex_table_uc[16] = { '0', '1', '2', '3',
4121 'C', 'D', 'E', 'F' };
4122 static const char hex_table_lc[16] = { '0', '1', '2', '3',
4125 'c', 'd', 'e', 'f' };
4126 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
/* high nibble first, then low nibble */
4128 for(i = 0; i < s; i++) {
4129 buff[i * 2]     = hex_table[src[i] >> 4];
4130 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
/*
 * Decode a whitespace-tolerant hex string 'p' into 'data'.
 * Characters are uppercased before classification so both cases parse.
 */
4136 int ff_hex_to_data(uint8_t *data, const char *p)
/* skip leading whitespace between hex digits */
4143 p += strspn(p, SPACE_CHARS);
4146 c = toupper((unsigned char) *p++);
4147 if (c >= '0' && c <= '9')
4149 else if (c >= 'A' && c <= 'F')
/* Deprecated public entry point (guarded by FF_API_SET_PTS_INFO):
 * forwards to the private avpriv_set_pts_info(). */
4164 #if FF_API_SET_PTS_INFO
4165 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
4166 unsigned int pts_num, unsigned int pts_den)
4168 avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
/*
 * Set a stream's time base to pts_num/pts_den (reduced, clamped to
 * INT_MAX) and its timestamp wrap width. An invalid (non-positive)
 * reduced time base is rejected with an error log and the stream is
 * left unchanged.
 */
4172 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
4173 unsigned int pts_num, unsigned int pts_den)
/* av_reduce() returns nonzero when the reduction was exact */
4176 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
4177 if(new_tb.num != pts_num)
4178 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
4180 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
4182 if(new_tb.num <= 0 || new_tb.den <= 0) {
4183 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase %d/%d for st:%d\n", new_tb.num, new_tb.den, s->index);
4186 s->time_base = new_tb;
4187 s->pts_wrap_bits = pts_wrap_bits;
/*
 * Assemble "proto://auth@host:port<fmt...>" into 'str' (size 'size').
 * Numeric IPv6 hostnames are detected with getaddrinfo(AI_NUMERICHOST)
 * and wrapped in [] when network support is compiled in; a port > 0 is
 * appended; the optional printf-style tail is vsnprintf'd after the
 * authority part.
 */
4190 int ff_url_join(char *str, int size, const char *proto,
4191 const char *authorization, const char *hostname,
4192 int port, const char *fmt, ...)
4195 struct addrinfo hints = { 0 }, *ai;
4200 av_strlcatf(str, size, "%s://", proto);
4201 if (authorization && authorization[0])
4202 av_strlcatf(str, size, "%s@", authorization);
4203 #if CONFIG_NETWORK && defined(AF_INET6)
4204 /* Determine if hostname is a numerical IPv6 address,
4205 * properly escape it within [] in that case. */
4206 hints.ai_flags = AI_NUMERICHOST;
4207 if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
4208 if (ai->ai_family == AF_INET6) {
4209 av_strlcat(str, "[", size);
4210 av_strlcat(str, hostname, size);
4211 av_strlcat(str, "]", size);
4213 av_strlcat(str, hostname, size);
4218 /* Not an IPv6 address, just output the plain string. */
4219 av_strlcat(str, hostname, size);
4222 av_strlcatf(str, size, ":%d", port);
/* append the formatted tail only into whatever room is left */
4225 int len = strlen(str);
4228 vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
/*
 * Forward a packet from a source (slave) context into 'dst' as stream
 * 'dst_stream': copies the packet, retargets the stream index, rescales
 * pts/dts from the source stream's time base to the destination's, and
 * writes it with av_write_frame(). Returns av_write_frame()'s result.
 */
4234 int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
4235 AVFormatContext *src)
4240 local_pkt.stream_index = dst_stream;
4241 if (pkt->pts != AV_NOPTS_VALUE)
4242 local_pkt.pts = av_rescale_q(pkt->pts,
4243 src->streams[pkt->stream_index]->time_base,
4244 dst->streams[dst_stream]->time_base);
4245 if (pkt->dts != AV_NOPTS_VALUE)
4246 local_pkt.dts = av_rescale_q(pkt->dts,
4247 src->streams[pkt->stream_index]->time_base,
4248 dst->streams[dst_stream]->time_base);
4249 return av_write_frame(dst, &local_pkt);
/*
 * Parse comma/whitespace-separated key=value pairs from 'str'. For each
 * key the callback supplies (or declines, via dest==NULL) a destination
 * buffer; values may be double-quoted (quotes stripped, visible loop) or
 * bare (terminated by whitespace or ','). One byte is reserved for the
 * NUL terminator (dest_end points at the last writable slot).
 */
4252 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
4255 const char *ptr = str;
4257 /* Parse key=value pairs. */
4260 char *dest = NULL, *dest_end;
4261 int key_len, dest_len = 0;
4263 /* Skip whitespace and potential commas. */
4264 while (*ptr && (isspace(*ptr) || *ptr == ','))
/* a pair without '=' ends the parse */
4271 if (!(ptr = strchr(key, '=')))
4274 key_len = ptr - key;
/* let the caller pick a destination buffer for this key */
4276 callback_get_buf(context, key, key_len, &dest, &dest_len);
4277 dest_end = dest + dest_len - 1;
4281 while (*ptr && *ptr != '\"') {
4285 if (dest && dest < dest_end)
4289 if (dest && dest < dest_end)
/* unquoted value: runs to whitespace or comma */
4297 for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
4298 if (dest && dest < dest_end)
/* Linear search for the first stream whose 'id' matches; the visible
 * loop returns its index (failure return is outside this extract). */
4306 int ff_find_stream_index(AVFormatContext *s, int id)
4309 for (i = 0; i < s->nb_streams; i++) {
4310 if (s->streams[i]->id == id)
/*
 * Resolve 'rel' against 'base' into 'buf':
 *  - "/path" keeps base's scheme+host and replaces the path;
 *  - an absolute URL (contains "://") or missing base is copied verbatim;
 *  - otherwise the base's file name is stripped and leading "../"
 *    segments of rel pop directories off the base path.
 */
4316 void ff_make_absolute_url(char *buf, int size, const char *base,
4320 /* Absolute path, relative to the current server */
4321 if (base && strstr(base, "://") && rel[0] == '/') {
4323 av_strlcpy(buf, base, size);
4324 sep = strstr(buf, "://");
/* find the first '/' after the authority — start of the path */
4327 sep = strchr(sep, '/');
4331 av_strlcat(buf, rel, size);
4334 /* If rel actually is an absolute url, just copy it */
4335 if (!base || strstr(rel, "://") || rel[0] == '/') {
4336 av_strlcpy(buf, rel, size);
4340 av_strlcpy(buf, base, size);
4341 /* Remove the file name from the base url */
4342 sep = strrchr(buf, '/');
/* pop one directory per leading "../" in rel */
4347 while (av_strstart(rel, "../", NULL) && sep) {
4348 /* Remove the path delimiter at the end */
4350 sep = strrchr(buf, '/');
4351 /* If the next directory name to pop off is "..", break here */
4352 if (!strcmp(sep ? &sep[1] : buf, "..")) {
4353 /* Readd the slash we just removed */
4354 av_strlcat(buf, "/", size);
4357 /* Cut off the directory name */
4364 av_strlcat(buf, rel, size);
/*
 * Parse an ISO-8601-style date string ("YYYY-MM-DD hh:mm:ss" or the
 * 'T'-separated variant) into a Unix timestamp via av_timegm() (UTC).
 * The spaced "%Y - %m - %d" strptime pattern tolerates separators
 * loosely. When strptime() is unavailable a warning is logged instead.
 */
4367 int64_t ff_iso8601_to_unix_time(const char *datestr)
4370 struct tm time1 = {0}, time2 = {0};
/* try both the space-separated and the 'T'-separated forms */
4372 ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
4373 ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
4375 return av_timegm(&time2);
4377 return av_timegm(&time1);
4379 av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert "
4380 "the date string.\n");
/*
 * Ask whether 'ofmt' can mux 'codec_id' at the given compliance level:
 * prefer the muxer's query_codec() callback, then fall back to a codec
 * tag table lookup, then to the muxer's declared default codecs.
 * Returns AVERROR_PATCHWELCOME when nothing can answer the question.
 */
4385 int avformat_query_codec(AVOutputFormat *ofmt, enum CodecID codec_id, int std_compliance)
4388 if (ofmt->query_codec)
4389 return ofmt->query_codec(codec_id, std_compliance);
4390 else if (ofmt->codec_tag)
4391 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4392 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
4393 codec_id == ofmt->subtitle_codec)
4396 return AVERROR_PATCHWELCOME;
/* Globally initialise networking: marks init as application-driven,
 * then performs the actual stack setup via ff_network_init(). */
4399 int avformat_network_init(void)
4403 ff_network_inited_globally = 1;
4404 if ((ret = ff_network_init()) < 0)
/* Counterpart of avformat_network_init(); body not visible in this
 * extract. */
4411 int avformat_network_deinit(void)
/*
 * Attach an AV_PKT_DATA_PARAM_CHANGE side-data blob to 'pkt' describing
 * any of: channel count, channel layout, sample rate, video dimensions.
 * Layout: little-endian flags word followed by the values for each flag
 * that is set. Returns 0 on success, AVERROR(EINVAL)/AVERROR(ENOMEM)
 * on failure.
 */
4420 int ff_add_param_change(AVPacket *pkt, int32_t channels,
4421 uint64_t channel_layout, int32_t sample_rate,
4422 int32_t width, int32_t height)
4428 return AVERROR(EINVAL);
4431 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4433 if (channel_layout) {
4435 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4439 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4441 if (width || height) {
4443 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
/* allocate the side-data buffer, then serialise flags + values LE */
4445 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4447 return AVERROR(ENOMEM);
4448 bytestream_put_le32(&data, flags);
4450 bytestream_put_le32(&data, channels);
4452 bytestream_put_le64(&data, channel_layout);
4454 bytestream_put_le32(&data, sample_rate);
4455 if (width || height) {
4456 bytestream_put_le32(&data, width);
4457 bytestream_put_le32(&data, height);
/* Expose the RIFF (BMP/AVI) video codec tag table to the public API. */
4462 const struct AVCodecTag *avformat_get_riff_video_tags(void)
4464 return ff_codec_bmp_tags;
/* Expose the RIFF (WAV) audio codec tag table to the public API. */
4466 const struct AVCodecTag *avformat_get_riff_audio_tags(void)
4468 return ff_codec_wav_tags;
/*
 * Guess the sample aspect ratio to use for a frame: prefer the stream's
 * SAR, otherwise fall back to the frame's (which itself defaults to the
 * codec's when no frame is given). Each candidate is reduced and any
 * non-positive ratio is replaced by the 0/1 "undefined" sentinel.
 * All of 'format', 'stream' and 'frame' may be NULL.
 */
4471 AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4473 AVRational undef = {0, 1};
4474 AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4475 AVRational codec_sample_aspect_ratio = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
4476 AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
/* sanitise the stream SAR */
4478 av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4479 stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4480 if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4481 stream_sample_aspect_ratio = undef;
/* sanitise the frame SAR */
4483 av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4484 frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4485 if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4486 frame_sample_aspect_ratio = undef;
/* stream SAR wins when defined */
4488 if (stream_sample_aspect_ratio.num)
4489 return stream_sample_aspect_ratio;
4491 return frame_sample_aspect_ratio;