2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "avio_internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavcodec/raw.h"
29 #include "libavcodec/bytestream.h"
30 #include "libavutil/avassert.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/dict.h"
33 #include "libavutil/pixdesc.h"
36 #include "libavutil/avassert.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/timestamp.h"
42 #include "audiointerleave.h"
54 * various utility functions for use within FFmpeg
57 unsigned avformat_version(void)
59 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
60 return LIBAVFORMAT_VERSION_INT;
63 const char *avformat_configuration(void)
65 return FFMPEG_CONFIGURATION;
68 const char *avformat_license(void)
70 #define LICENSE_PREFIX "libavformat license: "
71 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Base offset used to mark timestamps as "relative" (not yet anchored to a
 * real first_dts); chosen near INT64_MAX so it cannot collide with real ts. */
#define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))

/**
 * Check whether a timestamp lies in the relative range, i.e. within
 * (1LL<<48) below RELATIVE_TS_BASE or above.
 */
static int is_relative(int64_t ts)
{
    return ts > (RELATIVE_TS_BASE - (1LL<<48));
}
/* NOTE(review): this region is a truncated listing -- the bodies of
 * frac_init() and frac_add() are missing and every surviving line carries a
 * stray line number from the original numbered listing. Code left
 * byte-identical; restore the full text from upstream libavformat/utils.c
 * before compiling. */
80 /* fraction handling */
83 * f = val + (num / den) + 0.5.
85 * 'num' is normalized so that it is such as 0 <= num < den.
87 * @param f fractional number
88 * @param val integer value
89 * @param num must be >= 0
90 * @param den must be >= 1
92 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
105 * Fractional addition to f: f = f + (incr / f->den).
107 * @param f fractional number
108 * @param incr increment, can be positive or negative
110 static void frac_add(AVFrac *f, int64_t incr)
/* fragment of the carry/normalization branch of frac_add(): */
123 } else if (num >= den) {
130 /** head of registered input format linked list */
131 static AVInputFormat *first_iformat = NULL;
132 /** head of registered output format linked list */
133 static AVOutputFormat *first_oformat = NULL;
135 AVInputFormat *av_iformat_next(AVInputFormat *f)
137 if(f) return f->next;
138 else return first_iformat;
141 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
143 if(f) return f->next;
144 else return first_oformat;
147 void av_register_input_format(AVInputFormat *format)
151 while (*p != NULL) p = &(*p)->next;
156 void av_register_output_format(AVOutputFormat *format)
160 while (*p != NULL) p = &(*p)->next;
/**
 * Check whether the extension of filename matches one of the entries in the
 * comma-separated list extensions (case-insensitive).
 * @return 1 on match, 0 otherwise (including NULL filename / no extension)
 */
int av_match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if (!filename)
        return 0;

    ext = strrchr(filename, '.');
    if (ext) {
        ext++;
        p = extensions;
        for (;;) {
            q = ext1;
            /* copy one candidate extension, bounded by the ext1 buffer */
            while (*p != '\0' && *p != ',' && q - ext1 < sizeof(ext1) - 1)
                *q++ = *p++;
            *q = '\0';
            if (!av_strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++;
        }
    }
    return 0;
}
/**
 * Check whether name matches one entry of the comma-separated list names
 * (case-insensitive).
 * @return 1 on match, 0 otherwise
 */
static int match_format(const char *name, const char *names)
{
    const char *p;
    int len, namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        /* compare against the longer of the two lengths so that a shorter
         * list entry cannot spuriously prefix-match name */
        len = FFMAX(p - names, namelen);
        if (!av_strncasecmp(name, names, len))
            return 1;
        names = p + 1;
    }
    return !av_strcasecmp(name, names);
}
/* NOTE(review): truncated listing -- score bookkeeping, the returns and the
 * closing braces are missing, and each line carries a stray listing number.
 * Appears to select the best-scoring registered muxer by short name, MIME
 * type and filename extension -- confirm against upstream before editing. */
210 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
211 const char *mime_type)
213 AVOutputFormat *fmt = NULL, *fmt_found;
214 int score_max, score;
216 /* specific test for image sequences */
217 #if CONFIG_IMAGE2_MUXER
218 if (!short_name && filename &&
219 av_filename_number_test(filename) &&
220 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
221 return av_guess_format("image2", NULL, NULL);
224 /* Find the proper file type. */
227 while ((fmt = av_oformat_next(fmt))) {
229 if (fmt->name && short_name && match_format(short_name, fmt->name))
231 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
233 if (filename && fmt->extensions &&
234 av_match_ext(filename, fmt->extensions)) {
237 if (score > score_max) {
245 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
246 const char *filename, const char *mime_type, enum AVMediaType type){
247 if(type == AVMEDIA_TYPE_VIDEO){
248 enum CodecID codec_id= CODEC_ID_NONE;
250 #if CONFIG_IMAGE2_MUXER
251 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
252 codec_id= ff_guess_image2_codec(filename);
255 if(codec_id == CODEC_ID_NONE)
256 codec_id= fmt->video_codec;
258 }else if(type == AVMEDIA_TYPE_AUDIO)
259 return fmt->audio_codec;
260 else if (type == AVMEDIA_TYPE_SUBTITLE)
261 return fmt->subtitle_codec;
263 return CODEC_ID_NONE;
266 AVInputFormat *av_find_input_format(const char *short_name)
268 AVInputFormat *fmt = NULL;
269 while ((fmt = av_iformat_next(fmt))) {
270 if (match_format(short_name, fmt->name))
276 int ffio_limit(AVIOContext *s, int size)
279 int64_t remaining= s->maxsize - avio_tell(s);
280 if(remaining < size){
281 int64_t newsize= avio_size(s);
282 if(!s->maxsize || s->maxsize<newsize)
283 s->maxsize= newsize - !newsize;
284 remaining= s->maxsize - avio_tell(s);
285 remaining= FFMAX(remaining, 0);
288 if(s->maxsize>=0 && remaining+1 < size){
289 av_log(0, AV_LOG_ERROR, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
296 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
299 int orig_size = size;
300 size= ffio_limit(s, size);
302 ret= av_new_packet(pkt, size);
307 pkt->pos= avio_tell(s);
309 ret= avio_read(s, pkt->data, size);
313 av_shrink_packet(pkt, ret);
314 if (pkt->size < orig_size)
315 pkt->flags |= AV_PKT_FLAG_CORRUPT;
320 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
325 return av_get_packet(s, pkt, size);
326 old_size = pkt->size;
327 ret = av_grow_packet(pkt, size);
330 ret = avio_read(s, pkt->data + old_size, size);
331 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
/**
 * Check whether filename contains a frame-number pattern usable by
 * av_get_frame_filename() (e.g. "%d" sequences).
 * @return 1 if a number can be substituted, 0 otherwise
 */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename &&
           (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
}
/* NOTE(review): truncated listing -- the ID3 skip, the nodat handling, the
 * score accumulation and all closing braces are missing, and each line has a
 * stray listing number. Appears to probe every registered input format
 * against pd and report the best score via score_ret -- restore from
 * upstream before editing. */
342 AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
344 AVProbeData lpd = *pd;
345 AVInputFormat *fmt1 = NULL, *fmt;
346 int score, nodat = 0, score_max=0;
/* skip a leading ID3v2 tag so it does not confuse format probes */
348 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
349 int id3len = ff_id3v2_tag_len(lpd.buf);
350 if (lpd.buf_size > id3len + 16) {
352 lpd.buf_size -= id3len;
358 while ((fmt1 = av_iformat_next(fmt1))) {
359 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
362 if (fmt1->read_probe) {
363 score = fmt1->read_probe(&lpd);
364 if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
365 score = FFMAX(score, nodat ? AVPROBE_SCORE_MAX/4-1 : 1);
366 } else if (fmt1->extensions) {
367 if (av_match_ext(lpd.filename, fmt1->extensions)) {
371 if (score > score_max) {
374 }else if (score == score_max)
377 *score_ret= score_max;
382 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
385 AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
386 if(score_ret > *score_max){
387 *score_max= score_ret;
393 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
395 return av_probe_input_format2(pd, is_opened, &score);
/* NOTE(review): truncated listing -- the table terminator, the score/i
 * declarations, the early return when fmt is NULL and the closing braces are
 * missing; every line carries a stray listing number. Appears to map a
 * probed format name to a codec id/type on the stream -- restore from
 * upstream before editing. */
398 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
400 static const struct {
401 const char *name; enum CodecID id; enum AVMediaType type;
403 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
404 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
405 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
406 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
407 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
408 { "loas" , CODEC_ID_AAC_LATM , AVMEDIA_TYPE_AUDIO },
409 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
410 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
411 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
415 AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
419 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
420 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
421 for (i = 0; fmt_id_type[i].name; i++) {
422 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
423 st->codec->codec_id = fmt_id_type[i].id;
424 st->codec->codec_type = fmt_id_type[i].type;
432 /************************************************************/
433 /* input media file */
435 int av_demuxer_open(AVFormatContext *ic){
438 if (ic->iformat->read_header) {
439 err = ic->iformat->read_header(ic);
444 if (ic->pb && !ic->data_offset)
445 ic->data_offset = avio_tell(ic->pb);
/* NOTE(review): truncated listing -- the buftmp declaration, error-path
 * cleanup (the goto fail / av_free(buf) logic), loop braces and the final
 * return are missing; every line carries a stray listing number. Appears to
 * read progressively larger probe buffers (doubling from PROBE_BUF_MIN to
 * max_probe_size) until a format is detected, then rewind the IO context.
 * Restore from upstream before editing. */
451 /** size of probe buffer, for guessing file type from file contents */
452 #define PROBE_BUF_MIN 2048
453 #define PROBE_BUF_MAX (1<<20)
455 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
456 const char *filename, void *logctx,
457 unsigned int offset, unsigned int max_probe_size)
459 AVProbeData pd = { filename ? filename : "", NULL, -offset };
460 unsigned char *buf = NULL;
461 int ret = 0, probe_size;
463 if (!max_probe_size) {
464 max_probe_size = PROBE_BUF_MAX;
465 } else if (max_probe_size > PROBE_BUF_MAX) {
466 max_probe_size = PROBE_BUF_MAX;
467 } else if (max_probe_size < PROBE_BUF_MIN) {
468 return AVERROR(EINVAL);
471 if (offset >= max_probe_size) {
472 return AVERROR(EINVAL);
475 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
476 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
477 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
478 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
481 if (probe_size < offset) {
485 /* read probe data */
486 buftmp = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
489 return AVERROR(ENOMEM);
492 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
493 /* fail if error was not end of file, otherwise, lower score */
494 if (ret != AVERROR_EOF) {
499 ret = 0; /* error was end of file, nothing read */
502 pd.buf = &buf[offset];
504 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
506 /* guess file format */
507 *fmt = av_probe_input_format2(&pd, 1, &score);
509 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
510 av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
512 av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
518 return AVERROR_INVALIDDATA;
521 /* rewind. reuse probe buffer to avoid seeking */
522 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
/* NOTE(review): truncated listing -- the ret declaration, the custom-IO
 * condition, the success returns and closing braces are missing; every line
 * carries a stray listing number. Appears to open the input (unless
 * AVFMT_NOFILE or caller-supplied pb) and probe the format when the caller
 * did not set one. Restore from upstream before editing. */
528 /* open input file and probe the format if necessary */
529 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
532 AVProbeData pd = {filename, NULL, 0};
535 s->flags |= AVFMT_FLAG_CUSTOM_IO;
537 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
538 else if (s->iformat->flags & AVFMT_NOFILE)
539 av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
540 "will be ignored with AVFMT_NOFILE format.\n");
544 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
545 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
548 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
549 &s->interrupt_callback, options)) < 0)
553 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
/* NOTE(review): truncated listing -- the NULL check on av_mallocz, the
 * pkt copy into pktl->pkt, the tail-pointer update and the return are
 * missing; every line carries a stray listing number. Appears to append pkt
 * to a singly linked AVPacketList. Restore from upstream before editing. */
556 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
557 AVPacketList **plast_pktl){
558 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
563 (*plast_pktl)->next = pktl;
565 *packet_buffer = pktl;
567 /* add the packet in the buffered packet list */
573 static void queue_attached_pictures(AVFormatContext *s)
576 for (i = 0; i < s->nb_streams; i++)
577 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
578 s->streams[i]->discard < AVDISCARD_ALL) {
579 AVPacket copy = s->streams[i]->attached_pic;
580 copy.destruct = NULL;
581 add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end);
/* NOTE(review): truncated listing -- the ret declaration, several condition
 * lines, the goto fail targets, the success return and closing braces are
 * missing; every line carries a stray listing number. This is the main
 * public open path: allocate/validate the context, apply options, open the
 * input, read the header, parse ID3v2 attached pictures, then queue them.
 * Restore from upstream before editing. */
585 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
587 AVFormatContext *s = *ps;
589 AVDictionary *tmp = NULL;
590 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
592 if (!s && !(s = avformat_alloc_context()))
593 return AVERROR(ENOMEM);
595 av_log(0, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
596 return AVERROR(EINVAL);
602 av_dict_copy(&tmp, *options, 0);
604 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
607 if ((ret = init_input(s, filename, &tmp)) < 0)
610 /* check filename in case an image number is expected */
611 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
612 if (!av_filename_number_test(filename)) {
613 ret = AVERROR(EINVAL);
618 s->duration = s->start_time = AV_NOPTS_VALUE;
619 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
621 /* allocate private data */
622 if (s->iformat->priv_data_size > 0) {
623 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
624 ret = AVERROR(ENOMEM);
627 if (s->iformat->priv_class) {
628 *(const AVClass**)s->priv_data = s->iformat->priv_class;
629 av_opt_set_defaults(s->priv_data);
630 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
635 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
637 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
639 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
640 if ((ret = s->iformat->read_header(s)) < 0)
643 if (id3v2_extra_meta &&
644 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
646 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
648 queue_attached_pictures(s);
650 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
651 s->data_offset = avio_tell(s->pb);
653 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
656 av_dict_free(options);
/* error path follows: free temp dict, ID3 metadata, pb and the context */
663 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
665 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
667 avformat_free_context(s);
/* NOTE(review): truncated listing -- the pkt==NULL flush branch, the end
 * declaration and several closing braces are missing; every line carries a
 * stray listing number. Appears to accumulate packet payloads into
 * st->probe_data and run set_codec_from_probe_data() once enough data or an
 * end condition is reached. Restore from upstream before editing. */
672 /*******************************************************/
674 static void probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
676 if(st->request_probe>0){
677 AVProbeData *pd = &st->probe_data;
679 av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
683 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
684 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
685 pd->buf_size += pkt->size;
686 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
688 st->probe_packets = 0;
691 end= s->raw_packet_buffer_remaining_size <= 0
692 || st->probe_packets<=0;
/* re-probe when buffer crossed a power-of-two size, or at the end */
694 if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
695 int score= set_codec_from_probe_data(s, st, pd);
696 if( (st->codec->codec_id != CODEC_ID_NONE && score > AVPROBE_SCORE_MAX/4)
700 st->request_probe= -1;
701 if(st->codec->codec_id != CODEC_ID_NONE){
702 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
704 av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
/* NOTE(review): truncated listing -- declarations (ret, i, st), the pktl
 * fast path's packet copy, several returns, `continue`/`break` statements
 * and closing braces are missing; every line carries a stray listing number.
 * Appears to drain the raw packet buffer first, then call the demuxer's
 * read_packet, applying corrupt-packet discard, side-data merging, forced
 * codec ids, and codec probing. Restore from upstream before editing. */
710 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
716 AVPacketList *pktl = s->raw_packet_buffer;
720 st = s->streams[pkt->stream_index];
721 if(st->request_probe <= 0){
722 s->raw_packet_buffer = pktl->next;
723 s->raw_packet_buffer_remaining_size += pkt->size;
730 ret= s->iformat->read_packet(s, pkt);
732 if (!pktl || ret == AVERROR(EAGAIN))
734 for (i = 0; i < s->nb_streams; i++) {
736 if (st->probe_packets) {
737 probe_codec(s, st, NULL);
739 av_assert0(st->request_probe <= 0);
744 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
745 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
746 av_log(s, AV_LOG_WARNING,
747 "Dropped corrupted packet (stream = %d)\n",
753 if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
754 av_packet_merge_side_data(pkt);
756 if(pkt->stream_index >= (unsigned)s->nb_streams){
757 av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
761 st= s->streams[pkt->stream_index];
763 switch(st->codec->codec_type){
764 case AVMEDIA_TYPE_VIDEO:
765 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
767 case AVMEDIA_TYPE_AUDIO:
768 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
770 case AVMEDIA_TYPE_SUBTITLE:
771 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
775 if(!pktl && st->request_probe <= 0)
778 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
779 s->raw_packet_buffer_remaining_size -= pkt->size;
781 probe_codec(s, st, pkt);
#if FF_API_READ_PACKET
/** Deprecated public wrapper kept for ABI compatibility; use av_read_frame(). */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    return ff_read_packet(s, pkt);
}
#endif
793 /**********************************************************/
795 static int determinable_frame_size(AVCodecContext *avctx)
797 if (/*avctx->codec_id == CODEC_ID_AAC ||*/
798 avctx->codec_id == CODEC_ID_MP1 ||
799 avctx->codec_id == CODEC_ID_MP2 ||
800 avctx->codec_id == CODEC_ID_MP3/* ||
801 avctx->codec_id == CODEC_ID_CELT*/)
807 * Get the number of samples of an audio frame. Return -1 on error.
809 static int get_audio_frame_size(AVCodecContext *enc, int size, int mux)
813 /* give frame_size priority if demuxing */
814 if (!mux && enc->frame_size > 1)
815 return enc->frame_size;
817 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
820 /* fallback to using frame_size if muxing */
821 if (enc->frame_size > 1)
822 return enc->frame_size;
/* NOTE(review): truncated listing -- the frame_size declaration, the
 * *pnum/*pden zero-initialization, the default/break cases and closing
 * braces are missing; every line carries a stray listing number. Appears to
 * derive a packet duration as a num/den fraction from frame rate, time base
 * or audio frame size. Restore from upstream before editing. */
829 * Return the frame duration in seconds. Return 0 if not available.
831 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
832 AVCodecParserContext *pc, AVPacket *pkt)
838 switch(st->codec->codec_type) {
839 case AVMEDIA_TYPE_VIDEO:
840 if (st->r_frame_rate.num && !pc) {
841 *pnum = st->r_frame_rate.den;
842 *pden = st->r_frame_rate.num;
843 } else if(st->time_base.num*1000LL > st->time_base.den) {
844 *pnum = st->time_base.num;
845 *pden = st->time_base.den;
846 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
847 *pnum = st->codec->time_base.num;
848 *pden = st->codec->time_base.den;
849 if (pc && pc->repeat_pict) {
850 *pnum = (*pnum) * (1 + pc->repeat_pict);
852 //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
853 //Thus if we have no parser in such case leave duration undefined.
854 if(st->codec->ticks_per_frame>1 && !pc){
859 case AVMEDIA_TYPE_AUDIO:
860 frame_size = get_audio_frame_size(st->codec, pkt->size, 0);
861 if (frame_size <= 0 || st->codec->sample_rate <= 0)
864 *pden = st->codec->sample_rate;
/* NOTE(review): truncated listing -- several case labels, the return
 * statements and closing braces are missing; every line carries a stray
 * listing number. Appears to classify a codec as intra-only (audio always;
 * video by a case list). Restore from upstream before editing. */
871 static int is_intra_only(AVCodecContext *enc){
872 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
874 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
875 switch(enc->codec_id){
877 case CODEC_ID_MJPEGB:
879 case CODEC_ID_PRORES:
880 case CODEC_ID_RAWVIDEO:
882 case CODEC_ID_DVVIDEO:
883 case CODEC_ID_HUFFYUV:
884 case CODEC_ID_FFVHUFF:
889 case CODEC_ID_JPEG2000:
891 case CODEC_ID_UTVIDEO:
899 static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
903 if (pktl == s->parse_queue_end)
904 return s->packet_buffer;
/* NOTE(review): truncated listing -- the early return after the guard, the
 * `continue` in the loop and closing braces are missing; every line carries
 * a stray listing number. Appears to anchor st->first_dts and rewrite all
 * queued relative timestamps to absolute ones once a real dts arrives.
 * Restore from upstream before editing. */
908 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
909 int64_t dts, int64_t pts)
911 AVStream *st= s->streams[stream_index];
912 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
/* bail out unless this is the first absolute dts seen for the stream */
914 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))
917 st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
920 if (is_relative(pts))
921 pts += st->first_dts - RELATIVE_TS_BASE;
923 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
924 if(pktl->pkt.stream_index != stream_index)
926 if(is_relative(pktl->pkt.pts))
927 pktl->pkt.pts += st->first_dts - RELATIVE_TS_BASE;
929 if(is_relative(pktl->pkt.dts))
930 pktl->pkt.dts += st->first_dts - RELATIVE_TS_BASE;
932 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
933 st->start_time= pktl->pkt.pts;
935 if (st->start_time == AV_NOPTS_VALUE)
936 st->start_time = pts;
/* NOTE(review): truncated listing -- `break`/`return` statements and several
 * closing braces are missing; every line carries a stray listing number.
 * Appears to back-fill dts/pts/duration on queued packets once the per-frame
 * duration becomes known. Restore from upstream before editing. */
939 static void update_initial_durations(AVFormatContext *s, AVStream *st,
940 int stream_index, int duration)
942 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
943 int64_t cur_dts= RELATIVE_TS_BASE;
945 if(st->first_dts != AV_NOPTS_VALUE){
946 cur_dts= st->first_dts;
/* walk backwards-equivalent: find the queued run preceding first_dts */
947 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
948 if(pktl->pkt.stream_index == stream_index){
949 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
954 if(pktl && pktl->pkt.dts != st->first_dts) {
955 av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s in que\n", av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts));
959 av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in ques\n", av_ts2str(st->first_dts));
962 pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
963 st->first_dts = cur_dts;
964 }else if(st->cur_dts != RELATIVE_TS_BASE)
967 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
968 if(pktl->pkt.stream_index != stream_index)
970 if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
971 && !pktl->pkt.duration){
972 pktl->pkt.dts= cur_dts;
973 if(!st->codec->has_b_frames)
974 pktl->pkt.pts= cur_dts;
975 // if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
976 pktl->pkt.duration = duration;
979 cur_dts = pktl->pkt.dts + pktl->pkt.duration;
982 st->cur_dts= cur_dts;
/* NOTE(review): truncated listing -- many statements (offset declaration,
 * pts/dts += offset lines, returns, braces) are missing, and every line
 * carries a stray listing number. This is the central timestamp fixup
 * routine: it infers missing pts/dts, applies parser sync info, estimates
 * durations and maintains cur_dts / the reorder pts_buffer. The statement
 * ordering here is load-bearing; restore verbatim from upstream rather than
 * rewriting. */
985 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
986 AVCodecParserContext *pc, AVPacket *pkt)
988 int num, den, presentation_delayed, delay, i;
991 if (s->flags & AVFMT_FLAG_NOFILLIN)
994 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
995 pkt->dts= AV_NOPTS_VALUE;
997 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
998 //FIXME Set low_delay = 0 when has_b_frames = 1
999 st->codec->has_b_frames = 1;
1001 /* do we have a video B-frame ? */
1002 delay= st->codec->has_b_frames;
1003 presentation_delayed = 0;
1005 /* XXX: need has_b_frame, but cannot get it if the codec is
1008 pc && pc->pict_type != AV_PICTURE_TYPE_B)
1009 presentation_delayed = 1;
/* undo a dts wrap-around relative to pts */
1011 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > pkt->pts && st->pts_wrap_bits<63){
1012 pkt->dts -= 1LL<<st->pts_wrap_bits;
1015 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
1016 // we take the conservative approach and discard both
1017 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
1018 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
1019 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1020 pkt->dts= AV_NOPTS_VALUE;
1023 if (pkt->duration == 0) {
1024 compute_frame_duration(&num, &den, st, pc, pkt);
1026 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
1029 if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
1030 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1032 /* correct timestamps with byte offset if demuxers only have timestamps
1033 on packet boundaries */
1034 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1035 /* this will estimate bitrate based on this frame's duration and size */
1036 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1037 if(pkt->pts != AV_NOPTS_VALUE)
1039 if(pkt->dts != AV_NOPTS_VALUE)
1043 if (pc && pc->dts_sync_point >= 0) {
1044 // we have synchronization info from the parser
1045 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1047 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1048 if (pkt->dts != AV_NOPTS_VALUE) {
1049 // got DTS from the stream, update reference timestamp
1050 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1051 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1052 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1053 // compute DTS based on reference timestamp
1054 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1055 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1057 if (pc->dts_sync_point > 0)
1058 st->reference_dts = pkt->dts; // new reference
1062 /* This may be redundant, but it should not hurt. */
1063 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1064 presentation_delayed = 1;
1066 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
1067 // presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
1068 /* interpolate PTS and DTS if they are not present */
1069 //We skip H264 currently because delay and has_b_frames are not reliably set
1070 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1071 if (presentation_delayed) {
1072 /* DTS = decompression timestamp */
1073 /* PTS = presentation timestamp */
1074 if (pkt->dts == AV_NOPTS_VALUE)
1075 pkt->dts = st->last_IP_pts;
1076 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1077 if (pkt->dts == AV_NOPTS_VALUE)
1078 pkt->dts = st->cur_dts;
1080 /* this is tricky: the dts must be incremented by the duration
1081 of the frame we are displaying, i.e. the last I- or P-frame */
1082 if (st->last_IP_duration == 0)
1083 st->last_IP_duration = pkt->duration;
1084 if(pkt->dts != AV_NOPTS_VALUE)
1085 st->cur_dts = pkt->dts + st->last_IP_duration;
1086 st->last_IP_duration = pkt->duration;
1087 st->last_IP_pts= pkt->pts;
1088 /* cannot compute PTS if not present (we can compute it only
1089 by knowing the future */
1090 } else if (pkt->pts != AV_NOPTS_VALUE ||
1091 pkt->dts != AV_NOPTS_VALUE ||
1093 int duration = pkt->duration;
1095 if(pkt->pts != AV_NOPTS_VALUE && duration){
1096 int64_t old_diff= FFABS(st->cur_dts - duration - pkt->pts);
1097 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1098 if( old_diff < new_diff && old_diff < (duration>>3)
1099 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO
1100 && (!strcmp(s->iformat->name, "mpeg") ||
1101 !strcmp(s->iformat->name, "mpegts"))){
1102 pkt->pts += duration;
1103 av_log(s, AV_LOG_WARNING, "Adjusting PTS forward\n");
1104 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%s size:%d\n",
1105 // pkt->stream_index, old_diff, new_diff, pkt->duration, av_ts2str(st->cur_dts), pkt->size);
1109 /* presentation is not delayed : PTS and DTS are the same */
1110 if (pkt->pts == AV_NOPTS_VALUE)
1111 pkt->pts = pkt->dts;
1112 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
1114 if (pkt->pts == AV_NOPTS_VALUE)
1115 pkt->pts = st->cur_dts;
1116 pkt->dts = pkt->pts;
1117 if (pkt->pts != AV_NOPTS_VALUE)
1118 st->cur_dts = pkt->pts + duration;
/* maintain a small sorted buffer of recent pts to derive dts under reorder */
1122 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1123 st->pts_buffer[0]= pkt->pts;
1124 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1125 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1126 if(pkt->dts == AV_NOPTS_VALUE)
1127 pkt->dts= st->pts_buffer[0];
1128 if(st->codec->codec_id == CODEC_ID_H264){ // we skipped it above so we try here
1129 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1131 if(pkt->dts > st->cur_dts)
1132 st->cur_dts = pkt->dts;
1135 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1136 // presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1139 if(is_intra_only(st->codec))
1140 pkt->flags |= AV_PKT_FLAG_KEY;
1142 pkt->convergence_duration = pc->convergence_duration;
1145 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1148 AVPacketList *pktl = *pkt_buf;
1149 *pkt_buf = pktl->next;
1150 av_free_packet(&pktl->pkt);
1153 *pkt_buf_end = NULL;
/* NOTE(review): truncated listing -- the len consumption (`data += len;
 * size -= len;`), the got_output guard, error labels and closing braces are
 * missing; every line carries a stray listing number. This function feeds a
 * packet through av_parser_parse2(), builds output packets with timestamps/
 * durations/key flags, and queues them on s->parse_queue. Restore from
 * upstream before editing. */
1157 * Parse a packet, add all split parts to parse_queue
1159 * @param pkt packet to parse, NULL when flushing the parser at end of stream
1161 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1163 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1164 AVStream *st = s->streams[stream_index];
1165 uint8_t *data = pkt ? pkt->data : NULL;
1166 int size = pkt ? pkt->size : 0;
1167 int ret = 0, got_output = 0;
1170 av_init_packet(&flush_pkt);
1173 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1174 // preserve 0-size sync packets
1175 compute_pkt_fields(s, st, st->parser, pkt);
1178 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1181 av_init_packet(&out_pkt);
1182 len = av_parser_parse2(st->parser, st->codec,
1183 &out_pkt.data, &out_pkt.size, data, size,
1184 pkt->pts, pkt->dts, pkt->pos);
/* input timestamps are consumed by the first parse call only */
1186 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1187 /* increment read pointer */
1191 got_output = !!out_pkt.size;
1196 /* set the duration */
1197 out_pkt.duration = 0;
1198 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1199 if (st->codec->sample_rate > 0) {
1200 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1201 (AVRational){ 1, st->codec->sample_rate },
1205 } else if (st->codec->time_base.num != 0 &&
1206 st->codec->time_base.den != 0) {
1207 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1208 st->codec->time_base,
1213 out_pkt.stream_index = st->index;
1214 out_pkt.pts = st->parser->pts;
1215 out_pkt.dts = st->parser->dts;
1216 out_pkt.pos = st->parser->pos;
1218 if (st->parser->key_frame == 1 ||
1219 (st->parser->key_frame == -1 &&
1220 st->parser->pict_type == AV_PICTURE_TYPE_I))
1221 out_pkt.flags |= AV_PKT_FLAG_KEY;
1223 if(st->parser->key_frame == -1 && st->parser->pict_type==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
1224 out_pkt.flags |= AV_PKT_FLAG_KEY;
1226 compute_pkt_fields(s, st, st->parser, &out_pkt);
1228 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1229 out_pkt.flags & AV_PKT_FLAG_KEY) {
1230 int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? out_pkt.pos : st->parser->frame_offset;
1231 ff_reduce_index(s, st->index);
1232 av_add_index_entry(st, pos, out_pkt.dts,
1233 0, 0, AVINDEX_KEYFRAME);
/* transfer ownership when the parser returned the input buffer unchanged */
1236 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1237 out_pkt.destruct = pkt->destruct;
1238 pkt->destruct = NULL;
1240 if ((ret = av_dup_packet(&out_pkt)) < 0)
1243 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1244 av_free_packet(&out_pkt);
1245 ret = AVERROR(ENOMEM);
1251 /* end of the stream => close and free the parser */
1252 if (pkt == &flush_pkt) {
1253 av_parser_close(st->parser);
1258 av_free_packet(pkt);
/* NOTE(review): truncated listing -- the AVPacket *pkt parameter, the pktl
 * declaration, the *pkt copy, the free of the list node and the return are
 * missing; every line carries a stray listing number. Appears to pop the
 * head packet off a packet list. Restore from upstream before editing. */
1262 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1263 AVPacketList **pkt_buffer_end,
1267 av_assert0(*pkt_buffer);
1270 *pkt_buffer = pktl->next;
1272 *pkt_buffer_end = NULL;
/* NOTE(review): truncated listing -- the cur_pkt/st declarations, several
 * returns/continues, the got_packet assignment on the no-parser path and
 * closing braces are missing; every line carries a stray listing number.
 * This is the core demux loop: read raw packets, initialize parsers on
 * demand, route packets through parse_packet() or pass them through, then
 * drain s->parse_queue. Restore from upstream before editing. */
1277 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1279 int ret = 0, i, got_packet = 0;
1281 av_init_packet(pkt);
1283 while (!got_packet && !s->parse_queue) {
1287 /* read next packet */
1288 ret = ff_read_packet(s, &cur_pkt);
1290 if (ret == AVERROR(EAGAIN))
1292 /* flush the parsers */
1293 for(i = 0; i < s->nb_streams; i++) {
1295 if (st->parser && st->need_parsing)
1296 parse_packet(s, NULL, st->index);
1298 /* all remaining packets are now in parse_queue =>
1299 * really terminate parsing */
1303 st = s->streams[cur_pkt.stream_index];
1305 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1306 cur_pkt.dts != AV_NOPTS_VALUE &&
1307 cur_pkt.pts < cur_pkt.dts) {
1308 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1309 cur_pkt.stream_index,
1310 av_ts2str(cur_pkt.pts),
1311 av_ts2str(cur_pkt.dts),
1314 if (s->debug & FF_FDEBUG_TS)
1315 av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1316 cur_pkt.stream_index,
1317 av_ts2str(cur_pkt.pts),
1318 av_ts2str(cur_pkt.dts),
1323 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1324 st->parser = av_parser_init(st->codec->codec_id);
1326 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1327 "%s, packets or times may be invalid.\n",
1328 avcodec_get_name(st->codec->codec_id));
1329 /* no parser available: just output the raw packets */
1330 st->need_parsing = AVSTREAM_PARSE_NONE;
1331 } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1332 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1333 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1334 st->parser->flags |= PARSER_FLAG_ONCE;
1335 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
1336 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
1340 if (!st->need_parsing || !st->parser) {
1341 /* no parsing needed: we just output the packet as is */
1343 compute_pkt_fields(s, st, NULL, pkt);
1344 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1345 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1346 ff_reduce_index(s, st->index);
1347 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1350 } else if (st->discard < AVDISCARD_ALL) {
1351 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1355 av_free_packet(&cur_pkt);
1357 if (pkt->flags & AV_PKT_FLAG_KEY)
1358 st->skip_to_keyframe = 0;
1359 if (st->skip_to_keyframe) {
1360 av_free_packet(&cur_pkt);
1365 if (!got_packet && s->parse_queue)
1366 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1368 if(s->debug & FF_FDEBUG_TS)
1369 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1371 av_ts2str(pkt->pts),
1372 av_ts2str(pkt->dts),
/* Public frame reader. Without AVFMT_FLAG_GENPTS it simply drains the packet
 * buffer or calls read_frame_internal(). With GENPTS it buffers packets and
 * fills in missing pts values from the dts of the next non-B frame of the
 * same stream (modulo pts_wrap_bits), handing out a packet only once its pts
 * is resolved or EOF forces the issue. Relative timestamps are rebased before
 * returning to the caller. */
1380 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1382     const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1387         ret = s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
1388                                                          &s->packet_buffer_end,
1390                                  read_frame_internal(s, pkt);
1395         AVPacketList *pktl = s->packet_buffer;
1398             AVPacket *next_pkt = &pktl->pkt;
1400             if (next_pkt->dts != AV_NOPTS_VALUE) {
1401                 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1402                 // last dts seen for this stream. if any of packets following
1403                 // current one had no dts, we will set this to AV_NOPTS_VALUE.
1404                 int64_t last_dts = next_pkt->dts;
1405                 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1406                     if (pktl->pkt.stream_index == next_pkt->stream_index &&
1407                         (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1408                         if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1409                             next_pkt->pts = pktl->pkt.dts;
1411                         if (last_dts != AV_NOPTS_VALUE) {
1412                             // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1413                             last_dts = pktl->pkt.dts;
1418                 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1419                     // Fix the missing-pts issue on the last reference frame (for MXF etc.).
1420                     // We only do this when
1422                     // 2. we are not able to resolve a pts value for the current packet.
1423                     // 3. the packets for this stream at the end of the file had valid dts.
1424                     next_pkt->pts = last_dts + next_pkt->duration;
1426                 pktl = s->packet_buffer;
1429             /* read packet from packet buffer, if there is data */
1430             if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1431                   next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1432                 ret = read_from_packet_buffer(&s->packet_buffer,
1433                                                &s->packet_buffer_end, pkt);
1438         ret = read_frame_internal(s, pkt);
1440             if (pktl && ret != AVERROR(EAGAIN)) {
         /* queue the fresh packet; duplicate so the buffer owns its data */
1447         if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1448                                         &s->packet_buffer_end)) < 0)
1449             return AVERROR(ENOMEM);
     /* convert internal relative timestamps back to the caller's origin */
1453     if (is_relative(pkt->dts))
1454         pkt->dts -= RELATIVE_TS_BASE;
1455     if (is_relative(pkt->pts))
1456         pkt->pts -= RELATIVE_TS_BASE;
/* XXX: suppress the packet queue */
/* Free all three internal packet queues (parse, regular, raw) and restore
 * the raw-packet probing budget to its initial value. */
1461 static void flush_packet_queue(AVFormatContext *s)
1463     free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1464     free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1465     free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1467     s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1470 /*******************************************************/
/* Pick the default stream for seeking: prefer the first video stream that is
 * not an attached picture; otherwise fall back to the first audio stream,
 * and finally to stream 0. */
1473 int av_find_default_stream_index(AVFormatContext *s)
1475     int first_audio_index = -1;
1479     if (s->nb_streams <= 0)
1481     for(i = 0; i < s->nb_streams; i++) {
1483         if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1484             !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1487         if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1488             first_audio_index = i;
1490     return first_audio_index >= 0 ? first_audio_index : 0;
 * Flush the frame reader: drop every queued packet and reset per-stream
 * parser/timestamp state so that reading can restart cleanly (e.g. after a
 * seek).
1496 void ff_read_frame_flush(AVFormatContext *s)
1501     flush_packet_queue(s);
1503     /* for each stream, reset read state */
1504     for(i = 0; i < s->nb_streams; i++) {
1508             av_parser_close(st->parser);
1511         st->last_IP_pts = AV_NOPTS_VALUE;
1512         if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1513         else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1514         st->reference_dts = AV_NOPTS_VALUE;
1516         st->probe_packets = MAX_PROBE_PACKETS;
         /* invalidate the pts reorder buffer as well */
1518         for(j=0; j<MAX_REORDER_DELAY+1; j++)
1519             st->pts_buffer[j]= AV_NOPTS_VALUE;
/* Propagate a new current DTS to every stream, rescaling the timestamp from
 * the reference stream's time base into each stream's own time base. */
1523 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1527     for(i = 0; i < s->nb_streams; i++) {
1528         AVStream *st = s->streams[i];
1530         st->cur_dts = av_rescale(timestamp,
1531                                  st->time_base.den * (int64_t)ref_st->time_base.num,
1532                                  st->time_base.num * (int64_t)ref_st->time_base.den);
/* Keep the seek index within s->max_index_size bytes: when the entry count
 * hits the limit, discard every other entry (halving the index density). */
1536 void ff_reduce_index(AVFormatContext *s, int stream_index)
1538     AVStream *st= s->streams[stream_index];
1539     unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1541     if((unsigned)st->nb_index_entries >= max_entries){
1543         for(i=0; 2*i<st->nb_index_entries; i++)
1544             st->index_entries[i]= st->index_entries[2*i];
1545         st->nb_index_entries= i;
/* Insert (or update) a seek-index entry kept sorted by timestamp. Grows the
 * entries array as needed, appends when the timestamp is the largest seen so
 * far, otherwise inserts at the binary-search position; an entry with an
 * identical timestamp is overwritten in place. Returns the entry's index
 * (negative on allocation failure — elided in this excerpt). */
1549 int ff_add_index_entry(AVIndexEntry **index_entries,
1550                        int *nb_index_entries,
1551                        unsigned int *index_entries_allocated_size,
1552                        int64_t pos, int64_t timestamp, int size, int distance, int flags)
1554     AVIndexEntry *entries, *ie;
     /* guard against integer overflow when growing the array */
1557     if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1560     if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1561         timestamp -= RELATIVE_TS_BASE;
1563     entries = av_fast_realloc(*index_entries,
1564                               index_entries_allocated_size,
1565                               (*nb_index_entries + 1) *
1566                               sizeof(AVIndexEntry));
1570     *index_entries= entries;
1572     index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
     /* append path: timestamp is larger than anything already indexed */
1575         index= (*nb_index_entries)++;
1576         ie= &entries[index];
1577         assert(index==0 || ie[-1].timestamp < timestamp);
1579         ie= &entries[index];
1580         if(ie->timestamp != timestamp){
1581             if(ie->timestamp <= timestamp)
1583             memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1584             (*nb_index_entries)++;
1585         }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1586             distance= ie->min_distance;
1590     ie->timestamp = timestamp;
1591     ie->min_distance= distance;
/* Public wrapper: add a seek-index entry to a stream by delegating to
 * ff_add_index_entry() with the stream's own index storage. */
1598 int av_add_index_entry(AVStream *st,
1599                        int64_t pos, int64_t timestamp, int size, int distance, int flags)
1601     return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1602                               &st->index_entries_allocated_size, pos,
1603                               timestamp, size, distance, flags);
/* Binary-search the sorted index entries for wanted_timestamp. With
 * AVSEEK_FLAG_BACKWARD the entry at or before the target is chosen, otherwise
 * the one at or after it; unless AVSEEK_FLAG_ANY is set, the result is then
 * walked to the nearest keyframe entry in the search direction. */
1606 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1607                               int64_t wanted_timestamp, int flags)
1615     //optimize appending index entries at the end
1616     if(b && entries[b-1].timestamp < wanted_timestamp)
1621         timestamp = entries[m].timestamp;
1622         if(timestamp >= wanted_timestamp)
1624         if(timestamp <= wanted_timestamp)
1627     m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1629     if(!(flags & AVSEEK_FLAG_ANY)){
1630         while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1631             m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Public wrapper around ff_index_search_timestamp() using the stream's own
 * index entries. */
1640 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1643     return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1644                                      wanted_timestamp, flags);
/* Seek by binary search over read_timestamp(). Cached index entries are used
 * to narrow the initial [pos_min, pos_max] / [ts_min, ts_max] search window
 * before handing off to ff_gen_search(); on success the demuxer is flushed
 * and cur_dts is updated for all streams. */
1647 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1649     AVInputFormat *avif= s->iformat;
1650     int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1651     int64_t ts_min, ts_max, ts;
1656     if (stream_index < 0)
1659     av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1662     ts_min= AV_NOPTS_VALUE;
1663     pos_limit= -1; //gcc falsely says it may be uninitialized
1665     st= s->streams[stream_index];
1666     if(st->index_entries){
     /* lower bound from the cached index: last keyframe at or before target */
1669         index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1670         index= FFMAX(index, 0);
1671         e= &st->index_entries[index];
1673         if(e->timestamp <= target_ts || e->pos == e->min_distance){
1675             ts_min= e->timestamp;
1676             av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1677                     pos_min, av_ts2str(ts_min));
     /* upper bound: first index entry at or after the target */
1682         index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1683         assert(index < st->nb_index_entries);
1685             e= &st->index_entries[index];
1686             assert(e->timestamp >= target_ts);
1688             ts_max= e->timestamp;
1689             pos_limit= pos_max - e->min_distance;
1690             av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
1691                     pos_max, pos_limit, av_ts2str(ts_max));
1695     pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1700     if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1703     ff_read_frame_flush(s);
1704     ff_update_cur_dts(s, st, ts);
/* Generic timestamp search over a byte range. Establishes ts_min/ts_max by
 * probing the stream start and end with read_timestamp() when the caller did
 * not supply them, then narrows the window using linear interpolation first,
 * bisection if interpolation stalls, and linear scanning as a last resort.
 * Returns the byte position to seek to and stores the matched timestamp in
 * *ts_ret (several bookkeeping lines are elided in this excerpt). */
1709 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1710                       int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1711                       int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1712                       int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1715     int64_t start_pos, filesize;
1718     av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1720     if(ts_min == AV_NOPTS_VALUE){
1721         pos_min = s->data_offset;
1722         ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1723         if (ts_min == AV_NOPTS_VALUE)
1727     if(ts_min >= target_ts){
1732     if(ts_max == AV_NOPTS_VALUE){
     /* probe backwards from EOF in growing steps until a timestamp is found */
1734         filesize = avio_size(s->pb);
1735         pos_max = filesize - 1;
1738             ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1740         }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1741         if (ts_max == AV_NOPTS_VALUE)
     /* extend ts_max to the timestamp of the very last frame if one follows */
1745             int64_t tmp_pos= pos_max + 1;
1746             int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1747             if(tmp_ts == AV_NOPTS_VALUE)
1751             if(tmp_pos >= filesize)
1757     if(ts_max <= target_ts){
1762     if(ts_min > ts_max){
1764     }else if(ts_min == ts_max){
1769     while (pos_min < pos_limit) {
1770         av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1771                 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1772         assert(pos_limit <= pos_max);
1775             int64_t approximate_keyframe_distance= pos_max - pos_limit;
1776             // interpolate position (better than dichotomy)
1777             pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1778                 + pos_min - approximate_keyframe_distance;
1779         }else if(no_change==1){
1780             // bisection, if interpolation failed to change min or max pos last time
1781             pos = (pos_min + pos_limit)>>1;
1783             /* linear search if bisection failed, can only happen if there
1784                are very few or no keyframes between min/max */
1789         else if(pos > pos_limit)
1793         ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1798         av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1799                 pos_min, pos, pos_max,
1800                 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1801                 pos_limit, start_pos, no_change);
1802         if(ts == AV_NOPTS_VALUE){
1803             av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1806         assert(ts != AV_NOPTS_VALUE);
1807         if (target_ts <= ts) {
1808             pos_limit = start_pos - 1;
1812         if (target_ts >= ts) {
1818     pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1819     ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min  : ts_max;
     /* debug re-read of the final bracketing timestamps (pos_min is advanced
      * between these two calls in the elided line) */
1822     ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1824     ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1825     av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1826             pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
/* Seek to an absolute byte position, clamped to [data_offset, file size - 1]. */
1832 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1833     int64_t pos_min, pos_max;
1835     pos_min = s->data_offset;
1836     pos_max = avio_size(s->pb) - 1;
1838     if     (pos < pos_min) pos= pos_min;
1839     else if(pos > pos_max) pos= pos_max;
1841     avio_seek(s->pb, pos, SEEK_SET);
/* Index-based generic seek. Looks the target up in the stream's seek index;
 * if the target lies beyond the indexed range, reads frames forward (letting
 * the generic index grow) until the timestamp is covered, giving up after
 * ~1000 non-keyframes past the target. Finally seeks to the located index
 * entry and updates cur_dts. */
1846 static int seek_frame_generic(AVFormatContext *s,
1847                               int stream_index, int64_t timestamp, int flags)
1854     st = s->streams[stream_index];
1856     index = av_index_search_timestamp(st, timestamp, flags);
1858     if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1861     if(index < 0 || index==st->nb_index_entries-1){
     /* target beyond the index: reposition at the last known entry (or the
      * start of data) and read forward to extend the index */
1865         if(st->nb_index_entries){
1866             assert(st->index_entries);
1867             ie= &st->index_entries[st->nb_index_entries-1];
1868             if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1870             ff_update_cur_dts(s, st, ie->timestamp);
1872             if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1878                 read_status = av_read_frame(s, &pkt);
1879             } while (read_status == AVERROR(EAGAIN));
1880             if (read_status < 0)
1882             av_free_packet(&pkt);
1883             if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1884                 if(pkt.flags & AV_PKT_FLAG_KEY)
1886                 if(nonkey++ > 1000 && st->codec->codec_id != CODEC_ID_CDGRAPHICS){
1887                     av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1892         index = av_index_search_timestamp(st, timestamp, flags);
1897     ff_read_frame_flush(s);
1898     AV_NOWARN_DEPRECATED(
1899     if (s->iformat->read_seek){
1900         if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1904     ie = &st->index_entries[index];
1905     if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1907     ff_update_cur_dts(s, st, ie->timestamp);
/* Dispatch a seek request: byte seeks go to seek_frame_byte(); otherwise the
 * timestamp (rescaled from AV_TIME_BASE when no stream was specified) is
 * tried against the demuxer's read_seek, then binary search, then the generic
 * index-based seek, in that order of preference. */
1912 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1913                                int64_t timestamp, int flags)
1918     if (flags & AVSEEK_FLAG_BYTE) {
1919         if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1921         ff_read_frame_flush(s);
1922         return seek_frame_byte(s, stream_index, timestamp, flags);
1925     if(stream_index < 0){
1926         stream_index= av_find_default_stream_index(s);
1927         if(stream_index < 0)
1930         st= s->streams[stream_index];
1931         /* timestamp for default must be expressed in AV_TIME_BASE units */
1932         timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1935     /* first, we try the format specific seek */
1936     AV_NOWARN_DEPRECATED(
1937     if (s->iformat->read_seek) {
1938         ff_read_frame_flush(s);
1939         ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1947     if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1948         ff_read_frame_flush(s);
1949         return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1950     } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1951         ff_read_frame_flush(s);
1952         return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public seek entry point: delegate to seek_frame_internal() and, on success
 * (elided condition), re-queue attached pictures for output. */
1958 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1960     int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1963         queue_attached_pictures(s);
/* New-style seek API with a [min_ts, max_ts] tolerance window around ts.
 * Prefers the demuxer's read_seek2; otherwise falls back to av_seek_frame(),
 * choosing the seek direction from which bound ts is closer to and retrying
 * once in the opposite direction if the first attempt fails. */
1968 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1970     if(min_ts > ts || max_ts < ts)
1973     if (s->iformat->read_seek2) {
1975         ff_read_frame_flush(s);
1976         ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1979             queue_attached_pictures(s);
1983     if(s->iformat->read_timestamp){
1984         //try to seek via read_timestamp()
1987     //Fallback to old API if new is not implemented but old is
1988     //Note the old API has somewhat different semantics
1989     AV_NOWARN_DEPRECATED(
1990     if (s->iformat->read_seek || 1) {
1991         int dir = (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0);
1992         int ret = av_seek_frame(s, stream_index, ts, flags | dir);
1993         if (ret<0 && ts != min_ts && max_ts != ts) {
1994             ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
1996                 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
2002     // try some generic seek like seek_frame_generic() but with new ts semantics
2005 /*******************************************************/
 * Return TRUE if any stream (or the container itself) has an accurate
 * duration set.
 * @return TRUE if at least one component has a known duration.
2012 static int has_duration(AVFormatContext *ic)
2017     for(i = 0;i < ic->nb_streams; i++) {
2018         st = ic->streams[i];
2019         if (st->duration != AV_NOPTS_VALUE)
2022     if (ic->duration != AV_NOPTS_VALUE)
 * Estimate the container's start time and duration from the per-stream
 * timings. Subtitle/data streams are tracked separately so an early subtitle
 * cannot pull the global start time backwards by more than a second.
 * Also computes the global bitrate from file size and duration if possible.
2032 static void update_stream_timings(AVFormatContext *ic)
2034     int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2035     int64_t duration, duration1, filesize;
2039     start_time = INT64_MAX;
2040     start_time_text = INT64_MAX;
2041     end_time = INT64_MIN;
2042     duration = INT64_MIN;
2043     for(i = 0;i < ic->nb_streams; i++) {
2044         st = ic->streams[i];
2045         if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2046             start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
2047             if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
2048                 if (start_time1 < start_time_text)
2049                     start_time_text = start_time1;
2051                 start_time = FFMIN(start_time, start_time1);
2052             if (st->duration != AV_NOPTS_VALUE) {
2053                 end_time1 = start_time1
2054                           + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2055                 end_time = FFMAX(end_time, end_time1);
2058         if (st->duration != AV_NOPTS_VALUE) {
2059             duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2060             duration = FFMAX(duration, duration1);
     /* accept the text-stream start time only when it is within one second of
      * the primary streams' start (or when no primary start time exists) */
2063     if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2064         start_time = start_time_text;
2065     else if(start_time > start_time_text)
2066         av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);
2068     if (start_time != INT64_MAX) {
2069         ic->start_time = start_time;
2070         if (end_time != INT64_MIN)
2071             duration = FFMAX(duration, end_time - start_time);
2073     if (duration != INT64_MIN && ic->duration == AV_NOPTS_VALUE) {
2074         ic->duration = duration;
2076     if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2077         /* compute the bitrate */
2078         ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
2079             (double)ic->duration;
/* Recompute the global timings, then copy them (rescaled into each stream's
 * time base) onto every stream that has no start time of its own. */
2083 static void fill_all_stream_timings(AVFormatContext *ic)
2088     update_stream_timings(ic);
2089     for(i = 0;i < ic->nb_streams; i++) {
2090         st = ic->streams[i];
2091         if (st->start_time == AV_NOPTS_VALUE) {
2092             if(ic->start_time != AV_NOPTS_VALUE)
2093                 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2094             if(ic->duration != AV_NOPTS_VALUE)
2095                 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Last-resort timing estimation: derive the container bitrate by summing the
 * per-stream codec bitrates (when not already set), then estimate each
 * stream's duration from file size / bitrate. */
2100 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2102     int64_t filesize, duration;
2106     /* if bit_rate is already set, we believe it */
2107     if (ic->bit_rate <= 0) {
2109         for(i=0;i<ic->nb_streams;i++) {
2110             st = ic->streams[i];
2111             if (st->codec->bit_rate > 0)
2112             bit_rate += st->codec->bit_rate;
2114         ic->bit_rate = bit_rate;
2117     /* if duration is already set, we believe it */
2118     if (ic->duration == AV_NOPTS_VALUE &&
2119         ic->bit_rate != 0) {
2120         filesize = ic->pb ? avio_size(ic->pb) : 0;
2122             for(i = 0; i < ic->nb_streams; i++) {
2123                 st = ic->streams[i];
                 /* duration = 8 * filesize / bit_rate, in stream time base */
2124                 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2125                 if (st->duration == AV_NOPTS_VALUE)
2126                     st->duration = duration;
2132 #define DURATION_MAX_READ_SIZE 250000
2133 #define DURATION_MAX_RETRY 3
2135 /* only usable for MPEG-PS streams */
/* Estimate stream durations by reading packets from the tail of the file:
 * seek near EOF, scan up to DURATION_MAX_READ_SIZE bytes for the last pts of
 * each stream, and derive duration = last_pts - start_time (with one wrap of
 * pts_wrap_bits compensated). Retries with a larger window up to
 * DURATION_MAX_RETRY times, then restores the original file position. */
2136 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2138     AVPacket pkt1, *pkt = &pkt1;
2140     int read_size, i, ret;
2142     int64_t filesize, offset, duration;
2145     /* flush packet queue */
2146     flush_packet_queue(ic);
2148     for (i=0; i<ic->nb_streams; i++) {
2149         st = ic->streams[i];
2150         if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2151             av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2154             av_parser_close(st->parser);
2159     /* estimate the end time (duration) */
2160     /* XXX: may need to support wrapping */
2161     filesize = ic->pb ? avio_size(ic->pb) : 0;
2162     end_time = AV_NOPTS_VALUE;
2164         offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2168         avio_seek(ic->pb, offset, SEEK_SET);
2171             if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2175                 ret = ff_read_packet(ic, pkt);
2176             } while(ret == AVERROR(EAGAIN));
2179             read_size += pkt->size;
2180             st = ic->streams[pkt->stream_index];
2181             if (pkt->pts != AV_NOPTS_VALUE &&
2182                 (st->start_time != AV_NOPTS_VALUE ||
2183                  st->first_dts  != AV_NOPTS_VALUE)) {
2184                 duration = end_time = pkt->pts;
2185                 if (st->start_time != AV_NOPTS_VALUE)
2186                     duration -= st->start_time;
2188                     duration -= st->first_dts;
                 /* negative difference => the timestamp wrapped once */
2190                     duration += 1LL<<st->pts_wrap_bits;
2192                     if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
2193                         st->duration = duration;
2196             av_free_packet(pkt);
2198     }while(   end_time==AV_NOPTS_VALUE
2199            && filesize > (DURATION_MAX_READ_SIZE<<retry)
2200            && ++retry <= DURATION_MAX_RETRY);
2202     fill_all_stream_timings(ic);
2204     avio_seek(ic->pb, old_offset, SEEK_SET);
     /* reset per-stream read state disturbed by the tail scan */
2205     for (i=0; i<ic->nb_streams; i++) {
2207         st->cur_dts= st->first_dts;
2208         st->last_IP_pts = AV_NOPTS_VALUE;
2209         st->reference_dts = AV_NOPTS_VALUE;
/* Choose and run a duration-estimation strategy: tail-pts scanning for
 * seekable MPEG-PS/TS files, per-stream timings when any are present, and a
 * bitrate-based guess otherwise; records which method was used. */
2213 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2217     /* get the file size, if possible */
2218     if (ic->iformat->flags & AVFMT_NOFILE) {
2221         file_size = avio_size(ic->pb);
2222         file_size = FFMAX(0, file_size);
2225     if ((!strcmp(ic->iformat->name, "mpeg") ||
2226          !strcmp(ic->iformat->name, "mpegts")) &&
2227         file_size && ic->pb->seekable) {
2228         /* get accurate estimate from the PTSes */
2229         estimate_timings_from_pts(ic, old_offset);
2230         ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
2231     } else if (has_duration(ic)) {
2232         /* at least one component has timings - we use them for all
2234         fill_all_stream_timings(ic);
2235         ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
2237         av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2238         /* less precise: use bitrate info */
2239         estimate_timings_from_bit_rate(ic);
2240         ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE;
2242     update_stream_timings(ic);
2246         AVStream av_unused *st;
2247         for(i = 0;i < ic->nb_streams; i++) {
2248             st = ic->streams[i];
2249             av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2250                     (double) st->start_time / AV_TIME_BASE,
2251                     (double) st->duration / AV_TIME_BASE);
2253         av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2254                 (double) ic->start_time / AV_TIME_BASE,
2255                 (double) ic->duration / AV_TIME_BASE,
2256                 ic->bit_rate / 1000);
/* Return non-zero when enough codec parameters are known for this stream to
 * be usable: per-type checks (sample rate/channels/sample_fmt for audio,
 * pix_fmt for video, ...) plus a known codec id. */
2260 static int has_codec_parameters(AVStream *st)
2262     AVCodecContext *avctx = st->codec;
2264     switch (avctx->codec_type) {
2265     case AVMEDIA_TYPE_AUDIO:
2266         val = avctx->sample_rate && avctx->channels;
2267         if (!avctx->frame_size && determinable_frame_size(avctx))
         /* a decoder was tried, yet the sample format is still unknown */
2269         if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2272     case AVMEDIA_TYPE_VIDEO:
2274         if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
2277     case AVMEDIA_TYPE_DATA:
2278         if(avctx->codec_id == CODEC_ID_NONE) return 1;
2283     return avctx->codec_id != CODEC_ID_NONE && val != 0;
/* Only H.264 needs its reorder delay guessed. With the H.264 decoder built
 * in, trust has_b_frames once it matches num_reorder_frames from the SPS;
 * otherwise require a number of decoded frames that grows with the apparent
 * reorder depth before trusting the guess. */
2286 static int has_decode_delay_been_guessed(AVStream *st)
2288     if(st->codec->codec_id != CODEC_ID_H264) return 1;
2289 #if CONFIG_H264_DECODER
2290     if(st->codec->has_b_frames &&
2291        avpriv_h264_has_num_reorder_frames(st->codec) == st->codec->has_b_frames)
2294     if(st->codec->has_b_frames<3)
2295         return st->info->nb_decoded_frames >= 6;
2296     else if(st->codec->has_b_frames<4)
2297         return st->info->nb_decoded_frames >= 18;
2299         return st->info->nb_decoded_frames >= 20;
/* Returns 1 if decoded data was produced, 0 if not, or a negative error. */
/* Probe helper for avformat_find_stream_info(): open the stream's decoder on
 * first use (single-threaded, so H.264 SPS/PPS end up in extradata) and feed
 * it the packet until the missing codec parameters and reorder delay have
 * been determined. st->info->found_decoder caches the open attempt
 * (-1 = failed, 1 = opened). */
2303 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2306     int got_picture = 1, ret = 0;
2308     AVPacket pkt = *avpkt;
2310     if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2311         AVDictionary *thread_opt = NULL;
2313         codec = st->codec->codec ? st->codec->codec :
2314                                    avcodec_find_decoder(st->codec->codec_id);
2317             st->info->found_decoder = -1;
2321         /* force thread count to 1 since the h264 decoder will not extract SPS
2322          *  and PPS to extradata during multi-threaded decoding */
2323         av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2324         ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2326             av_dict_free(&thread_opt);
2328             st->info->found_decoder = -1;
2331         st->info->found_decoder = 1;
2332     } else if (!st->info->found_decoder)
2333         st->info->found_decoder = 1;
2335     if (st->info->found_decoder < 0)
     /* keep decoding while data remains (or frames are still being flushed)
      * and some parameter is still undetermined */
2338     while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2340            (!has_codec_parameters(st)  ||
2341            !has_decode_delay_been_guessed(st) ||
2342            (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2344         avcodec_get_frame_defaults(&picture);
2345         switch(st->codec->codec_type) {
2346         case AVMEDIA_TYPE_VIDEO:
2347             ret = avcodec_decode_video2(st->codec, &picture,
2348                                         &got_picture, &pkt);
2350         case AVMEDIA_TYPE_AUDIO:
2351             ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
2358                 st->info->nb_decoded_frames++;
2364     if(!pkt.data && !got_picture)
/* Linear-scan a codec-id/tag table for the fourcc tag matching the given
 * codec id (remaining body elided in this excerpt). */
2369 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2371     while (tags->id != CODEC_ID_NONE) {
/* Reverse lookup: map a fourcc tag to a codec id. First pass matches the tag
 * exactly; second pass retries case-insensitively via avpriv_toupper4(). */
2379 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2382     for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2383         if(tag == tags[i].tag)
2386     for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2387         if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2390     return CODEC_ID_NONE;
/* Search a NULL-terminated list of codec-tag tables for the tag of the given
 * codec id (result handling elided in this excerpt). */
2393 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2396     for(i=0; tags && tags[i]; i++){
2397         int tag= ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of codec-tag tables for the codec id that
 * matches the given tag; CODEC_ID_NONE when no table matches. */
2403 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2406     for(i=0; tags && tags[i]; i++){
2407         enum CodecID id= ff_codec_get_id(tags[i], tag);
2408         if(id!=CODEC_ID_NONE) return id;
2410     return CODEC_ID_NONE;
/* Fill in missing chapter end times: each open-ended chapter is closed at the
 * start of the next chapter that begins after it, bounded by the container's
 * total running time (start_time + duration). */
2413 static void compute_chapters_end(AVFormatContext *s)
2416     int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2418     for (i = 0; i < s->nb_chapters; i++)
2419         if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2420             AVChapter *ch = s->chapters[i];
2421             int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2424             for (j = 0; j < s->nb_chapters; j++) {
2425                 AVChapter *ch1 = s->chapters[j];
2426                 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2427                 if (j != i && next_start > ch->start && next_start < end)
2430             ch->end = (end == INT64_MAX) ? ch->start : end;
/* Map an index onto the table of standard frame rates used for fps guessing:
 * indices below 60*12 cover 1/1001..720/1001 Hz (scaled by 12*1001); the
 * remainder are common integer rates (24, 30, 60, 12, 15, 48) scaled by
 * 1000*12. */
2434 static int get_std_framerate(int i){
2435     if(i<60*12) return (i+1)*1001;
2436     else        return ((const int[]){24,30,60,12,15,48})[i-60*12]*1000*12;
 * Is the time base unreliable?
 * This is a heuristic to balance between quick acceptance of the values in
 * the headers vs. some extra checks.
 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
 * MPEG-2 commonly misuses field repeat flags to store different framerates.
 * And there are "variable" fps files this needs to detect as well.
2447 static int tb_unreliable(AVCodecContext *c){
2448     if(   c->time_base.den >= 101L*c->time_base.num
2449        || c->time_base.den <    5L*c->time_base.num
2450 /*       || c->codec_tag == AV_RL32("DIVX")
2451        || c->codec_tag == AV_RL32("XVID")*/
2452        || c->codec_id == CODEC_ID_MPEG2VIDEO
2453        || c->codec_id == CODEC_ID_H264
2459 #if FF_API_FORMAT_PARAMETERS
/* Deprecated compatibility wrapper for avformat_find_stream_info(). */
2460 int av_find_stream_info(AVFormatContext *ic)
2462     return avformat_find_stream_info(ic, NULL);
2466 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2468 int i, count, ret, read_size, j;
2470 AVPacket pkt1, *pkt;
2471 int64_t old_offset = avio_tell(ic->pb);
2472 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2473 int flush_codecs = 1;
2476 av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
2478 for(i=0;i<ic->nb_streams;i++) {
2480 AVDictionary *thread_opt = NULL;
2481 st = ic->streams[i];
2483 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2484 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2485 /* if(!st->time_base.num)
2487 if(!st->codec->time_base.num)
2488 st->codec->time_base= st->time_base;
2490 //only for the split stuff
2491 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2492 st->parser = av_parser_init(st->codec->codec_id);
2494 if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
2495 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2496 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
2497 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
2499 } else if (st->need_parsing) {
2500 av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
2501 "%s, packets or times may be invalid.\n",
2502 avcodec_get_name(st->codec->codec_id));
2505 codec = st->codec->codec ? st->codec->codec :
2506 avcodec_find_decoder(st->codec->codec_id);
2508 /* force thread count to 1 since the h264 decoder will not extract SPS
2509 * and PPS to extradata during multi-threaded decoding */
2510 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2512 /* Ensure that subtitle_header is properly set. */
2513 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2514 && codec && !st->codec->codec)
2515 avcodec_open2(st->codec, codec, options ? &options[i]
2518 //try to just open decoders, in case this is enough to get parameters
2519 if (!has_codec_parameters(st)) {
2520 if (codec && !st->codec->codec)
2521 avcodec_open2(st->codec, codec, options ? &options[i]
2525 av_dict_free(&thread_opt);
2528 for (i=0; i<ic->nb_streams; i++) {
2529 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2535 if (ff_check_interrupt(&ic->interrupt_callback)){
2537 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2541 /* check if one codec still needs to be handled */
2542 for(i=0;i<ic->nb_streams;i++) {
2543 int fps_analyze_framecount = 20;
2545 st = ic->streams[i];
2546 if (!has_codec_parameters(st))
2548 /* if the timebase is coarse (like the usual millisecond precision
2549 of mkv), we need to analyze more frames to reliably arrive at
2551 if (av_q2d(st->time_base) > 0.0005)
2552 fps_analyze_framecount *= 2;
2553 if (ic->fps_probe_size >= 0)
2554 fps_analyze_framecount = ic->fps_probe_size;
2555 /* variable fps and no guess at the real fps */
2556 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2557 && st->info->duration_count < fps_analyze_framecount
2558 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2560 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2562 if (st->first_dts == AV_NOPTS_VALUE &&
2563 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2564 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2567 if (i == ic->nb_streams) {
2568 /* NOTE: if the format has no header, then we need to read
2569 some packets to get most of the streams, so we cannot
2571 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2572 /* if we found the info for all the codecs, we can stop */
2574 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2579 /* we did not get all the codec info, but we read too much data */
2580 if (read_size >= ic->probesize) {
2582 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2583 for (i = 0; i < ic->nb_streams; i++)
2584 if (!ic->streams[i]->r_frame_rate.num &&
2585 ic->streams[i]->info->duration_count <= 1)
2586 av_log(ic, AV_LOG_WARNING,
2587 "Stream #%d: not enough frames to estimate rate; "
2588 "consider increasing probesize\n", i);
2592 /* NOTE: a new stream can be added there if no header in file
2593 (AVFMTCTX_NOHEADER) */
2594 ret = read_frame_internal(ic, &pkt1);
2595 if (ret == AVERROR(EAGAIN))
2603 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2604 if ((ret = av_dup_packet(pkt)) < 0)
2605 goto find_stream_info_err;
2607 read_size += pkt->size;
2609 st = ic->streams[pkt->stream_index];
2610 if (st->codec_info_nb_frames>1) {
2612 if (st->time_base.den > 0)
2613 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
2614 if (st->avg_frame_rate.num > 0)
2615 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, (AVRational){st->avg_frame_rate.den, st->avg_frame_rate.num}, AV_TIME_BASE_Q));
2617 if (t >= ic->max_analyze_duration) {
2618 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached at %"PRId64"\n", ic->max_analyze_duration, t);
2621 st->info->codec_info_duration += pkt->duration;
2624 int64_t last = st->info->last_dts;
2626 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2627 double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2628 int64_t duration= pkt->dts - last;
2630 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2631 // av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2632 for (i=0; i<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); i++) {
2633 int framerate= get_std_framerate(i);
2634 double sdts= dts*framerate/(1001*12);
2636 int ticks= lrintf(sdts+j*0.5);
2637 double error= sdts - ticks + j*0.5;
2638 st->info->duration_error[j][0][i] += error;
2639 st->info->duration_error[j][1][i] += error*error;
2642 st->info->duration_count++;
2643 // ignore the first 4 values, they might have some random jitter
2644 if (st->info->duration_count > 3)
2645 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2647 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2648 st->info->last_dts = pkt->dts;
2650 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2651 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2652 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2653 st->codec->extradata_size= i;
2654 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2655 if (!st->codec->extradata)
2656 return AVERROR(ENOMEM);
2657 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2658 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2662 /* if still no information, we try to open the codec and to
2663 decompress the frame. We try to avoid that in most cases as
2664 it takes longer and uses more memory. For MPEG-4, we need to
2665 decompress for QuickTime.
2667 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2668 least one frame of codec data, this makes sure the codec initializes
2669 the channel configuration and does not only trust the values from the container.
2671 try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2673 st->codec_info_nb_frames++;
2678 AVPacket empty_pkt = { 0 };
2680 av_init_packet(&empty_pkt);
2682 ret = -1; /* we could not have all the codec parameters before EOF */
2683 for(i=0;i<ic->nb_streams;i++) {
2684 st = ic->streams[i];
2686 /* flush the decoders */
2687 if (st->info->found_decoder == 1) {
2689 err = try_decode_frame(st, &empty_pkt,
2690 (options && i < orig_nb_streams) ?
2691 &options[i] : NULL);
2692 } while (err > 0 && !has_codec_parameters(st));
2695 av_log(ic, AV_LOG_INFO,
2696 "decoding for stream %d failed\n", st->index);
2700 if (!has_codec_parameters(st)){
2702 avcodec_string(buf, sizeof(buf), st->codec, 0);
2703 av_log(ic, AV_LOG_WARNING,
2704 "Could not find codec parameters (%s)\n", buf);
2711 // close codecs which were opened in try_decode_frame()
2712 for(i=0;i<ic->nb_streams;i++) {
2713 st = ic->streams[i];
2714 avcodec_close(st->codec);
2716 for(i=0;i<ic->nb_streams;i++) {
2717 st = ic->streams[i];
2718 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2719 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
2720 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2721 if(ff_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
2722 st->codec->codec_tag= tag;
2725 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2726 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2727 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2728 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2729 // the check for tb_unreliable() is not completely correct, since this is not about handling
2730 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2731 // ipmovie.c produces.
2732 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
2733 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2734 if (st->info->duration_count && !st->r_frame_rate.num
2735 && tb_unreliable(st->codec) /*&&
2736 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2737 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2739 double best_error= 0.01;
2741 for (j=0; j<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); j++) {
2744 if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
2746 if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
2749 int n= st->info->duration_count;
2750 double a= st->info->duration_error[k][0][j] / n;
2751 double error= st->info->duration_error[k][1][j]/n - a*a;
2753 if(error < best_error && best_error> 0.000000001){
2755 num = get_std_framerate(j);
2758 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
2761 // do not increase frame rate by more than 1 % in order to match a standard rate.
2762 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2763 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2766 if (!st->r_frame_rate.num){
2767 if( st->codec->time_base.den * (int64_t)st->time_base.num
2768 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2769 st->r_frame_rate.num = st->codec->time_base.den;
2770 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2772 st->r_frame_rate.num = st->time_base.den;
2773 st->r_frame_rate.den = st->time_base.num;
2776 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2777 if(!st->codec->bits_per_coded_sample)
2778 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2779 // set stream disposition based on audio service type
2780 switch (st->codec->audio_service_type) {
2781 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2782 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2783 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2784 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2785 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2786 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2787 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2788 st->disposition = AV_DISPOSITION_COMMENT; break;
2789 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2790 st->disposition = AV_DISPOSITION_KARAOKE; break;
2795 estimate_timings(ic, old_offset);
2797 compute_chapters_end(ic);
2799 find_stream_info_err:
2800 for (i=0; i < ic->nb_streams; i++) {
2801 if (ic->streams[i]->codec)
2802 ic->streams[i]->codec->thread_count = 0;
2803 av_freep(&ic->streams[i]->info);
2806 av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
/* Return the program that follows 'last' and contains stream index 's',
 * scanning ic->programs in order; used to iterate over all programs a
 * stream belongs to.
 * NOTE(review): several body lines (including the final return for the
 * not-found case) are elided in this excerpt. */
2810 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
2814 for (i = 0; i < ic->nb_programs; i++) {
/* skip programs up to and including 'last' before matching again */
2815 if (ic->programs[i] == last) {
/* linear search of this program's stream index list for 's' */
2819 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2820 if (ic->programs[i]->stream_index[j] == s)
2821 return ic->programs[i];
/* Pick the "best" stream of the given media type.
 * Preference visible here: the stream with the largest codec_info_nb_frames
 * wins; hearing/visually-impaired streams are deprioritized; when
 * related_stream is given (and no explicit wanted stream), the search is
 * first restricted to the program containing that stream.
 * Returns the stream index, AVERROR_STREAM_NOT_FOUND, or
 * AVERROR_DECODER_NOT_FOUND; optionally reports the matching decoder. */
2827 int av_find_best_stream(AVFormatContext *ic,
2828 enum AVMediaType type,
2829 int wanted_stream_nb,
2831 AVCodec **decoder_ret,
2834 int i, nb_streams = ic->nb_streams;
2835 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2836 unsigned *program = NULL;
2837 AVCodec *decoder = NULL, *best_decoder = NULL;
/* restrict the candidate set to the program of related_stream, if any */
2839 if (related_stream >= 0 && wanted_stream_nb < 0) {
2840 AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
2842 program = p->stream_index;
2843 nb_streams = p->nb_stream_indexes;
2846 for (i = 0; i < nb_streams; i++) {
/* 'program' maps loop index -> real stream index when program-restricted */
2847 int real_stream_index = program ? program[i] : i;
2848 AVStream *st = ic->streams[real_stream_index];
2849 AVCodecContext *avctx = st->codec;
2850 if (avctx->codec_type != type)
2852 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
/* accessibility renditions are not chosen as the default stream */
2854 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2857 decoder = avcodec_find_decoder(st->codec->codec_id);
/* remember the failure but keep scanning other candidates */
2860 ret = AVERROR_DECODER_NOT_FOUND;
/* keep the candidate with the most frames seen during probing */
2864 if (best_count >= st->codec_info_nb_frames)
2866 best_count = st->codec_info_nb_frames;
2867 ret = real_stream_index;
2868 best_decoder = decoder;
/* nothing found inside the program: restart over all streams */
2869 if (program && i == nb_streams - 1 && ret < 0) {
2871 nb_streams = ic->nb_streams;
2872 i = 0; /* no related stream found, try again with everything */
2876 *decoder_ret = best_decoder;
2880 /*******************************************************/
/* Start/resume playing a (typically network) stream.
 * Prefers the demuxer's own read_play callback; otherwise falls back to
 * un-pausing the underlying protocol.  Returns AVERROR(ENOSYS) when
 * neither mechanism is available. */
2882 int av_read_play(AVFormatContext *s)
2884 if (s->iformat->read_play)
2885 return s->iformat->read_play(s);
/* NOTE(review): the guard for this fallback (presumably a check that
 * s->pb is set) is elided in this excerpt */
2887 return avio_pause(s->pb, 0);
2888 return AVERROR(ENOSYS);
/* Pause a playing stream; mirror image of av_read_play().
 * Prefers the demuxer's read_pause callback, else pauses the underlying
 * protocol; AVERROR(ENOSYS) when neither exists. */
2891 int av_read_pause(AVFormatContext *s)
2893 if (s->iformat->read_pause)
2894 return s->iformat->read_pause(s);
/* NOTE(review): the guard for this fallback (presumably "if (s->pb)")
 * is elided in this excerpt */
2896 return avio_pause(s->pb, 1);
2897 return AVERROR(ENOSYS);
/* Free an AVFormatContext and everything it owns: per-stream parsers,
 * attached pictures, codec contexts, metadata and index entries; then
 * programs, chapters, global metadata and the stream array itself. */
2900 void avformat_free_context(AVFormatContext *s)
/* release AVOption-allocated strings in the (de)muxer private data first */
2906 if (s->iformat && s->iformat->priv_class && s->priv_data)
2907 av_opt_free(s->priv_data);
2909 for(i=0;i<s->nb_streams;i++) {
2910 /* free all data in a stream component */
2913 av_parser_close(st->parser);
2915 if (st->attached_pic.data)
2916 av_free_packet(&st->attached_pic);
2917 av_dict_free(&st->metadata);
2918 av_freep(&st->index_entries);
/* extradata/subtitle_header are owned by the stream's codec context */
2919 av_freep(&st->codec->extradata);
2920 av_freep(&st->codec->subtitle_header);
2921 av_freep(&st->codec);
2922 av_freep(&st->priv_data);
2923 av_freep(&st->info);
2926 for(i=s->nb_programs-1; i>=0; i--) {
2927 av_dict_free(&s->programs[i]->metadata);
2928 av_freep(&s->programs[i]->stream_index);
2929 av_freep(&s->programs[i]);
2931 av_freep(&s->programs);
2932 av_freep(&s->priv_data);
2933 while(s->nb_chapters--) {
2934 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2935 av_freep(&s->chapters[s->nb_chapters]);
2937 av_freep(&s->chapters);
2938 av_dict_free(&s->metadata);
2939 av_freep(&s->streams);
#if FF_API_CLOSE_INPUT_FILE
/* Deprecated compatibility wrapper: forwards to avformat_close_input().
 * Kept only while FF_API_CLOSE_INPUT_FILE is enabled. */
2944 void av_close_input_file(AVFormatContext *s)
2946 avformat_close_input(&s);
/* Close a demuxed input: flush queued packets, call the demuxer's
 * read_close callback, and free the whole context. */
2950 void avformat_close_input(AVFormatContext **ps)
2952 AVFormatContext *s = *ps;
/* do not touch the I/O context when the caller owns it: AVFMT_NOFILE
 * formats and user-supplied custom I/O (AVFMT_FLAG_CUSTOM_IO) */
2953 AVIOContext *pb = (s->iformat && (s->iformat->flags & AVFMT_NOFILE)) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2955 flush_packet_queue(s);
2956 if (s->iformat && (s->iformat->read_close))
2957 s->iformat->read_close(s);
2958 avformat_free_context(s);
#if FF_API_NEW_STREAM
/* Deprecated compatibility wrapper around avformat_new_stream().
 * NOTE(review): the lines assigning 'id' to the new stream and returning
 * it are elided in this excerpt. */
2965 AVStream *av_new_stream(AVFormatContext *s, int id)
2967 AVStream *st = avformat_new_stream(s, NULL);
/* Append a new AVStream to 's' and initialize it with safe defaults
 * (unknown timestamps, MPEG-like 90kHz pts precision, empty pts reorder
 * buffer).  'c' is passed to avcodec_alloc_context3() so codec defaults
 * can be applied.  Returns the new stream (error paths partially elided
 * in this excerpt). */
2974 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
/* guard the array growth below against overflow */
2980 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2982 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2985 s->streams = streams;
2987 st = av_mallocz(sizeof(AVStream));
2990 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2994 st->info->last_dts = AV_NOPTS_VALUE;
2996 st->codec = avcodec_alloc_context3(c);
2998 /* no default bitrate if decoding */
2999 st->codec->bit_rate = 0;
3001 st->index = s->nb_streams;
3002 st->start_time = AV_NOPTS_VALUE;
3003 st->duration = AV_NOPTS_VALUE;
3004 /* we set the current DTS to 0 so that formats without any timestamps
3005 but durations get some timestamps, formats with some unknown
3006 timestamps have their first few packets buffered and the
3007 timestamps corrected before they are returned to the user */
3008 st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
3009 st->first_dts = AV_NOPTS_VALUE;
3010 st->probe_packets = MAX_PROBE_PACKETS;
3012 /* default pts setting is MPEG-like */
3013 avpriv_set_pts_info(st, 33, 1, 90000);
3014 st->last_IP_pts = AV_NOPTS_VALUE;
/* no reordered timestamps known yet */
3015 for(i=0; i<MAX_REORDER_DELAY+1; i++)
3016 st->pts_buffer[i]= AV_NOPTS_VALUE;
3017 st->reference_dts = AV_NOPTS_VALUE;
/* 0/1 == "unknown" sample aspect ratio */
3019 st->sample_aspect_ratio = (AVRational){0,1};
3021 s->streams[s->nb_streams++] = st;
/* Find the program with the given id in 'ac', or create and register a
 * new one (find-or-create semantics; the return is elided here). */
3025 AVProgram *av_new_program(AVFormatContext *ac, int id)
3027 AVProgram *program=NULL;
3030 av_dlog(ac, "new_program: id=0x%04x\n", id);
/* reuse an existing program with the same id if present */
3032 for(i=0; i<ac->nb_programs; i++)
3033 if(ac->programs[i]->id == id)
3034 program = ac->programs[i];
3037 program = av_mallocz(sizeof(AVProgram));
3040 dynarray_add(&ac->programs, &ac->nb_programs, program);
3041 program->discard = AVDISCARD_NONE;
/* Find the chapter with the given id, or create and register a new one,
 * then (re)set its title, time base and start time (end-time assignment
 * and return are elided in this excerpt). */
3048 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
3050 AVChapter *chapter = NULL;
/* reuse an existing chapter with the same id if present */
3053 for(i=0; i<s->nb_chapters; i++)
3054 if(s->chapters[i]->id == id)
3055 chapter = s->chapters[i];
3058 chapter= av_mallocz(sizeof(AVChapter));
3061 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
/* overwrite any previous title for this chapter id */
3063 av_dict_set(&chapter->metadata, "title", title, 0);
3065 chapter->time_base= time_base;
3066 chapter->start = start;
3072 /************************************************************/
3073 /* output media file */
/* Allocate an output AVFormatContext.  The muxer is chosen, in order of
 * preference, from: the explicit 'oformat', the 'format' short name, or
 * a guess based on 'filename'.  Muxer private data is allocated and its
 * AVOption defaults applied.  On failure the partially built context is
 * freed and a negative AVERROR code returned. */
3075 int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
3076 const char *format, const char *filename)
3078 AVFormatContext *s = avformat_alloc_context();
/* look the muxer up by its short name when one was given */
3087 oformat = av_guess_format(format, NULL, NULL);
3089 av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
3090 ret = AVERROR(EINVAL);
/* otherwise guess the muxer from the output filename (extension) */
3094 oformat = av_guess_format(NULL, filename, NULL);
3096 ret = AVERROR(EINVAL);
3097 av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
3104 s->oformat = oformat;
3105 if (s->oformat->priv_data_size > 0) {
3106 s->priv_data = av_mallocz(s->oformat->priv_data_size);
/* install the muxer's AVClass so av_opt_* work on the private data */
3109 if (s->oformat->priv_class) {
3110 *(const AVClass**)s->priv_data= s->oformat->priv_class;
3111 av_opt_set_defaults(s->priv_data);
3114 s->priv_data = NULL;
3117 av_strlcpy(s->filename, filename, sizeof(s->filename));
/* shared error path: elided labels above jump here on allocation failure */
3121 av_log(s, AV_LOG_ERROR, "Out of memory\n");
3122 ret = AVERROR(ENOMEM);
3124 avformat_free_context(s);
#if FF_API_ALLOC_OUTPUT_CONTEXT
/* Deprecated wrapper: same as avformat_alloc_output_context2() but with
 * no error-code reporting — returns NULL on any failure. */
3129 AVFormatContext *avformat_alloc_output_context(const char *format,
3130 AVOutputFormat *oformat, const char *filename)
3132 AVFormatContext *avctx;
3133 int ret = avformat_alloc_output_context2(&avctx, oformat, format, filename);
3134 return ret < 0 ? NULL : avctx;
/* Validate the stream's codec_tag against the muxer's codec_tag tables.
 * The decision matrix is documented in the original comment below;
 * return statements for the individual cases are elided in this excerpt. */
3138 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
3140 const AVCodecTag *avctag;
3142 enum CodecID id = CODEC_ID_NONE;
3143 unsigned int tag = 0;
3146 * Check that tag + id is in the table
3147 * If neither is in the table -> OK
3148 * If tag is in the table with another id -> FAIL
3149 * If id is in the table with another tag -> FAIL unless strict < normal
3151 for (n = 0; s->oformat->codec_tag[n]; n++) {
3152 avctag = s->oformat->codec_tag[n];
/* each table is terminated by a CODEC_ID_NONE entry */
3153 while (avctag->id != CODEC_ID_NONE) {
/* tags are matched case-insensitively (FOURCC normalization) */
3154 if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
3156 if (id == st->codec->codec_id)
3159 if (avctag->id == st->codec->codec_id)
3164 if (id != CODEC_ID_NONE)
3166 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
/* Apply caller options, sanity-check every stream (sample rate, time
 * base, dimensions, aspect ratio, codec tag), write the muxer header,
 * and initialize per-stream fractional PTS generation.
 * NOTE(review): goto targets, closing braces and several guards are
 * elided in this excerpt; comments describe only the visible logic. */
3171 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
3175 AVDictionary *tmp = NULL;
/* work on a copy so consumed entries can be reported back to the caller */
3178 av_dict_copy(&tmp, *options, 0);
3179 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
/* forward options to the muxer's private context when it has an AVClass */
3181 if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
3182 (ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3185 // some sanity checks
3186 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
3187 av_log(s, AV_LOG_ERROR, "no streams\n");
3188 ret = AVERROR(EINVAL);
3192 for(i=0;i<s->nb_streams;i++) {
3195 switch (st->codec->codec_type) {
3196 case AVMEDIA_TYPE_AUDIO:
3197 if(st->codec->sample_rate<=0){
3198 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
3199 ret = AVERROR(EINVAL);
/* derive block_align for fixed-size PCM-style codecs when unset */
3202 if(!st->codec->block_align)
3203 st->codec->block_align = st->codec->channels *
3204 av_get_bits_per_sample(st->codec->codec_id) >> 3;
3206 case AVMEDIA_TYPE_VIDEO:
3207 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
3208 av_log(s, AV_LOG_ERROR, "time base not set\n");
3209 ret = AVERROR(EINVAL);
3212 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
3213 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
3214 ret = AVERROR(EINVAL);
/* reject SARs that differ by more than ~0.4% between stream and codec */
3217 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)
3218 && FFABS(av_q2d(st->sample_aspect_ratio) - av_q2d(st->codec->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
3220 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
3221 "(%d/%d) and encoder layer (%d/%d)\n",
3222 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3223 st->codec->sample_aspect_ratio.num,
3224 st->codec->sample_aspect_ratio.den);
3225 ret = AVERROR(EINVAL);
3231 if(s->oformat->codec_tag){
3232 if( st->codec->codec_tag
3233 && st->codec->codec_id == CODEC_ID_RAWVIDEO
3234 && (av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 || av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) ==MKTAG('r', 'a', 'w', ' '))
3235 && !validate_codec_tag(s, st)){
3236 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi/mov, we override it here
3237 st->codec->codec_tag= 0;
3239 if(st->codec->codec_tag){
3240 if (!validate_codec_tag(s, st)) {
3241 char tagbuf[32], cortag[32];
3242 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
3243 av_get_codec_tag_string(cortag, sizeof(cortag), av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id));
3244 av_log(s, AV_LOG_ERROR,
3245 "Tag %s/0x%08x incompatible with output codec id '%d' (%s)\n",
3246 tagbuf, st->codec->codec_tag, st->codec->codec_id, cortag);
3247 ret = AVERROR_INVALIDDATA;
/* no tag supplied by the caller: derive one from the muxer's tables */
3251 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
3254 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
3255 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
3256 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
/* late allocation of muxer private data if the caller did not do it */
3259 if (!s->priv_data && s->oformat->priv_data_size > 0) {
3260 s->priv_data = av_mallocz(s->oformat->priv_data_size);
3261 if (!s->priv_data) {
3262 ret = AVERROR(ENOMEM);
3265 if (s->oformat->priv_class) {
3266 *(const AVClass**)s->priv_data= s->oformat->priv_class;
3267 av_opt_set_defaults(s->priv_data);
3268 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3273 /* set muxer identification string */
3274 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
3275 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
3278 if(s->oformat->write_header){
3279 ret = s->oformat->write_header(s);
3284 /* init PTS generation */
3285 for(i=0;i<s->nb_streams;i++) {
3286 int64_t den = AV_NOPTS_VALUE;
3289 switch (st->codec->codec_type) {
3290 case AVMEDIA_TYPE_AUDIO:
3291 den = (int64_t)st->time_base.num * st->codec->sample_rate;
3293 case AVMEDIA_TYPE_VIDEO:
3294 den = (int64_t)st->time_base.num * st->codec->time_base.den;
3299 if (den != AV_NOPTS_VALUE) {
3301 ret = AVERROR_INVALIDDATA;
3304 frac_init(&st->pts, 0, 0, den);
/* hand the unconsumed options back to the caller (elided path) */
3309 av_dict_free(options);
3318 //FIXME merge with compute_pkt_fields
/* Fill in missing duration/pts/dts on a packet about to be muxed and
 * enforce timestamp sanity (monotonic dts, pts >= dts).  Also advances
 * the stream's fractional pts generator.  Returns 0 or AVERROR(EINVAL);
 * some early returns and case breaks are elided in this excerpt. */
3319 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
/* reorder delay in frames: B-frames imply at least 1 */
3320 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
3321 int num, den, frame_size, i;
3323 av_dlog(s, "compute_pkt_fields2: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
3324 av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), delay, pkt->size, pkt->stream_index);
3326 /* duration field */
3327 if (pkt->duration == 0) {
3328 compute_frame_duration(&num, &den, st, NULL, pkt);
/* convert the num/den frame duration into stream time_base units */
3330 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
/* without reordering, dts is a valid pts */
3334 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
3337 //XXX/FIXME this is a temporary hack until all encoders output pts
3338 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
3341 av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
3345 // pkt->pts= st->cur_dts;
3346 pkt->pts= st->pts.val;
3349 //calculate dts from pts
3350 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
/* push the new pts into the reorder buffer, filling gaps with
 * extrapolated values based on the packet duration */
3351 st->pts_buffer[0]= pkt->pts;
3352 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
3353 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
/* one bubble pass keeps the buffer sorted; the minimum is the dts */
3354 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
3355 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
3357 pkt->dts= st->pts_buffer[0];
/* strict muxers require strictly increasing dts; AVFMT_TS_NONSTRICT
 * ones tolerate equal dts */
3360 if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE &&
3361 ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
3362 st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
3363 av_log(s, AV_LOG_ERROR,
3364 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
3365 st->index, av_ts2str(st->cur_dts), av_ts2str(pkt->dts));
3366 return AVERROR(EINVAL);
3368 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3369 av_log(s, AV_LOG_ERROR, "pts (%s) < dts (%s) in stream %d\n",
3370 av_ts2str(pkt->pts), av_ts2str(pkt->dts), st->index);
3371 return AVERROR(EINVAL);
3374 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%s dts2:%s\n", av_ts2str(pkt->pts), av_ts2str(pkt->dts));
3375 st->cur_dts= pkt->dts;
3376 st->pts.val= pkt->dts;
/* advance the fractional pts generator by this packet's duration */
3379 switch (st->codec->codec_type) {
3380 case AVMEDIA_TYPE_AUDIO:
3381 frame_size = get_audio_frame_size(st->codec, pkt->size, 1);
3383 /* HACK/FIXME, we skip the initial 0 size packets as they are most
3384 likely equal to the encoder delay, but it would be better if we
3385 had the real timestamps from the encoder */
3386 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3387 frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3390 case AVMEDIA_TYPE_VIDEO:
3391 frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Write a packet directly to the muxer without interleaving.
 * A NULL-like flush path for AVFMT_ALLOW_FLUSH muxers is visible first;
 * otherwise timestamps are computed/validated before writing.
 * Counts the written frame on the stream (error checks partly elided). */
3399 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
/* muxers that support flushing get the packet forwarded unchanged */
3404 if (s->oformat->flags & AVFMT_ALLOW_FLUSH)
3405 return s->oformat->write_packet(s, pkt);
3409 ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
/* timestamp problems are only fatal for muxers that need timestamps */
3411 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3414 ret= s->oformat->write_packet(s, pkt);
3417 s->streams[pkt->stream_index]->nb_frames++;
/* Private packet flag used below to mark the first packet of an
 * interleaver chunk (chunked writing is enabled via max_chunk_size /
 * max_chunk_duration). */
3421 #define CHUNK_START 0x1000
/* Insert 'pkt' into the context's packet buffer, keeping it sorted by
 * 'compare'.  The packet is copied into a list node (the caller's copy
 * is neutralized); insertion scans forward from the stream's last
 * buffered packet.  Some guards/braces are elided in this excerpt. */
3423 int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3424 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3426 AVPacketList **next_point, *this_pktl;
3427 AVStream *st= s->streams[pkt->stream_index];
3428 int chunked= s->max_chunk_size || s->max_chunk_duration;
3430 this_pktl = av_mallocz(sizeof(AVPacketList));
3432 return AVERROR(ENOMEM);
3433 this_pktl->pkt= *pkt;
3434 pkt->destruct= NULL; // do not free original but only the copy
3435 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
/* start the insertion scan after this stream's last buffered packet —
 * packets of one stream are already in order */
3437 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3438 next_point = &(st->last_in_packet_buffer->next);
3440 next_point = &s->packet_buffer;
/* chunked mode: accumulate size/duration per stream and flag the packet
 * that begins a new chunk */
3445 uint64_t max= av_rescale_q(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base);
3446 if( st->interleaver_chunk_size + pkt->size <= s->max_chunk_size-1U
3447 && st->interleaver_chunk_duration + pkt->duration <= max-1U){
3448 st->interleaver_chunk_size += pkt->size;
3449 st->interleaver_chunk_duration += pkt->duration;
3452 st->interleaver_chunk_size =
3453 st->interleaver_chunk_duration = 0;
3454 this_pktl->pkt.flags |= CHUNK_START;
/* walk forward until the comparator says the new packet sorts earlier;
 * in chunked mode never split an existing chunk */
3458 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3460 && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
3461 || !compare(s, &(*next_point)->pkt, pkt))){
3462 next_point= &(*next_point)->next;
/* fast path: append at the tail */
3467 next_point = &(s->packet_buffer_end->next);
3470 assert(!*next_point);
3472 s->packet_buffer_end= this_pktl;
3475 this_pktl->next= *next_point;
/* remember the new node as this stream's last buffered packet */
3477 s->streams[pkt->stream_index]->last_in_packet_buffer=
3478 *next_point= this_pktl;
/* Comparator for dts-ordered interleaving: returns nonzero when 'next'
 * should come after 'pkt'.  When audio_preload is set and exactly one of
 * the two streams is audio, audio timestamps are biased earlier by the
 * preload amount (with an exact integer fallback path, presumably for
 * overflow — the guard between the two computations is elided). */
3482 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3484 AVStream *st = s->streams[ pkt ->stream_index];
3485 AVStream *st2= s->streams[ next->stream_index];
/* default ordering: compare dts across the two streams' time bases */
3486 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3488 if(s->audio_preload && ((st->codec->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codec->codec_type == AVMEDIA_TYPE_AUDIO))){
3489 int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3490 int64_t ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO);
/* exact cross-multiplied comparison, avoiding rescale rounding */
3492 ts= ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
3493 -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
3496 comp= (ts>ts2) - (ts<ts2);
/* tie-break equal timestamps by stream index for stable ordering */
3500 return pkt->stream_index < next->stream_index;
/* dts-based interleaver: buffer incoming packets sorted by dts and emit
 * the head packet once every stream has contributed one (or on flush).
 * Subtitle streams that would stall interleaving are force-flushed when
 * they lag more than 20 seconds behind.  Some guards/returns are elided
 * in this excerpt. */
3504 int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3505 AVPacket *pkt, int flush)
3508 int stream_count=0, noninterleaved_count=0;
3509 int64_t delta_dts_max = 0;
3513 ret = ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
/* count streams with buffered data; subtitle streams may legitimately
 * have none for long periods */
3518 for(i=0; i < s->nb_streams; i++) {
3519 if (s->streams[i]->last_in_packet_buffer) {
3521 } else if(s->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
3522 ++noninterleaved_count;
3526 if (s->nb_streams == stream_count) {
/* measure how far the buffered streams have run ahead of the head packet */
3529 for(i=0; i < s->nb_streams; i++) {
3530 if (s->streams[i]->last_in_packet_buffer) {
3532 av_rescale_q(s->streams[i]->last_in_packet_buffer->pkt.dts,
3533 s->streams[i]->time_base,
3535 av_rescale_q(s->packet_buffer->pkt.dts,
3536 s->streams[s->packet_buffer->pkt.stream_index]->time_base,
3538 delta_dts_max= FFMAX(delta_dts_max, delta_dts);
/* only subtitle streams are missing and we are >20s ahead: flush */
3541 if(s->nb_streams == stream_count+noninterleaved_count &&
3542 delta_dts_max > 20*AV_TIME_BASE) {
3543 av_log(s, AV_LOG_DEBUG, "flushing with %d noninterleaved\n", noninterleaved_count);
3547 if(stream_count && flush){
/* pop the head of the sorted buffer as the output packet */
3548 pktl= s->packet_buffer;
3551 s->packet_buffer= pktl->next;
3552 if(!s->packet_buffer)
3553 s->packet_buffer_end= NULL;
/* invalidate the stream's tail pointer if it pointed at the popped node */
3555 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3556 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
/* nothing to emit yet */
3560 av_init_packet(out);
#if FF_API_INTERLEAVE_PACKET
/* Deprecated public wrapper around the internal dts interleaver. */
3566 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3567 AVPacket *pkt, int flush)
3569 return ff_interleave_packet_per_dts(s, out, pkt, flush);
 * Interleave an AVPacket correctly so it can be muxed.
 * @param out the interleaved packet will be output here
 * @param in the input packet
 * @param flush 1 if no further packets are available as input and all
 *              remaining packets should be output
 * @return 1 if a packet was output, 0 if no packet could be output,
 *         < 0 if an error occurred
3582 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
/* prefer a muxer-specific interleaver when the output format provides one */
3583 if (s->oformat->interleave_packet) {
3584 int ret = s->oformat->interleave_packet(s, out, in, flush);
/* default: generic dts-based interleaving */
3589 return ff_interleave_packet_per_dts(s, out, in, flush);
/* Public muxing entry point with interleaving: compute/validate packet
 * timestamps, feed the packet to the interleaver, and write out every
 * packet the interleaver releases.  Loop structure is partially elided
 * in this excerpt. */
3592 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3596 AVStream *st= s->streams[ pkt->stream_index];
3598 //FIXME/XXX/HACK drop zero sized packets
3599 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3602 av_dlog(s, "av_interleaved_write_frame size:%d dts:%s pts:%s\n",
3603 pkt->size, av_ts2str(pkt->dts), av_ts2str(pkt->pts));
3604 if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
/* interleaving requires a valid dts unless the muxer is timestamp-free */
3607 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3608 return AVERROR(EINVAL);
3610 av_dlog(s, "av_interleaved_write_frame FLUSH\n");
/* drain the interleaver: write every packet it is ready to release */
3616 int ret= interleave_packet(s, &opkt, pkt, flush);
3617 if(ret<=0) //FIXME cleanup needed for ret<0 ?
3620 ret= s->oformat->write_packet(s, &opkt);
3622 s->streams[opkt.stream_index]->nb_frames++;
3624 av_free_packet(&opkt);
/* surface any protocol-level write error */
3629 if(s->pb && s->pb->error)
3630 return s->pb->error;
/* Finish muxing: drain the interleaver of all buffered packets, write
 * the muxer trailer, and free per-stream and muxer private data.
 * Loop structure and some error paths are elided in this excerpt. */
3634 int av_write_trailer(AVFormatContext *s)
/* flush: interleave_packet() is called with NULL input and flush=1 */
3640 ret= interleave_packet(s, &pkt, NULL, 1);
3641 if(ret<0) //FIXME cleanup needed for ret<0 ?
3646 ret= s->oformat->write_packet(s, &pkt);
3648 s->streams[pkt.stream_index]->nb_frames++;
3650 av_free_packet(&pkt);
3654 if(s->pb && s->pb->error)
3658 if(s->oformat->write_trailer)
3659 ret = s->oformat->write_trailer(s);
/* report any pending I/O error if the trailer write itself succeeded */
3664 ret = s->pb ? s->pb->error : 0;
3665 for(i=0;i<s->nb_streams;i++) {
3666 av_freep(&s->streams[i]->priv_data);
3667 av_freep(&s->streams[i]->index_entries);
3669 if (s->oformat->priv_class)
3670 av_opt_free(s->priv_data);
3671 av_freep(&s->priv_data);
/* Query the muxer for the timestamp of the data most recently written
 * to the output ('dts') and the corresponding wall-clock time ('wall').
 * AVERROR(ENOSYS) when the muxer does not implement the callback. */
3675 int av_get_output_timestamp(struct AVFormatContext *s, int stream,
3676 int64_t *dts, int64_t *wall)
3678 if (!s->oformat || !s->oformat->get_output_timestamp)
3679 return AVERROR(ENOSYS);
3680 s->oformat->get_output_timestamp(s, stream, dts, wall);
/* Add stream index 'idx' to the program identified by 'progid'.
 * Validates the index, skips if it is already present, and grows the
 * program's stream_index array by one. */
3684 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3687 AVProgram *program=NULL;
3690 if (idx >= ac->nb_streams) {
3691 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3695 for(i=0; i<ac->nb_programs; i++){
3696 if(ac->programs[i]->id != progid)
3698 program = ac->programs[i];
/* avoid duplicate entries for the same stream */
3699 for(j=0; j<program->nb_stream_indexes; j++)
3700 if(program->stream_index[j] == idx)
3703 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3706 program->stream_index = tmp;
3707 program->stream_index[program->nb_stream_indexes++] = idx;
/* Log a frame/tb rate with adaptive precision: two decimals when the
 * value is fractional, whole numbers otherwise, and a "k" suffix for
 * multiples of 1000. */
3712 static void print_fps(double d, const char *postfix){
/* work on the value scaled by 100 to test for fractional parts */
3713 uint64_t v= lrintf(d*100);
3714 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3715 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3716 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/*
 * Pretty-print a metadata dictionary, one "key: value" pair per line,
 * skipping the "language" tag (shown elsewhere) and splitting values on
 * CR/LF so multi-line values stay aligned.
 * NOTE(review): interior lines of this listing are elided; surviving
 * lines are kept byte-identical.
 */
3719 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
/* Nothing to print when the only entry is the language tag. */
3721 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3722 AVDictionaryEntry *tag=NULL;
3724 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
/* Empty key + IGNORE_SUFFIX iterates over every entry. */
3725 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3726 if(strcmp("language", tag->key)){
3727 const char *p = tag->value;
3728 av_log(ctx, AV_LOG_INFO, "%s %-16s: ", indent, tag->key);
/* Copy up to the next CR or LF into a bounded buffer. */
3731 size_t len = strcspn(p, "\xd\xa");
3732 av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3733 av_log(ctx, AV_LOG_INFO, "%s", tmp);
/* CR becomes a space; LF restarts an aligned continuation line. */
3735 if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3736 if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s %-16s: ", indent, "");
3739 av_log(ctx, AV_LOG_INFO, "\n");
3745 /* "user interface" functions */
/*
 * Log a one-line (plus flags) description of stream i of ic, in the style
 * "Stream #index:i[0xid](lang): <codec string>, SAR/DAR, rates, flags".
 * NOTE(review): interior lines of this listing are elided; surviving
 * lines are kept byte-identical.
 */
3746 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3749 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3750 AVStream *st = ic->streams[i];
/* g reduces the time base for the debug print below. */
3751 int g = av_gcd(st->time_base.num, st->time_base.den);
3752 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3753 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3754 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);
3755 /* the pid is an important information, so we display it */
3756 /* XXX: add a generic system */
3757 if (flags & AVFMT_SHOW_IDS)
3758 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3760 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3761 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3762 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* Only print SAR/DAR when the stream-level SAR is set and differs from
 * the codec-level value. */
3763 if (st->sample_aspect_ratio.num && // default
3764 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3765 AVRational display_aspect_ratio;
3766 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3767 st->codec->width*st->sample_aspect_ratio.num,
3768 st->codec->height*st->sample_aspect_ratio.den,
3770 av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3771 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3772 display_aspect_ratio.num, display_aspect_ratio.den);
/* For video, print the various rates: average fps, real base frame rate,
 * stream time base (tbn) and codec time base (tbc). */
3774 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3775 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3776 print_fps(av_q2d(st->avg_frame_rate), "fps");
3777 if(st->r_frame_rate.den && st->r_frame_rate.num)
3778 print_fps(av_q2d(st->r_frame_rate), "tbr");
3779 if(st->time_base.den && st->time_base.num)
3780 print_fps(1/av_q2d(st->time_base), "tbn");
3781 if(st->codec->time_base.den && st->codec->time_base.num)
3782 print_fps(1/av_q2d(st->codec->time_base), "tbc");
/* One flag suffix per disposition bit that is set. */
3784 if (st->disposition & AV_DISPOSITION_DEFAULT)
3785 av_log(NULL, AV_LOG_INFO, " (default)");
3786 if (st->disposition & AV_DISPOSITION_DUB)
3787 av_log(NULL, AV_LOG_INFO, " (dub)");
3788 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3789 av_log(NULL, AV_LOG_INFO, " (original)");
3790 if (st->disposition & AV_DISPOSITION_COMMENT)
3791 av_log(NULL, AV_LOG_INFO, " (comment)");
3792 if (st->disposition & AV_DISPOSITION_LYRICS)
3793 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3794 if (st->disposition & AV_DISPOSITION_KARAOKE)
3795 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3796 if (st->disposition & AV_DISPOSITION_FORCED)
3797 av_log(NULL, AV_LOG_INFO, " (forced)");
3798 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3799 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3800 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3801 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3802 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3803 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3804 av_log(NULL, AV_LOG_INFO, "\n");
/* Finish with the stream's own metadata, indented one level deeper. */
3805 dump_metadata(NULL, st->metadata, " ");
/*
 * Public dump of an AVFormatContext: header line, global metadata,
 * duration/start/bitrate, chapters, programs (with their streams) and
 * finally any stream not covered by a program.
 * NOTE(review): interior lines of this listing are elided; surviving
 * lines are kept byte-identical.
 */
3808 void av_dump_format(AVFormatContext *ic,
/* printed[] marks streams already shown as part of a program. */
3814 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3815 if (ic->nb_streams && !printed)
3818 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3819 is_output ? "Output" : "Input",
3821 is_output ? ic->oformat->name : ic->iformat->name,
3822 is_output ? "to" : "from", url);
3823 dump_metadata(NULL, ic->metadata, " ");
3825 av_log(NULL, AV_LOG_INFO, " Duration: ");
3826 if (ic->duration != AV_NOPTS_VALUE) {
3827 int hours, mins, secs, us;
/* Split the AV_TIME_BASE duration into h:m:s plus centiseconds. */
3828 secs = ic->duration / AV_TIME_BASE;
3829 us = ic->duration % AV_TIME_BASE;
3834 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3835 (100 * us) / AV_TIME_BASE);
3837 av_log(NULL, AV_LOG_INFO, "N/A");
3839 if (ic->start_time != AV_NOPTS_VALUE) {
3841 av_log(NULL, AV_LOG_INFO, ", start: ");
/* abs() keeps the fractional part positive for negative start times. */
3842 secs = ic->start_time / AV_TIME_BASE;
3843 us = abs(ic->start_time % AV_TIME_BASE);
3844 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3845 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3847 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3849 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3851 av_log(NULL, AV_LOG_INFO, "N/A");
3853 av_log(NULL, AV_LOG_INFO, "\n");
/* Chapters, converted to seconds through their own time base. */
3855 for (i = 0; i < ic->nb_chapters; i++) {
3856 AVChapter *ch = ic->chapters[i];
3857 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3858 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3859 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3861 dump_metadata(NULL, ch->metadata, " ");
/* Programs first; streams they reference are marked in printed[]. */
3863 if(ic->nb_programs) {
3864 int j, k, total = 0;
3865 for(j=0; j<ic->nb_programs; j++) {
3866 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3868 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3869 name ? name->value : "");
3870 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3871 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3872 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3873 printed[ic->programs[j]->stream_index[k]] = 1;
3875 total += ic->programs[j]->nb_stream_indexes;
3877 if (total < ic->nb_streams)
3878 av_log(NULL, AV_LOG_INFO, " No Program\n");
/* Remaining (program-less) streams. */
3880 for(i=0;i<ic->nb_streams;i++)
3882 dump_stream_format(ic, i, index, is_output);
/*
 * ABI-compatibility shim: when building shared libraries with symbol
 * versioning, re-export av_gettime() under the LIBAVFORMAT_54 version so
 * binaries linked against the old libavformat symbol keep working.
 */
3887 #if FF_API_AV_GETTIME && CONFIG_SHARED && HAVE_SYMVER
3888 FF_SYMVER(int64_t, av_gettime, (void), "LIBAVFORMAT_54")
3890 return av_gettime();
3894 uint64_t ff_ntp_time(void)
3896 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/*
 * Expand a "%d"/"%0Nd" pattern in path with number, writing the result to
 * buf.  Exactly one %d-style directive is allowed; "%%" escapes a percent.
 * NOTE(review): interior lines of this listing are elided; surviving
 * lines are kept byte-identical.
 * NOTE(review): isdigit(*p) on a plain char is UB for negative chars —
 * should cast through unsigned char; confirm against the full source.
 */
3899 int av_get_frame_filename(char *buf, int buf_size,
3900 const char *path, int number)
3903 char *q, buf1[20], c;
3904 int nd, len, percentd_found;
/* Accumulate the zero-pad width digits following '%'. */
3916 while (isdigit(*p)) {
3917 nd = nd * 10 + *p++ - '0';
3920 } while (isdigit(c));
/* Render the number with the requested zero-padded width. */
3929 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
/* Bail out if the expansion would overflow the output buffer. */
3931 if ((q - buf + len) > buf_size - 1)
3933 memcpy(q, buf1, len);
3941 if ((q - buf) < buf_size - 1)
/* A pattern without any %d directive is an error. */
3945 if (!percentd_found)
/*
 * Shared hex-dump backend: 16 bytes per line, hex columns followed by an
 * ASCII gutter (non-printable bytes shown as substitutes).  Output goes
 * to FILE *f when non-NULL, otherwise through av_log(avcl, level, ...).
 * NOTE(review): interior lines of this listing are elided; surviving
 * lines are kept byte-identical.
 */
3954 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3958 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3960 for(i=0;i<size;i+=16) {
3967 PRINT(" %02x", buf[i+j]);
/* ASCII gutter: printable range is ' '..'~'. */
3972 for(j=0;j<len;j++) {
3974 if (c < ' ' || c > '~')
/**
 * Hex dump a buffer to the given stdio stream (16 bytes per line with an
 * ASCII gutter); convenience wrapper around hex_dump_internal().
 */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
{
    hex_dump_internal(NULL, f, 0, buf, size);
}
3988 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3990 hex_dump_internal(avcl, NULL, level, buf, size);
/*
 * Shared packet-dump backend: prints stream index, keyframe flag,
 * duration, DTS, PTS (timestamps scaled through time_base into seconds),
 * size and optionally the payload as a hex dump.  Output goes to FILE *f
 * when non-NULL, otherwise through av_log(avcl, level, ...).
 * NOTE(review): interior lines of this listing are elided; surviving
 * lines are kept byte-identical.
 */
3993 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3996 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3997 PRINT("stream #%d:\n", pkt->stream_index);
3998 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3999 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
4000 /* DTS is _always_ valid after av_read_frame() */
4002 if (pkt->dts == AV_NOPTS_VALUE)
4005 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
4006 /* PTS may not be known if B-frames are present. */
4008 if (pkt->pts == AV_NOPTS_VALUE)
4011 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
4013 PRINT(" size=%d\n", pkt->size);
/* Optional payload dump (FILE-based even in the av_log case here). */
4016 av_hex_dump(f, pkt->data, pkt->size);
4020 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
4022 AVRational tb = { 1, AV_TIME_BASE };
4023 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
4027 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
4029 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
4033 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
4035 AVRational tb = { 1, AV_TIME_BASE };
4036 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
4040 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
4043 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/*
 * Split a URL into proto, authorization (user[:pass]), hostname, port and
 * path components.  Each output buffer may be sized 0 to skip it; missing
 * components come back as empty strings and *port_ptr as -1.
 * NOTE(review): interior lines of this listing are elided; surviving
 * lines are kept byte-identical.
 */
4046 void av_url_split(char *proto, int proto_size,
4047 char *authorization, int authorization_size,
4048 char *hostname, int hostname_size,
4050 char *path, int path_size,
4053 const char *p, *ls, *at, *col, *brk;
/* Defaults: empty components, no port. */
4055 if (port_ptr) *port_ptr = -1;
4056 if (proto_size > 0) proto[0] = 0;
4057 if (authorization_size > 0) authorization[0] = 0;
4058 if (hostname_size > 0) hostname[0] = 0;
4059 if (path_size > 0) path[0] = 0;
4061 /* parse protocol */
4062 if ((p = strchr(url, ':'))) {
4063 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url))
4068 /* no protocol means plain filename */
4069 av_strlcpy(path, url, path_size);
4073 /* separate path from hostname */
4074 ls = strchr(p, '/');
4076 ls = strchr(p, '?');
4078 av_strlcpy(path, ls, path_size);
4080 ls = &p[strlen(p)]; // XXX
4082 /* the rest is hostname, use that to parse auth/port */
4084 /* authorization (user[:pass]@hostname) */
4085 if ((at = strchr(p, '@')) && at < ls) {
4086 av_strlcpy(authorization, p,
4087 FFMIN(authorization_size, at + 1 - p));
4088 p = at + 1; /* skip '@' */
/* Bracketed numeric IPv6 literal: [addr]:port */
4091 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
4093 av_strlcpy(hostname, p + 1,
4094 FFMIN(hostname_size, brk - p));
4095 if (brk[1] == ':' && port_ptr)
4096 *port_ptr = atoi(brk + 2);
/* Plain host:port */
4097 } else if ((col = strchr(p, ':')) && col < ls) {
4098 av_strlcpy(hostname, p,
4099 FFMIN(col + 1 - p, hostname_size));
4100 if (port_ptr) *port_ptr = atoi(col + 1);
/* Host only, no port. */
4102 av_strlcpy(hostname, p,
4103 FFMIN(ls + 1 - p, hostname_size));
/**
 * Write the hexadecimal representation of the s bytes at src into buff.
 * Exactly 2*s characters are written and NO terminating NUL is appended,
 * so callers must size buff accordingly.
 *
 * @param lowercase nonzero selects a-f digits instead of A-F
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    static const char hex_table_uc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'A', 'B',
                                           'C', 'D', 'E', 'F' };
    static const char hex_table_lc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'a', 'b',
                                           'c', 'd', 'e', 'f' };
    const char *digits = lowercase ? hex_table_lc : hex_table_uc;
    char *out = buff;
    int i;

    for (i = 0; i < s; i++) {
        *out++ = digits[src[i] >> 4];
        *out++ = digits[src[i] & 0xF];
    }
    return buff;
}
/*
 * Parse a whitespace-tolerant hex string into bytes; the inverse of
 * ff_data_to_hex().  Presumably returns the number of bytes written —
 * the return path is not visible in this chunk; confirm in full source.
 * NOTE(review): interior lines of this listing are elided; surviving
 * lines are kept byte-identical.
 */
4128 int ff_hex_to_data(uint8_t *data, const char *p)
/* Skip leading whitespace before each digit pair. */
4135 p += strspn(p, SPACE_CHARS);
/* Cast to unsigned char avoids UB in toupper() for negative chars. */
4138 c = toupper((unsigned char) *p++);
4139 if (c >= '0' && c <= '9')
4141 else if (c >= 'A' && c <= 'F')
#if FF_API_SET_PTS_INFO
/**
 * Deprecated public name for avpriv_set_pts_info(); kept only while
 * FF_API_SET_PTS_INFO is enabled.
 */
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
                     unsigned int pts_num, unsigned int pts_den)
{
    avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
}
#endif
4164 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
4165 unsigned int pts_num, unsigned int pts_den)
4168 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
4169 if(new_tb.num != pts_num)
4170 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
4172 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
4174 if(new_tb.num <= 0 || new_tb.den <= 0) {
4175 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase %d/%d for st:%d\n", new_tb.num, new_tb.den, s->index);
4178 s->time_base = new_tb;
4179 s->pts_wrap_bits = pts_wrap_bits;
/*
 * Assemble "proto://[auth@]host[:port]<fmt...>" into str, escaping a
 * numeric IPv6 host in brackets when networking support is compiled in.
 * NOTE(review): interior lines of this listing are elided; surviving
 * lines are kept byte-identical.
 */
4182 int ff_url_join(char *str, int size, const char *proto,
4183 const char *authorization, const char *hostname,
4184 int port, const char *fmt, ...)
4187 struct addrinfo hints = { 0 }, *ai;
4192 av_strlcatf(str, size, "%s://", proto);
4193 if (authorization && authorization[0])
4194 av_strlcatf(str, size, "%s@", authorization);
4195 #if CONFIG_NETWORK && defined(AF_INET6)
4196 /* Determine if hostname is a numerical IPv6 address,
4197 * properly escape it within [] in that case. */
4198 hints.ai_flags = AI_NUMERICHOST;
4199 if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
4200 if (ai->ai_family == AF_INET6) {
4201 av_strlcat(str, "[", size);
4202 av_strlcat(str, hostname, size);
4203 av_strlcat(str, "]", size);
4205 av_strlcat(str, hostname, size);
4210 /* Not an IPv6 address, just output the plain string. */
4211 av_strlcat(str, hostname, size);
4214 av_strlcatf(str, size, ":%d", port);
/* Append the caller's formatted tail (path, query, ...), bounded. */
4217 int len = strlen(str);
4220 vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
4226 int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
4227 AVFormatContext *src)
4232 local_pkt.stream_index = dst_stream;
4233 if (pkt->pts != AV_NOPTS_VALUE)
4234 local_pkt.pts = av_rescale_q(pkt->pts,
4235 src->streams[pkt->stream_index]->time_base,
4236 dst->streams[dst_stream]->time_base);
4237 if (pkt->dts != AV_NOPTS_VALUE)
4238 local_pkt.dts = av_rescale_q(pkt->dts,
4239 src->streams[pkt->stream_index]->time_base,
4240 dst->streams[dst_stream]->time_base);
4241 return av_write_frame(dst, &local_pkt);
/*
 * Parse a comma/whitespace separated list of key=value pairs, asking
 * callback_get_buf for a destination buffer per key and copying the
 * (possibly quoted, backslash-escaped) value into it.
 * NOTE(review): interior lines of this listing are elided; surviving
 * lines are kept byte-identical.
 */
4244 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
4247 const char *ptr = str;
4249 /* Parse key=value pairs. */
4252 char *dest = NULL, *dest_end;
4253 int key_len, dest_len = 0;
4255 /* Skip whitespace and potential commas. */
4256 while (*ptr && (isspace(*ptr) || *ptr == ','))
/* A pair without '=' terminates parsing. */
4263 if (!(ptr = strchr(key, '=')))
4266 key_len = ptr - key;
/* The callback chooses (or declines) a buffer for this key's value. */
4268 callback_get_buf(context, key, key_len, &dest, &dest_len);
4269 dest_end = dest + dest_len - 1;
/* Quoted value: copy until the closing quote. */
4273 while (*ptr && *ptr != '\"') {
4277 if (dest && dest < dest_end)
4281 if (dest && dest < dest_end)
/* Unquoted value: copy until whitespace or comma. */
4289 for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
4290 if (dest && dest < dest_end)
4298 int ff_find_stream_index(AVFormatContext *s, int id)
4301 for (i = 0; i < s->nb_streams; i++) {
4302 if (s->streams[i]->id == id)
/*
 * Resolve the (possibly relative) URL rel against base into buf,
 * handling server-absolute paths, already-absolute URLs and "../"
 * components.
 * NOTE(review): interior lines of this listing are elided; surviving
 * lines are kept byte-identical.
 */
4308 void ff_make_absolute_url(char *buf, int size, const char *base,
4312 /* Absolute path, relative to the current server */
4313 if (base && strstr(base, "://") && rel[0] == '/') {
4315 av_strlcpy(buf, base, size);
4316 sep = strstr(buf, "://");
/* Truncate the base at the first path separator after the authority. */
4319 sep = strchr(sep, '/');
4323 av_strlcat(buf, rel, size);
4326 /* If rel actually is an absolute url, just copy it */
4327 if (!base || strstr(rel, "://") || rel[0] == '/') {
4328 av_strlcpy(buf, rel, size);
4332 av_strlcpy(buf, base, size);
4333 /* Remove the file name from the base url */
4334 sep = strrchr(buf, '/');
/* Pop one directory off the base for each leading "../" in rel. */
4339 while (av_strstart(rel, "../", NULL) && sep) {
4340 /* Remove the path delimiter at the end */
4342 sep = strrchr(buf, '/');
4343 /* If the next directory name to pop off is "..", break here */
4344 if (!strcmp(sep ? &sep[1] : buf, "..")) {
4345 /* Readd the slash we just removed */
4346 av_strlcat(buf, "/", size);
4349 /* Cut off the directory name */
4356 av_strlcat(buf, rel, size);
4359 int64_t ff_iso8601_to_unix_time(const char *datestr)
4362 struct tm time1 = {0}, time2 = {0};
4364 ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
4365 ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
4367 return av_timegm(&time2);
4369 return av_timegm(&time1);
4371 av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert "
4372 "the date string.\n");
4377 int avformat_query_codec(AVOutputFormat *ofmt, enum CodecID codec_id, int std_compliance)
4380 if (ofmt->query_codec)
4381 return ofmt->query_codec(codec_id, std_compliance);
4382 else if (ofmt->codec_tag)
4383 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4384 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
4385 codec_id == ofmt->subtitle_codec)
4388 return AVERROR_PATCHWELCOME;
/*
 * Global, one-time initialization of libavformat's networking layer.
 * NOTE(review): interior lines of this listing are elided; surviving
 * lines are kept byte-identical.
 */
4391 int avformat_network_init(void)
/* Remember that the user initialized networking explicitly. */
4395 ff_network_inited_globally = 1;
4396 if ((ret = ff_network_init()) < 0)
/* Undo avformat_network_init(); the body is elided from this listing. */
4403 int avformat_network_deinit(void)
/*
 * Attach AV_PKT_DATA_PARAM_CHANGE side data to pkt announcing changed
 * channel count, channel layout, sample rate and/or dimensions.  Each
 * non-zero argument sets its flag bit and appends its little-endian value.
 * Returns 0, AVERROR(EINVAL) for a missing packet, AVERROR(ENOMEM) when
 * the side data cannot be allocated.
 * NOTE(review): interior lines of this listing are elided; surviving
 * lines are kept byte-identical.
 */
4412 int ff_add_param_change(AVPacket *pkt, int32_t channels,
4413 uint64_t channel_layout, int32_t sample_rate,
4414 int32_t width, int32_t height)
4420 return AVERROR(EINVAL);
/* Each change grows the side-data size and sets its flag bit. */
4423 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4425 if (channel_layout) {
4427 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4431 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4433 if (width || height) {
4435 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
4437 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4439 return AVERROR(ENOMEM);
/* Serialize: flags word first, then each announced value, little-endian. */
4440 bytestream_put_le32(&data, flags);
4442 bytestream_put_le32(&data, channels);
4444 bytestream_put_le64(&data, channel_layout);
4446 bytestream_put_le32(&data, sample_rate);
4447 if (width || height) {
4448 bytestream_put_le32(&data, width);
4449 bytestream_put_le32(&data, height);
/* Expose the RIFF/AVI BMP (video) codec tag table to library users. */
4454 const struct AVCodecTag *avformat_get_riff_video_tags(void)
4456 return ff_codec_bmp_tags;
/* Expose the RIFF/WAV (audio) codec tag table to library users. */
4458 const struct AVCodecTag *avformat_get_riff_audio_tags(void)
4460 return ff_codec_wav_tags;
4463 AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4465 AVRational undef = {0, 1};
4466 AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4467 AVRational codec_sample_aspect_ratio = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
4468 AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
4470 av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4471 stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4472 if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4473 stream_sample_aspect_ratio = undef;
4475 av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4476 frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4477 if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4478 frame_sample_aspect_ratio = undef;
4480 if (stream_sample_aspect_ratio.num)
4481 return stream_sample_aspect_ratio;
4483 return frame_sample_aspect_ratio;