2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "avio_internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavcodec/raw.h"
29 #include "libavcodec/bytestream.h"
30 #include "libavutil/avassert.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/dict.h"
33 #include "libavutil/pixdesc.h"
36 #include "libavutil/avassert.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/timestamp.h"
42 #include "audiointerleave.h"
56 * various utility functions for use within FFmpeg
59 unsigned avformat_version(void)
61 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
62 return LIBAVFORMAT_VERSION_INT;
65 const char *avformat_configuration(void)
67 return FFMPEG_CONFIGURATION;
70 const char *avformat_license(void)
72 #define LICENSE_PREFIX "libavformat license: "
73 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Timestamps near INT64_MAX are used as a "relative" timestamp base before
 * the real first_dts is known; see update_initial_timestamps(). */
#define RELATIVE_TS_BASE (INT64_MAX - (1LL << 48))

/** Return non-zero if ts lies in the relative-timestamp range. */
static int is_relative(int64_t ts)
{
    return ts > (RELATIVE_TS_BASE - (1LL << 48));
}
82 /* fraction handling */
85 * f = val + (num / den) + 0.5.
87 * 'num' is normalized so that it is such as 0 <= num < den.
89 * @param f fractional number
90 * @param val integer value
91 * @param num must be >= 0
92 * @param den must be >= 1
94 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
107 * Fractional addition to f: f = f + (incr / f->den).
109 * @param f fractional number
110 * @param incr increment, can be positive or negative
112 static void frac_add(AVFrac *f, int64_t incr)
125 } else if (num >= den) {
132 /** head of registered input format linked list */
133 static AVInputFormat *first_iformat = NULL;
134 /** head of registered output format linked list */
135 static AVOutputFormat *first_oformat = NULL;
137 AVInputFormat *av_iformat_next(AVInputFormat *f)
139 if(f) return f->next;
140 else return first_iformat;
143 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
145 if(f) return f->next;
146 else return first_oformat;
149 void av_register_input_format(AVInputFormat *format)
153 while (*p != NULL) p = &(*p)->next;
158 void av_register_output_format(AVOutputFormat *format)
162 while (*p != NULL) p = &(*p)->next;
/**
 * Return 1 if the extension of filename matches one of the comma-separated
 * extensions in 'extensions' (case-insensitive), 0 otherwise.
 */
int av_match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if (!filename)
        return 0;
    ext = strrchr(filename, '.');
    if (ext) {
        ext++; /* skip the '.' itself */
        p = extensions;
        for (;;) {
            q = ext1;
            /* copy one candidate extension, bounded by the local buffer */
            while (*p != '\0' && *p != ',' && q - ext1 < sizeof(ext1) - 1)
                *q++ = *p++;
            *q = '\0';
            if (!av_strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++;
        }
    }
    return 0;
}
/**
 * Return 1 if 'name' matches one entry of the comma-separated list 'names'
 * (case-insensitive), 0 otherwise.
 */
static int match_format(const char *name, const char *names)
{
    const char *p;
    int len, namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        /* compare against the full candidate, whichever is longer */
        len = FFMAX(p - names, namelen);
        if (!av_strncasecmp(name, names, len))
            return 1;
        names = p + 1;
    }
    /* last (or only) entry in the list */
    return !av_strcasecmp(name, names);
}
212 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
213 const char *mime_type)
215 AVOutputFormat *fmt = NULL, *fmt_found;
216 int score_max, score;
218 /* specific test for image sequences */
219 #if CONFIG_IMAGE2_MUXER
220 if (!short_name && filename &&
221 av_filename_number_test(filename) &&
222 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
223 return av_guess_format("image2", NULL, NULL);
226 /* Find the proper file type. */
229 while ((fmt = av_oformat_next(fmt))) {
231 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
233 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
235 if (filename && fmt->extensions &&
236 av_match_ext(filename, fmt->extensions)) {
239 if (score > score_max) {
247 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
248 const char *filename, const char *mime_type, enum AVMediaType type){
249 if(type == AVMEDIA_TYPE_VIDEO){
250 enum CodecID codec_id= CODEC_ID_NONE;
252 #if CONFIG_IMAGE2_MUXER
253 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
254 codec_id= ff_guess_image2_codec(filename);
257 if(codec_id == CODEC_ID_NONE)
258 codec_id= fmt->video_codec;
260 }else if(type == AVMEDIA_TYPE_AUDIO)
261 return fmt->audio_codec;
262 else if (type == AVMEDIA_TYPE_SUBTITLE)
263 return fmt->subtitle_codec;
265 return CODEC_ID_NONE;
268 AVInputFormat *av_find_input_format(const char *short_name)
270 AVInputFormat *fmt = NULL;
271 while ((fmt = av_iformat_next(fmt))) {
272 if (match_format(short_name, fmt->name))
278 int ffio_limit(AVIOContext *s, int size)
281 int64_t remaining= s->maxsize - avio_tell(s);
282 if(remaining < size){
283 int64_t newsize= avio_size(s);
284 if(!s->maxsize || s->maxsize<newsize)
285 s->maxsize= newsize - !newsize;
286 remaining= s->maxsize - avio_tell(s);
287 remaining= FFMAX(remaining, 0);
290 if(s->maxsize>=0 && remaining+1 < size){
291 av_log(0, AV_LOG_ERROR, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
298 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
301 int orig_size = size;
302 size= ffio_limit(s, size);
304 ret= av_new_packet(pkt, size);
309 pkt->pos= avio_tell(s);
311 ret= avio_read(s, pkt->data, size);
315 av_shrink_packet(pkt, ret);
316 if (pkt->size < orig_size)
317 pkt->flags |= AV_PKT_FLAG_CORRUPT;
322 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
327 return av_get_packet(s, pkt, size);
328 old_size = pkt->size;
329 ret = av_grow_packet(pkt, size);
332 ret = avio_read(s, pkt->data + old_size, size);
333 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
/** Return 1 if filename contains a valid frame-number pattern (e.g. %d). */
int av_filename_number_test(const char *filename)
{
    char buf[1024];

    return filename &&
           (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
}
344 AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
346 AVProbeData lpd = *pd;
347 AVInputFormat *fmt1 = NULL, *fmt;
348 int score, nodat = 0, score_max=0;
350 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
351 int id3len = ff_id3v2_tag_len(lpd.buf);
352 if (lpd.buf_size > id3len + 16) {
354 lpd.buf_size -= id3len;
360 while ((fmt1 = av_iformat_next(fmt1))) {
361 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
364 if (fmt1->read_probe) {
365 score = fmt1->read_probe(&lpd);
366 if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
367 score = FFMAX(score, nodat ? AVPROBE_SCORE_MAX/4-1 : 1);
368 } else if (fmt1->extensions) {
369 if (av_match_ext(lpd.filename, fmt1->extensions)) {
373 if (score > score_max) {
376 }else if (score == score_max)
379 *score_ret= score_max;
384 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
387 AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
388 if(score_ret > *score_max){
389 *score_max= score_ret;
395 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
397 return av_probe_input_format2(pd, is_opened, &score);
400 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
402 static const struct {
403 const char *name; enum CodecID id; enum AVMediaType type;
405 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
406 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
407 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
408 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
409 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
410 { "loas" , CODEC_ID_AAC_LATM , AVMEDIA_TYPE_AUDIO },
411 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
412 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
413 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
417 AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
421 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
422 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
423 for (i = 0; fmt_id_type[i].name; i++) {
424 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
425 st->codec->codec_id = fmt_id_type[i].id;
426 st->codec->codec_type = fmt_id_type[i].type;
434 /************************************************************/
435 /* input media file */
437 int av_demuxer_open(AVFormatContext *ic){
440 if (ic->iformat->read_header) {
441 err = ic->iformat->read_header(ic);
446 if (ic->pb && !ic->data_offset)
447 ic->data_offset = avio_tell(ic->pb);
453 /** size of probe buffer, for guessing file type from file contents */
454 #define PROBE_BUF_MIN 2048
455 #define PROBE_BUF_MAX (1<<20)
457 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
458 const char *filename, void *logctx,
459 unsigned int offset, unsigned int max_probe_size)
461 AVProbeData pd = { filename ? filename : "", NULL, -offset };
462 unsigned char *buf = NULL;
463 int ret = 0, probe_size;
465 if (!max_probe_size) {
466 max_probe_size = PROBE_BUF_MAX;
467 } else if (max_probe_size > PROBE_BUF_MAX) {
468 max_probe_size = PROBE_BUF_MAX;
469 } else if (max_probe_size < PROBE_BUF_MIN) {
470 return AVERROR(EINVAL);
473 if (offset >= max_probe_size) {
474 return AVERROR(EINVAL);
477 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
478 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
479 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
480 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
483 if (probe_size < offset) {
487 /* read probe data */
488 buftmp = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
491 return AVERROR(ENOMEM);
494 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
495 /* fail if error was not end of file, otherwise, lower score */
496 if (ret != AVERROR_EOF) {
501 ret = 0; /* error was end of file, nothing read */
504 pd.buf = &buf[offset];
506 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
508 /* guess file format */
509 *fmt = av_probe_input_format2(&pd, 1, &score);
511 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
512 av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
514 av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
520 return AVERROR_INVALIDDATA;
523 /* rewind. reuse probe buffer to avoid seeking */
524 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
530 /* open input file and probe the format if necessary */
531 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
534 AVProbeData pd = {filename, NULL, 0};
537 s->flags |= AVFMT_FLAG_CUSTOM_IO;
539 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
540 else if (s->iformat->flags & AVFMT_NOFILE)
541 av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
542 "will be ignored with AVFMT_NOFILE format.\n");
546 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
547 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
550 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
551 &s->interrupt_callback, options)) < 0)
555 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
558 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
559 AVPacketList **plast_pktl){
560 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
565 (*plast_pktl)->next = pktl;
567 *packet_buffer = pktl;
569 /* add the packet in the buffered packet list */
575 static void queue_attached_pictures(AVFormatContext *s)
578 for (i = 0; i < s->nb_streams; i++)
579 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
580 s->streams[i]->discard < AVDISCARD_ALL) {
581 AVPacket copy = s->streams[i]->attached_pic;
582 copy.destruct = NULL;
583 add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end);
587 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
589 AVFormatContext *s = *ps;
591 AVDictionary *tmp = NULL;
592 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
594 if (!s && !(s = avformat_alloc_context()))
595 return AVERROR(ENOMEM);
597 av_log(0, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
598 return AVERROR(EINVAL);
604 av_dict_copy(&tmp, *options, 0);
606 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
609 if ((ret = init_input(s, filename, &tmp)) < 0)
612 /* check filename in case an image number is expected */
613 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
614 if (!av_filename_number_test(filename)) {
615 ret = AVERROR(EINVAL);
620 s->duration = s->start_time = AV_NOPTS_VALUE;
621 av_strlcpy(s->filename, filename, sizeof(s->filename));
623 /* allocate private data */
624 if (s->iformat->priv_data_size > 0) {
625 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
626 ret = AVERROR(ENOMEM);
629 if (s->iformat->priv_class) {
630 *(const AVClass**)s->priv_data = s->iformat->priv_class;
631 av_opt_set_defaults(s->priv_data);
632 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
637 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
639 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
641 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
642 if ((ret = s->iformat->read_header(s)) < 0)
645 if (id3v2_extra_meta &&
646 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
648 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
650 queue_attached_pictures(s);
652 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
653 s->data_offset = avio_tell(s->pb);
655 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
658 av_dict_free(options);
665 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
667 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
669 avformat_free_context(s);
674 /*******************************************************/
676 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
682 AVPacketList *pktl = s->raw_packet_buffer;
686 if(s->streams[pkt->stream_index]->request_probe <= 0){
687 s->raw_packet_buffer = pktl->next;
688 s->raw_packet_buffer_remaining_size += pkt->size;
695 ret= s->iformat->read_packet(s, pkt);
697 if (!pktl || ret == AVERROR(EAGAIN))
699 for (i = 0; i < s->nb_streams; i++)
700 if(s->streams[i]->request_probe > 0)
701 s->streams[i]->request_probe = -1;
705 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
706 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
707 av_log(s, AV_LOG_WARNING,
708 "Dropped corrupted packet (stream = %d)\n",
714 if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
715 av_packet_merge_side_data(pkt);
717 if(pkt->stream_index >= (unsigned)s->nb_streams){
718 av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
722 st= s->streams[pkt->stream_index];
724 switch(st->codec->codec_type){
725 case AVMEDIA_TYPE_VIDEO:
726 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
728 case AVMEDIA_TYPE_AUDIO:
729 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
731 case AVMEDIA_TYPE_SUBTITLE:
732 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
736 if(!pktl && st->request_probe <= 0)
739 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
740 s->raw_packet_buffer_remaining_size -= pkt->size;
742 if(st->request_probe>0){
743 AVProbeData *pd = &st->probe_data;
745 av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
748 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
749 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
750 pd->buf_size += pkt->size;
751 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
753 end= s->raw_packet_buffer_remaining_size <= 0
754 || st->probe_packets<=0;
756 if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
757 int score= set_codec_from_probe_data(s, st, pd);
758 if( (st->codec->codec_id != CODEC_ID_NONE && score > AVPROBE_SCORE_MAX/4)
762 st->request_probe= -1;
763 if(st->codec->codec_id != CODEC_ID_NONE){
764 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
766 av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
#if FF_API_READ_PACKET
/** Deprecated public wrapper around ff_read_packet(). */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    return ff_read_packet(s, pkt);
}
#endif
781 /**********************************************************/
783 static int determinable_frame_size(AVCodecContext *avctx)
785 if (/*avctx->codec_id == CODEC_ID_AAC ||*/
786 avctx->codec_id == CODEC_ID_MP1 ||
787 avctx->codec_id == CODEC_ID_MP2 ||
788 avctx->codec_id == CODEC_ID_MP3/* ||
789 avctx->codec_id == CODEC_ID_CELT*/)
795 * Get the number of samples of an audio frame. Return -1 on error.
797 static int get_audio_frame_size(AVCodecContext *enc, int size, int mux)
801 /* give frame_size priority if demuxing */
802 if (!mux && enc->frame_size > 1)
803 return enc->frame_size;
805 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
808 /* fallback to using frame_size if muxing */
809 if (enc->frame_size > 1)
810 return enc->frame_size;
817 * Return the frame duration in seconds. Return 0 if not available.
819 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
820 AVCodecParserContext *pc, AVPacket *pkt)
826 switch(st->codec->codec_type) {
827 case AVMEDIA_TYPE_VIDEO:
828 if (st->r_frame_rate.num && !pc) {
829 *pnum = st->r_frame_rate.den;
830 *pden = st->r_frame_rate.num;
831 } else if(st->time_base.num*1000LL > st->time_base.den) {
832 *pnum = st->time_base.num;
833 *pden = st->time_base.den;
834 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
835 *pnum = st->codec->time_base.num;
836 *pden = st->codec->time_base.den;
837 if (pc && pc->repeat_pict) {
838 *pnum = (*pnum) * (1 + pc->repeat_pict);
840 //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
841 //Thus if we have no parser in such case leave duration undefined.
842 if(st->codec->ticks_per_frame>1 && !pc){
847 case AVMEDIA_TYPE_AUDIO:
848 frame_size = get_audio_frame_size(st->codec, pkt->size, 0);
849 if (frame_size <= 0 || st->codec->sample_rate <= 0)
852 *pden = st->codec->sample_rate;
859 static int is_intra_only(AVCodecContext *enc){
860 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
862 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
863 switch(enc->codec_id){
865 case CODEC_ID_MJPEGB:
867 case CODEC_ID_PRORES:
868 case CODEC_ID_RAWVIDEO:
870 case CODEC_ID_DVVIDEO:
871 case CODEC_ID_HUFFYUV:
872 case CODEC_ID_FFVHUFF:
877 case CODEC_ID_JPEG2000:
878 case CODEC_ID_UTVIDEO:
886 static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
890 if (pktl == s->parse_queue_end)
891 return s->packet_buffer;
895 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
896 int64_t dts, int64_t pts)
898 AVStream *st= s->streams[stream_index];
899 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
901 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))
904 st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
907 if (is_relative(pts))
908 pts += st->first_dts - RELATIVE_TS_BASE;
910 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
911 if(pktl->pkt.stream_index != stream_index)
913 if(is_relative(pktl->pkt.pts))
914 pktl->pkt.pts += st->first_dts - RELATIVE_TS_BASE;
916 if(is_relative(pktl->pkt.dts))
917 pktl->pkt.dts += st->first_dts - RELATIVE_TS_BASE;
919 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
920 st->start_time= pktl->pkt.pts;
922 if (st->start_time == AV_NOPTS_VALUE)
923 st->start_time = pts;
926 static void update_initial_durations(AVFormatContext *s, AVStream *st,
927 int stream_index, int duration)
929 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
930 int64_t cur_dts= RELATIVE_TS_BASE;
932 if(st->first_dts != AV_NOPTS_VALUE){
933 cur_dts= st->first_dts;
934 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
935 if(pktl->pkt.stream_index == stream_index){
936 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
941 if(pktl && pktl->pkt.dts != st->first_dts) {
942 av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s in que\n", av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts));
946 av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in ques\n", av_ts2str(st->first_dts));
949 pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
950 st->first_dts = cur_dts;
951 }else if(st->cur_dts != RELATIVE_TS_BASE)
954 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
955 if(pktl->pkt.stream_index != stream_index)
957 if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
958 && !pktl->pkt.duration){
959 pktl->pkt.dts= cur_dts;
960 if(!st->codec->has_b_frames)
961 pktl->pkt.pts= cur_dts;
962 // if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
963 pktl->pkt.duration = duration;
966 cur_dts = pktl->pkt.dts + pktl->pkt.duration;
969 st->cur_dts= cur_dts;
972 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
973 AVCodecParserContext *pc, AVPacket *pkt)
975 int num, den, presentation_delayed, delay, i;
978 if (s->flags & AVFMT_FLAG_NOFILLIN)
981 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
982 pkt->dts= AV_NOPTS_VALUE;
984 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
985 //FIXME Set low_delay = 0 when has_b_frames = 1
986 st->codec->has_b_frames = 1;
988 /* do we have a video B-frame ? */
989 delay= st->codec->has_b_frames;
990 presentation_delayed = 0;
992 /* XXX: need has_b_frame, but cannot get it if the codec is
995 pc && pc->pict_type != AV_PICTURE_TYPE_B)
996 presentation_delayed = 1;
998 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > pkt->pts && st->pts_wrap_bits<63){
999 pkt->dts -= 1LL<<st->pts_wrap_bits;
1002 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
1003 // we take the conservative approach and discard both
1004 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
1005 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
1006 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1007 pkt->dts= AV_NOPTS_VALUE;
1010 if (pkt->duration == 0) {
1011 compute_frame_duration(&num, &den, st, pc, pkt);
1013 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
1016 if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
1017 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1019 /* correct timestamps with byte offset if demuxers only have timestamps
1020 on packet boundaries */
1021 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1022 /* this will estimate bitrate based on this frame's duration and size */
1023 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1024 if(pkt->pts != AV_NOPTS_VALUE)
1026 if(pkt->dts != AV_NOPTS_VALUE)
1030 if (pc && pc->dts_sync_point >= 0) {
1031 // we have synchronization info from the parser
1032 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1034 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1035 if (pkt->dts != AV_NOPTS_VALUE) {
1036 // got DTS from the stream, update reference timestamp
1037 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1038 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1039 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1040 // compute DTS based on reference timestamp
1041 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1042 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1044 if (pc->dts_sync_point > 0)
1045 st->reference_dts = pkt->dts; // new reference
1049 /* This may be redundant, but it should not hurt. */
1050 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1051 presentation_delayed = 1;
1053 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
1054 // presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
1055 /* interpolate PTS and DTS if they are not present */
1056 //We skip H264 currently because delay and has_b_frames are not reliably set
1057 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1058 if (presentation_delayed) {
1059 /* DTS = decompression timestamp */
1060 /* PTS = presentation timestamp */
1061 if (pkt->dts == AV_NOPTS_VALUE)
1062 pkt->dts = st->last_IP_pts;
1063 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1064 if (pkt->dts == AV_NOPTS_VALUE)
1065 pkt->dts = st->cur_dts;
1067 /* this is tricky: the dts must be incremented by the duration
1068 of the frame we are displaying, i.e. the last I- or P-frame */
1069 if (st->last_IP_duration == 0)
1070 st->last_IP_duration = pkt->duration;
1071 if(pkt->dts != AV_NOPTS_VALUE)
1072 st->cur_dts = pkt->dts + st->last_IP_duration;
1073 st->last_IP_duration = pkt->duration;
1074 st->last_IP_pts= pkt->pts;
1075 /* cannot compute PTS if not present (we can compute it only
1076 by knowing the future */
1077 } else if (pkt->pts != AV_NOPTS_VALUE ||
1078 pkt->dts != AV_NOPTS_VALUE ||
1080 int duration = pkt->duration;
1082 if(pkt->pts != AV_NOPTS_VALUE && duration){
1083 int64_t old_diff= FFABS(st->cur_dts - duration - pkt->pts);
1084 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1085 if( old_diff < new_diff && old_diff < (duration>>3)
1086 && (!strcmp(s->iformat->name, "mpeg") ||
1087 !strcmp(s->iformat->name, "mpegts"))){
1088 pkt->pts += duration;
1089 av_log(s, AV_LOG_WARNING, "Adjusting PTS forward\n");
1090 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%s size:%d\n",
1091 // pkt->stream_index, old_diff, new_diff, pkt->duration, av_ts2str(st->cur_dts), pkt->size);
1095 /* presentation is not delayed : PTS and DTS are the same */
1096 if (pkt->pts == AV_NOPTS_VALUE)
1097 pkt->pts = pkt->dts;
1098 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
1100 if (pkt->pts == AV_NOPTS_VALUE)
1101 pkt->pts = st->cur_dts;
1102 pkt->dts = pkt->pts;
1103 if (pkt->pts != AV_NOPTS_VALUE)
1104 st->cur_dts = pkt->pts + duration;
1108 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1109 st->pts_buffer[0]= pkt->pts;
1110 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1111 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1112 if(pkt->dts == AV_NOPTS_VALUE)
1113 pkt->dts= st->pts_buffer[0];
1114 if(st->codec->codec_id == CODEC_ID_H264){ // we skipped it above so we try here
1115 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1117 if(pkt->dts > st->cur_dts)
1118 st->cur_dts = pkt->dts;
1121 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1122 // presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1125 if(is_intra_only(st->codec))
1126 pkt->flags |= AV_PKT_FLAG_KEY;
1128 pkt->convergence_duration = pc->convergence_duration;
1131 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1134 AVPacketList *pktl = *pkt_buf;
1135 *pkt_buf = pktl->next;
1136 av_free_packet(&pktl->pkt);
1139 *pkt_buf_end = NULL;
1143 * Parse a packet, add all split parts to parse_queue
1145 * @param pkt packet to parse, NULL when flushing the parser at end of stream
1147 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1149 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1150 AVStream *st = s->streams[stream_index];
1151 uint8_t *data = pkt ? pkt->data : NULL;
1152 int size = pkt ? pkt->size : 0;
1153 int ret = 0, got_output = 0;
1156 av_init_packet(&flush_pkt);
1159 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1160 // preserve 0-size sync packets
1161 compute_pkt_fields(s, st, st->parser, pkt);
1164 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1167 av_init_packet(&out_pkt);
1168 len = av_parser_parse2(st->parser, st->codec,
1169 &out_pkt.data, &out_pkt.size, data, size,
1170 pkt->pts, pkt->dts, pkt->pos);
1172 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1173 /* increment read pointer */
1177 got_output = !!out_pkt.size;
1182 /* set the duration */
1183 out_pkt.duration = 0;
1184 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1185 if (st->codec->sample_rate > 0) {
1186 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1187 (AVRational){ 1, st->codec->sample_rate },
1191 } else if (st->codec->time_base.num != 0 &&
1192 st->codec->time_base.den != 0) {
1193 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1194 st->codec->time_base,
1199 out_pkt.stream_index = st->index;
1200 out_pkt.pts = st->parser->pts;
1201 out_pkt.dts = st->parser->dts;
1202 out_pkt.pos = st->parser->pos;
1204 if (st->parser->key_frame == 1 ||
1205 (st->parser->key_frame == -1 &&
1206 st->parser->pict_type == AV_PICTURE_TYPE_I))
1207 out_pkt.flags |= AV_PKT_FLAG_KEY;
1209 compute_pkt_fields(s, st, st->parser, &out_pkt);
1211 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1212 out_pkt.flags & AV_PKT_FLAG_KEY) {
1213 int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? out_pkt.pos : st->parser->frame_offset;
1214 ff_reduce_index(s, st->index);
1215 av_add_index_entry(st, pos, out_pkt.dts,
1216 0, 0, AVINDEX_KEYFRAME);
1219 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1220 out_pkt.destruct = pkt->destruct;
1221 pkt->destruct = NULL;
1223 if ((ret = av_dup_packet(&out_pkt)) < 0)
1226 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1227 av_free_packet(&out_pkt);
1228 ret = AVERROR(ENOMEM);
1234 /* end of the stream => close and free the parser */
1235 if (pkt == &flush_pkt) {
1236 av_parser_close(st->parser);
1241 av_free_packet(pkt);
1245 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1246 AVPacketList **pkt_buffer_end,
1250 av_assert0(*pkt_buffer);
1253 *pkt_buffer = pktl->next;
1255 *pkt_buffer_end = NULL;
/*
 * Read the next packet, running it through the stream's parser when one is
 * needed: raw packets come from ff_read_packet(), parsed output is queued
 * in s->parse_queue and drained from there. On EOF the parsers are flushed
 * so buffered frames are still delivered.
 * NOTE(review): excerpt has gaps (error returns, loop closers) — confirm
 * against the full source.
 */
1260 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1262 int ret = 0, i, got_packet = 0;
1264 av_init_packet(pkt);
/* loop until a packet is ready or the parse queue has output to drain */
1266 while (!got_packet && !s->parse_queue) {
1270 /* read next packet */
1271 ret = ff_read_packet(s, &cur_pkt);
1273 if (ret == AVERROR(EAGAIN))
1275 /* flush the parsers */
1276 for(i = 0; i < s->nb_streams; i++) {
1278 if (st->parser && st->need_parsing)
/* NULL packet == flush request to the parser */
1279 parse_packet(s, NULL, st->index);
1281 /* all remaining packets are now in parse_queue =>
1282 * really terminate parsing */
1286 st = s->streams[cur_pkt.stream_index];
/* sanity check: pts must never be smaller than dts */
1288 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1289 cur_pkt.dts != AV_NOPTS_VALUE &&
1290 cur_pkt.pts < cur_pkt.dts) {
1291 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1292 cur_pkt.stream_index,
1293 av_ts2str(cur_pkt.pts),
1294 av_ts2str(cur_pkt.dts),
1297 if (s->debug & FF_FDEBUG_TS)
1298 av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1299 cur_pkt.stream_index,
1300 av_ts2str(cur_pkt.pts),
1301 av_ts2str(cur_pkt.dts),
/* lazily create the parser on first use for this stream */
1306 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1307 st->parser = av_parser_init(st->codec->codec_id);
1309 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1310 "%s, packets or times may be invalid.\n",
1311 avcodec_get_name(st->codec->codec_id));
1312 /* no parser available: just output the raw packets */
1313 st->need_parsing = AVSTREAM_PARSE_NONE;
1314 } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1315 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1316 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1317 st->parser->flags |= PARSER_FLAG_ONCE;
1321 if (!st->need_parsing || !st->parser) {
1322 /* no parsing needed: we just output the packet as is */
1324 compute_pkt_fields(s, st, NULL, pkt);
/* keyframes of generic-index formats are recorded for later seeking */
1325 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1326 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1327 ff_reduce_index(s, st->index);
1328 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1331 } else if (st->discard < AVDISCARD_ALL) {
1332 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
/* fully discarded stream: drop the packet */
1336 av_free_packet(&cur_pkt);
1338 if (pkt->flags & AV_PKT_FLAG_KEY)
1339 st->skip_to_keyframe = 0;
1340 if (st->skip_to_keyframe) {
1341 av_free_packet(&cur_pkt);
/* drain one parsed packet, if the loop above queued any */
1346 if (!got_packet && s->parse_queue)
1347 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1349 if(s->debug & FF_FDEBUG_TS)
1350 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1352 av_ts2str(pkt->pts),
1353 av_ts2str(pkt->dts),
/*
 * Public frame reader. Without AVFMT_FLAG_GENPTS it simply drains the
 * packet buffer or calls read_frame_internal(). With GENPTS it buffers
 * packets and fills in missing pts values from the dts of the following
 * packet of the same stream (modulo pts_wrap_bits).
 * NOTE(review): excerpt has gaps (loop closers, eof bookkeeping) —
 * confirm against the full source.
 */
1361 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1363 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
/* fast path: no pts generation requested */
1368 ret = s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
1369 &s->packet_buffer_end,
1371 read_frame_internal(s, pkt);
1376 AVPacketList *pktl = s->packet_buffer;
1379 AVPacket *next_pkt = &pktl->pkt;
1381 if (next_pkt->dts != AV_NOPTS_VALUE) {
1382 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1383 // last dts seen for this stream. if any of packets following
1384 // current one had no dts, we will set this to AV_NOPTS_VALUE.
1385 int64_t last_dts = next_pkt->dts;
/* scan buffered packets for a later dts usable as this packet's pts */
1386 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1387 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1388 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1389 if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1390 next_pkt->pts = pktl->pkt.dts;
1392 if (last_dts != AV_NOPTS_VALUE) {
1393 // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1394 last_dts = pktl->pkt.dts;
1399 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1400 // Fixing the last reference frame had none pts issue (For MXF etc).
1401 // We only do this when
1403 // 2. we are not able to resolve a pts value for current packet.
1404 // 3. the packets for this stream at the end of the files had valid dts.
1405 next_pkt->pts = last_dts + next_pkt->duration;
1407 pktl = s->packet_buffer;
1410 /* read packet from packet buffer, if there is data */
1411 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1412 next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1413 ret = read_from_packet_buffer(&s->packet_buffer,
1414 &s->packet_buffer_end, pkt);
/* buffer underrun: pull another packet from the demuxer */
1419 ret = read_frame_internal(s, pkt);
1421 if (pktl && ret != AVERROR(EAGAIN)) {
1428 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1429 &s->packet_buffer_end)) < 0)
1430 return AVERROR(ENOMEM);
/* strip the internal RELATIVE_TS_BASE offset before returning to the user */
1434 if (is_relative(pkt->dts))
1435 pkt->dts -= RELATIVE_TS_BASE;
1436 if (is_relative(pkt->pts))
1437 pkt->pts -= RELATIVE_TS_BASE;
1441 /* XXX: suppress the packet queue */
/*
 * Free every queued packet: the parse queue, the resolved packet buffer
 * and the raw packet buffer, then restore the raw-buffer byte budget.
 */
1442 static void flush_packet_queue(AVFormatContext *s)
1444 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1445 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1446 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
/* budget limiting how much raw data may be buffered while probing */
1448 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1451 /*******************************************************/
/*
 * Pick a default stream for seeking: prefer the first real video stream
 * (attached pictures excluded), otherwise the first audio stream,
 * otherwise stream 0.
 * NOTE(review): excerpt is missing the early-return value and the return
 * inside the loop — confirm against the full source.
 */
1454 int av_find_default_stream_index(AVFormatContext *s)
1456 int first_audio_index = -1;
1460 if (s->nb_streams <= 0)
1462 for(i = 0; i < s->nb_streams; i++) {
/* attached pictures are cover art, not a seekable video stream */
1464 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1465 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1468 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1469 first_audio_index = i;
/* no video found: fall back to audio, then to stream 0 */
1471 return first_audio_index >= 0 ? first_audio_index : 0;
1475 * Flush the frame reader.
/*
 * Reset all per-stream read state after a seek: drop queued packets,
 * close parsers and invalidate cached timestamps so the next read starts
 * from a clean slate.
 */
1477 void ff_read_frame_flush(AVFormatContext *s)
1482 flush_packet_queue(s);
1484 /* for each stream, reset read state */
1485 for(i = 0; i < s->nb_streams; i++) {
1489 av_parser_close(st->parser);
1492 st->last_IP_pts = AV_NOPTS_VALUE;
/* streams without a known first_dts keep timestamps relative */
1493 if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1494 else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1495 st->reference_dts = AV_NOPTS_VALUE;
1497 st->probe_packets = MAX_PROBE_PACKETS;
/* invalidate the pts reorder buffer */
1499 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1500 st->pts_buffer[j]= AV_NOPTS_VALUE;
1504 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1508 for(i = 0; i < s->nb_streams; i++) {
1509 AVStream *st = s->streams[i];
1511 st->cur_dts = av_rescale(timestamp,
1512 st->time_base.den * (int64_t)ref_st->time_base.num,
1513 st->time_base.num * (int64_t)ref_st->time_base.den);
1517 void ff_reduce_index(AVFormatContext *s, int stream_index)
1519 AVStream *st= s->streams[stream_index];
1520 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1522 if((unsigned)st->nb_index_entries >= max_entries){
1524 for(i=0; 2*i<st->nb_index_entries; i++)
1525 st->index_entries[i]= st->index_entries[2*i];
1526 st->nb_index_entries= i;
/*
 * Insert an index entry (pos/timestamp/flags) into a sorted index array,
 * growing the array as needed. An existing entry with the same timestamp
 * is updated in place rather than duplicated.
 * NOTE(review): excerpt is missing error returns, the append fast path
 * closer and the final return of the index — confirm against the full
 * source.
 */
1530 int ff_add_index_entry(AVIndexEntry **index_entries,
1531 int *nb_index_entries,
1532 unsigned int *index_entries_allocated_size,
1533 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1535 AVIndexEntry *entries, *ie;
/* reject growth that would overflow the allocation size computation */
1538 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1541 if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1542 timestamp -= RELATIVE_TS_BASE;
1544 entries = av_fast_realloc(*index_entries,
1545 index_entries_allocated_size,
1546 (*nb_index_entries + 1) *
1547 sizeof(AVIndexEntry));
1551 *index_entries= entries;
/* locate the insertion point; ANY so non-keyframe entries match too */
1553 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1556 index= (*nb_index_entries)++;
1557 ie= &entries[index];
1558 assert(index==0 || ie[-1].timestamp < timestamp);
1560 ie= &entries[index];
/* different timestamp: shift the tail to make room for a new entry */
1561 if(ie->timestamp != timestamp){
1562 if(ie->timestamp <= timestamp)
1564 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1565 (*nb_index_entries)++;
1566 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1567 distance= ie->min_distance;
1571 ie->timestamp = timestamp;
1572 ie->min_distance= distance;
1579 int av_add_index_entry(AVStream *st,
1580 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1582 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1583 &st->index_entries_allocated_size, pos,
1584 timestamp, size, distance, flags);
/*
 * Binary-search a sorted index for wanted_timestamp. Honors
 * AVSEEK_FLAG_BACKWARD (round down vs. up) and, unless AVSEEK_FLAG_ANY,
 * walks to the nearest keyframe entry in the chosen direction.
 * NOTE(review): excerpt is missing the a/b initialization, the bisection
 * loop header and the final returns — confirm against the full source.
 */
1587 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1588 int64_t wanted_timestamp, int flags)
1596 //optimize appending index entries at the end
1597 if(b && entries[b-1].timestamp < wanted_timestamp)
1602 timestamp = entries[m].timestamp;
1603 if(timestamp >= wanted_timestamp)
1605 if(timestamp <= wanted_timestamp)
/* pick the lower (BACKWARD) or upper bound of the bisection */
1608 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1610 if(!(flags & AVSEEK_FLAG_ANY)){
/* step toward the nearest keyframe entry */
1611 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1612 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1621 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1624 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1625 wanted_timestamp, flags);
/*
 * Seek using binary search over read_timestamp(): seed the search bounds
 * from cached index entries when available, run ff_gen_search(), then
 * reposition the IO context and reset reader state.
 * NOTE(review): excerpt is missing several early returns and closers —
 * confirm against the full source.
 */
1628 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1630 AVInputFormat *avif= s->iformat;
1631 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1632 int64_t ts_min, ts_max, ts;
1637 if (stream_index < 0)
1640 av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1643 ts_min= AV_NOPTS_VALUE;
1644 pos_limit= -1; //gcc falsely says it may be uninitialized
1646 st= s->streams[stream_index];
/* use the index, if any, to narrow the initial search window */
1647 if(st->index_entries){
1650 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1651 index= FFMAX(index, 0);
1652 e= &st->index_entries[index];
1654 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1656 ts_min= e->timestamp;
1657 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1658 pos_min, av_ts2str(ts_min));
/* look for an upper bound at or after the target */
1663 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1664 assert(index < st->nb_index_entries);
1666 e= &st->index_entries[index];
1667 assert(e->timestamp >= target_ts);
1669 ts_max= e->timestamp;
1670 pos_limit= pos_max - e->min_distance;
1671 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
1672 pos_max, pos_limit, av_ts2str(ts_max));
1676 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
/* do the actual repositioning, then flush the reader state */
1681 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1684 ff_read_frame_flush(s);
1685 ff_update_cur_dts(s, st, ts);
/*
 * Generic timestamp search: given (possibly unknown) bounds, establish
 * ts_min/ts_max by probing the file, then interpolate/bisect/linear-scan
 * with read_timestamp() until pos_min meets pos_limit. Returns the file
 * position to seek to and stores the found timestamp in *ts_ret.
 * NOTE(review): excerpt has many gaps (loop closers, no_change updates,
 * error returns); statement order here is load-bearing — confirm against
 * the full source before touching.
 */
1690 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1691 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1692 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1693 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1696 int64_t start_pos, filesize;
1699 av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
/* establish the lower bound if the caller did not supply one */
1701 if(ts_min == AV_NOPTS_VALUE){
1702 pos_min = s->data_offset;
1703 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1704 if (ts_min == AV_NOPTS_VALUE)
1708 if(ts_min >= target_ts){
/* establish the upper bound by probing backwards from the file end */
1713 if(ts_max == AV_NOPTS_VALUE){
1715 filesize = avio_size(s->pb);
1716 pos_max = filesize - 1;
1719 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1721 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1722 if (ts_max == AV_NOPTS_VALUE)
/* extend ts_max to the last timestamp in the file */
1726 int64_t tmp_pos= pos_max + 1;
1727 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1728 if(tmp_ts == AV_NOPTS_VALUE)
1732 if(tmp_pos >= filesize)
1738 if(ts_max <= target_ts){
1743 if(ts_min > ts_max){
1745 }else if(ts_min == ts_max){
/* main refinement loop */
1750 while (pos_min < pos_limit) {
1751 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1752 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1753 assert(pos_limit <= pos_max);
1756 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1757 // interpolate position (better than dichotomy)
1758 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1759 + pos_min - approximate_keyframe_distance;
1760 }else if(no_change==1){
1761 // bisection, if interpolation failed to change min or max pos last time
1762 pos = (pos_min + pos_limit)>>1;
1764 /* linear search if bisection failed, can only happen if there
1765 are very few or no keyframes between min/max */
1770 else if(pos > pos_limit)
1774 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1779 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1780 pos_min, pos, pos_max,
1781 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1782 pos_limit, start_pos, no_change);
1783 if(ts == AV_NOPTS_VALUE){
1784 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1787 assert(ts != AV_NOPTS_VALUE);
/* shrink whichever bound the probe result allows */
1788 if (target_ts <= ts) {
1789 pos_limit = start_pos - 1;
1793 if (target_ts >= ts) {
/* pick the bound matching the requested seek direction */
1799 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1800 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1803 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
/* note: probes via pos_min (incremented above in the full source) */
1805 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1806 av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1807 pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
1813 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1814 int64_t pos_min, pos_max;
1816 pos_min = s->data_offset;
1817 pos_max = avio_size(s->pb) - 1;
1819 if (pos < pos_min) pos= pos_min;
1820 else if(pos > pos_max) pos= pos_max;
1822 avio_seek(s->pb, pos, SEEK_SET);
/*
 * Index-based generic seek: look the timestamp up in the stream's index;
 * if it lies beyond the indexed region, read packets forward (building
 * index entries as a side effect) until the target is covered, then seek
 * to the chosen entry's position.
 * NOTE(review): excerpt is missing loop headers, closers and several
 * returns — confirm against the full source.
 */
1827 static int seek_frame_generic(AVFormatContext *s,
1828 int stream_index, int64_t timestamp, int flags)
1835 st = s->streams[stream_index];
1837 index = av_index_search_timestamp(st, timestamp, flags);
/* target precedes the first index entry */
1839 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1842 if(index < 0 || index==st->nb_index_entries-1){
/* resume reading from the last indexed position (or the file start) */
1846 if(st->nb_index_entries){
1847 assert(st->index_entries);
1848 ie= &st->index_entries[st->nb_index_entries-1];
1849 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1851 ff_update_cur_dts(s, st, ie->timestamp);
1853 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1859 read_status = av_read_frame(s, &pkt);
1860 } while (read_status == AVERROR(EAGAIN));
1861 if (read_status < 0)
1863 av_free_packet(&pkt);
1864 if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1865 if(pkt.flags & AV_PKT_FLAG_KEY)
/* give up on streams that appear to have no keyframes past the target */
1867 if(nonkey++ > 1000 && st->codec->codec_id != CODEC_ID_CDGRAPHICS){
1868 av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1873 index = av_index_search_timestamp(st, timestamp, flags);
1878 ff_read_frame_flush(s);
1879 AV_NOWARN_DEPRECATED(
1880 if (s->iformat->read_seek){
1881 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1885 ie = &st->index_entries[index];
1886 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1888 ff_update_cur_dts(s, st, ie->timestamp);
/*
 * Dispatch a seek request to the best available strategy, in order:
 * byte seek, the demuxer's own read_seek, binary search via
 * read_timestamp, and finally the generic index-based seek.
 * NOTE(review): excerpt is missing several returns and closers —
 * confirm against the full source.
 */
1893 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1894 int64_t timestamp, int flags)
1899 if (flags & AVSEEK_FLAG_BYTE) {
/* byte seeking is optional for demuxers */
1900 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1902 ff_read_frame_flush(s);
1903 return seek_frame_byte(s, stream_index, timestamp, flags);
1906 if(stream_index < 0){
1907 stream_index= av_find_default_stream_index(s);
1908 if(stream_index < 0)
1911 st= s->streams[stream_index];
1912 /* timestamp for default must be expressed in AV_TIME_BASE units */
1913 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1916 /* first, we try the format specific seek */
1917 AV_NOWARN_DEPRECATED(
1918 if (s->iformat->read_seek) {
1919 ff_read_frame_flush(s);
1920 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
/* fall back to the generic strategies */
1928 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1929 ff_read_frame_flush(s);
1930 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1931 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1932 ff_read_frame_flush(s);
1933 return seek_frame_generic(s, stream_index, timestamp, flags);
1939 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1941 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1944 queue_attached_pictures(s);
/*
 * New-style seek with a [min_ts, ts, max_ts] window: use the demuxer's
 * read_seek2 when available, otherwise emulate via av_seek_frame(),
 * choosing the direction from which bound is closer and retrying with a
 * window edge on failure.
 * NOTE(review): excerpt is missing returns and closers — confirm against
 * the full source.
 */
1949 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
/* the window must contain the target */
1951 if(min_ts > ts || max_ts < ts)
1954 if (s->iformat->read_seek2) {
1956 ff_read_frame_flush(s);
1957 ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1960 queue_attached_pictures(s);
1964 if(s->iformat->read_timestamp){
1965 //try to seek via read_timestamp()
1968 //Fallback to old API if new is not implemented but old is
1969 //Note the old has somewat different sematics
1970 AV_NOWARN_DEPRECATED(
1971 if (s->iformat->read_seek || 1) {
/* seek backward when ts is closer to max_ts than to min_ts */
1972 int dir = (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0);
1973 int ret = av_seek_frame(s, stream_index, ts, flags | dir);
1974 if (ret<0 && ts != min_ts && max_ts != ts) {
/* retry against the window edge, then with the opposite direction */
1975 ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
1977 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
1983 // try some generic seek like seek_frame_generic() but with new ts semantics
1986 /*******************************************************/
1989 * Return TRUE if the stream has accurate duration in any stream.
1991 * @return TRUE if the stream has accurate duration for at least one component.
/*
 * True when either a stream or the container itself carries a known
 * duration.
 * NOTE(review): excerpt is missing the returns inside the loop — confirm
 * against the full source.
 */
1993 static int has_duration(AVFormatContext *ic)
1998 for(i = 0;i < ic->nb_streams; i++) {
1999 st = ic->streams[i];
2000 if (st->duration != AV_NOPTS_VALUE)
/* container-level duration counts as well */
2003 if (ic->duration != AV_NOPTS_VALUE)
2009 * Estimate the stream timings from the one of each components.
2011 * Also computes the global bitrate if possible.
/*
 * Derive the container's start_time/duration from the per-stream values
 * (all rescaled into AV_TIME_BASE), treating subtitle start times
 * separately so a lone early subtitle does not skew the start time, and
 * compute bit_rate from file size / duration when possible.
 */
2013 static void update_stream_timings(AVFormatContext *ic)
2015 int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2016 int64_t duration, duration1, filesize;
2020 start_time = INT64_MAX;
2021 start_time_text = INT64_MAX;
2022 end_time = INT64_MIN;
2023 duration = INT64_MIN;
2024 for(i = 0;i < ic->nb_streams; i++) {
2025 st = ic->streams[i];
2026 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2027 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
/* track subtitle start times separately from audio/video */
2028 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2029 if (start_time1 < start_time_text)
2030 start_time_text = start_time1;
2032 start_time = FFMIN(start_time, start_time1);
2033 if (st->duration != AV_NOPTS_VALUE) {
2034 end_time1 = start_time1
2035 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2036 end_time = FFMAX(end_time, end_time1);
2039 if (st->duration != AV_NOPTS_VALUE) {
2040 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2041 duration = FFMAX(duration, duration1);
/* only trust a subtitle start time when it is close to the A/V one */
2044 if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2045 start_time = start_time_text;
2046 if (start_time != INT64_MAX) {
2047 ic->start_time = start_time;
2048 if (end_time != INT64_MIN)
2049 duration = FFMAX(duration, end_time - start_time);
2051 if (duration != INT64_MIN && ic->duration == AV_NOPTS_VALUE) {
2052 ic->duration = duration;
2054 if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2055 /* compute the bitrate */
2056 ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
2057 (double)ic->duration;
/*
 * Recompute the container timings, then back-fill any stream that still
 * lacks a start_time/duration with the container values rescaled into
 * the stream's own time base.
 */
2061 static void fill_all_stream_timings(AVFormatContext *ic)
2066 update_stream_timings(ic);
2067 for(i = 0;i < ic->nb_streams; i++) {
2068 st = ic->streams[i];
2069 if (st->start_time == AV_NOPTS_VALUE) {
2070 if(ic->start_time != AV_NOPTS_VALUE)
2071 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2072 if(ic->duration != AV_NOPTS_VALUE)
2073 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/*
 * Last-resort duration estimate: sum per-stream bitrates when the
 * container bitrate is unknown, then derive each stream's duration from
 * file size / total bitrate. Inaccurate by design.
 * NOTE(review): excerpt is missing a few closers and the filesize>0
 * guard — confirm against the full source.
 */
2078 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2080 int64_t filesize, duration;
2084 /* if bit_rate is already set, we believe it */
2085 if (ic->bit_rate <= 0) {
2087 for(i=0;i<ic->nb_streams;i++) {
2088 st = ic->streams[i];
2089 if (st->codec->bit_rate > 0)
2090 bit_rate += st->codec->bit_rate;
2092 ic->bit_rate = bit_rate;
2095 /* if duration is already set, we believe it */
2096 if (ic->duration == AV_NOPTS_VALUE &&
2097 ic->bit_rate != 0) {
2098 filesize = ic->pb ? avio_size(ic->pb) : 0;
2100 for(i = 0; i < ic->nb_streams; i++) {
2101 st = ic->streams[i];
/* duration = 8 * filesize / bit_rate, in the stream's time base */
2102 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2103 if (st->duration == AV_NOPTS_VALUE)
2104 st->duration = duration;
/* byte window read from the end of the file per attempt, and max attempts */
2110 #define DURATION_MAX_READ_SIZE 250000
2111 #define DURATION_MAX_RETRY 3
2113 /* only usable for MPEG-PS streams */
/*
 * Estimate stream durations by reading packets near the end of the file
 * and taking the last pts seen per stream, widening the window
 * (DURATION_MAX_READ_SIZE << retry) until something is found or
 * DURATION_MAX_RETRY is exhausted. Restores the original file position
 * afterwards.
 * NOTE(review): excerpt is missing loop headers and some closers —
 * confirm against the full source.
 */
2114 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2116 AVPacket pkt1, *pkt = &pkt1;
2118 int read_size, i, ret;
2120 int64_t filesize, offset, duration;
2123 /* flush packet queue */
2124 flush_packet_queue(ic);
2126 for (i=0; i<ic->nb_streams; i++) {
2127 st = ic->streams[i];
2128 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2129 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
/* parsers would interfere with the raw end-of-file scan */
2132 av_parser_close(st->parser);
2137 /* estimate the end time (duration) */
2138 /* XXX: may need to support wrapping */
2139 filesize = ic->pb ? avio_size(ic->pb) : 0;
2140 end_time = AV_NOPTS_VALUE;
2142 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2146 avio_seek(ic->pb, offset, SEEK_SET);
/* only read the fresh part of the widened window */
2149 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2153 ret = ff_read_packet(ic, pkt);
2154 } while(ret == AVERROR(EAGAIN));
2157 read_size += pkt->size;
2158 st = ic->streams[pkt->stream_index];
2159 if (pkt->pts != AV_NOPTS_VALUE &&
2160 (st->start_time != AV_NOPTS_VALUE ||
2161 st->first_dts != AV_NOPTS_VALUE)) {
2162 duration = end_time = pkt->pts;
2163 if (st->start_time != AV_NOPTS_VALUE)
2164 duration -= st->start_time;
2166 duration -= st->first_dts;
/* compensate a timestamp wraparound */
2168 duration += 1LL<<st->pts_wrap_bits;
2170 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
2171 st->duration = duration;
2174 av_free_packet(pkt);
2176 }while( end_time==AV_NOPTS_VALUE
2177 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2178 && ++retry <= DURATION_MAX_RETRY);
2180 fill_all_stream_timings(ic);
/* restore the position the caller saved before probing */
2182 avio_seek(ic->pb, old_offset, SEEK_SET);
2183 for (i=0; i<ic->nb_streams; i++) {
2185 st->cur_dts= st->first_dts;
2186 st->last_IP_pts = AV_NOPTS_VALUE;
2187 st->reference_dts = AV_NOPTS_VALUE;
/*
 * Choose a timing-estimation strategy: accurate pts scan for seekable
 * MPEG-PS/TS, per-stream timings when any are present, otherwise the
 * bitrate-based guess; finally refresh the container-level timings.
 */
2191 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2195 /* get the file size, if possible */
2196 if (ic->iformat->flags & AVFMT_NOFILE) {
2199 file_size = avio_size(ic->pb);
2200 file_size = FFMAX(0, file_size);
/* only the MPEG demuxers expose reliable end-of-file PTSes */
2203 if ((!strcmp(ic->iformat->name, "mpeg") ||
2204 !strcmp(ic->iformat->name, "mpegts")) &&
2205 file_size && ic->pb->seekable) {
2206 /* get accurate estimate from the PTSes */
2207 estimate_timings_from_pts(ic, old_offset);
2208 } else if (has_duration(ic)) {
2209 /* at least one component has timings - we use them for all
2211 fill_all_stream_timings(ic);
2213 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2214 /* less precise: use bitrate info */
2215 estimate_timings_from_bit_rate(ic);
2217 update_stream_timings(ic);
2221 AVStream av_unused *st;
/* debug dump of the final per-stream and container timings */
2222 for(i = 0;i < ic->nb_streams; i++) {
2223 st = ic->streams[i];
2224 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2225 (double) st->start_time / AV_TIME_BASE,
2226 (double) st->duration / AV_TIME_BASE);
2228 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2229 (double) ic->start_time / AV_TIME_BASE,
2230 (double) ic->duration / AV_TIME_BASE,
2231 ic->bit_rate / 1000);
/*
 * True when enough codec parameters are known for this stream to be
 * usable (per-media-type checks plus a known codec id).
 * NOTE(review): excerpt is missing several val assignments and returns
 * inside the switch — confirm against the full source.
 */
2235 static int has_codec_parameters(AVStream *st)
2237 AVCodecContext *avctx = st->codec;
2239 switch (avctx->codec_type) {
2240 case AVMEDIA_TYPE_AUDIO:
2241 val = avctx->sample_rate && avctx->channels;
2242 if (!avctx->frame_size && determinable_frame_size(avctx))
/* once a decoder ran, the sample format must be resolved too */
2244 if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2247 case AVMEDIA_TYPE_VIDEO:
2249 if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
2252 case AVMEDIA_TYPE_DATA:
/* data streams are fine even without a codec id */
2253 if(avctx->codec_id == CODEC_ID_NONE) return 1;
2258 return avctx->codec_id != CODEC_ID_NONE && val != 0;
2261 static int has_decode_delay_been_guessed(AVStream *st)
2263 return st->codec->codec_id != CODEC_ID_H264 ||
2264 st->info->nb_decoded_frames >= 6;
2267 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/*
 * Decode a probe packet to discover missing codec parameters
 * (sample_fmt, pix_fmt, frame size, ...). Opens the decoder on demand
 * with threads forced to 1; keeps decoding as long as parameters or the
 * decode delay are still unknown.
 * NOTE(review): excerpt is missing error paths, the loop tail and the
 * final return — confirm against the full source.
 */
2268 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2271 int got_picture = 1, ret = 0;
/* work on a copy so the caller's packet is not consumed */
2273 AVPacket pkt = *avpkt;
2275 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2276 AVDictionary *thread_opt = NULL;
2278 codec = st->codec->codec ? st->codec->codec :
2279 avcodec_find_decoder(st->codec->codec_id);
/* found_decoder == -1 marks "no decoder available / open failed" */
2282 st->info->found_decoder = -1;
2286 /* force thread count to 1 since the h264 decoder will not extract SPS
2287 * and PPS to extradata during multi-threaded decoding */
2288 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2289 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2291 av_dict_free(&thread_opt);
2293 st->info->found_decoder = -1;
2296 st->info->found_decoder = 1;
2297 } else if (!st->info->found_decoder)
2298 st->info->found_decoder = 1;
2300 if (st->info->found_decoder < 0)
/* keep decoding while data remains and parameters are still unknown */
2303 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2305 (!has_codec_parameters(st) ||
2306 !has_decode_delay_been_guessed(st) ||
2307 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2309 avcodec_get_frame_defaults(&picture);
2310 switch(st->codec->codec_type) {
2311 case AVMEDIA_TYPE_VIDEO:
2312 ret = avcodec_decode_video2(st->codec, &picture,
2313 &got_picture, &pkt);
2315 case AVMEDIA_TYPE_AUDIO:
2316 ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
2323 st->info->nb_decoded_frames++;
/* flush call (NULL data) that produced nothing: decoder is drained */
2329 if(!pkt.data && !got_picture)
/*
 * Look up the fourcc/tag for a codec id in a CODEC_ID_NONE-terminated
 * tag table.
 * NOTE(review): the loop body (match test, increment) and the final
 * return are not visible in this excerpt — confirm against the full
 * source.
 */
2334 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2336 while (tags->id != CODEC_ID_NONE) {
2344 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2347 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2348 if(tag == tags[i].tag)
2351 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2352 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2355 return CODEC_ID_NONE;
2358 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2361 for(i=0; tags && tags[i]; i++){
2362 int tag= ff_codec_get_tag(tags[i], id);
2368 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2371 for(i=0; tags && tags[i]; i++){
2372 enum CodecID id= ff_codec_get_id(tags[i], tag);
2373 if(id!=CODEC_ID_NONE) return id;
2375 return CODEC_ID_NONE;
/*
 * Fill in missing chapter end times: each open-ended chapter ends at the
 * start of the next chapter that begins after it, bounded by the
 * container's total duration (or collapses to its own start when no
 * bound exists).
 * NOTE(review): excerpt is missing the default 'end' value and the
 * assignment inside the inner loop — confirm against the full source.
 */
2378 static void compute_chapters_end(AVFormatContext *s)
/* absolute end of the presentation in AV_TIME_BASE units */
2381 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2383 for (i = 0; i < s->nb_chapters; i++)
2384 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2385 AVChapter *ch = s->chapters[i];
2386 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
/* clamp to the closest following chapter start */
2389 for (j = 0; j < s->nb_chapters; j++) {
2390 AVChapter *ch1 = s->chapters[j];
2391 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2392 if (j != i && next_start > ch->start && next_start < end)
2395 ch->end = (end == INT64_MAX) ? ch->start : end;
/**
 * Map a probe index to a standard frame rate, expressed in ticks of
 * 1/(1001*12) second.
 *
 * Indices below 60*12 represent the n/1001-family rates (n*1001 ticks);
 * the five indices after that map to exactly 24, 30, 60, 12 and 15 fps
 * (n*1000*12 ticks).
 */
static int get_std_framerate(int index)
{
    static const int whole_fps[] = { 24, 30, 60, 12, 15 };

    if (index < 60 * 12)
        return index * 1001;
    return whole_fps[index - 60 * 12] * 1000 * 12;
}
2405 * Is the time base unreliable.
2406 * This is a heuristic to balance between quick acceptance of the values in
2407 * the headers vs. some extra checks.
2408 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2409 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2410 * And there are "variable" fps files this needs to detect as well.
/*
 * NOTE(review): the tail of the condition and the return statements are
 * not visible in this excerpt — confirm against the full source.
 */
2412 static int tb_unreliable(AVCodecContext *c){
/* suspicious when den/num is outside the plausible [5, 101) fps-ish band */
2413 if( c->time_base.den >= 101L*c->time_base.num
2414 || c->time_base.den < 5L*c->time_base.num
2415 /* || c->codec_tag == AV_RL32("DIVX")
2416 || c->codec_tag == AV_RL32("XVID")*/
2417 || c->codec_id == CODEC_ID_MPEG2VIDEO
2418 || c->codec_id == CODEC_ID_H264
#if FF_API_FORMAT_PARAMETERS
/**
 * @deprecated use avformat_find_stream_info() instead.
 */
int av_find_stream_info(AVFormatContext *ic)
{
    return avformat_find_stream_info(ic, NULL);
}
#endif
2431 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2433 int i, count, ret, read_size, j;
2435 AVPacket pkt1, *pkt;
2436 int64_t old_offset = avio_tell(ic->pb);
2437 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2438 int flush_codecs = 1;
2441 av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
2443 for(i=0;i<ic->nb_streams;i++) {
2445 AVDictionary *thread_opt = NULL;
2446 st = ic->streams[i];
2448 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2449 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2450 /* if(!st->time_base.num)
2452 if(!st->codec->time_base.num)
2453 st->codec->time_base= st->time_base;
2455 //only for the split stuff
2456 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2457 st->parser = av_parser_init(st->codec->codec_id);
2458 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2459 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2462 codec = st->codec->codec ? st->codec->codec :
2463 avcodec_find_decoder(st->codec->codec_id);
2465 /* force thread count to 1 since the h264 decoder will not extract SPS
2466 * and PPS to extradata during multi-threaded decoding */
2467 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2469 /* Ensure that subtitle_header is properly set. */
2470 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2471 && codec && !st->codec->codec)
2472 avcodec_open2(st->codec, codec, options ? &options[i]
2475 //try to just open decoders, in case this is enough to get parameters
2476 if (!has_codec_parameters(st)) {
2477 if (codec && !st->codec->codec)
2478 avcodec_open2(st->codec, codec, options ? &options[i]
2482 av_dict_free(&thread_opt);
2485 for (i=0; i<ic->nb_streams; i++) {
2486 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2492 if (ff_check_interrupt(&ic->interrupt_callback)){
2494 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2498 /* check if one codec still needs to be handled */
2499 for(i=0;i<ic->nb_streams;i++) {
2500 int fps_analyze_framecount = 20;
2502 st = ic->streams[i];
2503 if (!has_codec_parameters(st))
2505 /* if the timebase is coarse (like the usual millisecond precision
2506 of mkv), we need to analyze more frames to reliably arrive at
2508 if (av_q2d(st->time_base) > 0.0005)
2509 fps_analyze_framecount *= 2;
2510 if (ic->fps_probe_size >= 0)
2511 fps_analyze_framecount = ic->fps_probe_size;
2512 /* variable fps and no guess at the real fps */
2513 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2514 && st->info->duration_count < fps_analyze_framecount
2515 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2517 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2519 if (st->first_dts == AV_NOPTS_VALUE &&
2520 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2521 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2524 if (i == ic->nb_streams) {
2525 /* NOTE: if the format has no header, then we need to read
2526 some packets to get most of the streams, so we cannot
2528 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2529 /* if we found the info for all the codecs, we can stop */
2531 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2536 /* we did not get all the codec info, but we read too much data */
2537 if (read_size >= ic->probesize) {
2539 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2540 for (i = 0; i < ic->nb_streams; i++)
2541 if (!ic->streams[i]->r_frame_rate.num &&
2542 ic->streams[i]->info->duration_count <= 1)
2543 av_log(ic, AV_LOG_WARNING,
2544 "Stream #%d: not enough frames to estimate rate; "
2545 "consider increasing probesize\n", i);
2549 /* NOTE: a new stream can be added there if no header in file
2550 (AVFMTCTX_NOHEADER) */
2551 ret = read_frame_internal(ic, &pkt1);
2552 if (ret == AVERROR(EAGAIN))
2560 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2561 if ((ret = av_dup_packet(pkt)) < 0)
2562 goto find_stream_info_err;
2564 read_size += pkt->size;
2566 st = ic->streams[pkt->stream_index];
2567 if (st->codec_info_nb_frames>1) {
2569 if (st->time_base.den > 0)
2570 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
2571 if (st->avg_frame_rate.num > 0)
2572 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, (AVRational){st->avg_frame_rate.den, st->avg_frame_rate.num}, AV_TIME_BASE_Q));
2574 if (t >= ic->max_analyze_duration) {
2575 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached at %"PRId64"\n", ic->max_analyze_duration, t);
2578 st->info->codec_info_duration += pkt->duration;
2581 int64_t last = st->info->last_dts;
2583 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2584 double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2585 int64_t duration= pkt->dts - last;
2587 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2588 // av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2589 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); i++) {
2590 int framerate= get_std_framerate(i);
2591 double sdts= dts*framerate/(1001*12);
2593 int ticks= lrintf(sdts+j*0.5);
2594 double error= sdts - ticks + j*0.5;
2595 st->info->duration_error[j][0][i] += error;
2596 st->info->duration_error[j][1][i] += error*error;
2599 st->info->duration_count++;
2600 // ignore the first 4 values, they might have some random jitter
2601 if (st->info->duration_count > 3)
2602 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2604 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2605 st->info->last_dts = pkt->dts;
2607 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2608 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2609 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2610 st->codec->extradata_size= i;
2611 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2612 if (!st->codec->extradata)
2613 return AVERROR(ENOMEM);
2614 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2615 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2619 /* if still no information, we try to open the codec and to
2620 decompress the frame. We try to avoid that in most cases as
2621 it takes longer and uses more memory. For MPEG-4, we need to
2622 decompress for QuickTime.
2624 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2625 least one frame of codec data, this makes sure the codec initializes
2626 the channel configuration and does not only trust the values from the container.
2628 try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2630 st->codec_info_nb_frames++;
2635 AVPacket empty_pkt = { 0 };
2637 av_init_packet(&empty_pkt);
2639 ret = -1; /* we could not have all the codec parameters before EOF */
2640 for(i=0;i<ic->nb_streams;i++) {
2641 st = ic->streams[i];
2643 /* flush the decoders */
2644 if (st->info->found_decoder == 1) {
2646 err = try_decode_frame(st, &empty_pkt,
2647 (options && i < orig_nb_streams) ?
2648 &options[i] : NULL);
2649 } while (err > 0 && !has_codec_parameters(st));
2652 av_log(ic, AV_LOG_INFO,
2653 "decoding for stream %d failed\n", st->index);
2657 if (!has_codec_parameters(st)){
2659 avcodec_string(buf, sizeof(buf), st->codec, 0);
2660 av_log(ic, AV_LOG_WARNING,
2661 "Could not find codec parameters (%s)\n", buf);
2668 // close codecs which were opened in try_decode_frame()
2669 for(i=0;i<ic->nb_streams;i++) {
2670 st = ic->streams[i];
2671 avcodec_close(st->codec);
2673 for(i=0;i<ic->nb_streams;i++) {
2674 st = ic->streams[i];
2675 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2676 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
2677 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2678 if(ff_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
2679 st->codec->codec_tag= tag;
2682 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2683 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2684 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2685 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2686 // the check for tb_unreliable() is not completely correct, since this is not about handling
2687 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2688 // ipmovie.c produces.
2689 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
2690 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2691 if (st->info->duration_count && !st->r_frame_rate.num
2692 && tb_unreliable(st->codec) /*&&
2693 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2694 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2696 double best_error= 0.01;
2698 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); j++) {
2701 if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
2703 if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
2706 int n= st->info->duration_count;
2707 double a= st->info->duration_error[k][0][j] / n;
2708 double error= st->info->duration_error[k][1][j]/n - a*a;
2710 if(error < best_error && best_error> 0.000000001){
2712 num = get_std_framerate(j);
2715 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
2718 // do not increase frame rate by more than 1 % in order to match a standard rate.
2719 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2720 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2723 if (!st->r_frame_rate.num){
2724 if( st->codec->time_base.den * (int64_t)st->time_base.num
2725 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2726 st->r_frame_rate.num = st->codec->time_base.den;
2727 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2729 st->r_frame_rate.num = st->time_base.den;
2730 st->r_frame_rate.den = st->time_base.num;
2733 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2734 if(!st->codec->bits_per_coded_sample)
2735 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2736 // set stream disposition based on audio service type
2737 switch (st->codec->audio_service_type) {
2738 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2739 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2740 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2741 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2742 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2743 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2744 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2745 st->disposition = AV_DISPOSITION_COMMENT; break;
2746 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2747 st->disposition = AV_DISPOSITION_KARAOKE; break;
2752 estimate_timings(ic, old_offset);
2754 compute_chapters_end(ic);
2756 find_stream_info_err:
2757 for (i=0; i < ic->nb_streams; i++) {
2758 if (ic->streams[i]->codec)
2759 ic->streams[i]->codec->thread_count = 0;
2760 av_freep(&ic->streams[i]->info);
2763 av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
/**
 * Find the program that contains stream index 's', resuming the search
 * after program 'last' (pass NULL to start from the first program).
 * Returns the matching AVProgram on a hit.
 * NOTE(review): the embedded numbering jumps (2767 -> 2771, 2772 -> 2776)
 * show elided lines here (declarations, the 'last' reset and the final
 * not-found return are not visible) — confirm against the full source.
 */
2767 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
2771 for (i = 0; i < ic->nb_programs; i++) {
2772 if (ic->programs[i] == last) {
2776 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2777 if (ic->programs[i]->stream_index[j] == s)
2778 return ic->programs[i];
/**
 * Pick the "best" stream of the given media type.
 * Selection visible here: restrict to streams of 'type', honor an explicit
 * wanted_stream_nb, skip hearing/visually-impaired dispositions, prefer the
 * stream with the highest codec_info_nb_frames, and optionally require a
 * decoder (AVERROR_DECODER_NOT_FOUND if none exists). When a related_stream
 * is given, the search is first limited to that stream's program and falls
 * back to all streams if nothing matched.
 * NOTE(review): elided lines (numbering gaps after 2786, 2788, 2794, 2797,
 * 2800, 2807, ...) hide several continue/brace/return statements.
 */
2784 int av_find_best_stream(AVFormatContext *ic,
2785 enum AVMediaType type,
2786 int wanted_stream_nb,
2788 AVCodec **decoder_ret,
2791 int i, nb_streams = ic->nb_streams;
2792 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2793 unsigned *program = NULL;
2794 AVCodec *decoder = NULL, *best_decoder = NULL;
2796 if (related_stream >= 0 && wanted_stream_nb < 0) {
2797 AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
2799 program = p->stream_index;
2800 nb_streams = p->nb_stream_indexes;
2803 for (i = 0; i < nb_streams; i++) {
2804 int real_stream_index = program ? program[i] : i;
2805 AVStream *st = ic->streams[real_stream_index];
2806 AVCodecContext *avctx = st->codec;
2807 if (avctx->codec_type != type)
2809 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2811 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2814 decoder = avcodec_find_decoder(st->codec->codec_id);
2817 ret = AVERROR_DECODER_NOT_FOUND;
2821 if (best_count >= st->codec_info_nb_frames)
2823 best_count = st->codec_info_nb_frames;
2824 ret = real_stream_index;
2825 best_decoder = decoder;
2826 if (program && i == nb_streams - 1 && ret < 0) {
2828 nb_streams = ic->nb_streams;
2829 i = 0; /* no related stream found, try again with everything */
2833 *decoder_ret = best_decoder;
2837 /*******************************************************/
/**
 * Start playing a network-based stream: prefer the demuxer's own
 * read_play callback, otherwise un-pause the protocol via avio_pause(.., 0);
 * AVERROR(ENOSYS) when neither path is available.
 * NOTE(review): the guard around the avio_pause() call (likely an s->pb
 * check) is on an elided line — confirm against the full source.
 */
2839 int av_read_play(AVFormatContext *s)
2841 if (s->iformat->read_play)
2842 return s->iformat->read_play(s);
2844 return avio_pause(s->pb, 0);
2845 return AVERROR(ENOSYS);
/**
 * Pause a network-based stream: mirror image of av_read_play() —
 * demuxer read_pause callback first, then avio_pause(.., 1),
 * else AVERROR(ENOSYS).
 */
2848 int av_read_pause(AVFormatContext *s)
2850 if (s->iformat->read_pause)
2851 return s->iformat->read_pause(s);
2853 return avio_pause(s->pb, 1);
2854 return AVERROR(ENOSYS);
/**
 * Free an AVFormatContext and everything it owns: per-stream data
 * (parser, attached picture, metadata, index entries, codec context and
 * its extradata/subtitle_header, priv_data, probe info), all programs,
 * all chapters, the context's own priv_data/metadata, and finally the
 * stream array. Private options are released with av_opt_free() first.
 * NOTE(review): numbering gaps (2857 -> 2863, 2867 -> 2870, ...) hide the
 * local declarations, the 'st' assignment and several closing braces.
 */
2857 void avformat_free_context(AVFormatContext *s)
2863 if (s->iformat && s->iformat->priv_class && s->priv_data)
2864 av_opt_free(s->priv_data);
2866 for(i=0;i<s->nb_streams;i++) {
2867 /* free all data in a stream component */
2870 av_parser_close(st->parser);
2872 if (st->attached_pic.data)
2873 av_free_packet(&st->attached_pic);
2874 av_dict_free(&st->metadata);
2875 av_freep(&st->index_entries);
2876 av_freep(&st->codec->extradata);
2877 av_freep(&st->codec->subtitle_header);
2878 av_freep(&st->codec);
2879 av_freep(&st->priv_data);
2880 av_freep(&st->info);
2883 for(i=s->nb_programs-1; i>=0; i--) {
2884 av_dict_free(&s->programs[i]->metadata);
2885 av_freep(&s->programs[i]->stream_index);
2886 av_freep(&s->programs[i]);
2888 av_freep(&s->programs);
2889 av_freep(&s->priv_data);
2890 while(s->nb_chapters--) {
2891 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2892 av_freep(&s->chapters[s->nb_chapters]);
2894 av_freep(&s->chapters);
2895 av_dict_free(&s->metadata);
2896 av_freep(&s->streams);
/* Deprecated compatibility wrapper (guarded by FF_API_CLOSE_INPUT_FILE):
 * forwards to avformat_close_input(), which also NULLs the caller's
 * pointer via the double indirection. */
2900 #if FF_API_CLOSE_INPUT_FILE
2901 void av_close_input_file(AVFormatContext *s)
2903 avformat_close_input(&s);
/**
 * Close an input context: flush queued packets, call the demuxer's
 * read_close, free the context, and (per the 'pb' selection on line 2910)
 * avoid touching the AVIOContext for AVFMT_NOFILE demuxers or when the
 * caller supplied custom I/O (AVFMT_FLAG_CUSTOM_IO).
 * NOTE(review): the '? :' alternatives of the pb initializer, the *ps
 * reset and the avio close are on elided lines — verify in full source.
 */
2907 void avformat_close_input(AVFormatContext **ps)
2909 AVFormatContext *s = *ps;
2910 AVIOContext *pb = (s->iformat && (s->iformat->flags & AVFMT_NOFILE)) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2912 flush_packet_queue(s);
2913 if (s->iformat && (s->iformat->read_close))
2914 s->iformat->read_close(s);
2915 avformat_free_context(s);
/* Deprecated compatibility wrapper (guarded by FF_API_NEW_STREAM):
 * delegates to avformat_new_stream(); the 'id' assignment on the new
 * stream is on an elided line. */
2921 #if FF_API_NEW_STREAM
2922 AVStream *av_new_stream(AVFormatContext *s, int id)
2924 AVStream *st = avformat_new_stream(s, NULL);
/**
 * Append a new AVStream to the context and initialize its defaults:
 * grow the stream array (with an INT_MAX overflow guard), allocate the
 * stream, its probe 'info' block and a codec context for 'c', then seed
 * timestamps (start_time/duration/first_dts = AV_NOPTS_VALUE), the PTS
 * reorder buffer, a 90 kHz MPEG-like time base, and a 0/1 sample aspect
 * ratio. cur_dts starts at RELATIVE_TS_BASE when demuxing so packets with
 * unknown timestamps can be shifted into a distinguishable relative range.
 * NOTE(review): allocation-failure cleanup paths and several declarations
 * sit on elided lines (2932-2936, 2938, 2940-2941, ...).
 */
2931 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
2937 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2939 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams))
2942 s->streams = streams;
2944 st = av_mallocz(sizeof(AVStream));
2947 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2951 st->info->last_dts = AV_NOPTS_VALUE;
2953 st->codec = avcodec_alloc_context3(c);
2955 /* no default bitrate if decoding */
2956 st->codec->bit_rate = 0;
2958 st->index = s->nb_streams;
2959 st->start_time = AV_NOPTS_VALUE;
2960 st->duration = AV_NOPTS_VALUE;
2961 /* we set the current DTS to 0 so that formats without any timestamps
2962 but durations get some timestamps, formats with some unknown
2963 timestamps have their first few packets buffered and the
2964 timestamps corrected before they are returned to the user */
2965 st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
2966 st->first_dts = AV_NOPTS_VALUE;
2967 st->probe_packets = MAX_PROBE_PACKETS;
2969 /* default pts setting is MPEG-like */
2970 avpriv_set_pts_info(st, 33, 1, 90000);
2971 st->last_IP_pts = AV_NOPTS_VALUE;
2972 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2973 st->pts_buffer[i]= AV_NOPTS_VALUE;
2974 st->reference_dts = AV_NOPTS_VALUE;
2976 st->sample_aspect_ratio = (AVRational){0,1};
2978 s->streams[s->nb_streams++] = st;
/**
 * Return the AVProgram with the given id, creating and registering a new
 * one (discard = AVDISCARD_NONE, added via dynarray_add) if none exists.
 * NOTE(review): the "if (!program)" guard before the allocation and the
 * id assignment / return are on elided lines.
 */
2982 AVProgram *av_new_program(AVFormatContext *ac, int id)
2984 AVProgram *program=NULL;
2987 av_dlog(ac, "new_program: id=0x%04x\n", id);
2989 for(i=0; i<ac->nb_programs; i++)
2990 if(ac->programs[i]->id == id)
2991 program = ac->programs[i];
2994 program = av_mallocz(sizeof(AVProgram));
2997 dynarray_add(&ac->programs, &ac->nb_programs, program);
2998 program->discard = AVDISCARD_NONE;
/**
 * Create or update the chapter with the given id: reuse an existing
 * AVChapter when the id matches, otherwise allocate and register a new
 * one; then (re)set its "title" metadata, time base, and start time.
 * NOTE(review): the id/end assignments and the return statement are on
 * elided lines (gaps after 3012, 3015, 3023).
 */
3005 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
3007 AVChapter *chapter = NULL;
3010 for(i=0; i<s->nb_chapters; i++)
3011 if(s->chapters[i]->id == id)
3012 chapter = s->chapters[i];
3015 chapter= av_mallocz(sizeof(AVChapter));
3018 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3020 av_dict_set(&chapter->metadata, "title", title, 0);
3022 chapter->time_base= time_base;
3023 chapter->start = start;
3029 /************************************************************/
3030 /* output media file */
/**
 * Allocate an output AVFormatContext in *avctx. The output format is, in
 * order of preference: the explicit 'oformat' argument, a lookup by format
 * name, or a guess from the filename extension; failure to resolve one
 * yields AVERROR(EINVAL). The muxer's priv_data is allocated and, when an
 * AVClass is present, its options are set to defaults. The filename is
 * copied into the context. On any error the context is freed.
 * NOTE(review): the error-label structure and several braces/returns are
 * on elided lines (3036-3043, 3048-3050, 3055-3060, 3064-3065, ...).
 */
3032 int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
3033 const char *format, const char *filename)
3035 AVFormatContext *s = avformat_alloc_context();
3044 oformat = av_guess_format(format, NULL, NULL);
3046 av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
3047 ret = AVERROR(EINVAL);
3051 oformat = av_guess_format(NULL, filename, NULL);
3053 ret = AVERROR(EINVAL);
3054 av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
3061 s->oformat = oformat;
3062 if (s->oformat->priv_data_size > 0) {
3063 s->priv_data = av_mallocz(s->oformat->priv_data_size);
3066 if (s->oformat->priv_class) {
3067 *(const AVClass**)s->priv_data= s->oformat->priv_class;
3068 av_opt_set_defaults(s->priv_data);
3071 s->priv_data = NULL;
3074 av_strlcpy(s->filename, filename, sizeof(s->filename));
3078 av_log(s, AV_LOG_ERROR, "Out of memory\n");
3079 ret = AVERROR(ENOMEM);
3081 avformat_free_context(s);
/* Deprecated compatibility wrapper (guarded by FF_API_ALLOC_OUTPUT_CONTEXT):
 * forwards to avformat_alloc_output_context2() and collapses its error
 * code into a NULL return. Note the (format, oformat) argument order is
 * swapped relative to the new API. */
3085 #if FF_API_ALLOC_OUTPUT_CONTEXT
3086 AVFormatContext *avformat_alloc_output_context(const char *format,
3087 AVOutputFormat *oformat, const char *filename)
3089 AVFormatContext *avctx;
3090 int ret = avformat_alloc_output_context2(&avctx, oformat, format, filename);
3091 return ret < 0 ? NULL : avctx;
/**
 * Check the stream's (codec_tag, codec_id) pair against the muxer's
 * codec_tag tables. Policy (from the comment at 3103-3106): neither in
 * the table -> OK; tag present with a different id -> FAIL; id present
 * with a different tag -> FAIL unless strictness is below NORMAL.
 * Tags are compared case-insensitively via avpriv_toupper4().
 * NOTE(review): the success/failure return statements and the 'n'
 * declaration are on elided lines (gaps after 3100, 3111, 3116, 3123).
 */
3095 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
3097 const AVCodecTag *avctag;
3099 enum CodecID id = CODEC_ID_NONE;
3100 unsigned int tag = 0;
3103 * Check that tag + id is in the table
3104 * If neither is in the table -> OK
3105 * If tag is in the table with another id -> FAIL
3106 * If id is in the table with another tag -> FAIL unless strict < normal
3108 for (n = 0; s->oformat->codec_tag[n]; n++) {
3109 avctag = s->oformat->codec_tag[n];
3110 while (avctag->id != CODEC_ID_NONE) {
3111 if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
3113 if (id == st->codec->codec_id)
3116 if (avctag->id == st->codec->codec_id)
3121 if (id != CODEC_ID_NONE)
3123 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
/**
 * Validate streams and write the container header.
 * Visible phases:
 *   1) apply caller options to the context and (when already allocated
 *      with the muxer's AVClass) to the muxer priv_data;
 *   2) sanity-check every stream: audio needs a sample rate (and gets a
 *      default block_align from channels * bits-per-sample / 8), video
 *      needs a time base and — unless AVFMT_NODIMENSIONS — dimensions,
 *      and the stream/codec sample aspect ratios must agree within 0.4%;
 *   3) reconcile codec_tag with the muxer's tag tables (rawvideo gets its
 *      bogus tag cleared; an incompatible explicit tag is
 *      AVERROR_INVALIDDATA; a missing tag is filled in from the table);
 *   4) warn when the container wants global headers the codec won't emit;
 *   5) allocate muxer priv_data late if the muxer did not get one yet;
 *   6) tag the output with an "encoder" metadata entry unless bitexact;
 *   7) call the muxer's write_header;
 *   8) initialise fractional PTS generation per stream from the sample
 *      rate (audio) or codec time base (video), rejecting a zero/unset
 *      denominator with AVERROR_INVALIDDATA.
 * NOTE(review): goto-fail cleanup paths, 'st' assignments, breaks and many
 * closing braces live on elided lines throughout this listing.
 */
3128 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
3132 AVDictionary *tmp = NULL;
3135 av_dict_copy(&tmp, *options, 0);
3136 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
3138 if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
3139 (ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3142 // some sanity checks
3143 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
3144 av_log(s, AV_LOG_ERROR, "no streams\n");
3145 ret = AVERROR(EINVAL);
3149 for(i=0;i<s->nb_streams;i++) {
3152 switch (st->codec->codec_type) {
3153 case AVMEDIA_TYPE_AUDIO:
3154 if(st->codec->sample_rate<=0){
3155 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
3156 ret = AVERROR(EINVAL);
3159 if(!st->codec->block_align)
3160 st->codec->block_align = st->codec->channels *
3161 av_get_bits_per_sample(st->codec->codec_id) >> 3;
3163 case AVMEDIA_TYPE_VIDEO:
3164 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
3165 av_log(s, AV_LOG_ERROR, "time base not set\n");
3166 ret = AVERROR(EINVAL);
3169 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
3170 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
3171 ret = AVERROR(EINVAL);
3174 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)
3175 && FFABS(av_q2d(st->sample_aspect_ratio) - av_q2d(st->codec->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
3177 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
3178 "(%d/%d) and encoder layer (%d/%d)\n",
3179 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3180 st->codec->sample_aspect_ratio.num,
3181 st->codec->sample_aspect_ratio.den);
3182 ret = AVERROR(EINVAL);
3188 if(s->oformat->codec_tag){
3189 if( st->codec->codec_tag
3190 && st->codec->codec_id == CODEC_ID_RAWVIDEO
3191 && (av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 || av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) ==MKTAG('r', 'a', 'w', ' '))
3192 && !validate_codec_tag(s, st)){
3193 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi/mov, we override it here
3194 st->codec->codec_tag= 0;
3196 if(st->codec->codec_tag){
3197 if (!validate_codec_tag(s, st)) {
3198 char tagbuf[32], cortag[32];
3199 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
3200 av_get_codec_tag_string(cortag, sizeof(cortag), av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id));
3201 av_log(s, AV_LOG_ERROR,
3202 "Tag %s/0x%08x incompatible with output codec id '%d' (%s)\n",
3203 tagbuf, st->codec->codec_tag, st->codec->codec_id, cortag);
3204 ret = AVERROR_INVALIDDATA;
3208 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
3211 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
3212 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
3213 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
3216 if (!s->priv_data && s->oformat->priv_data_size > 0) {
3217 s->priv_data = av_mallocz(s->oformat->priv_data_size);
3218 if (!s->priv_data) {
3219 ret = AVERROR(ENOMEM);
3222 if (s->oformat->priv_class) {
3223 *(const AVClass**)s->priv_data= s->oformat->priv_class;
3224 av_opt_set_defaults(s->priv_data);
3225 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3230 /* set muxer identification string */
3231 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
3232 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
3235 if(s->oformat->write_header){
3236 ret = s->oformat->write_header(s);
3241 /* init PTS generation */
3242 for(i=0;i<s->nb_streams;i++) {
3243 int64_t den = AV_NOPTS_VALUE;
3246 switch (st->codec->codec_type) {
3247 case AVMEDIA_TYPE_AUDIO:
3248 den = (int64_t)st->time_base.num * st->codec->sample_rate;
3250 case AVMEDIA_TYPE_VIDEO:
3251 den = (int64_t)st->time_base.num * st->codec->time_base.den;
3256 if (den != AV_NOPTS_VALUE) {
3258 ret = AVERROR_INVALIDDATA;
3261 frac_init(&st->pts, 0, 0, den);
3266 av_dict_free(options);
/**
 * Fill in / validate timestamp fields of an outgoing packet before muxing:
 *  - derive a duration from the stream frame rate when the packet has none;
 *  - fabricate a pts from the fractional PTS counter when the encoder
 *    produced neither pts nor dts and there is no B-frame delay (with a
 *    one-time warning);
 *  - derive dts from pts through the sorted pts_buffer reorder window;
 *  - reject non-monotonic dts (strictly increasing unless the muxer sets
 *    AVFMT_TS_NONSTRICT) and pts < dts, both with AVERROR(EINVAL);
 *  - advance cur_dts / st->pts; audio advances by the parsed frame size
 *    (skipping initial zero-size packets assumed to be encoder delay),
 *    video by the codec time base.
 * NOTE(review): several braces, a 'return 0', and parts of the
 * no-timestamp branch are on elided lines.
 */
3275 //FIXME merge with compute_pkt_fields
3276 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
3277 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
3278 int num, den, frame_size, i;
3280 av_dlog(s, "compute_pkt_fields2: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
3281 av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), delay, pkt->size, pkt->stream_index);
3283 /* duration field */
3284 if (pkt->duration == 0) {
3285 compute_frame_duration(&num, &den, st, NULL, pkt);
3287 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
3291 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
3294 //XXX/FIXME this is a temporary hack until all encoders output pts
3295 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
3298 av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
3302 // pkt->pts= st->cur_dts;
3303 pkt->pts= st->pts.val;
3306 //calculate dts from pts
3307 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
3308 st->pts_buffer[0]= pkt->pts;
3309 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
3310 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
3311 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
3312 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
3314 pkt->dts= st->pts_buffer[0];
3317 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)){
3318 av_log(s, AV_LOG_ERROR,
3319 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
3320 st->index, av_ts2str(st->cur_dts), av_ts2str(pkt->dts));
3321 return AVERROR(EINVAL);
3323 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3324 av_log(s, AV_LOG_ERROR, "pts (%s) < dts (%s) in stream %d\n",
3325 av_ts2str(pkt->pts), av_ts2str(pkt->dts), st->index);
3326 return AVERROR(EINVAL);
3329 //    av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%s dts2:%s\n", av_ts2str(pkt->pts), av_ts2str(pkt->dts));
3330 st->cur_dts= pkt->dts;
3331 st->pts.val= pkt->dts;
3334 switch (st->codec->codec_type) {
3335 case AVMEDIA_TYPE_AUDIO:
3336 frame_size = get_audio_frame_size(st->codec, pkt->size, 1);
3338 /* HACK/FIXME, we skip the initial 0 size packets as they are most
3339 likely equal to the encoder delay, but it would be better if we
3340 had the real timestamps from the encoder */
3341 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3342 frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3345 case AVMEDIA_TYPE_VIDEO:
3346 frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/**
 * Write a packet directly to the muxer without interleaving. A NULL-packet
 * flush path exists for muxers with AVFMT_ALLOW_FLUSH (the NULL check is
 * on an elided line); otherwise timestamps are fixed up via
 * compute_pkt_fields2() — errors are fatal unless AVFMT_NOTIMESTAMPS —
 * then the muxer's write_packet is called and nb_frames is bumped on
 * success.
 */
3354 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3359 if (s->oformat->flags & AVFMT_ALLOW_FLUSH)
3360 return s->oformat->write_packet(s, pkt);
3364 ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
3366 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3369 ret= s->oformat->write_packet(s, pkt);
3372 s->streams[pkt->stream_index]->nb_frames++;
3376 #define CHUNK_START 0x1000
/**
 * Insert a packet into the global interleaving queue, keeping the queue
 * ordered by 'compare'. The packet is shallow-copied into a new
 * AVPacketList node and duplicated so the caller's buffer can be reused
 * (pkt->destruct is cleared so only the copy owns the data). The scan
 * starts from this stream's last queued packet as an optimization. When
 * chunked output is enabled (max_chunk_size/max_chunk_duration), packets
 * that exceed the per-stream chunk budget get CHUNK_START set in flags,
 * and chunk-start packets act as barriers for the insertion scan.
 * NOTE(review): the fast-path append when the new packet sorts after the
 * current tail, plus several braces/else arms, are on elided lines.
 */
3378 int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3379 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3381 AVPacketList **next_point, *this_pktl;
3382 AVStream *st= s->streams[pkt->stream_index];
3383 int chunked= s->max_chunk_size || s->max_chunk_duration;
3385 this_pktl = av_mallocz(sizeof(AVPacketList));
3387 return AVERROR(ENOMEM);
3388 this_pktl->pkt= *pkt;
3389 pkt->destruct= NULL;             // do not free original but only the copy
3390 av_dup_packet(&this_pktl->pkt);  // duplicate the packet if it uses non-alloced memory
3392 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3393 next_point = &(st->last_in_packet_buffer->next);
3395 next_point = &s->packet_buffer;
3400 uint64_t max= av_rescale_q(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base);
3401 if( st->interleaver_chunk_size + pkt->size <= s->max_chunk_size-1U
3402 && st->interleaver_chunk_duration + pkt->duration <= max-1U){
3403 st->interleaver_chunk_size += pkt->size;
3404 st->interleaver_chunk_duration += pkt->duration;
3407 st->interleaver_chunk_size =
3408 st->interleaver_chunk_duration = 0;
3409 this_pktl->pkt.flags |= CHUNK_START;
3413 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3415 && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
3416 || !compare(s, &(*next_point)->pkt, pkt))){
3417 next_point= &(*next_point)->next;
3422 next_point = &(s->packet_buffer_end->next);
3425 assert(!*next_point);
3427 s->packet_buffer_end= this_pktl;
3430 this_pktl->next= *next_point;
3432 s->streams[pkt->stream_index]->last_in_packet_buffer=
3433 *next_point= this_pktl;
/**
 * DTS-based comparator for interleaving: orders by dts rescaled across the
 * two streams' time bases (av_compare_ts). When audio_preload is set and
 * exactly one of the two packets is audio, the audio packet's effective
 * timestamp is shifted earlier by the preload amount; the exact-integer
 * cross-multiplication branch (3447-3448) recomputes the comparison
 * without rescale rounding. Ties fall back to stream_index order.
 * NOTE(review): the overflow guard selecting between the rescaled and the
 * exact branch, and the tie check, are on elided lines.
 */
3437 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3439 AVStream *st = s->streams[ pkt ->stream_index];
3440 AVStream *st2= s->streams[ next->stream_index];
3441 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3443 if(s->audio_preload && ((st->codec->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codec->codec_type == AVMEDIA_TYPE_AUDIO))){
3444 int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3445 int64_t ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3447 ts= ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
3448 -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
3451 comp= (ts>ts2) - (ts<ts2);
3455 return pkt->stream_index < next->stream_index;
/**
 * Interleave packets by dts: add 'pkt' to the queue, then decide whether a
 * packet may be emitted. Output is allowed once every stream has at least
 * one queued packet; a forced flush also triggers when only subtitle
 * streams are missing and the queued dts spread exceeds 20 seconds
 * (delta_dts_max > 20*AV_TIME_BASE). When flushing, the queue head is
 * popped into 'out', queue tail and the stream's last_in_packet_buffer
 * pointers are maintained, and 1 is returned; otherwise 'out' is cleared
 * and 0 is returned.
 * NOTE(review): the early-return/guard lines, the delta_dts computation's
 * first operand, and the returns are partially elided in this listing.
 */
3459 int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3460 AVPacket *pkt, int flush)
3463 int stream_count=0, noninterleaved_count=0;
3464 int64_t delta_dts_max = 0;
3468 ret = ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
3473 for(i=0; i < s->nb_streams; i++) {
3474 if (s->streams[i]->last_in_packet_buffer) {
3476 } else if(s->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
3477 ++noninterleaved_count;
3481 if (s->nb_streams == stream_count) {
3484 for(i=0; i < s->nb_streams; i++) {
3485 if (s->streams[i]->last_in_packet_buffer) {
3487 av_rescale_q(s->streams[i]->last_in_packet_buffer->pkt.dts,
3488 s->streams[i]->time_base,
3490 av_rescale_q(s->packet_buffer->pkt.dts,
3491 s->streams[s->packet_buffer->pkt.stream_index]->time_base,
3493 delta_dts_max= FFMAX(delta_dts_max, delta_dts);
3496 if(s->nb_streams == stream_count+noninterleaved_count &&
3497 delta_dts_max > 20*AV_TIME_BASE) {
3498 av_log(s, AV_LOG_DEBUG, "flushing with %d noninterleaved\n", noninterleaved_count);
3502 if(stream_count && flush){
3503 pktl= s->packet_buffer;
3506 s->packet_buffer= pktl->next;
3507 if(!s->packet_buffer)
3508 s->packet_buffer_end= NULL;
3510 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3511 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3515 av_init_packet(out);
/* Deprecated public wrapper (guarded by FF_API_INTERLEAVE_PACKET):
 * forwards unchanged to ff_interleave_packet_per_dts(). */
3520 #if FF_API_INTERLEAVE_PACKET
3521 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3522 AVPacket *pkt, int flush)
3524 return ff_interleave_packet_per_dts(s, out, pkt, flush);
/* Dispatcher: use the muxer's own interleave_packet callback when it
 * provides one (the handling of its return value at 3539 is partially
 * elided), otherwise fall back to the generic dts-based interleaver. */
3529 * Interleave an AVPacket correctly so it can be muxed.
3530 * @param out the interleaved packet will be output here
3531 * @param in the input packet
3532 * @param flush 1 if no further packets are available as input and all
3533 * remaining packets should be output
3534 * @return 1 if a packet was output, 0 if no packet could be output,
3535 * < 0 if an error occurred
3537 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3538 if (s->oformat->interleave_packet) {
3539 int ret = s->oformat->interleave_packet(s, out, in, flush);
3544 return ff_interleave_packet_per_dts(s, out, in, flush);
/**
 * Public muxing entry point with interleaving: zero-size audio packets are
 * dropped (acknowledged hack), timestamps are fixed up via
 * compute_pkt_fields2() (errors fatal unless AVFMT_NOTIMESTAMPS), a
 * missing dts is rejected with AVERROR(EINVAL), then interleave_packet()
 * is drained in a loop — each emitted packet goes to the muxer's
 * write_packet, bumps nb_frames, and is freed. Finally the I/O context's
 * sticky error, if any, is propagated.
 * NOTE(review): the NULL-pkt flush setup around 3565 and loop braces are
 * on elided lines.
 */
3547 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3551 AVStream *st= s->streams[ pkt->stream_index];
3553 //FIXME/XXX/HACK drop zero sized packets
3554 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3557 av_dlog(s, "av_interleaved_write_frame size:%d dts:%s pts:%s\n",
3558 pkt->size, av_ts2str(pkt->dts), av_ts2str(pkt->pts));
3559 if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3562 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3563 return AVERROR(EINVAL);
3565 av_dlog(s, "av_interleaved_write_frame FLUSH\n");
3571 int ret= interleave_packet(s, &opkt, pkt, flush);
3572 if(ret<=0) //FIXME cleanup needed for ret<0 ?
3575 ret= s->oformat->write_packet(s, &opkt);
3577 s->streams[opkt.stream_index]->nb_frames++;
3579 av_free_packet(&opkt);
3584 if(s->pb && s->pb->error)
3585 return s->pb->error;
/**
 * Finish muxing: drain the interleaving queue with flush=1, writing each
 * remaining packet; then call the muxer's write_trailer, pick up any
 * sticky I/O error, and free per-stream priv_data/index_entries plus the
 * muxer's priv_data (with av_opt_free when it carries an AVClass).
 * NOTE(review): the drain-loop structure, 'fail:' label and final return
 * are on elided lines (gaps after 3596, 3605, 3614, 3626).
 */
3589 int av_write_trailer(AVFormatContext *s)
3595 ret= interleave_packet(s, &pkt, NULL, 1);
3596 if(ret<0) //FIXME cleanup needed for ret<0 ?
3601 ret= s->oformat->write_packet(s, &pkt);
3603 s->streams[pkt.stream_index]->nb_frames++;
3605 av_free_packet(&pkt);
3609 if(s->pb && s->pb->error)
3613 if(s->oformat->write_trailer)
3614 ret = s->oformat->write_trailer(s);
3619 ret = s->pb ? s->pb->error : 0;
3620 for(i=0;i<s->nb_streams;i++) {
3621 av_freep(&s->streams[i]->priv_data);
3622 av_freep(&s->streams[i]->index_entries);
3624 if (s->oformat->priv_class)
3625 av_opt_free(s->priv_data);
3626 av_freep(&s->priv_data);
/**
 * Query the muxer for the last output dts (and corresponding wall-clock
 * time) of a stream; AVERROR(ENOSYS) when the muxer lacks the
 * get_output_timestamp callback. The trailing 'return 0' is on an
 * elided line.
 */
3630 int av_get_output_timestamp(struct AVFormatContext *s, int stream,
3631 int64_t *dts, int64_t *wall)
3633 if (!s->oformat || !s->oformat->get_output_timestamp)
3634 return AVERROR(ENOSYS);
3635 s->oformat->get_output_timestamp(s, stream, dts, wall);
/**
 * Add stream 'idx' to the program with id 'progid': validate the index,
 * locate the program, skip silently if the stream is already listed, then
 * grow the stream_index array with av_realloc and append.
 * NOTE(review): the realloc-failure handling after 3658 and the early
 * return on a duplicate entry are on elided lines.
 */
3639 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3642 AVProgram *program=NULL;
3645 if (idx >= ac->nb_streams) {
3646 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3650 for(i=0; i<ac->nb_programs; i++){
3651 if(ac->programs[i]->id != progid)
3653 program = ac->programs[i];
3654 for(j=0; j<program->nb_stream_indexes; j++)
3655 if(program->stream_index[j] == idx)
3658 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3661 program->stream_index = tmp;
3662 program->stream_index[program->nb_stream_indexes++] = idx;
/* Log a frame/tick rate with adaptive formatting: two decimals when the
 * value is not a whole number (checked at 1/100 resolution via
 * lrintf(d*100)), integral form when it is, and a "k"-scaled form for
 * exact multiples of 1000. Note lrintf operates in float precision on a
 * double argument — harmless at typical fps magnitudes. */
3667 static void print_fps(double d, const char *postfix){
3668 uint64_t v= lrintf(d*100);
3669 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3670 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3671 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/**
 * Pretty-print a metadata dictionary (skipped entirely when it only holds
 * a "language" tag, which is shown inline with the stream instead). Each
 * value is emitted in CR/LF-free chunks via strcspn: carriage returns
 * become spaces, newlines restart an aligned continuation line.
 * NOTE(review): the inner loop/brace structure around the chunked output
 * (gaps after 3683, 3688, 3691) is partially elided.
 */
3674 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3676 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3677 AVDictionaryEntry *tag=NULL;
3679 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3680 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3681 if(strcmp("language", tag->key)){
3682 const char *p = tag->value;
3683 av_log(ctx, AV_LOG_INFO, "%s %-16s: ", indent, tag->key);
3686 size_t len = strcspn(p, "\xd\xa");
3687 av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3688 av_log(ctx, AV_LOG_INFO, "%s", tmp);
3690 if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3691 if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s %-16s: ", indent, "");
3694 av_log(ctx, AV_LOG_INFO, "\n");
3700 /* "user interface" functions */
/* Log one "Stream #index:i" description line for stream i of ic: codec
 * string, optional id/language, SAR/DAR, frame-rate/timebase figures and
 * disposition flags, followed by the stream's metadata. */
static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
AVStream *st = ic->streams[i];
/* Reduce the timebase for the debug print of num/den below. */
int g = av_gcd(st->time_base.num, st->time_base.den);
AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
avcodec_string(buf, sizeof(buf), st->codec, is_output);
av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);
/* the pid is an important information, so we display it */
/* XXX: add a generic system */
if (flags & AVFMT_SHOW_IDS)
av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* Only print SAR/DAR when the stream-level SAR is set and differs from
 * the codec-level one. */
if (st->sample_aspect_ratio.num && // default
av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
AVRational display_aspect_ratio;
av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
st->codec->width*st->sample_aspect_ratio.num,
st->codec->height*st->sample_aspect_ratio.den,
av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
display_aspect_ratio.num, display_aspect_ratio.den);
/* Video streams additionally get fps/tbr/tbn/tbc figures; tbn and tbc
 * are inverted timebases printed as rates. */
if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
if(st->avg_frame_rate.den && st->avg_frame_rate.num)
print_fps(av_q2d(st->avg_frame_rate), "fps");
if(st->r_frame_rate.den && st->r_frame_rate.num)
print_fps(av_q2d(st->r_frame_rate), "tbr");
if(st->time_base.den && st->time_base.num)
print_fps(1/av_q2d(st->time_base), "tbn");
if(st->codec->time_base.den && st->codec->time_base.num)
print_fps(1/av_q2d(st->codec->time_base), "tbc");
/* One parenthesized tag per set disposition bit. */
if (st->disposition & AV_DISPOSITION_DEFAULT)
av_log(NULL, AV_LOG_INFO, " (default)");
if (st->disposition & AV_DISPOSITION_DUB)
av_log(NULL, AV_LOG_INFO, " (dub)");
if (st->disposition & AV_DISPOSITION_ORIGINAL)
av_log(NULL, AV_LOG_INFO, " (original)");
if (st->disposition & AV_DISPOSITION_COMMENT)
av_log(NULL, AV_LOG_INFO, " (comment)");
if (st->disposition & AV_DISPOSITION_LYRICS)
av_log(NULL, AV_LOG_INFO, " (lyrics)");
if (st->disposition & AV_DISPOSITION_KARAOKE)
av_log(NULL, AV_LOG_INFO, " (karaoke)");
if (st->disposition & AV_DISPOSITION_FORCED)
av_log(NULL, AV_LOG_INFO, " (forced)");
if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
av_log(NULL, AV_LOG_INFO, " (visual impaired)");
if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
av_log(NULL, AV_LOG_INFO, " (clean effects)");
av_log(NULL, AV_LOG_INFO, "\n");
dump_metadata(NULL, st->metadata, " ");
/* Public entry point: log a human-readable summary of ic — container name,
 * metadata, duration, start time, bitrate, chapters, programs and streams.
 * NOTE(review): the remaining parameters (index, url, is_output) are on
 * elided signature lines. */
void av_dump_format(AVFormatContext *ic,
/* Track which streams were already printed as part of a program so they
 * are not repeated in the trailing per-stream loop. */
uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
if (ic->nb_streams && !printed)
av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
is_output ? "Output" : "Input",
is_output ? ic->oformat->name : ic->iformat->name,
is_output ? "to" : "from", url);
dump_metadata(NULL, ic->metadata, " ");
av_log(NULL, AV_LOG_INFO, " Duration: ");
if (ic->duration != AV_NOPTS_VALUE) {
int hours, mins, secs, us;
/* Split the AV_TIME_BASE duration into h:m:s plus centiseconds. */
secs = ic->duration / AV_TIME_BASE;
us = ic->duration % AV_TIME_BASE;
av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
(100 * us) / AV_TIME_BASE);
av_log(NULL, AV_LOG_INFO, "N/A");
if (ic->start_time != AV_NOPTS_VALUE) {
av_log(NULL, AV_LOG_INFO, ", start: ");
secs = ic->start_time / AV_TIME_BASE;
/* abs() keeps the fractional part positive for negative start times. */
us = abs(ic->start_time % AV_TIME_BASE);
av_log(NULL, AV_LOG_INFO, "%d.%06d",
secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
av_log(NULL, AV_LOG_INFO, ", bitrate: ");
av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
av_log(NULL, AV_LOG_INFO, "N/A");
av_log(NULL, AV_LOG_INFO, "\n");
/* Chapters: start/end converted to seconds via the chapter timebase. */
for (i = 0; i < ic->nb_chapters; i++) {
AVChapter *ch = ic->chapters[i];
av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
dump_metadata(NULL, ch->metadata, " ");
/* Programs: dump each program's streams and mark them as printed. */
if(ic->nb_programs) {
int j, k, total = 0;
for(j=0; j<ic->nb_programs; j++) {
AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
name ? name->value : "");
dump_metadata(NULL, ic->programs[j]->metadata, " ");
for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
printed[ic->programs[j]->stream_index[k]] = 1;
total += ic->programs[j]->nb_stream_indexes;
if (total < ic->nb_streams)
av_log(NULL, AV_LOG_INFO, " No Program\n");
/* Remaining (program-less) streams. */
for(i=0;i<ic->nb_streams;i++)
dump_stream_format(ic, i, index, is_output);
3842 int64_t av_gettime(void)
3845 gettimeofday(&tv,NULL);
3846 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/* Current time in NTP units: microseconds since 1900-01-01.  The
 * "/ 1000 * 1000" deliberately truncates to whole milliseconds before
 * adding the Unix→NTP epoch offset. */
uint64_t ff_ntp_time(void)
return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/**
 * Expand a frame-number pattern into a concrete filename.
 *
 * The path must contain exactly one "%d"-style directive, optionally with
 * a zero-pad width ("%05d"); "%%" emits a literal '%'.
 *
 * @param buf      output buffer, always NUL-terminated on return
 * @param buf_size size of buf in bytes
 * @param path     pattern, e.g. "img%03d.png"
 * @param number   frame number substituted for the directive
 * @return 0 on success, -1 if the pattern is invalid or buf is too small
 */
int av_get_frame_filename(char *buf, int buf_size,
                          const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q = buf;
    p = path;
    percentd_found = 0;
    for (;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            do {
                nd = 0;
                /* Cast for <ctype.h>: plain char may be signed, and passing
                 * a negative value to isdigit() is UB (CERT STR37-C). */
                while (isdigit((unsigned char)*p)) {
                    nd = nd * 10 + *p++ - '0';
                }
                c = *p++;
            } while (isdigit((unsigned char)c));

            switch (c) {
            case '%':
                goto addchar;
            case 'd':
                /* Exactly one %d directive is allowed per pattern. */
                if (percentd_found)
                    goto fail;
                percentd_found = 1;
                /* snprintf bounds the expansion, so an absurd width cannot
                 * overflow buf1 — it is truncated instead. */
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;
            }
        } else {
addchar:
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;
fail:
    *q = '\0';
    return -1;
}
/* Shared worker for av_hex_dump*(): emit a classic 16-bytes-per-row hex
 * dump either to stdio stream f or, when f is NULL, to av_log(avcl, level). */
static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
/* One row of up to 16 bytes per iteration. */
for(i=0;i<size;i+=16) {
PRINT(" %02x", buf[i+j]);
/* ASCII column: replace non-printable bytes before printing. */
for(j=0;j<len;j++) {
if (c < ' ' || c > '~')
/* Hex-dump `size` bytes of buf to the stdio stream f. */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
hex_dump_internal(NULL, f, 0, buf, size);
/* Hex-dump `size` bytes of buf through av_log() at the given level. */
void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
hex_dump_internal(avcl, NULL, level, buf, size);
/* Shared worker for av_pkt_dump*(): print packet header fields (stream
 * index, keyframe flag, duration, dts, pts, size) scaled by time_base,
 * optionally followed by a hex dump of the payload. */
static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
PRINT("stream #%d:\n", pkt->stream_index);
PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
/* Timestamps/durations are printed in seconds via time_base. */
PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
/* DTS is _always_ valid after av_read_frame() */
if (pkt->dts == AV_NOPTS_VALUE)
PRINT("%0.3f", pkt->dts * av_q2d(time_base));
/* PTS may not be known if B-frames are present. */
if (pkt->pts == AV_NOPTS_VALUE)
PRINT("%0.3f", pkt->pts * av_q2d(time_base));
PRINT(" size=%d\n", pkt->size);
av_hex_dump(f, pkt->data, pkt->size);
/* Dump pkt to stream f assuming AV_TIME_BASE timestamps (deprecated in
 * favour of av_pkt_dump2(), which takes the stream's real timebase). */
void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
AVRational tb = { 1, AV_TIME_BASE };
pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
/* Dump pkt to stream f using the timebase of its stream st. */
void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
/* Dump pkt through av_log() assuming AV_TIME_BASE timestamps (deprecated
 * in favour of av_pkt_dump_log2()). */
void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
AVRational tb = { 1, AV_TIME_BASE };
pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
/* Dump pkt through av_log() using the timebase of its stream st. */
void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/* Split a URL into protocol, authorization, hostname, port and path.
 * Absent components yield empty strings (port -1); all output buffers are
 * NUL-terminated via av_strlcpy().  Any output pointer's size may be 0 to
 * skip that component. */
void av_url_split(char *proto, int proto_size,
char *authorization, int authorization_size,
char *hostname, int hostname_size,
char *path, int path_size,
const char *p, *ls, *at, *col, *brk;
/* Initialize every output to "not present". */
if (port_ptr) *port_ptr = -1;
if (proto_size > 0) proto[0] = 0;
if (authorization_size > 0) authorization[0] = 0;
if (hostname_size > 0) hostname[0] = 0;
if (path_size > 0) path[0] = 0;
/* parse protocol */
if ((p = strchr(url, ':'))) {
av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
/* no protocol means plain filename */
av_strlcpy(path, url, path_size);
/* separate path from hostname */
ls = strchr(p, '/');
ls = strchr(p, '?');
av_strlcpy(path, ls, path_size);
ls = &p[strlen(p)]; // XXX
/* the rest is hostname, use that to parse auth/port */
/* authorization (user[:pass]@hostname) */
if ((at = strchr(p, '@')) && at < ls) {
av_strlcpy(authorization, p,
FFMIN(authorization_size, at + 1 - p));
p = at + 1; /* skip '@' */
/* Bracketed form: numeric IPv6 host, optional ":port" after ']'. */
if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
av_strlcpy(hostname, p + 1,
FFMIN(hostname_size, brk - p));
if (brk[1] == ':' && port_ptr)
*port_ptr = atoi(brk + 2);
} else if ((col = strchr(p, ':')) && col < ls) {
av_strlcpy(hostname, p,
FFMIN(col + 1 - p, hostname_size));
if (port_ptr) *port_ptr = atoi(col + 1);
av_strlcpy(hostname, p,
FFMIN(ls + 1 - p, hostname_size));
/**
 * Encode `s` bytes of src as hexadecimal text into buff.
 *
 * Writes exactly 2*s characters; no NUL terminator is appended — callers
 * terminate the buffer themselves if they need a C string.
 *
 * @param lowercase non-zero selects "abcdef", zero selects "ABCDEF"
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    const char *digits = lowercase ? "0123456789abcdef"
                                   : "0123456789ABCDEF";
    char *out = buff;
    int i;

    for (i = 0; i < s; i++) {
        *out++ = digits[src[i] >> 4];
        *out++ = digits[src[i] & 0xF];
    }

    return buff;
}
/* Parse hexadecimal text from p into data.  NOTE(review): most of the body
 * is elided in this view; the visible part skips whitespace and decodes one
 * uppercase hex digit per iteration — presumably data may be NULL to count
 * only, and the decoded length is returned; confirm against the full file. */
int ff_hex_to_data(uint8_t *data, const char *p)
p += strspn(p, SPACE_CHARS);
/* Cast to unsigned char before toupper(): negative chars would be UB. */
c = toupper((unsigned char) *p++);
if (c >= '0' && c <= '9')
else if (c >= 'A' && c <= 'F')
#if FF_API_SET_PTS_INFO
/* Deprecated public wrapper kept for API compatibility; forwards to
 * avpriv_set_pts_info() unchanged. */
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
unsigned int pts_num, unsigned int pts_den)
avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
/* Set the stream timebase to pts_num/pts_den, reducing the fraction (and
 * warning when it must be clipped to fit INT_MAX).  Invalid results are
 * rejected, leaving the previous timebase in place. */
void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
unsigned int pts_num, unsigned int pts_den)
/* av_reduce() returns non-zero when the reduced fraction is exact. */
if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
if(new_tb.num != pts_num)
av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
if(new_tb.num <= 0 || new_tb.den <= 0) {
av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase for st:%d\n", s->index);
s->time_base = new_tb;
s->pts_wrap_bits = pts_wrap_bits;
/* Assemble "proto://[authorization@]host[:port]<fmt...>" into str.
 * Numeric IPv6 hostnames are wrapped in brackets; the trailing fmt/varargs
 * part is appended with vsnprintf. */
int ff_url_join(char *str, int size, const char *proto,
const char *authorization, const char *hostname,
int port, const char *fmt, ...)
struct addrinfo hints = { 0 }, *ai;
av_strlcatf(str, size, "%s://", proto);
if (authorization && authorization[0])
av_strlcatf(str, size, "%s@", authorization);
#if CONFIG_NETWORK && defined(AF_INET6)
/* Determine if hostname is a numerical IPv6 address,
* properly escape it within [] in that case. */
hints.ai_flags = AI_NUMERICHOST;
if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
if (ai->ai_family == AF_INET6) {
av_strlcat(str, "[", size);
av_strlcat(str, hostname, size);
av_strlcat(str, "]", size);
av_strlcat(str, hostname, size);
/* Not an IPv6 address, just output the plain string. */
av_strlcat(str, hostname, size);
av_strlcatf(str, size, ":%d", port);
/* Append the formatted tail into whatever space remains. */
int len = strlen(str);
vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
4181 int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
4182 AVFormatContext *src)
4187 local_pkt.stream_index = dst_stream;
4188 if (pkt->pts != AV_NOPTS_VALUE)
4189 local_pkt.pts = av_rescale_q(pkt->pts,
4190 src->streams[pkt->stream_index]->time_base,
4191 dst->streams[dst_stream]->time_base);
4192 if (pkt->dts != AV_NOPTS_VALUE)
4193 local_pkt.dts = av_rescale_q(pkt->dts,
4194 src->streams[pkt->stream_index]->time_base,
4195 dst->streams[dst_stream]->time_base);
4196 return av_write_frame(dst, &local_pkt);
/* Walk a "key1=value1, key2=\"quoted value\"" style string and hand each
 * key/value pair to callback_get_buf, which supplies the destination buffer
 * (dest/dest_len) into which the value is copied. */
void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
const char *ptr = str;
/* Parse key=value pairs. */
char *dest = NULL, *dest_end;
int key_len, dest_len = 0;
/* Skip whitespace and potential commas. */
/* NOTE(review): isspace() on a plain char is UB for negative values —
 * a cast to unsigned char would be safer here. */
while (*ptr && (isspace(*ptr) || *ptr == ','))
if (!(ptr = strchr(key, '=')))
key_len = ptr - key;
/* The callback picks the output buffer for this key. */
callback_get_buf(context, key, key_len, &dest, &dest_len);
dest_end = dest + dest_len - 1;
/* Quoted value: copy until the closing quote. */
while (*ptr && *ptr != '\"') {
if (dest && dest < dest_end)
if (dest && dest < dest_end)
/* Unquoted value: copy until whitespace or comma. */
for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
if (dest && dest < dest_end)
4253 int ff_find_stream_index(AVFormatContext *s, int id)
4256 for (i = 0; i < s->nb_streams; i++) {
4257 if (s->streams[i]->id == id)
/* Resolve the (possibly relative) URL rel against base into buf, handling
 * server-absolute paths ("/x"), fully-qualified rels ("proto://...") and
 * "../" parent traversal against the base's directory part. */
void ff_make_absolute_url(char *buf, int size, const char *base,
/* Absolute path, relative to the current server */
if (base && strstr(base, "://") && rel[0] == '/') {
av_strlcpy(buf, base, size);
sep = strstr(buf, "://");
sep = strchr(sep, '/');
av_strlcat(buf, rel, size);
/* If rel actually is an absolute url, just copy it */
if (!base || strstr(rel, "://") || rel[0] == '/') {
av_strlcpy(buf, rel, size);
av_strlcpy(buf, base, size);
/* Remove the file name from the base url */
sep = strrchr(buf, '/');
/* Each leading "../" in rel pops one directory off the base path. */
while (av_strstart(rel, "../", NULL) && sep) {
/* Remove the path delimiter at the end */
sep = strrchr(buf, '/');
/* If the next directory name to pop off is "..", break here */
if (!strcmp(sep ? &sep[1] : buf, "..")) {
/* Readd the slash we just removed */
av_strlcat(buf, "/", size);
/* Cut off the directory name */
av_strlcat(buf, rel, size);
/* Convert an ISO-8601-ish date string to Unix time (UTC, via av_timegm).
 * Two layouts are tried: date and time separated by a space, or by 'T'.
 * On systems without strptime() a warning is logged instead (the #if is on
 * an elided line). */
int64_t ff_iso8601_to_unix_time(const char *datestr)
struct tm time1 = {0}, time2 = {0};
/* strptime treats spaces as "skip optional whitespace", so these formats
 * also match compact forms like "2012-01-01 00:00:00". */
ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
return av_timegm(&time2);
return av_timegm(&time1);
av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert "
"the date string.\n");
4332 int avformat_query_codec(AVOutputFormat *ofmt, enum CodecID codec_id, int std_compliance)
4335 if (ofmt->query_codec)
4336 return ofmt->query_codec(codec_id, std_compliance);
4337 else if (ofmt->codec_tag)
4338 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4339 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
4340 codec_id == ofmt->subtitle_codec)
4343 return AVERROR_PATCHWELCOME;
/* Initialize global networking state and record that the caller did so
 * explicitly, so avformat_network_deinit() knows cleanup is expected. */
int avformat_network_init(void)
ff_network_inited_globally = 1;
if ((ret = ff_network_init()) < 0)
/* Undo avformat_network_init(), releasing global networking resources.
 * NOTE(review): the body is elided in this view. */
int avformat_network_deinit(void)
/* Attach an AV_PKT_DATA_PARAM_CHANGE side-data blob to pkt describing the
 * non-zero parameter changes (channel count/layout, sample rate, frame
 * dimensions).  The blob is a little-endian flags word followed by one
 * field per flag set. */
int ff_add_param_change(AVPacket *pkt, int32_t channels,
uint64_t channel_layout, int32_t sample_rate,
int32_t width, int32_t height)
return AVERROR(EINVAL);
/* First pass: accumulate flags (and, on elided lines, the blob size). */
flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
if (channel_layout) {
flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
if (width || height) {
flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
return AVERROR(ENOMEM);
/* Second pass: serialize the flags word, then each present field. */
bytestream_put_le32(&data, flags);
bytestream_put_le32(&data, channels);
bytestream_put_le64(&data, channel_layout);
bytestream_put_le32(&data, sample_rate);
if (width || height) {
bytestream_put_le32(&data, width);
bytestream_put_le32(&data, height);
/* Expose the RIFF/BMP video codec-tag table to library users. */
const struct AVCodecTag *avformat_get_riff_video_tags(void)
return ff_codec_bmp_tags;
/* Expose the RIFF/WAV audio codec-tag table to library users. */
const struct AVCodecTag *avformat_get_riff_audio_tags(void)
return ff_codec_wav_tags;
4418 AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4420 AVRational undef = {0, 1};
4421 AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4422 AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : undef;
4424 av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4425 stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4426 if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4427 stream_sample_aspect_ratio = undef;
4429 av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4430 frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4431 if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4432 frame_sample_aspect_ratio = undef;
4434 if (stream_sample_aspect_ratio.num)
4435 return stream_sample_aspect_ratio;
4437 return frame_sample_aspect_ratio;