2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "avio_internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavcodec/raw.h"
29 #include "libavcodec/bytestream.h"
30 #include "libavutil/avassert.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/dict.h"
33 #include "libavutil/pixdesc.h"
36 #include "libavutil/avassert.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/timestamp.h"
42 #include "audiointerleave.h"
56 * various utility functions for use within FFmpeg
59 unsigned avformat_version(void)
61 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
62 return LIBAVFORMAT_VERSION_INT;
65 const char *avformat_configuration(void)
67 return FFMPEG_CONFIGURATION;
70 const char *avformat_license(void)
72 #define LICENSE_PREFIX "libavformat license: "
73 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Base offset used for internally generated ("relative") timestamps;
 * chosen near INT64_MAX so real stream timestamps never reach it. */
#define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))

/**
 * Classify a timestamp as relative (internally generated) or absolute.
 *
 * @param ts timestamp to test
 * @return nonzero if ts falls in the relative-timestamp window
 */
static int is_relative(int64_t ts)
{
    const int64_t lower_bound = RELATIVE_TS_BASE - (1LL << 48);
    return ts > lower_bound;
}
82 /* fraction handling */
85 * f = val + (num / den) + 0.5.
87 * 'num' is normalized so that it is such as 0 <= num < den.
89 * @param f fractional number
90 * @param val integer value
91 * @param num must be >= 0
92 * @param den must be >= 1
94 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
107 * Fractional addition to f: f = f + (incr / f->den).
109 * @param f fractional number
110 * @param incr increment, can be positive or negative
112 static void frac_add(AVFrac *f, int64_t incr)
125 } else if (num >= den) {
132 /** head of registered input format linked list */
133 static AVInputFormat *first_iformat = NULL;
134 /** head of registered output format linked list */
135 static AVOutputFormat *first_oformat = NULL;
137 AVInputFormat *av_iformat_next(AVInputFormat *f)
139 if(f) return f->next;
140 else return first_iformat;
143 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
145 if(f) return f->next;
146 else return first_oformat;
149 void av_register_input_format(AVInputFormat *format)
153 while (*p != NULL) p = &(*p)->next;
158 void av_register_output_format(AVOutputFormat *format)
162 while (*p != NULL) p = &(*p)->next;
167 int av_match_ext(const char *filename, const char *extensions)
175 ext = strrchr(filename, '.');
181 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
184 if (!av_strcasecmp(ext1, ext))
194 static int match_format(const char *name, const char *names)
202 namelen = strlen(name);
203 while ((p = strchr(names, ','))) {
204 len = FFMAX(p - names, namelen);
205 if (!av_strncasecmp(name, names, len))
209 return !av_strcasecmp(name, names);
212 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
213 const char *mime_type)
215 AVOutputFormat *fmt = NULL, *fmt_found;
216 int score_max, score;
218 /* specific test for image sequences */
219 #if CONFIG_IMAGE2_MUXER
220 if (!short_name && filename &&
221 av_filename_number_test(filename) &&
222 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
223 return av_guess_format("image2", NULL, NULL);
226 /* Find the proper file type. */
229 while ((fmt = av_oformat_next(fmt))) {
231 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
233 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
235 if (filename && fmt->extensions &&
236 av_match_ext(filename, fmt->extensions)) {
239 if (score > score_max) {
247 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
248 const char *filename, const char *mime_type, enum AVMediaType type){
249 if(type == AVMEDIA_TYPE_VIDEO){
250 enum CodecID codec_id= CODEC_ID_NONE;
252 #if CONFIG_IMAGE2_MUXER
253 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
254 codec_id= ff_guess_image2_codec(filename);
257 if(codec_id == CODEC_ID_NONE)
258 codec_id= fmt->video_codec;
260 }else if(type == AVMEDIA_TYPE_AUDIO)
261 return fmt->audio_codec;
262 else if (type == AVMEDIA_TYPE_SUBTITLE)
263 return fmt->subtitle_codec;
265 return CODEC_ID_NONE;
268 AVInputFormat *av_find_input_format(const char *short_name)
270 AVInputFormat *fmt = NULL;
271 while ((fmt = av_iformat_next(fmt))) {
272 if (match_format(short_name, fmt->name))
278 int ffio_limit(AVIOContext *s, int size)
281 int64_t remaining= s->maxsize - avio_tell(s);
282 if(remaining < size){
283 int64_t newsize= avio_size(s);
284 if(!s->maxsize || s->maxsize<newsize)
285 s->maxsize= newsize - !newsize;
286 remaining= s->maxsize - avio_tell(s);
287 remaining= FFMAX(remaining, 0);
290 if(s->maxsize>=0 && remaining+1 < size){
291 av_log(0, AV_LOG_ERROR, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
298 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
301 int orig_size = size;
302 size= ffio_limit(s, size);
304 ret= av_new_packet(pkt, size);
309 pkt->pos= avio_tell(s);
311 ret= avio_read(s, pkt->data, size);
315 av_shrink_packet(pkt, ret);
316 if (pkt->size < orig_size)
317 pkt->flags |= AV_PKT_FLAG_CORRUPT;
322 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
327 return av_get_packet(s, pkt, size);
328 old_size = pkt->size;
329 ret = av_grow_packet(pkt, size);
332 ret = avio_read(s, pkt->data + old_size, size);
333 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
338 int av_filename_number_test(const char *filename)
341 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
344 AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
346 AVProbeData lpd = *pd;
347 AVInputFormat *fmt1 = NULL, *fmt;
348 int score, nodat = 0, score_max=0;
350 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
351 int id3len = ff_id3v2_tag_len(lpd.buf);
352 if (lpd.buf_size > id3len + 16) {
354 lpd.buf_size -= id3len;
360 while ((fmt1 = av_iformat_next(fmt1))) {
361 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
364 if (fmt1->read_probe) {
365 score = fmt1->read_probe(&lpd);
366 if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
367 score = FFMAX(score, nodat ? AVPROBE_SCORE_MAX/4-1 : 1);
368 } else if (fmt1->extensions) {
369 if (av_match_ext(lpd.filename, fmt1->extensions)) {
373 if (score > score_max) {
376 }else if (score == score_max)
379 *score_ret= score_max;
384 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
387 AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
388 if(score_ret > *score_max){
389 *score_max= score_ret;
395 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
397 return av_probe_input_format2(pd, is_opened, &score);
400 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
402 static const struct {
403 const char *name; enum CodecID id; enum AVMediaType type;
405 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
406 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
407 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
408 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
409 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
410 { "loas" , CODEC_ID_AAC_LATM , AVMEDIA_TYPE_AUDIO },
411 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
412 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
413 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
417 AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
421 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
422 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
423 for (i = 0; fmt_id_type[i].name; i++) {
424 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
425 st->codec->codec_id = fmt_id_type[i].id;
426 st->codec->codec_type = fmt_id_type[i].type;
434 /************************************************************/
435 /* input media file */
437 int av_demuxer_open(AVFormatContext *ic){
440 if (ic->iformat->read_header) {
441 err = ic->iformat->read_header(ic);
446 if (ic->pb && !ic->data_offset)
447 ic->data_offset = avio_tell(ic->pb);
453 /** size of probe buffer, for guessing file type from file contents */
454 #define PROBE_BUF_MIN 2048
455 #define PROBE_BUF_MAX (1<<20)
457 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
458 const char *filename, void *logctx,
459 unsigned int offset, unsigned int max_probe_size)
461 AVProbeData pd = { filename ? filename : "", NULL, -offset };
462 unsigned char *buf = NULL;
463 int ret = 0, probe_size;
465 if (!max_probe_size) {
466 max_probe_size = PROBE_BUF_MAX;
467 } else if (max_probe_size > PROBE_BUF_MAX) {
468 max_probe_size = PROBE_BUF_MAX;
469 } else if (max_probe_size < PROBE_BUF_MIN) {
470 return AVERROR(EINVAL);
473 if (offset >= max_probe_size) {
474 return AVERROR(EINVAL);
477 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
478 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
479 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
480 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
483 if (probe_size < offset) {
487 /* read probe data */
488 buftmp = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
491 return AVERROR(ENOMEM);
494 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
495 /* fail if error was not end of file, otherwise, lower score */
496 if (ret != AVERROR_EOF) {
501 ret = 0; /* error was end of file, nothing read */
504 pd.buf = &buf[offset];
506 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
508 /* guess file format */
509 *fmt = av_probe_input_format2(&pd, 1, &score);
511 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
512 av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
514 av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
520 return AVERROR_INVALIDDATA;
523 /* rewind. reuse probe buffer to avoid seeking */
524 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
530 /* open input file and probe the format if necessary */
531 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
534 AVProbeData pd = {filename, NULL, 0};
537 s->flags |= AVFMT_FLAG_CUSTOM_IO;
539 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
540 else if (s->iformat->flags & AVFMT_NOFILE)
541 av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
542 "will be ignored with AVFMT_NOFILE format.\n");
546 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
547 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
550 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
551 &s->interrupt_callback, options)) < 0)
555 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
558 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
559 AVPacketList **plast_pktl){
560 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
565 (*plast_pktl)->next = pktl;
567 *packet_buffer = pktl;
569 /* add the packet in the buffered packet list */
575 static void queue_attached_pictures(AVFormatContext *s)
578 for (i = 0; i < s->nb_streams; i++)
579 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
580 s->streams[i]->discard < AVDISCARD_ALL) {
581 AVPacket copy = s->streams[i]->attached_pic;
582 copy.destruct = NULL;
583 add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end);
587 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
589 AVFormatContext *s = *ps;
591 AVDictionary *tmp = NULL;
592 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
594 if (!s && !(s = avformat_alloc_context()))
595 return AVERROR(ENOMEM);
597 av_log(0, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
598 return AVERROR(EINVAL);
604 av_dict_copy(&tmp, *options, 0);
606 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
609 if ((ret = init_input(s, filename, &tmp)) < 0)
612 /* check filename in case an image number is expected */
613 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
614 if (!av_filename_number_test(filename)) {
615 ret = AVERROR(EINVAL);
620 s->duration = s->start_time = AV_NOPTS_VALUE;
621 av_strlcpy(s->filename, filename, sizeof(s->filename));
623 /* allocate private data */
624 if (s->iformat->priv_data_size > 0) {
625 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
626 ret = AVERROR(ENOMEM);
629 if (s->iformat->priv_class) {
630 *(const AVClass**)s->priv_data = s->iformat->priv_class;
631 av_opt_set_defaults(s->priv_data);
632 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
637 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
639 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
641 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
642 if ((ret = s->iformat->read_header(s)) < 0)
645 if (id3v2_extra_meta &&
646 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
648 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
650 queue_attached_pictures(s);
652 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
653 s->data_offset = avio_tell(s->pb);
655 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
658 av_dict_free(options);
665 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
667 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
669 avformat_free_context(s);
674 /*******************************************************/
676 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
682 AVPacketList *pktl = s->raw_packet_buffer;
686 if(s->streams[pkt->stream_index]->request_probe <= 0){
687 s->raw_packet_buffer = pktl->next;
688 s->raw_packet_buffer_remaining_size += pkt->size;
695 ret= s->iformat->read_packet(s, pkt);
697 if (!pktl || ret == AVERROR(EAGAIN))
699 for (i = 0; i < s->nb_streams; i++)
700 if(s->streams[i]->request_probe > 0)
701 s->streams[i]->request_probe = -1;
705 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
706 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
707 av_log(s, AV_LOG_WARNING,
708 "Dropped corrupted packet (stream = %d)\n",
714 if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
715 av_packet_merge_side_data(pkt);
717 if(pkt->stream_index >= (unsigned)s->nb_streams){
718 av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
722 st= s->streams[pkt->stream_index];
724 switch(st->codec->codec_type){
725 case AVMEDIA_TYPE_VIDEO:
726 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
728 case AVMEDIA_TYPE_AUDIO:
729 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
731 case AVMEDIA_TYPE_SUBTITLE:
732 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
736 if(!pktl && st->request_probe <= 0)
739 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
740 s->raw_packet_buffer_remaining_size -= pkt->size;
742 if(st->request_probe>0){
743 AVProbeData *pd = &st->probe_data;
745 av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
748 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
749 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
750 pd->buf_size += pkt->size;
751 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
753 end= s->raw_packet_buffer_remaining_size <= 0
754 || st->probe_packets<=0;
756 if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
757 int score= set_codec_from_probe_data(s, st, pd);
758 if( (st->codec->codec_id != CODEC_ID_NONE && score > AVPROBE_SCORE_MAX/4)
762 st->request_probe= -1;
763 if(st->codec->codec_id != CODEC_ID_NONE){
764 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
766 av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
773 #if FF_API_READ_PACKET
774 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
776 return ff_read_packet(s, pkt);
781 /**********************************************************/
783 static int determinable_frame_size(AVCodecContext *avctx)
785 if (/*avctx->codec_id == CODEC_ID_AAC ||*/
786 avctx->codec_id == CODEC_ID_MP1 ||
787 avctx->codec_id == CODEC_ID_MP2 ||
788 avctx->codec_id == CODEC_ID_MP3/* ||
789 avctx->codec_id == CODEC_ID_CELT*/)
795 * Get the number of samples of an audio frame. Return -1 on error.
797 static int get_audio_frame_size(AVCodecContext *enc, int size, int mux)
801 /* give frame_size priority if demuxing */
802 if (!mux && enc->frame_size > 1)
803 return enc->frame_size;
805 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
808 /* fallback to using frame_size if muxing */
809 if (enc->frame_size > 1)
810 return enc->frame_size;
817 * Return the frame duration in seconds. Return 0 if not available.
819 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
820 AVCodecParserContext *pc, AVPacket *pkt)
826 switch(st->codec->codec_type) {
827 case AVMEDIA_TYPE_VIDEO:
828 if (st->r_frame_rate.num && !pc) {
829 *pnum = st->r_frame_rate.den;
830 *pden = st->r_frame_rate.num;
831 } else if(st->time_base.num*1000LL > st->time_base.den) {
832 *pnum = st->time_base.num;
833 *pden = st->time_base.den;
834 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
835 *pnum = st->codec->time_base.num;
836 *pden = st->codec->time_base.den;
837 if (pc && pc->repeat_pict) {
838 *pnum = (*pnum) * (1 + pc->repeat_pict);
840 //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
841 //Thus if we have no parser in such case leave duration undefined.
842 if(st->codec->ticks_per_frame>1 && !pc){
847 case AVMEDIA_TYPE_AUDIO:
848 frame_size = get_audio_frame_size(st->codec, pkt->size, 0);
849 if (frame_size <= 0 || st->codec->sample_rate <= 0)
852 *pden = st->codec->sample_rate;
859 static int is_intra_only(AVCodecContext *enc){
860 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
862 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
863 switch(enc->codec_id){
865 case CODEC_ID_MJPEGB:
867 case CODEC_ID_PRORES:
868 case CODEC_ID_RAWVIDEO:
870 case CODEC_ID_DVVIDEO:
871 case CODEC_ID_HUFFYUV:
872 case CODEC_ID_FFVHUFF:
877 case CODEC_ID_JPEG2000:
879 case CODEC_ID_UTVIDEO:
887 static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
891 if (pktl == s->parse_queue_end)
892 return s->packet_buffer;
896 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
897 int64_t dts, int64_t pts)
899 AVStream *st= s->streams[stream_index];
900 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
902 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))
905 st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
908 if (is_relative(pts))
909 pts += st->first_dts - RELATIVE_TS_BASE;
911 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
912 if(pktl->pkt.stream_index != stream_index)
914 if(is_relative(pktl->pkt.pts))
915 pktl->pkt.pts += st->first_dts - RELATIVE_TS_BASE;
917 if(is_relative(pktl->pkt.dts))
918 pktl->pkt.dts += st->first_dts - RELATIVE_TS_BASE;
920 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
921 st->start_time= pktl->pkt.pts;
923 if (st->start_time == AV_NOPTS_VALUE)
924 st->start_time = pts;
927 static void update_initial_durations(AVFormatContext *s, AVStream *st,
928 int stream_index, int duration)
930 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
931 int64_t cur_dts= RELATIVE_TS_BASE;
933 if(st->first_dts != AV_NOPTS_VALUE){
934 cur_dts= st->first_dts;
935 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
936 if(pktl->pkt.stream_index == stream_index){
937 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
942 if(pktl && pktl->pkt.dts != st->first_dts) {
943 av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s in que\n", av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts));
947 av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in ques\n", av_ts2str(st->first_dts));
950 pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
951 st->first_dts = cur_dts;
952 }else if(st->cur_dts != RELATIVE_TS_BASE)
955 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
956 if(pktl->pkt.stream_index != stream_index)
958 if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
959 && !pktl->pkt.duration){
960 pktl->pkt.dts= cur_dts;
961 if(!st->codec->has_b_frames)
962 pktl->pkt.pts= cur_dts;
963 // if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
964 pktl->pkt.duration = duration;
967 cur_dts = pktl->pkt.dts + pktl->pkt.duration;
970 st->cur_dts= cur_dts;
973 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
974 AVCodecParserContext *pc, AVPacket *pkt)
976 int num, den, presentation_delayed, delay, i;
979 if (s->flags & AVFMT_FLAG_NOFILLIN)
982 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
983 pkt->dts= AV_NOPTS_VALUE;
985 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
986 //FIXME Set low_delay = 0 when has_b_frames = 1
987 st->codec->has_b_frames = 1;
989 /* do we have a video B-frame ? */
990 delay= st->codec->has_b_frames;
991 presentation_delayed = 0;
993 /* XXX: need has_b_frame, but cannot get it if the codec is
996 pc && pc->pict_type != AV_PICTURE_TYPE_B)
997 presentation_delayed = 1;
999 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > pkt->pts && st->pts_wrap_bits<63){
1000 pkt->dts -= 1LL<<st->pts_wrap_bits;
1003 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
1004 // we take the conservative approach and discard both
1005 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
1006 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
1007 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1008 pkt->dts= AV_NOPTS_VALUE;
1011 if (pkt->duration == 0) {
1012 compute_frame_duration(&num, &den, st, pc, pkt);
1014 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
1017 if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
1018 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1020 /* correct timestamps with byte offset if demuxers only have timestamps
1021 on packet boundaries */
1022 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1023 /* this will estimate bitrate based on this frame's duration and size */
1024 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1025 if(pkt->pts != AV_NOPTS_VALUE)
1027 if(pkt->dts != AV_NOPTS_VALUE)
1031 if (pc && pc->dts_sync_point >= 0) {
1032 // we have synchronization info from the parser
1033 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1035 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1036 if (pkt->dts != AV_NOPTS_VALUE) {
1037 // got DTS from the stream, update reference timestamp
1038 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1039 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1040 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1041 // compute DTS based on reference timestamp
1042 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1043 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1045 if (pc->dts_sync_point > 0)
1046 st->reference_dts = pkt->dts; // new reference
1050 /* This may be redundant, but it should not hurt. */
1051 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1052 presentation_delayed = 1;
1054 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
1055 // presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
1056 /* interpolate PTS and DTS if they are not present */
1057 //We skip H264 currently because delay and has_b_frames are not reliably set
1058 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1059 if (presentation_delayed) {
1060 /* DTS = decompression timestamp */
1061 /* PTS = presentation timestamp */
1062 if (pkt->dts == AV_NOPTS_VALUE)
1063 pkt->dts = st->last_IP_pts;
1064 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1065 if (pkt->dts == AV_NOPTS_VALUE)
1066 pkt->dts = st->cur_dts;
1068 /* this is tricky: the dts must be incremented by the duration
1069 of the frame we are displaying, i.e. the last I- or P-frame */
1070 if (st->last_IP_duration == 0)
1071 st->last_IP_duration = pkt->duration;
1072 if(pkt->dts != AV_NOPTS_VALUE)
1073 st->cur_dts = pkt->dts + st->last_IP_duration;
1074 st->last_IP_duration = pkt->duration;
1075 st->last_IP_pts= pkt->pts;
1076 /* cannot compute PTS if not present (we can compute it only
1077 by knowing the future */
1078 } else if (pkt->pts != AV_NOPTS_VALUE ||
1079 pkt->dts != AV_NOPTS_VALUE ||
1081 int duration = pkt->duration;
1083 if(pkt->pts != AV_NOPTS_VALUE && duration){
1084 int64_t old_diff= FFABS(st->cur_dts - duration - pkt->pts);
1085 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1086 if( old_diff < new_diff && old_diff < (duration>>3)
1087 && (!strcmp(s->iformat->name, "mpeg") ||
1088 !strcmp(s->iformat->name, "mpegts"))){
1089 pkt->pts += duration;
1090 av_log(s, AV_LOG_WARNING, "Adjusting PTS forward\n");
1091 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%s size:%d\n",
1092 // pkt->stream_index, old_diff, new_diff, pkt->duration, av_ts2str(st->cur_dts), pkt->size);
1096 /* presentation is not delayed : PTS and DTS are the same */
1097 if (pkt->pts == AV_NOPTS_VALUE)
1098 pkt->pts = pkt->dts;
1099 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
1101 if (pkt->pts == AV_NOPTS_VALUE)
1102 pkt->pts = st->cur_dts;
1103 pkt->dts = pkt->pts;
1104 if (pkt->pts != AV_NOPTS_VALUE)
1105 st->cur_dts = pkt->pts + duration;
1109 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1110 st->pts_buffer[0]= pkt->pts;
1111 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1112 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1113 if(pkt->dts == AV_NOPTS_VALUE)
1114 pkt->dts= st->pts_buffer[0];
1115 if(st->codec->codec_id == CODEC_ID_H264){ // we skipped it above so we try here
1116 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1118 if(pkt->dts > st->cur_dts)
1119 st->cur_dts = pkt->dts;
1122 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1123 // presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1126 if(is_intra_only(st->codec))
1127 pkt->flags |= AV_PKT_FLAG_KEY;
1129 pkt->convergence_duration = pc->convergence_duration;
1132 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1135 AVPacketList *pktl = *pkt_buf;
1136 *pkt_buf = pktl->next;
1137 av_free_packet(&pktl->pkt);
1140 *pkt_buf_end = NULL;
1144 * Parse a packet, add all split parts to parse_queue
1146 * @param pkt packet to parse, NULL when flushing the parser at end of stream
1148 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1150 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1151 AVStream *st = s->streams[stream_index];
1152 uint8_t *data = pkt ? pkt->data : NULL;
1153 int size = pkt ? pkt->size : 0;
1154 int ret = 0, got_output = 0;
1157 av_init_packet(&flush_pkt);
1160 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1161 // preserve 0-size sync packets
1162 compute_pkt_fields(s, st, st->parser, pkt);
1165 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1168 av_init_packet(&out_pkt);
1169 len = av_parser_parse2(st->parser, st->codec,
1170 &out_pkt.data, &out_pkt.size, data, size,
1171 pkt->pts, pkt->dts, pkt->pos);
1173 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1174 /* increment read pointer */
1178 got_output = !!out_pkt.size;
1183 /* set the duration */
1184 out_pkt.duration = 0;
1185 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1186 if (st->codec->sample_rate > 0) {
1187 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1188 (AVRational){ 1, st->codec->sample_rate },
1192 } else if (st->codec->time_base.num != 0 &&
1193 st->codec->time_base.den != 0) {
1194 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1195 st->codec->time_base,
1200 out_pkt.stream_index = st->index;
1201 out_pkt.pts = st->parser->pts;
1202 out_pkt.dts = st->parser->dts;
1203 out_pkt.pos = st->parser->pos;
1205 if (st->parser->key_frame == 1 ||
1206 (st->parser->key_frame == -1 &&
1207 st->parser->pict_type == AV_PICTURE_TYPE_I))
1208 out_pkt.flags |= AV_PKT_FLAG_KEY;
1210 compute_pkt_fields(s, st, st->parser, &out_pkt);
1212 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1213 out_pkt.flags & AV_PKT_FLAG_KEY) {
1214 int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? out_pkt.pos : st->parser->frame_offset;
1215 ff_reduce_index(s, st->index);
1216 av_add_index_entry(st, pos, out_pkt.dts,
1217 0, 0, AVINDEX_KEYFRAME);
1220 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1221 out_pkt.destruct = pkt->destruct;
1222 pkt->destruct = NULL;
1224 if ((ret = av_dup_packet(&out_pkt)) < 0)
1227 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1228 av_free_packet(&out_pkt);
1229 ret = AVERROR(ENOMEM);
1235 /* end of the stream => close and free the parser */
1236 if (pkt == &flush_pkt) {
1237 av_parser_close(st->parser);
1242 av_free_packet(pkt);
1246 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1247 AVPacketList **pkt_buffer_end,
1251 av_assert0(*pkt_buffer);
1254 *pkt_buffer = pktl->next;
1256 *pkt_buffer_end = NULL;
/* Read the next demuxed packet and run it through the stream's parser if
 * needed.  Pulls raw packets via ff_read_packet(), lazily creates a parser
 * for streams flagged need_parsing, feeds packets to parse_packet() (or
 * emits them as-is when no parsing is required), maintains the generic
 * keyframe index, and finally returns a parsed packet from s->parse_queue.
 * On end of stream the parsers are flushed so buffered data is emitted.
 * NOTE(review): elided listing — some original lines are missing. */
1261 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1263     int ret = 0, i, got_packet = 0;
1265     av_init_packet(pkt);
1267     while (!got_packet && !s->parse_queue) {
1271         /* read next packet */
1272         ret = ff_read_packet(s, &cur_pkt);
1274             if (ret == AVERROR(EAGAIN))
1276             /* flush the parsers */
1277             for(i = 0; i < s->nb_streams; i++) {
1279                 if (st->parser && st->need_parsing)
/* NULL packet == flush request for this stream's parser. */
1280                     parse_packet(s, NULL, st->index);
1282             /* all remaining packets are now in parse_queue =>
1283              * really terminate parsing */
1287         st = s->streams[cur_pkt.stream_index];
/* pts < dts is impossible for a valid packet; warn but keep going. */
1289         if (cur_pkt.pts != AV_NOPTS_VALUE &&
1290             cur_pkt.dts != AV_NOPTS_VALUE &&
1291             cur_pkt.pts < cur_pkt.dts) {
1292             av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1293                    cur_pkt.stream_index,
1294                    av_ts2str(cur_pkt.pts),
1295                    av_ts2str(cur_pkt.dts),
1298         if (s->debug & FF_FDEBUG_TS)
1299             av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1300                    cur_pkt.stream_index,
1301                    av_ts2str(cur_pkt.pts),
1302                    av_ts2str(cur_pkt.dts),
/* Lazily create the parser on first use, unless parsing is disabled. */
1307         if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1308             st->parser = av_parser_init(st->codec->codec_id);
1310                 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1311                        "%s, packets or times may be invalid.\n",
1312                        avcodec_get_name(st->codec->codec_id));
1313                 /* no parser available: just output the raw packets */
1314                 st->need_parsing = AVSTREAM_PARSE_NONE;
1315             } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1316                 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1317             } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1318                 st->parser->flags |= PARSER_FLAG_ONCE;
1322         if (!st->need_parsing || !st->parser) {
1323             /* no parsing needed: we just output the packet as is */
1325             compute_pkt_fields(s, st, NULL, pkt);
1326             if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1327                 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1328                 ff_reduce_index(s, st->index);
1329                 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1332         } else if (st->discard < AVDISCARD_ALL) {
1333             if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1337             av_free_packet(&cur_pkt);
/* skip_to_keyframe: drop packets until the first keyframe after a seek. */
1339             if (pkt->flags & AV_PKT_FLAG_KEY)
1340                 st->skip_to_keyframe = 0;
1341             if (st->skip_to_keyframe) {
1342                 av_free_packet(&cur_pkt);
1347     if (!got_packet && s->parse_queue)
1348         ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1350     if(s->debug & FF_FDEBUG_TS)
1351         av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1353                av_ts2str(pkt->pts),
1354                av_ts2str(pkt->dts),
/* Public packet-reading entry point.  Without AVFMT_FLAG_GENPTS, reads from
 * the packet buffer (if non-empty) or straight from read_frame_internal().
 * With GENPTS, packets are buffered and missing pts values are resolved by
 * scanning ahead for the next dts on the same stream, comparing modulo
 * 2^pts_wrap_bits to survive timestamp wraps; at EOF the last packets get
 * pts = last_dts + duration.  Before returning, timestamps still expressed
 * relative to RELATIVE_TS_BASE are shifted back to the real origin.
 * NOTE(review): elided listing — some original lines are missing. */
1362 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1364     const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1369         ret = s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
1370                                                          &s->packet_buffer_end,
1372                                  read_frame_internal(s, pkt);
1377         AVPacketList *pktl = s->packet_buffer;
1380             AVPacket *next_pkt = &pktl->pkt;
1382             if (next_pkt->dts != AV_NOPTS_VALUE) {
1383                 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1384                 // last dts seen for this stream. if any of packets following
1385                 // current one had no dts, we will set this to AV_NOPTS_VALUE.
1386                 int64_t last_dts = next_pkt->dts;
1387                 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1388                     if (pktl->pkt.stream_index == next_pkt->stream_index &&
1389                         (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1390                         if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1391                             next_pkt->pts = pktl->pkt.dts;
1393                         if (last_dts != AV_NOPTS_VALUE) {
1394                             // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1395                             last_dts = pktl->pkt.dts;
1400                 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1401                     // Fixing the last reference frame had none pts issue (For MXF etc).
1402                     // We only do this when
1404                     // 2. we are not able to resolve a pts value for current packet.
1405                     // 3. the packets for this stream at the end of the files had valid dts.
1406                     next_pkt->pts = last_dts + next_pkt->duration;
1408                 pktl = s->packet_buffer;
1411             /* read packet from packet buffer, if there is data */
1412             if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1413                   next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1414                 ret = read_from_packet_buffer(&s->packet_buffer,
1415                                               &s->packet_buffer_end, pkt);
1420         ret = read_frame_internal(s, pkt);
1422             if (pktl && ret != AVERROR(EAGAIN)) {
/* Keep a private copy in the buffer; add_to_pktbuf result feeds av_dup_packet. */
1429             if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1430                                             &s->packet_buffer_end)) < 0)
1431                 return AVERROR(ENOMEM);
/* Convert internal "relative" timestamps back to the caller-visible origin. */
1435     if (is_relative(pkt->dts))
1436         pkt->dts -= RELATIVE_TS_BASE;
1437     if (is_relative(pkt->pts))
1438         pkt->pts -= RELATIVE_TS_BASE;
1442 /* XXX: suppress the packet queue */
/* Drop every queued packet (parse, buffered and raw queues) and re-arm the
 * raw-packet probing budget. */
1443 static void flush_packet_queue(AVFormatContext *s)
1445     free_packet_buffer(&s->parse_queue,       &s->parse_queue_end);
1446     free_packet_buffer(&s->packet_buffer,     &s->packet_buffer_end);
1447     free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1449     s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1452 /*******************************************************/
/* Pick a default stream for seeking/dispatch: the first video stream that is
 * not an attached picture wins; otherwise the first audio stream; otherwise
 * stream 0.  NOTE(review): elided listing — some original lines are missing
 * (e.g. the early-return for the video case). */
1455 int av_find_default_stream_index(AVFormatContext *s)
1457     int first_audio_index = -1;
1461     if (s->nb_streams <= 0)
1463     for(i = 0; i < s->nb_streams; i++) {
1465         if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1466             !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1469         if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1470             first_audio_index = i;
1472     return first_audio_index >= 0 ? first_audio_index : 0;
1476  * Flush the frame reader.
/* Reset all per-stream read state after a seek: drop queued packets, close
 * parsers, invalidate timestamp tracking (cur_dts, last_IP_pts,
 * reference_dts, the pts reorder buffer) and re-arm stream probing. */
1478 void ff_read_frame_flush(AVFormatContext *s)
1483     flush_packet_queue(s);
1485     /* for each stream, reset read state */
1486     for(i = 0; i < s->nb_streams; i++) {
1490             av_parser_close(st->parser);
1493         st->last_IP_pts = AV_NOPTS_VALUE;
/* If no absolute origin is known yet, restart dts generation from the
 * internal relative base; otherwise the origin is simply unknown now. */
1494         if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1495         else                                st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1496         st->reference_dts = AV_NOPTS_VALUE;
1498         st->probe_packets = MAX_PROBE_PACKETS;
1500         for(j=0; j<MAX_REORDER_DELAY+1; j++)
1501             st->pts_buffer[j]= AV_NOPTS_VALUE;
/* After a successful seek, set every stream's cur_dts to 'timestamp'
 * (expressed in ref_st's time base) rescaled into that stream's own
 * time base. */
1505 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1509     for(i = 0; i < s->nb_streams; i++) {
1510         AVStream *st = s->streams[i];
/* int64_t cast avoids overflow in the den*num products. */
1512         st->cur_dts = av_rescale(timestamp,
1513                                  st->time_base.den * (int64_t)ref_st->time_base.num,
1514                                  st->time_base.num * (int64_t)ref_st->time_base.den);
/* Keep the seek index within s->max_index_size bytes: once the entry count
 * reaches the limit, discard every second entry (coarser but bounded). */
1518 void ff_reduce_index(AVFormatContext *s, int stream_index)
1520     AVStream *st= s->streams[stream_index];
1521     unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1523     if((unsigned)st->nb_index_entries >= max_entries){
1525         for(i=0; 2*i<st->nb_index_entries; i++)
1526             st->index_entries[i]= st->index_entries[2*i];
1527         st->nb_index_entries= i;
/* Insert an entry into a timestamp-sorted seek index, growing the array with
 * av_fast_realloc.  An entry with an identical timestamp is overwritten in
 * place (keeping the larger min_distance when the position matches); the
 * append case asserts the index stays sorted.  Returns the entry's index.
 * NOTE(review): elided listing — some original lines are missing. */
1531 int ff_add_index_entry(AVIndexEntry **index_entries,
1532                        int *nb_index_entries,
1533                        unsigned int *index_entries_allocated_size,
1534                        int64_t pos, int64_t timestamp, int size, int distance, int flags)
1536     AVIndexEntry *entries, *ie;
/* Guard against allocation-size overflow before growing the array. */
1539     if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1542     if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1543         timestamp -= RELATIVE_TS_BASE;
1545     entries = av_fast_realloc(*index_entries,
1546                               index_entries_allocated_size,
1547                               (*nb_index_entries + 1) *
1548                               sizeof(AVIndexEntry));
1552     *index_entries= entries;
/* Find where this timestamp belongs (ANY = don't restrict to keyframes). */
1554     index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1557         index= (*nb_index_entries)++;
1558         ie= &entries[index];
1559         assert(index==0 || ie[-1].timestamp < timestamp);
1561         ie= &entries[index];
1562         if(ie->timestamp != timestamp){
1563             if(ie->timestamp <= timestamp)
/* Shift the tail right to open a slot for the new entry. */
1565             memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1566             (*nb_index_entries)++;
1567         }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1568             distance= ie->min_distance;
1572     ie->timestamp = timestamp;
1573     ie->min_distance= distance;
/* Public wrapper: add an index entry to a stream's own index arrays. */
1580 int av_add_index_entry(AVStream *st,
1581                        int64_t pos, int64_t timestamp, int size, int distance, int flags)
1583     return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1584                               &st->index_entries_allocated_size, pos,
1585                               timestamp, size, distance, flags);
/* Binary-search a sorted index for wanted_timestamp.  AVSEEK_FLAG_BACKWARD
 * selects the entry at or before the target, otherwise at or after; unless
 * AVSEEK_FLAG_ANY is set the result is walked to the nearest keyframe entry.
 * NOTE(review): elided listing — loop setup/return lines are missing. */
1588 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1589                               int64_t wanted_timestamp, int flags)
1597     //optimize appending index entries at the end
1598     if(b && entries[b-1].timestamp < wanted_timestamp)
1603         timestamp = entries[m].timestamp;
1604         if(timestamp >= wanted_timestamp)
1606         if(timestamp <= wanted_timestamp)
1609     m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1611     if(!(flags & AVSEEK_FLAG_ANY)){
/* Step toward a keyframe entry in the requested direction. */
1612         while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1613             m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Public wrapper: search a stream's own index for wanted_timestamp. */
1622 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1625     return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1626                                      wanted_timestamp, flags);
/* Seek via binary search over the demuxer's read_timestamp() callback.
 * Cached index entries are used to seed the search bounds (ts/pos min from
 * the entry at or before target_ts, ts/pos max from the one after), then
 * ff_gen_search() locates the byte position, the IO context is repositioned
 * and the read state flushed.
 * NOTE(review): elided listing — some original lines are missing. */
1629 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1631     AVInputFormat *avif= s->iformat;
1632     int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1633     int64_t ts_min, ts_max, ts;
1638     if (stream_index < 0)
1641     av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1644     ts_min= AV_NOPTS_VALUE;
1645     pos_limit= -1; //gcc falsely says it may be uninitialized
1647     st= s->streams[stream_index];
1648     if(st->index_entries){
/* Lower bound: last index entry not after the target. */
1651         index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1652         index= FFMAX(index, 0);
1653         e= &st->index_entries[index];
1655         if(e->timestamp <= target_ts || e->pos == e->min_distance){
1657             ts_min= e->timestamp;
1658             av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1659                     pos_min, av_ts2str(ts_min));
/* Upper bound: first index entry at or after the target. */
1664         index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1665         assert(index < st->nb_index_entries);
1667             e= &st->index_entries[index];
1668             assert(e->timestamp >= target_ts);
1670             ts_max= e->timestamp;
1671             pos_limit= pos_max - e->min_distance;
1672             av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
1673                     pos_max, pos_limit, av_ts2str(ts_max));
1677     pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1682     if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1685     ff_read_frame_flush(s);
1686     ff_update_cur_dts(s, st, ts);
/* Generic timestamp search over byte positions.  Establishes ts_min at
 * s->data_offset and ts_max near end-of-file (stepping backwards until a
 * timestamp is found), then narrows [pos_min, pos_limit] by interpolation,
 * falling back to bisection and finally linear search, always calling the
 * demuxer-supplied read_timestamp().  Returns the chosen byte position and
 * stores its timestamp in *ts_ret.
 * NOTE(review): elided listing — some original lines are missing. */
1691 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1692                       int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1693                       int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1694                       int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1697     int64_t start_pos, filesize;
1700     av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
/* Establish the lower bound if the caller did not provide one. */
1702     if(ts_min == AV_NOPTS_VALUE){
1703         pos_min = s->data_offset;
1704         ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1705         if (ts_min == AV_NOPTS_VALUE)
1709     if(ts_min >= target_ts){
/* Establish the upper bound by probing backwards from end of file. */
1714     if(ts_max == AV_NOPTS_VALUE){
1716         filesize = avio_size(s->pb);
1717         pos_max = filesize - 1;
1720             ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1722         }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1723         if (ts_max == AV_NOPTS_VALUE)
1727             int64_t tmp_pos= pos_max + 1;
1728             int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1729             if(tmp_ts == AV_NOPTS_VALUE)
1733             if(tmp_pos >= filesize)
1739     if(ts_max <= target_ts){
1744     if(ts_min > ts_max){
1746     }else if(ts_min == ts_max){
1751     while (pos_min < pos_limit) {
1752         av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1753                 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1754         assert(pos_limit <= pos_max);
1757             int64_t approximate_keyframe_distance= pos_max - pos_limit;
1758             // interpolate position (better than dichotomy)
1759             pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1760                 + pos_min - approximate_keyframe_distance;
1761         }else if(no_change==1){
1762             // bisection, if interpolation failed to change min or max pos last time
1763             pos = (pos_min + pos_limit)>>1;
1765             /* linear search if bisection failed, can only happen if there
1766                are very few or no keyframes between min/max */
1771         else if(pos > pos_limit)
1775         ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1780         av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1781                 pos_min, pos, pos_max,
1782                 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1783                 pos_limit, start_pos, no_change);
1784         if(ts == AV_NOPTS_VALUE){
1785             av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1788         assert(ts != AV_NOPTS_VALUE);
/* Narrow whichever bound the probe landed on. */
1789         if (target_ts <= ts) {
1790             pos_limit = start_pos - 1;
1794         if (target_ts >= ts) {
1800         pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1801         ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min  : ts_max;
1804     ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1806     ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1807     av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1808             pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
/* Byte-position seek: clamp pos to [data_offset, filesize-1] and reposition
 * the IO context directly. */
1814 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1815     int64_t pos_min, pos_max;
1817     pos_min = s->data_offset;
1818     pos_max = avio_size(s->pb) - 1;
1820     if     (pos < pos_min) pos= pos_min;
1821     else if(pos > pos_max) pos= pos_max;
1823     avio_seek(s->pb, pos, SEEK_SET);
/* Index-driven generic seek.  If the existing index covers the timestamp,
 * jump straight to the cached entry.  Otherwise reposition to the end of the
 * index (or data_offset) and read frames forward — relying on the generic
 * index being extended as packets are read — giving up after more than 1000
 * non-keyframes past the target (CD graphics streams excepted, which have
 * no keyframes).  NOTE(review): elided listing — some lines are missing. */
1828 static int seek_frame_generic(AVFormatContext *s,
1829                               int stream_index, int64_t timestamp, int flags)
1836     st = s->streams[stream_index];
1838     index = av_index_search_timestamp(st, timestamp, flags);
1840     if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1843     if(index < 0 || index==st->nb_index_entries-1){
1847         if(st->nb_index_entries){
1848             assert(st->index_entries);
/* Resume reading from the last indexed position. */
1849             ie= &st->index_entries[st->nb_index_entries-1];
1850             if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1852             ff_update_cur_dts(s, st, ie->timestamp);
1854             if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1860                 read_status = av_read_frame(s, &pkt);
1861             } while (read_status == AVERROR(EAGAIN));
1862             if (read_status < 0)
1864             av_free_packet(&pkt);
1865             if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1866                 if(pkt.flags & AV_PKT_FLAG_KEY)
1868                 if(nonkey++ > 1000 && st->codec->codec_id != CODEC_ID_CDGRAPHICS){
1869                     av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
/* Retry the index lookup now that reading has extended it. */
1874         index = av_index_search_timestamp(st, timestamp, flags);
1879     ff_read_frame_flush(s);
1880     AV_NOWARN_DEPRECATED(
1881     if (s->iformat->read_seek){
1882         if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1886     ie = &st->index_entries[index];
1887     if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1889     ff_update_cur_dts(s, st, ie->timestamp);
/* Seek dispatcher: byte seek if requested (and allowed), else map the
 * timestamp from AV_TIME_BASE for a default stream, then try in order the
 * demuxer's read_seek, binary search via read_timestamp, and finally the
 * generic index-based seek, honouring the AVFMT_NO*SEARCH capability flags.
 * NOTE(review): elided listing — some original lines are missing. */
1894 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1895                                int64_t timestamp, int flags)
1900     if (flags & AVSEEK_FLAG_BYTE) {
1901         if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1903         ff_read_frame_flush(s);
1904         return seek_frame_byte(s, stream_index, timestamp, flags);
1907     if(stream_index < 0){
1908         stream_index= av_find_default_stream_index(s);
1909         if(stream_index < 0)
1912         st= s->streams[stream_index];
1913         /* timestamp for default must be expressed in AV_TIME_BASE units */
1914         timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1917     /* first, we try the format specific seek */
1918     AV_NOWARN_DEPRECATED(
1919     if (s->iformat->read_seek) {
1920         ff_read_frame_flush(s);
1921         ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1929     if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1930         ff_read_frame_flush(s);
1931         return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1932     } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1933         ff_read_frame_flush(s);
1934         return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public seek entry point: delegate to seek_frame_internal() and, on
 * success, re-queue attached pictures so they are delivered again. */
1940 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1942     int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1945         queue_attached_pictures(s);
/* New-style seek API with a [min_ts, max_ts] tolerance window around ts.
 * Prefers the demuxer's read_seek2; otherwise falls back to av_seek_frame,
 * picking the direction nearest to ts and retrying in the opposite
 * direction when that fails and the window allows it.
 * NOTE(review): elided listing — some original lines are missing. */
1950 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1952     if(min_ts > ts || max_ts < ts)
1955     if (s->iformat->read_seek2) {
1957         ff_read_frame_flush(s);
1958         ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1961             queue_attached_pictures(s);
1965     if(s->iformat->read_timestamp){
1966         //try to seek via read_timestamp()
1969     //Fallback to old API if new is not implemented but old is
1970     //Note the old API has somewhat different semantics
1971     AV_NOWARN_DEPRECATED(
1972     if (s->iformat->read_seek || 1) {
/* Unsigned compare picks BACKWARD when ts sits closer to max_ts. */
1973         int dir = (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0);
1974         int ret = av_seek_frame(s, stream_index, ts, flags | dir);
1975         if (ret<0 && ts != min_ts && max_ts != ts) {
1976             ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
1978                 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
1984     // try some generic seek like seek_frame_generic() but with new ts semantics
1987 /*******************************************************/
1990  * Return TRUE if the stream has accurate duration in any stream.
1992  * @return TRUE if the stream has accurate duration for at least one component.
/* True when any individual stream, or the container itself, already has a
 * known duration. */
1994 static int has_duration(AVFormatContext *ic)
1999     for(i = 0;i < ic->nb_streams; i++) {
2000         st = ic->streams[i];
2001         if (st->duration != AV_NOPTS_VALUE)
2004     if (ic->duration != AV_NOPTS_VALUE)
2010  * Estimate the stream timings from the one of each components.
2012  * Also computes the global bitrate if possible.
/* Aggregate per-stream start_time/duration (rescaled to AV_TIME_BASE) into
 * the container-level fields.  Subtitle start times are tracked separately
 * so a lone early subtitle cannot drag the global start_time back by more
 * than one second.  When filesize and duration are known, derive bit_rate.
 * NOTE(review): elided listing — some original lines are missing. */
2014 static void update_stream_timings(AVFormatContext *ic)
2016     int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2017     int64_t duration, duration1, filesize;
2021     start_time = INT64_MAX;
2022     start_time_text = INT64_MAX;
2023     end_time = INT64_MIN;
2024     duration = INT64_MIN;
2025     for(i = 0;i < ic->nb_streams; i++) {
2026         st = ic->streams[i];
2027         if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2028             start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
2029             if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2030                 if (start_time1 < start_time_text)
2031                     start_time_text = start_time1;
2033                 start_time = FFMIN(start_time, start_time1);
2034             if (st->duration != AV_NOPTS_VALUE) {
2035                 end_time1 = start_time1
2036                           + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2037                 end_time = FFMAX(end_time, end_time1);
2040         if (st->duration != AV_NOPTS_VALUE) {
2041             duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2042             duration = FFMAX(duration, duration1);
/* Use the subtitle start only when no other stream has one, or when it is
 * earlier by less than one second. */
2045     if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2046         start_time = start_time_text;
2047     if (start_time != INT64_MAX) {
2048         ic->start_time = start_time;
2049         if (end_time != INT64_MIN)
2050             duration = FFMAX(duration, end_time - start_time);
2052     if (duration != INT64_MIN && ic->duration == AV_NOPTS_VALUE) {
2053         ic->duration = duration;
2055     if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2056         /* compute the bitrate */
2057         ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
2058             (double)ic->duration;
/* Push the container-level start_time/duration down to any stream that does
 * not have its own, after refreshing the container values. */
2062 static void fill_all_stream_timings(AVFormatContext *ic)
2067     update_stream_timings(ic);
2068     for(i = 0;i < ic->nb_streams; i++) {
2069         st = ic->streams[i];
2070         if (st->start_time == AV_NOPTS_VALUE) {
2071             if(ic->start_time != AV_NOPTS_VALUE)
2072                 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2073             if(ic->duration != AV_NOPTS_VALUE)
2074                 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Last-resort duration estimate: if the container bitrate is unset, sum the
 * per-stream codec bitrates; then derive duration = filesize * 8 / bitrate
 * for every stream still lacking one.
 * NOTE(review): elided listing — some original lines are missing. */
2079 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2081     int64_t filesize, duration;
2085     /* if bit_rate is already set, we believe it */
2086     if (ic->bit_rate <= 0) {
2088         for(i=0;i<ic->nb_streams;i++) {
2089             st = ic->streams[i];
2090             if (st->codec->bit_rate > 0)
2091             bit_rate += st->codec->bit_rate;
2093         ic->bit_rate = bit_rate;
2096     /* if duration is already set, we believe it */
2097     if (ic->duration == AV_NOPTS_VALUE &&
2098         ic->bit_rate != 0) {
2099         filesize = ic->pb ? avio_size(ic->pb) : 0;
2101             for(i = 0; i < ic->nb_streams; i++) {
2102                 st = ic->streams[i];
2103                 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2104                 if (st->duration == AV_NOPTS_VALUE)
2105                     st->duration = duration;
/* How many bytes to scan from the end of the file per attempt, and how many
 * doubling retries to make, when hunting for the last timestamps. */
2111 #define DURATION_MAX_READ_SIZE 250000
2112 #define DURATION_MAX_RETRY 3
2114 /* only usable for MPEG-PS streams */
/* Estimate per-stream durations by reading packets near the end of the file
 * and taking the last pts seen for each stream, minus its start time (with a
 * pts_wrap_bits correction for wrapped timestamps).  The probe window is
 * doubled up to DURATION_MAX_RETRY times if no end time is found.  Restores
 * the original file position and resets per-stream dts state afterwards.
 * NOTE(review): elided listing — some original lines are missing. */
2115 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2117     AVPacket pkt1, *pkt = &pkt1;
2119     int read_size, i, ret;
2121     int64_t filesize, offset, duration;
2124     /* flush packet queue */
2125     flush_packet_queue(ic);
2127     for (i=0; i<ic->nb_streams; i++) {
2128         st = ic->streams[i];
2129         if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2130             av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2133             av_parser_close(st->parser);
2138     /* estimate the end time (duration) */
2139     /* XXX: may need to support wrapping */
2140     filesize = ic->pb ? avio_size(ic->pb) : 0;
2141     end_time = AV_NOPTS_VALUE;
2143         offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2147         avio_seek(ic->pb, offset, SEEK_SET);
2150             if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2154                 ret = ff_read_packet(ic, pkt);
2155             } while(ret == AVERROR(EAGAIN));
2158             read_size += pkt->size;
2159             st = ic->streams[pkt->stream_index];
2160             if (pkt->pts != AV_NOPTS_VALUE &&
2161                 (st->start_time != AV_NOPTS_VALUE ||
2162                  st->first_dts  != AV_NOPTS_VALUE)) {
2163                 duration = end_time = pkt->pts;
2164                 if (st->start_time != AV_NOPTS_VALUE)
2165                     duration -= st->start_time;
2167                     duration -= st->first_dts;
/* Negative difference implies a timestamp wrap; undo it. */
2169                     duration += 1LL<<st->pts_wrap_bits;
2171                     if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
2172                         st->duration = duration;
2175             av_free_packet(pkt);
2177     }while(   end_time==AV_NOPTS_VALUE
2178            && filesize > (DURATION_MAX_READ_SIZE<<retry)
2179            && ++retry <= DURATION_MAX_RETRY);
2181     fill_all_stream_timings(ic);
2183     avio_seek(ic->pb, old_offset, SEEK_SET);
2184     for (i=0; i<ic->nb_streams; i++) {
2186         st->cur_dts= st->first_dts;
2187         st->last_IP_pts = AV_NOPTS_VALUE;
2188         st->reference_dts = AV_NOPTS_VALUE;
/* Pick a duration-estimation strategy: pts scan for seekable MPEG-PS/TS,
 * stream timings when at least one component has them, and finally the
 * bitrate-based guess; then refresh the container-level fields and dump the
 * result at debug level.
 * NOTE(review): elided listing — some original lines are missing. */
2192 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2196     /* get the file size, if possible */
2197     if (ic->iformat->flags & AVFMT_NOFILE) {
2200         file_size = avio_size(ic->pb);
2201         file_size = FFMAX(0, file_size);
2204     if ((!strcmp(ic->iformat->name, "mpeg") ||
2205          !strcmp(ic->iformat->name, "mpegts")) &&
2206         file_size && ic->pb->seekable) {
2207         /* get accurate estimate from the PTSes */
2208         estimate_timings_from_pts(ic, old_offset);
2209     } else if (has_duration(ic)) {
2210         /* at least one component has timings - we use them for all
2212         fill_all_stream_timings(ic);
2214         av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2215         /* less precise: use bitrate info */
2216         estimate_timings_from_bit_rate(ic);
2218     update_stream_timings(ic);
2222         AVStream av_unused *st;
2223         for(i = 0;i < ic->nb_streams; i++) {
2224             st = ic->streams[i];
2225             av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2226                     (double) st->start_time / AV_TIME_BASE,
2227                     (double) st->duration   / AV_TIME_BASE);
2229         av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2230                 (double) ic->start_time / AV_TIME_BASE,
2231                 (double) ic->duration   / AV_TIME_BASE,
2232                 ic->bit_rate / 1000);
/* Return non-zero once enough codec parameters are known for this stream to
 * be usable: audio needs sample rate, channels, frame size (when
 * determinable) and sample format; video needs a pixel format; raw DATA
 * streams with no codec id count as complete.  A known codec_id is required
 * in all other cases.
 * NOTE(review): elided listing — some original lines are missing. */
2236 static int has_codec_parameters(AVStream *st)
2238     AVCodecContext *avctx = st->codec;
2240     switch (avctx->codec_type) {
2241     case AVMEDIA_TYPE_AUDIO:
2242         val = avctx->sample_rate && avctx->channels;
2243         if (!avctx->frame_size && determinable_frame_size(avctx))
/* found_decoder >= 0 means we at least attempted to open a decoder. */
2245         if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2248     case AVMEDIA_TYPE_VIDEO:
2250         if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
2253     case AVMEDIA_TYPE_DATA:
2254         if(avctx->codec_id == CODEC_ID_NONE) return 1;
2259     return avctx->codec_id != CODEC_ID_NONE && val != 0;
/* H.264 can reorder frames deeply, so its decode delay is only trusted
 * after at least 6 frames have been decoded; every other codec is trusted
 * immediately. */
2262 static int has_decode_delay_been_guessed(AVStream *st)
2264     return st->codec->codec_id != CODEC_ID_H264 ||
2265            st->info->nb_decoded_frames >= 6;
2268 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/* Probe helper: open the stream's decoder if needed (recording success or
 * failure in st->info->found_decoder) and decode from the packet until the
 * missing codec parameters / decode delay are resolved or the data runs out.
 * 'options' (if given) is used for avcodec_open2; otherwise threads are
 * forced to 1 so extradata extraction works (see comment below).
 * NOTE(review): elided listing — some original lines are missing. */
2269 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2272     int got_picture = 1, ret = 0;
2274     AVPacket pkt = *avpkt;
2276     if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2277         AVDictionary *thread_opt = NULL;
2279         codec = st->codec->codec ? st->codec->codec :
2280                                    avcodec_find_decoder(st->codec->codec_id);
/* -1 = tried and failed; prevents repeated open attempts. */
2283             st->info->found_decoder = -1;
2287         /* force thread count to 1 since the h264 decoder will not extract SPS
2288          *  and PPS to extradata during multi-threaded decoding */
2289         av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2290         ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2292             av_dict_free(&thread_opt);
2294             st->info->found_decoder = -1;
2297         st->info->found_decoder = 1;
2298     } else if (!st->info->found_decoder)
2299         st->info->found_decoder = 1;
2301     if (st->info->found_decoder < 0)
/* Keep decoding while data remains (or the decoder still flushes frames)
 * and something about the stream is still undetermined. */
2304     while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2306            (!has_codec_parameters(st)  ||
2307             !has_decode_delay_been_guessed(st) ||
2308             (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2310         avcodec_get_frame_defaults(&picture);
2311         switch(st->codec->codec_type) {
2312         case AVMEDIA_TYPE_VIDEO:
2313             ret = avcodec_decode_video2(st->codec, &picture,
2314                                         &got_picture, &pkt);
2316         case AVMEDIA_TYPE_AUDIO:
2317             ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
2324                 st->info->nb_decoded_frames++;
2330     if(!pkt.data && !got_picture)
/* Look up the container tag (fourcc) for a codec id in a tag table
 * terminated by CODEC_ID_NONE.
 * NOTE(review): elided listing — the loop body and return are missing. */
2335 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2337     while (tags->id != CODEC_ID_NONE) {
/* Reverse lookup: map a container tag to a codec id.  A first pass matches
 * the tag exactly; a second pass compares case-insensitively via
 * avpriv_toupper4 to catch mixed-case fourccs.  Returns CODEC_ID_NONE when
 * nothing matches. */
2345 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2348     for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2349         if(tag == tags[i].tag)
2352     for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2353         if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2356     return CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for the tag of 'id'.
 * NOTE(review): elided listing — the hit-return and fallthrough lines are
 * missing. */
2359 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2362     for(i=0; tags && tags[i]; i++){
2363         int tag= ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for the codec id matching
 * 'tag'; CODEC_ID_NONE when no table resolves it. */
2369 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2372     for(i=0; tags && tags[i]; i++){
2373         enum CodecID id= ff_codec_get_id(tags[i], tag);
2374         if(id!=CODEC_ID_NONE) return id;
2376     return CODEC_ID_NONE;
/* Fill in missing chapter end times: a chapter without an explicit end
 * extends to the start of the next chapter that begins after it (compared in
 * the chapter's own time base), bounded by the overall presentation end
 * (start_time + duration).  A chapter with no later neighbour and no known
 * file end collapses to zero length (end = start).
 * NOTE(review): elided listing — some original lines are missing. */
2379 static void compute_chapters_end(AVFormatContext *s)
2382     int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2384     for (i = 0; i < s->nb_chapters; i++)
2385         if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2386             AVChapter *ch = s->chapters[i];
2387             int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2390             for (j = 0; j < s->nb_chapters; j++) {
2391                 AVChapter *ch1 = s->chapters[j];
2392                 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2393                 if (j != i && next_start > ch->start && next_start < end)
2396             ch->end = (end == INT64_MAX) ? ch->start : end;
/* Enumerate candidate standard frame rates for fps detection, scaled by
 * 12*1001: indices below 60*12 cover (i/12)/1.001 fps steps; the remaining
 * indices map to the common exact rates 24, 30, 60, 12 and 15 fps. */
2400 static int get_std_framerate(int i){
2401     if(i<60*12) return i*1001;
2402     else        return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2406  * Is the time base unreliable.
2407  * This is a heuristic to balance between quick acceptance of the values in
2408  * the headers vs. some extra checks.
2409  * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2410  * MPEG-2 commonly misuses field repeat flags to store different framerates.
2411  * And there are "variable" fps files this needs to detect as well.
/* Returns non-zero when the codec time base should not be trusted as the
 * frame rate: implausibly small/large den/num ratios, or codecs known to
 * misreport it (MPEG-2, H.264).
 * NOTE(review): elided listing — the final condition/return is missing. */
2413 static int tb_unreliable(AVCodecContext *c){
2414     if(   c->time_base.den >= 101L*c->time_base.num
2415        || c->time_base.den <    5L*c->time_base.num
2416 /*       || c->codec_tag == AV_RL32("DIVX")
2417        || c->codec_tag == AV_RL32("XVID")*/
2418        || c->codec_id == CODEC_ID_MPEG2VIDEO
2419        || c->codec_id == CODEC_ID_H264
2425 #if FF_API_FORMAT_PARAMETERS
/* Deprecated pre-options API kept for ABI compatibility; forwards to
 * avformat_find_stream_info() with no per-stream options. */
2426 int av_find_stream_info(AVFormatContext *ic)
2428     return avformat_find_stream_info(ic, NULL);
2432 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2434 int i, count, ret, read_size, j;
2436 AVPacket pkt1, *pkt;
2437 int64_t old_offset = avio_tell(ic->pb);
2438 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2439 int flush_codecs = 1;
2442 av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
2444 for(i=0;i<ic->nb_streams;i++) {
2446 AVDictionary *thread_opt = NULL;
2447 st = ic->streams[i];
2449 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2450 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2451 /* if(!st->time_base.num)
2453 if(!st->codec->time_base.num)
2454 st->codec->time_base= st->time_base;
2456 //only for the split stuff
2457 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2458 st->parser = av_parser_init(st->codec->codec_id);
2459 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2460 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2463 codec = st->codec->codec ? st->codec->codec :
2464 avcodec_find_decoder(st->codec->codec_id);
2466 /* force thread count to 1 since the h264 decoder will not extract SPS
2467 * and PPS to extradata during multi-threaded decoding */
2468 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2470 /* Ensure that subtitle_header is properly set. */
2471 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2472 && codec && !st->codec->codec)
2473 avcodec_open2(st->codec, codec, options ? &options[i]
2476 //try to just open decoders, in case this is enough to get parameters
2477 if (!has_codec_parameters(st)) {
2478 if (codec && !st->codec->codec)
2479 avcodec_open2(st->codec, codec, options ? &options[i]
2483 av_dict_free(&thread_opt);
2486 for (i=0; i<ic->nb_streams; i++) {
2487 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2493 if (ff_check_interrupt(&ic->interrupt_callback)){
2495 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2499 /* check if one codec still needs to be handled */
2500 for(i=0;i<ic->nb_streams;i++) {
2501 int fps_analyze_framecount = 20;
2503 st = ic->streams[i];
2504 if (!has_codec_parameters(st))
2506 /* if the timebase is coarse (like the usual millisecond precision
2507 of mkv), we need to analyze more frames to reliably arrive at
2509 if (av_q2d(st->time_base) > 0.0005)
2510 fps_analyze_framecount *= 2;
2511 if (ic->fps_probe_size >= 0)
2512 fps_analyze_framecount = ic->fps_probe_size;
2513 /* variable fps and no guess at the real fps */
2514 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2515 && st->info->duration_count < fps_analyze_framecount
2516 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2518 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2520 if (st->first_dts == AV_NOPTS_VALUE &&
2521 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2522 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2525 if (i == ic->nb_streams) {
2526 /* NOTE: if the format has no header, then we need to read
2527 some packets to get most of the streams, so we cannot
2529 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2530 /* if we found the info for all the codecs, we can stop */
2532 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2537 /* we did not get all the codec info, but we read too much data */
2538 if (read_size >= ic->probesize) {
2540 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2541 for (i = 0; i < ic->nb_streams; i++)
2542 if (!ic->streams[i]->r_frame_rate.num &&
2543 ic->streams[i]->info->duration_count <= 1)
2544 av_log(ic, AV_LOG_WARNING,
2545 "Stream #%d: not enough frames to estimate rate; "
2546 "consider increasing probesize\n", i);
2550 /* NOTE: a new stream can be added there if no header in file
2551 (AVFMTCTX_NOHEADER) */
2552 ret = read_frame_internal(ic, &pkt1);
2553 if (ret == AVERROR(EAGAIN))
2561 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2562 if ((ret = av_dup_packet(pkt)) < 0)
2563 goto find_stream_info_err;
2565 read_size += pkt->size;
2567 st = ic->streams[pkt->stream_index];
2568 if (st->codec_info_nb_frames>1) {
2570 if (st->time_base.den > 0)
2571 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
2572 if (st->avg_frame_rate.num > 0)
2573 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, (AVRational){st->avg_frame_rate.den, st->avg_frame_rate.num}, AV_TIME_BASE_Q));
2575 if (t >= ic->max_analyze_duration) {
2576 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached at %"PRId64"\n", ic->max_analyze_duration, t);
2579 st->info->codec_info_duration += pkt->duration;
2582 int64_t last = st->info->last_dts;
2584 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2585 double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2586 int64_t duration= pkt->dts - last;
2588 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2589 // av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2590 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); i++) {
2591 int framerate= get_std_framerate(i);
2592 double sdts= dts*framerate/(1001*12);
2594 int ticks= lrintf(sdts+j*0.5);
2595 double error= sdts - ticks + j*0.5;
2596 st->info->duration_error[j][0][i] += error;
2597 st->info->duration_error[j][1][i] += error*error;
2600 st->info->duration_count++;
2601 // ignore the first 4 values, they might have some random jitter
2602 if (st->info->duration_count > 3)
2603 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2605 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2606 st->info->last_dts = pkt->dts;
2608 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2609 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2610 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2611 st->codec->extradata_size= i;
2612 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2613 if (!st->codec->extradata)
2614 return AVERROR(ENOMEM);
2615 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2616 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2620 /* if still no information, we try to open the codec and to
2621 decompress the frame. We try to avoid that in most cases as
2622 it takes longer and uses more memory. For MPEG-4, we need to
2623 decompress for QuickTime.
2625 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2626 least one frame of codec data, this makes sure the codec initializes
2627 the channel configuration and does not only trust the values from the container.
2629 try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2631 st->codec_info_nb_frames++;
2636 AVPacket empty_pkt = { 0 };
2638 av_init_packet(&empty_pkt);
2640 ret = -1; /* we could not have all the codec parameters before EOF */
2641 for(i=0;i<ic->nb_streams;i++) {
2642 st = ic->streams[i];
2644 /* flush the decoders */
2645 if (st->info->found_decoder == 1) {
2647 err = try_decode_frame(st, &empty_pkt,
2648 (options && i < orig_nb_streams) ?
2649 &options[i] : NULL);
2650 } while (err > 0 && !has_codec_parameters(st));
2653 av_log(ic, AV_LOG_INFO,
2654 "decoding for stream %d failed\n", st->index);
2658 if (!has_codec_parameters(st)){
2660 avcodec_string(buf, sizeof(buf), st->codec, 0);
2661 av_log(ic, AV_LOG_WARNING,
2662 "Could not find codec parameters (%s)\n", buf);
2669 // close codecs which were opened in try_decode_frame()
2670 for(i=0;i<ic->nb_streams;i++) {
2671 st = ic->streams[i];
2672 avcodec_close(st->codec);
2674 for(i=0;i<ic->nb_streams;i++) {
2675 st = ic->streams[i];
2676 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2677 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
2678 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2679 if(ff_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
2680 st->codec->codec_tag= tag;
2683 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2684 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2685 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2686 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2687 // the check for tb_unreliable() is not completely correct, since this is not about handling
2688 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2689 // ipmovie.c produces.
2690 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
2691 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2692 if (st->info->duration_count && !st->r_frame_rate.num
2693 && tb_unreliable(st->codec) /*&&
2694 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2695 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2697 double best_error= 0.01;
2699 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); j++) {
2702 if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
2704 if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
2707 int n= st->info->duration_count;
2708 double a= st->info->duration_error[k][0][j] / n;
2709 double error= st->info->duration_error[k][1][j]/n - a*a;
2711 if(error < best_error && best_error> 0.000000001){
2713 num = get_std_framerate(j);
2716 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
2719 // do not increase frame rate by more than 1 % in order to match a standard rate.
2720 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2721 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2724 if (!st->r_frame_rate.num){
2725 if( st->codec->time_base.den * (int64_t)st->time_base.num
2726 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2727 st->r_frame_rate.num = st->codec->time_base.den;
2728 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2730 st->r_frame_rate.num = st->time_base.den;
2731 st->r_frame_rate.den = st->time_base.num;
2734 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2735 if(!st->codec->bits_per_coded_sample)
2736 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2737 // set stream disposition based on audio service type
2738 switch (st->codec->audio_service_type) {
2739 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2740 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2741 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2742 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2743 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2744 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2745 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2746 st->disposition = AV_DISPOSITION_COMMENT; break;
2747 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2748 st->disposition = AV_DISPOSITION_KARAOKE; break;
2753 estimate_timings(ic, old_offset);
2755 compute_chapters_end(ic);
2757 find_stream_info_err:
2758 for (i=0; i < ic->nb_streams; i++) {
2759 if (ic->streams[i]->codec)
2760 ic->streams[i]->codec->thread_count = 0;
2761 av_freep(&ic->streams[i]->info);
2764 av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
2768 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
2772 for (i = 0; i < ic->nb_programs; i++) {
2773 if (ic->programs[i] == last) {
2777 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2778 if (ic->programs[i]->stream_index[j] == s)
2779 return ic->programs[i];
2785 int av_find_best_stream(AVFormatContext *ic,
2786 enum AVMediaType type,
2787 int wanted_stream_nb,
2789 AVCodec **decoder_ret,
2792 int i, nb_streams = ic->nb_streams;
2793 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2794 unsigned *program = NULL;
2795 AVCodec *decoder = NULL, *best_decoder = NULL;
2797 if (related_stream >= 0 && wanted_stream_nb < 0) {
2798 AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
2800 program = p->stream_index;
2801 nb_streams = p->nb_stream_indexes;
2804 for (i = 0; i < nb_streams; i++) {
2805 int real_stream_index = program ? program[i] : i;
2806 AVStream *st = ic->streams[real_stream_index];
2807 AVCodecContext *avctx = st->codec;
2808 if (avctx->codec_type != type)
2810 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2812 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2815 decoder = avcodec_find_decoder(st->codec->codec_id);
2818 ret = AVERROR_DECODER_NOT_FOUND;
2822 if (best_count >= st->codec_info_nb_frames)
2824 best_count = st->codec_info_nb_frames;
2825 ret = real_stream_index;
2826 best_decoder = decoder;
2827 if (program && i == nb_streams - 1 && ret < 0) {
2829 nb_streams = ic->nb_streams;
2830 i = 0; /* no related stream found, try again with everything */
2834 *decoder_ret = best_decoder;
2838 /*******************************************************/
2840 int av_read_play(AVFormatContext *s)
2842 if (s->iformat->read_play)
2843 return s->iformat->read_play(s);
2845 return avio_pause(s->pb, 0);
2846 return AVERROR(ENOSYS);
2849 int av_read_pause(AVFormatContext *s)
2851 if (s->iformat->read_pause)
2852 return s->iformat->read_pause(s);
2854 return avio_pause(s->pb, 1);
2855 return AVERROR(ENOSYS);
2858 void avformat_free_context(AVFormatContext *s)
2864 if (s->iformat && s->iformat->priv_class && s->priv_data)
2865 av_opt_free(s->priv_data);
2867 for(i=0;i<s->nb_streams;i++) {
2868 /* free all data in a stream component */
2871 av_parser_close(st->parser);
2873 if (st->attached_pic.data)
2874 av_free_packet(&st->attached_pic);
2875 av_dict_free(&st->metadata);
2876 av_freep(&st->index_entries);
2877 av_freep(&st->codec->extradata);
2878 av_freep(&st->codec->subtitle_header);
2879 av_freep(&st->codec);
2880 av_freep(&st->priv_data);
2881 av_freep(&st->info);
2884 for(i=s->nb_programs-1; i>=0; i--) {
2885 av_dict_free(&s->programs[i]->metadata);
2886 av_freep(&s->programs[i]->stream_index);
2887 av_freep(&s->programs[i]);
2889 av_freep(&s->programs);
2890 av_freep(&s->priv_data);
2891 while(s->nb_chapters--) {
2892 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2893 av_freep(&s->chapters[s->nb_chapters]);
2895 av_freep(&s->chapters);
2896 av_dict_free(&s->metadata);
2897 av_freep(&s->streams);
#if FF_API_CLOSE_INPUT_FILE
void av_close_input_file(AVFormatContext *s)
{
    /* Deprecated shim: forward to the replacement API. */
    avformat_close_input(&s);
}
#endif
2908 void avformat_close_input(AVFormatContext **ps)
2910 AVFormatContext *s = *ps;
2911 AVIOContext *pb = (s->iformat && (s->iformat->flags & AVFMT_NOFILE)) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2913 flush_packet_queue(s);
2914 if (s->iformat && (s->iformat->read_close))
2915 s->iformat->read_close(s);
2916 avformat_free_context(s);
#if FF_API_NEW_STREAM
AVStream *av_new_stream(AVFormatContext *s, int id)
{
    /* Deprecated shim: create a stream and tag it with the caller's id. */
    AVStream *st = avformat_new_stream(s, NULL);
    if (st)
        st->id = id;
    return st;
}
#endif
2932 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
2938 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2940 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2943 s->streams = streams;
2945 st = av_mallocz(sizeof(AVStream));
2948 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2952 st->info->last_dts = AV_NOPTS_VALUE;
2954 st->codec = avcodec_alloc_context3(c);
2956 /* no default bitrate if decoding */
2957 st->codec->bit_rate = 0;
2959 st->index = s->nb_streams;
2960 st->start_time = AV_NOPTS_VALUE;
2961 st->duration = AV_NOPTS_VALUE;
2962 /* we set the current DTS to 0 so that formats without any timestamps
2963 but durations get some timestamps, formats with some unknown
2964 timestamps have their first few packets buffered and the
2965 timestamps corrected before they are returned to the user */
2966 st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
2967 st->first_dts = AV_NOPTS_VALUE;
2968 st->probe_packets = MAX_PROBE_PACKETS;
2970 /* default pts setting is MPEG-like */
2971 avpriv_set_pts_info(st, 33, 1, 90000);
2972 st->last_IP_pts = AV_NOPTS_VALUE;
2973 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2974 st->pts_buffer[i]= AV_NOPTS_VALUE;
2975 st->reference_dts = AV_NOPTS_VALUE;
2977 st->sample_aspect_ratio = (AVRational){0,1};
2979 s->streams[s->nb_streams++] = st;
2983 AVProgram *av_new_program(AVFormatContext *ac, int id)
2985 AVProgram *program=NULL;
2988 av_dlog(ac, "new_program: id=0x%04x\n", id);
2990 for(i=0; i<ac->nb_programs; i++)
2991 if(ac->programs[i]->id == id)
2992 program = ac->programs[i];
2995 program = av_mallocz(sizeof(AVProgram));
2998 dynarray_add(&ac->programs, &ac->nb_programs, program);
2999 program->discard = AVDISCARD_NONE;
3006 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
3008 AVChapter *chapter = NULL;
3011 for(i=0; i<s->nb_chapters; i++)
3012 if(s->chapters[i]->id == id)
3013 chapter = s->chapters[i];
3016 chapter= av_mallocz(sizeof(AVChapter));
3019 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3021 av_dict_set(&chapter->metadata, "title", title, 0);
3023 chapter->time_base= time_base;
3024 chapter->start = start;
3030 /************************************************************/
3031 /* output media file */
3033 int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
3034 const char *format, const char *filename)
3036 AVFormatContext *s = avformat_alloc_context();
3045 oformat = av_guess_format(format, NULL, NULL);
3047 av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
3048 ret = AVERROR(EINVAL);
3052 oformat = av_guess_format(NULL, filename, NULL);
3054 ret = AVERROR(EINVAL);
3055 av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
3062 s->oformat = oformat;
3063 if (s->oformat->priv_data_size > 0) {
3064 s->priv_data = av_mallocz(s->oformat->priv_data_size);
3067 if (s->oformat->priv_class) {
3068 *(const AVClass**)s->priv_data= s->oformat->priv_class;
3069 av_opt_set_defaults(s->priv_data);
3072 s->priv_data = NULL;
3075 av_strlcpy(s->filename, filename, sizeof(s->filename));
3079 av_log(s, AV_LOG_ERROR, "Out of memory\n");
3080 ret = AVERROR(ENOMEM);
3082 avformat_free_context(s);
#if FF_API_ALLOC_OUTPUT_CONTEXT
AVFormatContext *avformat_alloc_output_context(const char *format,
                                               AVOutputFormat *oformat, const char *filename)
{
    /* Deprecated shim around avformat_alloc_output_context2(); maps the
     * error code onto a NULL return. */
    AVFormatContext *avctx;
    int ret = avformat_alloc_output_context2(&avctx, oformat, format, filename);
    return ret < 0 ? NULL : avctx;
}
#endif
3096 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
3098 const AVCodecTag *avctag;
3100 enum CodecID id = CODEC_ID_NONE;
3101 unsigned int tag = 0;
3104 * Check that tag + id is in the table
3105 * If neither is in the table -> OK
3106 * If tag is in the table with another id -> FAIL
3107 * If id is in the table with another tag -> FAIL unless strict < normal
3109 for (n = 0; s->oformat->codec_tag[n]; n++) {
3110 avctag = s->oformat->codec_tag[n];
3111 while (avctag->id != CODEC_ID_NONE) {
3112 if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
3114 if (id == st->codec->codec_id)
3117 if (avctag->id == st->codec->codec_id)
3122 if (id != CODEC_ID_NONE)
3124 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
/* Write the container header: apply caller options to the context and the
   muxer's private data, sanity-check each stream, fix up codec tags, call
   the muxer's write_header callback and initialize per-stream PTS
   generation.  Returns 0 on success, a negative AVERROR on failure.
   NOTE(review): this extract is elided — braces, goto targets and the
   'fail:' cleanup path are not visible here. */
3129 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
3133 AVDictionary *tmp = NULL;
/* work on a copy of the caller's options; consumed entries are removed */
3136 av_dict_copy(&tmp, *options, 0);
3137 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
/* forward remaining options to the muxer's private context, if it has one */
3139 if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
3140 (ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3143 // some sanity checks
3144 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
3145 av_log(s, AV_LOG_ERROR, "no streams\n");
3146 ret = AVERROR(EINVAL);
/* per-stream validation: required parameters depend on the media type */
3150 for(i=0;i<s->nb_streams;i++) {
3153 switch (st->codec->codec_type) {
3154 case AVMEDIA_TYPE_AUDIO:
3155 if(st->codec->sample_rate<=0){
3156 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
3157 ret = AVERROR(EINVAL);
/* derive block_align for fixed-size-sample codecs when unset */
3160 if(!st->codec->block_align)
3161 st->codec->block_align = st->codec->channels *
3162 av_get_bits_per_sample(st->codec->codec_id) >> 3;
3164 case AVMEDIA_TYPE_VIDEO:
3165 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
3166 av_log(s, AV_LOG_ERROR, "time base not set\n");
3167 ret = AVERROR(EINVAL);
3170 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
3171 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
3172 ret = AVERROR(EINVAL);
/* the stream and codec SAR must agree within a small tolerance */
3175 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)
3176 && FFABS(av_q2d(st->sample_aspect_ratio) - av_q2d(st->codec->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
3178 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
3179 "(%d/%d) and encoder layer (%d/%d)\n",
3180 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3181 st->codec->sample_aspect_ratio.num,
3182 st->codec->sample_aspect_ratio.den);
3183 ret = AVERROR(EINVAL);
/* reconcile the stream's codec_tag with the container's tag table */
3189 if(s->oformat->codec_tag){
3190 if( st->codec->codec_tag
3191 && st->codec->codec_id == CODEC_ID_RAWVIDEO
3192 && (av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 || av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) ==MKTAG('r', 'a', 'w', ' '))
3193 && !validate_codec_tag(s, st)){
3194 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi/mov, we override it here
3195 st->codec->codec_tag= 0;
3197 if(st->codec->codec_tag){
3198 if (!validate_codec_tag(s, st)) {
3199 char tagbuf[32], cortag[32];
3200 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
3201 av_get_codec_tag_string(cortag, sizeof(cortag), av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id));
3202 av_log(s, AV_LOG_ERROR,
3203 "Tag %s/0x%08x incompatible with output codec id '%d' (%s)\n",
3204 tagbuf, st->codec->codec_tag, st->codec->codec_id, cortag);
3205 ret = AVERROR_INVALIDDATA;
3209 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
3212 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
3213 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
3214 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
/* late allocation of muxer private data (when not done at alloc time) */
3217 if (!s->priv_data && s->oformat->priv_data_size > 0) {
3218 s->priv_data = av_mallocz(s->oformat->priv_data_size);
3219 if (!s->priv_data) {
3220 ret = AVERROR(ENOMEM);
3223 if (s->oformat->priv_class) {
3224 *(const AVClass**)s->priv_data= s->oformat->priv_class;
3225 av_opt_set_defaults(s->priv_data);
3226 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3231 /* set muxer identification string */
3232 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
3233 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
3236 if(s->oformat->write_header){
3237 ret = s->oformat->write_header(s);
3242 /* init PTS generation */
3243 for(i=0;i<s->nb_streams;i++) {
3244 int64_t den = AV_NOPTS_VALUE;
3247 switch (st->codec->codec_type) {
3248 case AVMEDIA_TYPE_AUDIO:
3249 den = (int64_t)st->time_base.num * st->codec->sample_rate;
3251 case AVMEDIA_TYPE_VIDEO:
3252 den = (int64_t)st->time_base.num * st->codec->time_base.den;
/* den stays AV_NOPTS_VALUE for types with no PTS generation */
3257 if (den != AV_NOPTS_VALUE) {
3259 ret = AVERROR_INVALIDDATA;
3262 frac_init(&st->pts, 0, 0, den);
/* on success, hand the unconsumed options back to the caller */
3267 av_dict_free(options);
3276 //FIXME merge with compute_pkt_fields
/* Fill in / validate a packet's timestamps and duration before muxing:
   derives a missing duration, fabricates pts for delay-free encoders that
   did not set one, reorders pts into dts through a small buffer when the
   codec has B-frame delay, and enforces dts monotonicity and pts >= dts.
   Returns 0 on success, AVERROR(EINVAL) on invalid timestamps.
   NOTE(review): elided extract — some braces/returns are not visible. */
3277 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
/* delay > 0 means the codec can reorder frames (B-frames) */
3278 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
3279 int num, den, frame_size, i;
3281 av_dlog(s, "compute_pkt_fields2: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
3282 av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), delay, pkt->size, pkt->stream_index);
3284 /* duration field */
3285 if (pkt->duration == 0) {
3286 compute_frame_duration(&num, &den, st, NULL, pkt);
3288 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
/* with no reordering delay, dts can stand in for a missing pts */
3292 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
3295 //XXX/FIXME this is a temporary hack until all encoders output pts
3296 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
3299 av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
3303 // pkt->pts= st->cur_dts;
3304 pkt->pts= st->pts.val;
3307 //calculate dts from pts
/* insertion-sort pts into a small reorder window; its minimum is the dts */
3308 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
3309 st->pts_buffer[0]= pkt->pts;
3310 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
3311 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
3312 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
3313 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
3315 pkt->dts= st->pts_buffer[0];
/* dts must strictly increase (or be non-decreasing for TS_NONSTRICT muxers) */
3318 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)){
3319 av_log(s, AV_LOG_ERROR,
3320 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
3321 st->index, av_ts2str(st->cur_dts), av_ts2str(pkt->dts));
3322 return AVERROR(EINVAL);
3324 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3325 av_log(s, AV_LOG_ERROR, "pts (%s) < dts (%s) in stream %d\n",
3326 av_ts2str(pkt->pts), av_ts2str(pkt->dts), st->index);
3327 return AVERROR(EINVAL);
3330 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%s dts2:%s\n", av_ts2str(pkt->pts), av_ts2str(pkt->dts));
3331 st->cur_dts= pkt->dts;
3332 st->pts.val= pkt->dts;
/* advance the fractional pts counter by one frame's worth of time */
3335 switch (st->codec->codec_type) {
3336 case AVMEDIA_TYPE_AUDIO:
3337 frame_size = get_audio_frame_size(st->codec, pkt->size, 1);
3339 /* HACK/FIXME, we skip the initial 0 size packets as they are most
3340 likely equal to the encoder delay, but it would be better if we
3341 had the real timestamps from the encoder */
3342 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3343 frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3346 case AVMEDIA_TYPE_VIDEO:
3347 frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
3355 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3360 if (s->oformat->flags & AVFMT_ALLOW_FLUSH)
3361 return s->oformat->write_packet(s, pkt);
3365 ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
3367 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3370 ret= s->oformat->write_packet(s, pkt);
3373 s->streams[pkt->stream_index]->nb_frames++;
3377 #define CHUNK_START 0x1000
/* Insert a copy of 'pkt' into the context's interleaving buffer, keeping the
   list sorted according to 'compare'.  With chunked interleaving enabled
   (max_chunk_size / max_chunk_duration) packets are grouped into chunks that
   are kept together during sorting.
   NOTE(review): elided extract — several braces and list-walk lines are not
   visible here. */
3379 int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3380 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3382 AVPacketList **next_point, *this_pktl;
3383 AVStream *st= s->streams[pkt->stream_index];
3384 int chunked= s->max_chunk_size || s->max_chunk_duration;
3386 this_pktl = av_mallocz(sizeof(AVPacketList));
3388 return AVERROR(ENOMEM);
3389 this_pktl->pkt= *pkt;
3390 pkt->destruct= NULL; // do not free original but only the copy
3391 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
/* start the insertion search after this stream's last buffered packet */
3393 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3394 next_point = &(st->last_in_packet_buffer->next);
3396 next_point = &s->packet_buffer;
/* chunked mode: either extend the current chunk or start a new one */
3401 uint64_t max= av_rescale_q(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base);
3402 if( st->interleaver_chunk_size + pkt->size <= s->max_chunk_size-1U
3403 && st->interleaver_chunk_duration + pkt->duration <= max-1U){
3404 st->interleaver_chunk_size += pkt->size;
3405 st->interleaver_chunk_duration += pkt->duration;
3408 st->interleaver_chunk_size =
3409 st->interleaver_chunk_duration = 0;
3410 this_pktl->pkt.flags |= CHUNK_START;
/* walk forward until the sort position is found (chunks stay intact) */
3414 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3416 && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
3417 || !compare(s, &(*next_point)->pkt, pkt))){
3418 next_point= &(*next_point)->next;
/* fast path: the new packet sorts after everything, append at the tail */
3423 next_point = &(s->packet_buffer_end->next);
3426 assert(!*next_point);
3428 s->packet_buffer_end= this_pktl;
/* splice the node in and remember it as this stream's newest entry */
3431 this_pktl->next= *next_point;
3433 s->streams[pkt->stream_index]->last_in_packet_buffer=
3434 *next_point= this_pktl;
/* Ordering predicate for dts-based interleaving: compares 'next' against
   'pkt' by dts in their own time bases.  When audio_preload is set and
   exactly one of the two packets is audio, the audio packet's effective
   timestamp is shifted earlier by the preload amount (with a wider exact
   computation when the rescale could be imprecise).  Ties fall back to
   stream index order.
   NOTE(review): elided extract — the overflow-check branch and return of
   'comp' are not visible here. */
3438 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3440 AVStream *st = s->streams[ pkt ->stream_index];
3441 AVStream *st2= s->streams[ next->stream_index];
3442 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3444 if(s->audio_preload && ((st->codec->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codec->codec_type == AVMEDIA_TYPE_AUDIO))){
/* approximate comparison in AV_TIME_BASE units */
3445 int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3446 int64_t ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO);
/* exact cross-multiplied difference, used when the rescaled values tie */
3448 ts= ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
3449 -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
3452 comp= (ts>ts2) - (ts<ts2);
/* tie-break deterministically on stream index */
3456 return pkt->stream_index < next->stream_index;
/* Generic dts-ordered interleaver: buffer 'pkt' (if any), then emit the
   head of the buffer once every stream has contributed a packet, once only
   sparse streams (e.g. subtitles) are missing and the buffered dts span
   exceeds a threshold, or when flushing.  Returns 1 if a packet was placed
   in *out, 0 when more input is needed.
   NOTE(review): elided extract — variable declarations, braces and the
   final returns are not visible here. */
3460 int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3461 AVPacket *pkt, int flush)
3464 int stream_count=0, noninterleaved_count=0;
3465 int64_t delta_dts_max = 0;
3469 ret = ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
/* count streams that have buffered data vs. sparse (subtitle) streams */
3474 for(i=0; i < s->nb_streams; i++) {
3475 if (s->streams[i]->last_in_packet_buffer) {
3477 } else if(s->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
3478 ++noninterleaved_count;
3482 if (s->nb_streams == stream_count) {
/* measure how far the buffer head lags behind the newest buffered dts */
3485 for(i=0; i < s->nb_streams; i++) {
3486 if (s->streams[i]->last_in_packet_buffer) {
3488 av_rescale_q(s->streams[i]->last_in_packet_buffer->pkt.dts,
3489 s->streams[i]->time_base,
3491 av_rescale_q(s->packet_buffer->pkt.dts,
3492 s->streams[s->packet_buffer->pkt.stream_index]->time_base,
3494 delta_dts_max= FFMAX(delta_dts_max, delta_dts);
/* only sparse streams are missing and we lag a lot: force a flush */
3497 if(s->nb_streams == stream_count+noninterleaved_count &&
3498 delta_dts_max > 20*AV_TIME_BASE) {
3499 av_log(s, AV_LOG_DEBUG, "flushing with %d noninterleaved\n", noninterleaved_count);
/* pop the head of the buffer and hand it to the caller */
3503 if(stream_count && flush){
3504 pktl= s->packet_buffer;
3507 s->packet_buffer= pktl->next;
3508 if(!s->packet_buffer)
3509 s->packet_buffer_end= NULL;
3511 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3512 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
/* nothing to emit yet */
3516 av_init_packet(out);
#if FF_API_INTERLEAVE_PACKET
int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
                                 AVPacket *pkt, int flush)
{
    /* Deprecated public alias for the internal dts-based interleaver. */
    return ff_interleave_packet_per_dts(s, out, pkt, flush);
}
#endif
3530 * Interleave an AVPacket correctly so it can be muxed.
3531 * @param out the interleaved packet will be output here
3532 * @param in the input packet
3533 * @param flush 1 if no further packets are available as input and all
3534 * remaining packets should be output
3535 * @return 1 if a packet was output, 0 if no packet could be output,
3536 * < 0 if an error occurred
3538 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3539 if (s->oformat->interleave_packet) {
3540 int ret = s->oformat->interleave_packet(s, out, in, flush);
3545 return ff_interleave_packet_per_dts(s, out, in, flush);
/* Public muxing entry point with interleaving: validates/completes the
   packet's timestamps, feeds it to the interleaver and writes out every
   packet the interleaver releases.  A NULL 'pkt' flushes the buffer.
   NOTE(review): elided extract — the flush setup, the write loop braces and
   the final return are not visible here. */
3548 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3552 AVStream *st= s->streams[ pkt->stream_index];
3554 //FIXME/XXX/HACK drop zero sized packets
3555 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3558 av_dlog(s, "av_interleaved_write_frame size:%d dts:%s pts:%s\n",
3559 pkt->size, av_ts2str(pkt->dts), av_ts2str(pkt->pts));
3560 if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
/* interleaving needs a valid dts unless the muxer is timestamp-free */
3563 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3564 return AVERROR(EINVAL);
3566 av_dlog(s, "av_interleaved_write_frame FLUSH\n");
/* drain the interleaver: write every packet it is ready to release */
3572 int ret= interleave_packet(s, &opkt, pkt, flush);
3573 if(ret<=0) //FIXME cleanup needed for ret<0 ?
3576 ret= s->oformat->write_packet(s, &opkt);
3578 s->streams[opkt.stream_index]->nb_frames++;
3580 av_free_packet(&opkt);
/* surface deferred I/O errors from the byte context */
3585 if(s->pb && s->pb->error)
3586 return s->pb->error;
/* Finish muxing: flush all packets still held by the interleaver, call the
   muxer's write_trailer callback and free per-stream muxing state.
   Returns 0 on success or a (possibly deferred I/O) error code.
   NOTE(review): elided extract — the drain loop braces, 'fail:' label and
   final return are not visible here. */
3590 int av_write_trailer(AVFormatContext *s)
/* drain the interleaver with flush=1 until it has nothing left */
3596 ret= interleave_packet(s, &pkt, NULL, 1);
3597 if(ret<0) //FIXME cleanup needed for ret<0 ?
3602 ret= s->oformat->write_packet(s, &pkt);
3604 s->streams[pkt.stream_index]->nb_frames++;
3606 av_free_packet(&pkt);
/* abort on deferred I/O errors before writing the trailer */
3610 if(s->pb && s->pb->error)
3614 if(s->oformat->write_trailer)
3615 ret = s->oformat->write_trailer(s);
3620 ret = s->pb ? s->pb->error : 0;
/* release per-stream muxer state and the muxer's private data */
3621 for(i=0;i<s->nb_streams;i++) {
3622 av_freep(&s->streams[i]->priv_data);
3623 av_freep(&s->streams[i]->index_entries);
3625 if (s->oformat->priv_class)
3626 av_opt_free(s->priv_data);
3627 av_freep(&s->priv_data);
3631 int av_get_output_timestamp(struct AVFormatContext *s, int stream,
3632 int64_t *dts, int64_t *wall)
3634 if (!s->oformat || !s->oformat->get_output_timestamp)
3635 return AVERROR(ENOSYS);
3636 s->oformat->get_output_timestamp(s, stream, dts, wall);
3640 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3643 AVProgram *program=NULL;
3646 if (idx >= ac->nb_streams) {
3647 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3651 for(i=0; i<ac->nb_programs; i++){
3652 if(ac->programs[i]->id != progid)
3654 program = ac->programs[i];
3655 for(j=0; j<program->nb_stream_indexes; j++)
3656 if(program->stream_index[j] == idx)
3659 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3662 program->stream_index = tmp;
3663 program->stream_index[program->nb_stream_indexes++] = idx;
3668 static void print_fps(double d, const char *postfix){
3669 uint64_t v= lrintf(d*100);
3670 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3671 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3672 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
3675 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3677 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3678 AVDictionaryEntry *tag=NULL;
3680 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3681 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3682 if(strcmp("language", tag->key)){
3683 const char *p = tag->value;
3684 av_log(ctx, AV_LOG_INFO, "%s %-16s: ", indent, tag->key);
3687 size_t len = strcspn(p, "\xd\xa");
3688 av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3689 av_log(ctx, AV_LOG_INFO, "%s", tmp);
3691 if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3692 if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s %-16s: ", indent, "");
3695 av_log(ctx, AV_LOG_INFO, "\n");
3701 /* "user interface" functions */
/**
 * Log a one-line human-readable description of stream i of ic:
 * "Stream #index:i[0xID](lang): <codec string>, SAR/DAR, rates, flags",
 * followed by the stream's metadata.  `index` is the container index shown
 * before the colon; `is_output` selects muxer vs demuxer flags/strings.
 * NOTE(review): the buf[] declaration and several braces are elided here.
 */
3702 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3705 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3706 AVStream *st = ic->streams[i];
3707 int g = av_gcd(st->time_base.num, st->time_base.den);
3708 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3709 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3710 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);
3711 /* the pid is an important information, so we display it */
3712 /* XXX: add a generic system */
3713 if (flags & AVFMT_SHOW_IDS)
3714 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3716 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
/* debug level only: frame count seen during probing + reduced time base */
3717 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3718 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* print SAR/DAR only when the stream-level SAR differs from the codec's */
3719 if (st->sample_aspect_ratio.num && // default
3720 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3721 AVRational display_aspect_ratio;
3722 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3723 st->codec->width*st->sample_aspect_ratio.num,
3724 st->codec->height*st->sample_aspect_ratio.den,
3726 av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3727 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3728 display_aspect_ratio.num, display_aspect_ratio.den);
/* video only: average fps, "real" frame rate, stream and codec tick rates */
3730 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3731 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3732 print_fps(av_q2d(st->avg_frame_rate), "fps");
3733 if(st->r_frame_rate.den && st->r_frame_rate.num)
3734 print_fps(av_q2d(st->r_frame_rate), "tbr");
3735 if(st->time_base.den && st->time_base.num)
3736 print_fps(1/av_q2d(st->time_base), "tbn");
3737 if(st->codec->time_base.den && st->codec->time_base.num)
3738 print_fps(1/av_q2d(st->codec->time_base), "tbc");
/* one "(name)" suffix per disposition flag set on the stream */
3740 if (st->disposition & AV_DISPOSITION_DEFAULT)
3741 av_log(NULL, AV_LOG_INFO, " (default)");
3742 if (st->disposition & AV_DISPOSITION_DUB)
3743 av_log(NULL, AV_LOG_INFO, " (dub)");
3744 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3745 av_log(NULL, AV_LOG_INFO, " (original)");
3746 if (st->disposition & AV_DISPOSITION_COMMENT)
3747 av_log(NULL, AV_LOG_INFO, " (comment)");
3748 if (st->disposition & AV_DISPOSITION_LYRICS)
3749 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3750 if (st->disposition & AV_DISPOSITION_KARAOKE)
3751 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3752 if (st->disposition & AV_DISPOSITION_FORCED)
3753 av_log(NULL, AV_LOG_INFO, " (forced)");
3754 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3755 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3756 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3757 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3758 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3759 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3760 av_log(NULL, AV_LOG_INFO, "\n");
3761 dump_metadata(NULL, st->metadata, " ");
/**
 * Public entry point: log a human-readable summary of a format context —
 * container name/url, global metadata, duration, start time, bitrate,
 * chapters, programs, and one line per stream.  Streams that already
 * appeared under a program are tracked in `printed` so the final loop only
 * dumps the leftovers.
 * NOTE(review): the remaining parameters (index, url, is_output), error
 * path, several computations (hours/mins) and the closing free()/braces
 * are elided from this chunk.
 */
3764 void av_dump_format(AVFormatContext *ic,
/* per-stream "already printed under a program" flags; NULL if no streams */
3770 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3771 if (ic->nb_streams && !printed)
3774 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3775 is_output ? "Output" : "Input",
3777 is_output ? ic->oformat->name : ic->iformat->name,
3778 is_output ? "to" : "from", url);
3779 dump_metadata(NULL, ic->metadata, " ");
3781 av_log(NULL, AV_LOG_INFO, " Duration: ");
3782 if (ic->duration != AV_NOPTS_VALUE) {
3783 int hours, mins, secs, us;
3784 secs = ic->duration / AV_TIME_BASE;
3785 us = ic->duration % AV_TIME_BASE;
/* duration printed as HH:MM:SS.cc (centiseconds) */
3790 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3791 (100 * us) / AV_TIME_BASE);
3793 av_log(NULL, AV_LOG_INFO, "N/A");
3795 if (ic->start_time != AV_NOPTS_VALUE) {
3797 av_log(NULL, AV_LOG_INFO, ", start: ");
3798 secs = ic->start_time / AV_TIME_BASE;
/* abs(): fractional part must be non-negative even for negative starts */
3799 us = abs(ic->start_time % AV_TIME_BASE);
3800 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3801 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3803 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3805 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3807 av_log(NULL, AV_LOG_INFO, "N/A");
3809 av_log(NULL, AV_LOG_INFO, "\n");
/* chapters: start/end in seconds via each chapter's own time base */
3811 for (i = 0; i < ic->nb_chapters; i++) {
3812 AVChapter *ch = ic->chapters[i];
3813 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3814 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3815 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3817 dump_metadata(NULL, ch->metadata, " ");
/* programs: dump each program's streams and mark them as printed */
3819 if(ic->nb_programs) {
3820 int j, k, total = 0;
3821 for(j=0; j<ic->nb_programs; j++) {
3822 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3824 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3825 name ? name->value : "");
3826 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3827 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3828 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3829 printed[ic->programs[j]->stream_index[k]] = 1;
3831 total += ic->programs[j]->nb_stream_indexes;
3833 if (total < ic->nb_streams)
3834 av_log(NULL, AV_LOG_INFO, " No Program\n");
/* finally: any stream not claimed by a program */
3836 for(i=0;i<ic->nb_streams;i++)
3838 dump_stream_format(ic, i, index, is_output);
3843 int64_t av_gettime(void)
3846 gettimeofday(&tv,NULL);
3847 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/**
 * Current time on the NTP timescale: wall-clock microseconds shifted to the
 * NTP epoch (1900-01-01) by NTP_OFFSET_US.  The /1000*1000 deliberately
 * truncates to whole milliseconds before adding the offset.
 */
3850 uint64_t ff_ntp_time(void)
3852 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/**
 * Expand a "%d"/"%0Nd" pattern in `path` with the frame `number` and write
 * the result into buf (at most buf_size-1 chars plus NUL).  Exactly one %d
 * specifier is required; literal "%%" escapes a percent sign.
 * Returns 0 on success, negative on error (no %d found, or overflow).
 * NOTE(review): the scanning loop, copy of literal characters, and return
 * paths are elided from this chunk.
 */
3855 int av_get_frame_filename(char *buf, int buf_size,
3856 const char *path, int number)
3859 char *q, buf1[20], c;
3860 int nd, len, percentd_found;
/* parse the zero-pad width digits after '%'
 * NOTE(review): isdigit() on a plain char is UB for negative values —
 * should cast through (unsigned char); confirm *p's provenance. */
3872 while (isdigit(*p)) {
3873 nd = nd * 10 + *p++ - '0';
3876 } while (isdigit(c));
/* render the number zero-padded to nd digits into a scratch buffer */
3885 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
/* bail out if the expansion would overflow the destination */
3887 if ((q - buf + len) > buf_size - 1)
3889 memcpy(q, buf1, len);
3897 if ((q - buf) < buf_size - 1)
3901 if (!percentd_found)
/**
 * Hex-dump `size` bytes of buf, 16 per row, followed by an ASCII column in
 * which non-printable bytes are substituted.  Output goes to the FILE* if
 * f is non-NULL, otherwise to av_log(avcl, level, ...) — selected by the
 * PRINT macro below.
 * NOTE(review): declarations (i, j, len, c), the offset printing and the
 * row terminator are elided from this chunk.
 */
3910 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3914 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3916 for(i=0;i<size;i+=16) {
3923 PRINT(" %02x", buf[i+j]);
3928 for(j=0;j<len;j++) {
/* replace non-printable ASCII in the character column */
3930 if (c < ' ' || c > '~')
/** Public wrapper: hex-dump buf to a stdio FILE (no av_log involved). */
3939 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3941 hex_dump_internal(NULL, f, 0, buf, size);
/** Public wrapper: hex-dump buf through av_log() at the given level. */
3944 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3946 hex_dump_internal(avcl, NULL, level, buf, size);
/**
 * Dump an AVPacket's fields (stream index, keyframe flag, duration,
 * dts/pts in seconds via `time_base`, size) and optionally its payload as
 * hex.  Output target (FILE* vs av_log) is selected by the PRINT macro,
 * same convention as hex_dump_internal().
 * NOTE(review): braces, "dts="/"pts=" labels and the dump_payload guard
 * are elided from this chunk.
 */
3949 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3952 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3953 PRINT("stream #%d:\n", pkt->stream_index);
3954 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3955 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3956 /* DTS is _always_ valid after av_read_frame() */
3958 if (pkt->dts == AV_NOPTS_VALUE)
3961 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3962 /* PTS may not be known if B-frames are present. */
3964 if (pkt->pts == AV_NOPTS_VALUE)
3967 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3969 PRINT(" size=%d\n", pkt->size);
3972 av_hex_dump(f, pkt->data, pkt->size);
/**
 * Deprecated-style wrapper: dump a packet to a FILE assuming timestamps in
 * AV_TIME_BASE units (prefer av_pkt_dump2 which takes the stream's base).
 */
3976 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3978 AVRational tb = { 1, AV_TIME_BASE };
3979 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
/** Dump a packet to a FILE, interpreting timestamps in st's time base. */
3983 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3985 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
/**
 * Deprecated-style wrapper: dump a packet via av_log() assuming timestamps
 * in AV_TIME_BASE units (prefer av_pkt_dump_log2).
 */
3989 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3991 AVRational tb = { 1, AV_TIME_BASE };
3992 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
/** Dump a packet via av_log(), interpreting timestamps in st's time base. */
3996 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3999 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/**
 * Split a URL of the form proto://user:pass@host:port/path into its
 * components.  Each output buffer may be NULL-size (size 0) to skip that
 * component; missing components are returned as empty strings and the port
 * as -1.  A string with no "proto:" prefix is treated as a plain file path.
 * NOTE(review): the url/port_ptr parameter lines, some braces and the
 * fragment handling are elided from this chunk.
 */
4002 void av_url_split(char *proto, int proto_size,
4003 char *authorization, int authorization_size,
4004 char *hostname, int hostname_size,
4006 char *path, int path_size,
4009 const char *p, *ls, *at, *col, *brk;
/* default every component to "empty" before parsing */
4011 if (port_ptr) *port_ptr = -1;
4012 if (proto_size > 0) proto[0] = 0;
4013 if (authorization_size > 0) authorization[0] = 0;
4014 if (hostname_size > 0) hostname[0] = 0;
4015 if (path_size > 0) path[0] = 0;
4017 /* parse protocol */
4018 if ((p = strchr(url, ':'))) {
4019 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
4024 /* no protocol means plain filename */
4025 av_strlcpy(path, url, path_size);
4029 /* separate path from hostname */
4030 ls = strchr(p, '/');
4032 ls = strchr(p, '?');
4034 av_strlcpy(path, ls, path_size);
4036 ls = &p[strlen(p)]; // XXX
4038 /* the rest is hostname, use that to parse auth/port */
4040 /* authorization (user[:pass]@hostname) */
4041 if ((at = strchr(p, '@')) && at < ls) {
4042 av_strlcpy(authorization, p,
4043 FFMIN(authorization_size, at + 1 - p))
4044 p = at + 1; /* skip '@' */
/* bracketed numeric IPv6 host: [::1]:port */
4047 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
4049 av_strlcpy(hostname, p + 1,
4050 FFMIN(hostname_size, brk - p));
4051 if (brk[1] == ':' && port_ptr)
4052 *port_ptr = atoi(brk + 2);
4053 } else if ((col = strchr(p, ':')) && col < ls) {
4054 av_strlcpy(hostname, p,
4055 FFMIN(col + 1 - p, hostname_size));
4056 if (port_ptr) *port_ptr = atoi(col + 1);
4058 av_strlcpy(hostname, p,
4059 FFMIN(ls + 1 - p, hostname_size));
/**
 * Render `s` bytes of src as hexadecimal text into buff (2*s characters,
 * upper case unless `lowercase` is set).  The output is NOT NUL-terminated;
 * callers must terminate or size the buffer themselves.
 *
 * @return buff, for call chaining
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    const char *hex_table = lowercase ? "0123456789abcdef"
                                      : "0123456789ABCDEF";
    int i;

    for (i = 0; i < s; i++) {
        buff[2 * i]     = hex_table[src[i] >> 4];   /* high nibble */
        buff[2 * i + 1] = hex_table[src[i] & 0x0F]; /* low nibble  */
    }

    return buff;
}
/**
 * Parse a whitespace-separated hex string `p` into bytes.  If data is
 * non-NULL the bytes are stored there; the byte count is returned either
 * way (so a NULL first pass can size the buffer).
 * NOTE(review): the main loop, nibble pairing and return are elided from
 * this chunk; only the digit classification is visible.
 */
4084 int ff_hex_to_data(uint8_t *data, const char *p)
4091 p += strspn(p, SPACE_CHARS);
/* cast to unsigned char before toupper(): plain char may be negative */
4094 c = toupper((unsigned char) *p++);
4095 if (c >= '0' && c <= '9')
4097 else if (c >= 'A' && c <= 'F')
/**
 * Deprecated public shim (compiled only under FF_API_SET_PTS_INFO):
 * forwards to the private avpriv_set_pts_info() below.
 */
4112 #if FF_API_SET_PTS_INFO
4113 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
4114 unsigned int pts_num, unsigned int pts_den)
4116 avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
/**
 * Set a stream's timestamp base to pts_num/pts_den (reduced) and its
 * timestamp wrap width to pts_wrap_bits.  An unreducible or non-positive
 * time base is rejected and the stream left unchanged.
 * NOTE(review): the new_tb declaration, the else branch of av_reduce and
 * the early return are elided from this chunk.
 */
4120 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
4121 unsigned int pts_num, unsigned int pts_den)
/* av_reduce() returns nonzero when the reduction was exact within INT_MAX */
4124 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
4125 if(new_tb.num != pts_num)
4126 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
4128 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
4130 if(new_tb.num <= 0 || new_tb.den <= 0) {
4131 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase for st:%d\n", s->index);
4134 s->time_base = new_tb;
4135 s->pts_wrap_bits = pts_wrap_bits;
/**
 * Assemble "proto://auth@host:port/<fmt...>" into str (size bytes).
 * Numeric IPv6 hostnames are detected via getaddrinfo(AI_NUMERICHOST) and
 * wrapped in brackets; port is appended only when >= 0; the trailing part
 * is a printf-style format expanded with the variadic arguments.
 * Returns the total string length.
 * NOTE(review): several guards (proto/port checks), freeaddrinfo, va_start/
 * va_end and the return are elided from this chunk.
 */
4138 int ff_url_join(char *str, int size, const char *proto,
4139 const char *authorization, const char *hostname,
4140 int port, const char *fmt, ...)
4143 struct addrinfo hints = { 0 }, *ai;
4148 av_strlcatf(str, size, "%s://", proto);
4149 if (authorization && authorization[0])
4150 av_strlcatf(str, size, "%s@", authorization);
4151 #if CONFIG_NETWORK && defined(AF_INET6)
4152 /* Determine if hostname is a numerical IPv6 address,
4153 * properly escape it within [] in that case. */
4154 hints.ai_flags = AI_NUMERICHOST;
4155 if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
4156 if (ai->ai_family == AF_INET6) {
4157 av_strlcat(str, "[", size);
4158 av_strlcat(str, hostname, size);
4159 av_strlcat(str, "]", size);
4161 av_strlcat(str, hostname, size);
4166 /* Not an IPv6 address, just output the plain string. */
4167 av_strlcat(str, hostname, size);
4170 av_strlcatf(str, size, ":%d", port);
/* append the caller's printf-style suffix after what's already in str */
4173 int len = strlen(str);
4176 vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
/**
 * Forward a packet from a source (chained) muxer context into destination
 * stream dst_stream of dst, rescaling pts/dts from the source stream's
 * time base to the destination stream's before writing.
 * Returns av_write_frame()'s result.
 * NOTE(review): the local_pkt declaration and the copy of *pkt into it are
 * elided from this chunk; the packet payload is shared, not duplicated.
 */
4182 int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
4183 AVFormatContext *src)
4188 local_pkt.stream_index = dst_stream;
4189 if (pkt->pts != AV_NOPTS_VALUE)
4190 local_pkt.pts = av_rescale_q(pkt->pts,
4191 src->streams[pkt->stream_index]->time_base,
4192 dst->streams[dst_stream]->time_base);
4193 if (pkt->dts != AV_NOPTS_VALUE)
4194 local_pkt.dts = av_rescale_q(pkt->dts,
4195 src->streams[pkt->stream_index]->time_base,
4196 dst->streams[dst_stream]->time_base);
4197 return av_write_frame(dst, &local_pkt);
/**
 * Parse comma/whitespace-separated key=value pairs from str.  For each key
 * the callback is asked for a destination buffer (dest/dest_len); the value
 * (optionally double-quoted, with backslash escapes) is copied into it.
 * A NULL/too-small dest simply discards the value.
 * NOTE(review): the context parameter line, loop framing, quote/escape
 * branches and NUL-termination are elided from this chunk.
 */
4200 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
4203 const char *ptr = str;
4205 /* Parse key=value pairs. */
4208 char *dest = NULL, *dest_end;
4209 int key_len, dest_len = 0;
4211 /* Skip whitespace and potential commas. */
4212 while (*ptr && (isspace(*ptr) || *ptr == ','))
4219 if (!(ptr = strchr(key, '=')))
4222 key_len = ptr - key;
/* the callback supplies (or refuses) a destination buffer for this key */
4224 callback_get_buf(context, key, key_len, &dest, &dest_len);
4225 dest_end = dest + dest_len - 1;
/* quoted value: copy until the closing quote */
4229 while (*ptr && *ptr != '\"') {
4233 if (dest && dest < dest_end)
4237 if (dest && dest < dest_end)
/* unquoted value: copy until whitespace or comma */
4245 for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
4246 if (dest && dest < dest_end)
4254 int ff_find_stream_index(AVFormatContext *s, int id)
4257 for (i = 0; i < s->nb_streams; i++) {
4258 if (s->streams[i]->id == id)
/**
 * Resolve `rel` against `base` into buf: host-absolute paths keep the
 * base's scheme+host; fully-qualified URLs are copied verbatim; otherwise
 * the base's file name is stripped and "../" components are popped before
 * appending rel.
 * NOTE(review): the rel parameter line, the char *sep declaration, several
 * guards/returns and the final truncations are elided from this chunk.
 */
4264 void ff_make_absolute_url(char *buf, int size, const char *base,
4268 /* Absolute path, relative to the current server */
4269 if (base && strstr(base, "://") && rel[0] == '/') {
4271 av_strlcpy(buf, base, size);
4272 sep = strstr(buf, "://");
/* cut the base after its host part, then append the absolute path */
4275 sep = strchr(sep, '/');
4279 av_strlcat(buf, rel, size);
4282 /* If rel actually is an absolute url, just copy it */
4283 if (!base || strstr(rel, "://") || rel[0] == '/') {
4284 av_strlcpy(buf, rel, size);
4288 av_strlcpy(buf, base, size);
4289 /* Remove the file name from the base url */
4290 sep = strrchr(buf, '/');
/* pop one trailing directory from the base per leading "../" in rel */
4295 while (av_strstart(rel, "../", NULL) && sep) {
4296 /* Remove the path delimiter at the end */
4298 sep = strrchr(buf, '/');
4299 /* If the next directory name to pop off is "..", break here */
4300 if (!strcmp(sep ? &sep[1] : buf, "..")) {
4301 /* Readd the slash we just removed */
4302 av_strlcat(buf, "/", size);
4305 /* Cut off the directory name */
4312 av_strlcat(buf, rel, size);
/**
 * Convert an ISO-8601-style date string ("YYYY-MM-DD hh:mm:ss" or
 * "YYYY-MM-DDThh:mm:ss") to Unix time (UTC, via av_timegm).  The spaces
 * around '-' in the strptime formats are deliberate: strptime treats
 * whitespace in the format as "skip zero or more whitespace", so both
 * compact and spaced inputs match.  Without strptime() support the
 * function only warns and returns (fallback value elided from this chunk).
 * NOTE(review): the #if HAVE_STRPTIME framing, ret1/ret2 declarations and
 * returns are elided from this view.
 */
4315 int64_t ff_iso8601_to_unix_time(const char *datestr)
4318 struct tm time1 = {0}, time2 = {0};
4320 ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
4321 ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
/* prefer whichever format consumed the string ("T" variant checked first) */
4323 return av_timegm(&time2);
4325 return av_timegm(&time1);
4327 av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert "
4328 "the date string.\n");
/**
 * Ask whether a muxer can store a codec: defer to the muxer's own
 * query_codec() callback if present, else fall back to its codec_tag table,
 * else accept only the muxer's default video/audio/subtitle codecs.
 * Returns 1/0 for a definite answer, AVERROR_PATCHWELCOME when the muxer
 * provides no way to tell.
 * NOTE(review): the NULL-ofmt guard and some braces are elided from this
 * chunk.
 */
4333 int avformat_query_codec(AVOutputFormat *ofmt, enum CodecID codec_id, int std_compliance)
4336 if (ofmt->query_codec)
4337 return ofmt->query_codec(codec_id, std_compliance);
4338 else if (ofmt->codec_tag)
4339 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4340 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
4341 codec_id == ofmt->subtitle_codec)
4344 return AVERROR_PATCHWELCOME;
/**
 * Globally initialize the network layer (sets the "inited globally" flag
 * so per-use init/deinit becomes a no-op, then calls ff_network_init()).
 * NOTE(review): the declaration of ret, the #if CONFIG_NETWORK framing and
 * the returns are elided from this chunk.
 */
4347 int avformat_network_init(void)
4351 ff_network_inited_globally = 1;
4352 if ((ret = ff_network_init()) < 0)
/**
 * Globally deinitialize the network layer — counterpart to
 * avformat_network_init().
 * NOTE(review): the entire function body is elided from this chunk.
 */
4359 int avformat_network_deinit(void)
/**
 * Attach an AV_PKT_DATA_PARAM_CHANGE side-data blob to pkt describing the
 * given parameter changes.  Only non-zero arguments are encoded; a flags
 * word (little-endian) leads the blob and announces which fields follow.
 * Returns 0 on success, AVERROR(EINVAL)/AVERROR(ENOMEM) on failure.
 * NOTE(review): the flags/size/data declarations, the size accounting per
 * field and the final return are elided from this chunk.
 */
4368 int ff_add_param_change(AVPacket *pkt, int32_t channels,
4369 uint64_t channel_layout, int32_t sample_rate,
4370 int32_t width, int32_t height)
4376 return AVERROR(EINVAL);
4379 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4381 if (channel_layout) {
4383 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4387 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4389 if (width || height) {
4391 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
4393 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4395 return AVERROR(ENOMEM);
/* serialize: flags first, then each announced field, all little-endian */
4396 bytestream_put_le32(&data, flags);
4398 bytestream_put_le32(&data, channels);
4400 bytestream_put_le64(&data, channel_layout);
4402 bytestream_put_le32(&data, sample_rate);
4403 if (width || height) {
4404 bytestream_put_le32(&data, width);
4405 bytestream_put_le32(&data, height);
/** Expose the internal RIFF/BMP video codec-tag table to public callers. */
4410 const struct AVCodecTag *avformat_get_riff_video_tags(void)
4412 return ff_codec_bmp_tags;
/** Expose the internal RIFF/WAV audio codec-tag table to public callers. */
4414 const struct AVCodecTag *avformat_get_riff_audio_tags(void)
4416 return ff_codec_wav_tags;
/**
 * Pick the most authoritative sample aspect ratio: the container/stream
 * SAR wins over the frame's; both are reduced and sanitized to 0/1
 * ("undefined") when missing or non-positive.
 * NOTE(review): the closing of this function lies past the end of this
 * chunk; the visible logic ends at the frame-SAR fallback return.
 */
4419 AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4421 AVRational undef = {0, 1};
4422 AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4423 AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : undef;
/* normalize the stream SAR; anything non-positive becomes "undefined" */
4425 av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4426 stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4427 if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4428 stream_sample_aspect_ratio = undef;
/* same normalization for the per-frame SAR */
4430 av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4431 frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4432 if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4433 frame_sample_aspect_ratio = undef;
/* container-level SAR takes precedence when defined */
4435 if (stream_sample_aspect_ratio.num)
4436 return stream_sample_aspect_ratio;
4438 return frame_sample_aspect_ratio;