2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "avio_internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavcodec/raw.h"
29 #include "libavcodec/bytestream.h"
30 #include "libavutil/avassert.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/dict.h"
33 #include "libavutil/pixdesc.h"
36 #include "libavutil/avassert.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/timestamp.h"
42 #include "audiointerleave.h"
56 * various utility functions for use within FFmpeg
59 unsigned avformat_version(void)
61 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
62 return LIBAVFORMAT_VERSION_INT;
65 const char *avformat_configuration(void)
67 return FFMPEG_CONFIGURATION;
70 const char *avformat_license(void)
72 #define LICENSE_PREFIX "libavformat license: "
73 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Base for timestamps that libavformat generates itself (rather than
 * reading from the container): a window of 2^48 ticks just below INT64_MAX. */
#define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))

/**
 * Report whether a timestamp lies in the "relative" window, i.e. was
 * generated internally starting from RELATIVE_TS_BASE.
 */
static int is_relative(int64_t ts)
{
    return ts > (RELATIVE_TS_BASE - (1LL << 48));
}
82 /* fraction handling */
/* NOTE(review): excerpt only — frac_init's body is not visible here.
 * Contract from the header below: set *f to val + num/den + 0.5 with
 * num normalized into [0, den). */
85 * f = val + (num / den) + 0.5.
87 * 'num' is normalized so that it is such as 0 <= num < den.
89 * @param f fractional number
90 * @param val integer value
91 * @param num must be >= 0
92 * @param den must be >= 1
94 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
107 * Fractional addition to f: f = f + (incr / f->den).
109 * @param f fractional number
110 * @param incr increment, can be positive or negative
/* NOTE(review): excerpt only — most of frac_add's body (the carry
 * normalization branches) is not shown; one overflow branch remains. */
112 static void frac_add(AVFrac *f, int64_t incr)
125 } else if (num >= den) {
/* File-scope singly linked registries of demuxers/muxers; traversed by
 * av_iformat_next()/av_oformat_next() and appended to by the
 * av_register_*_format() functions below. */
132 /** head of registered input format linked list */
133 static AVInputFormat *first_iformat = NULL;
134 /** head of registered output format linked list */
135 static AVOutputFormat *first_oformat = NULL;
137 AVInputFormat *av_iformat_next(AVInputFormat *f)
139 if(f) return f->next;
140 else return first_iformat;
143 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
145 if(f) return f->next;
146 else return first_oformat;
/* Append a demuxer to the tail of the global input-format list.
 * NOTE(review): excerpt — the declaration of the walker pointer and the
 * final link assignment are not visible here. */
149 void av_register_input_format(AVInputFormat *format)
153 while (*p != NULL) p = &(*p)->next;
/* Append a muxer to the tail of the global output-format list.
 * NOTE(review): excerpt — walker declaration and tail assignment not shown. */
158 void av_register_output_format(AVOutputFormat *format)
162 while (*p != NULL) p = &(*p)->next;
/* Return 1 if filename's extension (text after the last '.') matches one
 * entry of the comma-separated 'extensions' list, case-insensitively.
 * NOTE(review): excerpt — each list entry is copied into a bounded local
 * buffer ext1 before comparison; declarations and loop tail not shown. */
167 int av_match_ext(const char *filename, const char *extensions)
175 ext = strrchr(filename, '.');
181 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
184 if (!av_strcasecmp(ext1, ext))
/* Return nonzero if 'name' case-insensitively matches one entry of the
 * comma-separated 'names' list.  The FFMAX of entry length and name
 * length prevents a shorter list entry from prefix-matching 'name'. */
194 static int match_format(const char *name, const char *names)
202 namelen = strlen(name);
203 while ((p = strchr(names, ','))) {
204 len = FFMAX(p - names, namelen);
205 if (!av_strncasecmp(name, names, len))
/* final list entry (no trailing comma) is compared in full */
209 return !av_strcasecmp(name, names);
/* Pick the best muxer for the given short name / filename / MIME type
 * by scoring every registered output format and keeping the maximum.
 * NOTE(review): excerpt — score accumulation and the return of
 * fmt_found are not visible. */
212 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
213 const char *mime_type)
215 AVOutputFormat *fmt = NULL, *fmt_found;
216 int score_max, score;
218 /* specific test for image sequences */
219 #if CONFIG_IMAGE2_MUXER
/* a filename containing a frame-number pattern that maps to an image
 * codec is forced to the image2 muxer */
220 if (!short_name && filename &&
221 av_filename_number_test(filename) &&
222 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
223 return av_guess_format("image2", NULL, NULL);
226 /* Find the proper file type. */
229 while ((fmt = av_oformat_next(fmt))) {
231 if (fmt->name && short_name && !av_strcasecmp(fmt->name, short_name))
233 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
235 if (filename && fmt->extensions &&
236 av_match_ext(filename, fmt->extensions)) {
239 if (score > score_max) {
/* Choose the default codec id for a stream of the given media type in
 * muxer 'fmt'; for the image2 muxers the codec is guessed from the
 * filename extension before falling back to fmt->video_codec. */
247 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
248 const char *filename, const char *mime_type, enum AVMediaType type){
249 if(type == AVMEDIA_TYPE_VIDEO){
250 enum CodecID codec_id= CODEC_ID_NONE;
252 #if CONFIG_IMAGE2_MUXER
253 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
254 codec_id= ff_guess_image2_codec(filename);
257 if(codec_id == CODEC_ID_NONE)
258 codec_id= fmt->video_codec;
260 }else if(type == AVMEDIA_TYPE_AUDIO)
261 return fmt->audio_codec;
262 else if (type == AVMEDIA_TYPE_SUBTITLE)
263 return fmt->subtitle_codec;
/* unknown media type */
265 return CODEC_ID_NONE;
/* Find a registered demuxer whose name list matches 'short_name';
 * NOTE(review): excerpt — the return statements are not visible. */
268 AVInputFormat *av_find_input_format(const char *short_name)
270 AVInputFormat *fmt = NULL;
271 while ((fmt = av_iformat_next(fmt))) {
272 if (match_format(short_name, fmt->name))
/* Clamp a requested read size against the remaining bytes allowed by
 * s->maxsize, refreshing maxsize from avio_size() when it looks stale.
 * NOTE(review): excerpt — the return statements are not visible.
 * Style note: av_log(0, ...) passes 0 where NULL is conventional. */
278 int ffio_limit(AVIOContext *s, int size)
281 int64_t remaining= s->maxsize - avio_tell(s);
282 if(remaining < size){
283 int64_t newsize= avio_size(s);
/* "- !newsize" maps a 0 size to -1 (unknown) */
284 if(!s->maxsize || s->maxsize<newsize)
285 s->maxsize= newsize - !newsize;
286 remaining= s->maxsize - avio_tell(s);
287 remaining= FFMAX(remaining, 0);
290 if(s->maxsize>=0 && remaining+1 < size){
291 av_log(0, AV_LOG_ERROR, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
/* Allocate a packet of (limited) 'size' bytes and fill it from s.
 * A short read shrinks the packet and marks it AV_PKT_FLAG_CORRUPT.
 * NOTE(review): excerpt — error-return paths are not visible. */
298 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
301 int orig_size = size;
302 size= ffio_limit(s, size);
304 ret= av_new_packet(pkt, size);
309 pkt->pos= avio_tell(s);
311 ret= avio_read(s, pkt->data, size);
315 av_shrink_packet(pkt, ret);
316 if (pkt->size < orig_size)
317 pkt->flags |= AV_PKT_FLAG_CORRUPT;
/* Grow an existing packet by 'size' bytes read from s; an empty packet
 * degenerates to av_get_packet().  FFMAX(ret, 0) keeps a negative read
 * result from growing the packet.
 * NOTE(review): excerpt — the error checks between lines are missing. */
322 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
327 return av_get_packet(s, pkt, size);
328 old_size = pkt->size;
329 ret = av_grow_packet(pkt, size);
332 ret = avio_read(s, pkt->data + old_size, size);
333 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
/* Return 1 if 'filename' contains a frame-number pattern that
 * av_get_frame_filename() can expand (e.g. "%d"). */
338 int av_filename_number_test(const char *filename)
341 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
/* Probe all registered demuxers against pd and return the best match,
 * writing its score to *score_ret.  A leading ID3v2 tag is skipped
 * before probing when enough payload remains after it.
 * NOTE(review): excerpt — several branch bodies and the final return
 * are not visible. */
344 AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
346 AVProbeData lpd = *pd;
347 AVInputFormat *fmt1 = NULL, *fmt;
348 int score, nodat = 0, score_max=0;
350 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
351 int id3len = ff_id3v2_tag_len(lpd.buf);
352 if (lpd.buf_size > id3len + 16) {
354 lpd.buf_size -= id3len;
360 while ((fmt1 = av_iformat_next(fmt1))) {
/* skip formats whose NOFILE expectation disagrees with is_opened */
361 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
364 if (fmt1->read_probe) {
365 score = fmt1->read_probe(&lpd);
/* extension match boosts the floor score, more so when only an
 * ID3 tag (no data) was seen */
366 if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
367 score = FFMAX(score, nodat ? AVPROBE_SCORE_MAX/4-1 : 1);
368 } else if (fmt1->extensions) {
369 if (av_match_ext(lpd.filename, fmt1->extensions)) {
373 if (score > score_max) {
376 }else if (score == score_max)
379 *score_ret= score_max;
/* Wrapper over av_probe_input_format3() that only reports a format when
 * its score exceeds the caller-supplied threshold in *score_max.
 * NOTE(review): excerpt — score_ret declaration and returns not shown. */
384 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
387 AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
388 if(score_ret > *score_max){
389 *score_max= score_ret;
/* Convenience wrapper: probe with a zero score threshold. */
395 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
397 return av_probe_input_format2(pd, is_opened, &score);
/* Probe raw stream data and, when the detected container name appears in
 * the static name->codec table below, set the stream's codec id/type.
 * NOTE(review): excerpt — table terminator, score declaration and the
 * return are not visible. */
400 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
402 static const struct {
403 const char *name; enum CodecID id; enum AVMediaType type;
405 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
406 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
407 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
408 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
409 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
410 { "loas" , CODEC_ID_AAC_LATM , AVMEDIA_TYPE_AUDIO },
411 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
412 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
413 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
417 AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
421 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
422 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
423 for (i = 0; fmt_id_type[i].name; i++) {
424 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
425 st->codec->codec_id = fmt_id_type[i].id;
426 st->codec->codec_type = fmt_id_type[i].type;
434 /************************************************************/
435 /* input media file */
/* Call the demuxer's read_header() and record the post-header byte
 * offset as data_offset (if an AVIOContext is present).
 * NOTE(review): excerpt — error propagation lines are not visible. */
437 int av_demuxer_open(AVFormatContext *ic){
440 if (ic->iformat->read_header) {
441 err = ic->iformat->read_header(ic);
446 if (ic->pb && !ic->data_offset)
447 ic->data_offset = avio_tell(ic->pb);
453 /** size of probe buffer, for guessing file type from file contents */
454 #define PROBE_BUF_MIN 2048
455 #define PROBE_BUF_MAX (1<<20)
/* Probe the byte stream in pb for its format, doubling the probe buffer
 * from PROBE_BUF_MIN up to max_probe_size until a format scores above
 * the per-iteration threshold; on success the consumed bytes are pushed
 * back so demuxing starts at the stream head.
 * NOTE(review): excerpt — loop-exit handling and some cleanup paths are
 * not visible. */
457 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
458 const char *filename, void *logctx,
459 unsigned int offset, unsigned int max_probe_size)
/* buf_size starts at -offset so the skipped prefix is not counted */
461 AVProbeData pd = { filename ? filename : "", NULL, -offset };
462 unsigned char *buf = NULL;
463 int ret = 0, probe_size;
465 if (!max_probe_size) {
466 max_probe_size = PROBE_BUF_MAX;
467 } else if (max_probe_size > PROBE_BUF_MAX) {
468 max_probe_size = PROBE_BUF_MAX;
469 } else if (max_probe_size < PROBE_BUF_MIN) {
470 return AVERROR(EINVAL);
473 if (offset >= max_probe_size) {
474 return AVERROR(EINVAL);
/* last iteration drops the score requirement to 0 (accept anything) */
477 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
478 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
479 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
480 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
483 if (probe_size < offset) {
487 /* read probe data */
488 buftmp = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
491 return AVERROR(ENOMEM);
494 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
495 /* fail if error was not end of file, otherwise, lower score */
496 if (ret != AVERROR_EOF) {
501 ret = 0; /* error was end of file, nothing read */
504 pd.buf = &buf[offset];
506 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
508 /* guess file format */
509 *fmt = av_probe_input_format2(&pd, 1, &score);
511 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
512 av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
514 av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
520 return AVERROR_INVALIDDATA;
523 /* rewind. reuse probe buffer to avoid seeking */
524 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
530 /* open input file and probe the format if necessary */
/* Three cases: (1) caller supplied s->pb -> custom I/O, probe via it;
 * (2) AVFMT_NOFILE format (or probe without opening) -> no avio open;
 * (3) otherwise open the file with avio_open2() and probe.
 * NOTE(review): excerpt — the early `if (s->pb)` and intermediate
 * returns are not visible. */
531 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
534 AVProbeData pd = {filename, NULL, 0};
537 s->flags |= AVFMT_FLAG_CUSTOM_IO;
539 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
540 else if (s->iformat->flags & AVFMT_NOFILE)
541 av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
542 "will be ignored with AVFMT_NOFILE format.\n");
546 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
547 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
550 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
551 &s->interrupt_callback, options)) < 0)
555 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
/* Append *pkt to the singly linked packet buffer, updating both the head
 * (when the list was empty) and the tail pointer.
 * NOTE(review): excerpt — the NULL check on av_mallocz and the return of
 * the stored packet are not visible. */
558 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
559 AVPacketList **plast_pktl){
560 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
565 (*plast_pktl)->next = pktl;
567 *packet_buffer = pktl;
569 /* add the packet in the buffered packet list */
575 static void queue_attached_pictures(AVFormatContext *s)
578 for (i = 0; i < s->nb_streams; i++)
579 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
580 s->streams[i]->discard < AVDISCARD_ALL) {
581 AVPacket copy = s->streams[i]->attached_pic;
582 copy.destruct = NULL;
583 add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end);
/* Open an input stream: allocate/validate the context, apply options,
 * open and probe the input (init_input), allocate demuxer private data,
 * read ID3v2 metadata, call the demuxer's read_header(), queue attached
 * pictures and record data_offset.  On failure the context is freed.
 * NOTE(review): excerpt — several error-goto lines and the final
 * successful return are not visible. */
587 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
589 AVFormatContext *s = *ps;
591 AVDictionary *tmp = NULL;
592 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
594 if (!s && !(s = avformat_alloc_context()))
595 return AVERROR(ENOMEM);
/* a non-NULL *ps that lacks the proper AVClass is rejected */
597 av_log(0, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
598 return AVERROR(EINVAL);
/* work on a copy so the caller's dict reports unconsumed options */
604 av_dict_copy(&tmp, *options, 0);
606 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
609 if ((ret = init_input(s, filename, &tmp)) < 0)
612 /* check filename in case an image number is expected */
613 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
614 if (!av_filename_number_test(filename)) {
615 ret = AVERROR(EINVAL);
620 s->duration = s->start_time = AV_NOPTS_VALUE;
621 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
623 /* allocate private data */
624 if (s->iformat->priv_data_size > 0) {
625 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
626 ret = AVERROR(ENOMEM);
629 if (s->iformat->priv_class) {
630 *(const AVClass**)s->priv_data = s->iformat->priv_class;
631 av_opt_set_defaults(s->priv_data);
632 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
637 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
639 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
641 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
642 if ((ret = s->iformat->read_header(s)) < 0)
645 if (id3v2_extra_meta &&
646 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
648 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
650 queue_attached_pictures(s)\
652 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
653 s->data_offset = avio_tell(s->pb);
655 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
/* return leftover (unrecognized) options to the caller */
658 av_dict_free(options);
/* error path: release ID3 metadata, close pb unless caller-owned, free s */
665 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
667 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
669 avformat_free_context(s);
674 /*******************************************************/
/* Accumulate raw packet data into st->probe_data and, at power-of-two
 * buffer sizes or when probing ends, try to identify the codec via
 * set_codec_from_probe_data().  A NULL pkt flushes the probe.
 * NOTE(review): excerpt — realloc failure handling and some braces are
 * not visible. */
676 static void probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
678 if(st->request_probe>0){
679 AVProbeData *pd = &st->probe_data;
681 av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
685 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
686 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
687 pd->buf_size += pkt->size;
688 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* flush request (pkt == NULL) stops further probing */
690 st->probe_packets = 0;
693 end= s->raw_packet_buffer_remaining_size <= 0
694 || st->probe_packets<=0;
/* only re-probe when the buffer crossed a power-of-two boundary */
696 if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
697 int score= set_codec_from_probe_data(s, st, pd);
698 if( (st->codec->codec_id != CODEC_ID_NONE && score > AVPROBE_SCORE_MAX/4)
702 st->request_probe= -1;
703 if(st->codec->codec_id != CODEC_ID_NONE){
704 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
706 av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
/* Read one raw packet: prefer fully-probed packets queued in
 * raw_packet_buffer, otherwise pull from the demuxer, apply forced codec
 * ids, drop corrupt packets when requested, and feed packets of
 * still-probing streams into probe_codec().
 * NOTE(review): excerpt — the enclosing retry loop and several returns
 * are not visible. */
712 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
718 AVPacketList *pktl = s->raw_packet_buffer;
722 st = s->streams[pkt->stream_index];
723 if(st->request_probe <= 0){
724 s->raw_packet_buffer = pktl->next;
725 s->raw_packet_buffer_remaining_size += pkt->size;
732 ret= s->iformat->read_packet(s, pkt);
/* on EOF/error: flush the probes of all still-probing streams */
734 if (!pktl || ret == AVERROR(EAGAIN))
736 for (i = 0; i < s->nb_streams; i++) {
738 if (st->probe_packets) {
739 probe_codec(s, st, NULL);
741 av_assert0(st->request_probe <= 0);
746 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
747 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
748 av_log(s, AV_LOG_WARNING,
749 "Dropped corrupted packet (stream = %d)\n",
755 if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
756 av_packet_merge_side_data(pkt);
758 if(pkt->stream_index >= (unsigned)s->nb_streams){
759 av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
763 st= s->streams[pkt->stream_index];
/* honor user-forced codec ids per media type */
765 switch(st->codec->codec_type){
766 case AVMEDIA_TYPE_VIDEO:
767 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
769 case AVMEDIA_TYPE_AUDIO:
770 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
772 case AVMEDIA_TYPE_SUBTITLE:
773 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
777 if(!pktl && st->request_probe <= 0)
/* still probing: park the packet on the raw buffer */
780 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
781 s->raw_packet_buffer_remaining_size -= pkt->size;
783 probe_codec(s, st, pkt);
787 #if FF_API_READ_PACKET
/* Deprecated public wrapper kept for ABI compatibility. */
788 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
790 return ff_read_packet(s, pkt);
795 /**********************************************************/
/* Whitelist of audio codecs whose frame size can be derived from the
 * bitstream (MP1/2/3); AAC and CELT entries are deliberately disabled.
 * NOTE(review): excerpt — the return statements are not visible. */
797 static int determinable_frame_size(AVCodecContext *avctx)
799 if (/*avctx->codec_id == CODEC_ID_AAC ||*/
800 avctx->codec_id == CODEC_ID_MP1 ||
801 avctx->codec_id == CODEC_ID_MP2 ||
802 avctx->codec_id == CODEC_ID_MP3/* ||
803 avctx->codec_id == CODEC_ID_CELT*/)
809 * Get the number of samples of an audio frame. Return -1 on error.
/* Resolution order: demuxing trusts enc->frame_size first, then the
 * bitstream-derived duration, then frame_size again when muxing. */
811 static int get_audio_frame_size(AVCodecContext *enc, int size, int mux)
815 /* give frame_size priority if demuxing */
816 if (!mux && enc->frame_size > 1)
817 return enc->frame_size;
819 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
822 /* fallback to using frame_size if muxing */
823 if (enc->frame_size > 1)
824 return enc->frame_size;
831 * Return the frame duration in seconds. Return 0 if not available.
/* Writes the duration as the rational *pnum / *pden.  Video prefers
 * r_frame_rate (without a parser), then stream/codec time bases scaled
 * by repeat_pict; audio derives it from the sample count.
 * NOTE(review): excerpt — the *pnum/*pden zero-initialization and some
 * break statements are not visible. */
833 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
834 AVCodecParserContext *pc, AVPacket *pkt)
840 switch(st->codec->codec_type) {
841 case AVMEDIA_TYPE_VIDEO:
842 if (st->r_frame_rate.num && !pc) {
843 *pnum = st->r_frame_rate.den;
844 *pden = st->r_frame_rate.num;
/* the 1000x sanity factor rejects absurdly small time bases */
845 } else if(st->time_base.num*1000LL > st->time_base.den) {
846 *pnum = st->time_base.num;
847 *pden = st->time_base.den;
848 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
849 *pnum = st->codec->time_base.num;
850 *pden = st->codec->time_base.den;
851 if (pc && pc->repeat_pict) {
852 *pnum = (*pnum) * (1 + pc->repeat_pict);
854 //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
855 //Thus if we have no parser in such case leave duration undefined.
856 if(st->codec->ticks_per_frame>1 && !pc){
861 case AVMEDIA_TYPE_AUDIO:
862 frame_size = get_audio_frame_size(st->codec, pkt->size, 0);
863 if (frame_size <= 0 || st->codec->sample_rate <= 0)
866 *pden = st->codec->sample_rate;
/* Return 1 for codecs whose every frame is a keyframe: all audio, plus
 * the listed intra-only video codecs.
 * NOTE(review): excerpt — several case labels and the returns are not
 * visible. */
873 static int is_intra_only(AVCodecContext *enc){
874 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
876 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
877 switch(enc->codec_id){
879 case CODEC_ID_MJPEGB:
881 case CODEC_ID_PRORES:
882 case CODEC_ID_RAWVIDEO:
884 case CODEC_ID_DVVIDEO:
885 case CODEC_ID_HUFFYUV:
886 case CODEC_ID_FFVHUFF:
891 case CODEC_ID_JPEG2000:
893 case CODEC_ID_UTVIDEO:
/* Advance through the logically concatenated parse_queue followed by
 * packet_buffer: reaching the end of the parse queue continues at the
 * head of the packet buffer. */
901 static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
905 if (pktl == s->parse_queue_end)
906 return s->packet_buffer;
/* Once the first real DTS of a stream is known, rebase every queued
 * packet that still carries a RELATIVE_TS_BASE-relative timestamp onto
 * the stream's true timeline, and establish st->start_time. */
910 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
911 int64_t dts, int64_t pts)
913 AVStream *st= s->streams[stream_index];
914 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
/* nothing to do if first_dts is already set or dts is unusable */
916 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))
919 st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
922 if (is_relative(pts))
923 pts += st->first_dts - RELATIVE_TS_BASE;
925 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
926 if(pktl->pkt.stream_index != stream_index)
928 if(is_relative(pktl->pkt.pts))
929 pktl->pkt.pts += st->first_dts - RELATIVE_TS_BASE;
931 if(is_relative(pktl->pkt.dts))
932 pktl->pkt.dts += st->first_dts - RELATIVE_TS_BASE;
934 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
935 st->start_time= pktl->pkt.pts;
937 if (st->start_time == AV_NOPTS_VALUE)
938 st->start_time = pts;
/* Backfill dts/pts/duration for queued packets of a stream whose frames
 * have a constant 'duration' but arrived without timestamps, walking
 * forward from first_dts (or RELATIVE_TS_BASE).
 * NOTE(review): excerpt — a few loop braces/returns are not visible. */
941 static void update_initial_durations(AVFormatContext *s, AVStream *st,
942 int stream_index, int duration)
944 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
945 int64_t cur_dts= RELATIVE_TS_BASE;
947 if(st->first_dts != AV_NOPTS_VALUE){
/* find the first queued packet of this stream that already carries
 * timing information and anchor cur_dts before it */
948 cur_dts= st->first_dts;
949 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
950 if(pktl->pkt.stream_index == stream_index){
951 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
956 if(pktl && pktl->pkt.dts != st->first_dts) {
957 av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s in que\n", av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts));
961 av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in ques\n", av_ts2str(st->first_dts));
964 pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
965 st->first_dts = cur_dts;
966 }else if(st->cur_dts != RELATIVE_TS_BASE)
969 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
970 if(pktl->pkt.stream_index != stream_index)
972 if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
973 && !pktl->pkt.duration){
974 pktl->pkt.dts= cur_dts;
975 if(!st->codec->has_b_frames)
976 pktl->pkt.pts= cur_dts;
977 // if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
978 pktl->pkt.duration = duration;
981 cur_dts = pktl->pkt.dts + pktl->pkt.duration;
984 st->cur_dts= cur_dts;
/* Fill in missing pts/dts/duration for a freshly demuxed (or parsed)
 * packet: wrap correction, duration from frame rate or sample count,
 * parser-provided sync points, B-frame delay compensation via the
 * pts_buffer bubble sort, and keyframe flagging for intra-only codecs.
 * NOTE(review): excerpt — some returns, closing braces and a handful
 * of condition lines are not visible; logic order is preserved
 * byte-for-byte below. */
987 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
988 AVCodecParserContext *pc, AVPacket *pkt)
990 int num, den, presentation_delayed, delay, i;
993 if (s->flags & AVFMT_FLAG_NOFILLIN)
996 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
997 pkt->dts= AV_NOPTS_VALUE;
999 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
1000 //FIXME Set low_delay = 0 when has_b_frames = 1
1001 st->codec->has_b_frames = 1;
1003 /* do we have a video B-frame ? */
1004 delay= st->codec->has_b_frames;
1005 presentation_delayed = 0;
1007 /* XXX: need has_b_frame, but cannot get it if the codec is
1010 pc && pc->pict_type != AV_PICTURE_TYPE_B)
1011 presentation_delayed = 1;
/* undo a dts wrap-around relative to pts */
1013 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > pkt->pts && st->pts_wrap_bits<63){
1014 pkt->dts -= 1LL<<st->pts_wrap_bits;
1017 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
1018 // we take the conservative approach and discard both
1019 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
1020 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
1021 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1022 pkt->dts= AV_NOPTS_VALUE;
1025 if (pkt->duration == 0) {
1026 compute_frame_duration(&num, &den, st, pc, pkt);
1028 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
1031 if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
1032 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1034 /* correct timestamps with byte offset if demuxers only have timestamps
1035 on packet boundaries */
1036 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1037 /* this will estimate bitrate based on this frame's duration and size */
1038 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1039 if(pkt->pts != AV_NOPTS_VALUE)
1041 if(pkt->dts != AV_NOPTS_VALUE)
1045 if (pc && pc->dts_sync_point >= 0) {
1046 // we have synchronization info from the parser
1047 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1049 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1050 if (pkt->dts != AV_NOPTS_VALUE) {
1051 // got DTS from the stream, update reference timestamp
1052 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1053 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1054 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1055 // compute DTS based on reference timestamp
1056 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1057 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1059 if (pc->dts_sync_point > 0)
1060 st->reference_dts = pkt->dts; // new reference
1064 /* This may be redundant, but it should not hurt. */
1065 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1066 presentation_delayed = 1;
1068 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
1069 // presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
1070 /* interpolate PTS and DTS if they are not present */
1071 //We skip H264 currently because delay and has_b_frames are not reliably set
1072 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1073 if (presentation_delayed) {
1074 /* DTS = decompression timestamp */
1075 /* PTS = presentation timestamp */
1076 if (pkt->dts == AV_NOPTS_VALUE)
1077 pkt->dts = st->last_IP_pts;
1078 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1079 if (pkt->dts == AV_NOPTS_VALUE)
1080 pkt->dts = st->cur_dts;
1082 /* this is tricky: the dts must be incremented by the duration
1083 of the frame we are displaying, i.e. the last I- or P-frame */
1084 if (st->last_IP_duration == 0)
1085 st->last_IP_duration = pkt->duration;
1086 if(pkt->dts != AV_NOPTS_VALUE)
1087 st->cur_dts = pkt->dts + st->last_IP_duration;
1088 st->last_IP_duration = pkt->duration;
1089 st->last_IP_pts= pkt->pts;
1090 /* cannot compute PTS if not present (we can compute it only
1091 by knowing the future */
1092 } else if (pkt->pts != AV_NOPTS_VALUE ||
1093 pkt->dts != AV_NOPTS_VALUE ||
1095 int duration = pkt->duration;
/* heuristic pts correction for mpeg/mpegts off-by-one-frame pts */
1097 if(pkt->pts != AV_NOPTS_VALUE && duration){
1098 int64_t old_diff= FFABS(st->cur_dts - duration - pkt->pts);
1099 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1100 if( old_diff < new_diff && old_diff < (duration>>3)
1101 && (!strcmp(s->iformat->name, "mpeg") ||
1102 !strcmp(s->iformat->name, "mpegts"))){
1103 pkt->pts += duration;
1104 av_log(s, AV_LOG_WARNING, "Adjusting PTS forward\n");
1105 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%s size:%d\n",
1106 // pkt->stream_index, old_diff, new_diff, pkt->duration, av_ts2str(st->cur_dts), pkt->size);
1110 /* presentation is not delayed : PTS and DTS are the same */
1111 if (pkt->pts == AV_NOPTS_VALUE)
1112 pkt->pts = pkt->dts;
1113 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
1115 if (pkt->pts == AV_NOPTS_VALUE)
1116 pkt->pts = st->cur_dts;
1117 pkt->dts = pkt->pts;
1118 if (pkt->pts != AV_NOPTS_VALUE)
1119 st->cur_dts = pkt->pts + duration;
/* reorder pts through the sorted pts_buffer to derive a dts */
1123 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1124 st->pts_buffer[0]= pkt->pts;
1125 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1126 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1127 if(pkt->dts == AV_NOPTS_VALUE)
1128 pkt->dts= st->pts_buffer[0];
1129 if(st->codec->codec_id == CODEC_ID_H264){ // we skipped it above so we try here
1130 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1132 if(pkt->dts > st->cur_dts)
1133 st->cur_dts = pkt->dts;
1136 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1137 // presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1140 if(is_intra_only(st->codec))
1141 pkt->flags |= AV_PKT_FLAG_KEY;
1143 pkt->convergence_duration = pc->convergence_duration;
/* Drain a packet list: free every packet and node, then clear both the
 * head and tail pointers.
 * NOTE(review): excerpt — the loop header and av_freep of the node are
 * not visible. */
1146 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1149 AVPacketList *pktl = *pkt_buf;
1150 *pkt_buf = pktl->next;
1151 av_free_packet(&pktl->pkt);
1154 *pkt_buf_end = NULL;
1158 * Parse a packet, add all split parts to parse_queue
1160 * @param pkt packet to parse, NULL when flushing the parser at end of stream
/* Runs the stream's AVCodecParser over the packet payload, emitting zero
 * or more complete frames as out_pkt entries on s->parse_queue; fills
 * duration, timestamps, key flags and the generic index on the way.
 * NOTE(review): excerpt — a few brace/advance lines are not visible. */
1162 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1164 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1165 AVStream *st = s->streams[stream_index];
1166 uint8_t *data = pkt ? pkt->data : NULL;
1167 int size = pkt ? pkt->size : 0;
1168 int ret = 0, got_output = 0;
/* a NULL pkt means flush: substitute the zero-sized flush packet */
1171 av_init_packet(&flush_pkt);
1174 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1175 // preserve 0-size sync packets
1176 compute_pkt_fields(s, st, st->parser, pkt);
1179 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1182 av_init_packet(&out_pkt);
1183 len = av_parser_parse2(st->parser, st->codec,
1184 &out_pkt.data, &out_pkt.size, data, size,
1185 pkt->pts, pkt->dts, pkt->pos);
/* input timestamps are consumed by the first parse call only */
1187 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1188 /* increment read pointer */
1192 got_output = !!out_pkt.size;
1197 /* set the duration */
1198 out_pkt.duration = 0;
1199 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1200 if (st->codec->sample_rate > 0) {
1201 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1202 (AVRational){ 1, st->codec->sample_rate },
1206 } else if (st->codec->time_base.num != 0 &&
1207 st->codec->time_base.den != 0) {
1208 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1209 st->codec->time_base,
1214 out_pkt.stream_index = st->index;
1215 out_pkt.pts = st->parser->pts;
1216 out_pkt.dts = st->parser->dts;
1217 out_pkt.pos = st->parser->pos;
1219 if (st->parser->key_frame == 1 ||
1220 (st->parser->key_frame == -1 &&
1221 st->parser->pict_type == AV_PICTURE_TYPE_I))
1222 out_pkt.flags |= AV_PKT_FLAG_KEY;
1224 if(st->parser->key_frame == -1 && st->parser->pict_type==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
1225 out_pkt.flags |= AV_PKT_FLAG_KEY;
1227 compute_pkt_fields(s, st, st->parser, &out_pkt);
1229 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1230 out_pkt.flags & AV_PKT_FLAG_KEY) {
1231 int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? out_pkt.pos : st->parser->frame_offset;
1232 ff_reduce_index(s, st->index);
1233 av_add_index_entry(st, pos, out_pkt.dts,
1234 0, 0, AVINDEX_KEYFRAME);
/* steal ownership when the parser returned the input buffer verbatim */
1237 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1238 out_pkt.destruct = pkt->destruct;
1239 pkt->destruct = NULL;
1241 if ((ret = av_dup_packet(&out_pkt)) < 0)
1244 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1245 av_free_packet(&out_pkt);
1246 ret = AVERROR(ENOMEM);
1252 /* end of the stream => close and free the parser */
1253 if (pkt == &flush_pkt) {
1254 av_parser_close(st->parser);
1259 av_free_packet(pkt);
/* Pop the head packet of a packet list into *pkt (signature partially
 * outside this excerpt), clearing the tail pointer when the list
 * becomes empty. */
1263 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1264 AVPacketList **pkt_buffer_end,
1268 av_assert0(*pkt_buffer);
1271 *pkt_buffer = pktl->next;
1273 *pkt_buffer_end = NULL;
/* Core read loop behind av_read_frame(): pull raw packets, lazily create
 * parsers, either emit the packet directly (with computed fields and
 * generic-index update) or feed it through parse_packet(); at EOF flush
 * all parsers and then drain the parse queue.
 * NOTE(review): excerpt — several returns, `continue`s and closing
 * braces are not visible. */
1278 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1280 int ret = 0, i, got_packet = 0;
1282 av_init_packet(pkt);
1284 while (!got_packet && !s->parse_queue) {
1288 /* read next packet */
1289 ret = ff_read_packet(s, &cur_pkt);
1291 if (ret == AVERROR(EAGAIN))
1293 /* flush the parsers */
1294 for(i = 0; i < s->nb_streams; i++) {
1296 if (st->parser && st->need_parsing)
1297 parse_packet(s, NULL, st->index)\
1299 /* all remaining packets are now in parse_queue =>
1300 * really terminate parsing */
1304 st = s->streams[cur_pkt.stream_index];
1306 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1307 cur_pkt.dts != AV_NOPTS_VALUE &&
1308 cur_pkt.pts < cur_pkt.dts) {
1309 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1310 cur_pkt.stream_index,
1311 av_ts2str(cur_pkt.pts),
1312 av_ts2str(cur_pkt.dts),
1315 if (s->debug & FF_FDEBUG_TS)
1316 av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1317 cur_pkt.stream_index,
1318 av_ts2str(cur_pkt.pts),
1319 av_ts2str(cur_pkt.dts),
/* lazily instantiate the parser on first need */
1324 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1325 st->parser = av_parser_init(st->codec->codec_id);
1327 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1328 "%s, packets or times may be invalid.\n",
1329 avcodec_get_name(st->codec->codec_id));
1330 /* no parser available: just output the raw packets */
1331 st->need_parsing = AVSTREAM_PARSE_NONE;
1332 } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1333 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1334 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1335 st->parser->flags |= PARSER_FLAG_ONCE;
1336 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
1337 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
1341 if (!st->need_parsing || !st->parser) {
1342 /* no parsing needed: we just output the packet as is */
1344 compute_pkt_fields(s, st, NULL, pkt);
1345 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1346 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1347 ff_reduce_index(s, st->index);
1348 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1351 } else if (st->discard < AVDISCARD_ALL) {
1352 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
/* stream fully discarded: drop the raw packet */
1356 av_free_packet(&cur_pkt);
1358 if (pkt->flags & AV_PKT_FLAG_KEY)
1359 st->skip_to_keyframe = 0;
1360 if (st->skip_to_keyframe) {
1361 av_free_packet(&cur_pkt);
1366 if (!got_packet && s->parse_queue)
1367 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1369 if(s->debug & FF_FDEBUG_TS)
1370 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1372 av_ts2str(pkt->pts),
1373 av_ts2str(pkt->dts),
/* Public API: return the next frame of a stream.
 * Without AVFMT_FLAG_GENPTS this just drains the packet buffer or calls
 * read_frame_internal().  With genpts set, packets are buffered and a
 * missing pts is reconstructed from the dts of later packets on the
 * same stream (comparisons are done modulo the stream's pts_wrap_bits). */
1381 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1383 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1388 ret = s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
1389 &s->packet_buffer_end,
1391 read_frame_internal(s, pkt);
1396 AVPacketList *pktl = s->packet_buffer;
1399 AVPacket *next_pkt = &pktl->pkt;
1401 if (next_pkt->dts != AV_NOPTS_VALUE) {
1402 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1403 // last dts seen for this stream. if any of packets following
1404 // current one had no dts, we will set this to AV_NOPTS_VALUE.
1405 int64_t last_dts = next_pkt->dts;
1406 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1407 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1408 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1409 if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1410 next_pkt->pts = pktl->pkt.dts;
1412 if (last_dts != AV_NOPTS_VALUE) {
1413 // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1414 last_dts = pktl->pkt.dts;
1419 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1420 // Fix the "last reference frame has no pts" issue (for MXF etc.).
1421 // We only do this when
1423 // 2. we are not able to resolve a pts value for current packet.
1424 // 3. the packets for this stream at the end of the files had valid dts.
1425 next_pkt->pts = last_dts + next_pkt->duration;
1427 pktl = s->packet_buffer;
1430 /* read packet from packet buffer, if there is data */
1431 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1432 next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1433 ret = read_from_packet_buffer(&s->packet_buffer,
1434 &s->packet_buffer_end, pkt);
1439 ret = read_frame_internal(s, pkt);
1441 if (pktl && ret != AVERROR(EAGAIN)) {
1448 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1449 &s->packet_buffer_end)) < 0)
1450 return AVERROR(ENOMEM);
/* map internal relative timestamps back to user-visible absolute values */
1454 if (is_relative(pkt->dts))
1455 pkt->dts -= RELATIVE_TS_BASE;
1456 if (is_relative(pkt->pts))
1457 pkt->pts -= RELATIVE_TS_BASE;
1461 /* XXX: suppress the packet queue */
/* Free all buffered packets (parse queue, demux packet buffer and raw
 * probe buffer) and reset the raw buffer size budget. */
1462 static void flush_packet_queue(AVFormatContext *s)
1464 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1465 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1466 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1468 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1471 /*******************************************************/
/* Choose a default stream for seeking: prefer a video stream that is
 * not an attached picture, otherwise the first audio stream, otherwise
 * stream 0. */
1474 int av_find_default_stream_index(AVFormatContext *s)
1476 int first_audio_index = -1;
1480 if (s->nb_streams <= 0)
1482 for(i = 0; i < s->nb_streams; i++) {
1484 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1485 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1488 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1489 first_audio_index = i;
1491 return first_audio_index >= 0 ? first_audio_index : 0;
1495 * Flush the frame reader.
/* Drops all queued packets and resets per-stream parser and timestamp
 * state so that reading can restart cleanly (typically after a seek). */
1497 void ff_read_frame_flush(AVFormatContext *s)
1502 flush_packet_queue(s);
1504 /* for each stream, reset read state */
1505 for(i = 0; i < s->nb_streams; i++) {
1509 av_parser_close(st->parser);
1512 st->last_IP_pts = AV_NOPTS_VALUE;
/* streams without a known first_dts restart from the relative-ts origin */
1513 if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1514 else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1515 st->reference_dts = AV_NOPTS_VALUE;
1517 st->probe_packets = MAX_PROBE_PACKETS;
1519 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1520 st->pts_buffer[j]= AV_NOPTS_VALUE;
/* Set cur_dts of every stream to `timestamp` (expressed in ref_st's
 * time base), rescaled into each stream's own time base. */
1524 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1528 for(i = 0; i < s->nb_streams; i++) {
1529 AVStream *st = s->streams[i];
1531 st->cur_dts = av_rescale(timestamp,
1532 st->time_base.den * (int64_t)ref_st->time_base.num,
1533 st->time_base.num * (int64_t)ref_st->time_base.den);
/* Keep a stream's seek index within s->max_index_size bytes by
 * discarding every second entry once the entry count hits the limit. */
1537 void ff_reduce_index(AVFormatContext *s, int stream_index)
1539 AVStream *st= s->streams[stream_index];
1540 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1542 if((unsigned)st->nb_index_entries >= max_entries){
1544 for(i=0; 2*i<st->nb_index_entries; i++)
1545 st->index_entries[i]= st->index_entries[2*i];
1546 st->nb_index_entries= i;
/* Insert an entry into a seek index kept sorted by timestamp.
 * An existing entry with the same timestamp is updated in place;
 * otherwise later entries are shifted to make room.
 * Returns the entry's index, or a negative value on error. */
1550 int ff_add_index_entry(AVIndexEntry **index_entries,
1551 int *nb_index_entries,
1552 unsigned int *index_entries_allocated_size,
1553 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1555 AVIndexEntry *entries, *ie;
/* guard against allocation-size overflow */
1558 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1561 if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1562 timestamp -= RELATIVE_TS_BASE;
1564 entries = av_fast_realloc(*index_entries,
1565 index_entries_allocated_size,
1566 (*nb_index_entries + 1) *
1567 sizeof(AVIndexEntry));
1571 *index_entries= entries;
/* locate the insertion point (ANY: no keyframe restriction) */
1573 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1576 index= (*nb_index_entries)++;
1577 ie= &entries[index];
1578 assert(index==0 || ie[-1].timestamp < timestamp);
1580 ie= &entries[index];
1581 if(ie->timestamp != timestamp){
1582 if(ie->timestamp <= timestamp)
1584 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1585 (*nb_index_entries)++;
1586 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1587 distance= ie->min_distance;
1591 ie->timestamp = timestamp;
1592 ie->min_distance= distance;
/* Public wrapper: add an entry to a stream's seek index. */
1599 int av_add_index_entry(AVStream *st,
1600 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1602 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1603 &st->index_entries_allocated_size, pos,
1604 timestamp, size, distance, flags);
/* Binary-search a timestamp-sorted index.  AVSEEK_FLAG_BACKWARD selects
 * the entry at or before the wanted timestamp, otherwise at or after;
 * unless AVSEEK_FLAG_ANY is set the result is then moved to the nearest
 * keyframe entry in the chosen direction.  Returns -1 if none fits. */
1607 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1608 int64_t wanted_timestamp, int flags)
1616 //optimize appending index entries at the end
1617 if(b && entries[b-1].timestamp < wanted_timestamp)
1622 timestamp = entries[m].timestamp;
1623 if(timestamp >= wanted_timestamp)
1625 if(timestamp <= wanted_timestamp)
1628 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1630 if(!(flags & AVSEEK_FLAG_ANY)){
1631 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1632 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Public wrapper: search a stream's seek index for a timestamp. */
1641 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1644 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1645 wanted_timestamp, flags);
/* Seek via the demuxer's read_timestamp() using a binary search.
 * Cached index entries, when present, are used to narrow the initial
 * [pos_min, pos_max] window before ff_gen_search() runs. */
1648 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1650 AVInputFormat *avif= s->iformat;
1651 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1652 int64_t ts_min, ts_max, ts;
1657 if (stream_index < 0)
1660 av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1663 ts_min= AV_NOPTS_VALUE;
1664 pos_limit= -1; //gcc falsely says it may be uninitialized
1666 st= s->streams[stream_index];
1667 if(st->index_entries){
/* lower bound from the closest cached entry at or before target_ts */
1670 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1671 index= FFMAX(index, 0);
1672 e= &st->index_entries[index];
1674 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1676 ts_min= e->timestamp;
1677 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1678 pos_min, av_ts2str(ts_min));
/* upper bound from the closest cached entry at or after target_ts */
1683 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1684 assert(index < st->nb_index_entries);
1686 e= &st->index_entries[index];
1687 assert(e->timestamp >= target_ts);
1689 ts_max= e->timestamp;
1690 pos_limit= pos_max - e->min_distance;
1691 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
1692 pos_max, pos_limit, av_ts2str(ts_max));
1696 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1701 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1704 ff_read_frame_flush(s);
1705 ff_update_cur_dts(s, st, ts);
/* Generic timestamp-based search over a byte range.
 * Establishes [ts_min, ts_max] bounds via read_timestamp() when the
 * caller did not provide them, then narrows the position by linear
 * interpolation, falling back to bisection and finally linear stepping
 * when interpolation stops making progress.  On success the resolved
 * timestamp is stored in *ts_ret and the byte position is returned. */
1710 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1711 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1712 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1713 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1716 int64_t start_pos, filesize;
1719 av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
/* no lower bound yet: read the first timestamp in the file */
1721 if(ts_min == AV_NOPTS_VALUE){
1722 pos_min = s->data_offset;
1723 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1724 if (ts_min == AV_NOPTS_VALUE)
1728 if(ts_min >= target_ts){
/* no upper bound yet: probe backwards from the end of the file */
1733 if(ts_max == AV_NOPTS_VALUE){
1735 filesize = avio_size(s->pb);
1736 pos_max = filesize - 1;
1739 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1741 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1742 if (ts_max == AV_NOPTS_VALUE)
1746 int64_t tmp_pos= pos_max + 1;
1747 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1748 if(tmp_ts == AV_NOPTS_VALUE)
1752 if(tmp_pos >= filesize)
1758 if(ts_max <= target_ts){
1763 if(ts_min > ts_max){
1765 }else if(ts_min == ts_max){
/* main refinement loop: shrink [pos_min, pos_limit] toward target_ts */
1770 while (pos_min < pos_limit) {
1771 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1772 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1773 assert(pos_limit <= pos_max);
1776 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1777 // interpolate position (better than dichotomy)
1778 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1779 + pos_min - approximate_keyframe_distance;
1780 }else if(no_change==1){
1781 // bisection, if interpolation failed to change min or max pos last time
1782 pos = (pos_min + pos_limit)>>1;
1784 /* linear search if bisection failed, can only happen if there
1785 are very few or no keyframes between min/max */
1790 else if(pos > pos_limit)
1794 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1799 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1800 pos_min, pos, pos_max,
1801 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1802 pos_limit, start_pos, no_change);
1803 if(ts == AV_NOPTS_VALUE){
1804 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1807 assert(ts != AV_NOPTS_VALUE);
1808 if (target_ts <= ts) {
1809 pos_limit = start_pos - 1;
1813 if (target_ts >= ts) {
/* pick the side requested by the seek direction */
1819 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1820 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1823 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1825 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1826 av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1827 pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
/* Byte-position seek: clamp pos to [data_offset, filesize - 1] and
 * seek the I/O context there directly. */
1833 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1834 int64_t pos_min, pos_max;
1836 pos_min = s->data_offset;
1837 pos_max = avio_size(s->pb) - 1;
1839 if (pos < pos_min) pos= pos_min;
1840 else if(pos > pos_max) pos= pos_max;
1842 avio_seek(s->pb, pos, SEEK_SET);
/* Generic index-based seek.  If the target timestamp lies beyond the
 * last index entry, packets are read forward (building the index as a
 * side effect) until a keyframe past the target is seen, then the
 * index search is retried. */
1847 static int seek_frame_generic(AVFormatContext *s,
1848 int stream_index, int64_t timestamp, int flags)
1855 st = s->streams[stream_index];
1857 index = av_index_search_timestamp(st, timestamp, flags);
1859 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1862 if(index < 0 || index==st->nb_index_entries-1){
/* resume from the last indexed position (or the start of data) */
1866 if(st->nb_index_entries){
1867 assert(st->index_entries);
1868 ie= &st->index_entries[st->nb_index_entries-1];
1869 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1871 ff_update_cur_dts(s, st, ie->timestamp);
1873 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1879 read_status = av_read_frame(s, &pkt);
1880 } while (read_status == AVERROR(EAGAIN));
1881 if (read_status < 0)
1883 av_free_packet(&pkt);
1884 if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1885 if(pkt.flags & AV_PKT_FLAG_KEY)
/* give up on streams that appear to have no keyframes past the target
 * (CDG is exempted: it legitimately has very sparse keyframes) */
1887 if(nonkey++ > 1000 && st->codec->codec_id != CODEC_ID_CDGRAPHICS){
1888 av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1893 index = av_index_search_timestamp(st, timestamp, flags);
1898 ff_read_frame_flush(s);
1899 AV_NOWARN_DEPRECATED(
1900 if (s->iformat->read_seek){
1901 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1905 ie = &st->index_entries[index];
1906 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1908 ff_update_cur_dts(s, st, ie->timestamp);
/* Dispatch a seek request: byte seek, then the demuxer's own read_seek,
 * then binary search via read_timestamp(), then the generic index seek,
 * in that order of preference. */
1913 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1914 int64_t timestamp, int flags)
1919 if (flags & AVSEEK_FLAG_BYTE) {
1920 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1922 ff_read_frame_flush(s);
1923 return seek_frame_byte(s, stream_index, timestamp, flags);
1926 if(stream_index < 0){
1927 stream_index= av_find_default_stream_index(s);
1928 if(stream_index < 0)
1931 st= s->streams[stream_index];
1932 /* timestamp for default must be expressed in AV_TIME_BASE units */
1933 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1936 /* first, we try the format specific seek */
1937 AV_NOWARN_DEPRECATED(
1938 if (s->iformat->read_seek) {
1939 ff_read_frame_flush(s);
1940 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1948 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1949 ff_read_frame_flush(s);
1950 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1951 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1952 ff_read_frame_flush(s);
1953 return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public seek entry point; re-queues attached pictures on success. */
1959 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1961 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1964 queue_attached_pictures(s);
/* New-style ranged seek: prefer the demuxer's read_seek2(); otherwise
 * emulate with av_seek_frame(), choosing the seek direction from the
 * position of ts within [min_ts, max_ts] and retrying toward the range
 * boundary on failure. */
1969 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1971 if(min_ts > ts || max_ts < ts)
1974 if (s->iformat->read_seek2) {
1976 ff_read_frame_flush(s);
1977 ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1980 queue_attached_pictures(s);
1984 if(s->iformat->read_timestamp){
1985 //try to seek via read_timestamp()
1988 //Fall back to the old API if the new one is not implemented but the old is.
1989 //Note the old API has somewhat different semantics.
1990 AV_NOWARN_DEPRECATED(
1991 if (s->iformat->read_seek || 1) {
1992 int dir = (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0);
1993 int ret = av_seek_frame(s, stream_index, ts, flags | dir);
1994 if (ret<0 && ts != min_ts && max_ts != ts) {
1995 ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
1997 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
2003 // try some generic seek like seek_frame_generic() but with new ts semantics
2006 /*******************************************************/
2009 * Return TRUE if the file has an accurate duration in any stream.
2011 * @return TRUE if at least one stream (or the container) has a known duration.
2013 static int has_duration(AVFormatContext *ic)
2018 for(i = 0;i < ic->nb_streams; i++) {
2019 st = ic->streams[i];
2020 if (st->duration != AV_NOPTS_VALUE)
2023 if (ic->duration != AV_NOPTS_VALUE)
2029 * Estimate the container timings from those of the component streams.
2031 * Also computes the global bitrate if possible.
2033 static void update_stream_timings(AVFormatContext *ic)
2035 int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2036 int64_t duration, duration1, filesize;
2040 start_time = INT64_MAX;
2041 start_time_text = INT64_MAX;
2042 end_time = INT64_MIN;
2043 duration = INT64_MIN;
2044 for(i = 0;i < ic->nb_streams; i++) {
2045 st = ic->streams[i];
2046 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2047 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
/* subtitle/data streams are tracked separately: their start times are
 * often outliers and should not define the container start by default */
2048 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
2049 if (start_time1 < start_time_text)
2050 start_time_text = start_time1;
2052 start_time = FFMIN(start_time, start_time1);
2053 if (st->duration != AV_NOPTS_VALUE) {
2054 end_time1 = start_time1
2055 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2056 end_time = FFMAX(end_time, end_time1);
2059 if (st->duration != AV_NOPTS_VALUE) {
2060 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2061 duration = FFMAX(duration, duration1);
/* accept the text-stream start time only when it is close enough
 * (within one second) to the primary streams' start time */
2064 if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2065 start_time = start_time_text;
2066 else if(start_time > start_time_text)
2067 av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);
2069 if (start_time != INT64_MAX) {
2070 ic->start_time = start_time;
2071 if (end_time != INT64_MIN)
2072 duration = FFMAX(duration, end_time - start_time);
2074 if (duration != INT64_MIN && ic->duration == AV_NOPTS_VALUE) {
2075 ic->duration = duration;
2077 if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2078 /* compute the bitrate */
2079 ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
2080 (double)ic->duration;
/* Propagate the container-level start_time/duration to every stream
 * that does not have its own, rescaled to the stream's time base. */
2084 static void fill_all_stream_timings(AVFormatContext *ic)
2089 update_stream_timings(ic);
2090 for(i = 0;i < ic->nb_streams; i++) {
2091 st = ic->streams[i];
2092 if (st->start_time == AV_NOPTS_VALUE) {
2093 if(ic->start_time != AV_NOPTS_VALUE)
2094 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2095 if(ic->duration != AV_NOPTS_VALUE)
2096 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Last-resort duration estimate: derive the total bitrate from the
 * per-stream codec bitrates (unless already known) and compute each
 * stream's duration as filesize * 8 / bit_rate. */
2101 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2103 int64_t filesize, duration;
2107 /* if bit_rate is already set, we believe it */
2108 if (ic->bit_rate <= 0) {
2110 for(i=0;i<ic->nb_streams;i++) {
2111 st = ic->streams[i];
2112 if (st->codec->bit_rate > 0)
2113 bit_rate += st->codec->bit_rate;
2115 ic->bit_rate = bit_rate;
2118 /* if duration is already set, we believe it */
2119 if (ic->duration == AV_NOPTS_VALUE &&
2120 ic->bit_rate != 0) {
2121 filesize = ic->pb ? avio_size(ic->pb) : 0;
2123 for(i = 0; i < ic->nb_streams; i++) {
2124 st = ic->streams[i];
2125 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2126 if (st->duration == AV_NOPTS_VALUE)
2127 st->duration = duration;
/* Bytes read per attempt when probing the end of the file for the last
 * timestamps, and how many times the probe window may be doubled. */
2133 #define DURATION_MAX_READ_SIZE 250000
2134 #define DURATION_MAX_RETRY 3
2136 /* only usable for MPEG-PS streams */
/* Estimate stream durations by reading packets near the end of the
 * file and taking the highest pts seen, retrying with a progressively
 * larger window.  Restores the file position to old_offset when done. */
2137 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2139 AVPacket pkt1, *pkt = &pkt1;
2141 int read_size, i, ret;
2143 int64_t filesize, offset, duration;
2146 /* flush packet queue */
2147 flush_packet_queue(ic);
2149 for (i=0; i<ic->nb_streams; i++) {
2150 st = ic->streams[i];
2151 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2152 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2155 av_parser_close(st->parser);
2160 /* estimate the end time (duration) */
2161 /* XXX: may need to support wrapping */
2162 filesize = ic->pb ? avio_size(ic->pb) : 0;
2163 end_time = AV_NOPTS_VALUE;
2165 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2169 avio_seek(ic->pb, offset, SEEK_SET);
2172 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2176 ret = ff_read_packet(ic, pkt);
2177 } while(ret == AVERROR(EAGAIN));
2180 read_size += pkt->size;
2181 st = ic->streams[pkt->stream_index];
2182 if (pkt->pts != AV_NOPTS_VALUE &&
2183 (st->start_time != AV_NOPTS_VALUE ||
2184 st->first_dts != AV_NOPTS_VALUE)) {
2185 duration = end_time = pkt->pts;
2186 if (st->start_time != AV_NOPTS_VALUE)
2187 duration -= st->start_time;
2189 duration -= st->first_dts;
/* compensate for a timestamp wraparound near the end of the file */
2191 duration += 1LL<<st->pts_wrap_bits;
2193 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
2194 st->duration = duration;
2197 av_free_packet(pkt);
2199 }while( end_time==AV_NOPTS_VALUE
2200 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2201 && ++retry <= DURATION_MAX_RETRY);
2203 fill_all_stream_timings(ic);
/* restore the caller's read position and reset per-stream state */
2205 avio_seek(ic->pb, old_offset, SEEK_SET);
2206 for (i=0; i<ic->nb_streams; i++) {
2208 st->cur_dts= st->first_dts;
2209 st->last_IP_pts = AV_NOPTS_VALUE;
2210 st->reference_dts = AV_NOPTS_VALUE;
/* Pick a duration-estimation strategy: accurate PTS probing for
 * MPEG-PS/TS, otherwise the streams' own timings, otherwise a bitrate
 * guess.  Records which method was used in duration_estimation_method. */
2214 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2218 /* get the file size, if possible */
2219 if (ic->iformat->flags & AVFMT_NOFILE) {
2222 file_size = avio_size(ic->pb);
2223 file_size = FFMAX(0, file_size);
2226 if ((!strcmp(ic->iformat->name, "mpeg") ||
2227 !strcmp(ic->iformat->name, "mpegts")) &&
2228 file_size && ic->pb->seekable) {
2229 /* get accurate estimate from the PTSes */
2230 estimate_timings_from_pts(ic, old_offset);
2231 ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
2232 } else if (has_duration(ic)) {
2233 /* at least one component has timings - we use them for all
2235 fill_all_stream_timings(ic);
2236 ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
2238 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2239 /* less precise: use bitrate info */
2240 estimate_timings_from_bit_rate(ic);
2241 ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE;
2243 update_stream_timings(ic);
2247 AVStream av_unused *st;
2248 for(i = 0;i < ic->nb_streams; i++) {
2249 st = ic->streams[i];
2250 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2251 (double) st->start_time / AV_TIME_BASE,
2252 (double) st->duration / AV_TIME_BASE);
2254 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2255 (double) ic->start_time / AV_TIME_BASE,
2256 (double) ic->duration / AV_TIME_BASE,
2257 ic->bit_rate / 1000);
/* Return nonzero once enough codec parameters are known for this stream
 * (sample rate/channels/format for audio, pixel format for video, etc.)
 * to consider stream probing complete. */
2261 static int has_codec_parameters(AVStream *st)
2263 AVCodecContext *avctx = st->codec;
2265 switch (avctx->codec_type) {
2266 case AVMEDIA_TYPE_AUDIO:
2267 val = avctx->sample_rate && avctx->channels;
2268 if (!avctx->frame_size && determinable_frame_size(avctx))
/* once a decoder ran, an unset sample_fmt means "not ready yet" */
2270 if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2273 case AVMEDIA_TYPE_VIDEO:
2275 if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
2278 case AVMEDIA_TYPE_DATA:
2279 if(avctx->codec_id == CODEC_ID_NONE) return 1;
2284 return avctx->codec_id != CODEC_ID_NONE && val != 0;
/* H.264 can have a large reorder delay; require several decoded frames
 * before trusting that the decode delay is known.  Other codecs are
 * considered known immediately. */
2287 static int has_decode_delay_been_guessed(AVStream *st)
2289 return st->codec->codec_id != CODEC_ID_H264 ||
2290 st->info->nb_decoded_frames >= 6;
2293 /* Returns 1 if decoded data was returned, 0 if not, or a negative error. */
/* Open a decoder for the stream if necessary (single-threaded, so the
 * H.264 decoder extracts SPS/PPS into extradata) and decode packets
 * until the stream's codec parameters are determined. */
2294 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2297 int got_picture = 1, ret = 0;
2299 AVPacket pkt = *avpkt;
2301 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2302 AVDictionary *thread_opt = NULL;
2304 codec = st->codec->codec ? st->codec->codec :
2305 avcodec_find_decoder(st->codec->codec_id);
/* found_decoder == -1 records a permanent "no decoder / open failed" */
2308 st->info->found_decoder = -1;
2312 /* force thread count to 1 since the h264 decoder will not extract SPS
2313 * and PPS to extradata during multi-threaded decoding */
2314 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2315 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2317 av_dict_free(&thread_opt);
2319 st->info->found_decoder = -1;
2322 st->info->found_decoder = 1;
2323 } else if (!st->info->found_decoder)
2324 st->info->found_decoder = 1;
2326 if (st->info->found_decoder < 0)
/* keep decoding while input remains and parameters are still unknown */
2329 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2331 (!has_codec_parameters(st) ||
2332 !has_decode_delay_been_guessed(st) ||
2333 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2335 avcodec_get_frame_defaults(&picture);
2336 switch(st->codec->codec_type) {
2337 case AVMEDIA_TYPE_VIDEO:
2338 ret = avcodec_decode_video2(st->codec, &picture,
2339 &got_picture, &pkt);
2341 case AVMEDIA_TYPE_AUDIO:
2342 ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
2349 st->info->nb_decoded_frames++;
2355 if(!pkt.data && !got_picture)
/* Look up the container tag for a codec ID in a CODEC_ID_NONE-terminated
 * tag table; 0 is returned when the ID is not found. */
2360 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2362 while (tags->id != CODEC_ID_NONE) {
/* Reverse lookup: find the codec ID for a container tag.  A first pass
 * matches the tag exactly; a second pass matches case-insensitively
 * (via avpriv_toupper4) for FourCC-style tags. */
2370 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2373 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2374 if(tag == tags[i].tag)
2377 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2378 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2381 return CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for the given codec ID. */
2384 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2387 for(i=0; tags && tags[i]; i++){
2388 int tag= ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for the given tag. */
2394 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2397 for(i=0; tags && tags[i]; i++){
2398 enum CodecID id= ff_codec_get_id(tags[i], tag);
2399 if(id!=CODEC_ID_NONE) return id;
2401 return CODEC_ID_NONE;
/* Fill in missing chapter end times: each open-ended chapter ends at
 * the start of the next chapter that begins after it, or at the
 * container's end time when no such chapter exists. */
2404 static void compute_chapters_end(AVFormatContext *s)
2407 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2409 for (i = 0; i < s->nb_chapters; i++)
2410 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2411 AVChapter *ch = s->chapters[i];
2412 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2415 for (j = 0; j < s->nb_chapters; j++) {
2416 AVChapter *ch1 = s->chapters[j];
2417 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2418 if (j != i && next_start > ch->start && next_start < end)
/* no usable bound found: collapse the chapter to zero length */
2421 ch->end = (end == INT64_MAX) ? ch->start : end;
/* Enumerate candidate standard frame rates (returned as timebase
 * denominators scaled by 12*1001): indices below 60*12 cover
 * 1/1001-step rates; the remainder are common whole rates. */
2425 static int get_std_framerate(int i){
2426 if(i<60*12) return i*1001;
2427 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2431 * Is the time base unreliable.
2432 * This is a heuristic to balance between quick acceptance of the values in
2433 * the headers vs. some extra checks.
2434 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2435 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2436 * And there are "variable" fps files this needs to detect as well.
2438 static int tb_unreliable(AVCodecContext *c){
2439 if( c->time_base.den >= 101L*c->time_base.num
2440 || c->time_base.den < 5L*c->time_base.num
2441 /* || c->codec_tag == AV_RL32("DIVX")
2442 || c->codec_tag == AV_RL32("XVID")*/
2443 || c->codec_id == CODEC_ID_MPEG2VIDEO
2444 || c->codec_id == CODEC_ID_H264
2450 #if FF_API_FORMAT_PARAMETERS
/* Deprecated wrapper kept for ABI compatibility; forwards to
 * avformat_find_stream_info() with no options. */
2451 int av_find_stream_info(AVFormatContext *ic)
2453 return avformat_find_stream_info(ic, NULL);
2457 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2459 int i, count, ret, read_size, j;
2461 AVPacket pkt1, *pkt;
2462 int64_t old_offset = avio_tell(ic->pb);
2463 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2464 int flush_codecs = 1;
2467 av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
2469 for(i=0;i<ic->nb_streams;i++) {
2471 AVDictionary *thread_opt = NULL;
2472 st = ic->streams[i];
2474 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2475 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2476 /* if(!st->time_base.num)
2478 if(!st->codec->time_base.num)
2479 st->codec->time_base= st->time_base;
2481 //only for the split stuff
2482 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2483 st->parser = av_parser_init(st->codec->codec_id);
2485 if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
2486 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2487 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
2488 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
2490 } else if (st->need_parsing) {
2491 av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
2492 "%s, packets or times may be invalid.\n",
2493 avcodec_get_name(st->codec->codec_id));
2496 codec = st->codec->codec ? st->codec->codec :
2497 avcodec_find_decoder(st->codec->codec_id);
2499 /* force thread count to 1 since the h264 decoder will not extract SPS
2500 * and PPS to extradata during multi-threaded decoding */
2501 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2503 /* Ensure that subtitle_header is properly set. */
2504 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2505 && codec && !st->codec->codec)
2506 avcodec_open2(st->codec, codec, options ? &options[i]
2509 //try to just open decoders, in case this is enough to get parameters
2510 if (!has_codec_parameters(st)) {
2511 if (codec && !st->codec->codec)
2512 avcodec_open2(st->codec, codec, options ? &options[i]
2516 av_dict_free(&thread_opt);
2519 for (i=0; i<ic->nb_streams; i++) {
2520 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2526 if (ff_check_interrupt(&ic->interrupt_callback)){
2528 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2532 /* check if one codec still needs to be handled */
2533 for(i=0;i<ic->nb_streams;i++) {
2534 int fps_analyze_framecount = 20;
2536 st = ic->streams[i];
2537 if (!has_codec_parameters(st))
2539 /* if the timebase is coarse (like the usual millisecond precision
2540 of mkv), we need to analyze more frames to reliably arrive at
2542 if (av_q2d(st->time_base) > 0.0005)
2543 fps_analyze_framecount *= 2;
2544 if (ic->fps_probe_size >= 0)
2545 fps_analyze_framecount = ic->fps_probe_size;
2546 /* variable fps and no guess at the real fps */
2547 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2548 && st->info->duration_count < fps_analyze_framecount
2549 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2551 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2553 if (st->first_dts == AV_NOPTS_VALUE &&
2554 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2555 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2558 if (i == ic->nb_streams) {
2559 /* NOTE: if the format has no header, then we need to read
2560 some packets to get most of the streams, so we cannot
2562 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2563 /* if we found the info for all the codecs, we can stop */
2565 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2570 /* we did not get all the codec info, but we read too much data */
2571 if (read_size >= ic->probesize) {
2573 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2574 for (i = 0; i < ic->nb_streams; i++)
2575 if (!ic->streams[i]->r_frame_rate.num &&
2576 ic->streams[i]->info->duration_count <= 1)
2577 av_log(ic, AV_LOG_WARNING,
2578 "Stream #%d: not enough frames to estimate rate; "
2579 "consider increasing probesize\n", i);
2583 /* NOTE: a new stream can be added there if no header in file
2584 (AVFMTCTX_NOHEADER) */
2585 ret = read_frame_internal(ic, &pkt1);
2586 if (ret == AVERROR(EAGAIN))
2594 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2595 if ((ret = av_dup_packet(pkt)) < 0)
2596 goto find_stream_info_err;
2598 read_size += pkt->size;
2600 st = ic->streams[pkt->stream_index];
2601 if (st->codec_info_nb_frames>1) {
2603 if (st->time_base.den > 0)
2604 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
2605 if (st->avg_frame_rate.num > 0)
2606 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, (AVRational){st->avg_frame_rate.den, st->avg_frame_rate.num}, AV_TIME_BASE_Q));
2608 if (t >= ic->max_analyze_duration) {
2609 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached at %"PRId64"\n", ic->max_analyze_duration, t);
2612 st->info->codec_info_duration += pkt->duration;
2615 int64_t last = st->info->last_dts;
2617 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2618 double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2619 int64_t duration= pkt->dts - last;
2621 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2622 // av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2623 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); i++) {
2624 int framerate= get_std_framerate(i);
2625 double sdts= dts*framerate/(1001*12);
2627 int ticks= lrintf(sdts+j*0.5);
2628 double error= sdts - ticks + j*0.5;
2629 st->info->duration_error[j][0][i] += error;
2630 st->info->duration_error[j][1][i] += error*error;
2633 st->info->duration_count++;
2634 // ignore the first 4 values, they might have some random jitter
2635 if (st->info->duration_count > 3)
2636 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2638 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2639 st->info->last_dts = pkt->dts;
2641 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2642 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2643 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2644 st->codec->extradata_size= i;
2645 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2646 if (!st->codec->extradata)
2647 return AVERROR(ENOMEM);
2648 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2649 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2653 /* if still no information, we try to open the codec and to
2654 decompress the frame. We try to avoid that in most cases as
2655 it takes longer and uses more memory. For MPEG-4, we need to
2656 decompress for QuickTime.
2658 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2659 least one frame of codec data, this makes sure the codec initializes
2660 the channel configuration and does not only trust the values from the container.
2662 try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2664 st->codec_info_nb_frames++;
2669 AVPacket empty_pkt = { 0 };
2671 av_init_packet(&empty_pkt);
2673 ret = -1; /* we could not have all the codec parameters before EOF */
2674 for(i=0;i<ic->nb_streams;i++) {
2675 st = ic->streams[i];
2677 /* flush the decoders */
2678 if (st->info->found_decoder == 1) {
2680 err = try_decode_frame(st, &empty_pkt,
2681 (options && i < orig_nb_streams) ?
2682 &options[i] : NULL);
2683 } while (err > 0 && !has_codec_parameters(st));
2686 av_log(ic, AV_LOG_INFO,
2687 "decoding for stream %d failed\n", st->index);
2691 if (!has_codec_parameters(st)){
2693 avcodec_string(buf, sizeof(buf), st->codec, 0);
2694 av_log(ic, AV_LOG_WARNING,
2695 "Could not find codec parameters (%s)\n", buf);
2702 // close codecs which were opened in try_decode_frame()
2703 for(i=0;i<ic->nb_streams;i++) {
2704 st = ic->streams[i];
2705 avcodec_close(st->codec);
2707 for(i=0;i<ic->nb_streams;i++) {
2708 st = ic->streams[i];
2709 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2710 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
2711 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2712 if(ff_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
2713 st->codec->codec_tag= tag;
2716 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2717 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2718 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2719 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2720 // the check for tb_unreliable() is not completely correct, since this is not about handling
2721 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2722 // ipmovie.c produces.
2723 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
2724 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2725 if (st->info->duration_count && !st->r_frame_rate.num
2726 && tb_unreliable(st->codec) /*&&
2727 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2728 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2730 double best_error= 0.01;
2732 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); j++) {
2735 if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
2737 if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
2740 int n= st->info->duration_count;
2741 double a= st->info->duration_error[k][0][j] / n;
2742 double error= st->info->duration_error[k][1][j]/n - a*a;
2744 if(error < best_error && best_error> 0.000000001){
2746 num = get_std_framerate(j);
2749 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
2752 // do not increase frame rate by more than 1 % in order to match a standard rate.
2753 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2754 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2757 if (!st->r_frame_rate.num){
2758 if( st->codec->time_base.den * (int64_t)st->time_base.num
2759 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2760 st->r_frame_rate.num = st->codec->time_base.den;
2761 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2763 st->r_frame_rate.num = st->time_base.den;
2764 st->r_frame_rate.den = st->time_base.num;
2767 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2768 if(!st->codec->bits_per_coded_sample)
2769 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2770 // set stream disposition based on audio service type
2771 switch (st->codec->audio_service_type) {
2772 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2773 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2774 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2775 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2776 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2777 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2778 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2779 st->disposition = AV_DISPOSITION_COMMENT; break;
2780 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2781 st->disposition = AV_DISPOSITION_KARAOKE; break;
2786 estimate_timings(ic, old_offset);
2788 compute_chapters_end(ic);
2790 find_stream_info_err:
2791 for (i=0; i < ic->nb_streams; i++) {
2792 if (ic->streams[i]->codec)
2793 ic->streams[i]->codec->thread_count = 0;
2794 av_freep(&ic->streams[i]->info);
2797 av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
/* Find a program that contains stream index s, resuming the scan after
 * 'last' (NULL restarts from the first program).
 * NOTE(review): chunk is decimated — the fall-through/return-NULL path
 * of this function is not visible here. */
2801 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
2805     for (i = 0; i < ic->nb_programs; i++) {
2806         if (ic->programs[i] == last) {
             /* program matches if any of its stream indexes equals s */
2810         for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2811             if (ic->programs[i]->stream_index[j] == s)
2812                 return ic->programs[i];
/* Pick the "best" stream of the requested media type: prefers the stream
 * with the most probed frames (codec_info_nb_frames), skips impaired
 * dispositions, and optionally restricts the search to the program of
 * 'related_stream'. Returns a stream index or a negative AVERROR.
 * NOTE(review): chunk is decimated — several lines (continue statements,
 * closing braces, final return) are missing from this view. */
2818 int av_find_best_stream(AVFormatContext *ic,
2819                         enum AVMediaType type,
2820                         int wanted_stream_nb,
2822                         AVCodec **decoder_ret,
2825     int i, nb_streams = ic->nb_streams;
2826     int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2827     unsigned *program = NULL;
2828     AVCodec *decoder = NULL, *best_decoder = NULL;
     /* restrict search to the program containing related_stream, if any */
2830     if (related_stream >= 0 && wanted_stream_nb < 0) {
2831         AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
2833             program = p->stream_index;
2834             nb_streams = p->nb_stream_indexes;
2837     for (i = 0; i < nb_streams; i++) {
2838         int real_stream_index = program ? program[i] : i;
2839         AVStream *st = ic->streams[real_stream_index];
2840         AVCodecContext *avctx = st->codec;
2841         if (avctx->codec_type != type)
2843         if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
         /* skip accessibility variants when choosing a default stream */
2845         if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2848             decoder = avcodec_find_decoder(st->codec->codec_id);
2851                 ret = AVERROR_DECODER_NOT_FOUND;
         /* keep the stream with the highest probed-frame count */
2855         if (best_count >= st->codec_info_nb_frames)
2857         best_count = st->codec_info_nb_frames;
2858         ret = real_stream_index;
2859         best_decoder = decoder;
2860         if (program && i == nb_streams - 1 && ret < 0) {
2862             nb_streams = ic->nb_streams;
2863             i = 0; /* no related stream found, try again with everything */
2867         *decoder_ret = best_decoder;
2871 /*******************************************************/
/* Resume playback: delegates to the demuxer's read_play hook if present,
 * otherwise unpauses the underlying AVIOContext.
 * NOTE(review): the guard line between the two fallbacks (likely
 * "if (s->pb)") is missing from this decimated view. */
2873 int av_read_play(AVFormatContext *s)
2875     if (s->iformat->read_play)
2876         return s->iformat->read_play(s);
2878         return avio_pause(s->pb, 0);
2879     return AVERROR(ENOSYS);
/* Pause playback: mirror of av_read_play() — demuxer hook first, then
 * avio_pause(pb, 1); ENOSYS if neither is available.
 * NOTE(review): the "if (s->pb)" guard line appears to be missing from
 * this decimated view. */
2882 int av_read_pause(AVFormatContext *s)
2884     if (s->iformat->read_pause)
2885         return s->iformat->read_pause(s);
2887         return avio_pause(s->pb, 1);
2888     return AVERROR(ENOSYS);
/* Free an AVFormatContext and everything it owns: per-stream data
 * (parser, attached pic, metadata, index entries, codec context),
 * programs, chapters, context metadata and the stream array itself.
 * NOTE(review): chunk is decimated — the local 'st' declaration and some
 * closing braces are not visible here. */
2891 void avformat_free_context(AVFormatContext *s)
     /* release demuxer private options before freeing priv_data */
2897     if (s->iformat && s->iformat->priv_class && s->priv_data)
2898         av_opt_free(s->priv_data);
2900     for(i=0;i<s->nb_streams;i++) {
2901         /* free all data in a stream component */
2904         av_parser_close(st->parser);
2906         if (st->attached_pic.data)
2907             av_free_packet(&st->attached_pic);
2908         av_dict_free(&st->metadata);
2909         av_freep(&st->index_entries);
2910         av_freep(&st->codec->extradata);
2911         av_freep(&st->codec->subtitle_header);
2912         av_freep(&st->codec);
2913         av_freep(&st->priv_data);
2914         av_freep(&st->info);
2917     for(i=s->nb_programs-1; i>=0; i--) {
2918         av_dict_free(&s->programs[i]->metadata);
2919         av_freep(&s->programs[i]->stream_index);
2920         av_freep(&s->programs[i]);
2922     av_freep(&s->programs);
2923     av_freep(&s->priv_data);
2924     while(s->nb_chapters--) {
2925         av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2926         av_freep(&s->chapters[s->nb_chapters]);
2928     av_freep(&s->chapters);
2929     av_dict_free(&s->metadata);
2930     av_freep(&s->streams);
/* Deprecated wrapper (compiled only under FF_API_CLOSE_INPUT_FILE):
 * forwards to avformat_close_input(), which also NULLs the pointer. */
2934 #if FF_API_CLOSE_INPUT_FILE
2935 void av_close_input_file(AVFormatContext *s)
2937     avformat_close_input(&s);
/* Close an input context: flush queued packets, call the demuxer's
 * read_close hook, free the context. The AVIOContext is captured first —
 * it is NOT owned by us when the format is AVFMT_NOFILE or the caller
 * set AVFMT_FLAG_CUSTOM_IO.
 * NOTE(review): the lines that NULL *ps and close/free 'pb' are missing
 * from this decimated view. */
2941 void avformat_close_input(AVFormatContext **ps)
2943     AVFormatContext *s = *ps;
2944     AVIOContext *pb = (s->iformat && (s->iformat->flags & AVFMT_NOFILE)) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2946     flush_packet_queue(s);
2947     if (s->iformat && (s->iformat->read_close))
2948         s->iformat->read_close(s);
2949     avformat_free_context(s);
/* Deprecated wrapper (compiled only under FF_API_NEW_STREAM): creates a
 * stream via avformat_new_stream().
 * NOTE(review): the lines assigning 'id' to the new stream and returning
 * it are missing from this decimated view. */
2955 #if FF_API_NEW_STREAM
2956 AVStream *av_new_stream(AVFormatContext *s, int id)
2958     AVStream *st = avformat_new_stream(s, NULL);
/* Append a new AVStream to context 's', allocating its info struct and
 * a codec context seeded from codec 'c' (may be NULL). Initializes all
 * timestamp fields to "unknown" and the default time base to 90kHz MPEG.
 * NOTE(review): chunk is decimated — error-path cleanup lines and some
 * closing braces are not visible here. */
2965 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
     /* guard against overflow of the stream-array allocation below */
2971     if (s->nb_streams >= INT_MAX/sizeof(*streams))
2973     streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2976     s->streams = streams;
2978     st = av_mallocz(sizeof(AVStream));
2981     if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2985     st->info->last_dts = AV_NOPTS_VALUE;
2987     st->codec = avcodec_alloc_context3(c);
2989         /* no default bitrate if decoding */
2990         st->codec->bit_rate = 0;
2992     st->index = s->nb_streams;
2993     st->start_time = AV_NOPTS_VALUE;
2994     st->duration = AV_NOPTS_VALUE;
2995         /* we set the current DTS to 0 so that formats without any timestamps
2996            but durations get some timestamps, formats with some unknown
2997            timestamps have their first few packets buffered and the
2998            timestamps corrected before they are returned to the user */
2999     st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
3000     st->first_dts = AV_NOPTS_VALUE;
3001     st->probe_packets = MAX_PROBE_PACKETS;
3003     /* default pts setting is MPEG-like */
3004     avpriv_set_pts_info(st, 33, 1, 90000);
3005     st->last_IP_pts = AV_NOPTS_VALUE;
     /* no reordered pts known yet for any delay slot */
3006     for(i=0; i<MAX_REORDER_DELAY+1; i++)
3007         st->pts_buffer[i]= AV_NOPTS_VALUE;
3008     st->reference_dts = AV_NOPTS_VALUE;
3010     st->sample_aspect_ratio = (AVRational){0,1};
3012     s->streams[s->nb_streams++] = st;
/* Return the program with the given id, creating and registering a new
 * one (discard = AVDISCARD_NONE) if none exists yet.
 * NOTE(review): chunk is decimated — the "if (!program)" guard, the id
 * assignment and the return are not visible here. */
3016 AVProgram *av_new_program(AVFormatContext *ac, int id)
3018     AVProgram *program=NULL;
3021     av_dlog(ac, "new_program: id=0x%04x\n", id);
     /* reuse an existing program with a matching id */
3023     for(i=0; i<ac->nb_programs; i++)
3024         if(ac->programs[i]->id == id)
3025             program = ac->programs[i];
3028         program = av_mallocz(sizeof(AVProgram));
3031         dynarray_add(&ac->programs, &ac->nb_programs, program);
3032         program->discard = AVDISCARD_NONE;
/* Return the chapter with the given id, creating one if needed, and
 * update its title metadata, time base and start time.
 * NOTE(review): chunk is decimated — allocation-failure handling, the
 * 'end' assignment and the return are not visible here. */
3039 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
3041     AVChapter *chapter = NULL;
     /* reuse an existing chapter with a matching id */
3044     for(i=0; i<s->nb_chapters; i++)
3045         if(s->chapters[i]->id == id)
3046             chapter = s->chapters[i];
3049         chapter= av_mallocz(sizeof(AVChapter));
3052         dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3054     av_dict_set(&chapter->metadata, "title", title, 0);
3056     chapter->time_base= time_base;
3057     chapter->start = start;
3063 /************************************************************/
3064 /* output media file */
/* Allocate an output AVFormatContext: the muxer is the explicit
 * 'oformat', else guessed from 'format' name, else from 'filename'.
 * Also allocates and defaults the muxer's private options. Returns 0 or
 * a negative AVERROR (EINVAL for no matching format, ENOMEM on alloc).
 * NOTE(review): chunk is decimated — several guards ("if (!oformat)",
 * "if (!s->priv_data) goto nomem", labels) are not visible here. */
3066 int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
3067                                    const char *format, const char *filename)
3069     AVFormatContext *s = avformat_alloc_context();
         /* explicit format name takes priority over the filename guess */
3078         oformat = av_guess_format(format, NULL, NULL);
3080             av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
3081             ret = AVERROR(EINVAL);
3085         oformat = av_guess_format(NULL, filename, NULL);
3087             ret = AVERROR(EINVAL);
3088             av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
3095     s->oformat = oformat;
3096     if (s->oformat->priv_data_size > 0) {
3097         s->priv_data = av_mallocz(s->oformat->priv_data_size);
3100         if (s->oformat->priv_class) {
             /* install the class pointer so av_opt_* can see the options */
3101             *(const AVClass**)s->priv_data= s->oformat->priv_class;
3102             av_opt_set_defaults(s->priv_data);
3105         s->priv_data = NULL;
3108         av_strlcpy(s->filename, filename, sizeof(s->filename));
3112     av_log(s, AV_LOG_ERROR, "Out of memory\n");
3113     ret = AVERROR(ENOMEM);
3115     avformat_free_context(s);
/* Deprecated wrapper (compiled only under FF_API_ALLOC_OUTPUT_CONTEXT):
 * same as avformat_alloc_output_context2() but returns NULL on error
 * instead of an error code. */
3119 #if FF_API_ALLOC_OUTPUT_CONTEXT
3120 AVFormatContext *avformat_alloc_output_context(const char *format,
3121                                                AVOutputFormat *oformat, const char *filename)
3123     AVFormatContext *avctx;
3124     int ret = avformat_alloc_output_context2(&avctx, oformat, format, filename);
3125     return ret < 0 ? NULL : avctx;
/* Validate the stream's codec_tag against the muxer's codec_tag tables.
 * NOTE(review): chunk is decimated — the return statements associated
 * with each rule below are not visible here. */
3129 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
3131     const AVCodecTag *avctag;
3133     enum CodecID id = CODEC_ID_NONE;
3134     unsigned int tag = 0;
3137      * Check that tag + id is in the table
3138      * If neither is in the table -> OK
3139      * If tag is in the table with another id -> FAIL
3140      * If id is in the table with another tag -> FAIL unless strict < normal
     /* scan every tag table the muxer declares */
3142     for (n = 0; s->oformat->codec_tag[n]; n++) {
3143         avctag = s->oformat->codec_tag[n];
3144         while (avctag->id != CODEC_ID_NONE) {
             /* tags compare case-insensitively via toupper4 */
3145             if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
3147                 if (id == st->codec->codec_id)
3150             if (avctag->id == st->codec->codec_id)
3155     if (id != CODEC_ID_NONE)
3157     if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
/* Write the container header: apply options, sanity-check every stream
 * (sample rate, time base, dimensions, aspect ratio, codec tag), emit
 * the muxer identification metadata, call the muxer's write_header hook
 * and initialize per-stream PTS generation.
 * NOTE(review): chunk is decimated — many braces, 'continue'/'goto fail'
 * lines and the final return are not visible here. */
3162 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
3166     AVDictionary *tmp = NULL;
     /* work on a copy so the caller's dict reports unused options */
3169         av_dict_copy(&tmp, *options, 0);
3170     if ((ret = av_opt_set_dict(s, &tmp)) < 0)
3172     if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
3173         (ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3176     // some sanity checks
3177     if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
3178         av_log(s, AV_LOG_ERROR, "no streams\n");
3179         ret = AVERROR(EINVAL);
3183     for(i=0;i<s->nb_streams;i++) {
3186         switch (st->codec->codec_type) {
3187         case AVMEDIA_TYPE_AUDIO:
3188             if(st->codec->sample_rate<=0){
3189                 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
3190                 ret = AVERROR(EINVAL);
             /* derive block_align for fixed-size-sample codecs */
3193             if(!st->codec->block_align)
3194                 st->codec->block_align = st->codec->channels *
3195                     av_get_bits_per_sample(st->codec->codec_id) >> 3;
3197         case AVMEDIA_TYPE_VIDEO:
3198             if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
3199                 av_log(s, AV_LOG_ERROR, "time base not set\n");
3200                 ret = AVERROR(EINVAL);
3203             if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
3204                 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
3205                 ret = AVERROR(EINVAL);
             /* SAR mismatch beyond ~0.4% between stream and codec layer is an error */
3208             if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)
3209                && FFABS(av_q2d(st->sample_aspect_ratio) - av_q2d(st->codec->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
3211                 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
3212                        "(%d/%d) and encoder layer (%d/%d)\n",
3213                        st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3214                        st->codec->sample_aspect_ratio.num,
3215                        st->codec->sample_aspect_ratio.den);
3216                 ret = AVERROR(EINVAL);
3222         if(s->oformat->codec_tag){
3223             if(   st->codec->codec_tag
3224                && st->codec->codec_id == CODEC_ID_RAWVIDEO
3225                && (av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 || av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) ==MKTAG('r', 'a', 'w', ' '))
3226                && !validate_codec_tag(s, st)){
3227                 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi/mov, we override it here
3228                 st->codec->codec_tag= 0;
3230             if(st->codec->codec_tag){
3231                 if (!validate_codec_tag(s, st)) {
3232                     char tagbuf[32], cortag[32];
3233                     av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
3234                     av_get_codec_tag_string(cortag, sizeof(cortag), av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id));
3235                     av_log(s, AV_LOG_ERROR,
3236                            "Tag %s/0x%08x incompatible with output codec id '%d' (%s)\n",
3237                            tagbuf, st->codec->codec_tag, st->codec->codec_id, cortag);
3238                     ret = AVERROR_INVALIDDATA;
             /* no user tag: look one up from the muxer's table */
3242                 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
3245         if(s->oformat->flags & AVFMT_GLOBALHEADER &&
3246             !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
3247           av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
3250     if (!s->priv_data && s->oformat->priv_data_size > 0) {
3251         s->priv_data = av_mallocz(s->oformat->priv_data_size);
3252         if (!s->priv_data) {
3253             ret = AVERROR(ENOMEM);
3256         if (s->oformat->priv_class) {
3257             *(const AVClass**)s->priv_data= s->oformat->priv_class;
3258             av_opt_set_defaults(s->priv_data);
3259             if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3264     /* set muxer identification string */
3265     if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
3266         av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
3269     if(s->oformat->write_header){
3270         ret = s->oformat->write_header(s);
3275     /* init PTS generation */
3276     for(i=0;i<s->nb_streams;i++) {
3277         int64_t den = AV_NOPTS_VALUE;
3280         switch (st->codec->codec_type) {
3281         case AVMEDIA_TYPE_AUDIO:
3282             den = (int64_t)st->time_base.num * st->codec->sample_rate;
3284         case AVMEDIA_TYPE_VIDEO:
3285             den = (int64_t)st->time_base.num * st->codec->time_base.den;
3290         if (den != AV_NOPTS_VALUE) {
3292                 ret = AVERROR_INVALIDDATA;
             /* fractional PTS accumulator, see frac_add() in compute_pkt_fields2 */
3295             frac_init(&st->pts, 0, 0, den);
3300         av_dict_free(options);
/* Muxing-side timestamp fixup: fill in duration from the frame rate,
 * synthesize pts/dts when the encoder did not provide them (using the
 * per-stream fractional PTS accumulator and the reorder buffer), and
 * reject non-monotonic dts or pts < dts. Returns 0 or AVERROR(EINVAL).
 * NOTE(review): chunk is decimated — several closing braces, 'break'
 * statements and the final return are not visible here. */
3309 //FIXME merge with compute_pkt_fields
3310 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
3311     int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
3312     int num, den, frame_size, i;
3314     av_dlog(s, "compute_pkt_fields2: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
3315             av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), delay, pkt->size, pkt->stream_index);
3317     /* duration field */
3318     if (pkt->duration == 0) {
3319         compute_frame_duration(&num, &den, st, NULL, pkt);
3321             pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
     /* no B-frames: dts can serve as pts directly */
3325     if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
3328     //XXX/FIXME this is a temporary hack until all encoders output pts
3329     if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
3332             av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
3336 //        pkt->pts= st->cur_dts;
3337         pkt->pts= st->pts.val;
3340     //calculate dts from pts
3341     if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
3342         st->pts_buffer[0]= pkt->pts;
3343         for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
3344             st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
         /* single bubble pass keeps the buffer sorted; smallest becomes dts */
3345         for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
3346             FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
3348         pkt->dts= st->pts_buffer[0];
     /* strict muxers require strictly increasing dts; others allow equal */
3351     if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE &&
3352         ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
3353           st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
3354         av_log(s, AV_LOG_ERROR,
3355                "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
3356                st->index, av_ts2str(st->cur_dts), av_ts2str(pkt->dts));
3357         return AVERROR(EINVAL);
3359     if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3360         av_log(s, AV_LOG_ERROR, "pts (%s) < dts (%s) in stream %d\n",
3361                av_ts2str(pkt->pts), av_ts2str(pkt->dts), st->index);
3362         return AVERROR(EINVAL);
3365 //    av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%s dts2:%s\n", av_ts2str(pkt->pts), av_ts2str(pkt->dts));
3366     st->cur_dts= pkt->dts;
3367     st->pts.val= pkt->dts;
3370     switch (st->codec->codec_type) {
3371     case AVMEDIA_TYPE_AUDIO:
3372         frame_size = get_audio_frame_size(st->codec, pkt->size, 1);
3374         /* HACK/FIXME, we skip the initial 0 size packets as they are most
3375            likely equal to the encoder delay, but it would be better if we
3376            had the real timestamps from the encoder */
3377         if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3378             frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3381     case AVMEDIA_TYPE_VIDEO:
3382         frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Write a packet without interleaving: NULL packets are only meaningful
 * for AVFMT_ALLOW_FLUSH muxers; otherwise timestamps are fixed up via
 * compute_pkt_fields2() before the muxer's write_packet is called.
 * NOTE(review): chunk is decimated — the NULL-packet guard, return
 * statements and closing braces are not visible here. */
3390 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3395         if (s->oformat->flags & AVFMT_ALLOW_FLUSH)
3396             return s->oformat->write_packet(s, pkt);
3400     ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
     /* timestamp errors are fatal only for muxers that need timestamps */
3402     if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3405     ret= s->oformat->write_packet(s, pkt);
3408         s->streams[pkt->stream_index]->nb_frames++;
3412 #define CHUNK_START 0x1000
/* Insert a packet into the context's interleaving buffer, keeping the
 * list ordered by 'compare'. The packet is duplicated so the caller's
 * copy is not freed. Chunking (max_chunk_size/duration) marks chunk
 * boundaries with the CHUNK_START flag so sorted insertion does not
 * split a chunk.
 * NOTE(review): chunk is decimated — some braces and the NULL check of
 * this_pktl are not visible here. */
3414 int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3415                              int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3417     AVPacketList **next_point, *this_pktl;
3418     AVStream *st= s->streams[pkt->stream_index];
3419     int chunked= s->max_chunk_size || s->max_chunk_duration;
3421     this_pktl = av_mallocz(sizeof(AVPacketList));
3423         return AVERROR(ENOMEM);
3424     this_pktl->pkt= *pkt;
3425     pkt->destruct= NULL;             // do not free original but only the copy
3426     av_dup_packet(&this_pktl->pkt);  // duplicate the packet if it uses non-alloced memory
     /* start the sorted-insert scan after this stream's last buffered packet */
3428     if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3429         next_point = &(st->last_in_packet_buffer->next);
3431         next_point = &s->packet_buffer;
3436             uint64_t max= av_rescale_q(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base);
         /* -1U guards the unsigned comparison against size/duration 0 limits */
3437             if(   st->interleaver_chunk_size     + pkt->size     <= s->max_chunk_size-1U
3438                && st->interleaver_chunk_duration + pkt->duration <= max-1U){
3439                 st->interleaver_chunk_size     += pkt->size;
3440                 st->interleaver_chunk_duration += pkt->duration;
3443                 st->interleaver_chunk_size     =
3444                 st->interleaver_chunk_duration = 0;
3445                 this_pktl->pkt.flags |= CHUNK_START;
3449         if(compare(s, &s->packet_buffer_end->pkt, pkt)){
             /* walk forward while the new packet sorts later; never cross
                into the middle of a chunk */
3451                   && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
3452                    || !compare(s, &(*next_point)->pkt, pkt))){
3453                 next_point= &(*next_point)->next;
3458             next_point = &(s->packet_buffer_end->next);
3461         assert(!*next_point);
3463         s->packet_buffer_end= this_pktl;
3466     this_pktl->next= *next_point;
3468     s->streams[pkt->stream_index]->last_in_packet_buffer=
3469     *next_point= this_pktl;
/* Ordering predicate for dts-based interleaving: compares the two
 * packets' dts in AV_TIME_BASE units, biasing audio earlier by
 * s->audio_preload when exactly one of the streams is audio; stream
 * index breaks exact ties.
 * NOTE(review): chunk is decimated — the overflow-guard branch selecting
 * between the rescaled and the exact integer comparison is partly
 * missing from this view. */
3473 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3475     AVStream *st = s->streams[ pkt ->stream_index];
3476     AVStream *st2= s->streams[ next->stream_index];
3477     int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3479     if(s->audio_preload && ((st->codec->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codec->codec_type == AVMEDIA_TYPE_AUDIO))){
3480         int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3481         int64_t ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO);
         /* exact cross-multiplied comparison, avoids rescale rounding */
3483             ts= ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
3484               -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
3487         comp= (ts>ts2) - (ts<ts2);
3491         return pkt->stream_index < next->stream_index;
/* dts-ordered interleaver: buffer the incoming packet, then emit the
 * head of the buffer once every stream has a buffered packet, or when
 * only sparse (subtitle) streams are missing and the dts spread exceeds
 * 20 seconds, or when flushing. Returns 1 with a packet in *out, 0 when
 * more input is needed.
 * NOTE(review): chunk is decimated — 'flush=1' assignment, some braces
 * and returns are not visible here. */
3495 int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3496                                  AVPacket *pkt, int flush)
3499     int stream_count=0, noninterleaved_count=0;
3500     int64_t delta_dts_max = 0;
3504         ret = ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
     /* count streams with buffered data; subtitles may legally be sparse */
3509     for(i=0; i < s->nb_streams; i++) {
3510         if (s->streams[i]->last_in_packet_buffer) {
3512         } else if(s->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
3513             ++noninterleaved_count;
3517     if (s->nb_streams == stream_count) {
3520         for(i=0; i < s->nb_streams; i++) {
3521             if (s->streams[i]->last_in_packet_buffer) {
                 /* spread between each stream's newest dts and the buffer head */
3523                     av_rescale_q(s->streams[i]->last_in_packet_buffer->pkt.dts,
3524                                 s->streams[i]->time_base,
3526                     av_rescale_q(s->packet_buffer->pkt.dts,
3527                                 s->streams[s->packet_buffer->pkt.stream_index]->time_base,
3529                 delta_dts_max= FFMAX(delta_dts_max, delta_dts);
3532         if(s->nb_streams == stream_count+noninterleaved_count &&
3533            delta_dts_max > 20*AV_TIME_BASE) {
3534             av_log(s, AV_LOG_DEBUG, "flushing with %d noninterleaved\n", noninterleaved_count);
3538     if(stream_count && flush){
3539         pktl= s->packet_buffer;
3542         s->packet_buffer= pktl->next;
3543         if(!s->packet_buffer)
3544             s->packet_buffer_end= NULL;
         /* keep the per-stream tail pointer consistent with the removal */
3546         if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3547             s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3551         av_init_packet(out);
/* Deprecated public wrapper (compiled only under FF_API_INTERLEAVE_PACKET)
 * around the internal dts-based interleaver. */
3556 #if FF_API_INTERLEAVE_PACKET
3557 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3558                                  AVPacket *pkt, int flush)
3560     return ff_interleave_packet_per_dts(s, out, pkt, flush);
3565 * Interleave an AVPacket correctly so it can be muxed.
3566 * @param out the interleaved packet will be output here
3567 * @param in the input packet
3568 * @param flush 1 if no further packets are available as input and all
3569 * remaining packets should be output
3570 * @return 1 if a packet was output, 0 if no packet could be output,
3571 * < 0 if an error occurred
/* Dispatch interleaving to the muxer's own interleave_packet hook when
 * it has one, otherwise fall back to the generic dts-based interleaver.
 * NOTE(review): the lines between the hook call and the fallback (error
 * propagation / early return) are missing from this decimated view. */
3573 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3574     if (s->oformat->interleave_packet) {
3575         int ret = s->oformat->interleave_packet(s, out, in, flush);
3580         return ff_interleave_packet_per_dts(s, out, in, flush);
/* Interleave-and-write entry point: fix up timestamps, drop zero-sized
 * audio packets, feed the packet to the interleaver and write out every
 * packet the interleaver releases.
 * NOTE(review): chunk is decimated — the flush determination, the loop
 * construct around interleave_packet() and several returns are not
 * visible here. */
3583 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3587         AVStream *st= s->streams[ pkt->stream_index];
3589         //FIXME/XXX/HACK drop zero sized packets
3590         if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3593         av_dlog(s, "av_interleaved_write_frame size:%d dts:%s pts:%s\n",
3594                 pkt->size, av_ts2str(pkt->dts), av_ts2str(pkt->pts));
3595         if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
         /* a packet without dts cannot be interleaved deterministically */
3598         if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3599             return AVERROR(EINVAL);
3601         av_dlog(s, "av_interleaved_write_frame FLUSH\n");
3607         int ret= interleave_packet(s, &opkt, pkt, flush);
3608         if(ret<=0) //FIXME cleanup needed for ret<0 ?
3611         ret= s->oformat->write_packet(s, &opkt);
3613             s->streams[opkt.stream_index]->nb_frames++;
3615         av_free_packet(&opkt);
     /* surface any I/O error recorded on the byte context */
3620     if(s->pb && s->pb->error)
3621         return s->pb->error;
/* Finish muxing: drain the interleaver (flush=1), write remaining
 * packets, call the muxer's write_trailer hook, then free per-stream
 * and muxer-private data. Returns 0 or the first error encountered.
 * NOTE(review): chunk is decimated — the drain loop construct, 'goto
 * fail' lines and the final return are not visible here. */
3625 int av_write_trailer(AVFormatContext *s)
3631         ret= interleave_packet(s, &pkt, NULL, 1);
3632         if(ret<0) //FIXME cleanup needed for ret<0 ?
3637         ret= s->oformat->write_packet(s, &pkt);
3639             s->streams[pkt.stream_index]->nb_frames++;
3641         av_free_packet(&pkt);
3645     if(s->pb && s->pb->error)
3649     if(s->oformat->write_trailer)
3650         ret = s->oformat->write_trailer(s);
     /* prefer an I/O-layer error over the trailer return value */
3655         ret = s->pb ? s->pb->error : 0;
3656     for(i=0;i<s->nb_streams;i++) {
3657         av_freep(&s->streams[i]->priv_data);
3658         av_freep(&s->streams[i]->index_entries);
3660     if (s->oformat->priv_class)
3661         av_opt_free(s->priv_data);
3662     av_freep(&s->priv_data);
/* Query the muxer for the last output dts and its wall-clock time;
 * ENOSYS when the muxer provides no get_output_timestamp hook.
 * NOTE(review): the "return 0;" tail is missing from this decimated view. */
3666 int av_get_output_timestamp(struct AVFormatContext *s, int stream,
3667                             int64_t *dts, int64_t *wall)
3669     if (!s->oformat || !s->oformat->get_output_timestamp)
3670         return AVERROR(ENOSYS);
3671     s->oformat->get_output_timestamp(s, stream, dts, wall);
/* Add stream index 'idx' to the program identified by 'progid'; no-op
 * if the index is out of range or already present in the program.
 * NOTE(review): chunk is decimated — the 'return' after the duplicate
 * check and the realloc-failure guard are not visible here. */
3675 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3678     AVProgram *program=NULL;
3681     if (idx >= ac->nb_streams) {
3682         av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3686     for(i=0; i<ac->nb_programs; i++){
3687         if(ac->programs[i]->id != progid)
3689         program = ac->programs[i];
         /* skip if this stream is already part of the program */
3690         for(j=0; j<program->nb_stream_indexes; j++)
3691             if(program->stream_index[j] == idx)
3694         tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3697         program->stream_index = tmp;
3698         program->stream_index[program->nb_stream_indexes++] = idx;
/* Log a rate with adaptive precision: two decimals for fractional
 * values, whole numbers otherwise, and a "k" suffix for multiples of
 * 1000 (checked at centi-unit resolution via v = d*100). */
3703 static void print_fps(double d, const char *postfix){
3704     uint64_t v= lrintf(d*100);
3705     if (v% 100      ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3706     else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3707     else                  av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/* Pretty-print a metadata dictionary under 'indent', skipping the
 * "language" key (shown elsewhere) and suppressing the header when
 * language is the only entry. CR/LF inside values are rendered as a
 * space / a continuation line respectively.
 * NOTE(review): chunk is decimated — the tmp buffer declaration and the
 * inner while loop construct are not visible here. */
3710 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3712     if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3713         AVDictionaryEntry *tag=NULL;
3715         av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3716         while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3717             if(strcmp("language", tag->key)){
3718                 const char *p = tag->value;
3719                 av_log(ctx, AV_LOG_INFO, "%s  %-16s: ", indent, tag->key);
                 /* copy value up to the next CR/LF, then emit it */
3722                     size_t len = strcspn(p, "\xd\xa");
3723                     av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3724                     av_log(ctx, AV_LOG_INFO, "%s", tmp);
3726                     if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3727                     if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s  %-16s: ", indent, "");
3730                 av_log(ctx, AV_LOG_INFO, "\n");
3736 /* "user interface" functions */
/*
 * dump_stream_format(): print one "Stream #index:i" description line —
 * codec string, optional id/language, aspect ratios, frame/time-base
 * rates, disposition flags — followed by the stream's metadata.
 * NOTE(review): this listing omits several lines (buf declaration, the
 * lang NULL check before 3751, av_reduce's last argument, closing braces).
 */
3737 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3740 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3741 AVStream *st = ic->streams[i];
/* Reduce the time base for the debug print below. */
3742 int g = av_gcd(st->time_base.num, st->time_base.den);
3743 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3744 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3745 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);
3746 /* the pid is an important information, so we display it */
3747 /* XXX: add a generic system */
3748 if (flags & AVFMT_SHOW_IDS)
3749 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3751 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3752 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3753 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* Only show SAR/DAR when the stream-level SAR differs from the codec's. */
3754 if (st->sample_aspect_ratio.num && // default
3755 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3756 AVRational display_aspect_ratio;
3757 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3758 st->codec->width*st->sample_aspect_ratio.num,
3759 st->codec->height*st->sample_aspect_ratio.den,
3761 av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3762 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3763 display_aspect_ratio.num, display_aspect_ratio.den);
/* Video streams also report fps/tbr and the inverted time bases. */
3765 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3766 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3767 print_fps(av_q2d(st->avg_frame_rate), "fps");
3768 if(st->r_frame_rate.den && st->r_frame_rate.num)
3769 print_fps(av_q2d(st->r_frame_rate), "tbr");
3770 if(st->time_base.den && st->time_base.num)
3771 print_fps(1/av_q2d(st->time_base), "tbn");
3772 if(st->codec->time_base.den && st->codec->time_base.num)
3773 print_fps(1/av_q2d(st->codec->time_base), "tbc");
/* Append one annotation per set disposition bit. */
3775 if (st->disposition & AV_DISPOSITION_DEFAULT)
3776 av_log(NULL, AV_LOG_INFO, " (default)");
3777 if (st->disposition & AV_DISPOSITION_DUB)
3778 av_log(NULL, AV_LOG_INFO, " (dub)");
3779 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3780 av_log(NULL, AV_LOG_INFO, " (original)");
3781 if (st->disposition & AV_DISPOSITION_COMMENT)
3782 av_log(NULL, AV_LOG_INFO, " (comment)");
3783 if (st->disposition & AV_DISPOSITION_LYRICS)
3784 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3785 if (st->disposition & AV_DISPOSITION_KARAOKE)
3786 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3787 if (st->disposition & AV_DISPOSITION_FORCED)
3788 av_log(NULL, AV_LOG_INFO, " (forced)");
3789 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3790 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3791 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3792 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3793 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3794 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3795 av_log(NULL, AV_LOG_INFO, "\n");
3796 dump_metadata(NULL, st->metadata, " ");
/*
 * av_dump_format(): print a human-readable description of a format
 * context — header line, metadata, duration/start/bitrate, chapters,
 * programs and every stream (program streams first, then the rest).
 * 'printed' marks stream indices already shown under a program so they
 * are not repeated.
 * NOTE(review): this listing omits signature tail, hour/minute split,
 * several braces/returns, and presumably av_free(printed) at the end.
 */
3799 void av_dump_format(AVFormatContext *ic,
/* One flag byte per stream; NULL is acceptable when there are no streams. */
3805 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3806 if (ic->nb_streams && !printed)
3809 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3810 is_output ? "Output" : "Input",
3812 is_output ? ic->oformat->name : ic->iformat->name,
3813 is_output ? "to" : "from", url);
3814 dump_metadata(NULL, ic->metadata, " ");
/* Duration in HH:MM:SS.cc, or N/A when unknown. */
3816 av_log(NULL, AV_LOG_INFO, " Duration: ");
3817 if (ic->duration != AV_NOPTS_VALUE) {
3818 int hours, mins, secs, us;
3819 secs = ic->duration / AV_TIME_BASE;
3820 us = ic->duration % AV_TIME_BASE;
3825 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3826 (100 * us) / AV_TIME_BASE);
3828 av_log(NULL, AV_LOG_INFO, "N/A");
/* Start time in seconds.microseconds. */
3830 if (ic->start_time != AV_NOPTS_VALUE) {
3832 av_log(NULL, AV_LOG_INFO, ", start: ");
3833 secs = ic->start_time / AV_TIME_BASE;
3834 us = abs(ic->start_time % AV_TIME_BASE);
3835 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3836 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3838 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3840 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3842 av_log(NULL, AV_LOG_INFO, "N/A");
3844 av_log(NULL, AV_LOG_INFO, "\n");
/* Chapters with start/end converted to seconds via their time base. */
3846 for (i = 0; i < ic->nb_chapters; i++) {
3847 AVChapter *ch = ic->chapters[i];
3848 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3849 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3850 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3852 dump_metadata(NULL, ch->metadata, " ");
/* Programs: list each program's streams and mark them as printed. */
3854 if(ic->nb_programs) {
3855 int j, k, total = 0;
3856 for(j=0; j<ic->nb_programs; j++) {
3857 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3859 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3860 name ? name->value : "");
3861 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3862 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3863 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3864 printed[ic->programs[j]->stream_index[k]] = 1;
3866 total += ic->programs[j]->nb_stream_indexes;
3868 if (total < ic->nb_streams)
3869 av_log(NULL, AV_LOG_INFO, " No Program\n");
/* Finally, the streams not covered by any program. */
3871 for(i=0;i<ic->nb_streams;i++)
3873 dump_stream_format(ic, i, index, is_output);
/**
 * Get the current wallclock time in microseconds since the Unix epoch.
 *
 * @return current time, in microseconds
 */
int64_t av_gettime(void)
{
    struct timeval now;

    gettimeofday(&now, NULL);

    /* Widen the seconds before scaling so the multiply cannot overflow. */
    int64_t usec = (int64_t)now.tv_sec * 1000000;
    return usec + now.tv_usec;
}
/**
 * Get the current time as an NTP-epoch value in microseconds: the
 * wallclock truncated to millisecond precision (the /1000*1000 round
 * trip) plus NTP_OFFSET_US, defined elsewhere (the 1900..1970 offset).
 */
3885 uint64_t ff_ntp_time(void)
3887 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/*
 * av_get_frame_filename(): expand a "%0Nd"/"%d" pattern in 'path' with
 * 'number' into 'buf'. Exactly one %d-style directive must be present.
 * NOTE(review): this listing omits most of the scanning loop ('%'
 * detection, the '%%' escape, copy of ordinary characters, the fail
 * path); comments cover only the visible lines.
 */
3890 int av_get_frame_filename(char *buf, int buf_size,
3891 const char *path, int number)
3894 char *q, buf1[20], c;
3895 int nd, len, percentd_found;
/* Accumulate the zero-pad width digits following '%'. */
3907 while (isdigit(*p)) {
3908 nd = nd * 10 + *p++ - '0';
3911 } while (isdigit(c));
/* Render the number with the requested zero padding. */
3920 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
/* Bounds check before appending the rendered number. */
3922 if ((q - buf + len) > buf_size - 1)
3924 memcpy(q, buf1, len);
/* Bounds check for copying a single ordinary character. */
3932 if ((q - buf) < buf_size - 1)
/* Pattern without any %d directive is an error. */
3936 if (!percentd_found)
/*
 * hex_dump_internal(): dump a buffer 16 bytes per row — hex columns
 * followed by an ASCII column with non-printable bytes replaced.
 * Output goes to FILE *f when given, otherwise to av_log at 'level'.
 * NOTE(review): this listing omits the declarations, the 'len' clamp for
 * the final partial row, and the ASCII-column emission lines.
 */
3945 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
/* Route output either to the stream or the log, chosen once per call. */
3949 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3951 for(i=0;i<size;i+=16) {
3958 PRINT(" %02x", buf[i+j]);
/* Second pass over the row for the printable-ASCII column. */
3963 for(j=0;j<len;j++) {
3965 if (c < ' ' || c > '~')
/** Public wrapper: hex-dump a buffer to a stdio stream. */
3974 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3976 hex_dump_internal(NULL, f, 0, buf, size);
/** Public wrapper: hex-dump a buffer through av_log at the given level. */
3979 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3981 hex_dump_internal(avcl, NULL, level, buf, size);
/*
 * pkt_dump_internal(): print an AVPacket summary (stream index, keyframe
 * flag, duration, dts, pts, size, optional payload hex dump), timestamps
 * converted to seconds via 'time_base'. Output goes to FILE *f when
 * given, otherwise to av_log at 'level'.
 * NOTE(review): this listing omits the "NOPTS" branches and the
 * dump_payload check guarding the hex dump.
 */
3984 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3987 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3988 PRINT("stream #%d:\n", pkt->stream_index);
3989 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3990 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3991 /* DTS is _always_ valid after av_read_frame() */
3993 if (pkt->dts == AV_NOPTS_VALUE)
3996 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3997 /* PTS may not be known if B-frames are present. */
3999 if (pkt->pts == AV_NOPTS_VALUE)
4002 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
4004 PRINT(" size=%d\n", pkt->size);
/* Payload dump reuses the generic hex dumper. */
4007 av_hex_dump(f, pkt->data, pkt->size);
/** Deprecated-style wrapper: dump a packet to a stream, assuming the
 *  packet's timestamps are in 1/AV_TIME_BASE units. */
4011 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
4013 AVRational tb = { 1, AV_TIME_BASE };
4014 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
/** Dump a packet to a stream using its stream's own time base. */
4018 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
4020 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
/** Deprecated-style wrapper: dump a packet through av_log, assuming
 *  timestamps in 1/AV_TIME_BASE units. */
4024 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
4026 AVRational tb = { 1, AV_TIME_BASE };
4027 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
/** Dump a packet through av_log using its stream's own time base. */
4031 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
4034 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/*
 * av_url_split(): split a URL of the general form
 *   proto://user:pass@host:port/path?query
 * into its components. Any output pointer may receive an empty string /
 * -1 port when the component is absent. All copies are bounded by the
 * caller-supplied sizes.
 * NOTE(review): this listing omits the "int *port_ptr" parameter line,
 * the "p++; skip ':' and '//'" lines after 4054, and the "ls != p" guard
 * mentioned by the comment at 4073; comments cover visible lines only.
 */
4037 void av_url_split(char *proto, int proto_size,
4038 char *authorization, int authorization_size,
4039 char *hostname, int hostname_size,
4041 char *path, int path_size,
4044 const char *p, *ls, *at, *col, *brk;
/* Default every output to "absent" before parsing. */
4046 if (port_ptr) *port_ptr = -1;
4047 if (proto_size > 0) proto[0] = 0;
4048 if (authorization_size > 0) authorization[0] = 0;
4049 if (hostname_size > 0) hostname[0] = 0;
4050 if (path_size > 0) path[0] = 0;
4052 /* parse protocol */
4053 if ((p = strchr(url, ':'))) {
4054 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
4059 /* no protocol means plain filename */
4060 av_strlcpy(path, url, path_size);
4064 /* separate path from hostname */
4065 ls = strchr(p, '/');
4067 ls = strchr(p, '?');
4069 av_strlcpy(path, ls, path_size);
4071 ls = &p[strlen(p)]; // XXX
4073 /* the rest is hostname, use that to parse auth/port */
4075 /* authorization (user[:pass]@hostname) */
4076 if ((at = strchr(p, '@')) && at < ls) {
4077 av_strlcpy(authorization, p,
4078 FFMIN(authorization_size, at + 1 - p))
4079 p = at + 1; /* skip '@' */
/* Bracketed numeric IPv6 literal: [host]:port */
4082 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
4084 av_strlcpy(hostname, p + 1,
4085 FFMIN(hostname_size, brk - p));
4086 if (brk[1] == ':' && port_ptr)
4087 *port_ptr = atoi(brk + 2);
/* Plain host:port */
4088 } else if ((col = strchr(p, ':')) && col < ls) {
4089 av_strlcpy(hostname, p,
4090 FFMIN(col + 1 - p, hostname_size));
4091 if (port_ptr) *port_ptr = atoi(col + 1);
/* Host without port */
4093 av_strlcpy(hostname, p,
4094 FFMIN(ls + 1 - p, hostname_size));
/**
 * Render 's' bytes of 'src' as hexadecimal text into 'buff'.
 * Writes exactly 2*s characters and does NOT NUL-terminate; the caller
 * must provide at least 2*s bytes and terminate the string itself.
 *
 * @param buff      destination character buffer (>= 2*s bytes)
 * @param src       bytes to encode
 * @param s         number of input bytes
 * @param lowercase nonzero to emit a-f instead of A-F
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    static const char digits_uc[16] = { '0', '1', '2', '3', '4', '5',
                                        '6', '7', '8', '9', 'A', 'B',
                                        'C', 'D', 'E', 'F' };
    static const char digits_lc[16] = { '0', '1', '2', '3', '4', '5',
                                        '6', '7', '8', '9', 'a', 'b',
                                        'c', 'd', 'e', 'f' };
    const char *digits = lowercase ? digits_lc : digits_uc;
    int i;

    for (i = 0; i < s; i++) {
        /* high nibble first, then low nibble */
        buff[2 * i]     = digits[src[i] >> 4];
        buff[2 * i + 1] = digits[src[i] & 0x0F];
    }

    return buff;
}
/*
 * ff_hex_to_data(): parse a whitespace-tolerant hexadecimal string into
 * bytes. Visible logic: skip SPACE_CHARS runs, then map each character
 * through toupper() into a nibble value.
 * NOTE(review): this listing omits the declarations, the nibble pairing
 * into bytes, the terminator check, and the return value (presumably the
 * number of bytes written — confirm against full source).
 */
4119 int ff_hex_to_data(uint8_t *data, const char *p)
4126 p += strspn(p, SPACE_CHARS);
/* Cast to unsigned char before toupper() avoids UB on negative chars. */
4129 c = toupper((unsigned char) *p++);
4130 if (c >= '0' && c <= '9')
4132 else if (c >= 'A' && c <= 'F')
#if FF_API_SET_PTS_INFO
/** Deprecated public wrapper kept for API compatibility; forwards to
 *  avpriv_set_pts_info(). */
4148 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
4149 unsigned int pts_num, unsigned int pts_den)
4151 avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
/*
 * avpriv_set_pts_info(): set a stream's time base (pts_num/pts_den,
 * reduced to lowest terms and clamped to INT_MAX) and its timestamp
 * wrap-bit count. Invalid (non-positive) reduced time bases are logged
 * and ignored, leaving the stream unchanged.
 * NOTE(review): the new_tb declaration and the else/return lines are
 * omitted from this listing.
 */
4155 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
4156 unsigned int pts_num, unsigned int pts_den)
/* av_reduce() returns nonzero when the reduction was exact. */
4159 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
4160 if(new_tb.num != pts_num)
4161 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
4163 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
/* Refuse degenerate time bases rather than storing them. */
4165 if(new_tb.num <= 0 || new_tb.den <= 0) {
4166 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase %d/%d for st:%d\n", new_tb.num, new_tb.den, s->index);
4169 s->time_base = new_tb;
4170 s->pts_wrap_bits = pts_wrap_bits;
/*
 * ff_url_join(): assemble "proto://auth@host:port<fmt...>" into 'str',
 * bracket-escaping numeric IPv6 hosts when networking support is
 * compiled in. The trailing fmt is printf-style varargs.
 * NOTE(review): this listing omits str[0]=0 initialization, freeaddrinfo,
 * the port>=0 guard visible in full source, and va_start/va_end around
 * the vsnprintf — confirm against the complete file.
 */
4173 int ff_url_join(char *str, int size, const char *proto,
4174 const char *authorization, const char *hostname,
4175 int port, const char *fmt, ...)
4178 struct addrinfo hints = { 0 }, *ai;
4183 av_strlcatf(str, size, "%s://", proto);
4184 if (authorization && authorization[0])
4185 av_strlcatf(str, size, "%s@", authorization);
4186 #if CONFIG_NETWORK && defined(AF_INET6)
4187 /* Determine if hostname is a numerical IPv6 address,
4188 * properly escape it within [] in that case. */
4189 hints.ai_flags = AI_NUMERICHOST;
4190 if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
4191 if (ai->ai_family == AF_INET6) {
4192 av_strlcat(str, "[", size);
4193 av_strlcat(str, hostname, size);
4194 av_strlcat(str, "]", size);
4196 av_strlcat(str, hostname, size);
4201 /* Not an IPv6 address, just output the plain string. */
4202 av_strlcat(str, hostname, size);
4205 av_strlcatf(str, size, ":%d", port);
/* Append the formatted tail into whatever space remains. */
4208 int len = strlen(str);
4211 vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
/*
 * ff_write_chained(): forward a packet read from demuxer context 'src'
 * to stream 'dst_stream' of muxer context 'dst', rescaling pts/dts from
 * the source stream's time base to the destination stream's.
 * NOTE(review): the omitted lines presumably declare local_pkt and copy
 * *pkt into it before the fields below are overwritten — confirm.
 */
4217 int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
4218 AVFormatContext *src)
4223 local_pkt.stream_index = dst_stream;
/* Rescale timestamps only when they are set. */
4224 if (pkt->pts != AV_NOPTS_VALUE)
4225 local_pkt.pts = av_rescale_q(pkt->pts,
4226 src->streams[pkt->stream_index]->time_base,
4227 dst->streams[dst_stream]->time_base);
4228 if (pkt->dts != AV_NOPTS_VALUE)
4229 local_pkt.dts = av_rescale_q(pkt->dts,
4230 src->streams[pkt->stream_index]->time_base,
4231 dst->streams[dst_stream]->time_base);
4232 return av_write_frame(dst, &local_pkt);
/*
 * ff_parse_key_value(): parse a comma/whitespace-separated list of
 * key=value pairs, invoking 'callback_get_buf' per key to obtain a
 * destination buffer, then copying the (optionally double-quoted) value
 * into it. dest_end reserves one byte for a terminator.
 * NOTE(review): this listing omits the outer loop, the 'key' assignment,
 * the quote/escape handling details, and the NUL termination; comments
 * describe only the visible lines.
 */
4235 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
4238 const char *ptr = str;
4240 /* Parse key=value pairs. */
4243 char *dest = NULL, *dest_end;
4244 int key_len, dest_len = 0;
4246 /* Skip whitespace and potential commas. */
4247 while (*ptr && (isspace(*ptr) || *ptr == ','))
/* A pair without '=' terminates parsing. */
4254 if (!(ptr = strchr(key, '=')))
4257 key_len = ptr - key;
/* Ask the caller where (and whether) to store this key's value. */
4259 callback_get_buf(context, key, key_len, &dest, &dest_len);
4260 dest_end = dest + dest_len - 1;
/* Quoted value: copy until the closing '"'. */
4264 while (*ptr && *ptr != '\"') {
4268 if (dest && dest < dest_end)
4272 if (dest && dest < dest_end)
/* Unquoted value: copy until whitespace or ','. */
4280 for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
4281 if (dest && dest < dest_end)
4289 int ff_find_stream_index(AVFormatContext *s, int id)
4292 for (i = 0; i < s->nb_streams; i++) {
4293 if (s->streams[i]->id == id)
/*
 * ff_make_absolute_url(): resolve URL 'rel' against 'base' into 'buf':
 * server-absolute paths replace everything after the host, absolute URLs
 * are copied verbatim, and relative paths are appended to the base's
 * directory with "../" segments popping path components.
 * NOTE(review): this listing omits the 'rel' parameter line, the sep
 * NUL-terminations, returns, and the loop bodies between the comments.
 */
4299 void ff_make_absolute_url(char *buf, int size, const char *base,
4303 /* Absolute path, relative to the current server */
4304 if (base && strstr(base, "://") && rel[0] == '/') {
4306 av_strlcpy(buf, base, size);
4307 sep = strstr(buf, "://");
/* Find the first '/' after "://", i.e. the end of the host part. */
4310 sep = strchr(sep, '/');
4314 av_strlcat(buf, rel, size);
4317 /* If rel actually is an absolute url, just copy it */
4318 if (!base || strstr(rel, "://") || rel[0] == '/') {
4319 av_strlcpy(buf, rel, size);
/* Relative path: start from the base URL... */
4323 av_strlcpy(buf, base, size);
4324 /* Remove the file name from the base url */
4325 sep = strrchr(buf, '/');
/* ...and pop one directory per leading "../" in rel. */
4330 while (av_strstart(rel, "../", NULL) && sep) {
4331 /* Remove the path delimiter at the end */
4333 sep = strrchr(buf, '/');
4334 /* If the next directory name to pop off is "..", break here */
4335 if (!strcmp(sep ? &sep[1] : buf, "..")) {
4336 /* Readd the slash we just removed */
4337 av_strlcat(buf, "/", size);
4340 /* Cut off the directory name */
4347 av_strlcat(buf, rel, size);
/*
 * ff_iso8601_to_unix_time(): parse an ISO 8601 date/time string
 * ("YYYY-MM-DD hh:mm:ss" or "YYYY-MM-DDThh:mm:ss") into a Unix timestamp
 * via av_timegm(). The 'T' form (time2) takes precedence when both
 * patterns match. On systems without strptime() only a warning is
 * logged.
 * NOTE(review): the format strings contain spaces around the dashes —
 * this appears to rely on strptime() treating format whitespace as
 * optional; confirm against the platform's strptime behavior. The
 * HAVE_STRPTIME preprocessor guards and returns are omitted from this
 * listing.
 */
4350 int64_t ff_iso8601_to_unix_time(const char *datestr)
4353 struct tm time1 = {0}, time2 = {0};
4355 ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
4356 ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
4358 return av_timegm(&time2);
4360 return av_timegm(&time1);
4362 av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert "
4363 "the date string.\n");
/*
 * avformat_query_codec(): ask whether muxer 'ofmt' can store codec_id at
 * the given strictness level. Order of checks: the muxer's own
 * query_codec callback, then its codec tag table, then its default
 * video/audio/subtitle codec ids. Falls through to
 * AVERROR_PATCHWELCOME when nothing answers.
 * NOTE(review): the surrounding braces / "return 1;" after the default-id
 * comparison are omitted from this listing.
 */
4368 int avformat_query_codec(AVOutputFormat *ofmt, enum CodecID codec_id, int std_compliance)
4371 if (ofmt->query_codec)
4372 return ofmt->query_codec(codec_id, std_compliance);
/* !! normalizes the tag lookup to a strict 0/1 answer. */
4373 else if (ofmt->codec_tag)
4374 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4375 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
4376 codec_id == ofmt->subtitle_codec)
4379 return AVERROR_PATCHWELCOME;
/*
 * avformat_network_init(): global network initialization. Visible part:
 * marks networking as globally initialized, then runs ff_network_init()
 * and propagates its failure.
 * NOTE(review): the CONFIG_NETWORK preprocessor guards and the returns
 * are omitted from this listing.
 */
4382 int avformat_network_init(void)
4386 ff_network_inited_globally = 1;
4387 if ((ret = ff_network_init()) < 0)
/* Global counterpart to avformat_network_init(); the body is omitted
 * from this listing. */
4394 int avformat_network_deinit(void)
/*
 * ff_add_param_change(): attach an AV_PKT_DATA_PARAM_CHANGE side-data
 * block to 'pkt'. Layout (little endian): a 32-bit flags word, followed
 * only by the fields whose flag is set — channel count (le32), channel
 * layout (le64), sample rate (le32), width+height (2x le32). Zero-valued
 * parameters are treated as "unchanged" and omitted.
 * NOTE(review): this listing omits the size accumulation lines, the
 * initial validity check that returns EINVAL, and the final return.
 */
4403 int ff_add_param_change(AVPacket *pkt, int32_t channels,
4404 uint64_t channel_layout, int32_t sample_rate,
4405 int32_t width, int32_t height)
4411 return AVERROR(EINVAL);
4414 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4416 if (channel_layout) {
4418 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4422 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4424 if (width || height) {
4426 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
/* Allocate the side data and serialize flags + selected fields. */
4428 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4430 return AVERROR(ENOMEM);
4431 bytestream_put_le32(&data, flags);
4433 bytestream_put_le32(&data, channels);
4435 bytestream_put_le64(&data, channel_layout);
4437 bytestream_put_le32(&data, sample_rate);
4438 if (width || height) {
4439 bytestream_put_le32(&data, width);
4440 bytestream_put_le32(&data, height);
/** Public accessor for the RIFF/BMP (video) codec tag table. */
4445 const struct AVCodecTag *avformat_get_riff_video_tags(void)
4447 return ff_codec_bmp_tags;
/** Public accessor for the RIFF/WAV (audio) codec tag table. */
4449 const struct AVCodecTag *avformat_get_riff_audio_tags(void)
4451 return ff_codec_wav_tags;
4454 AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4456 AVRational undef = {0, 1};
4457 AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4458 AVRational codec_sample_aspect_ratio = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
4459 AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
4461 av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4462 stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4463 if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4464 stream_sample_aspect_ratio = undef;
4466 av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4467 frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4468 if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4469 frame_sample_aspect_ratio = undef;
4471 if (stream_sample_aspect_ratio.num)
4472 return stream_sample_aspect_ratio;
4474 return frame_sample_aspect_ratio;