2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "avio_internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavcodec/raw.h"
29 #include "libavcodec/bytestream.h"
30 #include "libavutil/avassert.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/dict.h"
33 #include "libavutil/pixdesc.h"
36 #include "libavutil/avassert.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/parseutils.h"
41 #include "audiointerleave.h"
55 * various utility functions for use within FFmpeg
58 unsigned avformat_version(void)
60 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
61 return LIBAVFORMAT_VERSION_INT;
64 const char *avformat_configuration(void)
66 return FFMPEG_CONFIGURATION;
69 const char *avformat_license(void)
71 #define LICENSE_PREFIX "libavformat license: "
72 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Base used for internally generated ("relative") timestamps; chosen near
 * INT64_MAX so real stream timestamps can never collide with it. */
#define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))

/**
 * Return nonzero if ts is an internally generated relative timestamp,
 * i.e. it lies within 2^48 ticks below RELATIVE_TS_BASE or above it.
 */
static int is_relative(int64_t ts) {
    return ts > (RELATIVE_TS_BASE - (1LL<<48));
}
81 /* fraction handling */
84 * f = val + (num / den) + 0.5.
86 * 'num' is normalized so that it is such as 0 <= num < den.
88 * @param f fractional number
89 * @param val integer value
90 * @param num must be >= 0
91 * @param den must be >= 1
93 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
106 * Fractional addition to f: f = f + (incr / f->den).
108 * @param f fractional number
109 * @param incr increment, can be positive or negative
111 static void frac_add(AVFrac *f, int64_t incr)
124 } else if (num >= den) {
131 /** head of registered input format linked list */
132 static AVInputFormat *first_iformat = NULL;
133 /** head of registered output format linked list */
134 static AVOutputFormat *first_oformat = NULL;
136 AVInputFormat *av_iformat_next(AVInputFormat *f)
138 if(f) return f->next;
139 else return first_iformat;
142 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
144 if(f) return f->next;
145 else return first_oformat;
148 void av_register_input_format(AVInputFormat *format)
152 while (*p != NULL) p = &(*p)->next;
157 void av_register_output_format(AVOutputFormat *format)
161 while (*p != NULL) p = &(*p)->next;
/**
 * Return 1 if the extension of filename matches one of the entries in the
 * comma-separated list extensions, 0 otherwise.  Comparison is
 * case-insensitive.
 */
int av_match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if (!filename)
        return 0;

    ext = strrchr(filename, '.');
    if (ext) {
        ext++;                  /* skip the '.' itself */
        p = extensions;
        for (;;) {
            /* copy one candidate extension into ext1, bounded */
            q = ext1;
            while (*p != '\0' && *p != ',' && q - ext1 < sizeof(ext1) - 1)
                *q++ = *p++;
            *q = '\0';
            if (!av_strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++;                /* skip the ',' separator */
        }
    }
    return 0;
}
/**
 * Return 1 if name matches one of the entries in the comma-separated list
 * names (case-insensitive), 0 otherwise.
 */
static int match_format(const char *name, const char *names)
{
    const char *p;
    int len, namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        /* compare against the longer of the two lengths so that a plain
         * prefix match is not accepted */
        len = FFMAX(p - names, namelen);
        if (!av_strncasecmp(name, names, len))
            return 1;
        names = p + 1;
    }
    /* last (or only) entry in the list */
    return !av_strcasecmp(name, names);
}
211 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
212 const char *mime_type)
214 AVOutputFormat *fmt = NULL, *fmt_found;
215 int score_max, score;
217 /* specific test for image sequences */
218 #if CONFIG_IMAGE2_MUXER
219 if (!short_name && filename &&
220 av_filename_number_test(filename) &&
221 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
222 return av_guess_format("image2", NULL, NULL);
225 /* Find the proper file type. */
228 while ((fmt = av_oformat_next(fmt))) {
230 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
232 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
234 if (filename && fmt->extensions &&
235 av_match_ext(filename, fmt->extensions)) {
238 if (score > score_max) {
246 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
247 const char *filename, const char *mime_type, enum AVMediaType type){
248 if(type == AVMEDIA_TYPE_VIDEO){
249 enum CodecID codec_id= CODEC_ID_NONE;
251 #if CONFIG_IMAGE2_MUXER
252 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
253 codec_id= ff_guess_image2_codec(filename);
256 if(codec_id == CODEC_ID_NONE)
257 codec_id= fmt->video_codec;
259 }else if(type == AVMEDIA_TYPE_AUDIO)
260 return fmt->audio_codec;
261 else if (type == AVMEDIA_TYPE_SUBTITLE)
262 return fmt->subtitle_codec;
264 return CODEC_ID_NONE;
267 AVInputFormat *av_find_input_format(const char *short_name)
269 AVInputFormat *fmt = NULL;
270 while ((fmt = av_iformat_next(fmt))) {
271 if (match_format(short_name, fmt->name))
277 int ffio_limit(AVIOContext *s, int size)
280 int64_t remaining= s->maxsize - avio_tell(s);
281 if(remaining < size){
282 int64_t newsize= avio_size(s);
283 if(!s->maxsize || s->maxsize<newsize)
284 s->maxsize= newsize - !newsize;
285 remaining= s->maxsize - avio_tell(s);
286 remaining= FFMAX(remaining, 0);
289 if(s->maxsize>=0 && remaining+1 < size){
290 av_log(0, AV_LOG_ERROR, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
297 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
300 size= ffio_limit(s, size);
302 ret= av_new_packet(pkt, size);
307 pkt->pos= avio_tell(s);
309 ret= avio_read(s, pkt->data, size);
313 av_shrink_packet(pkt, ret);
318 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
323 return av_get_packet(s, pkt, size);
324 old_size = pkt->size;
325 ret = av_grow_packet(pkt, size);
328 ret = avio_read(s, pkt->data + old_size, size);
329 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
/**
 * Return 1 if filename contains a pattern that av_get_frame_filename()
 * can expand with a frame number (e.g. "img%03d.png"), 0 otherwise.
 */
int av_filename_number_test(const char *filename)
{
    char buf[20];
    return filename &&
           (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
}
/*
 * Probe all registered demuxers against pd and return the highest-scoring
 * one; *score_ret receives the best score.  A leading ID3v2 tag is skipped
 * before probing so the real payload is examined.
 * NOTE(review): this block appears truncated by extraction (several
 * original lines are missing) — comments only, code untouched.
 */
340 AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
342     AVProbeData lpd = *pd;
343     AVInputFormat *fmt1 = NULL, *fmt;
344     int score, nodat = 0, score_max=0;
/* skip an ID3v2 tag if the remaining data is still worth probing */
346     if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
347         int id3len = ff_id3v2_tag_len(lpd.buf);
348         if (lpd.buf_size > id3len + 16) {
350             lpd.buf_size -= id3len;
356     while ((fmt1 = av_iformat_next(fmt1))) {
/* only consider formats whose NOFILE flag matches the opened state */
357         if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
360         if (fmt1->read_probe) {
361             score = fmt1->read_probe(&lpd);
/* extension match gives a small floor score even if read_probe scored 0 */
362             if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
363                 score = FFMAX(score, nodat ? AVPROBE_SCORE_MAX/4-1 : 1);
364         } else if (fmt1->extensions) {
365             if (av_match_ext(lpd.filename, fmt1->extensions)) {
369         if (score > score_max) {
372         }else if (score == score_max)
375     *score_ret= score_max;
380 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
383 AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
384 if(score_ret > *score_max){
385 *score_max= score_ret;
391 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
393 return av_probe_input_format2(pd, is_opened, &score);
/*
 * Probe the buffered data of a stream that needs probing and, when a
 * known raw format is detected, set the stream's codec id and type from
 * the table below.  Returns the probe score.
 * NOTE(review): block appears truncated by extraction — comments only.
 */
396 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
398     static const struct {
399         const char *name; enum CodecID id; enum AVMediaType type;
/* demuxer-name -> codec id/type mapping for probeable raw formats */
401         { "aac"      , CODEC_ID_AAC       , AVMEDIA_TYPE_AUDIO },
402         { "ac3"      , CODEC_ID_AC3       , AVMEDIA_TYPE_AUDIO },
403         { "dts"      , CODEC_ID_DTS       , AVMEDIA_TYPE_AUDIO },
404         { "eac3"     , CODEC_ID_EAC3      , AVMEDIA_TYPE_AUDIO },
405         { "h264"     , CODEC_ID_H264      , AVMEDIA_TYPE_VIDEO },
406         { "loas"     , CODEC_ID_AAC_LATM  , AVMEDIA_TYPE_AUDIO },
407         { "m4v"      , CODEC_ID_MPEG4     , AVMEDIA_TYPE_VIDEO },
408         { "mp3"      , CODEC_ID_MP3       , AVMEDIA_TYPE_AUDIO },
409         { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
413     AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
417             av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
418                     pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
419         for (i = 0; fmt_id_type[i].name; i++) {
420             if (!strcmp(fmt->name, fmt_id_type[i].name)) {
421                 st->codec->codec_id   = fmt_id_type[i].id;
422                 st->codec->codec_type = fmt_id_type[i].type;
430 /************************************************************/
431 /* input media file */
433 int av_demuxer_open(AVFormatContext *ic){
436 if (ic->iformat->read_header) {
437 err = ic->iformat->read_header(ic);
442 if (ic->pb && !ic->data_offset)
443 ic->data_offset = avio_tell(ic->pb);
449 /** size of probe buffer, for guessing file type from file contents */
450 #define PROBE_BUF_MIN 2048
451 #define PROBE_BUF_MAX (1<<20)
/*
 * Probe the beginning of pb, doubling the probe window each iteration
 * until a format is detected or max_probe_size is reached, then rewind
 * the stream by reusing the probe buffer.
 * NOTE(review): block appears truncated by extraction — comments only.
 */
453 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
454                           const char *filename, void *logctx,
455                           unsigned int offset, unsigned int max_probe_size)
457     AVProbeData pd = { filename ? filename : "", NULL, -offset };
458     unsigned char *buf = NULL;
459     int ret = 0, probe_size;
461     if (!max_probe_size) {
462         max_probe_size = PROBE_BUF_MAX;
463     } else if (max_probe_size > PROBE_BUF_MAX) {
464         max_probe_size = PROBE_BUF_MAX;
465     } else if (max_probe_size < PROBE_BUF_MIN) {
466         return AVERROR(EINVAL);
469     if (offset >= max_probe_size) {
470         return AVERROR(EINVAL);
/* grow the probe window geometrically; last pass may accept score 0 */
473     for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
474         probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
475         int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
476         int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
479         if (probe_size < offset) {
483         /* read probe data */
484         buftmp = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
487             return AVERROR(ENOMEM);
490         if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
491             /* fail if error was not end of file, otherwise, lower score */
492             if (ret != AVERROR_EOF) {
497             ret = 0;            /* error was end of file, nothing read */
500         pd.buf = &buf[offset];
502         memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
504         /* guess file format */
505         *fmt = av_probe_input_format2(&pd, 1, &score);
507             if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
508                 av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
510                 av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
516         return AVERROR_INVALIDDATA;
519     /* rewind. reuse probe buffer to avoid seeking */
520     if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
526 /* open input file and probe the format if necessary */
/*
 * Set up s->pb and s->iformat for avformat_open_input(): use a
 * user-supplied AVIOContext when present, skip I/O entirely for
 * AVFMT_NOFILE formats, otherwise open the file and probe it.
 * NOTE(review): block appears truncated by extraction — comments only.
 */
527 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
530     AVProbeData pd = {filename, NULL, 0};
/* caller supplied its own AVIOContext */
533         s->flags |= AVFMT_FLAG_CUSTOM_IO;
535             return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
536         else if (s->iformat->flags & AVFMT_NOFILE)
537             av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
538                                       "will be ignored with AVFMT_NOFILE format.\n");
/* NOFILE formats, or formats identifiable from the name alone, need no I/O */
542     if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
543         (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
546     if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
547                           &s->interrupt_callback, options)) < 0)
551     return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
554 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
555 AVPacketList **plast_pktl){
556 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
561 (*plast_pktl)->next = pktl;
563 *packet_buffer = pktl;
565 /* add the packet in the buffered packet list */
571 static void queue_attached_pictures(AVFormatContext *s)
574 for (i = 0; i < s->nb_streams; i++)
575 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
576 s->streams[i]->discard < AVDISCARD_ALL) {
577 AVPacket copy = s->streams[i]->attached_pic;
578 copy.destruct = NULL;
579 add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end);
/*
 * Open an input stream, probe/choose the demuxer, apply options, read the
 * header and queue attached pictures.  On failure the context is freed
 * unless the caller owns the AVIOContext.
 * NOTE(review): block appears truncated by extraction — comments only.
 */
583 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
585     AVFormatContext *s = *ps;
587     AVDictionary *tmp = NULL;
588     ID3v2ExtraMeta *id3v2_extra_meta = NULL;
590     if (!s && !(s = avformat_alloc_context()))
591         return AVERROR(ENOMEM);
/* work on a copy of the options so unconsumed entries can be returned */
596         av_dict_copy(&tmp, *options, 0);
598     if ((ret = av_opt_set_dict(s, &tmp)) < 0)
601     if ((ret = init_input(s, filename, &tmp)) < 0)
604     /* check filename in case an image number is expected */
605     if (s->iformat->flags & AVFMT_NEEDNUMBER) {
606         if (!av_filename_number_test(filename)) {
607             ret = AVERROR(EINVAL);
612     s->duration = s->start_time = AV_NOPTS_VALUE;
613     av_strlcpy(s->filename, filename, sizeof(s->filename));
615     /* allocate private data */
616     if (s->iformat->priv_data_size > 0) {
617         if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
618             ret = AVERROR(ENOMEM);
621         if (s->iformat->priv_class) {
622             *(const AVClass**)s->priv_data = s->iformat->priv_class;
623             av_opt_set_defaults(s->priv_data);
624             if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
629     /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
631         ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
633     if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
634         if ((ret = s->iformat->read_header(s)) < 0)
637     if (id3v2_extra_meta &&
638         (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
640     ff_id3v2_free_extra_meta(&id3v2_extra_meta);
642     queue_attached_pictures(s);
644     if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
645         s->data_offset = avio_tell(s->pb);
647     s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
/* hand any unconsumed options back to the caller */
650         av_dict_free(options);
/* error path: release everything we created */
657     ff_id3v2_free_extra_meta(&id3v2_extra_meta);
659     if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
661     avformat_free_context(s);
666 /*******************************************************/
/*
 * Read one raw packet: first drain the raw packet buffer, otherwise call
 * the demuxer's read_packet().  Packets of streams whose codec is still
 * being probed are buffered and their data accumulated in st->probe_data
 * until set_codec_from_probe_data() succeeds or probing gives up.
 * NOTE(review): block appears truncated by extraction — comments only.
 */
668 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
674         AVPacketList *pktl = s->raw_packet_buffer;
/* buffered packet is deliverable once its stream no longer needs probing */
678             if(s->streams[pkt->stream_index]->request_probe <= 0){
679                 s->raw_packet_buffer = pktl->next;
680                 s->raw_packet_buffer_remaining_size += pkt->size;
687         ret= s->iformat->read_packet(s, pkt);
/* on read failure, give up probing for all still-probing streams */
689             if (!pktl || ret == AVERROR(EAGAIN))
691             for (i = 0; i < s->nb_streams; i++)
692                 if(s->streams[i]->request_probe > 0)
693                     s->streams[i]->request_probe = -1;
697         if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
698             (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
699             av_log(s, AV_LOG_WARNING,
700                    "Dropped corrupted packet (stream = %d)\n",
706         if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
707             av_packet_merge_side_data(pkt);
709         if(pkt->stream_index >= (unsigned)s->nb_streams){
710             av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
714         st= s->streams[pkt->stream_index];
/* honor forced codec ids set on the format context */
716         switch(st->codec->codec_type){
717         case AVMEDIA_TYPE_VIDEO:
718             if(s->video_codec_id)   st->codec->codec_id= s->video_codec_id;
720         case AVMEDIA_TYPE_AUDIO:
721             if(s->audio_codec_id)   st->codec->codec_id= s->audio_codec_id;
723         case AVMEDIA_TYPE_SUBTITLE:
724             if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
728         if(!pktl && st->request_probe <= 0)
731         add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
732         s->raw_packet_buffer_remaining_size -= pkt->size;
734         if(st->request_probe>0){
735             AVProbeData *pd = &st->probe_data;
737             av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
/* accumulate packet payload into the probe buffer (zero-padded) */
740             pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
741             memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
742             pd->buf_size += pkt->size;
743             memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
745             end=    s->raw_packet_buffer_remaining_size <= 0
746                  || st->probe_packets<=0;
/* retry probing whenever the buffer size crosses a power of two */
748             if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
749                 int score= set_codec_from_probe_data(s, st, pd);
750                 if(    (st->codec->codec_id != CODEC_ID_NONE && score > AVPROBE_SCORE_MAX/4)
754                     st->request_probe= -1;
755                     if(st->codec->codec_id != CODEC_ID_NONE){
756                         av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
758                         av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
#if FF_API_READ_PACKET
/** Deprecated public wrapper around ff_read_packet(). */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    return ff_read_packet(s, pkt);
}
#endif
773 /**********************************************************/
775 static int determinable_frame_size(AVCodecContext *avctx)
777 if (/*avctx->codec_id == CODEC_ID_AAC ||
778 avctx->codec_id == CODEC_ID_MP1 ||
779 avctx->codec_id == CODEC_ID_MP2 ||*/
780 avctx->codec_id == CODEC_ID_MP3/* ||
781 avctx->codec_id == CODEC_ID_CELT*/)
787 * Get the number of samples of an audio frame. Return -1 on error.
789 static int get_audio_frame_size(AVCodecContext *enc, int size, int mux)
793 /* give frame_size priority if demuxing */
794 if (!mux && enc->frame_size > 1)
795 return enc->frame_size;
797 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
800 /* fallback to using frame_size if muxing */
801 if (enc->frame_size > 1)
802 return enc->frame_size;
809  * Return the frame duration in seconds. Return 0 if not available.
/*
 * Writes the duration as a rational *pnum / *pden; for video it is derived
 * from frame rate / time base (adjusted for repeated fields), for audio
 * from the frame size and sample rate.
 * NOTE(review): block appears truncated by extraction — comments only.
 */
811 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
812                                    AVCodecParserContext *pc, AVPacket *pkt)
818     switch(st->codec->codec_type) {
819     case AVMEDIA_TYPE_VIDEO:
/* prefer the container frame rate when no parser is in use */
820         if (st->r_frame_rate.num && !pc) {
821             *pnum = st->r_frame_rate.den;
822             *pden = st->r_frame_rate.num;
823         } else if(st->time_base.num*1000LL > st->time_base.den) {
824             *pnum = st->time_base.num;
825             *pden = st->time_base.den;
826         }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
827             *pnum = st->codec->time_base.num;
828             *pden = st->codec->time_base.den;
829             if (pc && pc->repeat_pict) {
830                 *pnum = (*pnum) * (1 + pc->repeat_pict);
832             //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
833             //Thus if we have no parser in such case leave duration undefined.
834             if(st->codec->ticks_per_frame>1 && !pc){
839     case AVMEDIA_TYPE_AUDIO:
840         frame_size = get_audio_frame_size(st->codec, pkt->size, 0);
841         if (frame_size <= 0 || st->codec->sample_rate <= 0)
844         *pden = st->codec->sample_rate;
/*
 * Return 1 if every frame of this codec is a keyframe: all audio codecs,
 * plus the intra-only video codecs listed in the switch.
 * NOTE(review): block appears truncated by extraction (several case
 * labels and returns are missing) — comments only, code untouched.
 */
851 static int is_intra_only(AVCodecContext *enc){
852     if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
854     }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
855         switch(enc->codec_id){
857         case CODEC_ID_MJPEGB:
859         case CODEC_ID_PRORES:
860         case CODEC_ID_RAWVIDEO:
862         case CODEC_ID_DVVIDEO:
863         case CODEC_ID_HUFFYUV:
864         case CODEC_ID_FFVHUFF:
869         case CODEC_ID_JPEG2000:
870         case CODEC_ID_UTVIDEO:
878 static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
882 if (pktl == s->parse_queue_end)
883 return s->packet_buffer;
/*
 * Once the first real dts of a stream is known, rebase all buffered
 * packets of that stream from the internal RELATIVE_TS_BASE domain onto
 * real timestamps, and set st->start_time.
 * NOTE(review): block appears truncated by extraction — comments only.
 */
887 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
888                                       int64_t dts, int64_t pts)
890     AVStream *st= s->streams[stream_index];
891     AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
/* nothing to do unless this is the first absolute dts for the stream */
893     if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))
896     st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
899     if (is_relative(pts))
900         pts += st->first_dts - RELATIVE_TS_BASE;
/* shift every buffered packet of this stream into the absolute domain */
902     for(; pktl; pktl= get_next_pkt(s, st, pktl)){
903         if(pktl->pkt.stream_index != stream_index)
905         if(is_relative(pktl->pkt.pts))
906             pktl->pkt.pts += st->first_dts - RELATIVE_TS_BASE;
908         if(is_relative(pktl->pkt.dts))
909             pktl->pkt.dts += st->first_dts - RELATIVE_TS_BASE;
911         if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
912             st->start_time= pktl->pkt.pts;
914     if (st->start_time == AV_NOPTS_VALUE)
915         st->start_time = pts;
/*
 * Fill in missing dts/pts of buffered packets of one stream by assuming a
 * constant packet duration, starting from first_dts (or the relative base).
 * NOTE(review): block appears truncated by extraction — comments only.
 */
918 static void update_initial_durations(AVFormatContext *s, AVStream *st,
919                                      int stream_index, int duration)
921     AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
922     int64_t cur_dts= RELATIVE_TS_BASE;
924     if(st->first_dts != AV_NOPTS_VALUE){
/* walk back from first_dts over packets that still lack timestamps */
925         cur_dts= st->first_dts;
926         for(; pktl; pktl= get_next_pkt(s, st, pktl)){
927             if(pktl->pkt.stream_index == stream_index){
928                 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
933         pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
934         st->first_dts = cur_dts;
935     }else if(st->cur_dts != RELATIVE_TS_BASE)
/* forward pass: assign cur_dts and the constant duration */
938     for(; pktl; pktl= get_next_pkt(s, st, pktl)){
939         if(pktl->pkt.stream_index != stream_index)
941         if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
942            && !pktl->pkt.duration){
943             pktl->pkt.dts= cur_dts;
944             if(!st->codec->has_b_frames)
945                 pktl->pkt.pts= cur_dts;
946 //            if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
947                 pktl->pkt.duration = duration;
950         cur_dts = pktl->pkt.dts + pktl->pkt.duration;
953     st->cur_dts= cur_dts;
/*
 * Fill in missing pkt fields (pts/dts/duration) using parser info, frame
 * durations, B-frame delay heuristics and the stream's running cur_dts.
 * This is the core demux timestamp-fixup routine.
 * NOTE(review): block appears truncated by extraction (many lines are
 * missing); the logic is too order-sensitive to rewrite safely here —
 * comments only, code untouched.
 */
956 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
957                                AVCodecParserContext *pc, AVPacket *pkt)
959     int num, den, presentation_delayed, delay, i;
962     if (s->flags & AVFMT_FLAG_NOFILLIN)
965     if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
966         pkt->dts= AV_NOPTS_VALUE;
/* parser saw a B-frame => the stream reorders frames */
968     if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
969         //FIXME Set low_delay = 0 when has_b_frames = 1
970         st->codec->has_b_frames = 1;
972     /* do we have a video B-frame ? */
973     delay= st->codec->has_b_frames;
974     presentation_delayed = 0;
976     /* XXX: need has_b_frame, but cannot get it if the codec is
979         pc && pc->pict_type != AV_PICTURE_TYPE_B)
980         presentation_delayed = 1;
/* undo a timestamp wraparound when dts ended up far above pts */
982     if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > pkt->pts && st->pts_wrap_bits<63){
983         pkt->dts -= 1LL<<st->pts_wrap_bits;
986     // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
987     // we take the conservative approach and discard both
988     // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
989     if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
990         av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
991         pkt->dts= AV_NOPTS_VALUE;
/* derive a per-packet duration from the frame rate / sample rate */
994     if (pkt->duration == 0) {
995         compute_frame_duration(&num, &den, st, pc, pkt);
997             pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
1000     if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
1001         update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1003     /* correct timestamps with byte offset if demuxers only have timestamps
1004        on packet boundaries */
1005     if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1006         /* this will estimate bitrate based on this frame's duration and size */
1007         offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1008         if(pkt->pts != AV_NOPTS_VALUE)
1010         if(pkt->dts != AV_NOPTS_VALUE)
/* parser-provided sync points give exact dts/pts relationships */
1014     if (pc && pc->dts_sync_point >= 0) {
1015         // we have synchronization info from the parser
1016         int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1018             int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1019             if (pkt->dts != AV_NOPTS_VALUE) {
1020                 // got DTS from the stream, update reference timestamp
1021                 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1022                 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1023             } else if (st->reference_dts != AV_NOPTS_VALUE) {
1024                 // compute DTS based on reference timestamp
1025                 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1026                 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1028             if (pc->dts_sync_point > 0)
1029                 st->reference_dts = pkt->dts; // new reference
1033     /* This may be redundant, but it should not hurt. */
1034     if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1035         presentation_delayed = 1;
1037 //    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p duration:%d\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc, pkt->duration);
1038     /* interpolate PTS and DTS if they are not present */
1039     //We skip H264 currently because delay and has_b_frames are not reliably set
1040     if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1041         if (presentation_delayed) {
1042             /* DTS = decompression timestamp */
1043             /* PTS = presentation timestamp */
1044             if (pkt->dts == AV_NOPTS_VALUE)
1045                 pkt->dts = st->last_IP_pts;
1046             update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1047             if (pkt->dts == AV_NOPTS_VALUE)
1048                 pkt->dts = st->cur_dts;
1050             /* this is tricky: the dts must be incremented by the duration
1051                of the frame we are displaying, i.e. the last I- or P-frame */
1052             if (st->last_IP_duration == 0)
1053                 st->last_IP_duration = pkt->duration;
1054             if(pkt->dts != AV_NOPTS_VALUE)
1055                 st->cur_dts = pkt->dts + st->last_IP_duration;
1056             st->last_IP_duration  = pkt->duration;
1057             st->last_IP_pts= pkt->pts;
1058             /* cannot compute PTS if not present (we can compute it only
1059                by knowing the future */
1060         } else if (pkt->pts != AV_NOPTS_VALUE ||
1061                    pkt->dts != AV_NOPTS_VALUE ||
1063             int duration = pkt->duration;
/* correct a pts that drifted exactly one frame behind cur_dts */
1065             if(pkt->pts != AV_NOPTS_VALUE && duration){
1066                 int64_t old_diff= FFABS(st->cur_dts - duration - pkt->pts);
1067                 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1068                 if(old_diff < new_diff && old_diff < (duration>>3)){
1069                     pkt->pts += duration;
1070 //                    av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
1074             /* presentation is not delayed : PTS and DTS are the same */
1075             if(pkt->pts == AV_NOPTS_VALUE)
1076                 pkt->pts = pkt->dts;
1077             update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
1078             if(pkt->pts == AV_NOPTS_VALUE)
1079                 pkt->pts = st->cur_dts;
1080             pkt->dts = pkt->pts;
1081             if(pkt->pts != AV_NOPTS_VALUE)
1082                 st->cur_dts = pkt->pts + duration;
/* reorder buffered pts values to recover a monotonic dts */
1086     if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1087         st->pts_buffer[0]= pkt->pts;
1088         for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1089             FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1090         if(pkt->dts == AV_NOPTS_VALUE)
1091             pkt->dts= st->pts_buffer[0];
1092         if(st->codec->codec_id == CODEC_ID_H264){ // we skipped it above so we try here
1093             update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1095         if(pkt->dts > st->cur_dts)
1096             st->cur_dts = pkt->dts;
1099 //    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
/* intra-only codecs: every packet is a keyframe */
1102     if(is_intra_only(st->codec))
1103         pkt->flags |= AV_PKT_FLAG_KEY;
1105         pkt->convergence_duration = pc->convergence_duration;
1108 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1111 AVPacketList *pktl = *pkt_buf;
1112 *pkt_buf = pktl->next;
1113 av_free_packet(&pktl->pkt);
1116 *pkt_buf_end = NULL;
1120  * Parse a packet, add all split parts to parse_queue
1122  * @param pkt packet to parse, NULL when flushing the parser at end of stream
/*
 * Feeds pkt through the stream's AVCodecParserContext; every complete
 * frame produced is timestamped, optionally indexed, and appended to
 * s->parse_queue.  A NULL pkt flushes and closes the parser.
 * NOTE(review): block appears truncated by extraction — comments only.
 */
1124 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1126     AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1127     AVStream *st = s->streams[stream_index];
1128     uint8_t *data = pkt ? pkt->data : NULL;
1129     int size = pkt ? pkt->size : 0;
1130     int ret = 0, got_output = 0;
/* NULL pkt => synthesize an empty flush packet for the parser */
1133         av_init_packet(&flush_pkt);
1136     } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1137         // preserve 0-size sync packets
1138         compute_pkt_fields(s, st, st->parser, pkt);
1141     while (size > 0 || (pkt == &flush_pkt && got_output)) {
1144         av_init_packet(&out_pkt);
1145         len = av_parser_parse2(st->parser,  st->codec,
1146                                &out_pkt.data, &out_pkt.size, data, size,
1147                                pkt->pts, pkt->dts, pkt->pos);
/* timestamps belong to the first frame produced from this input */
1149         pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1150         /* increment read pointer */
1154         got_output = !!out_pkt.size;
1159         /* set the duration */
1160         out_pkt.duration = 0;
1161         if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1162             if (st->codec->sample_rate > 0) {
1163                 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1164                                                     (AVRational){ 1, st->codec->sample_rate },
1168         } else if (st->codec->time_base.num != 0 &&
1169                    st->codec->time_base.den != 0) {
1170             out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1171                                                 st->codec->time_base,
1176         out_pkt.stream_index = st->index;
1177         out_pkt.pts = st->parser->pts;
1178         out_pkt.dts = st->parser->dts;
1179         out_pkt.pos = st->parser->pos;
1181         if (st->parser->key_frame == 1 ||
1182             (st->parser->key_frame == -1 &&
1183              st->parser->pict_type == AV_PICTURE_TYPE_I))
1184             out_pkt.flags |= AV_PKT_FLAG_KEY;
1186         compute_pkt_fields(s, st, st->parser, &out_pkt);
/* keep the seek index updated for keyframes */
1188         if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1189             out_pkt.flags & AV_PKT_FLAG_KEY) {
1190             int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? out_pkt.pos : st->parser->frame_offset;
1191             ff_reduce_index(s, st->index);
1192             av_add_index_entry(st, pos, out_pkt.dts,
1193                                0, 0, AVINDEX_KEYFRAME);
/* take over the input packet's buffer when the parser passed it through */
1196         if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1197             out_pkt.destruct = pkt->destruct;
1198             pkt->destruct = NULL;
1200         if ((ret = av_dup_packet(&out_pkt)) < 0)
1203         if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1204             av_free_packet(&out_pkt);
1205             ret = AVERROR(ENOMEM);
1211     /* end of the stream => close and free the parser */
1212     if (pkt == &flush_pkt) {
1213         av_parser_close(st->parser);
1218     av_free_packet(pkt);
1222 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1223 AVPacketList **pkt_buffer_end,
1227 av_assert0(*pkt_buffer);
1230 *pkt_buffer = pktl->next;
1232 *pkt_buffer_end = NULL;
/*
 * Core read loop: pull raw packets via ff_read_packet(), lazily create a
 * parser per stream, route packets through parse_packet() or deliver them
 * directly, and finally drain the parse queue.
 * NOTE(review): block appears truncated by extraction — comments only.
 */
1237 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1239     int ret = 0, i, got_packet = 0;
1241     av_init_packet(pkt);
1243     while (!got_packet && !s->parse_queue) {
1247         /* read next packet */
1248         ret = ff_read_packet(s, &cur_pkt);
1250             if (ret == AVERROR(EAGAIN))
1252             /* flush the parsers */
1253             for(i = 0; i < s->nb_streams; i++) {
1255                 if (st->parser && st->need_parsing)
1256                     parse_packet(s, NULL, st->index);
1258             /* all remaining packets are now in parse_queue =>
1259              * really terminate parsing */
1263         st = s->streams[cur_pkt.stream_index];
/* pts < dts is never valid; warn so the source can be investigated */
1265         if (cur_pkt.pts != AV_NOPTS_VALUE &&
1266             cur_pkt.dts != AV_NOPTS_VALUE &&
1267             cur_pkt.pts < cur_pkt.dts) {
1268             av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1269                    cur_pkt.stream_index,
1274         if (s->debug & FF_FDEBUG_TS)
1275             av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1276                    cur_pkt.stream_index,
/* lazily create the parser the first time this stream needs one */
1283         if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1284             st->parser = av_parser_init(st->codec->codec_id);
1286                 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1287                        "%s, packets or times may be invalid.\n",
1288                        avcodec_get_name(st->codec->codec_id));
1289                 /* no parser available: just output the raw packets */
1290                 st->need_parsing = AVSTREAM_PARSE_NONE;
1291             } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1292                 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1293             } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1294                 st->parser->flags |= PARSER_FLAG_ONCE;
1298         if (!st->need_parsing || !st->parser) {
1299             /* no parsing needed: we just output the packet as is */
1301             compute_pkt_fields(s, st, NULL, pkt);
1302             if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1303                 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1304                 ff_reduce_index(s, st->index);
1305                 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1308         } else if (st->discard < AVDISCARD_ALL) {
1309             if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1313             av_free_packet(&cur_pkt);
1317     if (!got_packet && s->parse_queue)
1318         ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1320     if(s->debug & FF_FDEBUG_TS)
1321         av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1332 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1334 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1339 ret = s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
1340 &s->packet_buffer_end,
1342 read_frame_internal(s, pkt);
1347 AVPacketList *pktl = s->packet_buffer;
1350 AVPacket *next_pkt = &pktl->pkt;
1352 if (next_pkt->dts != AV_NOPTS_VALUE) {
1353 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1354 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1355 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1356 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
1357 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1358 next_pkt->pts = pktl->pkt.dts;
1362 pktl = s->packet_buffer;
1365 /* read packet from packet buffer, if there is data */
1366 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1367 next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1368 ret = read_from_packet_buffer(&s->packet_buffer,
1369 &s->packet_buffer_end, pkt);
1374 ret = read_frame_internal(s, pkt);
1376 if (pktl && ret != AVERROR(EAGAIN)) {
1383 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1384 &s->packet_buffer_end)) < 0)
1385 return AVERROR(ENOMEM);
1389 if (is_relative(pkt->dts))
1390 pkt->dts -= RELATIVE_TS_BASE;
1391 if (is_relative(pkt->pts))
1392 pkt->pts -= RELATIVE_TS_BASE;
1396 /* XXX: suppress the packet queue */
1397 static void flush_packet_queue(AVFormatContext *s)
1399 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1400 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1401 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1403 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1406 /*******************************************************/
1409 int av_find_default_stream_index(AVFormatContext *s)
1411 int first_audio_index = -1;
1415 if (s->nb_streams <= 0)
1417 for(i = 0; i < s->nb_streams; i++) {
1419 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1420 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1423 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1424 first_audio_index = i;
1426 return first_audio_index >= 0 ? first_audio_index : 0;
1430 * Flush the frame reader.
1432 void ff_read_frame_flush(AVFormatContext *s)
1437 flush_packet_queue(s);
1439 /* for each stream, reset read state */
1440 for(i = 0; i < s->nb_streams; i++) {
1444 av_parser_close(st->parser);
1447 st->last_IP_pts = AV_NOPTS_VALUE;
1448 if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1449 else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1450 st->reference_dts = AV_NOPTS_VALUE;
1452 st->probe_packets = MAX_PROBE_PACKETS;
1454 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1455 st->pts_buffer[j]= AV_NOPTS_VALUE;
1459 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1463 for(i = 0; i < s->nb_streams; i++) {
1464 AVStream *st = s->streams[i];
1466 st->cur_dts = av_rescale(timestamp,
1467 st->time_base.den * (int64_t)ref_st->time_base.num,
1468 st->time_base.num * (int64_t)ref_st->time_base.den);
1472 void ff_reduce_index(AVFormatContext *s, int stream_index)
1474 AVStream *st= s->streams[stream_index];
1475 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1477 if((unsigned)st->nb_index_entries >= max_entries){
1479 for(i=0; 2*i<st->nb_index_entries; i++)
1480 st->index_entries[i]= st->index_entries[2*i];
1481 st->nb_index_entries= i;
1485 int ff_add_index_entry(AVIndexEntry **index_entries,
1486 int *nb_index_entries,
1487 unsigned int *index_entries_allocated_size,
1488 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1490 AVIndexEntry *entries, *ie;
1493 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1496 if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1497 timestamp -= RELATIVE_TS_BASE;
1499 entries = av_fast_realloc(*index_entries,
1500 index_entries_allocated_size,
1501 (*nb_index_entries + 1) *
1502 sizeof(AVIndexEntry));
1506 *index_entries= entries;
1508 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1511 index= (*nb_index_entries)++;
1512 ie= &entries[index];
1513 assert(index==0 || ie[-1].timestamp < timestamp);
1515 ie= &entries[index];
1516 if(ie->timestamp != timestamp){
1517 if(ie->timestamp <= timestamp)
1519 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1520 (*nb_index_entries)++;
1521 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1522 distance= ie->min_distance;
1526 ie->timestamp = timestamp;
1527 ie->min_distance= distance;
1534 int av_add_index_entry(AVStream *st,
1535 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1537 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1538 &st->index_entries_allocated_size, pos,
1539 timestamp, size, distance, flags);
1542 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1543 int64_t wanted_timestamp, int flags)
1551 //optimize appending index entries at the end
1552 if(b && entries[b-1].timestamp < wanted_timestamp)
1557 timestamp = entries[m].timestamp;
1558 if(timestamp >= wanted_timestamp)
1560 if(timestamp <= wanted_timestamp)
1563 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1565 if(!(flags & AVSEEK_FLAG_ANY)){
1566 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1567 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1576 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1579 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1580 wanted_timestamp, flags);
1583 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1585 AVInputFormat *avif= s->iformat;
1586 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1587 int64_t ts_min, ts_max, ts;
1592 if (stream_index < 0)
1595 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1598 ts_min= AV_NOPTS_VALUE;
1599 pos_limit= -1; //gcc falsely says it may be uninitialized
1601 st= s->streams[stream_index];
1602 if(st->index_entries){
1605 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1606 index= FFMAX(index, 0);
1607 e= &st->index_entries[index];
1609 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1611 ts_min= e->timestamp;
1612 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1618 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1619 assert(index < st->nb_index_entries);
1621 e= &st->index_entries[index];
1622 assert(e->timestamp >= target_ts);
1624 ts_max= e->timestamp;
1625 pos_limit= pos_max - e->min_distance;
1626 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1627 pos_max,pos_limit, ts_max);
1631 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1636 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1639 ff_read_frame_flush(s);
1640 ff_update_cur_dts(s, st, ts);
1645 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1646 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1647 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1648 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1651 int64_t start_pos, filesize;
1654 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1656 if(ts_min == AV_NOPTS_VALUE){
1657 pos_min = s->data_offset;
1658 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1659 if (ts_min == AV_NOPTS_VALUE)
1663 if(ts_min >= target_ts){
1668 if(ts_max == AV_NOPTS_VALUE){
1670 filesize = avio_size(s->pb);
1671 pos_max = filesize - 1;
1674 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1676 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1677 if (ts_max == AV_NOPTS_VALUE)
1681 int64_t tmp_pos= pos_max + 1;
1682 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1683 if(tmp_ts == AV_NOPTS_VALUE)
1687 if(tmp_pos >= filesize)
1693 if(ts_max <= target_ts){
1698 if(ts_min > ts_max){
1700 }else if(ts_min == ts_max){
1705 while (pos_min < pos_limit) {
1706 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1707 pos_min, pos_max, ts_min, ts_max);
1708 assert(pos_limit <= pos_max);
1711 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1712 // interpolate position (better than dichotomy)
1713 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1714 + pos_min - approximate_keyframe_distance;
1715 }else if(no_change==1){
1716 // bisection, if interpolation failed to change min or max pos last time
1717 pos = (pos_min + pos_limit)>>1;
1719 /* linear search if bisection failed, can only happen if there
1720 are very few or no keyframes between min/max */
1725 else if(pos > pos_limit)
1729 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1734 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1735 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1736 pos_limit, start_pos, no_change);
1737 if(ts == AV_NOPTS_VALUE){
1738 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1741 assert(ts != AV_NOPTS_VALUE);
1742 if (target_ts <= ts) {
1743 pos_limit = start_pos - 1;
1747 if (target_ts >= ts) {
1753 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1754 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1757 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1759 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1760 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1761 pos, ts_min, target_ts, ts_max);
1767 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1768 int64_t pos_min, pos_max;
1770 pos_min = s->data_offset;
1771 pos_max = avio_size(s->pb) - 1;
1773 if (pos < pos_min) pos= pos_min;
1774 else if(pos > pos_max) pos= pos_max;
1776 avio_seek(s->pb, pos, SEEK_SET);
1781 static int seek_frame_generic(AVFormatContext *s,
1782 int stream_index, int64_t timestamp, int flags)
1789 st = s->streams[stream_index];
1791 index = av_index_search_timestamp(st, timestamp, flags);
1793 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1796 if(index < 0 || index==st->nb_index_entries-1){
1800 if(st->nb_index_entries){
1801 assert(st->index_entries);
1802 ie= &st->index_entries[st->nb_index_entries-1];
1803 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1805 ff_update_cur_dts(s, st, ie->timestamp);
1807 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1813 read_status = av_read_frame(s, &pkt);
1814 } while (read_status == AVERROR(EAGAIN));
1815 if (read_status < 0)
1817 av_free_packet(&pkt);
1818 if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1819 if(pkt.flags & AV_PKT_FLAG_KEY)
1821 if(nonkey++ > 1000 && st->codec->codec_id != CODEC_ID_CDGRAPHICS){
1822 av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1827 index = av_index_search_timestamp(st, timestamp, flags);
1832 ff_read_frame_flush(s);
1833 AV_NOWARN_DEPRECATED(
1834 if (s->iformat->read_seek){
1835 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1839 ie = &st->index_entries[index];
1840 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1842 ff_update_cur_dts(s, st, ie->timestamp);
1847 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1848 int64_t timestamp, int flags)
1853 if (flags & AVSEEK_FLAG_BYTE) {
1854 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1856 ff_read_frame_flush(s);
1857 return seek_frame_byte(s, stream_index, timestamp, flags);
1860 if(stream_index < 0){
1861 stream_index= av_find_default_stream_index(s);
1862 if(stream_index < 0)
1865 st= s->streams[stream_index];
1866 /* timestamp for default must be expressed in AV_TIME_BASE units */
1867 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1870 /* first, we try the format specific seek */
1871 AV_NOWARN_DEPRECATED(
1872 if (s->iformat->read_seek) {
1873 ff_read_frame_flush(s);
1874 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1882 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1883 ff_read_frame_flush(s);
1884 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1885 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1886 ff_read_frame_flush(s);
1887 return seek_frame_generic(s, stream_index, timestamp, flags);
1893 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1895 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1898 queue_attached_pictures(s);
1903 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1905 if(min_ts > ts || max_ts < ts)
1908 if (s->iformat->read_seek2) {
1910 ff_read_frame_flush(s);
1911 ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1914 queue_attached_pictures(s);
1918 if(s->iformat->read_timestamp){
1919 //try to seek via read_timestamp()
1922 //Fallback to old API if new is not implemented but old is
1923 //Note the old has somewat different sematics
1924 AV_NOWARN_DEPRECATED(
1925 if (s->iformat->read_seek || 1) {
1926 int dir = (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0);
1927 int ret = av_seek_frame(s, stream_index, ts, flags | dir);
1928 if (ret<0 && ts != min_ts && max_ts != ts) {
1929 ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
1931 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
1937 // try some generic seek like seek_frame_generic() but with new ts semantics
1940 /*******************************************************/
1943 * Return TRUE if the stream has accurate duration in any stream.
1945 * @return TRUE if the stream has accurate duration for at least one component.
1947 static int has_duration(AVFormatContext *ic)
1951 if(ic->duration != AV_NOPTS_VALUE)
1954 for(i = 0;i < ic->nb_streams; i++) {
1955 st = ic->streams[i];
1956 if (st->duration != AV_NOPTS_VALUE)
1963 * Estimate the stream timings from the one of each components.
1965 * Also computes the global bitrate if possible.
1967 static void update_stream_timings(AVFormatContext *ic)
1969 int64_t start_time, start_time1, start_time_text, end_time, end_time1;
1970 int64_t duration, duration1, filesize;
1974 start_time = INT64_MAX;
1975 start_time_text = INT64_MAX;
1976 end_time = INT64_MIN;
1977 duration = INT64_MIN;
1978 for(i = 0;i < ic->nb_streams; i++) {
1979 st = ic->streams[i];
1980 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1981 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1982 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
1983 if (start_time1 < start_time_text)
1984 start_time_text = start_time1;
1986 start_time = FFMIN(start_time, start_time1);
1987 if (st->duration != AV_NOPTS_VALUE) {
1988 end_time1 = start_time1
1989 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1990 end_time = FFMAX(end_time, end_time1);
1993 if (st->duration != AV_NOPTS_VALUE) {
1994 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1995 duration = FFMAX(duration, duration1);
1998 if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
1999 start_time = start_time_text;
2000 if (start_time != INT64_MAX) {
2001 ic->start_time = start_time;
2002 if (end_time != INT64_MIN)
2003 duration = FFMAX(duration, end_time - start_time);
2005 if (duration != INT64_MIN && ic->duration == AV_NOPTS_VALUE) {
2006 ic->duration = duration;
2008 if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2009 /* compute the bitrate */
2010 ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
2011 (double)ic->duration;
2015 static void fill_all_stream_timings(AVFormatContext *ic)
2020 update_stream_timings(ic);
2021 for(i = 0;i < ic->nb_streams; i++) {
2022 st = ic->streams[i];
2023 if (st->start_time == AV_NOPTS_VALUE) {
2024 if(ic->start_time != AV_NOPTS_VALUE)
2025 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2026 if(ic->duration != AV_NOPTS_VALUE)
2027 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
2032 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2034 int64_t filesize, duration;
2038 /* if bit_rate is already set, we believe it */
2039 if (ic->bit_rate <= 0) {
2041 for(i=0;i<ic->nb_streams;i++) {
2042 st = ic->streams[i];
2043 if (st->codec->bit_rate > 0)
2044 bit_rate += st->codec->bit_rate;
2046 ic->bit_rate = bit_rate;
2049 /* if duration is already set, we believe it */
2050 if (ic->duration == AV_NOPTS_VALUE &&
2051 ic->bit_rate != 0) {
2052 filesize = ic->pb ? avio_size(ic->pb) : 0;
2054 for(i = 0; i < ic->nb_streams; i++) {
2055 st = ic->streams[i];
2056 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2057 if (st->duration == AV_NOPTS_VALUE)
2058 st->duration = duration;
2064 #define DURATION_MAX_READ_SIZE 250000
2065 #define DURATION_MAX_RETRY 3
2067 /* only usable for MPEG-PS streams */
2068 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2070 AVPacket pkt1, *pkt = &pkt1;
2072 int read_size, i, ret;
2074 int64_t filesize, offset, duration;
2077 /* flush packet queue */
2078 flush_packet_queue(ic);
2080 for (i=0; i<ic->nb_streams; i++) {
2081 st = ic->streams[i];
2082 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2083 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2086 av_parser_close(st->parser);
2091 /* estimate the end time (duration) */
2092 /* XXX: may need to support wrapping */
2093 filesize = ic->pb ? avio_size(ic->pb) : 0;
2094 end_time = AV_NOPTS_VALUE;
2096 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2100 avio_seek(ic->pb, offset, SEEK_SET);
2103 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2107 ret = ff_read_packet(ic, pkt);
2108 } while(ret == AVERROR(EAGAIN));
2111 read_size += pkt->size;
2112 st = ic->streams[pkt->stream_index];
2113 if (pkt->pts != AV_NOPTS_VALUE &&
2114 (st->start_time != AV_NOPTS_VALUE ||
2115 st->first_dts != AV_NOPTS_VALUE)) {
2116 duration = end_time = pkt->pts;
2117 if (st->start_time != AV_NOPTS_VALUE)
2118 duration -= st->start_time;
2120 duration -= st->first_dts;
2122 duration += 1LL<<st->pts_wrap_bits;
2124 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
2125 st->duration = duration;
2128 av_free_packet(pkt);
2130 }while( end_time==AV_NOPTS_VALUE
2131 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2132 && ++retry <= DURATION_MAX_RETRY);
2134 fill_all_stream_timings(ic);
2136 avio_seek(ic->pb, old_offset, SEEK_SET);
2137 for (i=0; i<ic->nb_streams; i++) {
2139 st->cur_dts= st->first_dts;
2140 st->last_IP_pts = AV_NOPTS_VALUE;
2141 st->reference_dts = AV_NOPTS_VALUE;
2145 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2149 /* get the file size, if possible */
2150 if (ic->iformat->flags & AVFMT_NOFILE) {
2153 file_size = avio_size(ic->pb);
2154 file_size = FFMAX(0, file_size);
2157 if ((!strcmp(ic->iformat->name, "mpeg") ||
2158 !strcmp(ic->iformat->name, "mpegts")) &&
2159 file_size && ic->pb->seekable) {
2160 /* get accurate estimate from the PTSes */
2161 estimate_timings_from_pts(ic, old_offset);
2162 } else if (has_duration(ic)) {
2163 /* at least one component has timings - we use them for all
2165 fill_all_stream_timings(ic);
2167 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2168 /* less precise: use bitrate info */
2169 estimate_timings_from_bit_rate(ic);
2171 update_stream_timings(ic);
2175 AVStream av_unused *st;
2176 for(i = 0;i < ic->nb_streams; i++) {
2177 st = ic->streams[i];
2178 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2179 (double) st->start_time / AV_TIME_BASE,
2180 (double) st->duration / AV_TIME_BASE);
2182 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2183 (double) ic->start_time / AV_TIME_BASE,
2184 (double) ic->duration / AV_TIME_BASE,
2185 ic->bit_rate / 1000);
2189 static int has_codec_parameters(AVStream *st)
2191 AVCodecContext *avctx = st->codec;
2193 switch (avctx->codec_type) {
2194 case AVMEDIA_TYPE_AUDIO:
2195 val = avctx->sample_rate && avctx->channels;
2196 if (!avctx->frame_size && determinable_frame_size(avctx))
2198 if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2201 case AVMEDIA_TYPE_VIDEO:
2203 if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
2206 case AVMEDIA_TYPE_DATA:
2207 if(avctx->codec_id == CODEC_ID_NONE) return 1;
2212 return avctx->codec_id != CODEC_ID_NONE && val != 0;
2215 static int has_decode_delay_been_guessed(AVStream *st)
2217 return st->codec->codec_id != CODEC_ID_H264 ||
2218 st->info->nb_decoded_frames >= 6;
2221 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
2222 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2225 int got_picture = 1, ret = 0;
2227 AVPacket pkt = *avpkt;
2229 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2230 AVDictionary *thread_opt = NULL;
2232 codec = st->codec->codec ? st->codec->codec :
2233 avcodec_find_decoder(st->codec->codec_id);
2236 st->info->found_decoder = -1;
2240 /* force thread count to 1 since the h264 decoder will not extract SPS
2241 * and PPS to extradata during multi-threaded decoding */
2242 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2243 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2245 av_dict_free(&thread_opt);
2247 st->info->found_decoder = -1;
2250 st->info->found_decoder = 1;
2251 } else if (!st->info->found_decoder)
2252 st->info->found_decoder = 1;
2254 if (st->info->found_decoder < 0)
2257 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2259 (!has_codec_parameters(st) ||
2260 !has_decode_delay_been_guessed(st) ||
2261 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2263 avcodec_get_frame_defaults(&picture);
2264 switch(st->codec->codec_type) {
2265 case AVMEDIA_TYPE_VIDEO:
2266 ret = avcodec_decode_video2(st->codec, &picture,
2267 &got_picture, &pkt);
2269 case AVMEDIA_TYPE_AUDIO:
2270 ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
2277 st->info->nb_decoded_frames++;
2283 if(!pkt.data && !got_picture)
2288 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2290 while (tags->id != CODEC_ID_NONE) {
2298 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2301 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2302 if(tag == tags[i].tag)
2305 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2306 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2309 return CODEC_ID_NONE;
2312 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2315 for(i=0; tags && tags[i]; i++){
2316 int tag= ff_codec_get_tag(tags[i], id);
2322 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2325 for(i=0; tags && tags[i]; i++){
2326 enum CodecID id= ff_codec_get_id(tags[i], tag);
2327 if(id!=CODEC_ID_NONE) return id;
2329 return CODEC_ID_NONE;
2332 static void compute_chapters_end(AVFormatContext *s)
2335 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2337 for (i = 0; i < s->nb_chapters; i++)
2338 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2339 AVChapter *ch = s->chapters[i];
2340 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2343 for (j = 0; j < s->nb_chapters; j++) {
2344 AVChapter *ch1 = s->chapters[j];
2345 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2346 if (j != i && next_start > ch->start && next_start < end)
2349 ch->end = (end == INT64_MAX) ? ch->start : end;
2353 static int get_std_framerate(int i){
2354 if(i<60*12) return i*1001;
2355 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2359 * Is the time base unreliable.
2360 * This is a heuristic to balance between quick acceptance of the values in
2361 * the headers vs. some extra checks.
2362 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2363 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2364 * And there are "variable" fps files this needs to detect as well.
2366 static int tb_unreliable(AVCodecContext *c){
2367 if( c->time_base.den >= 101L*c->time_base.num
2368 || c->time_base.den < 5L*c->time_base.num
2369 /* || c->codec_tag == AV_RL32("DIVX")
2370 || c->codec_tag == AV_RL32("XVID")*/
2371 || c->codec_id == CODEC_ID_MPEG2VIDEO
2372 || c->codec_id == CODEC_ID_H264
2378 #if FF_API_FORMAT_PARAMETERS
2379 int av_find_stream_info(AVFormatContext *ic)
2381 return avformat_find_stream_info(ic, NULL);
2385 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2387 int i, count, ret, read_size, j;
2389 AVPacket pkt1, *pkt;
2390 int64_t old_offset = avio_tell(ic->pb);
2391 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2392 int flush_codecs = 1;
2394 for(i=0;i<ic->nb_streams;i++) {
2396 AVDictionary *thread_opt = NULL;
2397 st = ic->streams[i];
2399 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2400 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2401 /* if(!st->time_base.num)
2403 if(!st->codec->time_base.num)
2404 st->codec->time_base= st->time_base;
2406 //only for the split stuff
2407 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2408 st->parser = av_parser_init(st->codec->codec_id);
2409 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2410 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2413 codec = st->codec->codec ? st->codec->codec :
2414 avcodec_find_decoder(st->codec->codec_id);
2416 /* force thread count to 1 since the h264 decoder will not extract SPS
2417 * and PPS to extradata during multi-threaded decoding */
2418 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2420 /* Ensure that subtitle_header is properly set. */
2421 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2422 && codec && !st->codec->codec)
2423 avcodec_open2(st->codec, codec, options ? &options[i]
2426 //try to just open decoders, in case this is enough to get parameters
2427 if (!has_codec_parameters(st)) {
2428 if (codec && !st->codec->codec)
2429 avcodec_open2(st->codec, codec, options ? &options[i]
2433 av_dict_free(&thread_opt);
2436 for (i=0; i<ic->nb_streams; i++) {
2437 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2443 if (ff_check_interrupt(&ic->interrupt_callback)){
2445 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2449 /* check if one codec still needs to be handled */
2450 for(i=0;i<ic->nb_streams;i++) {
2451 int fps_analyze_framecount = 20;
2453 st = ic->streams[i];
2454 if (!has_codec_parameters(st))
2456 /* if the timebase is coarse (like the usual millisecond precision
2457 of mkv), we need to analyze more frames to reliably arrive at
2459 if (av_q2d(st->time_base) > 0.0005)
2460 fps_analyze_framecount *= 2;
2461 if (ic->fps_probe_size >= 0)
2462 fps_analyze_framecount = ic->fps_probe_size;
2463 /* variable fps and no guess at the real fps */
2464 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2465 && st->info->duration_count < fps_analyze_framecount
2466 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2468 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2470 if(st->first_dts == AV_NOPTS_VALUE && (st->codec->codec_type == AVMEDIA_TYPE_VIDEO || st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2473 if (i == ic->nb_streams) {
2474 /* NOTE: if the format has no header, then we need to read
2475 some packets to get most of the streams, so we cannot
2477 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2478 /* if we found the info for all the codecs, we can stop */
2480 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2485 /* we did not get all the codec info, but we read too much data */
2486 if (read_size >= ic->probesize) {
2488 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2492 /* NOTE: a new stream can be added there if no header in file
2493 (AVFMTCTX_NOHEADER) */
2494 ret = read_frame_internal(ic, &pkt1);
2495 if (ret == AVERROR(EAGAIN))
2503 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2504 if ((ret = av_dup_packet(pkt)) < 0)
2505 goto find_stream_info_err;
2507 read_size += pkt->size;
2509 st = ic->streams[pkt->stream_index];
2510 if (st->codec_info_nb_frames>1) {
2512 if (st->time_base.den > 0)
2513 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
2514 if (st->avg_frame_rate.num > 0)
2515 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, (AVRational){st->avg_frame_rate.den, st->avg_frame_rate.num}, AV_TIME_BASE_Q));
2517 if (t >= ic->max_analyze_duration) {
2518 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached at %"PRId64"\n", ic->max_analyze_duration, t);
2521 st->info->codec_info_duration += pkt->duration;
2524 int64_t last = st->info->last_dts;
2526 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2527 double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2528 int64_t duration= pkt->dts - last;
2530 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2531 // av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2532 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); i++) {
2533 int framerate= get_std_framerate(i);
2534 double sdts= dts*framerate/(1001*12);
2536 int ticks= lrintf(sdts+j*0.5);
2537 double error= sdts - ticks + j*0.5;
2538 st->info->duration_error[j][0][i] += error;
2539 st->info->duration_error[j][1][i] += error*error;
2542 st->info->duration_count++;
2543 // ignore the first 4 values, they might have some random jitter
2544 if (st->info->duration_count > 3)
2545 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2547 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2548 st->info->last_dts = pkt->dts;
2550 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2551 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2552 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2553 st->codec->extradata_size= i;
2554 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2555 if (!st->codec->extradata)
2556 return AVERROR(ENOMEM);
2557 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2558 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2562 /* if still no information, we try to open the codec and to
2563 decompress the frame. We try to avoid that in most cases as
2564 it takes longer and uses more memory. For MPEG-4, we need to
2565 decompress for QuickTime.
2567 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2568 least one frame of codec data, this makes sure the codec initializes
2569 the channel configuration and does not only trust the values from the container.
2571 try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2573 st->codec_info_nb_frames++;
2578 AVPacket empty_pkt = { 0 };
2580 av_init_packet(&empty_pkt);
2582 ret = -1; /* we could not have all the codec parameters before EOF */
2583 for(i=0;i<ic->nb_streams;i++) {
2584 st = ic->streams[i];
2586 /* flush the decoders */
2587 if (st->info->found_decoder == 1) {
2589 err = try_decode_frame(st, &empty_pkt,
2590 (options && i < orig_nb_streams) ?
2591 &options[i] : NULL);
2592 } while (err > 0 && !has_codec_parameters(st));
2595 av_log(ic, AV_LOG_INFO,
2596 "decoding for stream %d failed\n", st->index);
2600 if (!has_codec_parameters(st)){
2602 avcodec_string(buf, sizeof(buf), st->codec, 0);
2603 av_log(ic, AV_LOG_WARNING,
2604 "Could not find codec parameters (%s)\n", buf);
2611 // close codecs which were opened in try_decode_frame()
2612 for(i=0;i<ic->nb_streams;i++) {
2613 st = ic->streams[i];
2614 avcodec_close(st->codec);
2616 for(i=0;i<ic->nb_streams;i++) {
2617 st = ic->streams[i];
2618 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2619 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
2620 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2621 if(ff_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
2622 st->codec->codec_tag= tag;
2625 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2626 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2627 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2628 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2629 // the check for tb_unreliable() is not completely correct, since this is not about handling
2630 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2631 // ipmovie.c produces.
2632 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
2633 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2634 if (st->info->duration_count && !st->r_frame_rate.num
2635 && tb_unreliable(st->codec) /*&&
2636 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2637 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2639 double best_error= 0.01;
2641 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); j++) {
2644 if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
2646 if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
2649 int n= st->info->duration_count;
2650 double a= st->info->duration_error[k][0][j] / n;
2651 double error= st->info->duration_error[k][1][j]/n - a*a;
2653 if(error < best_error && best_error> 0.000000001){
2655 num = get_std_framerate(j);
2658 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
2661 // do not increase frame rate by more than 1 % in order to match a standard rate.
2662 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2663 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2666 if (!st->r_frame_rate.num){
2667 if( st->codec->time_base.den * (int64_t)st->time_base.num
2668 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2669 st->r_frame_rate.num = st->codec->time_base.den;
2670 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2672 st->r_frame_rate.num = st->time_base.den;
2673 st->r_frame_rate.den = st->time_base.num;
2676 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2677 if(!st->codec->bits_per_coded_sample)
2678 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2679 // set stream disposition based on audio service type
2680 switch (st->codec->audio_service_type) {
2681 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2682 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2683 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2684 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2685 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2686 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2687 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2688 st->disposition = AV_DISPOSITION_COMMENT; break;
2689 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2690 st->disposition = AV_DISPOSITION_KARAOKE; break;
2695 estimate_timings(ic, old_offset);
2697 compute_chapters_end(ic);
2699 find_stream_info_err:
2700 for (i=0; i < ic->nb_streams; i++) {
2701 if (ic->streams[i]->codec)
2702 ic->streams[i]->codec->thread_count = 0;
2703 av_freep(&ic->streams[i]->info);
/* Return the first program after 'last' that contains stream index s, or
 * (per the visible early-return) the matching program itself.
 * NOTE(review): this extract is missing intermediate source lines (loop
 * variable declarations, closing braces, the no-match return path); code
 * below is kept byte-identical. */
2708 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
2712 for (i = 0; i < ic->nb_programs; i++) {
2713 if (ic->programs[i] == last) {
/* scan this program's stream index list for stream s */
2717 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2718 if (ic->programs[i]->stream_index[j] == s)
2719 return ic->programs[i];
/* Pick the "best" stream of the given media type: prefer the stream with the
 * most decoded frames (codec_info_nb_frames), skipping impaired-audience
 * dispositions, optionally restricted to the program of related_stream and
 * optionally requiring that a decoder exists (decoder_ret).
 * Returns the stream index, AVERROR_STREAM_NOT_FOUND, or
 * AVERROR_DECODER_NOT_FOUND.
 * NOTE(review): extract has gaps (some params, braces, 'continue's and the
 * final return are missing from view); code kept byte-identical. */
2725 int av_find_best_stream(AVFormatContext *ic,
2726 enum AVMediaType type,
2727 int wanted_stream_nb,
2729 AVCodec **decoder_ret,
2732 int i, nb_streams = ic->nb_streams;
2733 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2734 unsigned *program = NULL;
2735 AVCodec *decoder = NULL, *best_decoder = NULL;
/* restrict the search to the program containing related_stream, if any */
2737 if (related_stream >= 0 && wanted_stream_nb < 0) {
2738 AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
2740 program = p->stream_index;
2741 nb_streams = p->nb_stream_indexes;
2744 for (i = 0; i < nb_streams; i++) {
2745 int real_stream_index = program ? program[i] : i;
2746 AVStream *st = ic->streams[real_stream_index];
2747 AVCodecContext *avctx = st->codec;
2748 if (avctx->codec_type != type)
2750 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
/* never auto-select hearing/visually impaired variants */
2752 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2755 decoder = avcodec_find_decoder(st->codec->codec_id);
2758 ret = AVERROR_DECODER_NOT_FOUND;
/* keep the stream with the highest observed frame count */
2762 if (best_count >= st->codec_info_nb_frames)
2764 best_count = st->codec_info_nb_frames;
2765 ret = real_stream_index;
2766 best_decoder = decoder;
/* nothing found inside the related program: restart over all streams */
2767 if (program && i == nb_streams - 1 && ret < 0) {
2769 nb_streams = ic->nb_streams;
2770 i = 0; /* no related stream found, try again with everything */
2774 *decoder_ret = best_decoder;
2778 /*******************************************************/
/* Resume a paused network stream: delegate to the demuxer's read_play hook
 * when present, otherwise un-pause the underlying AVIOContext.
 * NOTE(review): extract is missing braces and an 'if (s->pb)' guard line;
 * code kept byte-identical. */
2780 int av_read_play(AVFormatContext *s)
2782 if (s->iformat->read_play)
2783 return s->iformat->read_play(s);
2785 return avio_pause(s->pb, 0);
2786 return AVERROR(ENOSYS);
/* Pause a network stream: mirror of av_read_play() with pause=1.
 * NOTE(review): extract is missing braces and the 'if (s->pb)' guard line;
 * code kept byte-identical. */
2789 int av_read_pause(AVFormatContext *s)
2791 if (s->iformat->read_pause)
2792 return s->iformat->read_pause(s);
2794 return avio_pause(s->pb, 1);
2795 return AVERROR(ENOSYS);
/* Free an AVFormatContext and everything it owns: per-stream data (parser,
 * attached picture, metadata, index, codec context), programs, chapters,
 * context metadata and the context itself.
 * NOTE(review): extract is missing several lines (NULL check, the 'st'
 * assignment in the stream loop, some closing braces, the final av_free(s));
 * code kept byte-identical. */
2798 void avformat_free_context(AVFormatContext *s)
2804 if (s->iformat && s->iformat->priv_class && s->priv_data)
2805 av_opt_free(s->priv_data);
2807 for(i=0;i<s->nb_streams;i++) {
2808 /* free all data in a stream component */
2811 av_parser_close(st->parser);
2813 if (st->attached_pic.data)
2814 av_free_packet(&st->attached_pic);
2815 av_dict_free(&st->metadata);
2816 av_freep(&st->index_entries);
2817 av_freep(&st->codec->extradata);
2818 av_freep(&st->codec->subtitle_header);
2819 av_freep(&st->codec);
2820 av_freep(&st->priv_data);
2821 av_freep(&st->info);
/* programs are freed in reverse order of creation */
2824 for(i=s->nb_programs-1; i>=0; i--) {
2825 av_dict_free(&s->programs[i]->metadata);
2826 av_freep(&s->programs[i]->stream_index);
2827 av_freep(&s->programs[i]);
2829 av_freep(&s->programs);
2830 av_freep(&s->priv_data);
2831 while(s->nb_chapters--) {
2832 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2833 av_freep(&s->chapters[s->nb_chapters]);
2835 av_freep(&s->chapters);
2836 av_dict_free(&s->metadata);
2837 av_freep(&s->streams);
/* Deprecated wrapper kept for ABI compatibility (guarded by
 * FF_API_CLOSE_INPUT_FILE); forwards to avformat_close_input(). */
2841 #if FF_API_CLOSE_INPUT_FILE
2842 void av_close_input_file(AVFormatContext *s)
2844 avformat_close_input(&s);
/* Close an input: flush the packet queue, call the demuxer's read_close,
 * free the context and (per the pb selection below) close the AVIOContext
 * unless it is caller-owned (AVFMT_NOFILE / AVFMT_FLAG_CUSTOM_IO).
 * NOTE(review): extract is missing the tail of the pb ternary, the
 * avio_close(pb) call and '*ps = NULL'; code kept byte-identical. */
2848 void avformat_close_input(AVFormatContext **ps)
2850 AVFormatContext *s = *ps;
2851 AVIOContext *pb = (s->iformat && (s->iformat->flags & AVFMT_NOFILE)) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2853 flush_packet_queue(s);
2854 if (s->iformat && (s->iformat->read_close))
2855 s->iformat->read_close(s);
2856 avformat_free_context(s);
/* Deprecated wrapper (FF_API_NEW_STREAM): create a stream via
 * avformat_new_stream(); the missing lines presumably set st->id = id and
 * return st — TODO confirm against the full source. */
2862 #if FF_API_NEW_STREAM
2863 AVStream *av_new_stream(AVFormatContext *s, int id)
2865 AVStream *st = avformat_new_stream(s, NULL);
/* Allocate a new AVStream, append it to s->streams and initialize its
 * defaults (timestamps unset, 90 kHz MPEG-like time base, relative cur_dts
 * for demuxing so wallclock-less formats still get timestamps).
 * NOTE(review): extract is missing error-unwind lines, some braces and the
 * 'return st'; code kept byte-identical. */
2872 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
/* guard the realloc below against integer overflow of the element count */
2878 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2880 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2883 s->streams = streams;
2885 st = av_mallocz(sizeof(AVStream));
2888 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2892 st->info->last_dts = AV_NOPTS_VALUE;
2894 st->codec = avcodec_alloc_context3(c);
2896 /* no default bitrate if decoding */
2897 st->codec->bit_rate = 0;
2899 st->index = s->nb_streams;
2900 st->start_time = AV_NOPTS_VALUE;
2901 st->duration = AV_NOPTS_VALUE;
2902 /* we set the current DTS to 0 so that formats without any timestamps
2903 but durations get some timestamps, formats with some unknown
2904 timestamps have their first few packets buffered and the
2905 timestamps corrected before they are returned to the user */
2906 st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
2907 st->first_dts = AV_NOPTS_VALUE;
2908 st->probe_packets = MAX_PROBE_PACKETS;
2910 /* default pts setting is MPEG-like */
2911 avpriv_set_pts_info(st, 33, 1, 90000);
2912 st->last_IP_pts = AV_NOPTS_VALUE;
/* all reorder slots start unset */
2913 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2914 st->pts_buffer[i]= AV_NOPTS_VALUE;
2915 st->reference_dts = AV_NOPTS_VALUE;
2917 st->sample_aspect_ratio = (AVRational){0,1};
2919 s->streams[s->nb_streams++] = st;
/* Return the program with the given id, creating and registering a new one
 * (discard = AVDISCARD_NONE) if it does not exist yet.
 * NOTE(review): extract is missing the NULL check after av_mallocz, the
 * id assignment and the return; code kept byte-identical. */
2923 AVProgram *av_new_program(AVFormatContext *ac, int id)
2925 AVProgram *program=NULL;
2928 av_dlog(ac, "new_program: id=0x%04x\n", id);
/* reuse an existing program with the same id */
2930 for(i=0; i<ac->nb_programs; i++)
2931 if(ac->programs[i]->id == id)
2932 program = ac->programs[i];
2935 program = av_mallocz(sizeof(AVProgram));
2938 dynarray_add(&ac->programs, &ac->nb_programs, program);
2939 program->discard = AVDISCARD_NONE;
/* Return the chapter with the given id, creating it if needed, then update
 * its title metadata, time base, start and (presumably, in a missing line)
 * end time.
 * NOTE(review): extract is missing allocation checks, the id/end
 * assignments and the return; code kept byte-identical. */
2946 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2948 AVChapter *chapter = NULL;
2951 for(i=0; i<s->nb_chapters; i++)
2952 if(s->chapters[i]->id == id)
2953 chapter = s->chapters[i];
2956 chapter= av_mallocz(sizeof(AVChapter));
2959 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2961 av_dict_set(&chapter->metadata, "title", title, 0);
2963 chapter->time_base= time_base;
2964 chapter->start = start;
2970 /************************************************************/
2971 /* output media file */
/* Allocate an output AVFormatContext: resolve the muxer from 'oformat',
 * the 'format' name, or the filename extension; allocate and default-init
 * the muxer's private data; copy the filename into the context.
 * On any failure the context is freed and an AVERROR code is returned via
 * the (partially visible) error path.
 * NOTE(review): extract is missing the NULL checks, goto targets and
 * success return; code kept byte-identical. */
2973 int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
2974 const char *format, const char *filename)
2976 AVFormatContext *s = avformat_alloc_context();
/* explicit format name takes precedence over filename guessing */
2985 oformat = av_guess_format(format, NULL, NULL);
2987 av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
2988 ret = AVERROR(EINVAL);
2992 oformat = av_guess_format(NULL, filename, NULL);
2994 ret = AVERROR(EINVAL);
2995 av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
3002 s->oformat = oformat;
3003 if (s->oformat->priv_data_size > 0) {
3004 s->priv_data = av_mallocz(s->oformat->priv_data_size);
/* AVOptions-enabled muxers get their defaults applied here */
3007 if (s->oformat->priv_class) {
3008 *(const AVClass**)s->priv_data= s->oformat->priv_class;
3009 av_opt_set_defaults(s->priv_data);
3012 s->priv_data = NULL;
3015 av_strlcpy(s->filename, filename, sizeof(s->filename));
3019 av_log(s, AV_LOG_ERROR, "Out of memory\n");
3020 ret = AVERROR(ENOMEM);
3022 avformat_free_context(s);
/* Deprecated wrapper (FF_API_ALLOC_OUTPUT_CONTEXT) around
 * avformat_alloc_output_context2(); returns NULL on failure instead of an
 * error code. */
3026 #if FF_API_ALLOC_OUTPUT_CONTEXT
3027 AVFormatContext *avformat_alloc_output_context(const char *format,
3028 AVOutputFormat *oformat, const char *filename)
3030 AVFormatContext *avctx;
3031 int ret = avformat_alloc_output_context2(&avctx, oformat, format, filename);
3032 return ret < 0 ? NULL : avctx;
/* Check the stream's (codec_tag, codec_id) pair against the muxer's codec
 * tag tables; the policy is stated in the original comment below.
 * NOTE(review): extract is missing the inner-loop bookkeeping (tag/id
 * captures, avctag increment, braces) and the return statements; code kept
 * byte-identical. */
3036 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
3038 const AVCodecTag *avctag;
3040 enum CodecID id = CODEC_ID_NONE;
3041 unsigned int tag = 0;
3044 * Check that tag + id is in the table
3045 * If neither is in the table -> OK
3046 * If tag is in the table with another id -> FAIL
3047 * If id is in the table with another tag -> FAIL unless strict < normal
3049 for (n = 0; s->oformat->codec_tag[n]; n++) {
3050 avctag = s->oformat->codec_tag[n];
3051 while (avctag->id != CODEC_ID_NONE) {
/* case-insensitive fourcc comparison */
3052 if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
3054 if (id == st->codec->codec_id)
3057 if (avctag->id == st->codec->codec_id)
3062 if (id != CODEC_ID_NONE)
3064 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
/* Write the muxer header: apply caller options, sanity-check every stream
 * (sample rate, time base, dimensions, aspect ratio), validate/derive codec
 * tags, allocate muxer private data if still missing, stamp the 'encoder'
 * metadata, call the muxer's write_header and initialize per-stream PTS
 * generation.
 * NOTE(review): extract is missing goto labels, 'st' assignments in loops,
 * several braces and returns; code kept byte-identical. */
3069 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
3073 AVDictionary *tmp = NULL;
/* work on a copy so unconsumed options can be reported back */
3076 av_dict_copy(&tmp, *options, 0);
3077 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
3079 if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
3080 (ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3083 // some sanity checks
3084 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
3085 av_log(s, AV_LOG_ERROR, "no streams\n");
3086 ret = AVERROR(EINVAL);
3090 for(i=0;i<s->nb_streams;i++) {
3093 switch (st->codec->codec_type) {
3094 case AVMEDIA_TYPE_AUDIO:
3095 if(st->codec->sample_rate<=0){
3096 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
3097 ret = AVERROR(EINVAL);
/* derive block_align for fixed-size sample formats */
3100 if(!st->codec->block_align)
3101 st->codec->block_align = st->codec->channels *
3102 av_get_bits_per_sample(st->codec->codec_id) >> 3;
3104 case AVMEDIA_TYPE_VIDEO:
3105 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
3106 av_log(s, AV_LOG_ERROR, "time base not set\n");
3107 ret = AVERROR(EINVAL);
3110 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
3111 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
3112 ret = AVERROR(EINVAL);
/* SAR mismatch > 0.4% between stream and codec layer is an error */
3115 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)
3116 && FFABS(av_q2d(st->sample_aspect_ratio) - av_q2d(st->codec->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
3118 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
3119 "(%d/%d) and encoder layer (%d/%d)\n",
3120 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3121 st->codec->sample_aspect_ratio.num,
3122 st->codec->sample_aspect_ratio.den);
3123 ret = AVERROR(EINVAL);
3129 if(s->oformat->codec_tag){
3130 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
3131 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi, we override it here
3132 st->codec->codec_tag= 0;
3134 if(st->codec->codec_tag){
3135 if (!validate_codec_tag(s, st)) {
3137 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
3138 av_log(s, AV_LOG_ERROR,
3139 "Tag %s/0x%08x incompatible with output codec id '%d'\n",
3140 tagbuf, st->codec->codec_tag, st->codec->codec_id);
3141 ret = AVERROR_INVALIDDATA;
3145 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
3148 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
3149 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
3150 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
3153 if (!s->priv_data && s->oformat->priv_data_size > 0) {
3154 s->priv_data = av_mallocz(s->oformat->priv_data_size);
3155 if (!s->priv_data) {
3156 ret = AVERROR(ENOMEM);
3159 if (s->oformat->priv_class) {
3160 *(const AVClass**)s->priv_data= s->oformat->priv_class;
3161 av_opt_set_defaults(s->priv_data);
3162 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3167 /* set muxer identification string */
3168 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
3169 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
3172 if(s->oformat->write_header){
3173 ret = s->oformat->write_header(s);
3178 /* init PTS generation */
3179 for(i=0;i<s->nb_streams;i++) {
3180 int64_t den = AV_NOPTS_VALUE;
3183 switch (st->codec->codec_type) {
3184 case AVMEDIA_TYPE_AUDIO:
3185 den = (int64_t)st->time_base.num * st->codec->sample_rate;
3187 case AVMEDIA_TYPE_VIDEO:
3188 den = (int64_t)st->time_base.num * st->codec->time_base.den;
3193 if (den != AV_NOPTS_VALUE) {
3195 ret = AVERROR_INVALIDDATA;
3198 frac_init(&st->pts, 0, 0, den);
3203 av_dict_free(options);
3212 //FIXME merge with compute_pkt_fields
/* Muxing-side timestamp fixup: fill in missing duration/pts/dts, enforce
 * monotonic dts and pts>=dts, then advance the stream's pts fraction for
 * the next packet.
 * NOTE(review): extract is missing several lines (returns, braces, the
 * 'break's in the switch); code kept byte-identical. */
3213 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
3214 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
3215 int num, den, frame_size, i;
3217 av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n",
3218 pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
3220 /* duration field */
3221 if (pkt->duration == 0) {
3222 compute_frame_duration(&num, &den, st, NULL, pkt);
3224 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
/* no B-frames: dts can serve as pts */
3228 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
3231 //XXX/FIXME this is a temporary hack until all encoders output pts
3232 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
3235 av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
3239 // pkt->pts= st->cur_dts;
3240 pkt->pts= st->pts.val;
3243 //calculate dts from pts
3244 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
3245 st->pts_buffer[0]= pkt->pts;
3246 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
3247 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
/* single bubble pass keeps the reorder buffer sorted */
3248 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
3249 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
3251 pkt->dts= st->pts_buffer[0];
/* strict monotonicity unless the muxer sets AVFMT_TS_NONSTRICT */
3254 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)){
3255 av_log(s, AV_LOG_ERROR,
3256 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
3257 st->index, st->cur_dts, pkt->dts);
3258 return AVERROR(EINVAL);
3260 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3261 av_log(s, AV_LOG_ERROR, "pts (%"PRId64") < dts (%"PRId64") in stream %d\n", pkt->pts, pkt->dts, st->index);
3262 return AVERROR(EINVAL);
3265 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
3266 st->cur_dts= pkt->dts;
3267 st->pts.val= pkt->dts;
3270 switch (st->codec->codec_type) {
3271 case AVMEDIA_TYPE_AUDIO:
3272 frame_size = get_audio_frame_size(st->codec, pkt->size, 1);
3274 /* HACK/FIXME, we skip the initial 0 size packets as they are most
3275 likely equal to the encoder delay, but it would be better if we
3276 had the real timestamps from the encoder */
3277 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3278 frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3281 case AVMEDIA_TYPE_VIDEO:
3282 frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Write one packet directly (no interleaving): NULL packet flushes when the
 * muxer supports AVFMT_ALLOW_FLUSH; otherwise timestamps are fixed up via
 * compute_pkt_fields2() before calling write_packet.
 * NOTE(review): extract is missing the NULL-packet branch opening, the
 * 'return ret' lines and closing brace; code kept byte-identical. */
3290 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3295 if (s->oformat->flags & AVFMT_ALLOW_FLUSH)
3296 return s->oformat->write_packet(s, pkt);
3300 ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
/* timestamp problems are fatal only for muxers that need timestamps */
3302 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3305 ret= s->oformat->write_packet(s, pkt);
3308 s->streams[pkt->stream_index]->nb_frames++;
3312 #define CHUNK_START 0x1000
/* Insert a packet into the interleaving queue, keeping it ordered by the
 * supplied comparator; when chunked interleaving is enabled
 * (max_chunk_size/max_chunk_duration) packets are grouped and chunk starts
 * are flagged with CHUNK_START so the sort never splits a chunk.
 * Ownership of the packet's data moves into the queue (destruct nulled,
 * then duplicated if needed).
 * NOTE(review): extract is missing the ENOMEM check intro, parts of the
 * chunking branch and the final return; code kept byte-identical. */
3314 int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3315 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3317 AVPacketList **next_point, *this_pktl;
3318 AVStream *st= s->streams[pkt->stream_index];
3319 int chunked= s->max_chunk_size || s->max_chunk_duration;
3321 this_pktl = av_mallocz(sizeof(AVPacketList));
3323 return AVERROR(ENOMEM);
3324 this_pktl->pkt= *pkt;
3325 pkt->destruct= NULL; // do not free original but only the copy
3326 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
/* start searching after the stream's previous packet to keep per-stream order */
3328 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3329 next_point = &(st->last_in_packet_buffer->next);
3331 next_point = &s->packet_buffer;
3336 uint64_t max= av_rescale_q(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base);
/* -1U forms make a 0 limit mean "no limit" via unsigned wraparound */
3337 if( st->interleaver_chunk_size + pkt->size <= s->max_chunk_size-1U
3338 && st->interleaver_chunk_duration + pkt->duration <= max-1U){
3339 st->interleaver_chunk_size += pkt->size;
3340 st->interleaver_chunk_duration += pkt->duration;
3343 st->interleaver_chunk_size =
3344 st->interleaver_chunk_duration = 0;
3345 this_pktl->pkt.flags |= CHUNK_START;
/* fast path: packet sorts after the current queue tail */
3349 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3351 && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
3352 || !compare(s, &(*next_point)->pkt, pkt))){
3353 next_point= &(*next_point)->next;
3358 next_point = &(s->packet_buffer_end->next);
3361 assert(!*next_point);
3363 s->packet_buffer_end= this_pktl;
3366 this_pktl->next= *next_point;
3368 s->streams[pkt->stream_index]->last_in_packet_buffer=
3369 *next_point= this_pktl;
/* DTS comparator for interleaving: compare rescaled DTS of 'next' vs 'pkt';
 * when audio_preload is set and exactly one of the two streams is audio,
 * the audio stream's effective timestamp is shifted earlier by the preload,
 * using an overflow-safe cross-multiplied form when needed.
 * Ties fall back to stream index order.
 * NOTE(review): extract is missing braces and the primary return of
 * 'comp'; code kept byte-identical. */
3373 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3375 AVStream *st = s->streams[ pkt ->stream_index];
3376 AVStream *st2= s->streams[ next->stream_index];
3377 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3379 if(s->audio_preload && ((st->codec->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codec->codec_type == AVMEDIA_TYPE_AUDIO))){
3380 int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3381 int64_t ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3383 ts= ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
3384 -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
3387 comp= (ts>ts2) - (ts<ts2);
3391 return pkt->stream_index < next->stream_index;
/* Interleave buffered packets by DTS. Output the queue head when every
 * stream has a buffered packet, when flushing, or when only subtitle
 * streams are missing and the buffered DTS spread exceeds 20 s (subtitles
 * may legitimately have huge gaps).
 * Returns 1 when a packet was output, 0 otherwise.
 * NOTE(review): extract is missing the packet-to-out copy, 'flush = 1'
 * assignment, some braces and returns; code kept byte-identical. */
3395 int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3396 AVPacket *pkt, int flush)
3399 int stream_count=0, noninterleaved_count=0;
3400 int64_t delta_dts_max = 0;
3404 ret = ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
3409 for(i=0; i < s->nb_streams; i++) {
3410 if (s->streams[i]->last_in_packet_buffer) {
3412 } else if(s->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
3413 ++noninterleaved_count;
3417 if (s->nb_streams == stream_count) {
/* measure how far ahead any stream is vs the queue head */
3420 for(i=0; i < s->nb_streams; i++) {
3421 if (s->streams[i]->last_in_packet_buffer) {
3423 av_rescale_q(s->streams[i]->last_in_packet_buffer->pkt.dts,
3424 s->streams[i]->time_base,
3426 av_rescale_q(s->packet_buffer->pkt.dts,
3427 s->streams[s->packet_buffer->pkt.stream_index]->time_base,
3429 delta_dts_max= FFMAX(delta_dts_max, delta_dts);
3432 if(s->nb_streams == stream_count+noninterleaved_count &&
3433 delta_dts_max > 20*AV_TIME_BASE) {
3434 av_log(s, AV_LOG_DEBUG, "flushing with %d noninterleaved\n", noninterleaved_count);
3438 if(stream_count && flush){
3439 pktl= s->packet_buffer;
3442 s->packet_buffer= pktl->next;
3443 if(!s->packet_buffer)
3444 s->packet_buffer_end= NULL;
/* the stream's tail pointer must not dangle after the pop */
3446 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3447 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3451 av_init_packet(out);
/* Deprecated public wrapper (FF_API_INTERLEAVE_PACKET) around
 * ff_interleave_packet_per_dts(). */
3456 #if FF_API_INTERLEAVE_PACKET
3457 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3458 AVPacket *pkt, int flush)
3460 return ff_interleave_packet_per_dts(s, out, pkt, flush);
3465 * Interleave an AVPacket correctly so it can be muxed.
3466 * @param out the interleaved packet will be output here
3467 * @param in the input packet
3468 * @param flush 1 if no further packets are available as input and all
3469 * remaining packets should be output
3470 * @return 1 if a packet was output, 0 if no packet could be output,
3471 * < 0 if an error occurred
/* Dispatch to the muxer's custom interleave hook when it has one,
 * otherwise fall back to DTS-based interleaving. */
3473 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3474 if (s->oformat->interleave_packet) {
3475 int ret = s->oformat->interleave_packet(s, out, in, flush);
/* NOTE(review): a line handling ret/in-freeing appears to be missing
 * from this extract before the return */
3480 return ff_interleave_packet_per_dts(s, out, in, flush);
/* Public interleaved write: fix up timestamps, feed the packet into the
 * interleaver and drain any packets it releases to the muxer; finally
 * surface pending I/O errors from the AVIOContext.
 * NOTE(review): extract is missing the NULL-packet flush branch, the drain
 * loop header, braces and returns; code kept byte-identical. */
3483 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3487 AVStream *st= s->streams[ pkt->stream_index];
3489 //FIXME/XXX/HACK drop zero sized packets
3490 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3493 av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
3494 pkt->size, pkt->dts, pkt->pts);
3495 if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3498 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3499 return AVERROR(EINVAL);
3501 av_dlog(s, "av_interleaved_write_frame FLUSH\n");
3507 int ret= interleave_packet(s, &opkt, pkt, flush);
3508 if(ret<=0) //FIXME cleanup needed for ret<0 ?
3511 ret= s->oformat->write_packet(s, &opkt);
3513 s->streams[opkt.stream_index]->nb_frames++;
3515 av_free_packet(&opkt);
3520 if(s->pb && s->pb->error)
3521 return s->pb->error;
/* Finish muxing: drain the interleaver with flush=1, write every remaining
 * packet, call the muxer's write_trailer, then free per-stream and muxer
 * private data.
 * NOTE(review): extract is missing the drain loop header, some braces,
 * goto labels and the final return; code kept byte-identical. */
3525 int av_write_trailer(AVFormatContext *s)
3531 ret= interleave_packet(s, &pkt, NULL, 1);
3532 if(ret<0) //FIXME cleanup needed for ret<0 ?
3537 ret= s->oformat->write_packet(s, &pkt);
3539 s->streams[pkt.stream_index]->nb_frames++;
3541 av_free_packet(&pkt);
3545 if(s->pb && s->pb->error)
3549 if(s->oformat->write_trailer)
3550 ret = s->oformat->write_trailer(s);
/* surface a pending I/O error if the trailer itself succeeded */
3553 ret = s->pb ? s->pb->error : 0;
3554 for(i=0;i<s->nb_streams;i++) {
3555 av_freep(&s->streams[i]->priv_data);
3556 av_freep(&s->streams[i]->index_entries);
3558 if (s->oformat->priv_class)
3559 av_opt_free(s->priv_data);
3560 av_freep(&s->priv_data);
/* Query the muxer's last output dts/wallclock pair; ENOSYS when the muxer
 * does not implement get_output_timestamp.
 * NOTE(review): extract is missing braces and the 'return 0'; code kept
 * byte-identical. */
3564 int av_get_output_timestamp(struct AVFormatContext *s, int stream,
3565 int64_t *dts, int64_t *wall)
3567 if (!s->oformat || !s->oformat->get_output_timestamp)
3568 return AVERROR(ENOSYS);
3569 s->oformat->get_output_timestamp(s, stream, dts, wall);
/* Add stream 'idx' to the program with id 'progid', growing its index
 * array; validates the index and skips duplicates.
 * NOTE(review): extract is missing 'continue'/'return' lines, the tmp
 * declaration context and the realloc-failure handling; code kept
 * byte-identical. */
3573 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3576 AVProgram *program=NULL;
3579 if (idx >= ac->nb_streams) {
3580 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3584 for(i=0; i<ac->nb_programs; i++){
3585 if(ac->programs[i]->id != progid)
3587 program = ac->programs[i];
/* already a member? then nothing to do */
3588 for(j=0; j<program->nb_stream_indexes; j++)
3589 if(program->stream_index[j] == idx)
3592 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3595 program->stream_index = tmp;
3596 program->stream_index[program->nb_stream_indexes++] = idx;
/* Log a rate with adaptive precision: two decimals when fractional, whole
 * number when integral, or a 'k' suffix when it is a whole multiple of
 * 1000. */
3601 static void print_fps(double d, const char *postfix){
3602 uint64_t v= lrintf(d*100);
3603 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3604 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3605 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/* Pretty-print a metadata dictionary (skipping a lone "language" entry and
 * the "language" key itself), folding CR/LF in values into spaces/aligned
 * continuation lines.
 * NOTE(review): extract is missing the tmp buffer declaration, the p
 * advance and loop-termination lines, and closing braces; code kept
 * byte-identical. */
3608 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3610 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3611 AVDictionaryEntry *tag=NULL;
3613 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3614 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3615 if(strcmp("language", tag->key)){
3616 const char *p = tag->value;
3617 av_log(ctx, AV_LOG_INFO, "%s %-16s: ", indent, tag->key);
/* print the value up to the next CR/LF */
3620 size_t len = strcspn(p, "\xd\xa");
3621 av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3622 av_log(ctx, AV_LOG_INFO, "%s", tmp);
3624 if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3625 if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s %-16s: ", indent, "");
3628 av_log(ctx, AV_LOG_INFO, "\n");
3634 /* "user interface" functions */
/* Log one stream's description: id, language, codec string, SAR/DAR,
 * frame-rate/time-base figures (fps/tbr/tbn/tbc), disposition flags and
 * metadata.
 * NOTE(review): extract is missing the buf declaration, a lang check and
 * the closing braces; code kept byte-identical. */
3635 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3638 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3639 AVStream *st = ic->streams[i];
3640 int g = av_gcd(st->time_base.num, st->time_base.den);
3641 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3642 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3643 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);
3644 /* the pid is an important information, so we display it */
3645 /* XXX: add a generic system */
3646 if (flags & AVFMT_SHOW_IDS)
3647 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3649 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3650 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3651 av_log(NULL, AV_LOG_INFO, ": %s", buf);
3652 if (st->sample_aspect_ratio.num && // default
3653 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3654 AVRational display_aspect_ratio;
3655 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3656 st->codec->width*st->sample_aspect_ratio.num,
3657 st->codec->height*st->sample_aspect_ratio.den,
3659 av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3660 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3661 display_aspect_ratio.num, display_aspect_ratio.den);
3663 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3664 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3665 print_fps(av_q2d(st->avg_frame_rate), "fps");
3666 if(st->r_frame_rate.den && st->r_frame_rate.num)
3667 print_fps(av_q2d(st->r_frame_rate), "tbr");
3668 if(st->time_base.den && st->time_base.num)
3669 print_fps(1/av_q2d(st->time_base), "tbn");
3670 if(st->codec->time_base.den && st->codec->time_base.num)
3671 print_fps(1/av_q2d(st->codec->time_base), "tbc");
3673 if (st->disposition & AV_DISPOSITION_DEFAULT)
3674 av_log(NULL, AV_LOG_INFO, " (default)");
3675 if (st->disposition & AV_DISPOSITION_DUB)
3676 av_log(NULL, AV_LOG_INFO, " (dub)");
3677 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3678 av_log(NULL, AV_LOG_INFO, " (original)");
3679 if (st->disposition & AV_DISPOSITION_COMMENT)
3680 av_log(NULL, AV_LOG_INFO, " (comment)");
3681 if (st->disposition & AV_DISPOSITION_LYRICS)
3682 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3683 if (st->disposition & AV_DISPOSITION_KARAOKE)
3684 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3685 if (st->disposition & AV_DISPOSITION_FORCED)
3686 av_log(NULL, AV_LOG_INFO, " (forced)");
3687 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3688 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3689 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3690 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3691 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3692 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3693 av_log(NULL, AV_LOG_INFO, "\n");
3694 dump_metadata(NULL, st->metadata, " ");
/* Dump a human-readable description of the whole context: format name,
 * metadata, duration/start/bitrate, chapters, programs (with their member
 * streams), then any streams not covered by a program. 'printed' tracks
 * which streams were already shown via a program.
 * NOTE(review): extract is missing parameters (url/index/is_output),
 * hours/mins computation, several braces and the trailing av_free(printed);
 * code kept byte-identical. */
3697 void av_dump_format(AVFormatContext *ic,
3703 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3704 if (ic->nb_streams && !printed)
3707 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3708 is_output ? "Output" : "Input",
3710 is_output ? ic->oformat->name : ic->iformat->name,
3711 is_output ? "to" : "from", url);
3712 dump_metadata(NULL, ic->metadata, " ");
3714 av_log(NULL, AV_LOG_INFO, " Duration: ");
3715 if (ic->duration != AV_NOPTS_VALUE) {
3716 int hours, mins, secs, us;
3717 secs = ic->duration / AV_TIME_BASE;
3718 us = ic->duration % AV_TIME_BASE;
3723 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3724 (100 * us) / AV_TIME_BASE);
3726 av_log(NULL, AV_LOG_INFO, "N/A");
3728 if (ic->start_time != AV_NOPTS_VALUE) {
3730 av_log(NULL, AV_LOG_INFO, ", start: ");
3731 secs = ic->start_time / AV_TIME_BASE;
/* abs() so a negative start time prints with correct fraction */
3732 us = abs(ic->start_time % AV_TIME_BASE);
3733 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3734 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3736 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3738 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3740 av_log(NULL, AV_LOG_INFO, "N/A");
3742 av_log(NULL, AV_LOG_INFO, "\n");
3744 for (i = 0; i < ic->nb_chapters; i++) {
3745 AVChapter *ch = ic->chapters[i];
3746 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3747 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3748 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3750 dump_metadata(NULL, ch->metadata, " ");
3752 if(ic->nb_programs) {
3753 int j, k, total = 0;
3754 for(j=0; j<ic->nb_programs; j++) {
3755 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3757 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3758 name ? name->value : "");
3759 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3760 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3761 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3762 printed[ic->programs[j]->stream_index[k]] = 1;
3764 total += ic->programs[j]->nb_stream_indexes;
3766 if (total < ic->nb_streams)
3767 av_log(NULL, AV_LOG_INFO, " No Program\n");
/* finally, streams not attached to any program */
3769 for(i=0;i<ic->nb_streams;i++)
3771 dump_stream_format(ic, i, index, is_output);
3776 int64_t av_gettime(void)
3779 gettimeofday(&tv,NULL);
3780 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
3783 uint64_t ff_ntp_time(void)
3785 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/**
 * Build a frame filename from a printf-like "%d" pattern.
 *
 * Exactly one "%d" (optionally with a zero-padding width, e.g. "%05d")
 * must appear in path; it is replaced by number. "%%" emits a literal '%'.
 *
 * @param buf      destination buffer
 * @param buf_size size of buf in bytes (result is always NUL-terminated)
 * @param path     numbered sequence pattern, e.g. "img%03d.png"
 * @param number   frame number to substitute for the "%d"
 * @return 0 on success, -1 if the pattern is invalid (no %d, more than
 *         one %d, unknown conversion) or the result does not fit in buf
 */
int av_get_frame_filename(char *buf, int buf_size,
                          const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q = buf;
    p = path;
    percentd_found = 0;
    for (;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            do {
                nd = 0;
                /* cast to unsigned char: passing a plain (possibly
                 * negative) char to isdigit() is undefined behavior */
                while (isdigit((unsigned char)*p)) {
                    nd = nd * 10 + *p++ - '0';
                }
                c = *p++;
            } while (isdigit((unsigned char)c));

            switch (c) {
            case '%':
                goto addchar;
            case 'd':
                if (percentd_found) /* only one %d is allowed */
                    goto fail;
                percentd_found = 1;
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;
            }
        } else {
addchar:
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;
fail:
    *q = '\0';
    return -1;
}
/*
 * Dump size bytes of buf as a canonical hex dump (hex column plus a
 * printable-ASCII column), either to the stdio stream f (if non-NULL) or
 * via av_log() on avcl at the given importance level.
 *
 * NOTE(review): several lines of this definition are missing from the
 * listing (offset printing, column padding, the replacement character for
 * non-printables, closing braces); only the visible lines are annotated.
 */
static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
/* route one formatted chunk to av_log() or fprintf() depending on f */
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
    for(i=0;i<size;i+=16) {          /* 16 bytes per output line */
        PRINT(" %02x", buf[i+j]);    /* hex column */
        for(j=0;j<len;j++) {         /* ASCII column */
            if (c < ' ' || c > '~')  /* non-printable byte */
/**
 * Hex-dump size bytes of buf to the stdio stream f.
 */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
{
    hex_dump_internal(NULL, f, 0, buf, size);
}
3877 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3879 hex_dump_internal(avcl, NULL, level, buf, size);
/*
 * Print the fields of pkt (stream index, keyframe flag, duration, dts,
 * pts, size and optionally a hex dump of the payload) either to the stdio
 * stream f or, if f is NULL, via av_log() on avcl at the given level.
 * Timestamps are converted to seconds using time_base.
 *
 * NOTE(review): some lines of this definition are missing from the
 * listing (the "dts="/"pts=" labels, "N/A" branches, the dump_payload
 * guard, closing braces); only the visible lines are annotated.
 */
static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
/* route one formatted chunk to av_log() or fprintf() depending on f */
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
    PRINT("stream #%d:\n", pkt->stream_index);
    PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
    PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
    /* DTS is _always_ valid after av_read_frame() */
    if (pkt->dts == AV_NOPTS_VALUE)
    PRINT("%0.3f", pkt->dts * av_q2d(time_base));
    /* PTS may not be known if B-frames are present. */
    if (pkt->pts == AV_NOPTS_VALUE)
    PRINT("%0.3f", pkt->pts * av_q2d(time_base));
    PRINT(" size=%d\n", pkt->size);
    /* payload hex dump (presumably guarded by dump_payload — the guard
     * line is missing from this listing) */
    av_hex_dump(f, pkt->data, pkt->size);
3909 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3911 AVRational tb = { 1, AV_TIME_BASE };
3912 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
3916 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3918 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
3922 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3924 AVRational tb = { 1, AV_TIME_BASE };
3925 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
3929 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3932 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/**
 * Split a URL into its components: protocol, authorization (user[:pass]),
 * hostname, port and path. Any output buffer may be skipped by passing a
 * size of 0; port_ptr may be NULL.
 *
 * NOTE(review): some lines of the original definition are missing from
 * this listing (the port_ptr and url parameters of the signature, a few
 * else/closing-brace lines); only the visible statements are annotated.
 */
void av_url_split(char *proto, int proto_size,
                  char *authorization, int authorization_size,
                  char *hostname, int hostname_size,
                  char *path, int path_size,
    const char *p, *ls, *at, *col, *brk;

    /* -1 signals "no port present" to the caller */
    if (port_ptr) *port_ptr = -1;
    /* initialize every requested component to the empty string */
    if (proto_size > 0) proto[0] = 0;
    if (authorization_size > 0) authorization[0] = 0;
    if (hostname_size > 0) hostname[0] = 0;
    if (path_size > 0) path[0] = 0;

    /* parse protocol */
    if ((p = strchr(url, ':'))) {
        av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
        /* no protocol means plain filename */
        av_strlcpy(path, url, path_size);

    /* separate path from hostname */
    ls = strchr(p, '/');
        /* no '/': the path may still start at a query string */
        ls = strchr(p, '?');
        av_strlcpy(path, ls, path_size);
        /* no path at all: point ls at the terminating NUL */
        ls = &p[strlen(p)]; // XXX

    /* the rest is hostname, use that to parse auth/port */
        /* authorization (user[:pass]@hostname) */
        if ((at = strchr(p, '@')) && at < ls) {
            av_strlcpy(authorization, p,
                       FFMIN(authorization_size, at + 1 - p));
            p = at + 1; /* skip '@' */

        /* numeric IPv6 address escaped in [brackets] */
        if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
            av_strlcpy(hostname, p + 1,
                       FFMIN(hostname_size, brk - p));
            if (brk[1] == ':' && port_ptr)
                *port_ptr = atoi(brk + 2);
        } else if ((col = strchr(p, ':')) && col < ls) {
            /* hostname:port */
            av_strlcpy(hostname, p,
                       FFMIN(col + 1 - p, hostname_size));
            if (port_ptr) *port_ptr = atoi(col + 1);
            /* plain hostname, no port */
            av_strlcpy(hostname, p,
                       FFMIN(ls + 1 - p, hostname_size));
/**
 * Write the hexadecimal string representation of the first s bytes of src
 * into buff. Exactly 2*s characters are written; the output is NOT
 * NUL-terminated.
 *
 * @param buff      output buffer, must hold at least 2*s bytes
 * @param src       input bytes
 * @param s         number of input bytes to convert
 * @param lowercase if nonzero use lowercase hex digits, else uppercase
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    /* [16]: the string literal's terminating NUL is intentionally dropped */
    static const char digits_uc[16] = "0123456789ABCDEF";
    static const char digits_lc[16] = "0123456789abcdef";
    const char *digits = lowercase ? digits_lc : digits_uc;
    char *out = buff;
    int k;

    for (k = 0; k < s; k++) {
        *out++ = digits[src[k] >> 4];  /* high nibble */
        *out++ = digits[src[k] & 0xF]; /* low nibble */
    }
    return buff;
}
/*
 * Parse the hex-digit string p into binary bytes stored in data.
 *
 * NOTE(review): most of this definition is missing from the listing
 * (locals, the nibble-packing loop, NULL-data handling, the return of the
 * byte count); only the visible lines are annotated.
 */
int ff_hex_to_data(uint8_t *data, const char *p)
    /* skip leading whitespace */
    p += strspn(p, SPACE_CHARS);
    /* normalize to uppercase; the cast avoids UB for negative char values */
    c = toupper((unsigned char) *p++);
    if (c >= '0' && c <= '9')
    else if (c >= 'A' && c <= 'F')
#if FF_API_SET_PTS_INFO
/**
 * @deprecated public wrapper kept for API compatibility while
 * FF_API_SET_PTS_INFO is enabled; simply forwards to avpriv_set_pts_info().
 */
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
                     unsigned int pts_num, unsigned int pts_den)
    avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
/**
 * Set the time base (pts_num/pts_den, reduced to lowest terms) and the
 * timestamp wrap bit count for stream s. Invalid (non-positive) time
 * bases are rejected with an error log and leave the stream unchanged.
 *
 * NOTE(review): some lines are missing from this listing (the declaration
 * of new_tb, else/return lines, closing braces).
 */
void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
                         unsigned int pts_num, unsigned int pts_den)
    /* av_reduce() returns nonzero when the reduction was exact */
    if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
        if(new_tb.num != pts_num)
            av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
        av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
    /* reject degenerate time bases */
    if(new_tb.num <= 0 || new_tb.den <= 0) {
        av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase for st:%d\n", s->index);
    s->time_base = new_tb;
    s->pts_wrap_bits = pts_wrap_bits;
/**
 * Assemble a URL string of the form proto://authorization@hostname:port/...
 * into str (size bytes), appending a printf-style formatted tail built
 * from fmt and the variadic arguments.
 *
 * NOTE(review): several lines are missing from this listing (the va_list
 * declaration/va_start/va_end, str[0] initialization, the #endif of the
 * network block, the port>=0 guard and the return statement); only the
 * visible statements are annotated.
 */
int ff_url_join(char *str, int size, const char *proto,
                const char *authorization, const char *hostname,
                int port, const char *fmt, ...)
    struct addrinfo hints = { 0 }, *ai;

    av_strlcatf(str, size, "%s://", proto);
    if (authorization && authorization[0])
        av_strlcatf(str, size, "%s@", authorization);
#if CONFIG_NETWORK && defined(AF_INET6)
    /* Determine if hostname is a numerical IPv6 address,
     * properly escape it within [] in that case. */
    hints.ai_flags = AI_NUMERICHOST;
    if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
        if (ai->ai_family == AF_INET6) {
            av_strlcat(str, "[", size);
            av_strlcat(str, hostname, size);
            av_strlcat(str, "]", size);
            av_strlcat(str, hostname, size);
    /* Not an IPv6 address, just output the plain string. */
    av_strlcat(str, hostname, size);
    /* presumably guarded by a port >= 0 check (missing line) — confirm */
    av_strlcatf(str, size, ":%d", port);
    int len = strlen(str);

    /* append the formatted tail without overflowing str */
    vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
/**
 * Forward pkt from src to dst: rescale its pts/dts from the source
 * stream's time base to the destination stream's time base, retarget it to
 * dst_stream, and write it with av_write_frame().
 *
 * NOTE(review): the declaration/initialization of local_pkt is missing
 * from this listing — presumably it is a plain copy of *pkt; confirm
 * against the full source.
 *
 * @return the return value of av_write_frame()
 */
int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
                     AVFormatContext *src)
    local_pkt.stream_index = dst_stream;
    /* only rescale timestamps that are actually set */
    if (pkt->pts != AV_NOPTS_VALUE)
        local_pkt.pts = av_rescale_q(pkt->pts,
                                     src->streams[pkt->stream_index]->time_base,
                                     dst->streams[dst_stream]->time_base);
    if (pkt->dts != AV_NOPTS_VALUE)
        local_pkt.dts = av_rescale_q(pkt->dts,
                                     src->streams[pkt->stream_index]->time_base,
                                     dst->streams[dst_stream]->time_base);
    return av_write_frame(dst, &local_pkt);
/**
 * Parse a string of comma-separated key=value pairs. For each key,
 * callback_get_buf() is invoked with the user context to obtain a
 * destination buffer for the value; values may be double-quoted.
 *
 * NOTE(review): many lines of this definition are missing from the
 * listing (the context parameter, the outer loop header, the key pointer
 * setup, escape handling inside quotes, NUL termination); only the
 * visible lines are annotated.
 */
void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
    const char *ptr = str;

    /* Parse key=value pairs. */
        char *dest = NULL, *dest_end;
        int key_len, dest_len = 0;

        /* Skip whitespace and potential commas. */
        while (*ptr && (isspace(*ptr) || *ptr == ','))
        /* a key without '=' terminates parsing */
        if (!(ptr = strchr(key, '=')))
        key_len = ptr - key;
        /* ask the caller for a value buffer for this key */
        callback_get_buf(context, key, key_len, &dest, &dest_len);
        /* leave room for a terminating NUL */
        dest_end = dest + dest_len - 1;
        /* quoted value: copy until the closing quote */
        while (*ptr && *ptr != '\"') {
            if (dest && dest < dest_end)
            if (dest && dest < dest_end)
        /* unquoted value: copy until whitespace or comma */
        for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
            if (dest && dest < dest_end)
4187 int ff_find_stream_index(AVFormatContext *s, int id)
4190 for (i = 0; i < s->nb_streams; i++) {
4191 if (s->streams[i]->id == id)
/**
 * Resolve the (possibly relative) URL rel against the base URL base and
 * write the result into buf (size bytes). Handles absolute rel URLs,
 * server-relative paths starting with '/', and leading "../" components.
 *
 * NOTE(review): several lines are missing from this listing (the rel
 * parameter, NULL/size guards, early returns, loop bodies and closing
 * braces); only the visible statements are annotated.
 */
void ff_make_absolute_url(char *buf, int size, const char *base,
    /* Absolute path, relative to the current server */
    if (base && strstr(base, "://") && rel[0] == '/') {
        av_strlcpy(buf, base, size);
        sep = strstr(buf, "://");
        /* cut base at the first '/' after the authority */
        sep = strchr(sep, '/');
        av_strlcat(buf, rel, size);
    /* If rel actually is an absolute url, just copy it */
    if (!base || strstr(rel, "://") || rel[0] == '/') {
        av_strlcpy(buf, rel, size);
    av_strlcpy(buf, base, size);
    /* Remove the file name from the base url */
    sep = strrchr(buf, '/');
    /* pop one base directory per leading "../" in rel */
    while (av_strstart(rel, "../", NULL) && sep) {
        /* Remove the path delimiter at the end */
        sep = strrchr(buf, '/');
        /* If the next directory name to pop off is "..", break here */
        if (!strcmp(sep ? &sep[1] : buf, "..")) {
            /* Readd the slash we just removed */
            av_strlcat(buf, "/", size);
        /* Cut off the directory name */
    av_strlcat(buf, rel, size);
/**
 * Convert an ISO 8601 date string ("YYYY-MM-DD hh:mm:ss" or
 * "YYYY-MM-DDThh:mm:ss") into a Unix timestamp, interpreted as UTC via
 * av_timegm().
 *
 * NOTE(review): the HAVE_STRPTIME preprocessor conditional, the ret1/ret2
 * declarations and the return statements of this definition are missing
 * from the listing.
 */
int64_t ff_iso8601_to_unix_time(const char *datestr)
    struct tm time1 = {0}, time2 = {0};
    /* try both the space-separated and the 'T'-separated variants;
     * strptime() treats the spaces around '-' as optional whitespace */
    ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
    ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
        return av_timegm(&time2);
        return av_timegm(&time1);
    /* fallback branch for platforms without strptime() */
    av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert "
           "the date string.\n");
/**
 * Test whether the output format ofmt can store a stream of the given
 * codec_id at the given standards-compliance level.
 *
 * NOTE(review): the "return 1" line of the default-codec branch is missing
 * from this listing.
 *
 * @return >0 if supported, 0 if not, AVERROR_PATCHWELCOME when the format
 *         offers no way to tell
 */
int avformat_query_codec(AVOutputFormat *ofmt, enum CodecID codec_id, int std_compliance)
    if (ofmt->query_codec)
        /* format provides its own query callback */
        return ofmt->query_codec(codec_id, std_compliance);
    else if (ofmt->codec_tag)
        /* supported iff a codec-tag mapping exists for this codec */
        return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
    else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
             codec_id == ofmt->subtitle_codec)
    return AVERROR_PATCHWELCOME;
/**
 * Globally initialize the network components and mark them as explicitly
 * initialized by the user.
 *
 * NOTE(review): lines are missing from this listing (the ret declaration,
 * the CONFIG_NETWORK guard, error/success returns).
 *
 * @return 0 on success, a negative AVERROR on failure
 */
int avformat_network_init(void)
    /* remember that the user called the init function explicitly */
    ff_network_inited_globally = 1;
    if ((ret = ff_network_init()) < 0)
/* Undo avformat_network_init(). NOTE(review): the body of this definition
 * is missing from the listing. */
int avformat_network_deinit(void)
/**
 * Append an AV_PKT_DATA_PARAM_CHANGE side-data block to pkt describing
 * changes in channel count, channel layout, sample rate and/or video
 * dimensions; zero-valued arguments mean "unchanged" and are omitted.
 *
 * NOTE(review): some lines are missing from this listing (the size
 * accumulation per field, the pkt NULL check, some braces and the final
 * return 0); only the visible statements are annotated.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_add_param_change(AVPacket *pkt, int32_t channels,
                        uint64_t channel_layout, int32_t sample_rate,
                        int32_t width, int32_t height)
        return AVERROR(EINVAL);
        flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
    if (channel_layout) {
        flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
        flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
    if (width || height) {
        flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
    /* allocate the side-data block sized for the selected fields */
    data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
        return AVERROR(ENOMEM);
    /* serialize: flags word first, then each present field, little-endian */
    bytestream_put_le32(&data, flags);
        bytestream_put_le32(&data, channels);
        bytestream_put_le64(&data, channel_layout);
        bytestream_put_le32(&data, sample_rate);
    if (width || height) {
        bytestream_put_le32(&data, width);
        bytestream_put_le32(&data, height);
4343 const struct AVCodecTag *avformat_get_riff_video_tags(void)
4345 return ff_codec_bmp_tags;
4347 const struct AVCodecTag *avformat_get_riff_audio_tags(void)
4349 return ff_codec_wav_tags;