2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "avio_internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavcodec/raw.h"
29 #include "libavcodec/bytestream.h"
30 #include "libavutil/avassert.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/dict.h"
33 #include "libavutil/pixdesc.h"
36 #include "libavutil/avassert.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/parseutils.h"
41 #include "audiointerleave.h"
55 * various utility functions for use within FFmpeg
58 unsigned avformat_version(void)
60 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
61 return LIBAVFORMAT_VERSION_INT;
64 const char *avformat_configuration(void)
66 return FFMPEG_CONFIGURATION;
69 const char *avformat_license(void)
71 #define LICENSE_PREFIX "libavformat license: "
72 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Base for internally generated ("relative") timestamps; real timestamps
 * coming from a demuxer are assumed to stay well below this value. */
#define RELATIVE_TS_BASE (INT64_MAX - (1LL << 48))

/**
 * Return nonzero if ts lies in the relative-timestamp range, i.e. within
 * one 2^48 window below RELATIVE_TS_BASE or above it.
 */
static int is_relative(int64_t ts)
{
    const int64_t threshold = RELATIVE_TS_BASE - (1LL << 48);
    return ts > threshold;
}
/* fraction handling */

/**
 * f = val + (num / den) + 0.5.
 *
 * 'num' is normalized so that it is such as 0 <= num < den.
 *
 * @param f fractional number
 * @param val integer value
 * @param num must be >= 0
 * @param den must be >= 1
 */
static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
/* NOTE(review): the function body is not visible in this excerpt. */
/**
 * Fractional addition to f: f = f + (incr / f->den).
 *
 * @param f fractional number
 * @param incr increment, can be positive or negative
 */
static void frac_add(AVFrac *f, int64_t incr)
/* NOTE(review): most of the body is elided in this excerpt; only the
 * branch below (renormalization when the numerator overflows the
 * denominator) survives. */
    } else if (num >= den) {
/** head of registered input format linked list
 *  (appended to by av_register_input_format(); no locking is performed) */
static AVInputFormat *first_iformat = NULL;
/** head of registered output format linked list
 *  (appended to by av_register_output_format(); no locking is performed) */
static AVOutputFormat *first_oformat = NULL;
136 AVInputFormat *av_iformat_next(AVInputFormat *f)
138 if(f) return f->next;
139 else return first_iformat;
142 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
144 if(f) return f->next;
145 else return first_oformat;
148 void av_register_input_format(AVInputFormat *format)
152 while (*p != NULL) p = &(*p)->next;
157 void av_register_output_format(AVOutputFormat *format)
161 while (*p != NULL) p = &(*p)->next;
/**
 * Check whether filename's extension (the text after the final '.')
 * case-insensitively matches one of the comma-separated names in
 * 'extensions'.
 */
int av_match_ext(const char *filename, const char *extensions)
/* NOTE(review): large parts of the body are elided in this excerpt. */
    ext = strrchr(filename, '.');
    /* copy one comma-separated candidate into ext1, bounded by its size */
    while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
    /* case-insensitive compare of the candidate against the extension */
    if (!av_strcasecmp(ext1, ext))
/**
 * Case-insensitively match 'name' against the comma-separated list
 * 'names'.
 *
 * @return 1 on a match, 0 otherwise (including when either arg is NULL)
 */
static int match_format(const char *name, const char *names)
{
    const char *sep;
    int cmplen, namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    /* walk each ','-delimited entry; compare over at least namelen chars
     * so a shorter list entry cannot match as a mere prefix of name */
    while ((sep = strchr(names, ','))) {
        cmplen = FFMAX(sep - names, namelen);
        if (!av_strncasecmp(name, names, cmplen))
            return 1;
        names = sep + 1;
    }
    /* last (or only) entry has no trailing comma */
    return !av_strcasecmp(name, names);
}
/**
 * Guess an output format from a short name, filename and/or MIME type;
 * the registered muxer scoring highest across these criteria wins.
 */
AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
                                const char *mime_type)
/* NOTE(review): several lines (braces, score bookkeeping, returns) are
 * elided in this excerpt; fragments are retained verbatim below. */
    AVOutputFormat *fmt = NULL, *fmt_found;
    int score_max, score;

    /* specific test for image sequences */
#if CONFIG_IMAGE2_MUXER
    /* a numbered filename pattern with an image codec maps to image2 */
    if (!short_name && filename &&
        av_filename_number_test(filename) &&
        ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
        return av_guess_format("image2", NULL, NULL);

    /* Find the proper file type. */
    while ((fmt = av_oformat_next(fmt))) {
        /* each criterion below contributes to this candidate's score */
        if (fmt->name && short_name && !strcmp(fmt->name, short_name))
        if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
        if (filename && fmt->extensions &&
            av_match_ext(filename, fmt->extensions)) {
        if (score > score_max) {
/**
 * Guess the codec a muxer would use for the given media type, optionally
 * refined by filename (image2 picks a codec from the image extension).
 */
enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
                            const char *filename, const char *mime_type, enum AVMediaType type){
    if(type == AVMEDIA_TYPE_VIDEO){
        enum CodecID codec_id= CODEC_ID_NONE;

#if CONFIG_IMAGE2_MUXER
        /* image sequences: derive the codec from the file extension */
        if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
            codec_id= ff_guess_image2_codec(filename);
        /* NOTE(review): closing brace/#endif elided in this excerpt */
        if(codec_id == CODEC_ID_NONE)
            codec_id= fmt->video_codec;
    }else if(type == AVMEDIA_TYPE_AUDIO)
        return fmt->audio_codec;
    else if (type == AVMEDIA_TYPE_SUBTITLE)
        return fmt->subtitle_codec;
    return CODEC_ID_NONE;
267 AVInputFormat *av_find_input_format(const char *short_name)
269 AVInputFormat *fmt = NULL;
270 while ((fmt = av_iformat_next(fmt))) {
271 if (match_format(short_name, fmt->name))
/**
 * Clamp a requested read size so it cannot run past s->maxsize,
 * refreshing maxsize from avio_size() when needed and logging when
 * the request has to be truncated.
 */
int ffio_limit(AVIOContext *s, int size)
/* NOTE(review): the enclosing maxsize guard and the final return are
 * elided in this excerpt. */
    int64_t remaining= s->maxsize - avio_tell(s);
    if(remaining < size){
        int64_t newsize= avio_size(s);
        if(!s->maxsize || s->maxsize<newsize)
            s->maxsize= newsize - !newsize;  /* newsize==0 -> stores -1; presumably an "unknown size" sentinel — confirm */
        remaining= s->maxsize - avio_tell(s);
        remaining= FFMAX(remaining, 0);
    /* truncate: allow one byte past 'remaining' so EOF is detectable */
    if(s->maxsize>=0 && remaining+1 < size){
        av_log(0, AV_LOG_ERROR, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
/**
 * Allocate a packet and read up to 'size' bytes from s into it.
 * A short read shrinks the packet and marks it AV_PKT_FLAG_CORRUPT.
 */
int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
/* NOTE(review): error-return lines are elided in this excerpt. */
    int orig_size = size;
    size= ffio_limit(s, size);          /* honour s->maxsize */

    ret= av_new_packet(pkt, size);

    pkt->pos= avio_tell(s);             /* record byte position of packet */

    ret= avio_read(s, pkt->data, size);

    /* trim to what was actually read; flag partial reads as corrupt */
    av_shrink_packet(pkt, ret);
    if (pkt->size < orig_size)
        pkt->flags |= AV_PKT_FLAG_CORRUPT;
/**
 * Append up to 'size' more bytes from s to an existing packet,
 * growing it in place; an empty packet falls back to av_get_packet().
 */
int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
/* NOTE(review): the empty-packet check and error returns are elided
 * in this excerpt. */
        return av_get_packet(s, pkt, size);
    old_size = pkt->size;
    ret = av_grow_packet(pkt, size);
    ret = avio_read(s, pkt->data + old_size, size);
    /* FFMAX guards against a negative read result when trimming */
    av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
/**
 * Check whether 'filename' contains a valid frame-number pattern
 * (e.g. "%d") usable by av_get_frame_filename().
 *
 * @return 1 if the pattern expands successfully, 0 otherwise
 */
int av_filename_number_test(const char *filename)
{
    char expanded[1024];

    if (!filename)
        return 0;
    return av_get_frame_filename(expanded, sizeof(expanded), filename, 1) >= 0;
}
/**
 * Probe all registered input formats against pd and return the best
 * match; the winning score is stored in *score_ret.
 */
AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
/* NOTE(review): several lines (braces, score bookkeeping, final return)
 * are elided in this excerpt. */
    AVProbeData lpd = *pd;
    AVInputFormat *fmt1 = NULL, *fmt;
    int score, nodat = 0, score_max=0;

    /* skip a leading ID3v2 tag so probing sees actual stream data */
    if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
        int id3len = ff_id3v2_tag_len(lpd.buf);
        if (lpd.buf_size > id3len + 16) {
            lpd.buf_size -= id3len;

    while ((fmt1 = av_iformat_next(fmt1))) {
        /* only consider formats whose NOFILE flag matches is_opened */
        if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
        if (fmt1->read_probe) {
            score = fmt1->read_probe(&lpd);
            /* an extension match lifts a weak probe score slightly */
            if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
                score = FFMAX(score, nodat ? AVPROBE_SCORE_MAX/4-1 : 1);
        } else if (fmt1->extensions) {
            /* no probe function: fall back to extension matching */
            if (av_match_ext(lpd.filename, fmt1->extensions)) {
        /* keep the best scoring format seen so far */
        if (score > score_max) {
        }else if (score == score_max)

    *score_ret= score_max;
383 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
386 AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
387 if(score_ret > *score_max){
388 *score_max= score_ret;
394 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
396 return av_probe_input_format2(pd, is_opened, &score);
/**
 * Probe the accumulated stream data in pd and, on a confident match
 * against a known raw format, set the stream's codec id/type.
 */
static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
{
    /* maps the names of raw demuxers to the codec that format carries */
    static const struct {
        const char *name; enum CodecID id; enum AVMediaType type;
    /* NOTE(review): the array name/initializer opener is elided here */
        { "aac"      , CODEC_ID_AAC       , AVMEDIA_TYPE_AUDIO },
        { "ac3"      , CODEC_ID_AC3       , AVMEDIA_TYPE_AUDIO },
        { "dts"      , CODEC_ID_DTS       , AVMEDIA_TYPE_AUDIO },
        { "eac3"     , CODEC_ID_EAC3      , AVMEDIA_TYPE_AUDIO },
        { "h264"     , CODEC_ID_H264      , AVMEDIA_TYPE_VIDEO },
        { "loas"     , CODEC_ID_AAC_LATM  , AVMEDIA_TYPE_AUDIO },
        { "m4v"      , CODEC_ID_MPEG4     , AVMEDIA_TYPE_VIDEO },
        { "mp3"      , CODEC_ID_MP3       , AVMEDIA_TYPE_AUDIO },
        { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
    AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
    av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
            pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
    /* translate the detected format name into codec id and media type */
    for (i = 0; fmt_id_type[i].name; i++) {
        if (!strcmp(fmt->name, fmt_id_type[i].name)) {
            st->codec->codec_id   = fmt_id_type[i].id;
            st->codec->codec_type = fmt_id_type[i].type;
433 /************************************************************/
434 /* input media file */
/**
 * Run the demuxer's read_header callback and record where the actual
 * stream data begins.
 */
int av_demuxer_open(AVFormatContext *ic){
    if (ic->iformat->read_header) {
        err = ic->iformat->read_header(ic);
    /* NOTE(review): error handling after read_header is elided here */

    /* remember the data start offset unless it is already known */
    if (ic->pb && !ic->data_offset)
        ic->data_offset = avio_tell(ic->pb);
/** size of probe buffer, for guessing file type from file contents */
#define PROBE_BUF_MIN 2048
#define PROBE_BUF_MAX (1<<20)
/**
 * Probe the format of a stream by reading growing chunks (doubling from
 * PROBE_BUF_MIN up to max_probe_size) until a format is detected, then
 * rewind the IO context by feeding the probe buffer back into it.
 */
int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
                          const char *filename, void *logctx,
                          unsigned int offset, unsigned int max_probe_size)
/* NOTE(review): various braces, error paths and variable declarations
 * are elided in this excerpt; fragments retained verbatim. */
    AVProbeData pd = { filename ? filename : "", NULL, -offset };
    unsigned char *buf = NULL;
    int ret = 0, probe_size;

    /* clamp/validate the caller-provided probe size */
    if (!max_probe_size) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size > PROBE_BUF_MAX) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size < PROBE_BUF_MIN) {
        return AVERROR(EINVAL);

    if (offset >= max_probe_size) {
        return AVERROR(EINVAL);

    /* required score decreases to 0 for the final, largest attempt */
    for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
        probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
        int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
        int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;

        if (probe_size < offset) {

        /* read probe data */
        buftmp = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
            return AVERROR(ENOMEM);
        if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
            /* fail if error was not end of file, otherwise, lower score */
            if (ret != AVERROR_EOF) {
            ret = 0;            /* error was end of file, nothing read */
        pd.buf = &buf[offset];

        /* zero the padding demanded by the probe API */
        memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);

        /* guess file format */
        *fmt = av_probe_input_format2(&pd, 1, &score);
            if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
                av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
                av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);

        return AVERROR_INVALIDDATA;

    /* rewind. reuse probe buffer to avoid seeking */
    if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
/* open input file and probe the format if necessary */
static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
/* NOTE(review): several branches/returns are elided in this excerpt. */
    AVProbeData pd = {filename, NULL, 0};

    /* caller supplied its own AVIOContext */
    s->flags |= AVFMT_FLAG_CUSTOM_IO;
        return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
    else if (s->iformat->flags & AVFMT_NOFILE)
        av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
                                  "will be ignored with AVFMT_NOFILE format.\n");

    /* NOFILE formats (or a successful no-IO probe) need no file opened */
    if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
        (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))

    if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
                          &s->interrupt_callback, options)) < 0)

    return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
/**
 * Append a packet to a singly linked packet list, maintaining the tail
 * pointer *plast_pktl for O(1) appends.
 */
static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
                               AVPacketList **plast_pktl){
    AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
    /* NOTE(review): NULL check and list-head/tail wiring partly elided */
        (*plast_pktl)->next = pktl;       /* link after the current tail */
        *packet_buffer = pktl;            /* list was empty: set head */
    /* add the packet in the buffered packet list */
574 static void queue_attached_pictures(AVFormatContext *s)
577 for (i = 0; i < s->nb_streams; i++)
578 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
579 s->streams[i]->discard < AVDISCARD_ALL) {
580 AVPacket copy = s->streams[i]->attached_pic;
581 copy.destruct = NULL;
582 add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end);
/**
 * Open an input stream: allocate/validate the context, apply options,
 * open and probe the input, allocate demuxer private data, read the
 * header, collect ID3v2 metadata and queue attached pictures.
 */
int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
/* NOTE(review): many lines (braces, gotos, the fail path labels) are
 * elided in this excerpt; fragments retained verbatim. */
    AVFormatContext *s = *ps;
    AVDictionary *tmp = NULL;
    ID3v2ExtraMeta *id3v2_extra_meta = NULL;

    if (!s && !(s = avformat_alloc_context()))
        return AVERROR(ENOMEM);
        av_log(0, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
        return AVERROR(EINVAL);

        /* work on a copy so caller's dict reports unconsumed options */
        av_dict_copy(&tmp, *options, 0);

    if ((ret = av_opt_set_dict(s, &tmp)) < 0)

    if ((ret = init_input(s, filename, &tmp)) < 0)

    /* check filename in case an image number is expected */
    if (s->iformat->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            ret = AVERROR(EINVAL);

    s->duration = s->start_time = AV_NOPTS_VALUE;
    av_strlcpy(s->filename, filename, sizeof(s->filename));

    /* allocate private data */
    if (s->iformat->priv_data_size > 0) {
        if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
            ret = AVERROR(ENOMEM);
        if (s->iformat->priv_class) {
            /* first member must be the AVClass for av_opt to work */
            *(const AVClass**)s->priv_data = s->iformat->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)

    /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
        ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);

    if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
        if ((ret = s->iformat->read_header(s)) < 0)

    if (id3v2_extra_meta &&
        (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);

    queue_attached_pictures(s);

    if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
        s->data_offset = avio_tell(s->pb);

    s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;

        av_dict_free(options);

    /* failure path: release everything acquired above */
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
    avformat_free_context(s);
673 /*******************************************************/
/**
 * Read the next raw packet, either from the raw packet buffer (packets
 * held back while codec probing is in progress) or from the demuxer,
 * feeding still-unprobed streams' data into their probe buffers.
 */
int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
/* NOTE(review): many lines (loop header, braces, returns, gotos) are
 * elided in this excerpt; fragments retained verbatim. */
        AVPacketList *pktl = s->raw_packet_buffer;
            /* stream already probed: hand out the buffered packet */
            if(s->streams[pkt->stream_index]->request_probe <= 0){
                s->raw_packet_buffer = pktl->next;
                s->raw_packet_buffer_remaining_size += pkt->size;

        ret= s->iformat->read_packet(s, pkt);
            if (!pktl || ret == AVERROR(EAGAIN))
            /* demuxer is done: stop probing any remaining streams */
            for (i = 0; i < s->nb_streams; i++)
                if(s->streams[i]->request_probe > 0)
                    s->streams[i]->request_probe = -1;

        if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
            (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
            av_log(s, AV_LOG_WARNING,
                   "Dropped corrupted packet (stream = %d)\n",

        if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
            av_packet_merge_side_data(pkt);

        if(pkt->stream_index >= (unsigned)s->nb_streams){
            av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);

        st= s->streams[pkt->stream_index];

        /* apply user-forced codec ids */
        switch(st->codec->codec_type){
        case AVMEDIA_TYPE_VIDEO:
            if(s->video_codec_id)   st->codec->codec_id= s->video_codec_id;
        case AVMEDIA_TYPE_AUDIO:
            if(s->audio_codec_id)   st->codec->codec_id= s->audio_codec_id;
        case AVMEDIA_TYPE_SUBTITLE:
            if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;

        if(!pktl && st->request_probe <= 0)

        /* hold the packet back until probing of this stream finishes */
        add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
        s->raw_packet_buffer_remaining_size -= pkt->size;

        if(st->request_probe>0){
            AVProbeData *pd = &st->probe_data;
            av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);

            /* append this packet's bytes to the probe buffer */
            pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
            memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
            pd->buf_size += pkt->size;
            memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);

            end= s->raw_packet_buffer_remaining_size <= 0
                 || st->probe_packets<=0;

            /* re-probe when the buffer size crosses a power of two */
            if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
                int score= set_codec_from_probe_data(s, st, pd);
                if( (st->codec->codec_id != CODEC_ID_NONE && score > AVPROBE_SCORE_MAX/4)
                    st->request_probe= -1;
                    if(st->codec->codec_id != CODEC_ID_NONE){
                        av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
                        av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
#if FF_API_READ_PACKET
/**
 * Deprecated public entry point; simply forwards to ff_read_packet().
 */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    return ff_read_packet(s, pkt);
}
#endif
780 /**********************************************************/
782 static int determinable_frame_size(AVCodecContext *avctx)
784 if (/*avctx->codec_id == CODEC_ID_AAC ||*/
785 avctx->codec_id == CODEC_ID_MP1 ||
786 avctx->codec_id == CODEC_ID_MP2 ||
787 avctx->codec_id == CODEC_ID_MP3/* ||
788 avctx->codec_id == CODEC_ID_CELT*/)
/**
 * Get the number of samples of an audio frame. Return -1 on error.
 */
static int get_audio_frame_size(AVCodecContext *enc, int size, int mux)
/* NOTE(review): declarations, braces and the final return are elided
 * in this excerpt. */
    /* give frame_size priority if demuxing */
    if (!mux && enc->frame_size > 1)
        return enc->frame_size;

    if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)

    /* fallback to using frame_size if muxing */
    if (enc->frame_size > 1)
        return enc->frame_size;
/**
 * Return the frame duration in seconds. Return 0 if not available.
 * The duration is delivered as the rational *pnum / *pden.
 */
static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
                                   AVCodecParserContext *pc, AVPacket *pkt)
/* NOTE(review): initialization of *pnum/*pden, several braces and break
 * statements are elided in this excerpt. */
    switch(st->codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        if (st->r_frame_rate.num && !pc) {
            /* duration is the inverse of the (real) frame rate */
            *pnum = st->r_frame_rate.den;
            *pden = st->r_frame_rate.num;
        } else if(st->time_base.num*1000LL > st->time_base.den) {
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
            *pnum = st->codec->time_base.num;
            *pden = st->codec->time_base.den;
            if (pc && pc->repeat_pict) {
                /* repeated fields/frames stretch the duration */
                *pnum = (*pnum) * (1 + pc->repeat_pict);
            //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
            //Thus if we have no parser in such case leave duration undefined.
            if(st->codec->ticks_per_frame>1 && !pc){
    case AVMEDIA_TYPE_AUDIO:
        frame_size = get_audio_frame_size(st->codec, pkt->size, 0);
        if (frame_size <= 0 || st->codec->sample_rate <= 0)
        *pden = st->codec->sample_rate;
/**
 * Return nonzero if every frame of this codec is decodable on its own
 * (audio, or one of the listed intra-only video codecs), so each packet
 * can be treated as a keyframe.
 */
static int is_intra_only(AVCodecContext *enc){
    if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
    /* NOTE(review): the audio-case return is elided in this excerpt */
    }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
        switch(enc->codec_id){
        /* NOTE(review): additional case labels are elided here */
        case CODEC_ID_MJPEGB:
        case CODEC_ID_PRORES:
        case CODEC_ID_RAWVIDEO:
        case CODEC_ID_DVVIDEO:
        case CODEC_ID_HUFFYUV:
        case CODEC_ID_FFVHUFF:
        case CODEC_ID_JPEG2000:
        case CODEC_ID_UTVIDEO:
/**
 * Advance pktl through the logical concatenation of the parse queue and
 * the packet buffer.
 */
static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
/* NOTE(review): the next-pointer step and the final return are elided
 * in this excerpt. */
    /* when the parse queue is exhausted, continue into packet_buffer */
    if (pktl == s->parse_queue_end)
        return s->packet_buffer;
/**
 * Once the first real DTS of a stream is known, convert all previously
 * queued relative timestamps (based on RELATIVE_TS_BASE) of that stream
 * into absolute ones, and establish st->first_dts / st->start_time.
 */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts)
/* NOTE(review): an early return, continue and some braces are elided
 * in this excerpt. */
    AVStream *st= s->streams[stream_index];
    AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;

    /* nothing to do unless this is the first usable absolute dts */
    if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))

    st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);

    if (is_relative(pts))
        pts += st->first_dts - RELATIVE_TS_BASE;

    /* rebase every queued packet of this stream */
    for(; pktl; pktl= get_next_pkt(s, st, pktl)){
        if(pktl->pkt.stream_index != stream_index)
        if(is_relative(pktl->pkt.pts))
            pktl->pkt.pts += st->first_dts - RELATIVE_TS_BASE;

        if(is_relative(pktl->pkt.dts))
            pktl->pkt.dts += st->first_dts - RELATIVE_TS_BASE;

        if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time= pktl->pkt.pts;

    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
/**
 * Once a packet duration becomes known, back-fill dts/pts/duration of
 * queued packets of the stream that so far carried no timing at all.
 */
static void update_initial_durations(AVFormatContext *s, AVStream *st,
                                     int stream_index, int duration)
/* NOTE(review): a break, a return and several braces are elided in
 * this excerpt. */
    AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
    int64_t cur_dts= RELATIVE_TS_BASE;

    if(st->first_dts != AV_NOPTS_VALUE){
        /* walk backwards from first_dts over the untimed leading packets */
        cur_dts= st->first_dts;
        for(; pktl; pktl= get_next_pkt(s, st, pktl)){
            if(pktl->pkt.stream_index == stream_index){
                if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
        pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
        st->first_dts = cur_dts;
    }else if(st->cur_dts != RELATIVE_TS_BASE)

    /* assign monotonically increasing dts (and pts, sans B-frames) */
    for(; pktl; pktl= get_next_pkt(s, st, pktl)){
        if(pktl->pkt.stream_index != stream_index)
        if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
           && !pktl->pkt.duration){
            pktl->pkt.dts= cur_dts;
            if(!st->codec->has_b_frames)
                pktl->pkt.pts= cur_dts;
//            if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
                pktl->pkt.duration = duration;

        cur_dts = pktl->pkt.dts + pktl->pkt.duration;

    st->cur_dts= cur_dts;
/**
 * Fill in missing pts/dts/duration of a demuxed packet using parser
 * info, frame durations, wrap correction and the stream's running
 * cur_dts. Central (and intricate) timestamp-fixing logic.
 */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
/* NOTE(review): numerous braces, early returns and a few statements are
 * elided in this excerpt; fragments retained verbatim. */
    int num, den, presentation_delayed, delay, i;

    if (s->flags & AVFMT_FLAG_NOFILLIN)

    if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
        pkt->dts= AV_NOPTS_VALUE;

    if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
        //FIXME Set low_delay = 0 when has_b_frames = 1
        st->codec->has_b_frames = 1;

    /* do we have a video B-frame ? */
    delay= st->codec->has_b_frames;
    presentation_delayed = 0;

    /* XXX: need has_b_frame, but cannot get it if the codec is
        pc && pc->pict_type != AV_PICTURE_TYPE_B)
        presentation_delayed = 1;

    /* undo a dts wrap-around relative to pts */
    if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > pkt->pts && st->pts_wrap_bits<63){
        pkt->dts -= 1LL<<st->pts_wrap_bits;

    // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
    // we take the conservative approach and discard both
    // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
    if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
        av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
        pkt->dts= AV_NOPTS_VALUE;

    /* derive a duration from the frame rate / sample rate if absent */
    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, pc, pkt);
            pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);

    if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
        update_initial_durations(s, st, pkt->stream_index, pkt->duration);

    /* correct timestamps with byte offset if demuxers only have timestamps
       on packet boundaries */
    if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if(pkt->pts != AV_NOPTS_VALUE)
        if(pkt->dts != AV_NOPTS_VALUE)

    if (pc && pc->dts_sync_point >= 0) {
        // we have synchronization info from the parser
        int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
            int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
            if (pkt->dts != AV_NOPTS_VALUE) {
                // got DTS from the stream, update reference timestamp
                st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
            } else if (st->reference_dts != AV_NOPTS_VALUE) {
                // compute DTS based on reference timestamp
                pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
            if (pc->dts_sync_point > 0)
                st->reference_dts = pkt->dts; // new reference

    /* This may be redundant, but it should not hurt. */
    if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
        presentation_delayed = 1;

    /* interpolate PTS and DTS if they are not present */
    //We skip H264 currently because delay and has_b_frames are not reliably set
    if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->cur_dts;

            /* this is tricky: the dts must be incremented by the duration
               of the frame we are displaying, i.e. the last I- or P-frame */
            if (st->last_IP_duration == 0)
                st->last_IP_duration = pkt->duration;
            if(pkt->dts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts + st->last_IP_duration;
            st->last_IP_duration = pkt->duration;
            st->last_IP_pts= pkt->pts;
            /* cannot compute PTS if not present (we can compute it only
               by knowing the future */
        } else if (pkt->pts != AV_NOPTS_VALUE ||
                   pkt->dts != AV_NOPTS_VALUE ||
            int duration = pkt->duration;

            /* mpeg(-ts) quirk: nudge a pts that lags cur_dts by ~1 frame */
            if(pkt->pts != AV_NOPTS_VALUE && duration){
                int64_t old_diff= FFABS(st->cur_dts - duration - pkt->pts);
                int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
                if( old_diff < new_diff && old_diff < (duration>>3)
                    && (!strcmp(s->iformat->name, "mpeg") ||
                        !strcmp(s->iformat->name, "mpegts"))){
                    pkt->pts += duration;
                    av_log(s, AV_LOG_WARNING, "Adjusting PTS forward\n");
//                    av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);

            /* presentation is not delayed : PTS and DTS are the same */
            if (pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = pkt->dts;
            update_initial_timestamps(s, pkt->stream_index, pkt->pts,
            if (pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = st->cur_dts;
            pkt->dts = pkt->pts;
            if (pkt->pts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->pts + duration;

    /* insertion-sort pts into the reorder buffer to infer a missing dts */
    if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
        st->pts_buffer[0]= pkt->pts;
        for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
        if(pkt->dts == AV_NOPTS_VALUE)
            pkt->dts= st->pts_buffer[0];
        if(st->codec->codec_id == CODEC_ID_H264){ // we skipped it above so we try here
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
        if(pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;

//    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);

    /* intra-only codecs: every packet is a keyframe */
    if(is_intra_only(st->codec))
        pkt->flags |= AV_PKT_FLAG_KEY;
        pkt->convergence_duration = pc->convergence_duration;
/**
 * Free every packet in a packet list and reset both the head and tail
 * pointers to NULL.
 */
static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
/* NOTE(review): the loop header and node free are elided in this
 * excerpt. */
        AVPacketList *pktl = *pkt_buf;
        *pkt_buf = pktl->next;
        av_free_packet(&pktl->pkt);
    *pkt_buf_end = NULL;
1131 * Parse a packet, add all split parts to parse_queue
1133 * @param pkt packet to parse, NULL when flushing the parser at end of stream
1135 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1137 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1138 AVStream *st = s->streams[stream_index];
1139 uint8_t *data = pkt ? pkt->data : NULL;
1140 int size = pkt ? pkt->size : 0;
1141 int ret = 0, got_output = 0;
1144 av_init_packet(&flush_pkt);
1147 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1148 // preserve 0-size sync packets
1149 compute_pkt_fields(s, st, st->parser, pkt);
1152 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1155 av_init_packet(&out_pkt);
1156 len = av_parser_parse2(st->parser, st->codec,
1157 &out_pkt.data, &out_pkt.size, data, size,
1158 pkt->pts, pkt->dts, pkt->pos);
1160 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1161 /* increment read pointer */
1165 got_output = !!out_pkt.size;
1170 /* set the duration */
1171 out_pkt.duration = 0;
1172 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1173 if (st->codec->sample_rate > 0) {
1174 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1175 (AVRational){ 1, st->codec->sample_rate },
1179 } else if (st->codec->time_base.num != 0 &&
1180 st->codec->time_base.den != 0) {
1181 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1182 st->codec->time_base,
1187 out_pkt.stream_index = st->index;
1188 out_pkt.pts = st->parser->pts;
1189 out_pkt.dts = st->parser->dts;
1190 out_pkt.pos = st->parser->pos;
1192 if (st->parser->key_frame == 1 ||
1193 (st->parser->key_frame == -1 &&
1194 st->parser->pict_type == AV_PICTURE_TYPE_I))
1195 out_pkt.flags |= AV_PKT_FLAG_KEY;
1197 compute_pkt_fields(s, st, st->parser, &out_pkt);
1199 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1200 out_pkt.flags & AV_PKT_FLAG_KEY) {
1201 int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? out_pkt.pos : st->parser->frame_offset;
1202 ff_reduce_index(s, st->index);
1203 av_add_index_entry(st, pos, out_pkt.dts,
1204 0, 0, AVINDEX_KEYFRAME);
1207 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1208 out_pkt.destruct = pkt->destruct;
1209 pkt->destruct = NULL;
1211 if ((ret = av_dup_packet(&out_pkt)) < 0)
1214 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1215 av_free_packet(&out_pkt);
1216 ret = AVERROR(ENOMEM);
1222 /* end of the stream => close and free the parser */
1223 if (pkt == &flush_pkt) {
1224 av_parser_close(st->parser);
1229 av_free_packet(pkt);
/**
 * Pop the head packet from a packet list into the caller's packet,
 * clearing the tail pointer when the list becomes empty.
 */
static int read_from_packet_buffer(AVPacketList **pkt_buffer,
                                   AVPacketList **pkt_buffer_end,
/* NOTE(review): the out-packet parameter, copy and free are elided in
 * this excerpt. */
    av_assert0(*pkt_buffer);
    *pkt_buffer = pktl->next;
        *pkt_buffer_end = NULL;
/**
 * Read raw packets from the demuxer, pass them through the per-stream
 * parser when one is needed, and return the next fully assembled frame
 * (from the parse queue when parsing splits/merges packets).
 */
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
/* NOTE(review): a number of lines (braces, gotos, log arguments, the
 * final return) are elided in this excerpt. */
    int ret = 0, i, got_packet = 0;

    av_init_packet(pkt);

    while (!got_packet && !s->parse_queue) {
        /* read next packet */
        ret = ff_read_packet(s, &cur_pkt);
            if (ret == AVERROR(EAGAIN))
            /* flush the parsers */
            for(i = 0; i < s->nb_streams; i++) {
                if (st->parser && st->need_parsing)
                    parse_packet(s, NULL, st->index);
            /* all remaining packets are now in parse_queue =>
             * really terminate parsing */

        st = s->streams[cur_pkt.stream_index];

        if (cur_pkt.pts != AV_NOPTS_VALUE &&
            cur_pkt.dts != AV_NOPTS_VALUE &&
            cur_pkt.pts < cur_pkt.dts) {
            av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
                   cur_pkt.stream_index,
        if (s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
                   cur_pkt.stream_index,

        /* lazily create the parser on first packet of the stream */
        if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codec->codec_id);
                av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
                       "%s, packets or times may be invalid.\n",
                       avcodec_get_name(st->codec->codec_id));
                /* no parser available: just output the raw packets */
                st->need_parsing = AVSTREAM_PARSE_NONE;
            } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
                st->parser->flags |= PARSER_FLAG_ONCE;

        if (!st->need_parsing || !st->parser) {
            /* no parsing needed: we just output the packet as is */
            compute_pkt_fields(s, st, NULL, pkt);
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
        } else if (st->discard < AVDISCARD_ALL) {
            if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
            av_free_packet(&cur_pkt);
        /* skip-to-keyframe: drop packets until a key flag is seen */
        if (pkt->flags & AV_PKT_FLAG_KEY)
            st->skip_to_keyframe = 0;
        if (st->skip_to_keyframe) {
            av_free_packet(&cur_pkt);

    if (!got_packet && s->parse_queue)
        ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);

    if(s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
/*
 * Public API: return the next frame of a stream.
 * Without AVFMT_FLAG_GENPTS this simply drains the packet buffer or calls
 * read_frame_internal(). With GENPTS it buffers packets and back-fills
 * missing pts values from the dts of later packets of the same stream
 * before releasing anything to the caller.
 * NOTE(review): listing is elided — loop framing and some error paths are
 * missing between the numbered rows.
 */
1349 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1351     const int genpts = s->flags & AVFMT_FLAG_GENPTS;
/* fast path: no pts generation requested */
1356         ret = s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
1357                                                          &s->packet_buffer_end,
1359                   read_frame_internal(s, pkt);
1364         AVPacketList *pktl = s->packet_buffer;
1367             AVPacket *next_pkt = &pktl->pkt;
1369             if (next_pkt->dts != AV_NOPTS_VALUE) {
/* comparisons below are done modulo 2^wrap_bits to survive timestamp wrap */
1370                 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1371                 // last dts seen for this stream. if any of packets following
1372                 // current one had no dts, we will set this to AV_NOPTS_VALUE.
1373                 int64_t last_dts = next_pkt->dts;
1374                 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1375                     if (pktl->pkt.stream_index == next_pkt->stream_index &&
1376                         (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1377                         if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
/* a later packet's dts gives us this packet's presentation time */
1378                             next_pkt->pts = pktl->pkt.dts;
1380                         if (last_dts != AV_NOPTS_VALUE) {
1381                             // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1382                             last_dts = pktl->pkt.dts;
1387                 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1388                     // Fixing the last reference frame had none pts issue (For MXF etc).
1389                     // We only do this when
1391                     // 2. we are not able to resolve a pts value for current packet.
1392                     // 3. the packets for this stream at the end of the files had valid dts.
1393                     next_pkt->pts = last_dts + next_pkt->duration;
1395                 pktl = s->packet_buffer;
1398             /* read packet from packet buffer, if there is data */
1399             if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1400                   next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1401                 ret = read_from_packet_buffer(&s->packet_buffer,
1402                                               &s->packet_buffer_end, pkt);
1407         ret = read_frame_internal(s, pkt);
1409         if (pktl && ret != AVERROR(EAGAIN)) {
/* duplicate so the buffered copy owns its own data; ENOMEM on failure */
1416         if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1417                                         &s->packet_buffer_end)) < 0)
1418             return AVERROR(ENOMEM);
/* strip the internal RELATIVE_TS_BASE offset before handing to the user */
1422     if (is_relative(pkt->dts))
1423         pkt->dts -= RELATIVE_TS_BASE;
1424     if (is_relative(pkt->pts))
1425         pkt->pts -= RELATIVE_TS_BASE;
1429 /* XXX: suppress the packet queue */
/*
 * Free all three internal packet queues (parse, buffered, raw) and reset
 * the raw-buffer budget so probing can start over after a flush/seek.
 */
1430 static void flush_packet_queue(AVFormatContext *s)
1432     free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1433     free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1434     free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1436     s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1439 /*******************************************************/
/*
 * Pick a default stream for seeking/indexing: the first real video stream
 * (attached pictures excluded) wins; otherwise the first audio stream;
 * otherwise stream 0.
 * NOTE(review): listing is elided — the early-return for the video match is
 * not visible between rows 1453 and 1456.
 */
1442 int av_find_default_stream_index(AVFormatContext *s)
1444     int first_audio_index = -1;
1448     if (s->nb_streams <= 0)
1450     for(i = 0; i < s->nb_streams; i++) {
1452         if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1453             !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1456         if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1457             first_audio_index = i;
1459     return first_audio_index >= 0 ? first_audio_index : 0;
1463 * Flush the frame reader.
/*
 * Flush the frame reader: drop all queued packets and reset the per-stream
 * read state (parser, dts tracking, probe counters, pts reorder buffer).
 * Called around seeks so stale state cannot leak across the seek point.
 */
1465 void ff_read_frame_flush(AVFormatContext *s)
1470     flush_packet_queue(s);
1472     /* for each stream, reset read state */
1473     for(i = 0; i < s->nb_streams; i++) {
1477             av_parser_close(st->parser);
1480         st->last_IP_pts = AV_NOPTS_VALUE;
/* streams whose first_dts is unknown restart from the relative-ts origin */
1481         if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1482         else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1483         st->reference_dts = AV_NOPTS_VALUE;
1485         st->probe_packets = MAX_PROBE_PACKETS;
1487         for(j=0; j<MAX_REORDER_DELAY+1; j++)
1488             st->pts_buffer[j]= AV_NOPTS_VALUE;
/*
 * After a seek, set every stream's cur_dts to the seek timestamp, rescaled
 * from the reference stream's time base into each stream's own time base.
 */
1492 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1496     for(i = 0; i < s->nb_streams; i++) {
1497         AVStream *st = s->streams[i];
/* timestamp * (st->tb.den * ref->tb.num) / (st->tb.num * ref->tb.den) */
1499         st->cur_dts = av_rescale(timestamp,
1500                                  st->time_base.den * (int64_t)ref_st->time_base.num,
1501                                  st->time_base.num * (int64_t)ref_st->time_base.den);
/*
 * Bound index memory usage: once the index reaches max_index_size bytes,
 * halve it by keeping only every second entry (entries 0, 2, 4, ...).
 */
1505 void ff_reduce_index(AVFormatContext *s, int stream_index)
1507     AVStream *st= s->streams[stream_index];
1508     unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1510     if((unsigned)st->nb_index_entries >= max_entries){
1512         for(i=0; 2*i<st->nb_index_entries; i++)
1513             st->index_entries[i]= st->index_entries[2*i];
1514         st->nb_index_entries= i;
/*
 * Insert (or update) one entry in a timestamp-sorted index array.
 * Grows the array with av_fast_realloc(), binary-searches the insertion
 * point, and either appends, inserts in the middle (memmove), or updates
 * an existing entry with the same timestamp.
 * NOTE(review): listing is elided — the append fast path, the ENOMEM check
 * after realloc, and the final field assignments are partially missing.
 */
1518 int ff_add_index_entry(AVIndexEntry **index_entries,
1519                        int *nb_index_entries,
1520                        unsigned int *index_entries_allocated_size,
1521                        int64_t pos, int64_t timestamp, int size, int distance, int flags)
1523     AVIndexEntry *entries, *ie;
/* guard the (*nb+1)*sizeof multiplication against unsigned overflow */
1526     if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1529     if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1530         timestamp -= RELATIVE_TS_BASE;
1532     entries = av_fast_realloc(*index_entries,
1533                               index_entries_allocated_size,
1534                               (*nb_index_entries + 1) *
1535                               sizeof(AVIndexEntry));
1539     *index_entries= entries;
/* locate the first entry with a timestamp >= ours (ANY: ignore keyframe flag) */
1541     index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
/* not found => append at the end */
1544         index= (*nb_index_entries)++;
1545         ie= &entries[index];
1546         assert(index==0 || ie[-1].timestamp < timestamp);
1548         ie= &entries[index];
1549         if(ie->timestamp != timestamp){
1550             if(ie->timestamp <= timestamp)
/* shift the tail up one slot to make room for the new entry */
1552             memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1553             (*nb_index_entries)++;
1554         }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1555             distance= ie->min_distance;
1559     ie->timestamp = timestamp;
1560     ie->min_distance= distance;
/*
 * Public wrapper: add an index entry to a stream, delegating to
 * ff_add_index_entry() with the stream's own index-array fields.
 */
1567 int av_add_index_entry(AVStream *st,
1568                        int64_t pos, int64_t timestamp, int size, int distance, int flags)
1570     return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1571                               &st->index_entries_allocated_size, pos,
1572                               timestamp, size, distance, flags);
/*
 * Binary-search a sorted index for wanted_timestamp.
 * AVSEEK_FLAG_BACKWARD selects the entry at or before the target, otherwise
 * at or after; unless AVSEEK_FLAG_ANY is set, the result is then walked
 * to the nearest keyframe entry in the chosen direction.
 * NOTE(review): listing is elided — the a/b bound initialization and loop
 * framing are missing between the numbered rows.
 */
1575 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1576                               int64_t wanted_timestamp, int flags)
1584     //optimize appending index entries at the end
1585     if(b && entries[b-1].timestamp < wanted_timestamp)
1590         timestamp = entries[m].timestamp;
1591         if(timestamp >= wanted_timestamp)
1593         if(timestamp <= wanted_timestamp)
1596     m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1598     if(!(flags & AVSEEK_FLAG_ANY)){
/* step toward the requested direction until a keyframe entry is found */
1599         while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1600             m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/*
 * Public wrapper: search a stream's index for a timestamp via
 * ff_index_search_timestamp().
 */
1609 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1612     return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1613                                      wanted_timestamp, flags);
/*
 * Seek by binary search over the format's read_timestamp() callback.
 * Cached index entries (when present) seed the min/max position and
 * timestamp bounds to narrow the search; ff_gen_search() does the actual
 * bisection, then the demuxer state is flushed and cur_dts updated.
 * NOTE(review): listing is elided — several initializations and error
 * returns are missing between the numbered rows.
 */
1616 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1618     AVInputFormat *avif= s->iformat;
1619     int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1620     int64_t ts_min, ts_max, ts;
1625     if (stream_index < 0)
1628     av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1631     ts_min= AV_NOPTS_VALUE;
1632     pos_limit= -1; //gcc falsely says it may be uninitialized
1634     st= s->streams[stream_index];
1635     if(st->index_entries){
/* seed lower bound from the index entry at or before the target */
1638         index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1639         index= FFMAX(index, 0);
1640         e= &st->index_entries[index];
1642         if(e->timestamp <= target_ts || e->pos == e->min_distance){
1644             ts_min= e->timestamp;
1645             av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* seed upper bound from the index entry at or after the target */
1651         index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1652         assert(index < st->nb_index_entries);
1654             e= &st->index_entries[index];
1655             assert(e->timestamp >= target_ts);
1657             ts_max= e->timestamp;
1658             pos_limit= pos_max - e->min_distance;
1659             av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1660                     pos_max,pos_limit, ts_max);
1664     pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1669     if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
/* discard buffered state and re-anchor all streams at the found timestamp */
1672     ff_read_frame_flush(s);
1673     ff_update_cur_dts(s, st, ts);
/*
 * Generic timestamp search: find the file position whose timestamp brackets
 * target_ts, using the demuxer's read_timestamp() callback.
 * Unknown bounds are established by probing the start of data and stepping
 * back from EOF; the main loop then interpolates (falling back to bisection,
 * then linear stepping) until pos_min meets pos_limit. Writes the resolved
 * timestamp to *ts_ret and returns the position.
 * NOTE(review): listing is elided — step sizing, some early returns and
 * parts of the loop bookkeeping are missing between the numbered rows.
 */
1678 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1679                       int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1680                       int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1681                       int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1684     int64_t start_pos, filesize;
1687     av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* establish the lower bound if the caller didn't supply one */
1689     if(ts_min == AV_NOPTS_VALUE){
1690         pos_min = s->data_offset;
1691         ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1692         if (ts_min == AV_NOPTS_VALUE)
1696     if(ts_min >= target_ts){
/* establish the upper bound by scanning backwards from end of file */
1701     if(ts_max == AV_NOPTS_VALUE){
1703         filesize = avio_size(s->pb);
1704         pos_max = filesize - 1;
1707             ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1709         }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1710         if (ts_max == AV_NOPTS_VALUE)
/* tighten ts_max to the timestamp of the very last readable packet */
1714             int64_t tmp_pos= pos_max + 1;
1715             int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1716             if(tmp_ts == AV_NOPTS_VALUE)
1720             if(tmp_pos >= filesize)
1726     if(ts_max <= target_ts){
1731     if(ts_min > ts_max){
1733     }else if(ts_min == ts_max){
1738     while (pos_min < pos_limit) {
1739         av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1740                 pos_min, pos_max, ts_min, ts_max);
1741         assert(pos_limit <= pos_max);
1744             int64_t approximate_keyframe_distance= pos_max - pos_limit;
1745             // interpolate position (better than dichotomy)
1746             pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1747                 + pos_min - approximate_keyframe_distance;
1748         }else if(no_change==1){
1749             // bisection, if interpolation failed to change min or max pos last time
1750             pos = (pos_min + pos_limit)>>1;
1752             /* linear search if bisection failed, can only happen if there
1753                are very few or no keyframes between min/max */
/* clamp pos into (pos_min, pos_limit] before probing */
1758         else if(pos > pos_limit)
1762         ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1767         av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1768                 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1769                 pos_limit, start_pos, no_change);
1770         if(ts == AV_NOPTS_VALUE){
1771             av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1774         assert(ts != AV_NOPTS_VALUE);
/* shrink whichever bound the probed timestamp allows */
1775         if (target_ts <= ts) {
1776             pos_limit = start_pos - 1;
1780         if (target_ts >= ts) {
/* pick the bound matching the requested seek direction */
1786     pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1787     ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min  : ts_max;
1790     ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1792     ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1793     av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1794             pos, ts_min, target_ts, ts_max);
/*
 * Byte-position seek: clamp pos into [data_offset, filesize-1] and move
 * the I/O context there. Timestamps are not consulted.
 */
1800 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1801     int64_t pos_min, pos_max;
1803     pos_min = s->data_offset;
1804     pos_max = avio_size(s->pb) - 1;
1806     if     (pos < pos_min) pos= pos_min;
1807     else if(pos > pos_max) pos= pos_max;
1809     avio_seek(s->pb, pos, SEEK_SET);
/*
 * Index-driven seek fallback: look the timestamp up in the stream's index;
 * if the target lies beyond the indexed region, read frames forward (letting
 * the generic-index machinery extend the index) until the target is covered,
 * then seek to the matching index entry.
 * NOTE(review): listing is elided — variable declarations and some loop
 * framing are missing between the numbered rows.
 */
1814 static int seek_frame_generic(AVFormatContext *s,
1815                               int stream_index, int64_t timestamp, int flags)
1822     st = s->streams[stream_index];
1824     index = av_index_search_timestamp(st, timestamp, flags);
1826     if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1829     if(index < 0 || index==st->nb_index_entries-1){
/* target beyond the index: resume reading from the last indexed position */
1833         if(st->nb_index_entries){
1834             assert(st->index_entries);
1835             ie= &st->index_entries[st->nb_index_entries-1];
1836             if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1838             ff_update_cur_dts(s, st, ie->timestamp);
1840             if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1846                 read_status = av_read_frame(s, &pkt);
1847             } while (read_status == AVERROR(EAGAIN));
1848             if (read_status < 0)
1850             av_free_packet(&pkt);
1851             if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1852                 if(pkt.flags & AV_PKT_FLAG_KEY)
/* give up if the stream appears to have no keyframes past the target */
1854                 if(nonkey++ > 1000 && st->codec->codec_id != CODEC_ID_CDGRAPHICS){
1855                     av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1860         index = av_index_search_timestamp(st, timestamp, flags);
1865     ff_read_frame_flush(s);
1866     AV_NOWARN_DEPRECATED(
/* prefer the demuxer's own (deprecated) read_seek when it succeeds */
1867     if (s->iformat->read_seek){
1868         if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1872     ie = &st->index_entries[index];
1873     if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1875     ff_update_cur_dts(s, st, ie->timestamp);
/*
 * Dispatch a seek request to the best available mechanism, in order:
 * byte seek (AVSEEK_FLAG_BYTE), the format's own read_seek, binary search
 * via read_timestamp, and finally the generic index-based seek.
 * A negative stream_index selects the default stream and rescales the
 * AV_TIME_BASE timestamp into that stream's time base.
 * NOTE(review): listing is elided — some returns and the read_seek result
 * handling are missing between the numbered rows.
 */
1880 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1881                                int64_t timestamp, int flags)
1886     if (flags & AVSEEK_FLAG_BYTE) {
1887         if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1889         ff_read_frame_flush(s);
1890         return seek_frame_byte(s, stream_index, timestamp, flags);
1893     if(stream_index < 0){
1894         stream_index= av_find_default_stream_index(s);
1895         if(stream_index < 0)
1898         st= s->streams[stream_index];
1899         /* timestamp for default must be expressed in AV_TIME_BASE units */
1900         timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1903     /* first, we try the format specific seek */
1904     AV_NOWARN_DEPRECATED(
1905     if (s->iformat->read_seek) {
1906         ff_read_frame_flush(s);
1907         ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
/* fall back to binary search, then to the generic index-based seek */
1915     if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1916         ff_read_frame_flush(s);
1917         return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1918     } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1919         ff_read_frame_flush(s);
1920         return seek_frame_generic(s, stream_index, timestamp, flags);
/*
 * Public API: seek to timestamp on the given stream, then re-queue attached
 * pictures (cover art) so they are re-delivered after the seek.
 */
1926 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1928     int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1931         queue_attached_pictures(s);
/*
 * Public API: seek so the next frame lies within [min_ts, max_ts], aiming
 * for ts. Prefers the demuxer's read_seek2; otherwise emulates with the
 * old av_seek_frame() API, retrying in the opposite direction if the first
 * attempt lands outside the requested range.
 * NOTE(review): listing is elided — return handling after each branch is
 * partially missing between the numbered rows.
 */
1936 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
/* a range that excludes its own target is invalid */
1938     if(min_ts > ts || max_ts < ts)
1941     if (s->iformat->read_seek2) {
1943         ff_read_frame_flush(s);
1944         ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1947             queue_attached_pictures(s);
1951     if(s->iformat->read_timestamp){
1952         //try to seek via read_timestamp()
1955     //Fallback to old API if new is not implemented but old is
1956     //Note the old has somewat different sematics
1957     AV_NOWARN_DEPRECATED(
1958     if (s->iformat->read_seek || 1) {
/* choose direction by which side of [min_ts, max_ts] has more slack */
1959         int dir = (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0);
1960         int ret = av_seek_frame(s, stream_index, ts, flags | dir);
1961         if (ret<0 && ts != min_ts && max_ts != ts) {
1962             ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
1964                 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
1970     // try some generic seek like seek_frame_generic() but with new ts semantics
1973 /*******************************************************/
1976 * Return TRUE if the stream has accurate duration in any stream.
1978 * @return TRUE if the stream has accurate duration for at least one component.
/*
 * Return non-zero if any stream, or the container itself, already carries
 * a known duration.
 */
1980 static int has_duration(AVFormatContext *ic)
1985     for(i = 0;i < ic->nb_streams; i++) {
1986         st = ic->streams[i];
1987         if (st->duration != AV_NOPTS_VALUE)
1990     if (ic->duration != AV_NOPTS_VALUE)
1996 * Estimate the stream timings from the one of each components.
1998 * Also computes the global bitrate if possible.
/*
 * Derive the container's start_time and duration from the per-stream
 * timings (rescaled to AV_TIME_BASE), and compute the overall bitrate
 * from the file size when the duration is known.
 * Subtitle streams' start times are tracked separately so a subtitle that
 * begins before the A/V does not skew the container start time, unless no
 * other start time exists.
 */
2000 static void update_stream_timings(AVFormatContext *ic)
2002     int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2003     int64_t duration, duration1, filesize;
2007     start_time = INT64_MAX;
2008     start_time_text = INT64_MAX;
2009     end_time = INT64_MIN;
2010     duration = INT64_MIN;
2011     for(i = 0;i < ic->nb_streams; i++) {
2012         st = ic->streams[i];
2013         if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2014             start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
2015             if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2016                 if (start_time1 < start_time_text)
2017                     start_time_text = start_time1;
2019                 start_time = FFMIN(start_time, start_time1);
2020             if (st->duration != AV_NOPTS_VALUE) {
2021                 end_time1 = start_time1
2022                           + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2023                 end_time = FFMAX(end_time, end_time1);
2026         if (st->duration != AV_NOPTS_VALUE) {
2027             duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2028             duration = FFMAX(duration, duration1);
/* fall back to the subtitle start time only if it is the sole one, or if it
 * precedes the A/V start by less than one second */
2031     if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2032         start_time = start_time_text;
2033     if (start_time != INT64_MAX) {
2034         ic->start_time = start_time;
2035         if (end_time != INT64_MIN)
2036             duration = FFMAX(duration, end_time - start_time);
2038     if (duration != INT64_MIN && ic->duration == AV_NOPTS_VALUE) {
2039         ic->duration = duration;
2041     if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2042         /* compute the bitrate */
2043         ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
2044             (double)ic->duration;
/*
 * After recomputing the container timings, copy them (rescaled into each
 * stream's time base) onto any stream that still lacks a start time.
 */
2048 static void fill_all_stream_timings(AVFormatContext *ic)
2053     update_stream_timings(ic);
2054     for(i = 0;i < ic->nb_streams; i++) {
2055         st = ic->streams[i];
2056         if (st->start_time == AV_NOPTS_VALUE) {
2057             if(ic->start_time != AV_NOPTS_VALUE)
2058                 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2059             if(ic->duration != AV_NOPTS_VALUE)
2060                 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/*
 * Last-resort timing estimate: sum the per-stream bitrates when the
 * container bitrate is unknown, then derive each stream's duration from
 * filesize * 8 / bitrate. Inaccurate by construction — used only when no
 * real timestamps are available.
 * NOTE(review): listing is elided — the bit_rate accumulator declaration
 * and a guard around the summation loop are missing between rows.
 */
2065 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2067     int64_t filesize, duration;
2071     /* if bit_rate is already set, we believe it */
2072     if (ic->bit_rate <= 0) {
2074         for(i=0;i<ic->nb_streams;i++) {
2075             st = ic->streams[i];
2076             if (st->codec->bit_rate > 0)
2077             bit_rate += st->codec->bit_rate;
2079         ic->bit_rate = bit_rate;
2082     /* if duration is already set, we believe it */
2083     if (ic->duration == AV_NOPTS_VALUE &&
2084         ic->bit_rate != 0) {
2085         filesize = ic->pb ? avio_size(ic->pb) : 0;
2087             for(i = 0; i < ic->nb_streams; i++) {
2088                 st = ic->streams[i];
/* duration (stream tb) = 8*filesize / bitrate, rescaled into st->time_base */
2089                 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2090                 if (st->duration == AV_NOPTS_VALUE)
2091                     st->duration = duration;
2097 #define DURATION_MAX_READ_SIZE 250000
2098 #define DURATION_MAX_RETRY 3
2100 /* only usable for MPEG-PS streams */
2100 /* only usable for MPEG-PS streams */
/*
 * Estimate durations by reading packets near the end of the file and taking
 * the last pts seen per stream, minus that stream's start time (or first
 * dts). Retries with a progressively larger tail window
 * (DURATION_MAX_READ_SIZE << retry) up to DURATION_MAX_RETRY times, then
 * restores the original file position and per-stream dts state.
 * NOTE(review): listing is elided — loop framing, the retry declaration and
 * some guards are missing between the numbered rows.
 */
2101 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2103     AVPacket pkt1, *pkt = &pkt1;
2105     int read_size, i, ret;
2107     int64_t filesize, offset, duration;
2110     /* flush packet queue */
2111     flush_packet_queue(ic);
2113     for (i=0; i<ic->nb_streams; i++) {
2114         st = ic->streams[i];
2115         if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2116             av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2119             av_parser_close(st->parser);
2124     /* estimate the end time (duration) */
2125     /* XXX: may need to support wrapping */
2126     filesize = ic->pb ? avio_size(ic->pb) : 0;
2127     end_time = AV_NOPTS_VALUE;
/* seek into the tail window; window doubles on each retry */
2129         offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2133         avio_seek(ic->pb, offset, SEEK_SET);
2136             if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2140                 ret = ff_read_packet(ic, pkt);
2141             } while(ret == AVERROR(EAGAIN));
2144             read_size += pkt->size;
2145             st = ic->streams[pkt->stream_index];
2146             if (pkt->pts != AV_NOPTS_VALUE &&
2147                 (st->start_time != AV_NOPTS_VALUE ||
2148                  st->first_dts  != AV_NOPTS_VALUE)) {
2149                 duration = end_time = pkt->pts;
2150                 if (st->start_time != AV_NOPTS_VALUE)
2151                     duration -= st->start_time;
2153                     duration -= st->first_dts;
/* negative => the timestamp wrapped; add one wrap period */
2155                     duration += 1LL<<st->pts_wrap_bits;
2157                     if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
2158                         st->duration = duration;
2161             av_free_packet(pkt);
2163     }while(   end_time==AV_NOPTS_VALUE
2164            && filesize > (DURATION_MAX_READ_SIZE<<retry)
2165            && ++retry <= DURATION_MAX_RETRY);
2167     fill_all_stream_timings(ic);
/* restore file position and reset dts tracking disturbed by the probe */
2169     avio_seek(ic->pb, old_offset, SEEK_SET);
2170     for (i=0; i<ic->nb_streams; i++) {
2172         st->cur_dts= st->first_dts;
2173         st->last_IP_pts = AV_NOPTS_VALUE;
2174         st->reference_dts = AV_NOPTS_VALUE;
/*
 * Choose a duration-estimation strategy: accurate end-of-file pts scan for
 * seekable MPEG-PS/TS, per-stream timings when at least one stream has
 * them, and the bitrate guess as a last resort; then log the results.
 */
2178 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2182     /* get the file size, if possible */
2183     if (ic->iformat->flags & AVFMT_NOFILE) {
2186         file_size = avio_size(ic->pb);
2187         file_size = FFMAX(0, file_size);
/* pts scan is only valid for MPEG containers and needs a seekable input */
2190     if ((!strcmp(ic->iformat->name, "mpeg") ||
2191          !strcmp(ic->iformat->name, "mpegts")) &&
2192         file_size && ic->pb->seekable) {
2193         /* get accurate estimate from the PTSes */
2194         estimate_timings_from_pts(ic, old_offset);
2195     } else if (has_duration(ic)) {
2196         /* at least one component has timings - we use them for all
2198         fill_all_stream_timings(ic);
2200         av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2201         /* less precise: use bitrate info */
2202         estimate_timings_from_bit_rate(ic);
2204     update_stream_timings(ic);
2208         AVStream av_unused *st;
2209         for(i = 0;i < ic->nb_streams; i++) {
2210             st = ic->streams[i];
2211             av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2212                     (double) st->start_time / AV_TIME_BASE,
2213                     (double) st->duration / AV_TIME_BASE);
2215         av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2216                 (double) ic->start_time / AV_TIME_BASE,
2217                 (double) ic->duration / AV_TIME_BASE,
2218                 ic->bit_rate / 1000);
/*
 * Return non-zero when probing has gathered enough codec parameters for
 * this stream (per media type: sample rate/channels/sample_fmt for audio,
 * pix_fmt for video, etc.), plus a known codec id.
 * NOTE(review): listing is elided — the video width check and the DATA/
 * default cases are partially missing between rows.
 */
2222 static int has_codec_parameters(AVStream *st)
2224     AVCodecContext *avctx = st->codec;
2226     switch (avctx->codec_type) {
2227     case AVMEDIA_TYPE_AUDIO:
2228         val = avctx->sample_rate && avctx->channels;
2229         if (!avctx->frame_size && determinable_frame_size(avctx))
/* once a decoder ran, an unset sample_fmt means parameters are incomplete */
2231         if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2234     case AVMEDIA_TYPE_VIDEO:
2236         if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
2239     case AVMEDIA_TYPE_DATA:
2240         if(avctx->codec_id == CODEC_ID_NONE) return 1;
2245     return avctx->codec_id != CODEC_ID_NONE && val != 0;
/*
 * H.264 can reorder heavily, so require several decoded frames before
 * trusting the decode delay; every other codec is trusted immediately.
 */
2248 static int has_decode_delay_been_guessed(AVStream *st)
2250     return st->codec->codec_id != CODEC_ID_H264 ||
2251            st->info->nb_decoded_frames >= 6;
2254 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/*
 * Probe helper: open the stream's decoder if necessary (single-threaded so
 * H.264 SPS/PPS land in extradata) and decode packets from avpkt until the
 * missing codec parameters are filled in or the packet is consumed.
 * st->info->found_decoder records the open attempt: 1 = open, -1 = failed.
 * NOTE(review): listing is elided — frame declaration, subtitle branch and
 * the per-iteration pkt advance are missing between the numbered rows.
 */
2255 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2258     int got_picture = 1, ret = 0;
2260     AVPacket pkt = *avpkt;
2262     if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2263         AVDictionary *thread_opt = NULL;
2265         codec = st->codec->codec ? st->codec->codec :
2266                                    avcodec_find_decoder(st->codec->codec_id);
2269             st->info->found_decoder = -1;
2273         /* force thread count to 1 since the h264 decoder will not extract SPS
2274          *  and PPS to extradata during multi-threaded decoding */
2275         av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2276         ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2278             av_dict_free(&thread_opt);
2280             st->info->found_decoder = -1;
2283         st->info->found_decoder = 1;
2284     } else if (!st->info->found_decoder)
2285         st->info->found_decoder = 1;
2287     if (st->info->found_decoder < 0)
/* keep decoding while data (or buffered frames at flush) remains and the
 * parameters/delay/channel configuration are still unresolved */
2290     while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2292            (!has_codec_parameters(st) ||
2293             !has_decode_delay_been_guessed(st) ||
2294             (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2296         avcodec_get_frame_defaults(&picture);
2297         switch(st->codec->codec_type) {
2298         case AVMEDIA_TYPE_VIDEO:
2299             ret = avcodec_decode_video2(st->codec, &picture,
2300                                         &got_picture, &pkt);
2302         case AVMEDIA_TYPE_AUDIO:
2303             ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
2310                 st->info->nb_decoded_frames++;
2316     if(!pkt.data && !got_picture)
/*
 * Look up the container tag (fourcc) for a codec id in a CODEC_ID_NONE-
 * terminated tag table.
 * NOTE(review): listing is elided — the match/return logic inside the loop
 * is missing between the numbered rows.
 */
2321 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2323     while (tags->id != CODEC_ID_NONE) {
/*
 * Reverse lookup: map a container tag to a codec id. First pass matches
 * exactly; second pass retries case-insensitively via avpriv_toupper4().
 */
2331 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2334     for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2335         if(tag == tags[i].tag)
2338     for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2339         if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2342     return CODEC_ID_NONE;
/*
 * Public API: search a NULL-terminated list of tag tables for the tag of a
 * codec id, delegating to ff_codec_get_tag() per table.
 */
2345 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2348     for(i=0; tags && tags[i]; i++){
2349         int tag= ff_codec_get_tag(tags[i], id);
/*
 * Public API: search a NULL-terminated list of tag tables for the codec id
 * matching a tag; CODEC_ID_NONE if no table has it.
 */
2355 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2358     for(i=0; tags && tags[i]; i++){
2359         enum CodecID id= ff_codec_get_id(tags[i], tag);
2360         if(id!=CODEC_ID_NONE) return id;
2362     return CODEC_ID_NONE;
/*
 * Fill in missing chapter end times: each open-ended chapter ends at the
 * start of the next chapter that begins after it, capped by the container's
 * total duration; if nothing bounds it, it collapses to its own start.
 */
2365 static void compute_chapters_end(AVFormatContext *s)
2368     int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2370     for (i = 0; i < s->nb_chapters; i++)
2371         if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2372             AVChapter *ch = s->chapters[i];
2373             int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2376             for (j = 0; j < s->nb_chapters; j++) {
2377                 AVChapter *ch1 = s->chapters[j];
2378                 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
/* tighten 'end' to the earliest later-starting chapter */
2379                 if (j != i && next_start > ch->start && next_start < end)
2382             ch->end = (end == INT64_MAX) ? ch->start : end;
/*
 * Enumerate candidate standard frame rates for fps guessing, scaled by
 * 1001*12: indices below 60*12 cover n/1.001 rates; the remainder map to
 * the exact rates 24, 30, 60, 12 and 15 fps.
 */
2386 static int get_std_framerate(int i){
2387     if(i<60*12) return i*1001;
2388     else        return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2392 * Is the time base unreliable.
2393 * This is a heuristic to balance between quick acceptance of the values in
2394 * the headers vs. some extra checks.
2395 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2396 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2397 * And there are "variable" fps files this needs to detect as well.
/*
 * Heuristic: non-zero when the codec time base should not be trusted as the
 * frame rate — absurdly small/large values, or codecs (MPEG-2, H.264, ...)
 * that routinely misuse the field. See the comment block above for why.
 * NOTE(review): listing is elided — the remaining codec-id alternatives and
 * the return statements are missing after row 2405.
 */
2399 static int tb_unreliable(AVCodecContext *c){
2400     if(   c->time_base.den >= 101L*c->time_base.num
2401        || c->time_base.den <    5L*c->time_base.num
2402 /*       || c->codec_tag == AV_RL32("DIVX")
2403        || c->codec_tag == AV_RL32("XVID")*/
2404        || c->codec_id == CODEC_ID_MPEG2VIDEO
2405        || c->codec_id == CODEC_ID_H264
2411 #if FF_API_FORMAT_PARAMETERS
/* Deprecated compatibility shim: forwards to avformat_find_stream_info()
 * with no per-stream options. Compiled only while the old API is kept. */
2412 int av_find_stream_info(AVFormatContext *ic)
2414     return avformat_find_stream_info(ic, NULL);
2418 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2420 int i, count, ret, read_size, j;
2422 AVPacket pkt1, *pkt;
2423 int64_t old_offset = avio_tell(ic->pb);
2424 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2425 int flush_codecs = 1;
2428 av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
2430 for(i=0;i<ic->nb_streams;i++) {
2432 AVDictionary *thread_opt = NULL;
2433 st = ic->streams[i];
2435 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2436 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2437 /* if(!st->time_base.num)
2439 if(!st->codec->time_base.num)
2440 st->codec->time_base= st->time_base;
2442 //only for the split stuff
2443 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2444 st->parser = av_parser_init(st->codec->codec_id);
2445 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2446 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2449 codec = st->codec->codec ? st->codec->codec :
2450 avcodec_find_decoder(st->codec->codec_id);
2452 /* force thread count to 1 since the h264 decoder will not extract SPS
2453 * and PPS to extradata during multi-threaded decoding */
2454 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2456 /* Ensure that subtitle_header is properly set. */
2457 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2458 && codec && !st->codec->codec)
2459 avcodec_open2(st->codec, codec, options ? &options[i]
2462 //try to just open decoders, in case this is enough to get parameters
2463 if (!has_codec_parameters(st)) {
2464 if (codec && !st->codec->codec)
2465 avcodec_open2(st->codec, codec, options ? &options[i]
2469 av_dict_free(&thread_opt);
2472 for (i=0; i<ic->nb_streams; i++) {
2473 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2479 if (ff_check_interrupt(&ic->interrupt_callback)){
2481 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2485 /* check if one codec still needs to be handled */
2486 for(i=0;i<ic->nb_streams;i++) {
2487 int fps_analyze_framecount = 20;
2489 st = ic->streams[i];
2490 if (!has_codec_parameters(st))
2492 /* if the timebase is coarse (like the usual millisecond precision
2493 of mkv), we need to analyze more frames to reliably arrive at
2495 if (av_q2d(st->time_base) > 0.0005)
2496 fps_analyze_framecount *= 2;
2497 if (ic->fps_probe_size >= 0)
2498 fps_analyze_framecount = ic->fps_probe_size;
2499 /* variable fps and no guess at the real fps */
2500 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2501 && st->info->duration_count < fps_analyze_framecount
2502 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2504 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2506 if (st->first_dts == AV_NOPTS_VALUE &&
2507 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2508 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2511 if (i == ic->nb_streams) {
2512 /* NOTE: if the format has no header, then we need to read
2513 some packets to get most of the streams, so we cannot
2515 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2516 /* if we found the info for all the codecs, we can stop */
2518 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2523 /* we did not get all the codec info, but we read too much data */
2524 if (read_size >= ic->probesize) {
2526 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2527 for (i = 0; i < ic->nb_streams; i++)
2528 if (!ic->streams[i]->r_frame_rate.num &&
2529 ic->streams[i]->info->duration_count <= 1)
2530 av_log(ic, AV_LOG_WARNING,
2531 "Stream #%d: not enough frames to estimate rate; "
2532 "consider increasing probesize\n", i);
2536 /* NOTE: a new stream can be added there if no header in file
2537 (AVFMTCTX_NOHEADER) */
2538 ret = read_frame_internal(ic, &pkt1);
2539 if (ret == AVERROR(EAGAIN))
2547 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2548 if ((ret = av_dup_packet(pkt)) < 0)
2549 goto find_stream_info_err;
2551 read_size += pkt->size;
2553 st = ic->streams[pkt->stream_index];
2554 if (st->codec_info_nb_frames>1) {
2556 if (st->time_base.den > 0)
2557 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
2558 if (st->avg_frame_rate.num > 0)
2559 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, (AVRational){st->avg_frame_rate.den, st->avg_frame_rate.num}, AV_TIME_BASE_Q));
2561 if (t >= ic->max_analyze_duration) {
2562 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached at %"PRId64"\n", ic->max_analyze_duration, t);
2565 st->info->codec_info_duration += pkt->duration;
2568 int64_t last = st->info->last_dts;
2570 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2571 double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2572 int64_t duration= pkt->dts - last;
2574 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2575 // av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2576 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); i++) {
2577 int framerate= get_std_framerate(i);
2578 double sdts= dts*framerate/(1001*12);
2580 int ticks= lrintf(sdts+j*0.5);
2581 double error= sdts - ticks + j*0.5;
2582 st->info->duration_error[j][0][i] += error;
2583 st->info->duration_error[j][1][i] += error*error;
2586 st->info->duration_count++;
2587 // ignore the first 4 values, they might have some random jitter
2588 if (st->info->duration_count > 3)
2589 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2591 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2592 st->info->last_dts = pkt->dts;
2594 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2595 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2596 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2597 st->codec->extradata_size= i;
2598 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2599 if (!st->codec->extradata)
2600 return AVERROR(ENOMEM);
2601 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2602 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2606 /* if still no information, we try to open the codec and to
2607 decompress the frame. We try to avoid that in most cases as
2608 it takes longer and uses more memory. For MPEG-4, we need to
2609 decompress for QuickTime.
2611 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2612 least one frame of codec data, this makes sure the codec initializes
2613 the channel configuration and does not only trust the values from the container.
2615 try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2617 st->codec_info_nb_frames++;
2622 AVPacket empty_pkt = { 0 };
2624 av_init_packet(&empty_pkt);
2626 ret = -1; /* we could not have all the codec parameters before EOF */
2627 for(i=0;i<ic->nb_streams;i++) {
2628 st = ic->streams[i];
2630 /* flush the decoders */
2631 if (st->info->found_decoder == 1) {
2633 err = try_decode_frame(st, &empty_pkt,
2634 (options && i < orig_nb_streams) ?
2635 &options[i] : NULL);
2636 } while (err > 0 && !has_codec_parameters(st));
2639 av_log(ic, AV_LOG_INFO,
2640 "decoding for stream %d failed\n", st->index);
2644 if (!has_codec_parameters(st)){
2646 avcodec_string(buf, sizeof(buf), st->codec, 0);
2647 av_log(ic, AV_LOG_WARNING,
2648 "Could not find codec parameters (%s)\n", buf);
2655 // close codecs which were opened in try_decode_frame()
2656 for(i=0;i<ic->nb_streams;i++) {
2657 st = ic->streams[i];
2658 avcodec_close(st->codec);
2660 for(i=0;i<ic->nb_streams;i++) {
2661 st = ic->streams[i];
2662 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2663 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
2664 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2665 if(ff_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
2666 st->codec->codec_tag= tag;
2669 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2670 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2671 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2672 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2673 // the check for tb_unreliable() is not completely correct, since this is not about handling
2674 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2675 // ipmovie.c produces.
2676 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
2677 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2678 if (st->info->duration_count && !st->r_frame_rate.num
2679 && tb_unreliable(st->codec) /*&&
2680 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2681 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2683 double best_error= 0.01;
2685 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); j++) {
2688 if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
2690 if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
2693 int n= st->info->duration_count;
2694 double a= st->info->duration_error[k][0][j] / n;
2695 double error= st->info->duration_error[k][1][j]/n - a*a;
2697 if(error < best_error && best_error> 0.000000001){
2699 num = get_std_framerate(j);
2702 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
2705 // do not increase frame rate by more than 1 % in order to match a standard rate.
2706 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2707 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2710 if (!st->r_frame_rate.num){
2711 if( st->codec->time_base.den * (int64_t)st->time_base.num
2712 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2713 st->r_frame_rate.num = st->codec->time_base.den;
2714 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2716 st->r_frame_rate.num = st->time_base.den;
2717 st->r_frame_rate.den = st->time_base.num;
2720 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2721 if(!st->codec->bits_per_coded_sample)
2722 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2723 // set stream disposition based on audio service type
2724 switch (st->codec->audio_service_type) {
2725 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2726 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2727 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2728 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2729 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2730 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2731 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2732 st->disposition = AV_DISPOSITION_COMMENT; break;
2733 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2734 st->disposition = AV_DISPOSITION_KARAOKE; break;
2739 estimate_timings(ic, old_offset);
2741 compute_chapters_end(ic);
2743 find_stream_info_err:
2744 for (i=0; i < ic->nb_streams; i++) {
2745 if (ic->streams[i]->codec)
2746 ic->streams[i]->codec->thread_count = 0;
2747 av_freep(&ic->streams[i]->info);
2750 av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
/*
 * Find the program that contains stream index 's', resuming the scan after
 * 'last' (NULL restarts from the first program).
 * NOTE(review): embedded numbering jumps (2755-57, 2760-62, 2766+ absent) —
 * parts of this function, including the fallback return, are missing here.
 */
2754 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
2758     for (i = 0; i < ic->nb_programs; i++) {
2759         if (ic->programs[i] == last) {
2763         for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2764             if (ic->programs[i]->stream_index[j] == s)
2765                 return ic->programs[i];
/*
 * Select the "best" stream of the given media type: prefers the stream with
 * the most frames seen during probing (codec_info_nb_frames), skipping
 * hearing/visually-impaired dispositions, and optionally restricting the
 * search to the program related to 'related_stream'. Also resolves a decoder
 * into *decoder_ret when requested.
 * NOTE(review): this listing has gaps (e.g. 2774, 2776-77, 2785, 2795, 2797,
 * 2799-2800, 2802-03, 2805-07, 2809, 2814, 2817-19, 2821+); several
 * continue/brace/return lines are not shown.
 */
2771 int av_find_best_stream(AVFormatContext *ic,
2772 enum AVMediaType type,
2773 int wanted_stream_nb,
2775 AVCodec **decoder_ret,
2778     int i, nb_streams = ic->nb_streams;
2779     int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2780     unsigned *program = NULL;
2781     AVCodec *decoder = NULL, *best_decoder = NULL;
/* Restrict the candidate set to the related program's stream_index list. */
2783     if (related_stream >= 0 && wanted_stream_nb < 0) {
2784         AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
2786             program = p->stream_index;
2787             nb_streams = p->nb_stream_indexes;
2790     for (i = 0; i < nb_streams; i++) {
2791         int real_stream_index = program ? program[i] : i;
2792         AVStream *st = ic->streams[real_stream_index];
2793         AVCodecContext *avctx = st->codec;
2794         if (avctx->codec_type != type)
2796         if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
/* Accessibility renditions are never auto-selected as "best". */
2798         if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2801             decoder = avcodec_find_decoder(st->codec->codec_id);
2804                     ret = AVERROR_DECODER_NOT_FOUND;
/* Keep the stream for which probing decoded the most frames. */
2808         if (best_count >= st->codec_info_nb_frames)
2810         best_count = st->codec_info_nb_frames;
2811         ret = real_stream_index;
2812         best_decoder = decoder;
/* Program yielded nothing: widen the search to all streams and restart. */
2813         if (program && i == nb_streams - 1 && ret < 0) {
2815             nb_streams = ic->nb_streams;
2816             i = 0; /* no related stream found, try again with everything */
2820         *decoder_ret = best_decoder;
2824 /*******************************************************/
/*
 * Resume playback: delegate to the demuxer's read_play callback if present,
 * otherwise un-pause the underlying AVIOContext; ENOSYS when neither exists.
 * NOTE(review): line 2830 (presumably the s->pb check) is absent from this
 * listing — confirm against the full source.
 */
2826 int av_read_play(AVFormatContext *s)
2828     if (s->iformat->read_play)
2829         return s->iformat->read_play(s);
2831         return avio_pause(s->pb, 0);
2832     return AVERROR(ENOSYS);
/*
 * Pause playback: mirror of av_read_play() — demuxer callback first, then
 * avio_pause(pb, 1), else AVERROR(ENOSYS).
 * NOTE(review): line 2839 (presumably the s->pb check) is absent here.
 */
2835 int av_read_pause(AVFormatContext *s)
2837     if (s->iformat->read_pause)
2838         return s->iformat->read_pause(s);
2840         return avio_pause(s->pb, 1);
2841     return AVERROR(ENOSYS);
/*
 * Free an AVFormatContext and everything it owns: per-stream data (parser,
 * attached picture, metadata, index entries, codec extradata/subtitle header,
 * codec context, priv_data, probe info), programs, chapters, context
 * metadata and priv_data, and the stream array itself.
 * NOTE(review): listing gaps (2845-49, 2852, 2855-56, 2858, 2868-69, 2874,
 * 2880, 2884+) hide the declaration of 'st', loop braces and the final lines.
 */
2844 void avformat_free_context(AVFormatContext *s)
/* priv_data may hold AVOptions set via the demuxer's priv_class. */
2850     if (s->iformat && s->iformat->priv_class && s->priv_data)
2851         av_opt_free(s->priv_data);
2853     for(i=0;i<s->nb_streams;i++) {
2854         /* free all data in a stream component */
2857         av_parser_close(st->parser);
2859         if (st->attached_pic.data)
2860             av_free_packet(&st->attached_pic);
2861         av_dict_free(&st->metadata);
2862         av_freep(&st->index_entries);
2863         av_freep(&st->codec->extradata);
2864         av_freep(&st->codec->subtitle_header);
2865         av_freep(&st->codec);
2866         av_freep(&st->priv_data);
2867         av_freep(&st->info);
2870     for(i=s->nb_programs-1; i>=0; i--) {
2871         av_dict_free(&s->programs[i]->metadata);
2872         av_freep(&s->programs[i]->stream_index);
2873         av_freep(&s->programs[i]);
2875     av_freep(&s->programs);
2876     av_freep(&s->priv_data);
2877     while(s->nb_chapters--) {
2878         av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2879         av_freep(&s->chapters[s->nb_chapters]);
2881     av_freep(&s->chapters);
2882     av_dict_free(&s->metadata);
2883     av_freep(&s->streams);
/*
 * Deprecated wrapper (guarded by FF_API_CLOSE_INPUT_FILE) that forwards to
 * avformat_close_input(). The matching #endif is outside this listing.
 */
2887 #if FF_API_CLOSE_INPUT_FILE
2888 void av_close_input_file(AVFormatContext *s)
2890     avformat_close_input(&s);
/*
 * Close an input context: capture the AVIOContext to close (unless the
 * format is AVFMT_NOFILE or the caller supplied custom I/O), flush queued
 * packets, call the demuxer's read_close, and free the context. *ps is
 * reset and pb closed in lines not shown here (2898, 2903-05 missing).
 */
2894 void avformat_close_input(AVFormatContext **ps)
2896     AVFormatContext *s = *ps;
2897     AVIOContext *pb = (s->iformat && (s->iformat->flags & AVFMT_NOFILE)) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2899     flush_packet_queue(s);
2900     if (s->iformat && (s->iformat->read_close))
2901         s->iformat->read_close(s);
2902     avformat_free_context(s);
/*
 * Deprecated wrapper (FF_API_NEW_STREAM): create a stream via
 * avformat_new_stream() and, in lines not shown (2912+), assign 'id'.
 */
2908 #if FF_API_NEW_STREAM
2909 AVStream *av_new_stream(AVFormatContext *s, int id)
2911     AVStream *st = avformat_new_stream(s, NULL);
/*
 * Append a new AVStream to the context: grow the stream array, allocate the
 * stream and its probe-info struct, allocate a codec context (defaults from
 * 'c' if non-NULL), and initialize timing fields to "unknown".
 * NOTE(review): gaps in the numbering (2919-23, 2925, 2927-28, 2930, 2932-33,
 * 2935-37, 2939, 2941, 2944, 2955, 2962, 2964, 2966+) hide error-path
 * cleanup, braces and the final return.
 */
2918 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
/* Guard the realloc size computation against integer overflow. */
2924     if (s->nb_streams >= INT_MAX/sizeof(*streams))
2926     streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2929     s->streams = streams;
2931     st = av_mallocz(sizeof(AVStream));
2934     if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2938     st->info->last_dts = AV_NOPTS_VALUE;
2940     st->codec = avcodec_alloc_context3(c);
2942         /* no default bitrate if decoding */
2943         st->codec->bit_rate = 0;
2945     st->index = s->nb_streams;
2946     st->start_time = AV_NOPTS_VALUE;
2947     st->duration = AV_NOPTS_VALUE;
2948         /* we set the current DTS to 0 so that formats without any timestamps
2949            but durations get some timestamps, formats with some unknown
2950            timestamps have their first few packets buffered and the
2951            timestamps corrected before they are returned to the user */
2952     st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
2953     st->first_dts = AV_NOPTS_VALUE;
2954     st->probe_packets = MAX_PROBE_PACKETS;
2956     /* default pts setting is MPEG-like */
2957     avpriv_set_pts_info(st, 33, 1, 90000);
2958     st->last_IP_pts = AV_NOPTS_VALUE;
2959     for(i=0; i<MAX_REORDER_DELAY+1; i++)
2960         st->pts_buffer[i]= AV_NOPTS_VALUE;
2961     st->reference_dts = AV_NOPTS_VALUE;
2963     st->sample_aspect_ratio = (AVRational){0,1};
2965     s->streams[s->nb_streams++] = st;
/*
 * Return the program with the given id, creating and registering a new one
 * (discard = AVDISCARD_NONE) if none exists yet.
 * NOTE(review): gaps (2970, 2972-73, 2975, 2979-80, 2982-83, 2986+) hide the
 * allocation-failure check, id assignment and return.
 */
2969 AVProgram *av_new_program(AVFormatContext *ac, int id)
2971     AVProgram *program=NULL;
2974     av_dlog(ac, "new_program: id=0x%04x\n", id);
/* Reuse an existing program with a matching id instead of allocating. */
2976     for(i=0; i<ac->nb_programs; i++)
2977         if(ac->programs[i]->id == id)
2978             program = ac->programs[i];
2981         program = av_mallocz(sizeof(AVProgram));
2984         dynarray_add(&ac->programs, &ac->nb_programs, program);
2985         program->discard = AVDISCARD_NONE;
/*
 * Return the chapter with the given id, creating it if needed, then set its
 * title metadata, time base, start (and, in a line not shown, end).
 * NOTE(review): gaps (2993, 2995-96, 3000-01, 3003-04, 3006, 3008, 3011+)
 * hide the id assignment, end-time assignment and return.
 */
2992 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2994     AVChapter *chapter = NULL;
2997     for(i=0; i<s->nb_chapters; i++)
2998         if(s->chapters[i]->id == id)
2999             chapter = s->chapters[i];
3002         chapter= av_mallocz(sizeof(AVChapter));
3005         dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3007     av_dict_set(&chapter->metadata, "title", title, 0);
3009     chapter->time_base= time_base;
3010     chapter->start = start;
3016 /************************************************************/
3017 /* output media file */
/*
 * Allocate an output AVFormatContext: resolve the output format from an
 * explicit AVOutputFormat, a format name, or the filename extension;
 * allocate and default-initialize the muxer's priv_data; copy the filename.
 * Failure paths log, set ret, and free the context at the 'error' label.
 * NOTE(review): gaps (3023-30, 3032, 3035-37, 3039, 3042-47, 3051-52,
 * 3056-57, 3059-60, 3062-64, 3067, 3069+) hide labels, braces and returns.
 */
3019 int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
3020 const char *format, const char *filename)
3022     AVFormatContext *s = avformat_alloc_context();
/* An explicit format name takes precedence over filename guessing. */
3031             oformat = av_guess_format(format, NULL, NULL);
3033                 av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
3034                 ret = AVERROR(EINVAL);
3038             oformat = av_guess_format(NULL, filename, NULL);
3040                 ret = AVERROR(EINVAL);
3041                 av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
3048     s->oformat = oformat;
3049     if (s->oformat->priv_data_size > 0) {
3050         s->priv_data = av_mallocz(s->oformat->priv_data_size);
3053         if (s->oformat->priv_class) {
3054             *(const AVClass**)s->priv_data= s->oformat->priv_class;
3055             av_opt_set_defaults(s->priv_data);
3058         s->priv_data = NULL;
3061         av_strlcpy(s->filename, filename, sizeof(s->filename));
3065     av_log(s, AV_LOG_ERROR, "Out of memory\n");
3066     ret = AVERROR(ENOMEM);
3068     avformat_free_context(s);
/*
 * Deprecated wrapper (FF_API_ALLOC_OUTPUT_CONTEXT): call
 * avformat_alloc_output_context2() and return NULL on failure.
 * The matching #endif is outside this listing.
 */
3072 #if FF_API_ALLOC_OUTPUT_CONTEXT
3073 AVFormatContext *avformat_alloc_output_context(const char *format,
3074 AVOutputFormat *oformat, const char *filename)
3076     AVFormatContext *avctx;
3077     int ret = avformat_alloc_output_context2(&avctx, oformat, format, filename);
3078     return ret < 0 ? NULL : avctx;
/*
 * Check that the stream's (codec_tag, codec_id) pair is consistent with the
 * muxer's codec_tag tables (see the rules in the original comment below).
 * NOTE(review): gaps (3083, 3085, 3088-89, 3094, 3099, 3101-02, 3104-07,
 * 3109, 3111+) hide tag capture, loop advance and the return statements.
 */
3082 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
3084     const AVCodecTag *avctag;
3086     enum CodecID id = CODEC_ID_NONE;
3087     unsigned int tag = 0;
3090      * Check that tag + id is in the table
3091      * If neither is in the table -> OK
3092      * If tag is in the table with another id -> FAIL
3093      * If id is in the table with another tag -> FAIL unless strict < normal
3095     for (n = 0; s->oformat->codec_tag[n]; n++) {
3096         avctag = s->oformat->codec_tag[n];
3097         while (avctag->id != CODEC_ID_NONE) {
/* Tags are compared case-insensitively via avpriv_toupper4(). */
3098             if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
3100                 if (id == st->codec->codec_id)
3103             if (avctag->id == st->codec->codec_id)
3108     if (id != CODEC_ID_NONE)
3110     if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
/*
 * Prepare a muxer for writing: apply options to the context and priv_data,
 * sanity-check every stream (sample rate, time base, dimensions, aspect
 * ratio, codec tag vs. the muxer's tag tables, global-header flag), allocate
 * priv_data if still missing, set the "encoder" metadata identification,
 * call the muxer's write_header, and seed per-stream PTS generation.
 * NOTE(review): extensive gaps in the numbering (e.g. 3116-18, 3120-21,
 * 3124, 3127-28, 3133-35, 3137-38, 3144-45, 3149, 3154-55, 3159-60, 3163,
 * 3170-74, 3182, 3192-94, 3196-97, 3201-02, 3207-08, 3213-16, 3220-21,
 * 3224-27, 3231-32, 3236, 3239-42, 3244, 3246-47, 3249-52, 3254+) hide
 * braces, 'fail' label handling and returns; read with the full source.
 */
3115 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
3119     AVDictionary *tmp = NULL;
/* Work on a copy so unrecognized options can be reported back to the caller. */
3122         av_dict_copy(&tmp, *options, 0);
3123     if ((ret = av_opt_set_dict(s, &tmp)) < 0)
3125     if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
3126         (ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3129     // some sanity checks
3130     if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
3131         av_log(s, AV_LOG_ERROR, "no streams\n");
3132         ret = AVERROR(EINVAL);
3136     for(i=0;i<s->nb_streams;i++) {
3139         switch (st->codec->codec_type) {
3140         case AVMEDIA_TYPE_AUDIO:
3141             if(st->codec->sample_rate<=0){
3142                 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
3143                 ret = AVERROR(EINVAL);
/* Derive block_align for fixed-size-sample codecs when unset. */
3146             if(!st->codec->block_align)
3147                 st->codec->block_align = st->codec->channels *
3148                     av_get_bits_per_sample(st->codec->codec_id) >> 3;
3150         case AVMEDIA_TYPE_VIDEO:
3151             if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
3152                 av_log(s, AV_LOG_ERROR, "time base not set\n");
3153                 ret = AVERROR(EINVAL);
3156             if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
3157                 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
3158                 ret = AVERROR(EINVAL);
/* Reject SAR mismatch between stream and codec beyond a 0.4% tolerance. */
3161             if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)
3162                && FFABS(av_q2d(st->sample_aspect_ratio) - av_q2d(st->codec->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
3164                 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
3165                        "(%d/%d) and encoder layer (%d/%d)\n",
3166                        st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3167                        st->codec->sample_aspect_ratio.num,
3168                        st->codec->sample_aspect_ratio.den);
3169                 ret = AVERROR(EINVAL);
3175         if(s->oformat->codec_tag){
3176             if(   st->codec->codec_tag
3177                && st->codec->codec_id == CODEC_ID_RAWVIDEO
3178                && (av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 || av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) ==MKTAG('r', 'a', 'w', ' '))
3179                && !validate_codec_tag(s, st)){
3180                 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi/mov, we override it here
3181                 st->codec->codec_tag= 0;
3183             if(st->codec->codec_tag){
3184                 if (!validate_codec_tag(s, st)) {
3185                     char tagbuf[32], cortag[32];
3186                     av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
3187                     av_get_codec_tag_string(cortag, sizeof(cortag), av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id));
3188                     av_log(s, AV_LOG_ERROR,
3189                            "Tag %s/0x%08x incompatible with output codec id '%d' (%s)\n",
3190                            tagbuf, st->codec->codec_tag, st->codec->codec_id, cortag);
3191                     ret = AVERROR_INVALIDDATA;
3195                 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
3198         if(s->oformat->flags & AVFMT_GLOBALHEADER &&
3199             !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
3200           av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
3203     if (!s->priv_data && s->oformat->priv_data_size > 0) {
3204         s->priv_data = av_mallocz(s->oformat->priv_data_size);
3205         if (!s->priv_data) {
3206             ret = AVERROR(ENOMEM);
3209         if (s->oformat->priv_class) {
3210             *(const AVClass**)s->priv_data= s->oformat->priv_class;
3211             av_opt_set_defaults(s->priv_data);
3212             if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3217     /* set muxer identification string */
3218     if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
3219         av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
3222     if(s->oformat->write_header){
3223         ret = s->oformat->write_header(s);
3228     /* init PTS generation */
3229     for(i=0;i<s->nb_streams;i++) {
3230         int64_t den = AV_NOPTS_VALUE;
3233         switch (st->codec->codec_type) {
3234         case AVMEDIA_TYPE_AUDIO:
3235             den = (int64_t)st->time_base.num * st->codec->sample_rate;
3237         case AVMEDIA_TYPE_VIDEO:
3238             den = (int64_t)st->time_base.num * st->codec->time_base.den;
3243         if (den != AV_NOPTS_VALUE) {
3245                 ret = AVERROR_INVALIDDATA;
/* Seed the fractional PTS accumulator used by compute_pkt_fields2(). */
3248             frac_init(&st->pts, 0, 0, den);
3253         av_dict_free(options);
3262 //FIXME merge with compute_pkt_fields
/*
 * Fill in / validate packet timing before muxing: derive a duration when
 * missing, synthesize pts/dts for encoders that do not set them, reorder a
 * pts-only stream through st->pts_buffer to obtain dts, and enforce
 * monotonically increasing dts and pts >= dts. Finally advance the
 * per-stream fractional PTS accumulator (st->pts) by one frame.
 * NOTE(review): gaps in the numbering (3266, 3269, 3273, 3275-77, 3279-80,
 * 3283-84, 3286-88, 3291-92, 3300, 3302-03, 3309, 3313-14, 3318-19, 3323,
 * 3329-30, 3333+) hide braces, returns and some assignments.
 */
3263 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
3264     int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
3265     int num, den, frame_size, i;
3267     av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n",
3268             pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
3270     /* duration field */
3271     if (pkt->duration == 0) {
3272         compute_frame_duration(&num, &den, st, NULL, pkt);
3274             pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
/* With no B-frame delay, dts and pts must coincide. */
3278     if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
3281     //XXX/FIXME this is a temporary hack until all encoders output pts
3282     if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
3285             av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
3289 //        pkt->pts= st->cur_dts;
3290         pkt->pts= st->pts.val;
3293     //calculate dts from pts
3294     if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
3295         st->pts_buffer[0]= pkt->pts;
3296         for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
3297             st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
/* One bubble-sort pass keeps the reorder buffer sorted; dts = smallest pts. */
3298         for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
3299             FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
3301         pkt->dts= st->pts_buffer[0];
/* Strict monotonicity unless the muxer sets AVFMT_TS_NONSTRICT (then only
 * decreasing dts is rejected). */
3304     if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)){
3305         av_log(s, AV_LOG_ERROR,
3306                "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
3307                st->index, st->cur_dts, pkt->dts);
3308         return AVERROR(EINVAL);
3310     if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3311         av_log(s, AV_LOG_ERROR, "pts (%"PRId64") < dts (%"PRId64") in stream %d\n", pkt->pts, pkt->dts, st->index);
3312         return AVERROR(EINVAL);
3315 //    av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
3316     st->cur_dts= pkt->dts;
3317     st->pts.val= pkt->dts;
3320     switch (st->codec->codec_type) {
3321     case AVMEDIA_TYPE_AUDIO:
3322         frame_size = get_audio_frame_size(st->codec, pkt->size, 1);
3324         /* HACK/FIXME, we skip the initial 0 size packets as they are most
3325            likely equal to the encoder delay, but it would be better if we
3326            had the real timestamps from the encoder */
3327         if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3328             frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3331     case AVMEDIA_TYPE_VIDEO:
3332         frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/*
 * Write one packet straight to the muxer (no interleaving): a NULL packet
 * path for AVFMT_ALLOW_FLUSH muxers, otherwise timestamp fixup via
 * compute_pkt_fields2() followed by write_packet and a per-stream frame
 * count update.
 * NOTE(review): gaps (3341-44, 3347-49, 3351, 3353-54, 3356-57, 3359+) hide
 * the NULL-pkt check, error returns and the final return value.
 */
3340 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3345         if (s->oformat->flags & AVFMT_ALLOW_FLUSH)
3346             return s->oformat->write_packet(s, pkt);
3350     ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
/* Timestamp errors are only fatal for muxers that need timestamps. */
3352     if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3355     ret= s->oformat->write_packet(s, pkt);
3358         s->streams[pkt->stream_index]->nb_frames++;
/* Packet flag marking the first packet of an interleaving chunk. */
3362 #define CHUNK_START 0x1000
/*
 * Insert a copy of 'pkt' into the interleaving packet buffer at the position
 * chosen by 'compare' (sorted insert), maintaining per-stream
 * last_in_packet_buffer pointers and, when chunked interleaving is enabled
 * (max_chunk_size/max_chunk_duration), CHUNK_START flags and per-stream
 * chunk accounting.
 * NOTE(review): gaps (3366, 3370, 3372, 3377, 3380, 3382-85, 3391-92,
 * 3396-98, 3400, 3404-07, 3409-10, 3412, 3414-15, 3417, 3420+) hide braces
 * and parts of the insertion logic.
 */
3364 int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3365                              int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3367     AVPacketList **next_point, *this_pktl;
3368     AVStream *st= s->streams[pkt->stream_index];
3369     int chunked= s->max_chunk_size || s->max_chunk_duration;
3371     this_pktl = av_mallocz(sizeof(AVPacketList));
3373         return AVERROR(ENOMEM);
3374     this_pktl->pkt= *pkt;
3375     pkt->destruct= NULL;             // do not free original but only the copy
3376     av_dup_packet(&this_pktl->pkt);  // duplicate the packet if it uses non-alloced memory
3378     if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3379         next_point = &(st->last_in_packet_buffer->next);
3381         next_point = &s->packet_buffer;
3386             uint64_t max= av_rescale_q(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base);
/* Stay inside the current chunk while both size and duration limits hold. */
3387             if(   st->interleaver_chunk_size     + pkt->size     <= s->max_chunk_size-1U
3388                && st->interleaver_chunk_duration + pkt->duration <= max-1U){
3389                 st->interleaver_chunk_size     += pkt->size;
3390                 st->interleaver_chunk_duration += pkt->duration;
3393                 st->interleaver_chunk_size     =
3394                 st->interleaver_chunk_duration = 0;
3395                 this_pktl->pkt.flags |= CHUNK_START;
3399         if(compare(s, &s->packet_buffer_end->pkt, pkt)){
/* Walk forward while the new packet sorts after existing ones; never split
 * a chunk that has already been started. */
3401               && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
3402                || !compare(s, &(*next_point)->pkt, pkt))){
3403                 next_point= &(*next_point)->next;
3408             next_point = &(s->packet_buffer_end->next);
3411         assert(!*next_point);
3413         s->packet_buffer_end= this_pktl;
3416     this_pktl->next= *next_point;
3418     s->streams[pkt->stream_index]->last_in_packet_buffer=
3419     *next_point= this_pktl;
/*
 * Ordering predicate for dts-based interleaving: compare next->dts against
 * pkt->dts across time bases; when audio_preload is set and exactly one of
 * the two streams is audio, bias the audio stream earlier by the preload
 * amount (exact 64-bit rescale when possible, cross-multiplied comparison
 * otherwise). Ties fall back to stream_index order.
 * NOTE(review): gaps (3424, 3428, 3432, 3435-36, 3438-40, 3442+) hide the
 * comparison's closing logic and return of 'comp'.
 */
3423 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3425     AVStream *st = s->streams[ pkt ->stream_index];
3426     AVStream *st2= s->streams[ next->stream_index];
3427     int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3429     if(s->audio_preload && ((st->codec->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codec->codec_type == AVMEDIA_TYPE_AUDIO))){
3430         int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3431         int64_t ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO);
/* Overflow-safe fallback: compare cross-multiplied values directly. */
3433             ts= ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
3434                -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
3437             comp= (ts>ts2) - (ts<ts2);
3441         return pkt->stream_index < next->stream_index;
/*
 * Interleave packets by dts: add the incoming packet to the buffer, then
 * emit the head packet once every stream has contributed (or when flushing,
 * or when non-interleaved/subtitle streams would otherwise stall the buffer
 * past ~20 seconds of dts spread). Returns via 'out'.
 * NOTE(review): gaps (3447-48, 3451-53, 3455-58, 3461, 3464-66, 3468-69,
 * 3472, 3475, 3478, 3480-81, 3485-87, 3490-91, 3495, 3498-3500, 3502+) hide
 * the buffered-packet copy into *out, flush decisions and returns.
 */
3445 int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3446                                  AVPacket *pkt, int flush)
3449     int stream_count=0, noninterleaved_count=0;
3450     int64_t delta_dts_max = 0;
3454         ret = ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
/* Count streams that already have buffered data vs. subtitle streams that
 * may legitimately never produce packets. */
3459     for(i=0; i < s->nb_streams; i++) {
3460         if (s->streams[i]->last_in_packet_buffer) {
3462         } else if(s->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
3463             ++noninterleaved_count;
3467     if (s->nb_streams == stream_count) {
3470         for(i=0; i < s->nb_streams; i++) {
3471             if (s->streams[i]->last_in_packet_buffer) {
3473                     av_rescale_q(s->streams[i]->last_in_packet_buffer->pkt.dts,
3474                                 s->streams[i]->time_base,
3476                     av_rescale_q(s->packet_buffer->pkt.dts,
3477                                 s->streams[s->packet_buffer->pkt.stream_index]->time_base,
3479                 delta_dts_max= FFMAX(delta_dts_max, delta_dts);
/* All interleaved streams ready and the dts spread is huge: force a flush
 * so the buffer does not grow unboundedly. */
3482         if(s->nb_streams == stream_count+noninterleaved_count &&
3483            delta_dts_max > 20*AV_TIME_BASE) {
3484             av_log(s, AV_LOG_DEBUG, "flushing with %d noninterleaved\n", noninterleaved_count);
3488     if(stream_count && flush){
3489         pktl= s->packet_buffer;
3492         s->packet_buffer= pktl->next;
3493         if(!s->packet_buffer)
3494             s->packet_buffer_end= NULL;
3496         if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3497             s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3501         av_init_packet(out);
/*
 * Deprecated public wrapper (FF_API_INTERLEAVE_PACKET) forwarding to
 * ff_interleave_packet_per_dts(). The #endif is outside this listing.
 */
3506 #if FF_API_INTERLEAVE_PACKET
3507 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3508                                  AVPacket *pkt, int flush)
3510     return ff_interleave_packet_per_dts(s, out, pkt, flush);
/*
3515  * Interleave an AVPacket correctly so it can be muxed.
3516  * @param out the interleaved packet will be output here
3517  * @param in the input packet
3518  * @param flush 1 if no further packets are available as input and all
3519  *              remaining packets should be output
3520  * @return 1 if a packet was output, 0 if no packet could be output,
3521  *         < 0 if an error occurred
 */
/* Dispatch: a muxer-provided interleave_packet callback wins; otherwise use
 * the generic dts-based interleaver. Lines 3526-29 (the callback's error
 * handling) are absent from this listing. */
3523 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3524     if (s->oformat->interleave_packet) {
3525         int ret = s->oformat->interleave_packet(s, out, in, flush);
3530         return ff_interleave_packet_per_dts(s, out, in, flush);
/*
 * Public interleaved write: fix up packet timestamps, reject packets without
 * dts (unless the muxer ignores timestamps), then loop draining the
 * interleaver — each emitted packet is written via write_packet, counted in
 * nb_frames, and freed. Propagates pb->error when set.
 * NOTE(review): gaps (3534-36, 3538, 3541-42, 3546-47, 3550, 3552-56,
 * 3559-60, 3562, 3564, 3566-69, 3572+) hide the NULL-pkt flush entry, loop
 * braces and return statements.
 */
3533 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3537         AVStream *st= s->streams[ pkt->stream_index];
3539         //FIXME/XXX/HACK drop zero sized packets
3540         if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3543         av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
3544                 pkt->size, pkt->dts, pkt->pts);
3545         if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3548         if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3549             return AVERROR(EINVAL);
3551         av_dlog(s, "av_interleaved_write_frame FLUSH\n");
3557         int ret= interleave_packet(s, &opkt, pkt, flush);
3558         if(ret<=0) //FIXME cleanup needed for ret<0 ?
3561         ret= s->oformat->write_packet(s, &opkt);
3563             s->streams[opkt.stream_index]->nb_frames++;
3565         av_free_packet(&opkt);
3570         if(s->pb && s->pb->error)
3571             return s->pb->error;
/*
 * Finish muxing: drain the interleaver with flush=1, writing and freeing
 * each remaining packet, then call the muxer's write_trailer, report any
 * pb error, and free per-stream priv_data/index_entries plus the muxer's
 * priv_data (AVOptions freed first when a priv_class exists).
 * NOTE(review): gaps (3576-80, 3583-86, 3588, 3590, 3592-94, 3596-98,
 * 3601-04, 3609, 3613-14) hide the loop structure, 'fail' label and final
 * return of 'ret'.
 */
3575 int av_write_trailer(AVFormatContext *s)
3581         ret= interleave_packet(s, &pkt, NULL, 1);
3582         if(ret<0) //FIXME cleanup needed for ret<0 ?
3587         ret= s->oformat->write_packet(s, &pkt);
3589             s->streams[pkt.stream_index]->nb_frames++;
3591         av_free_packet(&pkt);
3595         if(s->pb && s->pb->error)
3599     if(s->oformat->write_trailer)
3600         ret = s->oformat->write_trailer(s);
/* Prefer a pending I/O error over the trailer's return value. */
3605         ret = s->pb ? s->pb->error : 0;
3606     for(i=0;i<s->nb_streams;i++) {
3607         av_freep(&s->streams[i]->priv_data);
3608         av_freep(&s->streams[i]->index_entries);
3610     if (s->oformat->priv_class)
3611         av_opt_free(s->priv_data);
3612     av_freep(&s->priv_data);
/*
 * Query the muxer for the last output dts and wall-clock time of a stream;
 * ENOSYS when the muxer does not implement get_output_timestamp.
 * (The 'return 0;' after the callback is on a line not shown here.)
 */
3616 int av_get_output_timestamp(struct AVFormatContext *s, int stream,
3617                             int64_t *dts, int64_t *wall)
3619     if (!s->oformat || !s->oformat->get_output_timestamp)
3620         return AVERROR(ENOSYS);
3621     s->oformat->get_output_timestamp(s, stream, dts, wall);
/*
 * Add stream index 'idx' to the program with id 'progid': validates the
 * index, finds the program, skips duplicates, and grows the program's
 * stream_index array via av_realloc.
 * NOTE(review): gaps (3626-27, 3629-30, 3633-35, 3638, 3642-43, 3645-46,
 * 3649+) hide the early returns, the realloc-failure check and loop braces.
 */
3625 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3628     AVProgram *program=NULL;
3631     if (idx >= ac->nb_streams) {
3632         av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3636     for(i=0; i<ac->nb_programs; i++){
3637         if(ac->programs[i]->id != progid)
3639         program = ac->programs[i];
/* Already present: nothing to add for this program. */
3640         for(j=0; j<program->nb_stream_indexes; j++)
3641             if(program->stream_index[j] == idx)
3644         tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3647         program->stream_index = tmp;
3648         program->stream_index[program->nb_stream_indexes++] = idx;
/*
 * Log a rate with adaptive precision: two decimals for non-integral values,
 * no decimals for integral ones, and a "k" suffix for multiples of 1000.
 * (Closing brace at original line 3658 not shown in this listing.)
 */
3653 static void print_fps(double d, const char *postfix){
3654     uint64_t v= lrintf(d*100);
3655     if     (v% 100      ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3656     else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3657     else                  av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/*
 * Print a metadata dictionary under a "Metadata:" heading, one key per line,
 * skipping the "language" key (shown elsewhere) and a dictionary that
 * contains only "language". CR/LF inside values are rendered as a space /
 * a continuation line respectively.
 * NOTE(review): gaps (3661, 3664, 3670-71, 3675, 3678-79, 3681-84) hide the
 * inner loop advancing 'p' and the closing braces.
 */
3660 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3662     if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3663         AVDictionaryEntry *tag=NULL;
3665         av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3666         while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3667             if(strcmp("language", tag->key)){
3668                 const char *p = tag->value;
3669                 av_log(ctx, AV_LOG_INFO, "%s  %-16s: ", indent, tag->key);
/* Copy up to the next CR/LF and print that segment. */
3672                     size_t len = strcspn(p, "\xd\xa");
3673                     av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3674                     av_log(ctx, AV_LOG_INFO, "%s", tmp);
3676                     if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3677                     if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s  %-16s: ", indent, "");
3680                 av_log(ctx, AV_LOG_INFO, "\n");
3686 /* "user interface" functions */
/* Print a one-line human-readable description of stream i of ic
 * ("    Stream #index:i[0xID](lang): <codec string> ..."), followed by
 * SAR/DAR for anamorphic video, frame-rate/time-base figures for video
 * streams, disposition flags, and the stream's metadata. */
3687 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3690 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3691 AVStream *st = ic->streams[i];
/* gcd is used to print the time base in reduced form on the DEBUG line. */
3692 int g = av_gcd(st->time_base.num, st->time_base.den);
3693 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3694 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3695 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);
3696 /* the pid is an important information, so we display it */
3697 /* XXX: add a generic system */
3698 if (flags & AVFMT_SHOW_IDS)
3699 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3701 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3702 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3703 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* Only print SAR/DAR when the stream-level SAR is set and differs from the
 * codec-level SAR. */
3704 if (st->sample_aspect_ratio.num && // default
3705 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3706 AVRational display_aspect_ratio;
/* DAR = (width * SAR.num) : (height * SAR.den), reduced. */
3707 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3708 st->codec->width*st->sample_aspect_ratio.num,
3709 st->codec->height*st->sample_aspect_ratio.den,
3711 av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3712 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3713 display_aspect_ratio.num, display_aspect_ratio.den);
/* For video: average frame rate (fps), real base frame rate (tbr),
 * stream time base (tbn) and codec time base (tbc). */
3715 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3716 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3717 print_fps(av_q2d(st->avg_frame_rate), "fps");
3718 if(st->r_frame_rate.den && st->r_frame_rate.num)
3719 print_fps(av_q2d(st->r_frame_rate), "tbr");
3720 if(st->time_base.den && st->time_base.num)
3721 print_fps(1/av_q2d(st->time_base), "tbn");
3722 if(st->codec->time_base.den && st->codec->time_base.num)
3723 print_fps(1/av_q2d(st->codec->time_base), "tbc");
/* One " (name)" suffix per set disposition bit. */
3725 if (st->disposition & AV_DISPOSITION_DEFAULT)
3726 av_log(NULL, AV_LOG_INFO, " (default)");
3727 if (st->disposition & AV_DISPOSITION_DUB)
3728 av_log(NULL, AV_LOG_INFO, " (dub)");
3729 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3730 av_log(NULL, AV_LOG_INFO, " (original)");
3731 if (st->disposition & AV_DISPOSITION_COMMENT)
3732 av_log(NULL, AV_LOG_INFO, " (comment)");
3733 if (st->disposition & AV_DISPOSITION_LYRICS)
3734 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3735 if (st->disposition & AV_DISPOSITION_KARAOKE)
3736 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3737 if (st->disposition & AV_DISPOSITION_FORCED)
3738 av_log(NULL, AV_LOG_INFO, " (forced)");
3739 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3740 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3741 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3742 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3743 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3744 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3745 av_log(NULL, AV_LOG_INFO, "\n");
3746 dump_metadata(NULL, st->metadata, " ");
/* Print a human-readable description of the input or output format context:
 * the container header line, global metadata, duration/start/bitrate,
 * chapters, programs (with their member streams), and finally any stream
 * not belonging to a program.  'printed' marks streams already dumped as
 * part of a program so they are not listed twice. */
3749 void av_dump_format(AVFormatContext *ic,
3755 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
/* Out-of-memory guard: av_mallocz() failed while streams exist. */
3756 if (ic->nb_streams && !printed)
3759 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3760 is_output ? "Output" : "Input",
3762 is_output ? ic->oformat->name : ic->iformat->name,
3763 is_output ? "to" : "from", url);
3764 dump_metadata(NULL, ic->metadata, " ");
3766 av_log(NULL, AV_LOG_INFO, " Duration: ");
3767 if (ic->duration != AV_NOPTS_VALUE) {
3768 int hours, mins, secs, us;
/* Split the AV_TIME_BASE-scaled duration into h:m:s and a 1/100 s rest. */
3769 secs = ic->duration / AV_TIME_BASE;
3770 us = ic->duration % AV_TIME_BASE;
3775 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3776 (100 * us) / AV_TIME_BASE);
3778 av_log(NULL, AV_LOG_INFO, "N/A");
3780 if (ic->start_time != AV_NOPTS_VALUE) {
3782 av_log(NULL, AV_LOG_INFO, ", start: ");
/* Start time printed as seconds.microseconds; abs() keeps the fractional
 * part positive for negative start times. */
3783 secs = ic->start_time / AV_TIME_BASE;
3784 us = abs(ic->start_time % AV_TIME_BASE);
3785 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3786 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3788 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3790 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3792 av_log(NULL, AV_LOG_INFO, "N/A");
3794 av_log(NULL, AV_LOG_INFO, "\n");
3796 for (i = 0; i < ic->nb_chapters; i++) {
3797 AVChapter *ch = ic->chapters[i];
3798 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
/* Chapter start/end are in the chapter's own time base. */
3799 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3800 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3802 dump_metadata(NULL, ch->metadata, " ");
3804 if(ic->nb_programs) {
3805 int j, k, total = 0;
3806 for(j=0; j<ic->nb_programs; j++) {
3807 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3809 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3810 name ? name->value : "");
3811 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3812 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3813 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
/* Remember this stream was already dumped under a program. */
3814 printed[ic->programs[j]->stream_index[k]] = 1;
3816 total += ic->programs[j]->nb_stream_indexes;
3818 if (total < ic->nb_streams)
3819 av_log(NULL, AV_LOG_INFO, " No Program\n");
/* Dump the remaining streams that belong to no program. */
3821 for(i=0;i<ic->nb_streams;i++)
3823 dump_stream_format(ic, i, index, is_output);
/* Return the current wall-clock time in microseconds since the Unix epoch. */
3828 int64_t av_gettime(void)
3831 gettimeofday(&tv,NULL);
3832 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/* Current time for NTP use, in microseconds, truncated to whole milliseconds
 * before NTP_OFFSET_US is added.
 * NOTE(review): NTP_OFFSET_US presumably shifts the Unix epoch (1970) to the
 * NTP epoch (1900) — confirm against its definition elsewhere in the tree. */
3835 uint64_t ff_ntp_time(void)
3837 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/* Expand a single "%0Nd"/"%d"-style pattern in 'path' with 'number' into
 * 'buf' (size buf_size).  Exactly one %d directive must be present;
 * percentd_found tracks that requirement.  Returns < 0 on error (pattern
 * missing/duplicated or output truncated). */
3840 int av_get_frame_filename(char *buf, int buf_size,
3841 const char *path, int number)
3844 char *q, buf1[20], c;
3845 int nd, len, percentd_found;
/* Accumulate the optional zero-pad width digits following '%'.
 * NOTE(review): isdigit() on a plain char is undefined for negative values
 * (CERT STR37-C) — a cast to unsigned char would be safer; flagging only. */
3857 while (isdigit(*p)) {
3858 nd = nd * 10 + *p++ - '0';
3861 } while (isdigit(c));
/* Format the number with the requested zero-padded width. */
3870 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
/* Fail rather than truncate the expanded number. */
3872 if ((q - buf + len) > buf_size - 1)
3874 memcpy(q, buf1, len);
3882 if ((q - buf) < buf_size - 1)
3886 if (!percentd_found)
/* Hex-dump 'size' bytes of 'buf', 16 per row, either to FILE *f or — when f
 * is NULL — to the av_log context avcl at the given level. */
3895 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3899 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3901 for(i=0;i<size;i+=16) {
3908 PRINT(" %02x", buf[i+j]);
/* ASCII column: bytes outside the printable range ' '..'~' are
 * substituted before being printed. */
3913 for(j=0;j<len;j++) {
3915 if (c < ' ' || c > '~')
/* Public wrapper: hex-dump to a stdio FILE. */
3924 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3926 hex_dump_internal(NULL, f, 0, buf, size);
/* Public wrapper: hex-dump via av_log() at 'level' on context avcl. */
3929 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3931 hex_dump_internal(avcl, NULL, level, buf, size);
/* Dump an AVPacket's header fields (stream index, keyframe flag, duration,
 * dts, pts, size) — timestamps converted to seconds via time_base — and
 * optionally its payload as a hex dump.  Output goes to FILE *f, or to
 * av_log(avcl, level, ...) when f is NULL. */
3934 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3937 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3938 PRINT("stream #%d:\n", pkt->stream_index);
3939 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3940 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3941 /* DTS is _always_ valid after av_read_frame() */
3943 if (pkt->dts == AV_NOPTS_VALUE)
3946 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3947 /* PTS may not be known if B-frames are present. */
3949 if (pkt->pts == AV_NOPTS_VALUE)
3952 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3954 PRINT(" size=%d\n", pkt->size);
3957 av_hex_dump(f, pkt->data, pkt->size);
/* Dump a packet to FILE *f, interpreting timestamps in 1/AV_TIME_BASE units. */
3961 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3963 AVRational tb = { 1, AV_TIME_BASE };
3964 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
/* Dump a packet to FILE *f using the owning stream's time base. */
3968 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3970 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
/* Dump a packet via av_log(), timestamps in 1/AV_TIME_BASE units. */
3974 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3976 AVRational tb = { 1, AV_TIME_BASE };
3977 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
/* Dump a packet via av_log() using the owning stream's time base. */
3981 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3984 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/* Split 'url' into its components: protocol, user authorization, hostname,
 * port, and path.  Each output buffer may be sized 0 to skip that component;
 * *port_ptr is set to -1 when no port is present.  IPv6 literals in brackets
 * ("[::1]:80") are supported. */
3987 void av_url_split(char *proto, int proto_size,
3988 char *authorization, int authorization_size,
3989 char *hostname, int hostname_size,
3991 char *path, int path_size,
3994 const char *p, *ls, *at, *col, *brk;
/* Initialize every requested output to "empty". */
3996 if (port_ptr) *port_ptr = -1;
3997 if (proto_size > 0) proto[0] = 0;
3998 if (authorization_size > 0) authorization[0] = 0;
3999 if (hostname_size > 0) hostname[0] = 0;
4000 if (path_size > 0) path[0] = 0;
4002 /* parse protocol */
4003 if ((p = strchr(url, ':'))) {
4004 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
4009 /* no protocol means plain filename */
4010 av_strlcpy(path, url, path_size);
4014 /* separate path from hostname */
4015 ls = strchr(p, '/');
/* A '?' (query string with no path) also terminates the authority part. */
4017 ls = strchr(p, '?');
4019 av_strlcpy(path, ls, path_size);
4021 ls = &p[strlen(p)]; // XXX
4023 /* the rest is hostname, use that to parse auth/port */
4025 /* authorization (user[:pass]@hostname) */
4026 if ((at = strchr(p, '@')) && at < ls) {
4027 av_strlcpy(authorization, p,
4028 FFMIN(authorization_size, at + 1 - p));
4029 p = at + 1; /* skip '@' */
/* Bracketed IPv6 literal: host is the text inside [], port follows "]:" */
4032 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
4034 av_strlcpy(hostname, p + 1,
4035 FFMIN(hostname_size, brk - p));
4036 if (brk[1] == ':' && port_ptr)
4037 *port_ptr = atoi(brk + 2);
4038 } else if ((col = strchr(p, ':')) && col < ls) {
4039 av_strlcpy(hostname, p,
4040 FFMIN(col + 1 - p, hostname_size));
4041 if (port_ptr) *port_ptr = atoi(col + 1);
/* No port: everything up to ls is the hostname. */
4043 av_strlcpy(hostname, p,
4044 FFMIN(ls + 1 - p, hostname_size));
/* Encode 's' bytes of 'src' as a hex string into 'buff' (2 chars per byte),
 * upper- or lowercase depending on 'lowercase'.  The caller must provide at
 * least 2*s bytes (plus terminator, if it adds one). */
4048 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
4051 static const char hex_table_uc[16] = { '0', '1', '2', '3',
4054 'C', 'D', 'E', 'F' };
4055 static const char hex_table_lc[16] = { '0', '1', '2', '3',
4058 'c', 'd', 'e', 'f' };
4059 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
4061 for(i = 0; i < s; i++) {
/* High nibble first, then low nibble. */
4062 buff[i * 2] = hex_table[src[i] >> 4];
4063 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
/* Decode the hex string 'p' (whitespace-separated runs allowed) into 'data'.
 * Accepts upper- and lowercase digits via toupper(). */
4069 int ff_hex_to_data(uint8_t *data, const char *p)
4076 p += strspn(p, SPACE_CHARS);
/* Cast to unsigned char keeps toupper() well-defined for all byte values. */
4079 c = toupper((unsigned char) *p++);
4080 if (c >= '0' && c <= '9')
4082 else if (c >= 'A' && c <= 'F')
/* Deprecated public wrapper (kept under FF_API_SET_PTS_INFO) that forwards
 * to the private avpriv_set_pts_info(). */
4097 #if FF_API_SET_PTS_INFO
4098 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
4099 unsigned int pts_num, unsigned int pts_den)
4101 avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
/* Set the time base (pts_num/pts_den, reduced) and timestamp wrap bits of a
 * stream.  The fraction is reduced with av_reduce(); an invalid result
 * (non-positive numerator or denominator) is logged and ignored. */
4105 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
4106 unsigned int pts_num, unsigned int pts_den)
/* av_reduce() returns non-zero when the reduction was exact. */
4109 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
4110 if(new_tb.num != pts_num)
4111 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
4113 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
4115 if(new_tb.num <= 0 || new_tb.den <= 0) {
4116 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase for st:%d\n", s->index);
4119 s->time_base = new_tb;
4120 s->pts_wrap_bits = pts_wrap_bits;
4123 int ff_url_join(char *str, int size, const char *proto,
4124 const char *authorization, const char *hostname,
4125 int port, const char *fmt, ...)
4128 struct addrinfo hints = { 0 }, *ai;
4133 av_strlcatf(str, size, "%s://", proto);
4134 if (authorization && authorization[0])
4135 av_strlcatf(str, size, "%s@", authorization);
4136 #if CONFIG_NETWORK && defined(AF_INET6)
4137 /* Determine if hostname is a numerical IPv6 address,
4138 * properly escape it within [] in that case. */
4139 hints.ai_flags = AI_NUMERICHOST;
4140 if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
4141 if (ai->ai_family == AF_INET6) {
4142 av_strlcat(str, "[", size);
4143 av_strlcat(str, hostname, size);
4144 av_strlcat(str, "]", size);
4146 av_strlcat(str, hostname, size);
4151 /* Not an IPv6 address, just output the plain string. */
4152 av_strlcat(str, hostname, size);
4155 av_strlcatf(str, size, ":%d", port);
4158 int len = strlen(str);
4161 vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
/* Forward a packet from the source muxer/demuxer context 'src' into stream
 * 'dst_stream' of 'dst': the packet is copied, its stream index replaced,
 * and its pts/dts rescaled from the source stream's time base to the
 * destination stream's time base before writing. */
4167 int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
4168 AVFormatContext *src)
4173 local_pkt.stream_index = dst_stream;
4174 if (pkt->pts != AV_NOPTS_VALUE)
4175 local_pkt.pts = av_rescale_q(pkt->pts,
4176 src->streams[pkt->stream_index]->time_base,
4177 dst->streams[dst_stream]->time_base);
4178 if (pkt->dts != AV_NOPTS_VALUE)
4179 local_pkt.dts = av_rescale_q(pkt->dts,
4180 src->streams[pkt->stream_index]->time_base,
4181 dst->streams[dst_stream]->time_base);
4182 return av_write_frame(dst, &local_pkt);
/* Parse a comma/whitespace-separated list of key=value pairs (values may be
 * double-quoted).  For each key, callback_get_buf() is asked for a
 * destination buffer; the value is then copied into it, bounded by
 * dest_end. */
4185 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
4188 const char *ptr = str;
4190 /* Parse key=value pairs. */
4193 char *dest = NULL, *dest_end;
4194 int key_len, dest_len = 0;
4196 /* Skip whitespace and potential commas. */
/* NOTE(review): isspace() on a plain char is undefined for negative values
 * (CERT STR37-C) — a cast to unsigned char would be safer; flagging only. */
4197 while (*ptr && (isspace(*ptr) || *ptr == ','))
4204 if (!(ptr = strchr(key, '=')))
4207 key_len = ptr - key;
/* The callback supplies (or declines, dest=NULL) the value buffer. */
4209 callback_get_buf(context, key, key_len, &dest, &dest_len);
4210 dest_end = dest + dest_len - 1;
/* Quoted value: copy until the closing quote, respecting the bound. */
4214 while (*ptr && *ptr != '\"') {
4218 if (dest && dest < dest_end)
4222 if (dest && dest < dest_end)
/* Unquoted value: copy until whitespace or comma. */
4230 for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
4231 if (dest && dest < dest_end)
/* Linear search for the stream whose id equals 'id'; returns its index. */
4239 int ff_find_stream_index(AVFormatContext *s, int id)
4242 for (i = 0; i < s->nb_streams; i++) {
4243 if (s->streams[i]->id == id)
/* Resolve the (possibly relative) URL 'rel' against 'base' into 'buf':
 * already-absolute URLs are copied through, "/path" is resolved against the
 * base's server root, and "../" segments pop directories off the base path. */
4249 void ff_make_absolute_url(char *buf, int size, const char *base,
4253 /* Absolute path, relative to the current server */
4254 if (base && strstr(base, "://") && rel[0] == '/') {
4256 av_strlcpy(buf, base, size);
4257 sep = strstr(buf, "://");
/* First '/' after "scheme://" marks the end of the authority part. */
4260 sep = strchr(sep, '/');
4264 av_strlcat(buf, rel, size);
4267 /* If rel actually is an absolute url, just copy it */
4268 if (!base || strstr(rel, "://") || rel[0] == '/') {
4269 av_strlcpy(buf, rel, size);
4273 av_strlcpy(buf, base, size);
4274 /* Remove the file name from the base url */
4275 sep = strrchr(buf, '/');
/* Pop one directory per leading "../" in rel. */
4280 while (av_strstart(rel, "../", NULL) && sep) {
4281 /* Remove the path delimiter at the end */
4283 sep = strrchr(buf, '/');
4284 /* If the next directory name to pop off is "..", break here */
4285 if (!strcmp(sep ? &sep[1] : buf, "..")) {
4286 /* Readd the slash we just removed */
4287 av_strlcat(buf, "/", size);
4290 /* Cut off the directory name */
4297 av_strlcat(buf, rel, size);
/* Convert an ISO-8601-style date string ("YYYY-MM-DD hh:mm:ss" or the
 * 'T'-separated variant) to Unix time via strptime() + av_timegm().
 * Spaces in the strptime() format match optional whitespace, so both the
 * spaced and compact forms parse.  Logs a warning where strptime() is
 * unavailable. */
4300 int64_t ff_iso8601_to_unix_time(const char *datestr)
4303 struct tm time1 = {0}, time2 = {0};
4305 ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
4306 ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
/* Prefer the 'T'-separated parse when it succeeded. */
4308 return av_timegm(&time2);
4310 return av_timegm(&time1);
4312 av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert "
4313 "the date string.\n");
/* Ask whether 'ofmt' can mux 'codec_id' at the given compliance level:
 * delegate to the muxer's query_codec() callback if present, otherwise fall
 * back to its codec tag table, then to its default video/audio/subtitle
 * codec ids.  Returns AVERROR_PATCHWELCOME when no answer is available. */
4318 int avformat_query_codec(AVOutputFormat *ofmt, enum CodecID codec_id, int std_compliance)
4321 if (ofmt->query_codec)
4322 return ofmt->query_codec(codec_id, std_compliance);
4323 else if (ofmt->codec_tag)
4324 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4325 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
4326 codec_id == ofmt->subtitle_codec)
4329 return AVERROR_PATCHWELCOME;
/* Globally initialize the network layer; records that initialization was
 * done explicitly before calling ff_network_init(). */
4332 int avformat_network_init(void)
4336 ff_network_inited_globally = 1;
4337 if ((ret = ff_network_init()) < 0)
/* Globally deinitialize the network layer (counterpart of
 * avformat_network_init()). */
4344 int avformat_network_deinit(void)
/* Attach an AV_PKT_DATA_PARAM_CHANGE side-data blob to 'pkt' describing the
 * parameters that changed (any of: channel count, channel layout, sample
 * rate, video dimensions).  The blob is serialized little-endian: a 32-bit
 * flags word followed by the changed values in flag order.  Returns 0 on
 * success, AVERROR(EINVAL)/AVERROR(ENOMEM) on failure. */
4353 int ff_add_param_change(AVPacket *pkt, int32_t channels,
4354 uint64_t channel_layout, int32_t sample_rate,
4355 int32_t width, int32_t height)
4361 return AVERROR(EINVAL);
4364 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4366 if (channel_layout) {
4368 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4372 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4374 if (width || height) {
4376 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
4378 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4380 return AVERROR(ENOMEM);
/* Serialize: flags first, then each present field, all little-endian. */
4381 bytestream_put_le32(&data, flags);
4383 bytestream_put_le32(&data, channels);
4385 bytestream_put_le64(&data, channel_layout);
4387 bytestream_put_le32(&data, sample_rate);
4388 if (width || height) {
4389 bytestream_put_le32(&data, width);
4390 bytestream_put_le32(&data, height);
/* Expose the RIFF/BMP video codec tag table to library users. */
4395 const struct AVCodecTag *avformat_get_riff_video_tags(void)
4397 return ff_codec_bmp_tags;
/* Expose the RIFF/WAV audio codec tag table to library users. */
4399 const struct AVCodecTag *avformat_get_riff_audio_tags(void)
4401 return ff_codec_wav_tags;