2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "avio_internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavcodec/raw.h"
29 #include "libavcodec/bytestream.h"
30 #include "libavutil/avassert.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/dict.h"
33 #include "libavutil/pixdesc.h"
36 #include "libavutil/avstring.h"
37 #include "libavutil/mathematics.h"
38 #include "libavutil/parseutils.h"
40 #include "audiointerleave.h"
54 * various utility functions for use within FFmpeg
57 unsigned avformat_version(void)
59 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
60 return LIBAVFORMAT_VERSION_INT;
63 const char *avformat_configuration(void)
65 return FFMPEG_CONFIGURATION;
68 const char *avformat_license(void)
70 #define LICENSE_PREFIX "libavformat license: "
71 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
74 /* fraction handling */
77 * f = val + (num / den) + 0.5.
79 * 'num' is normalized so that it is such as 0 <= num < den.
81 * @param f fractional number
82 * @param val integer value
83 * @param num must be >= 0
84 * @param den must be >= 1
86 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
99 * Fractional addition to f: f = f + (incr / f->den).
101 * @param f fractional number
102 * @param incr increment, can be positive or negative
104 static void frac_add(AVFrac *f, int64_t incr)
117 } else if (num >= den) {
/* Process-wide singly linked registries; presumably appended to by
 * av_register_input_format()/av_register_output_format() below and walked
 * by av_iformat_next()/av_oformat_next(). Not thread-safe — TODO confirm
 * callers register formats before spawning threads. */
124 /** head of registered input format linked list */
125 static AVInputFormat *first_iformat = NULL;
126 /** head of registered output format linked list */
127 static AVOutputFormat *first_oformat = NULL;
129 AVInputFormat *av_iformat_next(AVInputFormat *f)
131 if(f) return f->next;
132 else return first_iformat;
135 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
137 if(f) return f->next;
138 else return first_oformat;
141 void av_register_input_format(AVInputFormat *format)
145 while (*p != NULL) p = &(*p)->next;
150 void av_register_output_format(AVOutputFormat *format)
154 while (*p != NULL) p = &(*p)->next;
/**
 * Check whether filename's extension matches one of the entries in the
 * comma-separated list 'extensions' (case-insensitive).
 *
 * @return 1 on match, 0 otherwise (also when filename has no extension)
 */
int av_match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if (!filename)
        return 0;
    ext = strrchr(filename, '.');
    if (ext) {
        ext++;
        p = extensions;
        for (;;) {
            /* Copy the next comma-separated token, truncated to fit ext1. */
            q = ext1;
            while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
                *q++ = *p++;
            *q = '\0';
            if (!av_strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++;
        }
    }
    return 0;
}
/**
 * Case-insensitively match 'name' against the comma-separated list 'names'.
 *
 * @return 1 if one of the list entries equals name, 0 otherwise
 */
static int match_format(const char *name, const char *names)
{
    const char *p;
    int len, namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        /* Compare over the longer of the two lengths so that a list entry
         * that is a strict prefix of 'name' (or vice versa) does not match. */
        len = FFMAX(p - names, namelen);
        if (!av_strncasecmp(name, names, len))
            return 1;
        names = p + 1;
    }
    /* Last (or only) entry in the list. */
    return !av_strcasecmp(name, names);
}
/* Guess the best output format for the given short name, filename
 * extension and/or MIME type; the registered muxer with the highest
 * score wins. NOTE(review): this extraction is missing lines (the score
 * assignments, fmt_found update and closing braces) — do not treat the
 * block below as complete. */
204 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
205 const char *mime_type)
207 AVOutputFormat *fmt = NULL, *fmt_found;
208 int score_max, score;
210 /* specific test for image sequences */
211 #if CONFIG_IMAGE2_MUXER
212 if (!short_name && filename &&
213 av_filename_number_test(filename) &&
214 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
215 return av_guess_format("image2", NULL, NULL);
218 /* Find the proper file type. */
221 while ((fmt = av_oformat_next(fmt))) {
223 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
225 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
227 if (filename && fmt->extensions &&
228 av_match_ext(filename, fmt->extensions)) {
231 if (score > score_max) {
239 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
240 const char *filename, const char *mime_type, enum AVMediaType type){
241 if(type == AVMEDIA_TYPE_VIDEO){
242 enum CodecID codec_id= CODEC_ID_NONE;
244 #if CONFIG_IMAGE2_MUXER
245 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
246 codec_id= ff_guess_image2_codec(filename);
249 if(codec_id == CODEC_ID_NONE)
250 codec_id= fmt->video_codec;
252 }else if(type == AVMEDIA_TYPE_AUDIO)
253 return fmt->audio_codec;
254 else if (type == AVMEDIA_TYPE_SUBTITLE)
255 return fmt->subtitle_codec;
257 return CODEC_ID_NONE;
260 AVInputFormat *av_find_input_format(const char *short_name)
262 AVInputFormat *fmt = NULL;
263 while ((fmt = av_iformat_next(fmt))) {
264 if (match_format(short_name, fmt->name))
/* Clamp a requested read size so it cannot run past s->maxsize, refreshing
 * maxsize from avio_size() when the remaining budget looks too small.
 * NOTE(review): lines are missing from this extraction (the maxsize
 * refresh assignment, the size clamp and the final return). */
270 int ffio_limit(AVIOContext *s, int size)
273     int64_t remaining= s->maxsize - avio_tell(s);
274     if(remaining < size){
275         int64_t newsize= avio_size(s);
276         if(!s->maxsize || s->maxsize<newsize)
278         remaining= s->maxsize - avio_tell(s);
281     if(s->maxsize>=0 && remaining>=0 && remaining+1 < size){
282         av_log(0, AV_LOG_ERROR, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
/* Allocate a packet of (limited) 'size' bytes and fill it from the IO
 * context, recording the read position in pkt->pos. NOTE(review): error
 * handling and the return statement are missing from this extraction. */
289 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
292     size= ffio_limit(s, size);
294     ret= av_new_packet(pkt, size);
299     pkt->pos= avio_tell(s);
301     ret= avio_read(s, pkt->data, size);
/* shrink to what was actually read (short read near EOF) */
305     av_shrink_packet(pkt, ret);
/* Append up to 'size' more bytes from the IO context to an existing packet;
 * an empty packet degenerates to av_get_packet(). FFMAX(ret, 0) keeps the
 * already-present old_size bytes when the read fails. NOTE(review): the
 * guard conditions and return are missing from this extraction. */
310 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
315         return av_get_packet(s, pkt, size);
316     old_size = pkt->size;
317     ret = av_grow_packet(pkt, size);
320     ret = avio_read(s, pkt->data + old_size, size);
321     av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
/**
 * Check whether 'filename' contains a valid frame-number pattern
 * (succeeds iff av_get_frame_filename() can expand it for number 1).
 *
 * @return 1 if the pattern is usable, 0 otherwise
 */
int av_filename_number_test(const char *filename)
{
    char buf[1024];

    if (!filename)
        return 0;
    return av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0;
}
/* Probe every registered demuxer against the buffered data and return the
 * best-scoring one (score written to *score_ret). An ID3v2 header is
 * skipped first so tagging does not skew the probes. NOTE(review): several
 * lines (nodat handling, score assignment, fmt update, return) are missing
 * from this extraction. */
332 AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
334     AVProbeData lpd = *pd;
335     AVInputFormat *fmt1 = NULL, *fmt;
336     int score, nodat = 0, score_max=0;
338     if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
339         int id3len = ff_id3v2_tag_len(lpd.buf);
340         if (lpd.buf_size > id3len + 16) {
342             lpd.buf_size -= id3len;
/* AVFMT_NOFILE demuxers only probe when no file has been opened, and vice versa */
348     while ((fmt1 = av_iformat_next(fmt1))) {
349         if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
352         if (fmt1->read_probe) {
353             score = fmt1->read_probe(&lpd);
354             if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
355                 score = FFMAX(score, nodat ? AVPROBE_SCORE_MAX/4-1 : 1);
356         } else if (fmt1->extensions) {
357             if (av_match_ext(lpd.filename, fmt1->extensions)) {
361         if (score > score_max) {
364         }else if (score == score_max)
367     *score_ret= score_max;
372 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
375 AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
376 if(score_ret > *score_max){
377 *score_max= score_ret;
383 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
385 return av_probe_input_format2(pd, is_opened, &score);
/* Run the format probers over a single stream's buffered packets and, when
 * the winning demuxer is one of the raw formats in the table below, copy
 * the corresponding codec id/type into the stream. NOTE(review): the table
 * terminator, score declaration and return are missing from this
 * extraction. */
388 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
390     static const struct {
391         const char *name; enum CodecID id; enum AVMediaType type;
/* maps probed demuxer name -> elementary stream codec */
393         { "aac"      , CODEC_ID_AAC       , AVMEDIA_TYPE_AUDIO },
394         { "ac3"      , CODEC_ID_AC3       , AVMEDIA_TYPE_AUDIO },
395         { "dts"      , CODEC_ID_DTS       , AVMEDIA_TYPE_AUDIO },
396         { "eac3"     , CODEC_ID_EAC3      , AVMEDIA_TYPE_AUDIO },
397         { "h264"     , CODEC_ID_H264      , AVMEDIA_TYPE_VIDEO },
398         { "loas"     , CODEC_ID_AAC_LATM  , AVMEDIA_TYPE_AUDIO },
399         { "m4v"      , CODEC_ID_MPEG4     , AVMEDIA_TYPE_VIDEO },
400         { "mp3"      , CODEC_ID_MP3       , AVMEDIA_TYPE_AUDIO },
401         { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
405     AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
409         av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
410                pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
411         for (i = 0; fmt_id_type[i].name; i++) {
412             if (!strcmp(fmt->name, fmt_id_type[i].name)) {
413                 st->codec->codec_id   = fmt_id_type[i].id;
414                 st->codec->codec_type = fmt_id_type[i].type;
422 /************************************************************/
423 /* input media file */
425 #if FF_API_FORMAT_PARAMETERS
/* Translate the deprecated AVFormatParameters struct into the AVDictionary
 * options understood by avformat_open_input(); part of the
 * FF_API_FORMAT_PARAMETERS compatibility layer. NOTE(review): several guard
 * conditions and the return are missing from this extraction. */
426 static AVDictionary *convert_format_parameters(AVFormatParameters *ap)
429     AVDictionary *opts = NULL;
434     AV_NOWARN_DEPRECATED(
/* time_base is inverted to become a frame rate */
435     if (ap->time_base.num) {
436         snprintf(buf, sizeof(buf), "%d/%d", ap->time_base.den, ap->time_base.num);
437         av_dict_set(&opts, "framerate", buf, 0);
439     if (ap->sample_rate) {
440         snprintf(buf, sizeof(buf), "%d", ap->sample_rate);
441         av_dict_set(&opts, "sample_rate", buf, 0);
444         snprintf(buf, sizeof(buf), "%d", ap->channels);
445         av_dict_set(&opts, "channels", buf, 0);
447     if (ap->width || ap->height) {
448         snprintf(buf, sizeof(buf), "%dx%d", ap->width, ap->height);
449         av_dict_set(&opts, "video_size", buf, 0);
451     if (ap->pix_fmt != PIX_FMT_NONE) {
452         av_dict_set(&opts, "pixel_format", av_get_pix_fmt_name(ap->pix_fmt), 0);
455         snprintf(buf, sizeof(buf), "%d", ap->channel);
456         av_dict_set(&opts, "channel", buf, 0);
459         av_dict_set(&opts, "standard", ap->standard, 0);
461     if (ap->mpeg2ts_compute_pcr) {
462         av_dict_set(&opts, "mpeg2ts_compute_pcr", "1", 0);
464     if (ap->initial_pause) {
465         av_dict_set(&opts, "initial_pause", "1", 0);
472 * Open a media file from an IO stream. 'fmt' must be specified.
/* Deprecated wrapper: open a media file from an already-created IO stream
 * by converting the legacy AVFormatParameters and delegating to
 * avformat_open_input(). NOTE(review): cleanup/error paths are missing
 * from this extraction. */
474 int av_open_input_stream(AVFormatContext **ic_ptr,
475                          AVIOContext *pb, const char *filename,
476                          AVInputFormat *fmt, AVFormatParameters *ap)
481     AVFormatParameters default_ap;
485         memset(ap, 0, sizeof(default_ap));
487     opts = convert_format_parameters(ap);
489     AV_NOWARN_DEPRECATED(
490     if(!ap->prealloced_context)
491         *ic_ptr = ic = avformat_alloc_context();
496         err = AVERROR(ENOMEM);
499     if (pb && fmt && fmt->flags & AVFMT_NOFILE)
500         av_log(ic, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
501                                    "will be ignored with AVFMT_NOFILE format.\n");
505     if ((err = avformat_open_input(&ic, filename, fmt, &opts)) < 0)
507     ic->pb = ic->pb ? ic->pb : pb; // don't leak custom pb if it wasn't set above
/* Call the demuxer's read_header callback and record the post-header data
 * offset; used when AVFMT_FLAG_PRIV_OPT deferred header reading.
 * NOTE(review): error handling and return are missing from this
 * extraction. */
516 int av_demuxer_open(AVFormatContext *ic, AVFormatParameters *ap){
519     if (ic->iformat->read_header) {
520         err = ic->iformat->read_header(ic, ap);
525     if (ic->pb && !ic->data_offset)
526         ic->data_offset = avio_tell(ic->pb);
532 /** size of probe buffer, for guessing file type from file contents */
533 #define PROBE_BUF_MIN 2048
534 #define PROBE_BUF_MAX (1<<20)
/* Probe an opened IO context, reading progressively larger chunks
 * (PROBE_BUF_MIN doubling up to max_probe_size) until a demuxer is found,
 * then rewind the stream by reusing the probe buffer. The required score
 * drops to 0 only on the final, largest iteration. NOTE(review): several
 * lines (loop braces, buf assignment, error-path cleanup) are missing
 * from this extraction. */
536 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
537                           const char *filename, void *logctx,
538                           unsigned int offset, unsigned int max_probe_size)
540     AVProbeData pd = { filename ? filename : "", NULL, -offset };
541     unsigned char *buf = NULL;
542     int ret = 0, probe_size;
544     if (!max_probe_size) {
545         max_probe_size = PROBE_BUF_MAX;
546     } else if (max_probe_size > PROBE_BUF_MAX) {
547         max_probe_size = PROBE_BUF_MAX;
548     } else if (max_probe_size < PROBE_BUF_MIN) {
549         return AVERROR(EINVAL);
552     if (offset >= max_probe_size) {
553         return AVERROR(EINVAL);
556     for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
557         probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
558         int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
559         int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
562         if (probe_size < offset) {
566         /* read probe data */
567         buftmp = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
570             return AVERROR(ENOMEM);
573         if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
574             /* fail if error was not end of file, otherwise, lower score */
575             if (ret != AVERROR_EOF) {
580             ret = 0;            /* error was end of file, nothing read */
583         pd.buf = &buf[offset];
585         memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
587         /* guess file format */
588         *fmt = av_probe_input_format2(&pd, 1, &score);
590             if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
591                 av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
593                 av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
599         return AVERROR_INVALIDDATA;
602     /* rewind. reuse probe buffer to avoid seeking */
603     if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
609 #if FF_API_FORMAT_PARAMETERS
/* Deprecated wrapper around avformat_open_input(): converts the legacy
 * AVFormatParameters into a dictionary first. NOTE(review): lines
 * (context reset, opts cleanup, return) are missing from this
 * extraction. */
610 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
613                        AVFormatParameters *ap)
616     AVDictionary *opts = convert_format_parameters(ap);
618     AV_NOWARN_DEPRECATED(
619     if (!ap || !ap->prealloced_context)
623     err = avformat_open_input(ic_ptr, filename, fmt, &opts);
630 /* open input file and probe the format if necessary */
/* Open the input file (unless the caller supplied a custom pb or the
 * demuxer is AVFMT_NOFILE) and probe the format when it was not forced.
 * NOTE(review): several guard lines and returns are missing from this
 * extraction. */
631 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
634     AVProbeData pd = {filename, NULL, 0};
636     if(s->iformat && !strlen(filename))
/* caller-provided pb: mark it so avformat_free_context() won't close it */
640         s->flags |= AVFMT_FLAG_CUSTOM_IO;
642             return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
643         else if (s->iformat->flags & AVFMT_NOFILE)
644             av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
645                                       "will be ignored with AVFMT_NOFILE format.\n");
649     if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
650         (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
653     if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
654                           &s->interrupt_callback, options)) < 0)
658     return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
/* Main entry point for opening an input: allocate/validate the context,
 * apply options, open and probe the input (init_input), allocate demuxer
 * private data, read ID3v2 metadata and the container header.
 * NOTE(review): many lines (error labels, fail path, success return) are
 * missing from this extraction. */
661 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
663     AVFormatContext *s = *ps;
665     AVFormatParameters ap = { { 0 } };
666     AVDictionary *tmp = NULL;
668     if (!s && !(s = avformat_alloc_context()))
669         return AVERROR(ENOMEM);
/* work on a copy so the caller's dict is only consumed on success */
674         av_dict_copy(&tmp, *options, 0);
676     if ((ret = av_opt_set_dict(s, &tmp)) < 0)
679     if ((ret = init_input(s, filename, &tmp)) < 0)
682     /* check filename in case an image number is expected */
683     if (s->iformat->flags & AVFMT_NEEDNUMBER) {
684         if (!av_filename_number_test(filename)) {
685             ret = AVERROR(EINVAL);
690     s->duration = s->start_time = AV_NOPTS_VALUE;
691     av_strlcpy(s->filename, filename, sizeof(s->filename));
693     /* allocate private data */
694     if (s->iformat->priv_data_size > 0) {
695         if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
696             ret = AVERROR(ENOMEM);
699         if (s->iformat->priv_class) {
700             *(const AVClass**)s->priv_data = s->iformat->priv_class;
701             av_opt_set_defaults(s->priv_data);
702             if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
707     /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
709         ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC);
711     if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
712         if ((ret = s->iformat->read_header(s, &ap)) < 0)
715     if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
716         s->data_offset = avio_tell(s->pb);
718     s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
721         av_dict_free(options);
/* failure path: close pb only if we opened it ourselves */
729     if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
731     avformat_free_context(s);
736 /*******************************************************/
/* Append a packet to a singly linked packet list, maintaining the tail
 * pointer in *plast_pktl for O(1) appends; returns a pointer to the stored
 * copy (NULL on allocation failure — the return lines are missing from
 * this extraction). */
738 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
739                                AVPacketList **plast_pktl){
740     AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
745         (*plast_pktl)->next = pktl;
747         *packet_buffer = pktl;
749     /* add the packet in the buffered packet list */
/* Read one raw (undecoded, unparsed) packet. Packets held back for stream
 * probing are served from raw_packet_buffer first; freshly read packets may
 * be buffered there while request_probe is active, feeding
 * set_codec_from_probe_data(). NOTE(review): many lines (loop structure,
 * returns, braces) are missing from this extraction. */
755 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
761         AVPacketList *pktl = s->raw_packet_buffer;
/* request_probe <= 0 means the stream is identified: release the packet */
765             if(s->streams[pkt->stream_index]->request_probe <= 0){
766                 s->raw_packet_buffer = pktl->next;
767                 s->raw_packet_buffer_remaining_size += pkt->size;
774         ret= s->iformat->read_packet(s, pkt);
776             if (!pktl || ret == AVERROR(EAGAIN))
778             for (i = 0; i < s->nb_streams; i++)
779                 if(s->streams[i]->request_probe > 0)
780                     s->streams[i]->request_probe = -1;
784         if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
785             (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
786             av_log(s, AV_LOG_WARNING,
787                    "Dropped corrupted packet (stream = %d)\n",
793         if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
794             av_packet_merge_side_data(pkt);
795
796         if(pkt->stream_index >= (unsigned)s->nb_streams){
797             av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
801         st= s->streams[pkt->stream_index];
/* honor caller-forced codec ids */
803         switch(st->codec->codec_type){
804         case AVMEDIA_TYPE_VIDEO:
805             if(s->video_codec_id)   st->codec->codec_id= s->video_codec_id;
807         case AVMEDIA_TYPE_AUDIO:
808             if(s->audio_codec_id)   st->codec->codec_id= s->audio_codec_id;
810         case AVMEDIA_TYPE_SUBTITLE:
811             if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
815         if(!pktl && st->request_probe <= 0)
818         add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
819         s->raw_packet_buffer_remaining_size -= pkt->size;
821         if(st->request_probe>0){
822             AVProbeData *pd = &st->probe_data;
824             av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
/* accumulate packet payloads into the per-stream probe buffer */
827             pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
828             memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
829             pd->buf_size += pkt->size;
830             memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
832             end=    s->raw_packet_buffer_remaining_size <= 0
833                  || st->probe_packets<=0;
/* only re-probe when the buffer size crossed a power of two, or at the end */
835             if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
836                 int score= set_codec_from_probe_data(s, st, pd);
837                 if(    (st->codec->codec_id != CODEC_ID_NONE && score > AVPROBE_SCORE_MAX/4)
841                     st->request_probe= -1;
842                     if(st->codec->codec_id != CODEC_ID_NONE){
843                         av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
845                         av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
852 /**********************************************************/
855 * Get the number of samples of an audio frame. Return -1 on error.
/* Derive the number of samples in an audio packet: from bits-per-sample
 * for PCM-like codecs, from bitrate for fixed-rate codecs (e.g. ADPCM),
 * or from the codec's fixed frame_size. NOTE(review): the early returns
 * and -1 error paths are missing from this extraction. */
857 static int get_audio_frame_size(AVCodecContext *enc, int size)
/* Vorbis frame sizes vary per packet; handled specially */
861     if(enc->codec_id == CODEC_ID_VORBIS)
864     if (enc->frame_size <= 1) {
865         int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
867         if (bits_per_sample) {
868             if (enc->channels == 0)
870             frame_size = (size << 3) / (bits_per_sample * enc->channels);
872             /* used for example by ADPCM codecs */
873             if (enc->bit_rate == 0)
875             frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
878         frame_size = enc->frame_size;
885 * Return the frame duration in seconds. Return 0 if not available.
/* Compute a packet's duration as the rational *pnum / *pden (in seconds):
 * video from frame rate / time base (scaled by repeat_pict), audio from
 * sample count / sample rate. NOTE(review): default initialization of
 * *pnum/*pden and several braces are missing from this extraction. */
887 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
888                                    AVCodecParserContext *pc, AVPacket *pkt)
894     switch(st->codec->codec_type) {
895     case AVMEDIA_TYPE_VIDEO:
896         if (st->r_frame_rate.num && !pc) {
897             *pnum = st->r_frame_rate.den;
898             *pden = st->r_frame_rate.num;
/* sanity check: reject absurdly small time bases (>1000 fps) */
899         } else if(st->time_base.num*1000LL > st->time_base.den) {
900             *pnum = st->time_base.num;
901             *pden = st->time_base.den;
902         }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
903             *pnum = st->codec->time_base.num;
904             *pden = st->codec->time_base.den;
905             if (pc && pc->repeat_pict) {
906                 *pnum = (*pnum) * (1 + pc->repeat_pict);
908             //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
909             //Thus if we have no parser in such case leave duration undefined.
910             if(st->codec->ticks_per_frame>1 && !pc){
915     case AVMEDIA_TYPE_AUDIO:
916         frame_size = get_audio_frame_size(st->codec, pkt->size);
917         if (frame_size <= 0 || st->codec->sample_rate <= 0)
920         *pden = st->codec->sample_rate;
/* Return whether every frame of the codec is independently decodable:
 * all audio is treated as intra-only, video only for the listed codecs.
 * NOTE(review): return statements and part of the codec-id list are
 * missing from this extraction. */
927 static int is_intra_only(AVCodecContext *enc){
928     if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
930     }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
931         switch(enc->codec_id){
933         case CODEC_ID_MJPEGB:
935         case CODEC_ID_PRORES:
936         case CODEC_ID_RAWVIDEO:
937         case CODEC_ID_DVVIDEO:
938         case CODEC_ID_HUFFYUV:
939         case CODEC_ID_FFVHUFF:
944         case CODEC_ID_JPEG2000:
/* Once the first DTS of a stream becomes known, shift the timestamps of all
 * already-buffered packets of that stream by first_dts and derive the
 * stream's start_time. Runs only once per stream (guarded on first_dts).
 * NOTE(review): a few lines (return, braces) are missing from this
 * extraction. */
952 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
953                                       int64_t dts, int64_t pts)
955     AVStream *st= s->streams[stream_index];
956     AVPacketList *pktl= s->packet_buffer;
958     if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
961     st->first_dts= dts - st->cur_dts;
964     for(; pktl; pktl= pktl->next){
965         if(pktl->pkt.stream_index != stream_index)
967         //FIXME think more about this check
968         if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
969             pktl->pkt.pts += st->first_dts;
971         if(pktl->pkt.dts != AV_NOPTS_VALUE)
972             pktl->pkt.dts += st->first_dts;
974         if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
975             st->start_time= pktl->pkt.pts;
977     if (st->start_time == AV_NOPTS_VALUE)
978         st->start_time = pts;
/* Back-fill DTS/PTS/duration for buffered packets of a stream that so far
 * carried no timestamps, spacing them pkt->duration apart; first works
 * backwards from first_dts (or forward from 0 via cur_dts) to find the
 * starting point. NOTE(review): several lines (cur_dts declaration, early
 * returns, braces) are missing from this extraction. */
981 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
983     AVPacketList *pktl= s->packet_buffer;
986     if(st->first_dts != AV_NOPTS_VALUE){
987         cur_dts= st->first_dts;
/* walk backwards over the timestamp-less run to find its start DTS */
988         for(; pktl; pktl= pktl->next){
989             if(pktl->pkt.stream_index == pkt->stream_index){
990                 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
992                 cur_dts -= pkt->duration;
995         pktl= s->packet_buffer;
996         st->first_dts = cur_dts;
997     }else if(st->cur_dts)
1000     for(; pktl; pktl= pktl->next){
1001         if(pktl->pkt.stream_index != pkt->stream_index)
1003         if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
1004            && !pktl->pkt.duration){
1005             pktl->pkt.dts= cur_dts;
1006             if(!st->codec->has_b_frames)
1007                 pktl->pkt.pts= cur_dts;
1008             cur_dts += pkt->duration;
1009             pktl->pkt.duration= pkt->duration;
1013     if(st->first_dts == AV_NOPTS_VALUE)
1014         st->cur_dts= cur_dts;
/* Fill in missing PTS/DTS/duration on a freshly demuxed packet using frame
 * durations, parser state (dts_sync_point) and per-stream history
 * (cur_dts, last_IP_pts, pts_buffer). Skipped entirely under
 * AVFMT_FLAG_NOFILLIN. This is intricate, order-dependent timestamp logic;
 * NOTE(review): many lines (returns, braces, else arms) are missing from
 * this extraction — do not treat the block below as complete. */
1017 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
1018                                AVCodecParserContext *pc, AVPacket *pkt)
1020     int num, den, presentation_delayed, delay, i;
1023     if (s->flags & AVFMT_FLAG_NOFILLIN)
1026     if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
1027         pkt->dts= AV_NOPTS_VALUE;
1029     if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
1030         //FIXME Set low_delay = 0 when has_b_frames = 1
1031         st->codec->has_b_frames = 1;
1033     /* do we have a video B-frame ? */
1034     delay= st->codec->has_b_frames;
1035     presentation_delayed = 0;
1037     /* XXX: need has_b_frame, but cannot get it if the codec is
1040         pc && pc->pict_type != AV_PICTURE_TYPE_B)
1041         presentation_delayed = 1;
/* undo timestamp wrap-around when dts jumped a full wrap ahead of pts */
1043     if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > pkt->pts && st->pts_wrap_bits<63){
1044         pkt->dts -= 1LL<<st->pts_wrap_bits;
1047     // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
1048     // we take the conservative approach and discard both
1049     // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
1050     if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
1051         av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1052         pkt->dts= AV_NOPTS_VALUE;
1055     if (pkt->duration == 0) {
1056         compute_frame_duration(&num, &den, st, pc, pkt);
1058             pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
1060             if(pkt->duration != 0 && s->packet_buffer)
1061                 update_initial_durations(s, st, pkt);
1065     /* correct timestamps with byte offset if demuxers only have timestamps
1066        on packet boundaries */
1067     if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1068         /* this will estimate bitrate based on this frame's duration and size */
1069         offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1070         if(pkt->pts != AV_NOPTS_VALUE)
1072         if(pkt->dts != AV_NOPTS_VALUE)
1076     if (pc && pc->dts_sync_point >= 0) {
1077         // we have synchronization info from the parser
1078         int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1080             int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1081             if (pkt->dts != AV_NOPTS_VALUE) {
1082                 // got DTS from the stream, update reference timestamp
1083                 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1084                 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1085             } else if (st->reference_dts != AV_NOPTS_VALUE) {
1086                 // compute DTS based on reference timestamp
1087                 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1088                 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1090             if (pc->dts_sync_point > 0)
1091                 st->reference_dts = pkt->dts; // new reference
1095     /* This may be redundant, but it should not hurt. */
1096     if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1097         presentation_delayed = 1;
1099 //    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
1100     /* interpolate PTS and DTS if they are not present */
1101     //We skip H264 currently because delay and has_b_frames are not reliably set
1102     if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1103         if (presentation_delayed) {
1104             /* DTS = decompression timestamp */
1105             /* PTS = presentation timestamp */
1106             if (pkt->dts == AV_NOPTS_VALUE)
1107                 pkt->dts = st->last_IP_pts;
1108             update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1109             if (pkt->dts == AV_NOPTS_VALUE)
1110                 pkt->dts = st->cur_dts;
1112             /* this is tricky: the dts must be incremented by the duration
1113                of the frame we are displaying, i.e. the last I- or P-frame */
1114             if (st->last_IP_duration == 0)
1115                 st->last_IP_duration = pkt->duration;
1116             if(pkt->dts != AV_NOPTS_VALUE)
1117                 st->cur_dts = pkt->dts + st->last_IP_duration;
1118             st->last_IP_duration  = pkt->duration;
1119             st->last_IP_pts= pkt->pts;
1120             /* cannot compute PTS if not present (we can compute it only
1121                by knowing the future */
1122         } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
1123             if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
1124                 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
1125                 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1126                 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
1127                     pkt->pts += pkt->duration;
1128 //                    av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
1132             /* presentation is not delayed : PTS and DTS are the same */
1133             if(pkt->pts == AV_NOPTS_VALUE)
1134                 pkt->pts = pkt->dts;
1135             update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
1136             if(pkt->pts == AV_NOPTS_VALUE)
1137                 pkt->pts = st->cur_dts;
1138             pkt->dts = pkt->pts;
1139             if(pkt->pts != AV_NOPTS_VALUE)
1140                 st->cur_dts = pkt->pts + pkt->duration;
1144     if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
/* keep a sorted window of recent PTS values to reconstruct missing DTS */
1145         st->pts_buffer[0]= pkt->pts;
1146         for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1147             FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1148         if(pkt->dts == AV_NOPTS_VALUE)
1149             pkt->dts= st->pts_buffer[0];
1150         if(st->codec->codec_id == CODEC_ID_H264){ // we skipped it above so we try here
1151             update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1153         if(pkt->dts > st->cur_dts)
1154             st->cur_dts = pkt->dts;
1157 //    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
1160     if(is_intra_only(st->codec))
1161         pkt->flags |= AV_PKT_FLAG_KEY;
1164         /* keyframe computation */
1165         if (pc->key_frame == 1)
1166             pkt->flags |= AV_PKT_FLAG_KEY;
1167         else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I)
1168             pkt->flags |= AV_PKT_FLAG_KEY;
1171         pkt->convergence_duration = pc->convergence_duration;
/* Produce the next complete frame: either hand out the pending raw packet
 * unchanged, or feed the stream's parser (av_parser_parse2) until it emits
 * a frame; lazily reads new raw packets via av_read_packet() and lazily
 * creates parsers. Also feeds the generic index for seekable formats.
 * NOTE(review): the outer loop, several braces and returns are missing
 * from this extraction — do not treat the block below as complete. */
1175 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1180     av_init_packet(pkt);
1183         /* select current input stream component */
1186             if (!st->need_parsing || !st->parser) {
1187                 /* no parsing needed: we just output the packet as is */
1188                 /* raw data support */
1190                 st->cur_pkt.data= NULL;
1191                 st->cur_pkt.side_data_elems = 0;
1192                 st->cur_pkt.side_data = NULL;
1193                 compute_pkt_fields(s, st, NULL, pkt);
1195                 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1196                     (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1197                     ff_reduce_index(s, st->index);
1198                     av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1201             } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
1202                 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
1203                                        st->cur_ptr, st->cur_len,
1204                                        st->cur_pkt.pts, st->cur_pkt.dts,
/* timestamps are consumed by the parser on first use */
1206                 st->cur_pkt.pts = AV_NOPTS_VALUE;
1207                 st->cur_pkt.dts = AV_NOPTS_VALUE;
1208                 /* increment read pointer */
1212                 /* return packet if any */
1216                     pkt->stream_index = st->index;
1217                     pkt->pts = st->parser->pts;
1218                     pkt->dts = st->parser->dts;
1219                     pkt->pos = st->parser->pos;
1220                     if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
/* parser returned the input buffer unchanged: steal ownership */
1222                         pkt->destruct= st->cur_pkt.destruct;
1223                         st->cur_pkt.destruct= NULL;
1224                         st->cur_pkt.data = NULL;
1225                         assert(st->cur_len == 0);
1227                         pkt->destruct = NULL;
1229                     compute_pkt_fields(s, st, st->parser, pkt);
1231                     if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
1232                         int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? pkt->pos : st->parser->frame_offset;
1233                         ff_reduce_index(s, st->index);
1234                         av_add_index_entry(st, pos, pkt->dts,
1235                                            0, 0, AVINDEX_KEYFRAME);
1242                 av_free_packet(&st->cur_pkt);
1247         /* read next packet */
1248         ret = av_read_packet(s, &cur_pkt);
1250             if (ret == AVERROR(EAGAIN))
1252             /* return the last frames, if any */
1253             for(i = 0; i < s->nb_streams; i++) {
1255                 if (st->parser && st->need_parsing) {
/* flush the parser with a NULL/0 input */
1256                     av_parser_parse2(st->parser, st->codec,
1257                                     &pkt->data, &pkt->size,
1259                                     AV_NOPTS_VALUE, AV_NOPTS_VALUE,
1265             /* no more packets: really terminate parsing */
1268         st = s->streams[cur_pkt.stream_index];
1269         st->cur_pkt= cur_pkt;
1271         if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1272            st->cur_pkt.dts != AV_NOPTS_VALUE &&
1273            st->cur_pkt.pts < st->cur_pkt.dts){
1274             av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1275                    st->cur_pkt.stream_index,
1279 //            av_free_packet(&st->cur_pkt);
1283         if(s->debug & FF_FDEBUG_TS)
1284             av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1285                    st->cur_pkt.stream_index,
1289                    st->cur_pkt.duration,
1293         st->cur_ptr = st->cur_pkt.data;
1294         st->cur_len = st->cur_pkt.size;
1295         if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1296             st->parser = av_parser_init(st->codec->codec_id);
1298                 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1299                        "%s, packets or times may be invalid.\n",
1300                        avcodec_get_name(st->codec->codec_id));
1301                 /* no parser available: just output the raw packets */
1302                 st->need_parsing = AVSTREAM_PARSE_NONE;
1303             }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1304                 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1305             }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
1306                 st->parser->flags |= PARSER_FLAG_ONCE;
1311     if(s->debug & FF_FDEBUG_TS)
1312         av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
/* Public read API: return the next frame, serving from the interleaving
 * packet_buffer first. With AVFMT_FLAG_GENPTS, missing PTS are derived
 * from the DTS of a later packet of the same stream (modular comparison
 * handles timestamp wrap). NOTE(review): loop structure, braces and parts
 * of the buffering path are missing from this extraction. */
1323 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1327     const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1330         pktl = s->packet_buffer;
1332             AVPacket *next_pkt= &pktl->pkt;
1334             if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1335                 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1336                 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1337                     if(   pktl->pkt.stream_index == next_pkt->stream_index
1338                        && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
1339                        &&  av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1340                         next_pkt->pts= pktl->pkt.dts;
1344                 pktl = s->packet_buffer;
1347             if(   next_pkt->pts != AV_NOPTS_VALUE
1348                || next_pkt->dts == AV_NOPTS_VALUE
1350                 /* read packet from packet buffer, if there is data */
1352                 s->packet_buffer = pktl->next;
1358             int ret= read_frame_internal(s, pkt);
1360                 if(pktl && ret != AVERROR(EAGAIN)){
/* buffer the fresh packet so genpts can look ahead */
1367             if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1368                                            &s->packet_buffer_end)) < 0)
1369                 return AVERROR(ENOMEM);
1371             assert(!s->packet_buffer);
1372             return read_frame_internal(s, pkt);
1377 /* XXX: suppress the packet queue */
/**
 * Free every queued packet (both the parsed packet buffer and the raw
 * packet buffer) and reset the buffer end pointers and the raw-buffer
 * size budget back to RAW_PACKET_BUFFER_SIZE.
 */
1378 static void flush_packet_queue(AVFormatContext *s)
1383     pktl = s->packet_buffer;
1386         s->packet_buffer = pktl->next;
1387         av_free_packet(&pktl->pkt);
     /* Drain the raw (pre-parser) packet queue as well. */
1390     while(s->raw_packet_buffer){
1391         pktl = s->raw_packet_buffer;
1392         s->raw_packet_buffer = pktl->next;
1393         av_free_packet(&pktl->pkt);
1396     s->packet_buffer_end=
1397     s->raw_packet_buffer_end= NULL;
1398     s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1401 /*******************************************************/
/**
 * Pick a default stream for seeking/indexing: the first video stream if
 * one exists (returned from inside the loop, elided here), otherwise the
 * first audio stream, otherwise stream 0.
 */
1404 int av_find_default_stream_index(AVFormatContext *s)
1406     int first_audio_index = -1;
1410     if (s->nb_streams <= 0)
1412     for(i = 0; i < s->nb_streams; i++) {
1414         if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1417         if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1418             first_audio_index = i;
     /* No video stream found: fall back to audio, then to stream 0. */
1420     return first_audio_index >= 0 ? first_audio_index : 0;
1424 * Flush the frame reader.
/**
 * Flush all demuxer-side read state: drop queued packets, close each
 * stream's parser, free its pending packet, and reset timestamp
 * bookkeeping (cur_dts, reference_dts, pts reorder buffer, probe count)
 * so that reading can restart cleanly after a seek.
 */
1426 void ff_read_frame_flush(AVFormatContext *s)
1431     flush_packet_queue(s);
1435     /* for each stream, reset read state */
1436     for(i = 0; i < s->nb_streams; i++) {
1440             av_parser_close(st->parser);
1442             av_free_packet(&st->cur_pkt);
1444         st->last_IP_pts = AV_NOPTS_VALUE;
         /* If the stream's origin dts is unknown, restart counting from 0;
          * otherwise mark cur_dts unknown until the next packet pins it. */
1445         if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = 0;
1446         else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1447         st->reference_dts = AV_NOPTS_VALUE;
1452         st->probe_packets = MAX_PROBE_PACKETS;
1454         for(j=0; j<MAX_REORDER_DELAY+1; j++)
1455             st->pts_buffer[j]= AV_NOPTS_VALUE;
1459 #if FF_API_SEEK_PUBLIC
/* Deprecated public wrapper kept for ABI compatibility; forwards to
 * ff_update_cur_dts(). */
1460 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1462     ff_update_cur_dts(s, ref_st, timestamp);
/**
 * After a seek, set every stream's cur_dts to @p timestamp, rescaling it
 * from the reference stream's time base into each stream's own time base.
 */
1466 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1470     for(i = 0; i < s->nb_streams; i++) {
1471         AVStream *st = s->streams[i];
         /* timestamp * (ref_tb / st_tb), done in 64-bit to avoid overflow. */
1473         st->cur_dts = av_rescale(timestamp,
1474                                  st->time_base.den * (int64_t)ref_st->time_base.num,
1475                                  st->time_base.num * (int64_t)ref_st->time_base.den);
/**
 * Keep the seek index of a stream within s->max_index_size bytes by
 * discarding every second entry once the limit is reached (halving the
 * index density rather than truncating it).
 */
1479 void ff_reduce_index(AVFormatContext *s, int stream_index)
1481     AVStream *st= s->streams[stream_index];
1482     unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1484     if((unsigned)st->nb_index_entries >= max_entries){
1486         for(i=0; 2*i<st->nb_index_entries; i++)
1487             st->index_entries[i]= st->index_entries[2*i];
1488         st->nb_index_entries= i;
/**
 * Insert an index entry (pos/timestamp/size/distance/flags) into a
 * timestamp-sorted index array, growing the array as needed.
 * Entries with an equal timestamp update the existing slot instead of
 * inserting a new one. Returns the entry's position in the array
 * (return statement elided in this view — confirm against full source).
 */
1492 int ff_add_index_entry(AVIndexEntry **index_entries,
1493                        int *nb_index_entries,
1494                        unsigned int *index_entries_allocated_size,
1495                        int64_t pos, int64_t timestamp, int size, int distance, int flags)
1497     AVIndexEntry *entries, *ie;
     /* Guard against (nb+1)*sizeof overflowing the allocation size. */
1500     if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1503     entries = av_fast_realloc(*index_entries,
1504                               index_entries_allocated_size,
1505                               (*nb_index_entries + 1) *
1506                               sizeof(AVIndexEntry));
1510     *index_entries= entries;
     /* Find where the new timestamp belongs; AVSEEK_FLAG_ANY = exact search. */
1512     index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1515         index= (*nb_index_entries)++;
1516         ie= &entries[index];
1517         assert(index==0 || ie[-1].timestamp < timestamp);
1519         ie= &entries[index];
1520         if(ie->timestamp != timestamp){
1521             if(ie->timestamp <= timestamp)
             /* Shift the tail up by one slot to make room at 'index'. */
1523             memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1524             (*nb_index_entries)++;
1525         }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1526             distance= ie->min_distance;
1530     ie->timestamp = timestamp;
1531     ie->min_distance= distance;
/**
 * Public wrapper: add an index entry to @p st's own index arrays.
 * See ff_add_index_entry() for semantics.
 */
1538 int av_add_index_entry(AVStream *st,
1539                        int64_t pos, int64_t timestamp, int size, int distance, int flags)
1541     return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1542                               &st->index_entries_allocated_size, pos,
1543                               timestamp, size, distance, flags);
/**
 * Binary-search a sorted index for @p wanted_timestamp.
 * AVSEEK_FLAG_BACKWARD selects the entry at or before the timestamp,
 * otherwise the one at or after; without AVSEEK_FLAG_ANY the result is
 * additionally walked to the nearest keyframe entry.
 * (Initialization of the a/b bounds and the final return are elided in
 * this view.)
 */
1546 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1547                               int64_t wanted_timestamp, int flags)
1555     //optimize appending index entries at the end
1556     if(b && entries[b-1].timestamp < wanted_timestamp)
1561         timestamp = entries[m].timestamp;
1562         if(timestamp >= wanted_timestamp)
1564         if(timestamp <= wanted_timestamp)
1567     m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1569     if(!(flags & AVSEEK_FLAG_ANY)){
         /* Step backward/forward until a keyframe entry is found or we
          * run off either end of the index. */
1570         while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1571             m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/**
 * Public wrapper: search @p st's own index for @p wanted_timestamp.
 * See ff_index_search_timestamp() for flag semantics.
 */
1580 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1583     return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1584                                      wanted_timestamp, flags);
1587 #if FF_API_SEEK_PUBLIC
/* Deprecated public wrapper kept for ABI compatibility; forwards to
 * ff_seek_frame_binary(). */
1588 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1589     return ff_seek_frame_binary(s, stream_index, target_ts, flags);
/**
 * Seek to @p target_ts using binary search over the demuxer's
 * read_timestamp() callback. Existing index entries are used to narrow
 * the initial [pos_min, pos_max] / [ts_min, ts_max] search window before
 * ff_gen_search() does the actual bisection; on success the file position
 * is updated, read state flushed and cur_dts propagated to all streams.
 */
1593 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1595     AVInputFormat *avif= s->iformat;
1596     int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1597     int64_t ts_min, ts_max, ts;
1602     if (stream_index < 0)
1605     av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1608     ts_min= AV_NOPTS_VALUE;
1609     pos_limit= -1; //gcc falsely says it may be uninitialized
1611     st= s->streams[stream_index];
1612     if(st->index_entries){
         /* Lower bound: nearest index entry at or before the target. */
1615         index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1616         index= FFMAX(index, 0);
1617         e= &st->index_entries[index];
1619         if(e->timestamp <= target_ts || e->pos == e->min_distance){
1621             ts_min= e->timestamp;
1622             av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
         /* Upper bound: nearest index entry at or after the target. */
1628         index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1629         assert(index < st->nb_index_entries);
1631             e= &st->index_entries[index];
1632             assert(e->timestamp >= target_ts);
1634             ts_max= e->timestamp;
1635             pos_limit= pos_max - e->min_distance;
1636             av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1637                     pos_max,pos_limit, ts_max);
1641     pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1646     if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1649     ff_read_frame_flush(s);
1650     ff_update_cur_dts(s, st, ts);
1655 #if FF_API_SEEK_PUBLIC
/* Deprecated public wrapper kept for ABI compatibility; forwards to
 * ff_gen_search(). */
1656 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1657                       int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1658                       int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1659                       int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1661     return ff_gen_search(s, stream_index, target_ts, pos_min, pos_max,
1662                          pos_limit, ts_min, ts_max, flags, ts_ret,
/**
 * Generic timestamp bisection search.
 * Given (or after probing) a bracketing window [pos_min,pos_max] with
 * timestamps [ts_min,ts_max], repeatedly probes the file via
 * @p read_timestamp, using linear interpolation first, then plain
 * bisection, then linear search, until the window collapses. Returns the
 * chosen file position and stores the timestamp found there in *ts_ret.
 * NOTE(review): loop-exit/return statements are elided in this view.
 */
1667 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1668                       int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1669                       int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1670                       int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1673     int64_t start_pos, filesize;
1676     av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
     /* Establish the lower bound from the start of the data if the caller
      * did not supply one. */
1678     if(ts_min == AV_NOPTS_VALUE){
1679         pos_min = s->data_offset;
1680         ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1681         if (ts_min == AV_NOPTS_VALUE)
1685     if(ts_min >= target_ts){
     /* Establish the upper bound by probing backwards from end of file
      * until a timestamp is found. */
1690     if(ts_max == AV_NOPTS_VALUE){
1692         filesize = avio_size(s->pb);
1693         pos_max = filesize - 1;
1696             ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1698         }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1699         if (ts_max == AV_NOPTS_VALUE)
         /* Advance pos_max to the last position still carrying ts_max. */
1703             int64_t tmp_pos= pos_max + 1;
1704             int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1705             if(tmp_ts == AV_NOPTS_VALUE)
1709             if(tmp_pos >= filesize)
1715     if(ts_max <= target_ts){
1720     if(ts_min > ts_max){
1722     }else if(ts_min == ts_max){
     /* Main refinement loop: shrink [pos_min, pos_limit] around target_ts. */
1727     while (pos_min < pos_limit) {
1728         av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1729                 pos_min, pos_max, ts_min, ts_max);
1730         assert(pos_limit <= pos_max);
1733             int64_t approximate_keyframe_distance= pos_max - pos_limit;
1734             // interpolate position (better than dichotomy)
1735             pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1736                 + pos_min - approximate_keyframe_distance;
1737         }else if(no_change==1){
1738             // bisection, if interpolation failed to change min or max pos last time
1739             pos = (pos_min + pos_limit)>>1;
1741             /* linear search if bisection failed, can only happen if there
1742                are very few or no keyframes between min/max */
1747         else if(pos > pos_limit)
1751         ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1756         av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1757                 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1758                 pos_limit, start_pos, no_change);
1759         if(ts == AV_NOPTS_VALUE){
1760             av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1763         assert(ts != AV_NOPTS_VALUE);
         /* Narrow the window on the side of the probe result. */
1764         if (target_ts <= ts) {
1765             pos_limit = start_pos - 1;
1769         if (target_ts >= ts) {
     /* Pick the boundary matching the seek direction requested by flags. */
1775     pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1776     ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min  : ts_max;
1779     ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1781     ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1782     av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1783             pos, ts_min, target_ts, ts_max);
/**
 * Seek by raw byte position: clamp @p pos to [data_offset, filesize-1],
 * move the I/O context there and update stream dts bookkeeping.
 */
1789 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1790     int64_t pos_min, pos_max;
1794     if (stream_index < 0)
1797     st= s->streams[stream_index];
1800     pos_min = s->data_offset;
1801     pos_max = avio_size(s->pb) - 1;
     /* Clamp the requested byte offset into the valid data range. */
1803     if     (pos < pos_min) pos= pos_min;
1804     else if(pos > pos_max) pos= pos_max;
1806     avio_seek(s->pb, pos, SEEK_SET);
1809     av_update_cur_dts(s, st, ts);
/**
 * Generic index-based seek fallback for formats without read_timestamp().
 * Looks the target up in the stream's index; if the target lies beyond
 * the indexed region, reads forward packet-by-packet (building index
 * entries as a side effect) until a keyframe past @p timestamp is found,
 * then retries the index lookup and seeks to the matching entry.
 */
1814 static int seek_frame_generic(AVFormatContext *s,
1815                               int stream_index, int64_t timestamp, int flags)
1822     st = s->streams[stream_index];
1824     index = av_index_search_timestamp(st, timestamp, flags);
     /* Target precedes the whole index: cannot seek backward past it. */
1826     if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1829     if(index < 0 || index==st->nb_index_entries-1){
         /* Position at the last indexed point (or data start) and scan
          * forward for a usable keyframe. */
1833         if(st->nb_index_entries){
1834             assert(st->index_entries);
1835             ie= &st->index_entries[st->nb_index_entries-1];
1836             if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1838             ff_update_cur_dts(s, st, ie->timestamp);
1840             if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1846                 read_status = av_read_frame(s, &pkt);
1847             } while (read_status == AVERROR(EAGAIN));
1848             if (read_status < 0)
1850             av_free_packet(&pkt);
1851             if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1852                 if(pkt.flags & AV_PKT_FLAG_KEY)
                 /* Bail out if the stream appears to contain no keyframes
                  * after the target (avoids scanning to EOF). */
1854                 if(nonkey++ > 1000 && st->codec->codec_id != CODEC_ID_CDGRAPHICS){
1855                     av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1860         index = av_index_search_timestamp(st, timestamp, flags);
1865         ff_read_frame_flush(s);
1866         AV_NOWARN_DEPRECATED(
1867         if (s->iformat->read_seek){
1868             if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1872     ie = &st->index_entries[index];
1873     if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1875     ff_update_cur_dts(s, st, ie->timestamp);
/**
 * Public seek entry point. Dispatch order:
 *   1. byte seek (AVSEEK_FLAG_BYTE) unless the demuxer forbids it,
 *   2. the demuxer's own read_seek(),
 *   3. binary search via read_timestamp() (unless AVFMT_NOBINSEARCH),
 *   4. generic index/scan seek (unless AVFMT_NOGENSEARCH).
 * A negative stream_index selects the default stream and rescales the
 * timestamp from AV_TIME_BASE into that stream's time base.
 */
1880 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1885     if (flags & AVSEEK_FLAG_BYTE) {
1886         if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1888         ff_read_frame_flush(s);
1889         return seek_frame_byte(s, stream_index, timestamp, flags);
1892     if(stream_index < 0){
1893         stream_index= av_find_default_stream_index(s);
1894         if(stream_index < 0)
1897         st= s->streams[stream_index];
1898         /* timestamp for default must be expressed in AV_TIME_BASE units */
1899         timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1902     /* first, we try the format specific seek */
1903     AV_NOWARN_DEPRECATED(
1904     if (s->iformat->read_seek) {
1905         ff_read_frame_flush(s);
1906         ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1914     if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1915         ff_read_frame_flush(s);
1916         return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1917     } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1918         ff_read_frame_flush(s);
1919         return seek_frame_generic(s, stream_index, timestamp, flags);
/**
 * New-style seek API: find a position with ts in [min_ts, max_ts],
 * preferring @p ts. Uses the demuxer's read_seek2() when available;
 * otherwise falls back to av_seek_frame(), choosing the seek direction
 * by whichever of min_ts/max_ts is closer to ts.
 */
1925 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1927     if(min_ts > ts || max_ts < ts)
1930     if (s->iformat->read_seek2) {
1931         ff_read_frame_flush(s);
1932         return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1935     if(s->iformat->read_timestamp){
1936         //try to seek via read_timestamp()
1939     //Fallback to old API if new is not implemented but old is
1940     //Note the old has somewat different sematics
1941     AV_NOWARN_DEPRECATED(
1942     if(s->iformat->read_seek || 1)
1943         return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1946     // try some generic seek like seek_frame_generic() but with new ts semantics
1949 /*******************************************************/
1952 * Return TRUE if the stream has accurate duration in any stream.
1954 * @return TRUE if the stream has accurate duration for at least one component.
/**
 * Return non-zero if the container or at least one stream carries a
 * known duration (used to decide whether timings can be filled in
 * without estimating from bitrate).
 */
1956 static int has_duration(AVFormatContext *ic)
1960     if(ic->duration != AV_NOPTS_VALUE)
1963     for(i = 0;i < ic->nb_streams; i++) {
1964         st = ic->streams[i];
1965         if (st->duration != AV_NOPTS_VALUE)
1972 * Estimate the stream timings from the one of each components.
1974 * Also computes the global bitrate if possible.
/**
 * Derive the container's start_time, duration and bit_rate from the
 * per-stream values. Subtitle start times are tracked separately and
 * only adopted when close (< 1s) to the other streams' start, so a
 * stray early subtitle does not skew the global start time.
 */
1976 static void update_stream_timings(AVFormatContext *ic)
1978     int64_t start_time, start_time1, start_time_text, end_time, end_time1;
1979     int64_t duration, duration1, filesize;
1983     start_time = INT64_MAX;
1984     start_time_text = INT64_MAX;
1985     end_time = INT64_MIN;
1986     duration = INT64_MIN;
1987     for(i = 0;i < ic->nb_streams; i++) {
1988         st = ic->streams[i];
1989         if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1990             start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1991             if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
1992                 if (start_time1 < start_time_text)
1993                     start_time_text = start_time1;
1995                 start_time = FFMIN(start_time, start_time1);
1996             if (st->duration != AV_NOPTS_VALUE) {
1997                 end_time1 = start_time1
1998                           + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1999                 end_time = FFMAX(end_time, end_time1);
2002         if (st->duration != AV_NOPTS_VALUE) {
2003             duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2004             duration = FFMAX(duration, duration1);
     /* Adopt the subtitle start time only if no other start was found, or
      * if it precedes the others by less than one second. */
2007     if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2008         start_time = start_time_text;
2009     if (start_time != INT64_MAX) {
2010         ic->start_time = start_time;
2011         if (end_time != INT64_MIN)
2012             duration = FFMAX(duration, end_time - start_time);
2014     if (duration != INT64_MIN && ic->duration == AV_NOPTS_VALUE) {
2015         ic->duration = duration;
2017     if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2018         /* compute the bitrate */
2019         ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
2020             (double)ic->duration;
/**
 * Recompute global timings, then copy the container-level start_time and
 * duration into any stream that is missing them (rescaled into the
 * stream's own time base).
 */
2024 static void fill_all_stream_timings(AVFormatContext *ic)
2029     update_stream_timings(ic);
2030     for(i = 0;i < ic->nb_streams; i++) {
2031         st = ic->streams[i];
2032         if (st->start_time == AV_NOPTS_VALUE) {
2033             if(ic->start_time != AV_NOPTS_VALUE)
2034                 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2035             if(ic->duration != AV_NOPTS_VALUE)
2036                 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/**
 * Last-resort duration estimate: if no duration is known, sum per-stream
 * bitrates to get a total bitrate, then derive duration from
 * filesize * 8 / bit_rate for every stream lacking one. Inaccurate for
 * VBR content — callers log a warning before using this path.
 */
2041 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2043     int64_t filesize, duration;
2047     /* if bit_rate is already set, we believe it */
2048     if (ic->bit_rate <= 0) {
2050         for(i=0;i<ic->nb_streams;i++) {
2051             st = ic->streams[i];
2052             if (st->codec->bit_rate > 0)
2053             bit_rate += st->codec->bit_rate;
2055         ic->bit_rate = bit_rate;
2058     /* if duration is already set, we believe it */
2059     if (ic->duration == AV_NOPTS_VALUE &&
2060         ic->bit_rate != 0) {
2061         filesize = ic->pb ? avio_size(ic->pb) : 0;
2063             for(i = 0; i < ic->nb_streams; i++) {
2064                 st = ic->streams[i];
                 /* duration = filesize*8 / bit_rate, in stream time base. */
2065                 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2066                 if (st->duration == AV_NOPTS_VALUE)
2067                     st->duration = duration;
2073 #define DURATION_MAX_READ_SIZE 250000
2074 #define DURATION_MAX_RETRY 3
2076 /* only usable for MPEG-PS streams */
/* only usable for MPEG-PS streams */
/**
 * Estimate per-stream durations by reading packets near the end of the
 * file and taking the last pts seen, minus the stream's start time.
 * Retries with a window doubled up to DURATION_MAX_RETRY times if no end
 * timestamp is found, then restores the original file position and
 * resets per-stream dts state.
 */
2077 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2079     AVPacket pkt1, *pkt = &pkt1;
2081     int read_size, i, ret;
2083     int64_t filesize, offset, duration;
2088     /* flush packet queue */
2089     flush_packet_queue(ic);
2091     for (i=0; i<ic->nb_streams; i++) {
2092         st = ic->streams[i];
2093         if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2094             av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2097             av_parser_close(st->parser);
2099             av_free_packet(&st->cur_pkt);
2103     /* estimate the end time (duration) */
2104     /* XXX: may need to support wrapping */
2105     filesize = ic->pb ? avio_size(ic->pb) : 0;
2106     end_time = AV_NOPTS_VALUE;
         /* Seek to a window of DURATION_MAX_READ_SIZE<<retry bytes before EOF. */
2108         offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2112         avio_seek(ic->pb, offset, SEEK_SET);
2115             if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2119                 ret = av_read_packet(ic, pkt);
2120             } while(ret == AVERROR(EAGAIN));
2123             read_size += pkt->size;
2124             st = ic->streams[pkt->stream_index];
2125             if (pkt->pts != AV_NOPTS_VALUE &&
2126                 (st->start_time != AV_NOPTS_VALUE ||
2127                  st->first_dts  != AV_NOPTS_VALUE)) {
2128                 duration = end_time = pkt->pts;
2129                 if (st->start_time != AV_NOPTS_VALUE)
2130                     duration -= st->start_time;
2132                     duration -= st->first_dts;
                 /* Compensate a pts wrap by adding one full wrap period. */
2134                     duration += 1LL<<st->pts_wrap_bits;
2136                     if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
2137                         st->duration = duration;
2140             av_free_packet(pkt);
2142     }while(   end_time==AV_NOPTS_VALUE
2143            && filesize > (DURATION_MAX_READ_SIZE<<retry)
2144            && ++retry <= DURATION_MAX_RETRY);
2146     fill_all_stream_timings(ic);
     /* Restore the caller's file position and reset read-side dts state. */
2148     avio_seek(ic->pb, old_offset, SEEK_SET);
2149     for (i=0; i<ic->nb_streams; i++) {
2151         st->cur_dts= st->first_dts;
2152         st->last_IP_pts = AV_NOPTS_VALUE;
2153         st->reference_dts = AV_NOPTS_VALUE;
/**
 * Top-level timing estimation dispatcher:
 *   - MPEG-PS/TS on a seekable input: read pts near EOF (most accurate),
 *   - any stream/container duration known: propagate it,
 *   - otherwise: estimate from bitrate (least accurate, warns).
 * Finishes by recomputing global timings and dumping debug info.
 */
2157 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2161     /* get the file size, if possible */
2162     if (ic->iformat->flags & AVFMT_NOFILE) {
2165         file_size = avio_size(ic->pb);
2166         file_size = FFMAX(0, file_size);
2169     if ((!strcmp(ic->iformat->name, "mpeg") ||
2170          !strcmp(ic->iformat->name, "mpegts")) &&
2171         file_size && ic->pb->seekable) {
2172         /* get accurate estimate from the PTSes */
2173         estimate_timings_from_pts(ic, old_offset);
2174     } else if (has_duration(ic)) {
2175         /* at least one component has timings - we use them for all
2177         fill_all_stream_timings(ic);
2179         av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2180         /* less precise: use bitrate info */
2181         estimate_timings_from_bit_rate(ic);
2183     update_stream_timings(ic);
2187         AVStream av_unused *st;
2188         for(i = 0;i < ic->nb_streams; i++) {
2189             st = ic->streams[i];
2190             av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2191                     (double) st->start_time / AV_TIME_BASE,
2192                     (double) st->duration / AV_TIME_BASE);
2194         av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2195                 (double) ic->start_time / AV_TIME_BASE,
2196                 (double) ic->duration / AV_TIME_BASE,
2197                 ic->bit_rate / 1000);
/**
 * Return non-zero when enough codec parameters are known to consider the
 * stream fully probed: sample rate/channels/format for audio (plus
 * frame_size for codecs that require it), width/pix_fmt for video, and
 * always a known codec_id.
 */
2201 static int has_codec_parameters(AVCodecContext *avctx)
2204     switch (avctx->codec_type) {
2205     case AVMEDIA_TYPE_AUDIO:
2206         val = avctx->sample_rate && avctx->channels && avctx->sample_fmt != AV_SAMPLE_FMT_NONE;
         /* These audio codecs additionally need frame_size before they can
          * be considered fully initialized. */
2207         if (!avctx->frame_size &&
2208             (avctx->codec_id == CODEC_ID_VORBIS ||
2209              avctx->codec_id == CODEC_ID_AAC ||
2210              avctx->codec_id == CODEC_ID_MP1 ||
2211              avctx->codec_id == CODEC_ID_MP2 ||
2212              avctx->codec_id == CODEC_ID_MP3 ||
2213              avctx->codec_id == CODEC_ID_CELT))
2216     case AVMEDIA_TYPE_VIDEO:
2217         val = avctx->width && avctx->pix_fmt != PIX_FMT_NONE;
2219     case AVMEDIA_TYPE_DATA:
2220         if(avctx->codec_id == CODEC_ID_NONE) return 1;
2225     return avctx->codec_id != CODEC_ID_NONE && val != 0;
/**
 * H.264 can have a large reorder delay; require at least 6 decoded
 * frames before trusting the guessed delay. All other codecs are
 * considered settled immediately.
 */
2228 static int has_decode_delay_been_guessed(AVStream *st)
2230     return st->codec->codec_id != CODEC_ID_H264 ||
2231            st->info->nb_decoded_frames >= 6;
/**
 * Decode packets from @p avpkt (opening the decoder first if needed)
 * until the codec parameters are known, the decode delay has been
 * guessed, and — for CODEC_CAP_CHANNEL_CONF codecs — at least one frame
 * has been decoded so the channel layout comes from the codec rather
 * than the container.
 */
2234 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2237     int got_picture, ret = 0;
2239     AVPacket pkt = *avpkt;
2241     if(!st->codec->codec){
2242         codec = avcodec_find_decoder(st->codec->codec_id);
2245         ret = avcodec_open2(st->codec, codec, options);
2250     while (pkt.size > 0 && ret >= 0 &&
2251            (!has_codec_parameters(st->codec)  ||
2252             !has_decode_delay_been_guessed(st) ||
2253             (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2255         avcodec_get_frame_defaults(&picture);
2256         switch(st->codec->codec_type) {
2257         case AVMEDIA_TYPE_VIDEO:
2258             ret = avcodec_decode_video2(st->codec, &picture,
2259                                         &got_picture, &pkt);
2261         case AVMEDIA_TYPE_AUDIO:
2262             ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
2269                 st->info->nb_decoded_frames++;
/**
 * Look up the container tag (fourcc) for @p id in a codec-tag table
 * terminated by CODEC_ID_NONE. (Loop body and return elided in this
 * view — confirm against full source.)
 */
2277 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2279     while (tags->id != CODEC_ID_NONE) {
/**
 * Look up the codec id for a container tag. First pass matches the tag
 * exactly; second pass retries case-insensitively via avpriv_toupper4().
 * Returns CODEC_ID_NONE when the tag is unknown.
 */
2287 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2290     for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2291         if(tag == tags[i].tag)
2294     for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2295         if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2298     return CODEC_ID_NONE;
/**
 * Search a NULL-terminated list of codec-tag tables for the tag of
 * @p id; each table is consulted via ff_codec_get_tag().
 */
2301 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2304     for(i=0; tags && tags[i]; i++){
2305         int tag= ff_codec_get_tag(tags[i], id);
/**
 * Search a NULL-terminated list of codec-tag tables for the codec id of
 * @p tag, returning the first match or CODEC_ID_NONE.
 */
2311 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2314     for(i=0; tags && tags[i]; i++){
2315         enum CodecID id= ff_codec_get_id(tags[i], tag);
2316         if(id!=CODEC_ID_NONE) return id;
2318     return CODEC_ID_NONE;
/**
 * Fill in the end time of chapters that lack one: each open chapter ends
 * at the start of the next chapter that begins after it, clamped to the
 * overall file end; if nothing bounds it, it collapses to its own start.
 */
2321 static void compute_chapters_end(AVFormatContext *s)
2324     int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2326     for (i = 0; i < s->nb_chapters; i++)
2327         if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2328             AVChapter *ch = s->chapters[i];
2329             int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
             /* Shrink 'end' to the earliest later-starting chapter. */
2332             for (j = 0; j < s->nb_chapters; j++) {
2333                 AVChapter *ch1 = s->chapters[j];
2334                 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2335                 if (j != i && next_start > ch->start && next_start < end)
2338             ch->end = (end == INT64_MAX) ? ch->start : end;
/**
 * Map index i to a candidate standard frame rate, expressed as
 * timebase ticks (rate * 1001 * 12 scale): indices below 60*12 cover
 * 1..719 in 1/12 Hz steps; the remainder are the common NTSC/PAL rates
 * 24, 30, 60, 12 and 15 fps.
 */
2342 static int get_std_framerate(int i){
2343     if(i<60*12) return i*1001;
2344     else        return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2348 * Is the time base unreliable.
2349 * This is a heuristic to balance between quick acceptance of the values in
2350 * the headers vs. some extra checks.
2351 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2352 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2353 * And there are "variable" fps files this needs to detect as well.
/**
 * Heuristic: non-zero when the codec time base should not be trusted as
 * the frame rate — implausibly coarse/fine time bases (>=101x or <5x the
 * num) or codecs known to misuse it (MPEG-2 field repeat, H.264, ...).
 * (Remaining disjuncts and the return are elided in this view.)
 */
2355 static int tb_unreliable(AVCodecContext *c){
2356     if(   c->time_base.den >= 101L*c->time_base.num
2357        || c->time_base.den <    5L*c->time_base.num
2358 /*       || c->codec_tag == AV_RL32("DIVX")
2359        || c->codec_tag == AV_RL32("XVID")*/
2360        || c->codec_id == CODEC_ID_MPEG2VIDEO
2361        || c->codec_id == CODEC_ID_H264
2367 #if FF_API_FORMAT_PARAMETERS
/* Deprecated public wrapper kept for ABI compatibility; forwards to
 * avformat_find_stream_info() with no per-stream options. */
2368 int av_find_stream_info(AVFormatContext *ic)
2370     return avformat_find_stream_info(ic, NULL);
2374 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2376 int i, count, ret, read_size, j;
2378 AVPacket pkt1, *pkt;
2379 int64_t old_offset = avio_tell(ic->pb);
2380 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2382 for(i=0;i<ic->nb_streams;i++) {
2384 st = ic->streams[i];
2386 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2387 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2388 /* if(!st->time_base.num)
2390 if(!st->codec->time_base.num)
2391 st->codec->time_base= st->time_base;
2393 //only for the split stuff
2394 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2395 st->parser = av_parser_init(st->codec->codec_id);
2396 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2397 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2400 assert(!st->codec->codec);
2401 codec = avcodec_find_decoder(st->codec->codec_id);
2403 /* this function doesn't flush the decoders, so force thread count
2404 * to 1 to fix behavior when thread count > number of frames in the file */
2406 av_dict_set(&options[i], "threads", 0, 0);
2408 /* Ensure that subtitle_header is properly set. */
2409 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2410 && codec && !st->codec->codec)
2411 avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
2413 //try to just open decoders, in case this is enough to get parameters
2414 if(!has_codec_parameters(st->codec)){
2415 if (codec && !st->codec->codec)
2416 avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
2420 for (i=0; i<ic->nb_streams; i++) {
2421 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2427 if (ff_check_interrupt(&ic->interrupt_callback)){
2429 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2433 /* check if one codec still needs to be handled */
2434 for(i=0;i<ic->nb_streams;i++) {
2435 int fps_analyze_framecount = 20;
2437 st = ic->streams[i];
2438 if (!has_codec_parameters(st->codec))
2440 /* if the timebase is coarse (like the usual millisecond precision
2441 of mkv), we need to analyze more frames to reliably arrive at
2443 if (av_q2d(st->time_base) > 0.0005)
2444 fps_analyze_framecount *= 2;
2445 if (ic->fps_probe_size >= 0)
2446 fps_analyze_framecount = ic->fps_probe_size;
2447 /* variable fps and no guess at the real fps */
2448 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2449 && st->info->duration_count < fps_analyze_framecount
2450 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2452 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2454 if(st->first_dts == AV_NOPTS_VALUE && (st->codec->codec_type == AVMEDIA_TYPE_VIDEO || st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2457 if (i == ic->nb_streams) {
2458 /* NOTE: if the format has no header, then we need to read
2459 some packets to get most of the streams, so we cannot
2461 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2462 /* if we found the info for all the codecs, we can stop */
2464 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2468 /* we did not get all the codec info, but we read too much data */
2469 if (read_size >= ic->probesize) {
2471 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2475 /* NOTE: a new stream can be added there if no header in file
2476 (AVFMTCTX_NOHEADER) */
2477 ret = read_frame_internal(ic, &pkt1);
2478 if (ret == AVERROR(EAGAIN))
2483 ret = -1; /* we could not have all the codec parameters before EOF */
2484 for(i=0;i<ic->nb_streams;i++) {
2485 st = ic->streams[i];
2486 if (!has_codec_parameters(st->codec)){
2488 avcodec_string(buf, sizeof(buf), st->codec, 0);
2489 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
2497 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2498 if ((ret = av_dup_packet(pkt)) < 0)
2499 goto find_stream_info_err;
2501 read_size += pkt->size;
2503 st = ic->streams[pkt->stream_index];
2504 if (st->codec_info_nb_frames>1) {
2506 if (st->time_base.den > 0 && (t=av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q)) >= ic->max_analyze_duration) {
2507 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached at %"PRId64"\n", ic->max_analyze_duration, t);
2510 st->info->codec_info_duration += pkt->duration;
2513 int64_t last = st->info->last_dts;
2515 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2516 double dts= pkt->dts * av_q2d(st->time_base);
2517 int64_t duration= pkt->dts - last;
2519 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2520 // av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2521 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); i++) {
2522 int framerate= get_std_framerate(i);
2523 double sdts= dts*framerate/(1001*12);
2525 int ticks= lrintf(sdts+j*0.5);
2526 double error= sdts - ticks + j*0.5;
2527 st->info->duration_error[j][0][i] += error;
2528 st->info->duration_error[j][1][i] += error*error;
2531 st->info->duration_count++;
2532 // ignore the first 4 values, they might have some random jitter
2533 if (st->info->duration_count > 3)
2534 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2536 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2537 st->info->last_dts = pkt->dts;
2539 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2540 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2542 st->codec->extradata_size= i;
2543 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2544 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2545 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2549 /* if still no information, we try to open the codec and to
2550 decompress the frame. We try to avoid that in most cases as
2551 it takes longer and uses more memory. For MPEG-4, we need to
2552 decompress for QuickTime.
2554 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2555 least one frame of codec data, this makes sure the codec initializes
2556 the channel configuration and does not only trust the values from the container.
2558 try_decode_frame(st, pkt, (options && i < orig_nb_streams )? &options[i] : NULL);
2560 st->codec_info_nb_frames++;
2564 // close codecs which were opened in try_decode_frame()
2565 for(i=0;i<ic->nb_streams;i++) {
2566 st = ic->streams[i];
2567 if(st->codec->codec)
2568 avcodec_close(st->codec);
2570 for(i=0;i<ic->nb_streams;i++) {
2571 st = ic->streams[i];
2572 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2573 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2574 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2575 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2576 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2577 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
2578 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2579 if(ff_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
2580 st->codec->codec_tag= tag;
2583 // the check for tb_unreliable() is not completely correct, since this is not about handling
2584 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2585 // ipmovie.c produces.
2586 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
2587 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2588 if (st->info->duration_count && !st->r_frame_rate.num
2589 && tb_unreliable(st->codec) /*&&
2590 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2591 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2593 double best_error= 0.01;
2595 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); j++) {
2598 if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
2600 if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
2603 int n= st->info->duration_count;
2604 double a= st->info->duration_error[k][0][j] / n;
2605 double error= st->info->duration_error[k][1][j]/n - a*a;
2607 if(error < best_error && best_error> 0.000000001){
2609 num = get_std_framerate(j);
2612 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
2615 // do not increase frame rate by more than 1 % in order to match a standard rate.
2616 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2617 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2620 if (!st->r_frame_rate.num){
2621 if( st->codec->time_base.den * (int64_t)st->time_base.num
2622 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2623 st->r_frame_rate.num = st->codec->time_base.den;
2624 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2626 st->r_frame_rate.num = st->time_base.den;
2627 st->r_frame_rate.den = st->time_base.num;
2630 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2631 if(!st->codec->bits_per_coded_sample)
2632 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2633 // set stream disposition based on audio service type
2634 switch (st->codec->audio_service_type) {
2635 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2636 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2637 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2638 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2639 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2640 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2641 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2642 st->disposition = AV_DISPOSITION_COMMENT; break;
2643 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2644 st->disposition = AV_DISPOSITION_KARAOKE; break;
2649 estimate_timings(ic, old_offset);
2651 compute_chapters_end(ic);
2654 /* correct DTS for B-frame streams with no timestamps */
2655 for(i=0;i<ic->nb_streams;i++) {
2656 st = ic->streams[i];
2657 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2659 ppktl = &ic->packet_buffer;
2661 if(ppkt1->stream_index != i)
2663 if(ppkt1->pkt->dts < 0)
2665 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2667 ppkt1->pkt->dts -= delta;
2672 st->cur_dts -= delta;
2678 find_stream_info_err:
2679 for (i=0; i < ic->nb_streams; i++)
2680 av_freep(&ic->streams[i]->info);
2684 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
2688 for (i = 0; i < ic->nb_programs; i++) {
2689 if (ic->programs[i] == last) {
2693 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2694 if (ic->programs[i]->stream_index[j] == s)
2695 return ic->programs[i];
2701 int av_find_best_stream(AVFormatContext *ic,
2702 enum AVMediaType type,
2703 int wanted_stream_nb,
2705 AVCodec **decoder_ret,
2708 int i, nb_streams = ic->nb_streams;
2709 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2710 unsigned *program = NULL;
2711 AVCodec *decoder = NULL, *best_decoder = NULL;
2713 if (related_stream >= 0 && wanted_stream_nb < 0) {
2714 AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
2716 program = p->stream_index;
2717 nb_streams = p->nb_stream_indexes;
2720 for (i = 0; i < nb_streams; i++) {
2721 int real_stream_index = program ? program[i] : i;
2722 AVStream *st = ic->streams[real_stream_index];
2723 AVCodecContext *avctx = st->codec;
2724 if (avctx->codec_type != type)
2726 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2728 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2731 decoder = avcodec_find_decoder(st->codec->codec_id);
2734 ret = AVERROR_DECODER_NOT_FOUND;
2738 if (best_count >= st->codec_info_nb_frames)
2740 best_count = st->codec_info_nb_frames;
2741 ret = real_stream_index;
2742 best_decoder = decoder;
2743 if (program && i == nb_streams - 1 && ret < 0) {
2745 nb_streams = ic->nb_streams;
2746 i = 0; /* no related stream found, try again with everything */
2750 *decoder_ret = best_decoder;
2754 /*******************************************************/
2756 int av_read_play(AVFormatContext *s)
2758 if (s->iformat->read_play)
2759 return s->iformat->read_play(s);
2761 return avio_pause(s->pb, 0);
2762 return AVERROR(ENOSYS);
2765 int av_read_pause(AVFormatContext *s)
2767 if (s->iformat->read_pause)
2768 return s->iformat->read_pause(s);
2770 return avio_pause(s->pb, 1);
2771 return AVERROR(ENOSYS);
2774 #if FF_API_FORMAT_PARAMETERS
2775 void av_close_input_stream(AVFormatContext *s)
2777 flush_packet_queue(s);
2778 if (s->iformat->read_close)
2779 s->iformat->read_close(s);
2780 avformat_free_context(s);
2784 void avformat_free_context(AVFormatContext *s)
2790 if (s->iformat && s->iformat->priv_class && s->priv_data)
2791 av_opt_free(s->priv_data);
2793 for(i=0;i<s->nb_streams;i++) {
2794 /* free all data in a stream component */
2797 av_parser_close(st->parser);
2798 av_free_packet(&st->cur_pkt);
2800 av_dict_free(&st->metadata);
2801 av_freep(&st->index_entries);
2802 av_freep(&st->codec->extradata);
2803 av_freep(&st->codec->subtitle_header);
2804 av_freep(&st->codec);
2805 av_freep(&st->priv_data);
2806 av_freep(&st->info);
2809 for(i=s->nb_programs-1; i>=0; i--) {
2810 av_dict_free(&s->programs[i]->metadata);
2811 av_freep(&s->programs[i]->stream_index);
2812 av_freep(&s->programs[i]);
2814 av_freep(&s->programs);
2815 av_freep(&s->priv_data);
2816 while(s->nb_chapters--) {
2817 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2818 av_freep(&s->chapters[s->nb_chapters]);
2820 av_freep(&s->chapters);
2821 av_dict_free(&s->metadata);
2822 av_freep(&s->streams);
2826 #if FF_API_CLOSE_INPUT_FILE
2827 void av_close_input_file(AVFormatContext *s)
2829 avformat_close_input(&s);
2833 void avformat_close_input(AVFormatContext **ps)
2835 AVFormatContext *s = *ps;
2836 AVIOContext *pb = (s->iformat && (s->iformat->flags & AVFMT_NOFILE)) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2838 flush_packet_queue(s);
2839 if (s->iformat && (s->iformat->read_close))
2840 s->iformat->read_close(s);
2841 avformat_free_context(s);
2847 #if FF_API_NEW_STREAM
2848 AVStream *av_new_stream(AVFormatContext *s, int id)
2850 AVStream *st = avformat_new_stream(s, NULL);
2857 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
2863 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2865 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2868 s->streams = streams;
2870 st = av_mallocz(sizeof(AVStream));
2873 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2878 st->codec = avcodec_alloc_context3(c);
2880 /* no default bitrate if decoding */
2881 st->codec->bit_rate = 0;
2883 st->index = s->nb_streams;
2884 st->start_time = AV_NOPTS_VALUE;
2885 st->duration = AV_NOPTS_VALUE;
2886 /* we set the current DTS to 0 so that formats without any timestamps
2887 but durations get some timestamps, formats with some unknown
2888 timestamps have their first few packets buffered and the
2889 timestamps corrected before they are returned to the user */
2891 st->first_dts = AV_NOPTS_VALUE;
2892 st->probe_packets = MAX_PROBE_PACKETS;
2894 /* default pts setting is MPEG-like */
2895 avpriv_set_pts_info(st, 33, 1, 90000);
2896 st->last_IP_pts = AV_NOPTS_VALUE;
2897 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2898 st->pts_buffer[i]= AV_NOPTS_VALUE;
2899 st->reference_dts = AV_NOPTS_VALUE;
2901 st->sample_aspect_ratio = (AVRational){0,1};
2903 s->streams[s->nb_streams++] = st;
2907 AVProgram *av_new_program(AVFormatContext *ac, int id)
2909 AVProgram *program=NULL;
2912 av_dlog(ac, "new_program: id=0x%04x\n", id);
2914 for(i=0; i<ac->nb_programs; i++)
2915 if(ac->programs[i]->id == id)
2916 program = ac->programs[i];
2919 program = av_mallocz(sizeof(AVProgram));
2922 dynarray_add(&ac->programs, &ac->nb_programs, program);
2923 program->discard = AVDISCARD_NONE;
2930 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2932 AVChapter *chapter = NULL;
2935 for(i=0; i<s->nb_chapters; i++)
2936 if(s->chapters[i]->id == id)
2937 chapter = s->chapters[i];
2940 chapter= av_mallocz(sizeof(AVChapter));
2943 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2945 av_dict_set(&chapter->metadata, "title", title, 0);
2947 chapter->time_base= time_base;
2948 chapter->start = start;
2954 /************************************************************/
2955 /* output media file */
2957 #if FF_API_FORMAT_PARAMETERS
2958 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2960 if (s->oformat->priv_data_size > 0) {
2961 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2963 return AVERROR(ENOMEM);
2964 if (s->oformat->priv_class) {
2965 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2966 av_opt_set_defaults(s->priv_data);
2969 s->priv_data = NULL;
2975 int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
2976 const char *format, const char *filename)
2978 AVFormatContext *s = avformat_alloc_context();
2987 oformat = av_guess_format(format, NULL, NULL);
2989 av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
2990 ret = AVERROR(EINVAL);
2994 oformat = av_guess_format(NULL, filename, NULL);
2996 ret = AVERROR(EINVAL);
2997 av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
3004 s->oformat = oformat;
3005 if (s->oformat->priv_data_size > 0) {
3006 s->priv_data = av_mallocz(s->oformat->priv_data_size);
3009 if (s->oformat->priv_class) {
3010 *(const AVClass**)s->priv_data= s->oformat->priv_class;
3011 av_opt_set_defaults(s->priv_data);
3014 s->priv_data = NULL;
3017 av_strlcpy(s->filename, filename, sizeof(s->filename));
3021 av_log(s, AV_LOG_ERROR, "Out of memory\n");
3022 ret = AVERROR(ENOMEM);
3024 avformat_free_context(s);
3028 #if FF_API_ALLOC_OUTPUT_CONTEXT
3029 AVFormatContext *avformat_alloc_output_context(const char *format,
3030 AVOutputFormat *oformat, const char *filename)
3032 AVFormatContext *avctx;
3033 int ret = avformat_alloc_output_context2(&avctx, oformat, format, filename);
3034 return ret < 0 ? NULL : avctx;
3038 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
3040 const AVCodecTag *avctag;
3042 enum CodecID id = CODEC_ID_NONE;
3043 unsigned int tag = 0;
3046 * Check that tag + id is in the table
3047 * If neither is in the table -> OK
3048 * If tag is in the table with another id -> FAIL
3049 * If id is in the table with another tag -> FAIL unless strict < normal
3051 for (n = 0; s->oformat->codec_tag[n]; n++) {
3052 avctag = s->oformat->codec_tag[n];
3053 while (avctag->id != CODEC_ID_NONE) {
3054 if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
3056 if (id == st->codec->codec_id)
3059 if (avctag->id == st->codec->codec_id)
3064 if (id != CODEC_ID_NONE)
3066 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
3071 #if FF_API_FORMAT_PARAMETERS
3072 int av_write_header(AVFormatContext *s)
3074 return avformat_write_header(s, NULL);
/*
 * NOTE(review): this listing is an elided, line-numbered dump — statements
 * are missing between the numbered lines — so it is documented as-is rather
 * than rewritten.
 *
 * Writes the output file header: applies user options to the context and to
 * the muxer private data, sanity-checks every stream (sample rate, time
 * base, dimensions, aspect-ratio consistency), resolves/validates codec
 * tags, sets the "encoder" metadata tag, calls the muxer's write_header()
 * and initializes per-stream PTS generation with frac_init().
 */
3078 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
3082 AVDictionary *tmp = NULL;
/* apply user-supplied options to the context, then to muxer private data */
3085 av_dict_copy(&tmp, *options, 0);
3086 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
3088 if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
3089 (ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3092 // some sanity checks
3093 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
3094 av_log(s, AV_LOG_ERROR, "no streams\n");
3095 ret = AVERROR(EINVAL);
/* per-stream validation */
3099 for(i=0;i<s->nb_streams;i++) {
3102 switch (st->codec->codec_type) {
3103 case AVMEDIA_TYPE_AUDIO:
3104 if(st->codec->sample_rate<=0){
3105 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
3106 ret = AVERROR(EINVAL);
/* derive block_align from channels * bytes-per-sample when unset */
3109 if(!st->codec->block_align)
3110 st->codec->block_align = st->codec->channels *
3111 av_get_bits_per_sample(st->codec->codec_id) >> 3;
3113 case AVMEDIA_TYPE_VIDEO:
3114 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
3115 av_log(s, AV_LOG_ERROR, "time base not set\n");
3116 ret = AVERROR(EINVAL);
3119 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
3120 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
3121 ret = AVERROR(EINVAL);
/* stream-level and codec-level SAR must agree within ~0.4% */
3124 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)
3125 && FFABS(av_q2d(st->sample_aspect_ratio) - av_q2d(st->codec->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
3127 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
3128 ret = AVERROR(EINVAL);
/* codec tag resolution / validation against the muxer's tag tables */
3134 if(s->oformat->codec_tag){
3135 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
3136 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi, we override it here
3137 st->codec->codec_tag= 0;
3139 if(st->codec->codec_tag){
3140 if (!validate_codec_tag(s, st)) {
3142 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
3143 av_log(s, AV_LOG_ERROR,
3144 "Tag %s/0x%08x incompatible with output codec id '%d'\n",
3145 tagbuf, st->codec->codec_tag, st->codec->codec_id);
3146 ret = AVERROR_INVALIDDATA;
3150 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
3153 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
3154 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
3155 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
/* lazily allocate muxer private data when av_set_parameters() was skipped */
3158 if (!s->priv_data && s->oformat->priv_data_size > 0) {
3159 s->priv_data = av_mallocz(s->oformat->priv_data_size);
3160 if (!s->priv_data) {
3161 ret = AVERROR(ENOMEM);
3164 if (s->oformat->priv_class) {
3165 *(const AVClass**)s->priv_data= s->oformat->priv_class;
3166 av_opt_set_defaults(s->priv_data);
3167 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3172 /* set muxer identification string */
3173 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
3174 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
3177 if(s->oformat->write_header){
3178 ret = s->oformat->write_header(s);
3183 /* init PTS generation */
3184 for(i=0;i<s->nb_streams;i++) {
3185 int64_t den = AV_NOPTS_VALUE;
3188 switch (st->codec->codec_type) {
3189 case AVMEDIA_TYPE_AUDIO:
3190 den = (int64_t)st->time_base.num * st->codec->sample_rate;
3192 case AVMEDIA_TYPE_VIDEO:
3193 den = (int64_t)st->time_base.num * st->codec->time_base.den;
3198 if (den != AV_NOPTS_VALUE) {
3200 ret = AVERROR_INVALIDDATA;
3203 frac_init(&st->pts, 0, 0, den);
/* on success the consumed options are returned to the caller via *options */
3208 av_dict_free(options);
3217 //FIXME merge with compute_pkt_fields
/*
 * NOTE(review): elided, line-numbered dump — documented as-is; the
 * statement order here is timestamp-critical, so no rewrite is attempted.
 *
 * Fills in missing duration/pts/dts on a packet about to be muxed, checks
 * dts monotonicity and pts >= dts, and advances the stream's fractional
 * PTS state (st->pts) by the packet duration.  Returns 0 or AVERROR(EINVAL).
 */
3218 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
/* delay > 0 means the codec reorders frames (B-frames) */
3219 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
3220 int num, den, frame_size, i;
3222 av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n",
3223 pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
3225 /* duration field */
3226 if (pkt->duration == 0) {
3227 compute_frame_duration(&num, &den, st, NULL, pkt);
3229 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
/* without reordering, a known dts can stand in for a missing pts */
3233 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
3236 //XXX/FIXME this is a temporary hack until all encoders output pts
3237 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
3239 //        pkt->pts= st->cur_dts;
3240 pkt->pts= st->pts.val;
3243 //calculate dts from pts
3244 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
/* keep a sliding, sorted window of recent pts; its minimum becomes the dts */
3245 st->pts_buffer[0]= pkt->pts;
3246 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
3247 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
3248 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
3249 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
3251 pkt->dts= st->pts_buffer[0];
/* dts must strictly increase (>= allowed only with AVFMT_TS_NONSTRICT) */
3254 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)){
3255 av_log(s, AV_LOG_ERROR,
3256 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
3257 st->index, st->cur_dts, pkt->dts);
3258 return AVERROR(EINVAL);
3260 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3261 av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index);
3262 return AVERROR(EINVAL);
3265 //    av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
3266 st->cur_dts= pkt->dts;
3267 st->pts.val= pkt->dts;
/* advance the fractional pts counter by this packet's duration */
3270 switch (st->codec->codec_type) {
3271 case AVMEDIA_TYPE_AUDIO:
3272 frame_size = get_audio_frame_size(st->codec, pkt->size);
3274 /* HACK/FIXME, we skip the initial 0 size packets as they are most
3275 likely equal to the encoder delay, but it would be better if we
3276 had the real timestamps from the encoder */
3277 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3278 frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3281 case AVMEDIA_TYPE_VIDEO:
3282 frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
3290 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3292 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
3294 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3297 ret= s->oformat->write_packet(s, pkt);
3300 s->streams[pkt->stream_index]->nb_frames++;
3304 #define CHUNK_START 0x1000
/*
 * NOTE(review): elided, line-numbered dump — the linked-list insertion here
 * is order-sensitive and partially missing, so it is documented as-is.
 *
 * Insert a copy of 'pkt' into the context's packet buffer, keeping the list
 * ordered per 'compare'.  When chunked interleaving is enabled
 * (max_chunk_size / max_chunk_duration), packets starting a new chunk are
 * flagged with CHUNK_START.  Returns 0 or AVERROR(ENOMEM).
 */
3306 int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3307 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3309 AVPacketList **next_point, *this_pktl;
3310 AVStream *st= s->streams[pkt->stream_index];
3311 int chunked= s->max_chunk_size || s->max_chunk_duration;
3313 this_pktl = av_mallocz(sizeof(AVPacketList));
3315 return AVERROR(ENOMEM);
3316 this_pktl->pkt= *pkt;
3317 pkt->destruct= NULL;             // do not free original but only the copy
3318 av_dup_packet(&this_pktl->pkt);  // duplicate the packet if it uses non-alloced memory
/* normally insert after this stream's last buffered packet */
3320 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3321 next_point = &(st->last_in_packet_buffer->next);
3323 next_point = &s->packet_buffer;
/* chunked mode: account the packet against the current chunk's budget */
3328 uint64_t max= av_rescale_q(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base);
3329 if(   st->interleaver_chunk_size     + pkt->size     <= s->max_chunk_size-1U
3330 && st->interleaver_chunk_duration + pkt->duration <= max-1U){
3331 st->interleaver_chunk_size     += pkt->size;
3332 st->interleaver_chunk_duration += pkt->duration;
3335 st->interleaver_chunk_size     =
3336 st->interleaver_chunk_duration = 0;
3337 this_pktl->pkt.flags |= CHUNK_START;
/* walk forward to the ordered insertion point */
3341 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3343 && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
3344 || !compare(s, &(*next_point)->pkt, pkt))){
3345 next_point= &(*next_point)->next;
3350 next_point = &(s->packet_buffer_end->next);
3353 assert(!*next_point);
3355 s->packet_buffer_end= this_pktl;
3358 this_pktl->next= *next_point;
/* record this node as the stream's last buffered packet */
3360 s->streams[pkt->stream_index]->last_in_packet_buffer=
3361 *next_point= this_pktl;
/*
 * NOTE(review): elided, line-numbered dump — documented as-is.
 *
 * Ordering predicate for dts-based interleaving: compares next->dts against
 * pkt->dts in their respective time bases.  When audio_preload is set and
 * exactly one of the two streams is audio, the audio stream's timestamp is
 * biased earlier by the preload amount (with an exact integer re-comparison
 * when the rescaled values tie).  Ties fall back to stream index order.
 */
3365 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3367 AVStream *st = s->streams[ pkt ->stream_index];
3368 AVStream *st2= s->streams[ next->stream_index];
3369 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3371 if(s->audio_preload && ((st->codec->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codec->codec_type == AVMEDIA_TYPE_AUDIO))){
3372 int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3373 int64_t ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO);
/* tie after rescaling: redo the comparison with exact integer arithmetic */
3375 ts= ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
3376 -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
3379 comp= (ts>ts2) - (ts<ts2);
/* equal timestamps: order deterministically by stream index */
3383 return pkt->stream_index < next->stream_index;
/*
 * NOTE(review): elided, line-numbered dump — documented as-is.
 *
 * Generic dts-ordered interleaver: buffers 'pkt' (when non-NULL), then
 * decides whether a packet can be emitted.  Emission happens when every
 * stream has a buffered packet, when the dts spread exceeds 20 seconds
 * with only subtitle streams missing, or on flush.  Returns 1 with a packet
 * in *out, 0 when nothing can be output yet, or a negative error code.
 */
3387 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
3389 int stream_count=0, noninterleaved_count=0;
3390 int64_t delta_dts_max = 0;
3394 ret = ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
/* count streams that have data buffered vs. subtitle streams that do not */
3399 for(i=0; i < s->nb_streams; i++) {
3400 if (s->streams[i]->last_in_packet_buffer) {
3402 } else if(s->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
3403 ++noninterleaved_count;
3407 if (s->nb_streams == stream_count) {
/* measure the dts spread between buffered streams and the list head */
3410 for(i=0; i < s->nb_streams; i++) {
3411 if (s->streams[i]->last_in_packet_buffer) {
3413 av_rescale_q(s->streams[i]->last_in_packet_buffer->pkt.dts,
3414 s->streams[i]->time_base,
3416 av_rescale_q(s->packet_buffer->pkt.dts,
3417 s->streams[s->packet_buffer->pkt.stream_index]->time_base,
3419 delta_dts_max= FFMAX(delta_dts_max, delta_dts);
3422 if(s->nb_streams == stream_count+noninterleaved_count &&
3423 delta_dts_max > 20*AV_TIME_BASE) {
3424 av_log(s, AV_LOG_DEBUG, "flushing with %d noninterleaved\n", noninterleaved_count);
/* pop the head of the packet buffer into *out */
3428 if(stream_count && flush){
3429 pktl= s->packet_buffer;
3432 s->packet_buffer= pktl->next;
3433 if(!s->packet_buffer)
3434 s->packet_buffer_end= NULL;
3436 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3437 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3441 av_init_packet(out);
3447 * Interleave an AVPacket correctly so it can be muxed.
3448 * @param out the interleaved packet will be output here
3449 * @param in the input packet
3450 * @param flush 1 if no further packets are available as input and all
3451 * remaining packets should be output
3452 * @return 1 if a packet was output, 0 if no packet could be output,
3453 * < 0 if an error occurred
3455 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3456 if(s->oformat->interleave_packet)
3457 return s->oformat->interleave_packet(s, out, in, flush);
3459 return av_interleave_packet_per_dts(s, out, in, flush);
/*
 * NOTE(review): elided, line-numbered dump (the drain loop's structure is
 * partially missing) — documented as-is.
 *
 * Interleaved write: fixes up the packet's timestamp fields, requires a
 * valid dts (unless AVFMT_NOTIMESTAMPS), feeds the packet to the
 * interleaver and writes out every packet the interleaver releases.
 */
3462 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3463 AVStream *st= s->streams[ pkt->stream_index];
3466 //FIXME/XXX/HACK drop zero sized packets
3467 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3470 av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
3471 pkt->size, pkt->dts, pkt->pts);
3472 if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3475 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3476 return AVERROR(EINVAL);
/* drain loop: write every packet the interleaver is ready to release */
3480 int ret= interleave_packet(s, &opkt, pkt, 0);
3481 if(ret<=0) //FIXME cleanup needed for ret<0 ?
3484 ret= s->oformat->write_packet(s, &opkt);
3486 s->streams[opkt.stream_index]->nb_frames++;
3488 av_free_packet(&opkt);
/* surface any I/O error recorded on the byte context */
3493 if(s->pb && s->pb->error)
3494 return s->pb->error;
/*
 * NOTE(review): elided, line-numbered dump (loop/cleanup structure partially
 * missing) — documented as-is.
 *
 * Finish muxing: flush all remaining interleaved packets, call the muxer's
 * write_trailer(), then free per-stream and muxer private data.  Returns 0
 * or the first error encountered (including a pending s->pb->error).
 */
3498 int av_write_trailer(AVFormatContext *s)
/* flush loop: drain the interleaver completely (flush=1) */
3504 ret= interleave_packet(s, &pkt, NULL, 1);
3505 if(ret<0) //FIXME cleanup needed for ret<0 ?
3510 ret= s->oformat->write_packet(s, &pkt);
3512 s->streams[pkt.stream_index]->nb_frames++;
3514 av_free_packet(&pkt);
3518 if(s->pb && s->pb->error)
3522 if(s->oformat->write_trailer)
3523 ret = s->oformat->write_trailer(s);
3526 ret = s->pb ? s->pb->error : 0;
/* release per-stream muxer state and the muxer private data */
3527 for(i=0;i<s->nb_streams;i++) {
3528 av_freep(&s->streams[i]->priv_data);
3529 av_freep(&s->streams[i]->index_entries);
3531 if (s->oformat->priv_class)
3532 av_opt_free(s->priv_data);
3533 av_freep(&s->priv_data);
3537 int av_get_output_timestamp(struct AVFormatContext *s, int stream,
3538 int64_t *dts, int64_t *wall)
3540 if (!s->oformat || !s->oformat->get_output_timestamp)
3541 return AVERROR(ENOSYS);
3542 s->oformat->get_output_timestamp(s, stream, dts, wall);
3546 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3549 AVProgram *program=NULL;
3552 if (idx >= ac->nb_streams) {
3553 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3557 for(i=0; i<ac->nb_programs; i++){
3558 if(ac->programs[i]->id != progid)
3560 program = ac->programs[i];
3561 for(j=0; j<program->nb_stream_indexes; j++)
3562 if(program->stream_index[j] == idx)
3565 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3568 program->stream_index = tmp;
3569 program->stream_index[program->nb_stream_indexes++] = idx;
3574 static void print_fps(double d, const char *postfix){
3575 uint64_t v= lrintf(d*100);
3576 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3577 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3578 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
3581 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3583 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3584 AVDictionaryEntry *tag=NULL;
3586 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3587 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3588 if(strcmp("language", tag->key)){
3591 av_strlcpy(tmp, tag->value, sizeof(tmp));
3592 for(i=0; i<strlen(tmp); i++) if(tmp[i]==0xd) tmp[i]=' ';
3593 av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tmp);
3599 /* "user interface" functions */
/*
 * NOTE(review): elided, line-numbered dump — documented as-is.
 *
 * Log a one-line description of stream 'i' of context #index: id/language,
 * codec string, aspect ratios, frame-rate/timebase figures (video only),
 * disposition flags, and finally the stream metadata.
 */
3600 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3603 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3604 AVStream *st = ic->streams[i];
3605 int g = av_gcd(st->time_base.num, st->time_base.den);
3606 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3607 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3608 av_log(NULL, AV_LOG_INFO, "    Stream #%d:%d", index, i);
3609 /* the pid is an important information, so we display it */
3610 /* XXX: add a generic system */
3611 if (flags & AVFMT_SHOW_IDS)
3612 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3614 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
/* debug-level: probe frame count and reduced time base */
3615 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3616 av_log(NULL, AV_LOG_INFO, ": %s", buf);
3617 if (st->sample_aspect_ratio.num && // default
3618 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3619 AVRational display_aspect_ratio;
3620 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3621 st->codec->width*st->sample_aspect_ratio.num,
3622 st->codec->height*st->sample_aspect_ratio.den,
3624 av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3625 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3626 display_aspect_ratio.num, display_aspect_ratio.den);
/* video: print the four customary rate figures */
3628 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3629 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3630 print_fps(av_q2d(st->avg_frame_rate), "fps");
3631 if(st->r_frame_rate.den && st->r_frame_rate.num)
3632 print_fps(av_q2d(st->r_frame_rate), "tbr");
3633 if(st->time_base.den && st->time_base.num)
3634 print_fps(1/av_q2d(st->time_base), "tbn");
3635 if(st->codec->time_base.den && st->codec->time_base.num)
3636 print_fps(1/av_q2d(st->codec->time_base), "tbc");
/* disposition flags, one parenthesized tag each */
3638 if (st->disposition & AV_DISPOSITION_DEFAULT)
3639 av_log(NULL, AV_LOG_INFO, " (default)");
3640 if (st->disposition & AV_DISPOSITION_DUB)
3641 av_log(NULL, AV_LOG_INFO, " (dub)");
3642 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3643 av_log(NULL, AV_LOG_INFO, " (original)");
3644 if (st->disposition & AV_DISPOSITION_COMMENT)
3645 av_log(NULL, AV_LOG_INFO, " (comment)");
3646 if (st->disposition & AV_DISPOSITION_LYRICS)
3647 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3648 if (st->disposition & AV_DISPOSITION_KARAOKE)
3649 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3650 if (st->disposition & AV_DISPOSITION_FORCED)
3651 av_log(NULL, AV_LOG_INFO, " (forced)");
3652 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3653 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3654 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3655 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3656 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3657 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3658 av_log(NULL, AV_LOG_INFO, "\n");
3659 dump_metadata(NULL, st->metadata, "    ");
3662 #if FF_API_DUMP_FORMAT
3663 void dump_format(AVFormatContext *ic,
3668 av_dump_format(ic, index, url, is_output);
/*
 * NOTE(review): elided, line-numbered dump — documented as-is.
 *
 * Log a human-readable summary of the whole context: header line, global
 * metadata, duration/start/bitrate, chapters, programs (with their streams)
 * and finally every stream not already printed as part of a program.
 */
3672 void av_dump_format(AVFormatContext *ic,
/* 'printed' marks streams already shown under a program */
3678 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3679 if (ic->nb_streams && !printed)
3682 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3683 is_output ? "Output" : "Input",
3685 is_output ? ic->oformat->name : ic->iformat->name,
3686 is_output ? "to" : "from", url);
3687 dump_metadata(NULL, ic->metadata, "  ");
3689 av_log(NULL, AV_LOG_INFO, "  Duration: ");
3690 if (ic->duration != AV_NOPTS_VALUE) {
3691 int hours, mins, secs, us;
3692 secs = ic->duration / AV_TIME_BASE;
3693 us = ic->duration % AV_TIME_BASE;
3698 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3699 (100 * us) / AV_TIME_BASE);
3701 av_log(NULL, AV_LOG_INFO, "N/A");
3703 if (ic->start_time != AV_NOPTS_VALUE) {
3705 av_log(NULL, AV_LOG_INFO, ", start: ");
3706 secs = ic->start_time / AV_TIME_BASE;
3707 us = abs(ic->start_time % AV_TIME_BASE);
3708 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3709 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3711 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3713 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3715 av_log(NULL, AV_LOG_INFO, "N/A");
3717 av_log(NULL, AV_LOG_INFO, "\n");
/* chapters: start/end converted to seconds via each chapter's time base */
3719 for (i = 0; i < ic->nb_chapters; i++) {
3720 AVChapter *ch = ic->chapters[i];
3721 av_log(NULL, AV_LOG_INFO, "    Chapter #%d.%d: ", index, i);
3722 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3723 av_log(NULL, AV_LOG_INFO, "end %f\n",   ch->end   * av_q2d(ch->time_base));
3725 dump_metadata(NULL, ch->metadata, "    ");
/* programs: each with its own metadata and member streams */
3727 if(ic->nb_programs) {
3728 int j, k, total = 0;
3729 for(j=0; j<ic->nb_programs; j++) {
3730 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3732 av_log(NULL, AV_LOG_INFO, "  Program %d %s\n", ic->programs[j]->id,
3733 name ? name->value : "");
3734 dump_metadata(NULL, ic->programs[j]->metadata, "    ");
3735 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3736 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3737 printed[ic->programs[j]->stream_index[k]] = 1;
3739 total += ic->programs[j]->nb_stream_indexes;
3741 if (total < ic->nb_streams)
3742 av_log(NULL, AV_LOG_INFO, "  No Program\n");
/* remaining streams that belong to no program */
3744 for(i=0;i<ic->nb_streams;i++)
3746 dump_stream_format(ic, i, index, is_output);
3751 int64_t av_gettime(void)
3754 gettimeofday(&tv,NULL);
3755 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
3758 uint64_t ff_ntp_time(void)
3760 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
#if FF_API_PARSE_DATE
#include "libavutil/parseutils.h"

/**
 * Deprecated wrapper around av_parse_time(); kept for API compatibility.
 * NOTE(review): the parse result is returned even when av_parse_time()
 * fails, matching the historical behavior — verify callers check input.
 */
int64_t parse_date(const char *timestr, int duration)
{
    int64_t t;
    av_parse_time(&t, timestr, duration);
    return t;
}
#endif
#if FF_API_FIND_INFO_TAG
#include "libavutil/parseutils.h"

/** Deprecated: forwards directly to av_find_info_tag(). */
int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
{
    return av_find_info_tag(arg, arg_size, tag1, info);
}
#endif
/**
 * Build a frame filename from a printf-like pattern.
 *
 * The pattern must contain exactly one "%d" or "%0Nd" sequence, which is
 * replaced by the zero-padded frame number; "%%" emits a literal '%'.
 *
 * @param buf      destination buffer, always NUL-terminated on return
 *                 (assumes buf_size > 0 — TODO confirm with callers)
 * @param buf_size size of the destination buffer
 * @param path     pattern, e.g. "img%03d.png"
 * @param number   frame number to substitute
 * @return 0 on success, -1 on invalid pattern (no "%d", more than one,
 *         unknown conversion) or if the result does not fit in buf
 */
int av_get_frame_filename(char *buf, int buf_size,
                          const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q = buf;
    p = path;
    percentd_found = 0;
    for (;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            do {
                nd = 0;
                /* cast to unsigned char: isdigit() on a negative plain
                 * char is undefined behavior */
                while (isdigit((unsigned char)*p)) {
                    nd = nd * 10 + *p++ - '0';
                }
                c = *p++;
            } while (isdigit((unsigned char)c));

            switch (c) {
            case '%':
                goto addchar;
            case 'd':
                if (percentd_found)
                    goto fail;        /* only one %d allowed */
                percentd_found = 1;
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;        /* expansion would overflow buf */
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;            /* unknown conversion specifier */
            }
        } else {
addchar:
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;
fail:
    *q = '\0';
    return -1;
}
/**
 * Dump size bytes of buf as hex + printable ASCII, 16 bytes per row.
 * Output goes to stream f when non-NULL, otherwise through av_log()
 * on avcl at the given level.
 */
static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
{
    int off, k, n, ch;
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)

    for (off = 0; off < size; off += 16) {
        n = size - off;
        if (n > 16)
            n = 16;
        PRINT("%08x ", off);
        for (k = 0; k < 16; k++) {
            if (k < n)
                PRINT(" %02x", buf[off + k]);
            else
                PRINT("   "); /* pad short final row so ASCII column aligns */
        }
        PRINT(" ");
        for (k = 0; k < n; k++) {
            ch = buf[off + k];
            if (ch < ' ' || ch > '~')
                ch = '.'; /* non-printable bytes shown as '.' */
            PRINT("%c", ch);
        }
        PRINT("\n");
    }
#undef PRINT
}
/** Hex-dump buf to the given stdio stream. */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
{
    hex_dump_internal(NULL, f, 0, buf, size);
}
3872 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3874 hex_dump_internal(avcl, NULL, level, buf, size);
/*
 * Print a human-readable description of pkt (stream index, keyframe flag,
 * duration, dts/pts in seconds via time_base, size) and, when dump_payload
 * is set, a hex dump of its data. Output goes to stream f when non-NULL,
 * otherwise through av_log() on avcl at the given level.
 *
 * NOTE(review): several interior lines (braces, "dts="/"pts=" labels,
 * else branches, the dump_payload guard) appear to be missing from this
 * extract — the visible lines are kept verbatim below.
 */
3877 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3880 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3881 PRINT("stream #%d:\n", pkt->stream_index);
3882 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
/* duration/timestamps are converted to seconds with the caller's time base */
3883 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3884 /* DTS is _always_ valid after av_read_frame() */
3886 if (pkt->dts == AV_NOPTS_VALUE)
3889 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3890 /* PTS may not be known if B-frames are present. */
3892 if (pkt->pts == AV_NOPTS_VALUE)
3895 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3897 PRINT(" size=%d\n", pkt->size);
/* presumably guarded by if (dump_payload) in the full source — verify */
3900 av_hex_dump(f, pkt->data, pkt->size);
3904 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3906 AVRational tb = { 1, AV_TIME_BASE };
3907 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
3911 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3913 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
3917 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3919 AVRational tb = { 1, AV_TIME_BASE };
3920 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
3924 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3927 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/*
 * Split a URL of the form proto://user:pass@host:port/path into its
 * components. Absent components are returned as empty strings and
 * *port_ptr is set to -1 when no port is present. Copies are bounded by
 * the respective *_size arguments (truncation semantics come from
 * av_strlcpy — presumably always NUL-terminating when size > 0; verify
 * against libavutil/avstring.h).
 *
 * NOTE(review): several interior lines (closing braces, else keywords,
 * the url-advance after the protocol) appear missing from this extract;
 * the visible lines are kept verbatim below.
 */
3930 void av_url_split(char *proto, int proto_size,
3931 char *authorization, int authorization_size,
3932 char *hostname, int hostname_size,
3934 char *path, int path_size,
3937 const char *p, *ls, *at, *col, *brk;
/* start with every output empty / port unset */
3939 if (port_ptr) *port_ptr = -1;
3940 if (proto_size > 0) proto[0] = 0;
3941 if (authorization_size > 0) authorization[0] = 0;
3942 if (hostname_size > 0) hostname[0] = 0;
3943 if (path_size > 0) path[0] = 0;
3945 /* parse protocol */
3946 if ((p = strchr(url, ':'))) {
3947 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3952 /* no protocol means plain filename */
3953 av_strlcpy(path, url, path_size);
3957 /* separate path from hostname */
3958 ls = strchr(p, '/');
/* presumably only reached when no '/' was found — fall back to '?' */
3960 ls = strchr(p, '?');
3962 av_strlcpy(path, ls, path_size);
/* no path component at all: point ls at the terminating NUL */
3964 ls = &p[strlen(p)]; // XXX
3966 /* the rest is hostname, use that to parse auth/port */
3968 /* authorization (user[:pass]@hostname) */
3969 if ((at = strchr(p, '@')) && at < ls) {
3970 av_strlcpy(authorization, p,
3971 FFMIN(authorization_size, at + 1 - p));
3972 p = at + 1; /* skip '@' */
/* bracketed numeric IPv6 literal: [addr] optionally followed by :port */
3975 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3977 av_strlcpy(hostname, p + 1,
3978 FFMIN(hostname_size, brk - p));
3979 if (brk[1] == ':' && port_ptr)
3980 *port_ptr = atoi(brk + 2);
/* plain host:port */
3981 } else if ((col = strchr(p, ':')) && col < ls) {
3982 av_strlcpy(hostname, p,
3983 FFMIN(col + 1 - p, hostname_size));
3984 if (port_ptr) *port_ptr = atoi(col + 1);
/* no port: hostname runs up to ls */
3986 av_strlcpy(hostname, p,
3987 FFMIN(ls + 1 - p, hostname_size));
/**
 * Write the hexadecimal representation of the first s bytes of src into
 * buff (exactly 2*s characters; the output is NOT NUL-terminated).
 *
 * @param buff      destination, must hold at least 2*s characters
 * @param src       bytes to convert
 * @param s         number of input bytes
 * @param lowercase non-zero for lowercase digits, zero for uppercase
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    static const char digits_uc[] = "0123456789ABCDEF";
    static const char digits_lc[] = "0123456789abcdef";
    const char *digits = lowercase ? digits_lc : digits_uc;
    int i;

    for (i = 0; i < s; i++) {
        buff[2 * i]     = digits[src[i] >> 4];
        buff[2 * i + 1] = digits[src[i] & 0x0F];
    }

    return buff;
}
4012 int ff_hex_to_data(uint8_t *data, const char *p)
4019 p += strspn(p, SPACE_CHARS);
4022 c = toupper((unsigned char) *p++);
4023 if (c >= '0' && c <= '9')
4025 else if (c >= 'A' && c <= 'F')
#if FF_API_SET_PTS_INFO
/** Deprecated public wrapper; forwards to avpriv_set_pts_info(). */
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
                     unsigned int pts_num, unsigned int pts_den)
{
    avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
}
#endif
4048 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
4049 unsigned int pts_num, unsigned int pts_den)
4052 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
4053 if(new_tb.num != pts_num)
4054 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
4056 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
4058 if(new_tb.num <= 0 || new_tb.den <= 0) {
4059 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase for st:%d\n", s->index);
4062 s->time_base = new_tb;
4063 s->pts_wrap_bits = pts_wrap_bits;
/*
 * Assemble a URL string "proto://auth@host:port/<fmt...>" into str
 * (bounded by size). A numeric IPv6 hostname is wrapped in [] when
 * network support is compiled in; port is appended only when valid
 * (presumably guarded by a port->0 check in the full source — verify).
 *
 * NOTE(review): the function prologue (str[0] = '\0', proto guard),
 * several braces/else lines, the va_start/va_end pair and the return
 * appear missing from this extract; visible lines kept verbatim.
 */
4066 int ff_url_join(char *str, int size, const char *proto,
4067 const char *authorization, const char *hostname,
4068 int port, const char *fmt, ...)
4071 struct addrinfo hints, *ai;
4076 av_strlcatf(str, size, "%s://", proto);
4077 if (authorization && authorization[0])
4078 av_strlcatf(str, size, "%s@", authorization);
4079 #if CONFIG_NETWORK && defined(AF_INET6)
4080 /* Determine if hostname is a numerical IPv6 address,
4081 * properly escape it within [] in that case. */
4082 memset(&hints, 0, sizeof(hints));
4083 hints.ai_flags = AI_NUMERICHOST;
4084 if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
4085 if (ai->ai_family == AF_INET6) {
4086 av_strlcat(str, "[", size);
4087 av_strlcat(str, hostname, size);
4088 av_strlcat(str, "]", size);
/* numeric but not IPv6: no escaping needed */
4090 av_strlcat(str, hostname, size);
4095 /* Not an IPv6 address, just output the plain string. */
4096 av_strlcat(str, hostname, size);
4099 av_strlcatf(str, size, ":%d", port);
/* append the printf-style tail after whatever has been built so far */
4102 int len = strlen(str);
4105 vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
4111 int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
4112 AVFormatContext *src)
4117 local_pkt.stream_index = dst_stream;
4118 if (pkt->pts != AV_NOPTS_VALUE)
4119 local_pkt.pts = av_rescale_q(pkt->pts,
4120 src->streams[pkt->stream_index]->time_base,
4121 dst->streams[dst_stream]->time_base);
4122 if (pkt->dts != AV_NOPTS_VALUE)
4123 local_pkt.dts = av_rescale_q(pkt->dts,
4124 src->streams[pkt->stream_index]->time_base,
4125 dst->streams[dst_stream]->time_base);
4126 return av_write_frame(dst, &local_pkt);
/*
 * Parse a comma/whitespace separated list of key=value pairs from str.
 * For each key found, callback_get_buf(context, key, key_len, &dest,
 * &dest_len) chooses a destination buffer; the (possibly quoted) value
 * is copied there, bounded by dest_len.
 *
 * NOTE(review): the main loop header, quote handling branches and buffer
 * NUL-termination lines are missing from this extract; visible lines are
 * kept verbatim. Also NOTE(review): isspace(*ptr) on a plain char is UB
 * for negative values — consider an (unsigned char) cast.
 */
4129 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
4132 const char *ptr = str;
4134 /* Parse key=value pairs. */
4137 char *dest = NULL, *dest_end;
4138 int key_len, dest_len = 0;
4140 /* Skip whitespace and potential commas. */
4141 while (*ptr && (isspace(*ptr) || *ptr == ','))
/* a pair without '=' terminates parsing */
4148 if (!(ptr = strchr(key, '=')))
4151 key_len = ptr - key;
/* let the caller pick (or refuse) a destination buffer for this key */
4153 callback_get_buf(context, key, key_len, &dest, &dest_len);
4154 dest_end = dest + dest_len - 1;
/* quoted value: copy until the closing '"' */
4158 while (*ptr && *ptr != '\"') {
4162 if (dest && dest < dest_end)
4166 if (dest && dest < dest_end)
/* unquoted value: copy until whitespace or ',' */
4174 for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
4175 if (dest && dest < dest_end)
4183 int ff_find_stream_index(AVFormatContext *s, int id)
4186 for (i = 0; i < s->nb_streams; i++) {
4187 if (s->streams[i]->id == id)
/*
 * Resolve the (possibly relative) URL rel against base and write the
 * absolute result into buf (bounded by size). Handles server-relative
 * paths ("/x" against "proto://host/..."), already-absolute rel, and
 * "../" prefixes popped against the base's directory components.
 *
 * NOTE(review): loop/brace lines, the rel parameter in the signature and
 * several else branches are missing from this extract; visible lines are
 * kept verbatim.
 */
4193 void ff_make_absolute_url(char *buf, int size, const char *base,
4197 /* Absolute path, relative to the current server */
4198 if (base && strstr(base, "://") && rel[0] == '/') {
4200 av_strlcpy(buf, base, size);
4201 sep = strstr(buf, "://");
/* presumably skips past "://" before looking for the first '/' — verify */
4204 sep = strchr(sep, '/');
4208 av_strlcat(buf, rel, size);
4211 /* If rel actually is an absolute url, just copy it */
4212 if (!base || strstr(rel, "://") || rel[0] == '/') {
4213 av_strlcpy(buf, rel, size);
/* otherwise resolve rel against base's directory */
4217 av_strlcpy(buf, base, size);
4218 /* Remove the file name from the base url */
4219 sep = strrchr(buf, '/');
/* pop one base directory per leading "../" of rel */
4224 while (av_strstart(rel, "../", NULL) && sep) {
4225 /* Remove the path delimiter at the end */
4227 sep = strrchr(buf, '/');
4228 /* If the next directory name to pop off is "..", break here */
4229 if (!strcmp(sep ? &sep[1] : buf, "..")) {
4230 /* Readd the slash we just removed */
4231 av_strlcat(buf, "/", size);
4234 /* Cut off the directory name */
4241 av_strlcat(buf, rel, size);
/*
 * Convert an ISO-8601-style date string ("YYYY-MM-DD hh:mm:ss" or
 * "YYYY-MM-DDThh:mm:ss") to Unix time via strptime() + av_timegm().
 * On systems without strptime() only a warning is logged (presumably
 * guarded by an #if HAVE_STRPTIME / #else missing from this extract).
 *
 * NOTE(review): declaration of ret1/ret2, the #if/#else/#endif lines and
 * the final return are missing here; visible lines kept verbatim.
 */
4244 int64_t ff_iso8601_to_unix_time(const char *datestr)
4247 struct tm time1 = {0}, time2 = {0};
/* try both the space-separated and the 'T'-separated ISO forms */
4249 ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
4250 ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
/* presumably: prefer whichever format matched — verify ordering */
4252 return av_timegm(&time2);
4254 return av_timegm(&time1);
4256 av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert "
4257 "the date string.\n");
4262 int avformat_query_codec(AVOutputFormat *ofmt, enum CodecID codec_id, int std_compliance)
4265 if (ofmt->query_codec)
4266 return ofmt->query_codec(codec_id, std_compliance);
4267 else if (ofmt->codec_tag)
4268 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4269 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
4270 codec_id == ofmt->subtitle_codec)
4273 return AVERROR_PATCHWELCOME;
/*
 * Globally initialize network components (presumably compiled only under
 * #if CONFIG_NETWORK — the guard lines are missing from this extract).
 * Marks networking as initialized, then runs ff_network_init(); the
 * error-return and final return 0 lines are not visible here.
 */
4276 int avformat_network_init(void)
4280 ff_network_inited_globally = 1;
4281 if ((ret = ff_network_init()) < 0)
/*
 * Undo avformat_network_init(). Body not visible in this extract —
 * presumably tears down the network layer under #if CONFIG_NETWORK.
 */
4288 int avformat_network_deinit(void)
4297 int ff_add_param_change(AVPacket *pkt, int32_t channels,
4298 uint64_t channel_layout, int32_t sample_rate,
4299 int32_t width, int32_t height)
4305 return AVERROR(EINVAL);
4308 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4310 if (channel_layout) {
4312 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4316 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4318 if (width || height) {
4320 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
4322 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4324 return AVERROR(ENOMEM);
4325 bytestream_put_le32(&data, flags);
4327 bytestream_put_le32(&data, channels);
4329 bytestream_put_le64(&data, channel_layout);
4331 bytestream_put_le32(&data, sample_rate);
4332 if (width || height) {
4333 bytestream_put_le32(&data, width);
4334 bytestream_put_le32(&data, height);