2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "avio_internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/dict.h"
30 #include "libavutil/pixdesc.h"
33 #include "libavutil/avstring.h"
34 #include "libavutil/mathematics.h"
36 #include "audiointerleave.h"
50 * various utility functions for use within Libav
/**
 * Return the LIBAVFORMAT_VERSION_INT constant compiled into the library.
 * NOTE(review): sparse excerpt — braces/lines between the numbered
 * statements are missing from this view.
 */
53 unsigned avformat_version(void)
55 return LIBAVFORMAT_VERSION_INT;
/** Return the build-time configuration string of the library. */
58 const char *avformat_configuration(void)
60 return LIBAV_CONFIGURATION;
/**
 * Return the license string of libavformat.
 * The pointer arithmetic skips the "libavformat license: " prefix of the
 * concatenated literal, yielding only the LIBAV_LICENSE part.
 */
63 const char *avformat_license(void)
65 #define LICENSE_PREFIX "libavformat license: "
66 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
69 /* fraction handling */
72 * f = val + (num / den) + 0.5.
74 * 'num' is normalized so that it is such as 0 <= num < den.
76 * @param f fractional number
77 * @param val integer value
78 * @param num must be >= 0
79 * @param den must be >= 1
81 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
94 * Fractional addition to f: f = f + (incr / f->den).
96 * @param f fractional number
97 * @param incr increment, can be positive or negative
99 static void frac_add(AVFrac *f, int64_t incr)
112 } else if (num >= den) {
119 /** head of registered input format linked list */
120 static AVInputFormat *first_iformat = NULL;
121 /** head of registered output format linked list */
122 static AVOutputFormat *first_oformat = NULL;
/**
 * Iterate over the registered input formats.
 * @param f NULL to get the head of the list, otherwise the previous element
 * @return the element following f, or the list head when f is NULL
 */
124 AVInputFormat *av_iformat_next(AVInputFormat *f)
126 if(f) return f->next;
127 else return first_iformat;
/**
 * Iterate over the registered output formats.
 * @param f NULL to get the head of the list, otherwise the previous element
 * @return the element following f, or the list head when f is NULL
 */
130 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
132 if(f) return f->next;
133 else return first_oformat;
/**
 * Append an input format to the tail of the registered-format list.
 * Walks the next pointers to the end before linking (linking line not
 * visible in this excerpt).
 */
136 void av_register_input_format(AVInputFormat *format)
140 while (*p != NULL) p = &(*p)->next;
/**
 * Append an output format to the tail of the registered-format list.
 * Mirrors av_register_input_format() for the output-format list.
 */
145 void av_register_output_format(AVOutputFormat *format)
149 while (*p != NULL) p = &(*p)->next;
/**
 * Test whether the extension of filename matches one of the entries in the
 * comma-separated 'extensions' list (case-insensitive).
 * The bounded copy loop guards against overflowing the local ext1 buffer.
 * NOTE(review): loop setup and return paths are missing from this excerpt.
 */
154 int av_match_ext(const char *filename, const char *extensions)
162 ext = strrchr(filename, '.');
168 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
171 if (!av_strcasecmp(ext1, ext))
/**
 * Check whether 'name' matches any entry of the comma-separated list
 * 'names' (case-insensitive). Uses FFMAX(p - names, namelen) so that a
 * prefix of an entry does not count as a match.
 * @return non-zero when the final (or a listed) entry equals name
 */
181 static int match_format(const char *name, const char *names)
189 namelen = strlen(name);
190 while ((p = strchr(names, ','))) {
191 len = FFMAX(p - names, namelen);
192 if (!av_strncasecmp(name, names, len))
196 return !av_strcasecmp(name, names);
199 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
200 const char *mime_type)
202 AVOutputFormat *fmt = NULL, *fmt_found;
203 int score_max, score;
205 /* specific test for image sequences */
206 #if CONFIG_IMAGE2_MUXER
207 if (!short_name && filename &&
208 av_filename_number_test(filename) &&
209 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
210 return av_guess_format("image2", NULL, NULL);
213 /* Find the proper file type. */
216 while ((fmt = av_oformat_next(fmt))) {
218 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
220 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
222 if (filename && fmt->extensions &&
223 av_match_ext(filename, fmt->extensions)) {
226 if (score > score_max) {
234 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
235 const char *filename, const char *mime_type, enum AVMediaType type){
236 if(type == AVMEDIA_TYPE_VIDEO){
237 enum CodecID codec_id= CODEC_ID_NONE;
239 #if CONFIG_IMAGE2_MUXER
240 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
241 codec_id= ff_guess_image2_codec(filename);
244 if(codec_id == CODEC_ID_NONE)
245 codec_id= fmt->video_codec;
247 }else if(type == AVMEDIA_TYPE_AUDIO)
248 return fmt->audio_codec;
249 else if (type == AVMEDIA_TYPE_SUBTITLE)
250 return fmt->subtitle_codec;
252 return CODEC_ID_NONE;
/**
 * Find a registered input format whose name matches short_name.
 * Linear scan of the registered-format list via av_iformat_next().
 * @return the matching AVInputFormat, presumably NULL when none matches
 *         (return statements not visible in this excerpt)
 */
255 AVInputFormat *av_find_input_format(const char *short_name)
257 AVInputFormat *fmt = NULL;
258 while ((fmt = av_iformat_next(fmt))) {
259 if (match_format(short_name, fmt->name))
/**
 * Allocate a packet of 'size' bytes and fill it from the IO context.
 * Records the stream position in pkt->pos before reading; on a short read
 * the packet is shrunk to the number of bytes actually obtained.
 * NOTE(review): error-handling branches are missing from this excerpt.
 */
266 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
268 int ret= av_new_packet(pkt, size);
273 pkt->pos= avio_tell(s);
275 ret= avio_read(s, pkt->data, size);
279 av_shrink_packet(pkt, ret);
/**
 * Read 'size' more bytes and append them to an existing packet.
 * Falls back to av_get_packet() when the packet is empty (condition line
 * not visible here); otherwise grows the packet, reads into the tail, and
 * shrinks to old_size plus the bytes actually read (FFMAX clamps a
 * negative read result to zero).
 */
284 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
289 return av_get_packet(s, pkt, size);
290 old_size = pkt->size;
291 ret = av_grow_packet(pkt, size);
294 ret = avio_read(s, pkt->data + old_size, size);
295 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
/**
 * Return 1 when filename is non-NULL and contains a valid frame-number
 * pattern (e.g. "%d") accepted by av_get_frame_filename(), else 0.
 */
300 int av_filename_number_test(const char *filename)
303 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
/**
 * Guess the input format of the probe data.
 * Works on a local copy of *pd so the ID3v2 header can be stripped before
 * probing. Each registered format is scored either via its read_probe()
 * callback or by file-extension match; the best score above *score_max
 * wins and *score_max is updated.
 * NOTE(review): sparse excerpt — score assignments, 'id3' setup and the
 * final return are not visible here.
 */
306 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
308 AVProbeData lpd = *pd;
309 AVInputFormat *fmt1 = NULL, *fmt;
// Skip past a leading ID3v2 tag so the real payload gets probed.
312 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
313 int id3len = ff_id3v2_tag_len(lpd.buf);
314 if (lpd.buf_size > id3len + 16) {
316 lpd.buf_size -= id3len;
// Probe every registered demuxer; AVFMT_NOFILE formats are only tried
// when the IO state (is_opened) matches.
322 while ((fmt1 = av_iformat_next(fmt1))) {
323 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
326 if (fmt1->read_probe) {
327 score = fmt1->read_probe(&lpd);
328 } else if (fmt1->extensions) {
329 if (av_match_ext(lpd.filename, fmt1->extensions)) {
333 if (score > *score_max) {
336 }else if (score == *score_max)
340 /* a hack for files with huge id3v2 tags -- try to guess by file extension. */
341 if (!fmt && is_opened && *score_max < AVPROBE_SCORE_MAX/4) {
342 while ((fmt = av_iformat_next(fmt)))
343 if (fmt->extensions && av_match_ext(lpd.filename, fmt->extensions)) {
344 *score_max = AVPROBE_SCORE_MAX/4;
// Last resort: an ID3 tag strongly suggests mp3, with a slightly lower score.
349 if (!fmt && id3 && *score_max < AVPROBE_SCORE_MAX/4-1) {
350 while ((fmt = av_iformat_next(fmt)))
351 if (fmt->extensions && av_match_ext("mp3", fmt->extensions)) {
352 *score_max = AVPROBE_SCORE_MAX/4-1;
/**
 * Convenience wrapper around av_probe_input_format2() using a default
 * minimum score (declaration of 'score' not visible in this excerpt).
 */
360 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
362 return av_probe_input_format2(pd, is_opened, &score);
365 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
367 static const struct {
368 const char *name; enum CodecID id; enum AVMediaType type;
370 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
371 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
372 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
373 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
374 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
375 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
376 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
377 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
380 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
384 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
385 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
386 for (i = 0; fmt_id_type[i].name; i++) {
387 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
388 st->codec->codec_id = fmt_id_type[i].id;
389 st->codec->codec_type = fmt_id_type[i].type;
397 /************************************************************/
398 /* input media file */
400 #if FF_API_FORMAT_PARAMETERS
401 static AVDictionary *convert_format_parameters(AVFormatParameters *ap)
404 AVDictionary *opts = NULL;
409 if (ap->time_base.num) {
410 snprintf(buf, sizeof(buf), "%d/%d", ap->time_base.den, ap->time_base.num);
411 av_dict_set(&opts, "framerate", buf, 0);
413 if (ap->sample_rate) {
414 snprintf(buf, sizeof(buf), "%d", ap->sample_rate);
415 av_dict_set(&opts, "sample_rate", buf, 0);
418 snprintf(buf, sizeof(buf), "%d", ap->channels);
419 av_dict_set(&opts, "channels", buf, 0);
421 if (ap->width || ap->height) {
422 snprintf(buf, sizeof(buf), "%dx%d", ap->width, ap->height);
423 av_dict_set(&opts, "video_size", buf, 0);
425 if (ap->pix_fmt != PIX_FMT_NONE) {
426 av_dict_set(&opts, "pixel_format", av_get_pix_fmt_name(ap->pix_fmt), 0);
429 snprintf(buf, sizeof(buf), "%d", ap->channel);
430 av_dict_set(&opts, "channel", buf, 0);
433 av_dict_set(&opts, "standard", ap->standard, 0);
435 if (ap->mpeg2ts_compute_pcr) {
436 av_dict_set(&opts, "mpeg2ts_compute_pcr", "1", 0);
438 if (ap->initial_pause) {
439 av_dict_set(&opts, "initial_pause", "1", 0);
445 * Open a media file from an IO stream. 'fmt' must be specified.
447 int av_open_input_stream(AVFormatContext **ic_ptr,
448 AVIOContext *pb, const char *filename,
449 AVInputFormat *fmt, AVFormatParameters *ap)
454 AVFormatParameters default_ap;
458 memset(ap, 0, sizeof(default_ap));
460 opts = convert_format_parameters(ap);
462 if(!ap->prealloced_context)
463 ic = avformat_alloc_context();
467 err = AVERROR(ENOMEM);
470 if (pb && fmt && fmt->flags & AVFMT_NOFILE)
471 av_log(ic, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
472 "will be ignored with AVFMT_NOFILE format.\n");
476 if ((err = avformat_open_input(&ic, filename, fmt, &opts)) < 0)
478 ic->pb = ic->pb ? ic->pb : pb; // don't leak custom pb if it wasn't set above
487 /** size of probe buffer, for guessing file type from file contents */
488 #define PROBE_BUF_MIN 2048
489 #define PROBE_BUF_MAX (1<<20)
491 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
492 const char *filename, void *logctx,
493 unsigned int offset, unsigned int max_probe_size)
495 AVProbeData pd = { filename ? filename : "", NULL, -offset };
496 unsigned char *buf = NULL;
497 int ret = 0, probe_size;
499 if (!max_probe_size) {
500 max_probe_size = PROBE_BUF_MAX;
501 } else if (max_probe_size > PROBE_BUF_MAX) {
502 max_probe_size = PROBE_BUF_MAX;
503 } else if (max_probe_size < PROBE_BUF_MIN) {
504 return AVERROR(EINVAL);
507 if (offset >= max_probe_size) {
508 return AVERROR(EINVAL);
511 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
512 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
513 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
514 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
516 if (probe_size < offset) {
520 /* read probe data */
521 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
522 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
523 /* fail if error was not end of file, otherwise, lower score */
524 if (ret != AVERROR_EOF) {
529 ret = 0; /* error was end of file, nothing read */
532 pd.buf = &buf[offset];
534 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
536 /* guess file format */
537 *fmt = av_probe_input_format2(&pd, 1, &score);
539 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
540 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
542 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
548 return AVERROR_INVALIDDATA;
551 /* rewind. reuse probe buffer to avoid seeking */
552 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
558 #if FF_API_FORMAT_PARAMETERS
559 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
562 AVFormatParameters *ap)
565 AVDictionary *opts = convert_format_parameters(ap);
567 if (!ap || !ap->prealloced_context)
570 err = avformat_open_input(ic_ptr, filename, fmt, &opts);
577 /* open input file and probe the format if necessary */
578 static int init_input(AVFormatContext *s, const char *filename)
581 AVProbeData pd = {filename, NULL, 0};
584 s->flags |= AVFMT_FLAG_CUSTOM_IO;
586 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
587 else if (s->iformat->flags & AVFMT_NOFILE)
588 return AVERROR(EINVAL);
592 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
593 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
596 if ((ret = avio_open(&s->pb, filename, AVIO_FLAG_READ)) < 0)
600 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
603 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
605 AVFormatContext *s = *ps;
607 AVFormatParameters ap = { { 0 } };
608 AVDictionary *tmp = NULL;
610 if (!s && !(s = avformat_alloc_context()))
611 return AVERROR(ENOMEM);
616 av_dict_copy(&tmp, *options, 0);
618 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
621 if ((ret = init_input(s, filename)) < 0)
624 /* check filename in case an image number is expected */
625 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
626 if (!av_filename_number_test(filename)) {
627 ret = AVERROR(EINVAL);
632 s->duration = s->start_time = AV_NOPTS_VALUE;
633 av_strlcpy(s->filename, filename, sizeof(s->filename));
635 /* allocate private data */
636 if (s->iformat->priv_data_size > 0) {
637 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
638 ret = AVERROR(ENOMEM);
641 if (s->iformat->priv_class) {
642 *(const AVClass**)s->priv_data = s->iformat->priv_class;
643 av_opt_set_defaults(s->priv_data);
644 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
649 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
651 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC);
653 if (s->iformat->read_header)
654 if ((ret = s->iformat->read_header(s, &ap)) < 0)
657 if (s->pb && !s->data_offset)
658 s->data_offset = avio_tell(s->pb);
660 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
663 av_dict_free(options);
671 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
673 avformat_free_context(s);
678 /*******************************************************/
/**
 * Append a copy of *pkt to the packet list.
 * @param packet_buffer head pointer of the list (set when list was empty)
 * @param plast_pktl    tail pointer of the list, linked when non-empty
 * @return presumably the stored packet, or NULL on allocation failure
 *         (return lines not visible in this excerpt)
 */
680 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
681 AVPacketList **plast_pktl){
682 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
687 (*plast_pktl)->next = pktl;
689 *packet_buffer = pktl;
691 /* add the packet in the buffered packet list */
691 /* add the packet in the buffered packet list */
697 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
703 AVPacketList *pktl = s->raw_packet_buffer;
707 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
708 !s->streams[pkt->stream_index]->probe_packets ||
709 s->raw_packet_buffer_remaining_size < pkt->size){
710 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
713 s->raw_packet_buffer = pktl->next;
714 s->raw_packet_buffer_remaining_size += pkt->size;
721 ret= s->iformat->read_packet(s, pkt);
723 if (!pktl || ret == AVERROR(EAGAIN))
725 for (i = 0; i < s->nb_streams; i++)
726 s->streams[i]->probe_packets = 0;
730 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
731 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
732 av_log(s, AV_LOG_WARNING,
733 "Dropped corrupted packet (stream = %d)\n",
739 st= s->streams[pkt->stream_index];
741 switch(st->codec->codec_type){
742 case AVMEDIA_TYPE_VIDEO:
743 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
745 case AVMEDIA_TYPE_AUDIO:
746 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
748 case AVMEDIA_TYPE_SUBTITLE:
749 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
753 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
757 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
758 s->raw_packet_buffer_remaining_size -= pkt->size;
760 if(st->codec->codec_id == CODEC_ID_PROBE){
761 AVProbeData *pd = &st->probe_data;
762 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
765 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
766 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
767 pd->buf_size += pkt->size;
768 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
770 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
771 //FIXME we don't reduce score to 0 for the case of running out of buffer space in bytes
772 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
773 if(st->codec->codec_id != CODEC_ID_PROBE){
776 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
783 /**********************************************************/
786 * Get the number of samples of an audio frame. Return -1 on error.
788 static int get_audio_frame_size(AVCodecContext *enc, int size)
792 if(enc->codec_id == CODEC_ID_VORBIS)
795 if (enc->frame_size <= 1) {
796 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
798 if (bits_per_sample) {
799 if (enc->channels == 0)
801 frame_size = (size << 3) / (bits_per_sample * enc->channels);
803 /* used for example by ADPCM codecs */
804 if (enc->bit_rate == 0)
806 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
809 frame_size = enc->frame_size;
816 * Return the frame duration in seconds. Return 0 if not available.
818 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
819 AVCodecParserContext *pc, AVPacket *pkt)
825 switch(st->codec->codec_type) {
826 case AVMEDIA_TYPE_VIDEO:
827 if(st->time_base.num*1000LL > st->time_base.den){
828 *pnum = st->time_base.num;
829 *pden = st->time_base.den;
830 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
831 *pnum = st->codec->time_base.num;
832 *pden = st->codec->time_base.den;
833 if (pc && pc->repeat_pict) {
834 *pnum = (*pnum) * (1 + pc->repeat_pict);
836 //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
837 //Thus if we have no parser in such case leave duration undefined.
838 if(st->codec->ticks_per_frame>1 && !pc){
843 case AVMEDIA_TYPE_AUDIO:
844 frame_size = get_audio_frame_size(st->codec, pkt->size);
845 if (frame_size <= 0 || st->codec->sample_rate <= 0)
848 *pden = st->codec->sample_rate;
/**
 * Return whether every frame of the codec is a keyframe.
 * All audio is treated as intra-only; for video a fixed allow-list of
 * intra-only codec IDs is consulted (fall-through cases; the shared
 * return statements are not visible in this excerpt).
 */
855 static int is_intra_only(AVCodecContext *enc){
856 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
858 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
859 switch(enc->codec_id){
861 case CODEC_ID_MJPEGB:
863 case CODEC_ID_PRORES:
864 case CODEC_ID_RAWVIDEO:
865 case CODEC_ID_DVVIDEO:
866 case CODEC_ID_HUFFYUV:
867 case CODEC_ID_FFVHUFF:
872 case CODEC_ID_JPEG2000:
880 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
881 int64_t dts, int64_t pts)
883 AVStream *st= s->streams[stream_index];
884 AVPacketList *pktl= s->packet_buffer;
886 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
889 st->first_dts= dts - st->cur_dts;
892 for(; pktl; pktl= pktl->next){
893 if(pktl->pkt.stream_index != stream_index)
895 //FIXME think more about this check
896 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
897 pktl->pkt.pts += st->first_dts;
899 if(pktl->pkt.dts != AV_NOPTS_VALUE)
900 pktl->pkt.dts += st->first_dts;
902 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
903 st->start_time= pktl->pkt.pts;
905 if (st->start_time == AV_NOPTS_VALUE)
906 st->start_time = pts;
909 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
911 AVPacketList *pktl= s->packet_buffer;
914 if(st->first_dts != AV_NOPTS_VALUE){
915 cur_dts= st->first_dts;
916 for(; pktl; pktl= pktl->next){
917 if(pktl->pkt.stream_index == pkt->stream_index){
918 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
920 cur_dts -= pkt->duration;
923 pktl= s->packet_buffer;
924 st->first_dts = cur_dts;
925 }else if(st->cur_dts)
928 for(; pktl; pktl= pktl->next){
929 if(pktl->pkt.stream_index != pkt->stream_index)
931 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
932 && !pktl->pkt.duration){
933 pktl->pkt.dts= cur_dts;
934 if(!st->codec->has_b_frames)
935 pktl->pkt.pts= cur_dts;
936 cur_dts += pkt->duration;
937 pktl->pkt.duration= pkt->duration;
941 if(st->first_dts == AV_NOPTS_VALUE)
942 st->cur_dts= cur_dts;
945 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
946 AVCodecParserContext *pc, AVPacket *pkt)
948 int num, den, presentation_delayed, delay, i;
951 if (s->flags & AVFMT_FLAG_NOFILLIN)
954 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
955 pkt->dts= AV_NOPTS_VALUE;
957 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
958 //FIXME Set low_delay = 0 when has_b_frames = 1
959 st->codec->has_b_frames = 1;
961 /* do we have a video B-frame ? */
962 delay= st->codec->has_b_frames;
963 presentation_delayed = 0;
965 // ignore delay caused by frame threading so that the mpeg2-without-dts
966 // warning will not trigger
967 if (delay && st->codec->active_thread_type&FF_THREAD_FRAME)
968 delay -= st->codec->thread_count-1;
970 /* XXX: need has_b_frame, but cannot get it if the codec is
973 pc && pc->pict_type != AV_PICTURE_TYPE_B)
974 presentation_delayed = 1;
976 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
977 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
978 pkt->dts -= 1LL<<st->pts_wrap_bits;
981 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
982 // we take the conservative approach and discard both
983 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
984 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
985 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
986 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
989 if (pkt->duration == 0) {
990 compute_frame_duration(&num, &den, st, pc, pkt);
992 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
994 if(pkt->duration != 0 && s->packet_buffer)
995 update_initial_durations(s, st, pkt);
999 /* correct timestamps with byte offset if demuxers only have timestamps
1000 on packet boundaries */
1001 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1002 /* this will estimate bitrate based on this frame's duration and size */
1003 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1004 if(pkt->pts != AV_NOPTS_VALUE)
1006 if(pkt->dts != AV_NOPTS_VALUE)
1010 if (pc && pc->dts_sync_point >= 0) {
1011 // we have synchronization info from the parser
1012 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1014 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1015 if (pkt->dts != AV_NOPTS_VALUE) {
1016 // got DTS from the stream, update reference timestamp
1017 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1018 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1019 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1020 // compute DTS based on reference timestamp
1021 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1022 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1024 if (pc->dts_sync_point > 0)
1025 st->reference_dts = pkt->dts; // new reference
1029 /* This may be redundant, but it should not hurt. */
1030 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1031 presentation_delayed = 1;
1033 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
1034 /* interpolate PTS and DTS if they are not present */
1035 //We skip H264 currently because delay and has_b_frames are not reliably set
1036 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1037 if (presentation_delayed) {
1038 /* DTS = decompression timestamp */
1039 /* PTS = presentation timestamp */
1040 if (pkt->dts == AV_NOPTS_VALUE)
1041 pkt->dts = st->last_IP_pts;
1042 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1043 if (pkt->dts == AV_NOPTS_VALUE)
1044 pkt->dts = st->cur_dts;
1046 /* this is tricky: the dts must be incremented by the duration
1047 of the frame we are displaying, i.e. the last I- or P-frame */
1048 if (st->last_IP_duration == 0)
1049 st->last_IP_duration = pkt->duration;
1050 if(pkt->dts != AV_NOPTS_VALUE)
1051 st->cur_dts = pkt->dts + st->last_IP_duration;
1052 st->last_IP_duration = pkt->duration;
1053 st->last_IP_pts= pkt->pts;
1054 /* cannot compute PTS if not present (we can compute it only
1055 by knowing the future) */
1056 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
1057 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
1058 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
1059 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1060 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
1061 pkt->pts += pkt->duration;
1062 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
1066 /* presentation is not delayed : PTS and DTS are the same */
1067 if(pkt->pts == AV_NOPTS_VALUE)
1068 pkt->pts = pkt->dts;
1069 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
1070 if(pkt->pts == AV_NOPTS_VALUE)
1071 pkt->pts = st->cur_dts;
1072 pkt->dts = pkt->pts;
1073 if(pkt->pts != AV_NOPTS_VALUE)
1074 st->cur_dts = pkt->pts + pkt->duration;
1078 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1079 st->pts_buffer[0]= pkt->pts;
1080 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1081 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1082 if(pkt->dts == AV_NOPTS_VALUE)
1083 pkt->dts= st->pts_buffer[0];
1084 if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above so we try here
1085 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1087 if(pkt->dts > st->cur_dts)
1088 st->cur_dts = pkt->dts;
1091 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
1094 if(is_intra_only(st->codec))
1095 pkt->flags |= AV_PKT_FLAG_KEY;
1098 /* keyframe computation */
1099 if (pc->key_frame == 1)
1100 pkt->flags |= AV_PKT_FLAG_KEY;
1101 else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I)
1102 pkt->flags |= AV_PKT_FLAG_KEY;
1105 pkt->convergence_duration = pc->convergence_duration;
1109 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1114 av_init_packet(pkt);
1117 /* select current input stream component */
1120 if (!st->need_parsing || !st->parser) {
1121 /* no parsing needed: we just output the packet as is */
1122 /* raw data support */
1123 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
1124 compute_pkt_fields(s, st, NULL, pkt);
1126 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1127 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1128 ff_reduce_index(s, st->index);
1129 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1132 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
1133 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
1134 st->cur_ptr, st->cur_len,
1135 st->cur_pkt.pts, st->cur_pkt.dts,
1137 st->cur_pkt.pts = AV_NOPTS_VALUE;
1138 st->cur_pkt.dts = AV_NOPTS_VALUE;
1139 /* increment read pointer */
1143 /* return packet if any */
1147 pkt->stream_index = st->index;
1148 pkt->pts = st->parser->pts;
1149 pkt->dts = st->parser->dts;
1150 pkt->pos = st->parser->pos;
1151 if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
1153 pkt->destruct= st->cur_pkt.destruct;
1154 st->cur_pkt.destruct= NULL;
1155 st->cur_pkt.data = NULL;
1156 assert(st->cur_len == 0);
1158 pkt->destruct = NULL;
1160 compute_pkt_fields(s, st, st->parser, pkt);
1162 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
1163 ff_reduce_index(s, st->index);
1164 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1165 0, 0, AVINDEX_KEYFRAME);
1172 av_free_packet(&st->cur_pkt);
1177 /* read next packet */
1178 ret = av_read_packet(s, &cur_pkt);
1180 if (ret == AVERROR(EAGAIN))
1182 /* return the last frames, if any */
1183 for(i = 0; i < s->nb_streams; i++) {
1185 if (st->parser && st->need_parsing) {
1186 av_parser_parse2(st->parser, st->codec,
1187 &pkt->data, &pkt->size,
1189 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
1195 /* no more packets: really terminate parsing */
1198 st = s->streams[cur_pkt.stream_index];
1199 st->cur_pkt= cur_pkt;
1201 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1202 st->cur_pkt.dts != AV_NOPTS_VALUE &&
1203 st->cur_pkt.pts < st->cur_pkt.dts){
1204 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1205 st->cur_pkt.stream_index,
1209 // av_free_packet(&st->cur_pkt);
1213 if(s->debug & FF_FDEBUG_TS)
1214 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1215 st->cur_pkt.stream_index,
1219 st->cur_pkt.duration,
1223 st->cur_ptr = st->cur_pkt.data;
1224 st->cur_len = st->cur_pkt.size;
1225 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1226 st->parser = av_parser_init(st->codec->codec_id);
1228 /* no parser available: just output the raw packets */
1229 st->need_parsing = AVSTREAM_PARSE_NONE;
1230 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1231 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1232 }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
1233 st->parser->flags |= PARSER_FLAG_ONCE;
1238 if(s->debug & FF_FDEBUG_TS)
1239 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1250 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1254 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1257 pktl = s->packet_buffer;
1259 AVPacket *next_pkt= &pktl->pkt;
1261 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1262 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1263 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1264 if( pktl->pkt.stream_index == next_pkt->stream_index
1265 && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
1266 && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1267 next_pkt->pts= pktl->pkt.dts;
1271 pktl = s->packet_buffer;
1274 if( next_pkt->pts != AV_NOPTS_VALUE
1275 || next_pkt->dts == AV_NOPTS_VALUE
1277 /* read packet from packet buffer, if there is data */
1279 s->packet_buffer = pktl->next;
1285 int ret= read_frame_internal(s, pkt);
1287 if(pktl && ret != AVERROR(EAGAIN)){
1294 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1295 &s->packet_buffer_end)) < 0)
1296 return AVERROR(ENOMEM);
1298 assert(!s->packet_buffer);
1299 return read_frame_internal(s, pkt);
1304 /* XXX: suppress the packet queue */
/**
 * Free every packet queued in both the demuxed packet buffer and the raw
 * packet buffer, reset the tail pointers and restore the raw-buffer byte
 * budget to RAW_PACKET_BUFFER_SIZE.
 */
1305 static void flush_packet_queue(AVFormatContext *s)
1310 pktl = s->packet_buffer;
1313 s->packet_buffer = pktl->next;
1314 av_free_packet(&pktl->pkt);
1317 while(s->raw_packet_buffer){
1318 pktl = s->raw_packet_buffer;
1319 s->raw_packet_buffer = pktl->next;
1320 av_free_packet(&pktl->pkt);
1323 s->packet_buffer_end=
1324 s->raw_packet_buffer_end= NULL;
1325 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1328 /*******************************************************/
/**
 * Pick a default stream for the file: the first video stream if any
 * (return for that branch not visible here), otherwise the first audio
 * stream, otherwise stream 0. Returns an error value when the context
 * has no streams (exact value not visible in this excerpt).
 */
1331 int av_find_default_stream_index(AVFormatContext *s)
1333 int first_audio_index = -1;
1337 if (s->nb_streams <= 0)
1339 for(i = 0; i < s->nb_streams; i++) {
1341 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1344 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1345 first_audio_index = i;
1347 return first_audio_index >= 0 ? first_audio_index : 0;
1351 * Flush the frame reader.
/* Drop all buffered packets and reset the per-stream demuxing state
 * (parser, pending packet, timestamps, probe counters, PTS reorder buffer)
 * so that reading can restart cleanly, e.g. after a seek. */
1353 void ff_read_frame_flush(AVFormatContext *s)
1358 flush_packet_queue(s);
1362 /* for each stream, reset read state */
1363 for(i = 0; i < s->nb_streams; i++) {
1367 av_parser_close(st->parser);
1369 av_free_packet(&st->cur_pkt);
1371 st->last_IP_pts = AV_NOPTS_VALUE;
1372 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1373 st->reference_dts = AV_NOPTS_VALUE;
1378 st->probe_packets = MAX_PROBE_PACKETS;
/* invalidate the PTS reordering buffer used for B-frame timestamp recovery */
1380 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1381 st->pts_buffer[j]= AV_NOPTS_VALUE;
1385 #if FF_API_SEEK_PUBLIC
/* Deprecated public wrapper kept for ABI compatibility; forwards to
 * ff_update_cur_dts(). Compiled only while FF_API_SEEK_PUBLIC is set. */
1386 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1388 ff_update_cur_dts(s, ref_st, timestamp);
/* Propagate a new current DTS to every stream after a seek: 'timestamp' is
 * expressed in ref_st's time base and is rescaled into each stream's own
 * time base via av_rescale (64-bit intermediates avoid overflow). */
1392 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1396 for(i = 0; i < s->nb_streams; i++) {
1397 AVStream *st = s->streams[i];
1399 st->cur_dts = av_rescale(timestamp,
1400 st->time_base.den * (int64_t)ref_st->time_base.num,
1401 st->time_base.num * (int64_t)ref_st->time_base.den);
/* Bound the memory used by a stream's seek index: once the entry count
 * reaches max_index_size bytes worth of entries, discard every other entry
 * (halving the index density rather than dropping the newest entries). */
1405 void ff_reduce_index(AVFormatContext *s, int stream_index)
1407 AVStream *st= s->streams[stream_index];
1408 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1410 if((unsigned)st->nb_index_entries >= max_entries){
/* keep entries 0, 2, 4, ... — compacting in place */
1412 for(i=0; 2*i<st->nb_index_entries; i++)
1413 st->index_entries[i]= st->index_entries[2*i];
1414 st->nb_index_entries= i;
/* Insert or update an entry in a timestamp-sorted seek index.
 * Grows the array with av_fast_realloc, binary-searches for the timestamp,
 * and either appends, inserts (memmove to keep sort order), or updates an
 * existing entry with the same timestamp. Returns the entry index (the
 * failure return paths are not visible in this excerpt). */
1418 int ff_add_index_entry(AVIndexEntry **index_entries,
1419 int *nb_index_entries,
1420 unsigned int *index_entries_allocated_size,
1421 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1423 AVIndexEntry *entries, *ie;
/* overflow guard before computing (*nb_index_entries + 1) * sizeof(...) */
1426 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1429 entries = av_fast_realloc(*index_entries,
1430 index_entries_allocated_size,
1431 (*nb_index_entries + 1) *
1432 sizeof(AVIndexEntry));
1436 *index_entries= entries;
/* AVSEEK_FLAG_ANY: find any entry at/near this timestamp, keyframe or not */
1438 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
/* not found before the end: append */
1441 index= (*nb_index_entries)++;
1442 ie= &entries[index];
1443 assert(index==0 || ie[-1].timestamp < timestamp);
1445 ie= &entries[index];
1446 if(ie->timestamp != timestamp){
1447 if(ie->timestamp <= timestamp)
/* shift the tail up by one slot to make room, preserving sort order */
1449 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1450 (*nb_index_entries)++;
1451 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1452 distance= ie->min_distance;
1456 ie->timestamp = timestamp;
1457 ie->min_distance= distance;
/* Public convenience wrapper: add an entry to st's own index arrays via
 * ff_add_index_entry(). Same return semantics as the internal function. */
1464 int av_add_index_entry(AVStream *st,
1465 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1467 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1468 &st->index_entries_allocated_size, pos,
1469 timestamp, size, distance, flags);
/* Binary-search a sorted index for wanted_timestamp.
 * AVSEEK_FLAG_BACKWARD selects the entry at or before the timestamp,
 * otherwise the entry at or after it. Unless AVSEEK_FLAG_ANY is set, the
 * result is then walked to the nearest keyframe entry in the search
 * direction. (Initialization of a/b and the miss return path are not
 * visible in this excerpt.) */
1472 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1473 int64_t wanted_timestamp, int flags)
1481 //optimize appending index entries at the end
1482 if(b && entries[b-1].timestamp < wanted_timestamp)
/* classic bisection: narrow [a, b] around the wanted timestamp */
1487 timestamp = entries[m].timestamp;
1488 if(timestamp >= wanted_timestamp)
1490 if(timestamp <= wanted_timestamp)
1493 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1495 if(!(flags & AVSEEK_FLAG_ANY)){
/* skip non-keyframe entries in the chosen direction */
1496 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1497 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Public wrapper: search st's own index with ff_index_search_timestamp(). */
1506 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1509 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1510 wanted_timestamp, flags);
1513 #if FF_API_SEEK_PUBLIC
/* Deprecated public wrapper (FF_API_SEEK_PUBLIC); forwards to
 * ff_seek_frame_binary(). */
1514 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1515 return ff_seek_frame_binary(s, stream_index, target_ts, flags);
/* Seek to target_ts using the demuxer's read_timestamp() via a generic
 * binary search (ff_gen_search). If the stream already has index entries,
 * they are used to pre-narrow the [pos_min, pos_max] search window.
 * On success seeks the AVIOContext to the found position and propagates the
 * resulting DTS to all streams. */
1519 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1521 AVInputFormat *avif= s->iformat;
1522 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1523 int64_t ts_min, ts_max, ts;
1528 if (stream_index < 0)
1531 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1534 ts_min= AV_NOPTS_VALUE;
1535 pos_limit= -1; //gcc falsely says it may be uninitialized
1537 st= s->streams[stream_index];
1538 if(st->index_entries){
/* lower bound: last index entry at or before target_ts */
1541 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1542 index= FFMAX(index, 0);
1543 e= &st->index_entries[index];
1545 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1547 ts_min= e->timestamp;
1548 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* upper bound: first index entry at or after target_ts */
1554 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1555 assert(index < st->nb_index_entries);
1557 e= &st->index_entries[index];
1558 assert(e->timestamp >= target_ts);
1560 ts_max= e->timestamp;
1561 pos_limit= pos_max - e->min_distance;
1562 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1563 pos_max,pos_limit, ts_max);
1567 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1572 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1575 ff_update_cur_dts(s, st, ts);
1580 #if FF_API_SEEK_PUBLIC
/* Deprecated public wrapper (FF_API_SEEK_PUBLIC); forwards all arguments,
 * including the read_timestamp callback, to ff_gen_search(). */
1581 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1582 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1583 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1584 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1586 return ff_gen_search(s, stream_index, target_ts, pos_min, pos_max,
1587 pos_limit, ts_min, ts_max, flags, ts_ret,
/* Generic timestamp-based search over a byte range.
 * If the caller did not supply ts_min/ts_max, they are probed from the start
 * of the data and from near EOF (stepping backwards). The main loop then
 * narrows [pos_min, pos_limit] using, in order of preference: linear
 * interpolation on the timestamps, bisection, and finally linear search.
 * Returns the chosen byte position and stores its timestamp in *ts_ret.
 * NOTE(review): 'step', 'no_change' handling and several branch bodies are
 * not visible in this excerpt. */
1592 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1593 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1594 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1595 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1598 int64_t start_pos, filesize;
1601 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* probe the minimum timestamp at the start of the payload if unknown */
1603 if(ts_min == AV_NOPTS_VALUE){
1604 pos_min = s->data_offset;
1605 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1606 if (ts_min == AV_NOPTS_VALUE)
/* probe the maximum timestamp near EOF, stepping backwards until one is found */
1610 if(ts_max == AV_NOPTS_VALUE){
1612 filesize = avio_size(s->pb);
1613 pos_max = filesize - 1;
1616 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1618 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1619 if (ts_max == AV_NOPTS_VALUE)
/* extend ts_max as long as later packets with timestamps exist */
1623 int64_t tmp_pos= pos_max + 1;
1624 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1625 if(tmp_ts == AV_NOPTS_VALUE)
1629 if(tmp_pos >= filesize)
1635 if(ts_min > ts_max){
1637 }else if(ts_min == ts_max){
/* main narrowing loop */
1642 while (pos_min < pos_limit) {
1643 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1644 pos_min, pos_max, ts_min, ts_max);
1645 assert(pos_limit <= pos_max);
1648 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1649 // interpolate position (better than dichotomy)
1650 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1651 + pos_min - approximate_keyframe_distance;
1652 }else if(no_change==1){
1653 // bisection, if interpolation failed to change min or max pos last time
1654 pos = (pos_min + pos_limit)>>1;
1656 /* linear search if bisection failed, can only happen if there
1657 are very few or no keyframes between min/max */
1662 else if(pos > pos_limit)
1666 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1671 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1672 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1673 pos_limit, start_pos, no_change);
1674 if(ts == AV_NOPTS_VALUE){
1675 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1678 assert(ts != AV_NOPTS_VALUE);
/* shrink the window around the probed timestamp */
1679 if (target_ts <= ts) {
1680 pos_limit = start_pos - 1;
1684 if (target_ts >= ts) {
/* pick the boundary matching the seek direction */
1690 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1691 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1693 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
/* NOTE(review): reads &pos_min here too although it assigns ts_max — looks
 * suspicious; pos_min may have been advanced by a missing line. Confirm
 * against the full file before concluding it is a bug. */
1695 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1696 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1697 pos, ts_min, target_ts, ts_max);
/* Byte-position seek (AVSEEK_FLAG_BYTE path): clamp 'pos' to the payload
 * range [data_offset, filesize-1], seek the AVIOContext there, and update
 * stream DTS state. 'ts' is computed from lines not visible in this
 * excerpt. */
1702 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1703 int64_t pos_min, pos_max;
1707 if (stream_index < 0)
1710 st= s->streams[stream_index];
1713 pos_min = s->data_offset;
1714 pos_max = avio_size(s->pb) - 1;
/* clamp the requested byte offset into the valid payload range */
1716 if (pos < pos_min) pos= pos_min;
1717 else if(pos > pos_max) pos= pos_max;
1719 avio_seek(s->pb, pos, SEEK_SET);
1722 av_update_cur_dts(s, st, ts);
/* Generic index-driven seek: look up 'timestamp' in the stream's index.
 * If the target lies beyond the last indexed entry, read frames forward
 * from the last known position, letting the index grow, until a keyframe
 * past the target is seen; then seek to the matching index entry. */
1727 static int seek_frame_generic(AVFormatContext *s,
1728 int stream_index, int64_t timestamp, int flags)
1735 st = s->streams[stream_index];
1737 index = av_index_search_timestamp(st, timestamp, flags);
/* target precedes the first index entry: handled by a branch whose body is
 * not visible here */
1739 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1742 if(index < 0 || index==st->nb_index_entries-1){
/* resume reading from the last indexed position (or data start) to extend
 * the index toward the target */
1745 if(st->nb_index_entries){
1746 assert(st->index_entries);
1747 ie= &st->index_entries[st->nb_index_entries-1];
1748 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1750 ff_update_cur_dts(s, st, ie->timestamp);
1752 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1758 read_status = av_read_frame(s, &pkt);
1759 } while (read_status == AVERROR(EAGAIN));
1760 if (read_status < 0)
1762 av_free_packet(&pkt);
1763 if(stream_index == pkt.stream_index){
/* stop once a keyframe past the wanted timestamp has been read */
1764 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1768 index = av_index_search_timestamp(st, timestamp, flags);
1773 ff_read_frame_flush(s);
/* prefer the demuxer's own read_seek if it succeeds */
1774 if (s->iformat->read_seek){
1775 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1778 ie = &st->index_entries[index];
1779 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1781 ff_update_cur_dts(s, st, ie->timestamp);
/* Public seek entry point. Dispatch order:
 *   1. byte seek if AVSEEK_FLAG_BYTE (unless the demuxer forbids it),
 *   2. the demuxer's read_seek,
 *   3. binary search via read_timestamp (unless AVFMT_NOBINSEARCH),
 *   4. generic index-based seek (unless AVFMT_NOGENSEARCH).
 * With stream_index < 0 a default stream is chosen and 'timestamp' is
 * converted from AV_TIME_BASE units into that stream's time base. */
1786 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1791 if (flags & AVSEEK_FLAG_BYTE) {
1792 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1794 ff_read_frame_flush(s);
1795 return seek_frame_byte(s, stream_index, timestamp, flags);
1798 if(stream_index < 0){
1799 stream_index= av_find_default_stream_index(s);
1800 if(stream_index < 0)
1803 st= s->streams[stream_index];
1804 /* timestamp for default must be expressed in AV_TIME_BASE units */
1805 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1808 /* first, we try the format specific seek */
1809 if (s->iformat->read_seek) {
1810 ff_read_frame_flush(s);
1811 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1818 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1819 ff_read_frame_flush(s);
1820 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1821 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1822 ff_read_frame_flush(s);
1823 return seek_frame_generic(s, stream_index, timestamp, flags);
/* Range-constrained seek API: request 'ts' while guaranteeing the result
 * lies within [min_ts, max_ts]. Uses the demuxer's read_seek2 when
 * available, otherwise falls back to av_seek_frame(), choosing
 * AVSEEK_FLAG_BACKWARD based on which side of 'ts' the range is wider on. */
1829 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1831 if(min_ts > ts || max_ts < ts)
1834 if (s->iformat->read_seek2) {
1835 ff_read_frame_flush(s);
1836 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1839 if(s->iformat->read_timestamp){
1840 //try to seek via read_timestamp()
1843 //Fallback to old API if new is not implemented but old is
1844 //Note the old API has somewhat different semantics
1845 if(s->iformat->read_seek || 1)
1846 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1848 // try some generic seek like seek_frame_generic() but with new ts semantics
1851 /*******************************************************/
1854 * Return TRUE if the stream has accurate duration in any stream.
1856 * @return TRUE if the stream has accurate duration for at least one component.
/* Scan all streams; true as soon as one has a known (non-AV_NOPTS_VALUE)
 * duration. */
1858 static int has_duration(AVFormatContext *ic)
1863 for(i = 0;i < ic->nb_streams; i++) {
1864 st = ic->streams[i];
1865 if (st->duration != AV_NOPTS_VALUE)
1872 * Estimate the stream timings from the one of each components.
1874 * Also computes the global bitrate if possible.
/* Derive container-level start_time/duration as the min start and max end
 * over all streams (each rescaled to AV_TIME_BASE), then compute bit_rate
 * from filesize and duration when both are available. */
1876 static void update_stream_timings(AVFormatContext *ic)
1878 int64_t start_time, start_time1, end_time, end_time1;
1879 int64_t duration, duration1, filesize;
1883 start_time = INT64_MAX;
1884 end_time = INT64_MIN;
1885 duration = INT64_MIN;
1886 for(i = 0;i < ic->nb_streams; i++) {
1887 st = ic->streams[i];
1888 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1889 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1890 start_time = FFMIN(start_time, start_time1);
1891 if (st->duration != AV_NOPTS_VALUE) {
1892 end_time1 = start_time1
1893 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1894 end_time = FFMAX(end_time, end_time1);
/* streams without a start_time still contribute their bare duration */
1897 if (st->duration != AV_NOPTS_VALUE) {
1898 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1899 duration = FFMAX(duration, duration1);
1902 if (start_time != INT64_MAX) {
1903 ic->start_time = start_time;
1904 if (end_time != INT64_MIN)
1905 duration = FFMAX(duration, end_time - start_time);
1907 if (duration != INT64_MIN) {
1908 ic->duration = duration;
1909 if (ic->pb && (filesize = avio_size(ic->pb)) > 0) {
1910 /* compute the bitrate */
1911 ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
1912 (double)ic->duration;
/* Recompute container timings, then back-fill start_time/duration for any
 * stream missing a start_time, rescaling container values into the stream's
 * time base. */
1917 static void fill_all_stream_timings(AVFormatContext *ic)
1922 update_stream_timings(ic);
1923 for(i = 0;i < ic->nb_streams; i++) {
1924 st = ic->streams[i];
1925 if (st->start_time == AV_NOPTS_VALUE) {
1926 if(ic->start_time != AV_NOPTS_VALUE)
1927 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1928 if(ic->duration != AV_NOPTS_VALUE)
1929 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Last-resort timing estimate: sum the per-stream codec bitrates if the
 * container bitrate is unset, then derive each stream's duration from
 * filesize * 8 / bit_rate (rescaled into the stream time base). */
1934 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1936 int64_t filesize, duration;
1940 /* if bit_rate is already set, we believe it */
1941 if (ic->bit_rate <= 0) {
1943 for(i=0;i<ic->nb_streams;i++) {
1944 st = ic->streams[i];
1945 if (st->codec->bit_rate > 0)
1946 bit_rate += st->codec->bit_rate;
1948 ic->bit_rate = bit_rate;
1951 /* if duration is already set, we believe it */
1952 if (ic->duration == AV_NOPTS_VALUE &&
1953 ic->bit_rate != 0) {
1954 filesize = ic->pb ? avio_size(ic->pb) : 0;
1956 for(i = 0; i < ic->nb_streams; i++) {
1957 st = ic->streams[i];
/* duration ≈ filesize*8 / bit_rate, expressed in the stream's time base */
1958 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1959 if (st->duration == AV_NOPTS_VALUE)
1960 st->duration = duration;
/* How many bytes to scan at EOF per attempt, and how many doubling retries */
1966 #define DURATION_MAX_READ_SIZE 250000
1967 #define DURATION_MAX_RETRY 3
1969 /* only usable for MPEG-PS streams */
/* Estimate per-stream durations by reading packets near the end of the file
 * and taking the largest PTS seen, relative to each stream's start_time (or
 * first_dts). Retries with a larger tail window if no end time was found.
 * Restores the original file position afterwards. */
1970 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1972 AVPacket pkt1, *pkt = &pkt1;
1974 int read_size, i, ret;
1976 int64_t filesize, offset, duration;
1981 /* flush packet queue */
1982 flush_packet_queue(ic);
1984 for (i=0; i<ic->nb_streams; i++) {
1985 st = ic->streams[i];
1986 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1987 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
1990 av_parser_close(st->parser);
1992 av_free_packet(&st->cur_pkt);
1996 /* estimate the end time (duration) */
1997 /* XXX: may need to support wrapping */
1998 filesize = ic->pb ? avio_size(ic->pb) : 0;
1999 end_time = AV_NOPTS_VALUE;
/* seek to a tail window whose size doubles on each retry */
2001 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2005 avio_seek(ic->pb, offset, SEEK_SET);
2008 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2012 ret = av_read_packet(ic, pkt);
2013 } while(ret == AVERROR(EAGAIN));
2016 read_size += pkt->size;
2017 st = ic->streams[pkt->stream_index];
2018 if (pkt->pts != AV_NOPTS_VALUE &&
2019 (st->start_time != AV_NOPTS_VALUE ||
2020 st->first_dts != AV_NOPTS_VALUE)) {
2021 duration = end_time = pkt->pts;
2022 if (st->start_time != AV_NOPTS_VALUE)
2023 duration -= st->start_time;
2025 duration -= st->first_dts;
/* compensate for PTS wraparound (a negative duration presumably triggers
 * this on a line not visible here) */
2027 duration += 1LL<<st->pts_wrap_bits;
2029 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
2030 st->duration = duration;
2033 av_free_packet(pkt);
2035 }while( end_time==AV_NOPTS_VALUE
2036 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2037 && ++retry <= DURATION_MAX_RETRY);
2039 fill_all_stream_timings(ic);
/* restore the original read position and reset per-stream DTS state */
2041 avio_seek(ic->pb, old_offset, SEEK_SET);
2042 for (i=0; i<ic->nb_streams; i++) {
2044 st->cur_dts= st->first_dts;
2045 st->last_IP_pts = AV_NOPTS_VALUE;
2046 st->reference_dts = AV_NOPTS_VALUE;
/* Choose a timing-estimation strategy:
 *   - MPEG-PS/TS with a seekable pb: scan PTS near EOF (most accurate),
 *   - any stream already has a duration: propagate stream timings,
 *   - otherwise: estimate from bitrate (least accurate, warns).
 * Finishes by recomputing container-level timings and dumping debug info. */
2050 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2054 /* get the file size, if possible */
2055 if (ic->iformat->flags & AVFMT_NOFILE) {
2058 file_size = avio_size(ic->pb);
2059 file_size = FFMAX(0, file_size);
2062 if ((!strcmp(ic->iformat->name, "mpeg") ||
2063 !strcmp(ic->iformat->name, "mpegts")) &&
2064 file_size && ic->pb->seekable) {
2065 /* get accurate estimate from the PTSes */
2066 estimate_timings_from_pts(ic, old_offset);
2067 } else if (has_duration(ic)) {
2068 /* at least one component has timings - we use them for all
2070 fill_all_stream_timings(ic);
2072 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2073 /* less precise: use bitrate info */
2074 estimate_timings_from_bit_rate(ic);
2076 update_stream_timings(ic);
/* debug dump of the resulting per-stream and container timings */
2080 AVStream av_unused *st;
2081 for(i = 0;i < ic->nb_streams; i++) {
2082 st = ic->streams[i];
2083 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2084 (double) st->start_time / AV_TIME_BASE,
2085 (double) st->duration / AV_TIME_BASE);
2087 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2088 (double) ic->start_time / AV_TIME_BASE,
2089 (double) ic->duration / AV_TIME_BASE,
2090 ic->bit_rate / 1000);
/* True once the codec context carries enough parameters to be usable:
 * audio needs sample_rate/channels/sample_fmt (and frame_size for codecs
 * that normally report it), video needs width and a pixel format; in every
 * case a codec id must be set. */
2094 static int has_codec_parameters(AVCodecContext *avctx)
2097 switch (avctx->codec_type) {
2098 case AVMEDIA_TYPE_AUDIO:
2099 val = avctx->sample_rate && avctx->channels && avctx->sample_fmt != AV_SAMPLE_FMT_NONE;
/* these codecs are expected to report a frame_size; without it the
 * parameters are considered incomplete (branch body not visible here) */
2100 if (!avctx->frame_size &&
2101 (avctx->codec_id == CODEC_ID_VORBIS ||
2102 avctx->codec_id == CODEC_ID_AAC ||
2103 avctx->codec_id == CODEC_ID_MP1 ||
2104 avctx->codec_id == CODEC_ID_MP2 ||
2105 avctx->codec_id == CODEC_ID_MP3 ||
2106 avctx->codec_id == CODEC_ID_CELT))
2109 case AVMEDIA_TYPE_VIDEO:
2110 val = avctx->width && avctx->pix_fmt != PIX_FMT_NONE;
2116 return avctx->codec_id != CODEC_ID_NONE && val != 0;
/* H.264 can have a long reorder delay; require several decoded frames
 * before trusting the guessed delay. All other codecs pass immediately. */
2119 static int has_decode_delay_been_guessed(AVStream *st)
2121 return st->codec->codec_id != CODEC_ID_H264 ||
2122 st->info->nb_decoded_frames >= 6;
/* Open a decoder for the stream if needed and decode one packet, purely to
 * fill in missing codec parameters (dimensions, sample format, channel
 * layout, decode delay). Decoding is skipped when parameters are already
 * complete. NOTE(review): the tail of this function (audio decode result
 * handling, cleanup, return) is not visible in this excerpt. */
2125 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2129 int got_picture, data_size, ret=0;
2132 if(!st->codec->codec){
2133 codec = avcodec_find_decoder(st->codec->codec_id);
2136 ret = avcodec_open2(st->codec, codec, options);
/* decode only while something is still unknown, or to force at least one
 * frame for codecs that configure channels during decode (CHANNEL_CONF) */
2141 if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st) ||
2142 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF)) {
2143 switch(st->codec->codec_type) {
2144 case AVMEDIA_TYPE_VIDEO:
2145 avcodec_get_frame_defaults(&picture);
2146 ret = avcodec_decode_video2(st->codec, &picture,
2147 &got_picture, avpkt);
2149 st->info->nb_decoded_frames++;
2151 case AVMEDIA_TYPE_AUDIO:
2152 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
2153 samples = av_malloc(data_size);
2156 ret = avcodec_decode_audio3(st->codec, samples,
/* Look up the container tag for a codec id by scanning a CODEC_ID_NONE-
 * terminated tag table. (Loop body and return are not visible in this
 * excerpt.) */
2168 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2170 while (tags->id != CODEC_ID_NONE) {
/* Reverse lookup: map a container tag to a codec id. First pass matches the
 * tag exactly; second pass retries case-insensitively via avpriv_toupper4
 * (fourcc tags often differ only in case). Returns CODEC_ID_NONE if absent. */
2178 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2181 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2182 if(tag == tags[i].tag)
2185 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2186 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2189 return CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for the tag matching 'id'.
 * (The hit/return handling after the lookup is not visible here.) */
2192 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2195 for(i=0; tags && tags[i]; i++){
2196 int tag= ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for the codec id matching
 * 'tag'; first hit wins, CODEC_ID_NONE if no table matches. */
2202 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2205 for(i=0; tags && tags[i]; i++){
2206 enum CodecID id= ff_codec_get_id(tags[i], tag);
2207 if(id!=CODEC_ID_NONE) return id;
2209 return CODEC_ID_NONE;
/* Fill in missing chapter end times: a chapter with no end gets the start
 * of the next chapter that begins after it, bounded by the container's
 * total extent (start_time + duration). If no bound exists, the chapter
 * ends where it starts. */
2212 static void compute_chapters_end(AVFormatContext *s)
2215 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2217 for (i = 0; i < s->nb_chapters; i++)
2218 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2219 AVChapter *ch = s->chapters[i];
2220 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
/* clamp 'end' to the earliest later-starting chapter */
2223 for (j = 0; j < s->nb_chapters; j++) {
2224 AVChapter *ch1 = s->chapters[j];
2225 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2226 if (j != i && next_start > ch->start && next_start < end)
2229 ch->end = (end == INT64_MAX) ? ch->start : end;
/* Map a probe index to a candidate frame rate, in units of 1/(12*1001) Hz:
 * indices below 60*12 cover i/12 fps in 1/12 steps; the remainder are the
 * common NTSC-friendly rates 24/30/60/12/15 (scaled by 1000*12). */
2233 static int get_std_framerate(int i){
2234 if(i<60*12) return i*1001;
2235 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2239 * Is the time base unreliable.
2240 * This is a heuristic to balance between quick acceptance of the values in
2241 * the headers vs. some extra checks.
2242 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2243 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2244 * And there are "variable" fps files this needs to detect as well.
/* Returns nonzero when the codec time base should not be trusted:
 * implausibly high (>=101 fps) or low (<5 fps) rates, or codecs known to
 * misreport (MPEG-2 field repeat, H.264). */
2246 static int tb_unreliable(AVCodecContext *c){
2247 if( c->time_base.den >= 101L*c->time_base.num
2248 || c->time_base.den < 5L*c->time_base.num
2249 /* || c->codec_tag == AV_RL32("DIVX")
2250 || c->codec_tag == AV_RL32("XVID")*/
2251 || c->codec_id == CODEC_ID_MPEG2VIDEO
2252 || c->codec_id == CODEC_ID_H264
2258 #if FF_API_FORMAT_PARAMETERS
/* Deprecated public wrapper (FF_API_FORMAT_PARAMETERS); equivalent to
 * avformat_find_stream_info() with no per-stream options. */
2259 int av_find_stream_info(AVFormatContext *ic)
2261 return avformat_find_stream_info(ic, NULL);
/* Probe the input until enough codec parameters are known for every stream.
 * Phases: (1) set up parsers and try opening decoders cheaply; (2) read and
 * buffer packets, collecting frame-duration statistics and extradata, and
 * decoding frames only when parameters are still missing; (3) close probe
 * decoders, derive avg/r_frame_rate from the gathered statistics, set audio
 * dispositions; (4) estimate timings, finalize chapters, and fix up DTS for
 * B-frame streams. 'options' supplies per-stream decoder options for the
 * first orig_nb_streams streams only. */
2265 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2267 int i, count, ret, read_size, j;
2269 AVPacket pkt1, *pkt;
2270 int64_t old_offset = avio_tell(ic->pb);
2271 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
/* phase 1: per-stream setup — parsers and cheap decoder opens */
2273 for(i=0;i<ic->nb_streams;i++) {
2275 st = ic->streams[i];
2277 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2278 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2279 /* if(!st->time_base.num)
2281 if(!st->codec->time_base.num)
2282 st->codec->time_base= st->time_base;
2284 //only for the split stuff
2285 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2286 st->parser = av_parser_init(st->codec->codec_id);
2287 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2288 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2291 assert(!st->codec->codec);
2292 codec = avcodec_find_decoder(st->codec->codec_id);
2294 /* Ensure that subtitle_header is properly set. */
2295 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2296 && codec && !st->codec->codec)
2297 avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
2299 //try to just open decoders, in case this is enough to get parameters
2300 if(!has_codec_parameters(st->codec)){
2301 if (codec && !st->codec->codec)
2302 avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
2306 for (i=0; i<ic->nb_streams; i++) {
2307 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
/* phase 2: main probing loop (loop header not visible in this excerpt) */
2313 if(url_interrupt_cb()){
2315 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2319 /* check if one codec still needs to be handled */
2320 for(i=0;i<ic->nb_streams;i++) {
2321 int fps_analyze_framecount = 20;
2323 st = ic->streams[i];
2324 if (!has_codec_parameters(st->codec))
2326 /* if the timebase is coarse (like the usual millisecond precision
2327 of mkv), we need to analyze more frames to reliably arrive at
2329 if (av_q2d(st->time_base) > 0.0005)
2330 fps_analyze_framecount *= 2;
2331 if (ic->fps_probe_size >= 0)
2332 fps_analyze_framecount = ic->fps_probe_size;
2333 /* variable fps and no guess at the real fps */
2334 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2335 && st->info->duration_count < fps_analyze_framecount
2336 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2338 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2340 if(st->first_dts == AV_NOPTS_VALUE)
2343 if (i == ic->nb_streams) {
2344 /* NOTE: if the format has no header, then we need to read
2345 some packets to get most of the streams, so we cannot
2347 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2348 /* if we found the info for all the codecs, we can stop */
2350 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2354 /* we did not get all the codec info, but we read too much data */
2355 if (read_size >= ic->probesize) {
2357 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2361 /* NOTE: a new stream can be added there if no header in file
2362 (AVFMTCTX_NOHEADER) */
2363 ret = read_frame_internal(ic, &pkt1);
2364 if (ret == AVERROR(EAGAIN))
/* EOF path: warn about streams whose parameters never materialized */
2369 ret = -1; /* we could not have all the codec parameters before EOF */
2370 for(i=0;i<ic->nb_streams;i++) {
2371 st = ic->streams[i];
2372 if (!has_codec_parameters(st->codec)){
2374 avcodec_string(buf, sizeof(buf), st->codec, 0);
2375 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
/* keep the packet for later replay by callers of av_read_frame() */
2383 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2384 if ((ret = av_dup_packet(pkt)) < 0)
2385 goto find_stream_info_err;
2387 read_size += pkt->size;
2389 st = ic->streams[pkt->stream_index];
2390 if (st->codec_info_nb_frames>1) {
2391 if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2392 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2395 st->info->codec_info_duration += pkt->duration;
/* frame-interval statistics used later to guess the real frame rate */
2398 int64_t last = st->info->last_dts;
2400 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2401 int64_t duration= pkt->dts - last;
2402 double dur= duration * av_q2d(st->time_base);
2404 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2405 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2406 if (st->info->duration_count < 2)
2407 memset(st->info->duration_error, 0, sizeof(st->info->duration_error));
/* accumulate squared error of this interval against each candidate rate */
2408 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) {
2409 int framerate= get_std_framerate(i);
2410 int ticks= lrintf(dur*framerate/(1001*12));
2411 double error= dur - ticks*1001*12/(double)framerate;
2412 st->info->duration_error[i] += error*error;
2414 st->info->duration_count++;
2415 // ignore the first 4 values, they might have some random jitter
2416 if (st->info->duration_count > 3)
2417 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2419 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2420 st->info->last_dts = pkt->dts;
/* extract extradata (e.g. codec headers) via the parser's split callback */
2422 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2423 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2425 st->codec->extradata_size= i;
2426 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2427 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2428 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2432 /* if still no information, we try to open the codec and to
2433 decompress the frame. We try to avoid that in most cases as
2434 it takes longer and uses more memory. For MPEG-4, we need to
2435 decompress for QuickTime.
2437 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2438 least one frame of codec data, this makes sure the codec initializes
2439 the channel configuration and does not only trust the values from the container.
2441 try_decode_frame(st, pkt, (options && i < orig_nb_streams )? &options[i] : NULL);
2443 st->codec_info_nb_frames++;
2447 // close codecs which were opened in try_decode_frame()
2448 for(i=0;i<ic->nb_streams;i++) {
2449 st = ic->streams[i];
2450 if(st->codec->codec)
2451 avcodec_close(st->codec);
/* phase 3: derive frame rates and dispositions from gathered statistics */
2453 for(i=0;i<ic->nb_streams;i++) {
2454 st = ic->streams[i];
2455 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2456 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2457 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2458 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2459 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2460 // the check for tb_unreliable() is not completely correct, since this is not about handling
2461 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2462 // ipmovie.c produces.
2463 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num)
2464 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2465 if (st->info->duration_count && !st->r_frame_rate.num
2466 && tb_unreliable(st->codec) /*&&
2467 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2468 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
/* pick the standard frame rate with the lowest accumulated error */
2470 double best_error= 2*av_q2d(st->time_base);
2471 best_error = best_error*best_error*st->info->duration_count*1000*12*30;
2473 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) {
2474 double error = st->info->duration_error[j] * get_std_framerate(j);
2475 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2476 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2477 if(error < best_error){
2479 num = get_std_framerate(j);
2482 // do not increase frame rate by more than 1 % in order to match a standard rate.
2483 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2484 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
/* fallback: derive r_frame_rate from codec or stream time base */
2487 if (!st->r_frame_rate.num){
2488 if( st->codec->time_base.den * (int64_t)st->time_base.num
2489 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2490 st->r_frame_rate.num = st->codec->time_base.den;
2491 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2493 st->r_frame_rate.num = st->time_base.den;
2494 st->r_frame_rate.den = st->time_base.num;
2497 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2498 if(!st->codec->bits_per_coded_sample)
2499 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2500 // set stream disposition based on audio service type
2501 switch (st->codec->audio_service_type) {
2502 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2503 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2504 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2505 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2506 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2507 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2508 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2509 st->disposition = AV_DISPOSITION_COMMENT; break;
2510 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2511 st->disposition = AV_DISPOSITION_KARAOKE; break;
/* phase 4: timings, chapters, and DTS fixup for B-frame streams */
2516 estimate_timings(ic, old_offset);
2518 compute_chapters_end(ic);
2521 /* correct DTS for B-frame streams with no timestamps */
2522 for(i=0;i<ic->nb_streams;i++) {
2523 st = ic->streams[i];
2524 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2526 ppktl = &ic->packet_buffer;
2528 if(ppkt1->stream_index != i)
2530 if(ppkt1->pkt->dts < 0)
2532 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2534 ppkt1->pkt->dts -= delta;
2539 st->cur_dts -= delta;
/* error/cleanup path: release the per-stream probe info structures */
2545 find_stream_info_err:
2546 for (i=0; i < ic->nb_streams; i++)
2547 av_freep(&ic->streams[i]->info);
/* Return the first program containing stream index 's', or fall through
 * (the not-found return is on a line not visible here, presumably NULL). */
2551 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2555 for (i = 0; i < ic->nb_programs; i++)
2556 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2557 if (ic->programs[i]->stream_index[j] == s)
2558 return ic->programs[i];
/* Pick the "best" stream of the given media type: the decodable stream
 * with the most demuxed frames (codec_info_nb_frames), preferring streams
 * that are not hearing/visually-impaired renditions.  When related_stream
 * is given and no explicit stream is wanted, the search is first limited
 * to the streams of the program containing related_stream, then retried
 * over all streams if that yields nothing.
 * NOTE(review): this listing omits several lines (continue statements,
 * the decoder_ret guard, the final return); comments only added here. */
2562 int av_find_best_stream(AVFormatContext *ic,
2563 enum AVMediaType type,
2564 int wanted_stream_nb,
2566 AVCodec **decoder_ret,
2569     int i, nb_streams = ic->nb_streams;
2570     int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2571     unsigned *program = NULL;
2572     AVCodec *decoder = NULL, *best_decoder = NULL;
2574     if (related_stream >= 0 && wanted_stream_nb < 0) {
2575         AVProgram *p = find_program_from_stream(ic, related_stream);
2577             program = p->stream_index;
2578             nb_streams = p->nb_stream_indexes;
2581     for (i = 0; i < nb_streams; i++) {
2582         int real_stream_index = program ? program[i] : i;
2583         AVStream *st = ic->streams[real_stream_index];
2584         AVCodecContext *avctx = st->codec;
2585         if (avctx->codec_type != type)
2587         if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
/* skip impaired-audience renditions unless explicitly requested */
2589         if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2592             decoder = avcodec_find_decoder(st->codec->codec_id);
2595                     ret = AVERROR_DECODER_NOT_FOUND;
2599         if (best_count >= st->codec_info_nb_frames)
2601         best_count = st->codec_info_nb_frames;
2602         ret = real_stream_index;
2603         best_decoder = decoder;
/* restart the loop over ALL streams when the program subset had no hit */
2604         if (program && i == nb_streams - 1 && ret < 0) {
2606             nb_streams = ic->nb_streams;
2607             i = 0; /* no related stream found, try again with everything */
2611         *decoder_ret = best_decoder;
2615 /*******************************************************/
/* Resume a paused (network) stream: prefer the demuxer's read_play hook,
 * otherwise fall back to avio_pause(pb, 0); ENOSYS if neither applies. */
2617 int av_read_play(AVFormatContext *s)
2619     if (s->iformat->read_play)
2620         return s->iformat->read_play(s);
2622         return avio_pause(s->pb, 0);
2623     return AVERROR(ENOSYS);
/* Pause a (network) stream: mirror of av_read_play with pause flag 1. */
2626 int av_read_pause(AVFormatContext *s)
2628     if (s->iformat->read_pause)
2629         return s->iformat->read_pause(s);
2631         return avio_pause(s->pb, 1);
2632     return AVERROR(ENOSYS);
/* Close an input context without touching its AVIOContext: flush queued
 * packets, let the demuxer clean up, then free the context itself. */
2635 void av_close_input_stream(AVFormatContext *s)
2637     flush_packet_queue(s);
2638     if (s->iformat->read_close)
2639         s->iformat->read_close(s);
2640     avformat_free_context(s);
2643 void avformat_free_context(AVFormatContext *s)
2649 if (s->iformat && s->iformat->priv_class && s->priv_data)
2650 av_opt_free(s->priv_data);
2652 for(i=0;i<s->nb_streams;i++) {
2653 /* free all data in a stream component */
2656 av_parser_close(st->parser);
2657 av_free_packet(&st->cur_pkt);
2659 av_dict_free(&st->metadata);
2660 av_free(st->index_entries);
2661 av_free(st->codec->extradata);
2662 av_free(st->codec->subtitle_header);
2664 av_free(st->priv_data);
2668 for(i=s->nb_programs-1; i>=0; i--) {
2669 av_dict_free(&s->programs[i]->metadata);
2670 av_freep(&s->programs[i]->stream_index);
2671 av_freep(&s->programs[i]);
2673 av_freep(&s->programs);
2674 av_freep(&s->priv_data);
2675 while(s->nb_chapters--) {
2676 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2677 av_free(s->chapters[s->nb_chapters]);
2679 av_freep(&s->chapters);
2680 av_dict_free(&s->metadata);
2681 av_freep(&s->streams);
/* Close an input file: remember the AVIOContext (unless the demuxer is
 * NOFILE or the caller supplied custom I/O), close the stream, and
 * presumably avio_close(pb) on the lines missing from this listing. */
2685 void av_close_input_file(AVFormatContext *s)
2687     AVIOContext *pb = (s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2689     av_close_input_stream(s);
/* Deprecated wrapper (FF_API_NEW_STREAM): create a stream and, on the
 * lines not shown here, set its id before returning it. */
2694 #if FF_API_NEW_STREAM
2695 AVStream *av_new_stream(AVFormatContext *s, int id)
2697     AVStream *st = avformat_new_stream(s, NULL);
/* Append a new AVStream to s, with a codec context preconfigured for
 * codec c (may be NULL).  Timestamps default to the MPEG 90kHz base and
 * all pts/dts fields start out at AV_NOPTS_VALUE.
 * NOTE(review): error-path lines (failed realloc/mallocz cleanup) and
 * the final "return st" fall in this listing's numbering gaps. */
2704 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
/* guard the stream-array realloc against integer overflow */
2710     if (s->nb_streams >= INT_MAX/sizeof(*streams))
2712     streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2715     s->streams = streams;
2717     st = av_mallocz(sizeof(AVStream));
2720     if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2725     st->codec = avcodec_alloc_context3(c);
2727         /* no default bitrate if decoding */
2728         st->codec->bit_rate = 0;
2730     st->index = s->nb_streams;
2731     st->start_time = AV_NOPTS_VALUE;
2732     st->duration = AV_NOPTS_VALUE;
2733     /* we set the current DTS to 0 so that formats without any timestamps
2734        but durations get some timestamps, formats with some unknown
2735        timestamps have their first few packets buffered and the
2736        timestamps corrected before they are returned to the user */
2738     st->first_dts = AV_NOPTS_VALUE;
2739     st->probe_packets = MAX_PROBE_PACKETS;
2741     /* default pts setting is MPEG-like */
2742     av_set_pts_info(st, 33, 1, 90000);
2743     st->last_IP_pts = AV_NOPTS_VALUE;
2744     for(i=0; i<MAX_REORDER_DELAY+1; i++)
2745         st->pts_buffer[i]= AV_NOPTS_VALUE;
2746     st->reference_dts = AV_NOPTS_VALUE;
2748     st->sample_aspect_ratio = (AVRational){0,1};
2750     s->streams[s->nb_streams++] = st;
/* Return the program with the given id, creating and registering a new
 * one (discard = AVDISCARD_NONE) if it does not exist yet. */
2754 AVProgram *av_new_program(AVFormatContext *ac, int id)
2756     AVProgram *program=NULL;
2759     av_dlog(ac, "new_program: id=0x%04x\n", id);
/* look up an existing program first so ids stay unique */
2761     for(i=0; i<ac->nb_programs; i++)
2762         if(ac->programs[i]->id == id)
2763             program = ac->programs[i];
2766         program = av_mallocz(sizeof(AVProgram));
2769         dynarray_add(&ac->programs, &ac->nb_programs, program);
2770         program->discard = AVDISCARD_NONE;
/* Return the chapter with the given id (creating it if needed) and update
 * its title/time_base/start; the "end" assignment and return are in this
 * listing's numbering gaps. */
2777 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2779     AVChapter *chapter = NULL;
2782     for(i=0; i<s->nb_chapters; i++)
2783         if(s->chapters[i]->id == id)
2784             chapter = s->chapters[i];
2787         chapter= av_mallocz(sizeof(AVChapter));
2790         dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2792     av_dict_set(&chapter->metadata, "title", title, 0);
2794     chapter->time_base= time_base;
2795     chapter->start = start;
2801 /************************************************************/
2802 /* output media file */
/* Deprecated (FF_API_FORMAT_PARAMETERS): allocate the muxer's private
 * data, apply its option defaults, and forward ap to the muxer's
 * set_parameters hook if present. */
2804 #if FF_API_FORMAT_PARAMETERS
2805 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2809     if (s->oformat->priv_data_size > 0) {
2810         s->priv_data = av_mallocz(s->oformat->priv_data_size);
2812             return AVERROR(ENOMEM);
2813         if (s->oformat->priv_class) {
/* first member of priv_data must be the AVClass pointer for AVOptions */
2814             *(const AVClass**)s->priv_data= s->oformat->priv_class;
2815             av_opt_set_defaults(s->priv_data);
2818         s->priv_data = NULL;
2820     if (s->oformat->set_parameters) {
2821         ret = s->oformat->set_parameters(s, ap);
/* Check whether the stream's (codec_tag, codec_id) pair is consistent
 * with the muxer's codec_tag tables (case-insensitive via avpriv_toupper4).
 * NOTE(review): the actual return statements sit in this listing's gaps;
 * the policy comment below documents the intent. */
2829 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
2831     const AVCodecTag *avctag;
2833     enum CodecID id = CODEC_ID_NONE;
2834     unsigned int tag = 0;
2837      * Check that tag + id is in the table
2838      * If neither is in the table -> OK
2839      * If tag is in the table with another id -> FAIL
2840      * If id is in the table with another tag -> FAIL unless strict < normal
2842     for (n = 0; s->oformat->codec_tag[n]; n++) {
2843         avctag = s->oformat->codec_tag[n];
2844         while (avctag->id != CODEC_ID_NONE) {
2845             if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
2847                 if (id == st->codec->codec_id)
2850             if (avctag->id == st->codec->codec_id)
2855     if (id != CODEC_ID_NONE)
2857     if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
/* Deprecated wrapper: avformat_write_header with no options. */
2862 #if FF_API_FORMAT_PARAMETERS
2863 int av_write_header(AVFormatContext *s)
2865     return avformat_write_header(s, NULL);
/* Validate the muxing context, apply options, allocate muxer private
 * data, call the muxer's write_header, and initialise per-stream PTS
 * generation.  Returns 0 on success, a negative AVERROR otherwise.
 * NOTE(review): many lines (goto fail paths, breaks, stream fetches,
 * closing braces) are in this listing's numbering gaps. */
2869 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
2873     AVDictionary *tmp = NULL;
/* work on a copy of the options dict so the caller's dict reports leftovers */
2876         av_dict_copy(&tmp, *options, 0);
2877     if ((ret = av_opt_set_dict(s, &tmp)) < 0)
2880     // some sanity checks
2881     if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
2882         av_log(s, AV_LOG_ERROR, "no streams\n");
2883         ret = AVERROR(EINVAL);
2887     for(i=0;i<s->nb_streams;i++) {
2890         switch (st->codec->codec_type) {
2891         case AVMEDIA_TYPE_AUDIO:
2892             if(st->codec->sample_rate<=0){
2893                 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2894                 ret = AVERROR(EINVAL);
2897             if(!st->codec->block_align)
2898                 st->codec->block_align = st->codec->channels *
2899                     av_get_bits_per_sample(st->codec->codec_id) >> 3;
2901         case AVMEDIA_TYPE_VIDEO:
2902             if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2903                 av_log(s, AV_LOG_ERROR, "time base not set\n");
2904                 ret = AVERROR(EINVAL);
2907             if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2908                 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2909                 ret = AVERROR(EINVAL);
2912             if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2913                 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2914                 ret = AVERROR(EINVAL);
/* reconcile the stream's codec_tag with the muxer's tag tables */
2920         if(s->oformat->codec_tag){
2921             if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
2922                 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi, we override it here
2923                 st->codec->codec_tag= 0;
2925             if(st->codec->codec_tag){
2926                 if (!validate_codec_tag(s, st)) {
2928                     av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
2929                     av_log(s, AV_LOG_ERROR,
2930                            "Tag %s/0x%08x incompatible with output codec id '%d'\n",
2931                            tagbuf, st->codec->codec_tag, st->codec->codec_id);
2932                     ret = AVERROR_INVALIDDATA;
2936                 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2939         if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2940           !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2941           av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2944     if (!s->priv_data && s->oformat->priv_data_size > 0) {
2945         s->priv_data = av_mallocz(s->oformat->priv_data_size);
2946         if (!s->priv_data) {
2947             ret = AVERROR(ENOMEM);
2950         if (s->oformat->priv_class) {
2951             *(const AVClass**)s->priv_data= s->oformat->priv_class;
2952             av_opt_set_defaults(s->priv_data);
2953             if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
2958     /* set muxer identification string */
2959     if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2960         av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
2963     if(s->oformat->write_header){
2964         ret = s->oformat->write_header(s);
2969     /* init PTS generation */
2970     for(i=0;i<s->nb_streams;i++) {
2971         int64_t den = AV_NOPTS_VALUE;
2974         switch (st->codec->codec_type) {
2975         case AVMEDIA_TYPE_AUDIO:
2976             den = (int64_t)st->time_base.num * st->codec->sample_rate;
2978         case AVMEDIA_TYPE_VIDEO:
2979             den = (int64_t)st->time_base.num * st->codec->time_base.den;
2984         if (den != AV_NOPTS_VALUE) {
2986                 ret = AVERROR_INVALIDDATA;
2989             frac_init(&st->pts, 0, 0, den);
2994         av_dict_free(options);
3003 //FIXME merge with compute_pkt_fields
/* Muxing-side timestamp fixup: derive missing duration/pts/dts (using a
 * reorder buffer of up to MAX_REORDER_DELAY pts values when B-frames
 * delay dts), enforce monotonically increasing dts and pts >= dts, then
 * advance the stream's fractional pts counter by one frame/packet. */
3004 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
3005     int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
3006     int num, den, frame_size, i;
3008     av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n",
3009             pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
3011 /*    if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
3012         return AVERROR(EINVAL);*/
3014     /* duration field */
3015     if (pkt->duration == 0) {
3016         compute_frame_duration(&num, &den, st, NULL, pkt);
3018             pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
/* with no reordering delay, a lone dts is also a valid pts */
3022     if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
3025     //XXX/FIXME this is a temporary hack until all encoders output pts
3026     if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
3028 //        pkt->pts= st->cur_dts;
3029         pkt->pts= st->pts.val;
3032     //calculate dts from pts
3033     if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
3034         st->pts_buffer[0]= pkt->pts;
3035         for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
3036             st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
/* one bubble-sort pass keeps the buffer ordered; dts = smallest pts seen */
3037         for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
3038             FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
3040         pkt->dts= st->pts_buffer[0];
3043     if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
3044         av_log(s, AV_LOG_ERROR,
3045                "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
3046                st->index, st->cur_dts, pkt->dts);
3047         return AVERROR(EINVAL);
3049     if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3050         av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index);
3051         return AVERROR(EINVAL);
3054 //    av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
3055     st->cur_dts= pkt->dts;
3056     st->pts.val= pkt->dts;
3059     switch (st->codec->codec_type) {
3060     case AVMEDIA_TYPE_AUDIO:
3061         frame_size = get_audio_frame_size(st->codec, pkt->size);
3063         /* HACK/FIXME, we skip the initial 0 size packets as they are most
3064            likely equal to the encoder delay, but it would be better if we
3065            had the real timestamps from the encoder */
3066         if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3067             frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3070     case AVMEDIA_TYPE_VIDEO:
3071         frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Write one packet directly (no interleaving): fix up its timestamps,
 * then hand it to the muxer; bump the stream's frame counter on success. */
3079 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3081     int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
/* timestamp errors are fatal only for muxers that need timestamps */
3083     if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3086     ret= s->oformat->write_packet(s, pkt);
3089         s->streams[pkt->stream_index]->nb_frames++;
/* Insert a copy of pkt into the interleaving queue at the position chosen
 * by compare(); ownership of the data moves to the queue (pkt->destruct
 * is cleared so the caller's packet is not freed twice).
 * NOTE(review): the av_mallocz result is used unchecked on the next line
 * — presumably guarded in the lines this listing omits; confirm. */
3093 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3094                               int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3096     AVPacketList **next_point, *this_pktl;
3098     this_pktl = av_mallocz(sizeof(AVPacketList));
3099     this_pktl->pkt= *pkt;
3100     pkt->destruct= NULL;             // do not free original but only the copy
3101     av_dup_packet(&this_pktl->pkt);  // duplicate the packet if it uses non-alloced memory
/* start scanning after this stream's last queued packet to keep per-stream order */
3103     if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3104         next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
3106         next_point = &s->packet_buffer;
/* common case: new packet sorts after the current queue tail */
3109         if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3110             while(!compare(s, &(*next_point)->pkt, pkt)){
3111                 next_point= &(*next_point)->next;
3115             next_point = &(s->packet_buffer_end->next);
3118     assert(!*next_point);
3120     s->packet_buffer_end= this_pktl;
3123     this_pktl->next= *next_point;
3125     s->streams[pkt->stream_index]->last_in_packet_buffer=
3126     *next_point= this_pktl;
/* Ordering predicate for dts-based interleaving: compare the two packets'
 * dts in their own stream time bases; the stream index is the tie-breaker
 * so the sort stays stable across streams. */
3129 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3131     AVStream *st = s->streams[ pkt ->stream_index];
3132     AVStream *st2= s->streams[ next->stream_index];
3133     int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3137         return pkt->stream_index < next->stream_index;
/* Default interleaver: queue pkt (if any) sorted by dts, and emit the
 * head packet once every stream has at least one queued packet — or, on
 * flush, as long as anything is queued.  Returns 1 when *out is filled. */
3141 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
3147         ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
3150     for(i=0; i < s->nb_streams; i++)
3151         stream_count+= !!s->streams[i]->last_in_packet_buffer;
3153     if(stream_count && (s->nb_streams == stream_count || flush)){
3154         pktl= s->packet_buffer;
3157         s->packet_buffer= pktl->next;
3158         if(!s->packet_buffer)
3159             s->packet_buffer_end= NULL;
/* the dequeued packet may have been this stream's queue tail */
3161         if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3162             s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3166         av_init_packet(out);
/**
3172  * Interleave an AVPacket correctly so it can be muxed.
3173  * @param out the interleaved packet will be output here
3174  * @param in the input packet
3175  * @param flush 1 if no further packets are available as input and all
3176  *              remaining packets should be output
3177  * @return 1 if a packet was output, 0 if no packet could be output,
3178  *         < 0 if an error occurred
 */
/* Dispatch: prefer the muxer's own interleaver, else dts ordering. */
3180 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3181     if(s->oformat->interleave_packet)
3182         return s->oformat->interleave_packet(s, out, in, flush);
3184         return av_interleave_packet_per_dts(s, out, in, flush);
/* Write a packet through the interleaving queue: fix up timestamps,
 * reject timestamp-less packets for muxers that need them, then drain as
 * many fully-interleaved packets to the muxer as are ready. */
3187 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3188     AVStream *st= s->streams[ pkt->stream_index];
3191     //FIXME/XXX/HACK drop zero sized packets
3192     if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3195     av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
3196             pkt->size, pkt->dts, pkt->pts);
3197     if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3200     if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3201         return AVERROR(EINVAL);
/* loop (header in a gap of this listing): emit packets while ready */
3205         int ret= interleave_packet(s, &opkt, pkt, 0);
3206         if(ret<=0) //FIXME cleanup needed for ret<0 ?
3209         ret= s->oformat->write_packet(s, &opkt);
3211             s->streams[opkt.stream_index]->nb_frames++;
3213         av_free_packet(&opkt);
/* Finish muxing: flush every remaining interleaved packet, call the
 * muxer's write_trailer, and release per-stream and muxer private data. */
3221 int av_write_trailer(AVFormatContext *s)
3227         ret= interleave_packet(s, &pkt, NULL, 1);
3228         if(ret<0) //FIXME cleanup needed for ret<0 ?
3233         ret= s->oformat->write_packet(s, &pkt);
3235             s->streams[pkt.stream_index]->nb_frames++;
3237         av_free_packet(&pkt);
3243     if(s->oformat->write_trailer)
3244         ret = s->oformat->write_trailer(s);
3246     for(i=0;i<s->nb_streams;i++) {
3247         av_freep(&s->streams[i]->priv_data);
3248         av_freep(&s->streams[i]->index_entries);
3250     if (s->iformat && s->iformat->priv_class)
3251         av_opt_free(s->priv_data);
3252     av_freep(&s->priv_data);
/* Add stream idx to the program with id progid, growing its stream_index
 * array; no-ops if idx is out of range or already present. */
3256 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3259     AVProgram *program=NULL;
3262     if (idx >= ac->nb_streams) {
3263         av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3267     for(i=0; i<ac->nb_programs; i++){
3268         if(ac->programs[i]->id != progid)
3270         program = ac->programs[i];
3271         for(j=0; j<program->nb_stream_indexes; j++)
3272             if(program->stream_index[j] == idx)
/* realloc-into-temp so the old array survives an allocation failure */
3275         tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3278         program->stream_index = tmp;
3279         program->stream_index[program->nb_stream_indexes++] = idx;
/* Log a frame rate, choosing precision by magnitude: two decimals for
 * fractional rates, integer for whole rates, "k" suffix for >=1000. */
3284 static void print_fps(double d, const char *postfix){
3285     uint64_t v= lrintf(d*100);
3286     if     (v% 100      ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3287     else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3288     else                  av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/* Log all metadata entries except "language" (shown elsewhere); skipped
 * entirely when "language" is the only entry. */
3291 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3293     if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3294         AVDictionaryEntry *tag=NULL;
3296         av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3297         while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3298             if(strcmp("language", tag->key))
3299                 av_log(ctx, AV_LOG_INFO, "%s  %-16s: %s\n", indent, tag->key, tag->value);
3304 /* "user interface" functions */
/* Log one stream's description: id, language, codec string, aspect
 * ratio, frame-rate/timebase figures, disposition flags and metadata. */
3305 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3308     int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3309     AVStream *st = ic->streams[i];
3310     int g = av_gcd(st->time_base.num, st->time_base.den);
3311     AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3312     avcodec_string(buf, sizeof(buf), st->codec, is_output);
3313     av_log(NULL, AV_LOG_INFO, "    Stream #%d.%d", index, i);
3314     /* the pid is an important information, so we display it */
3315     /* XXX: add a generic system */
3316     if (flags & AVFMT_SHOW_IDS)
3317         av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3319         av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3320     av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3321     av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* only print PAR/DAR when the stream-level SAR disagrees with the codec's */
3322     if (st->sample_aspect_ratio.num && // default
3323         av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3324         AVRational display_aspect_ratio;
3325         av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3326                   st->codec->width*st->sample_aspect_ratio.num,
3327                   st->codec->height*st->sample_aspect_ratio.den,
3329         av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
3330                  st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3331                  display_aspect_ratio.num, display_aspect_ratio.den);
3333     if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3334         if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3335             print_fps(av_q2d(st->avg_frame_rate), "fps");
3336         if(st->r_frame_rate.den && st->r_frame_rate.num)
3337             print_fps(av_q2d(st->r_frame_rate), "tbr");
3338         if(st->time_base.den && st->time_base.num)
3339             print_fps(1/av_q2d(st->time_base), "tbn");
3340         if(st->codec->time_base.den && st->codec->time_base.num)
3341             print_fps(1/av_q2d(st->codec->time_base), "tbc");
3343     if (st->disposition & AV_DISPOSITION_DEFAULT)
3344         av_log(NULL, AV_LOG_INFO, " (default)");
3345     if (st->disposition & AV_DISPOSITION_DUB)
3346         av_log(NULL, AV_LOG_INFO, " (dub)");
3347     if (st->disposition & AV_DISPOSITION_ORIGINAL)
3348         av_log(NULL, AV_LOG_INFO, " (original)");
3349     if (st->disposition & AV_DISPOSITION_COMMENT)
3350         av_log(NULL, AV_LOG_INFO, " (comment)");
3351     if (st->disposition & AV_DISPOSITION_LYRICS)
3352         av_log(NULL, AV_LOG_INFO, " (lyrics)");
3353     if (st->disposition & AV_DISPOSITION_KARAOKE)
3354         av_log(NULL, AV_LOG_INFO, " (karaoke)");
3355     if (st->disposition & AV_DISPOSITION_FORCED)
3356         av_log(NULL, AV_LOG_INFO, " (forced)");
3357     if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3358         av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3359     if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3360         av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3361     if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3362         av_log(NULL, AV_LOG_INFO, " (clean effects)");
3363     av_log(NULL, AV_LOG_INFO, "\n");
3364     dump_metadata(NULL, st->metadata, "    ");
/* Deprecated wrapper (FF_API_DUMP_FORMAT) around av_dump_format. */
3367 #if FF_API_DUMP_FORMAT
3368 void dump_format(AVFormatContext *ic,
3373     av_dump_format(ic, index, url, is_output);
/* Log a human-readable summary of the whole context: duration, start
 * time, bitrate, chapters, programs and every stream (streams belonging
 * to a program are printed under it and marked in 'printed' so the final
 * loop only emits the leftovers). */
3377 void av_dump_format(AVFormatContext *ic,
3383     uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3384     if (ic->nb_streams && !printed)
3387     av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3388            is_output ? "Output" : "Input",
3390            is_output ? ic->oformat->name : ic->iformat->name,
3391            is_output ? "to" : "from", url);
3392     dump_metadata(NULL, ic->metadata, "  ");
3394         av_log(NULL, AV_LOG_INFO, "  Duration: ");
3395         if (ic->duration != AV_NOPTS_VALUE) {
3396             int hours, mins, secs, us;
3397             secs = ic->duration / AV_TIME_BASE;
3398             us = ic->duration % AV_TIME_BASE;
3403             av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3404                    (100 * us) / AV_TIME_BASE);
3406             av_log(NULL, AV_LOG_INFO, "N/A");
3408         if (ic->start_time != AV_NOPTS_VALUE) {
3410             av_log(NULL, AV_LOG_INFO, ", start: ");
3411             secs = ic->start_time / AV_TIME_BASE;
3412             us = abs(ic->start_time % AV_TIME_BASE);
3413             av_log(NULL, AV_LOG_INFO, "%d.%06d",
3414                    secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3416         av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3418             av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3420             av_log(NULL, AV_LOG_INFO, "N/A");
3422         av_log(NULL, AV_LOG_INFO, "\n");
3424     for (i = 0; i < ic->nb_chapters; i++) {
3425         AVChapter *ch = ic->chapters[i];
3426         av_log(NULL, AV_LOG_INFO, "    Chapter #%d.%d: ", index, i);
3427         av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3428         av_log(NULL, AV_LOG_INFO, "end %f\n",   ch->end * av_q2d(ch->time_base));
3430         dump_metadata(NULL, ch->metadata, "    ");
3432     if(ic->nb_programs) {
3433         int j, k, total = 0;
3434         for(j=0; j<ic->nb_programs; j++) {
3435             AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3437             av_log(NULL, AV_LOG_INFO, "  Program %d %s\n", ic->programs[j]->id,
3438                    name ? name->value : "");
3439             dump_metadata(NULL, ic->programs[j]->metadata, "    ");
3440             for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3441                 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3442                 printed[ic->programs[j]->stream_index[k]] = 1;
3444             total += ic->programs[j]->nb_stream_indexes;
/* some streams may not belong to any program */
3446         if (total < ic->nb_streams)
3447             av_log(NULL, AV_LOG_INFO, "  No Program\n");
3449     for(i=0;i<ic->nb_streams;i++)
3451             dump_stream_format(ic, i, index, is_output);
/* Wall-clock time in microseconds since the Unix epoch (gettimeofday). */
3456 int64_t av_gettime(void)
3459     gettimeofday(&tv,NULL);
3460     return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/* Current time in NTP units: microseconds since the NTP epoch, with
 * sub-millisecond precision deliberately truncated. */
3463 uint64_t ff_ntp_time(void)
3465   return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/* Deprecated wrapper (FF_API_PARSE_DATE) around av_parse_time; the
 * timeval declaration and return are in this listing's gaps. */
3468 #if FF_API_PARSE_DATE
3469 #include "libavutil/parseutils.h"
3471 int64_t parse_date(const char *timestr, int duration)
3474     av_parse_time(&timeval, timestr, duration);
/* Deprecated wrapper (FF_API_FIND_INFO_TAG) around av_find_info_tag. */
3479 #if FF_API_FIND_INFO_TAG
3480 #include "libavutil/parseutils.h"
3482 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
3484     return av_find_info_tag(arg, arg_size, tag1, info);
/* Expand a "%0Nd"-style pattern in path with the given frame number into
 * buf.  Exactly one %d-type specifier is expected (percentd_found);
 * overflow of buf is rejected.
 * NOTE(review): most of the scanning loop is in this listing's gaps. */
3488 int av_get_frame_filename(char *buf, int buf_size,
3489                           const char *path, int number)
3492     char *q, buf1[20], c;
3493     int nd, len, percentd_found;
/* parse an optional zero-padded width after '%' */
3505             while (isdigit(*p)) {
3506                 nd = nd * 10 + *p++ - '0';
3509             } while (isdigit(c));
3518                 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3520                 if ((q - buf + len) > buf_size - 1)
3522                 memcpy(q, buf1, len);
3530             if ((q - buf) < buf_size - 1)
3534     if (!percentd_found)
/* Hex dump helper: 16 bytes per line, hex columns followed by printable
 * ASCII (non-printables in the gaps are replaced).  Output goes to the
 * FILE* when given, otherwise through av_log at the given level. */
3543 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3547 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3549     for(i=0;i<size;i+=16) {
3556                 PRINT(" %02x", buf[i+j]);
3561         for(j=0;j<len;j++) {
3563             if (c < ' ' || c > '~')
3572 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3572 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3574 hex_dump_internal(NULL, f, 0, buf, size);
/* Public hex dump routed through av_log at the given level. */
3577 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3579     hex_dump_internal(avcl, NULL, level, buf, size);
/* Describe one AVPacket (stream, keyframe flag, duration, dts/pts in
 * seconds, size) and optionally hex-dump its payload; output goes to the
 * FILE* when given, otherwise through av_log. */
3582 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3585 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3586     PRINT("stream #%d:\n", pkt->stream_index);
3587     PRINT("  keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3588     PRINT("  duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3589     /* DTS is _always_ valid after av_read_frame() */
3591     if (pkt->dts == AV_NOPTS_VALUE)
3594         PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3595     /* PTS may not be known if B-frames are present. */
3597     if (pkt->pts == AV_NOPTS_VALUE)
3600         PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3602     PRINT("  size=%d\n", pkt->size);
3605         av_hex_dump(f, pkt->data, pkt->size);
/* Dump a packet to a FILE, interpreting timestamps in AV_TIME_BASE. */
3609 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3611     AVRational tb = { 1, AV_TIME_BASE };
3612     pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
/* Dump a packet to a FILE using its stream's own time base. */
3616 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3618     pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
/* Dump a packet through av_log, timestamps in AV_TIME_BASE. */
3622 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3624     AVRational tb = { 1, AV_TIME_BASE };
3625     pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
/* Dump a packet through av_log using its stream's own time base. */
3629 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3632     pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/* Split a URL into proto, user:pass authorization, hostname (with []
 * stripped for numeric IPv6), port and path+query.  Any output buffer
 * may be sized 0 to skip that component; absent components yield "".
 * No URL unescaping is performed. */
3635 void av_url_split(char *proto, int proto_size,
3636                   char *authorization, int authorization_size,
3637                   char *hostname, int hostname_size,
3639                   char *path, int path_size,
3642     const char *p, *ls, *at, *col, *brk;
3644     if (port_ptr)               *port_ptr = -1;
3645     if (proto_size > 0)         proto[0] = 0;
3646     if (authorization_size > 0) authorization[0] = 0;
3647     if (hostname_size > 0)      hostname[0] = 0;
3648     if (path_size > 0)          path[0] = 0;
3650     /* parse protocol */
3651     if ((p = strchr(url, ':'))) {
3652         av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3657         /* no protocol means plain filename */
3658         av_strlcpy(path, url, path_size);
3662     /* separate path from hostname */
3663     ls = strchr(p, '/');
/* a query string with no path still terminates the authority part */
3665         ls = strchr(p, '?');
3667         av_strlcpy(path, ls, path_size);
3669         ls = &p[strlen(p)]; // XXX
3671     /* the rest is hostname, use that to parse auth/port */
3673         /* authorization (user[:pass]@hostname) */
3674         if ((at = strchr(p, '@')) && at < ls) {
3675             av_strlcpy(authorization, p,
3676                        FFMIN(authorization_size, at + 1 - p));
3677             p = at + 1; /* skip '@' */
/* bracketed numeric IPv6 host, e.g. [::1]:8080 */
3680         if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3682             av_strlcpy(hostname, p + 1,
3683                        FFMIN(hostname_size, brk - p));
3684             if (brk[1] == ':' && port_ptr)
3685                 *port_ptr = atoi(brk + 2);
3686         } else if ((col = strchr(p, ':')) && col < ls) {
3687             av_strlcpy(hostname, p,
3688                        FFMIN(col + 1 - p, hostname_size));
3689             if (port_ptr) *port_ptr = atoi(col + 1);
3691             av_strlcpy(hostname, p,
3692                        FFMIN(ls + 1 - p, hostname_size));
/* Render s bytes of src as 2*s hex characters into buff (upper- or
 * lowercase); the NUL terminator / return are in this listing's gaps,
 * and the middle rows of the digit tables are likewise omitted. */
3696 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
3699     static const char hex_table_uc[16] = { '0', '1', '2', '3',
3702                                            'C', 'D', 'E', 'F' };
3703     static const char hex_table_lc[16] = { '0', '1', '2', '3',
3706                                            'c', 'd', 'e', 'f' };
3707     const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
3709     for(i = 0; i < s; i++) {
3710         buff[i * 2]     = hex_table[src[i] >> 4];
3711         buff[i * 2 + 1] = hex_table[src[i] & 0xF];
/* Parse whitespace-separated hex digits from p into data (NULL data =
 * just count), returning the byte length; digit pairing and the 'A'-'F'
 * arithmetic continue on lines this listing omits. */
3717 int ff_hex_to_data(uint8_t *data, const char *p)
3724         p += strspn(p, SPACE_CHARS);
3727         c = toupper((unsigned char) *p++);
3728         if (c >= '0' && c <= '9')
3730         else if (c >= 'A' && c <= 'F')
/* Set a stream's time base to pts_num/pts_den reduced to lowest terms
 * (capped at INT_MAX), logging when the fraction was reduced; invalid
 * results are rejected and leave the stream untouched. */
3745 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3746                      unsigned int pts_num, unsigned int pts_den)
3749     if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
3750         if(new_tb.num != pts_num)
3751             av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
3753         av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
3755     if(new_tb.num <= 0 || new_tb.den <= 0) {
3756         av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase for st:%d\n", s->index);
3759     s->time_base = new_tb;
3760     s->pts_wrap_bits = pts_wrap_bits;
/* Assemble "proto://auth@host:port/..." into str.  When networking is
 * compiled in, a numeric IPv6 hostname is detected via getaddrinfo with
 * AI_NUMERICHOST and wrapped in brackets.  The trailing path part is
 * appended from the printf-style fmt/varargs.  Returns strlen(str)
 * (on a line in this listing's gaps). */
3763 int ff_url_join(char *str, int size, const char *proto,
3764                 const char *authorization, const char *hostname,
3765                 int port, const char *fmt, ...)
3768     struct addrinfo hints, *ai;
3773         av_strlcatf(str, size, "%s://", proto);
3774     if (authorization && authorization[0])
3775         av_strlcatf(str, size, "%s@", authorization);
3776 #if CONFIG_NETWORK && defined(AF_INET6)
3777     /* Determine if hostname is a numerical IPv6 address,
3778      * properly escape it within [] in that case. */
3779     memset(&hints, 0, sizeof(hints));
3780     hints.ai_flags = AI_NUMERICHOST;
3781     if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
3782         if (ai->ai_family == AF_INET6) {
3783             av_strlcat(str, "[", size);
3784             av_strlcat(str, hostname, size);
3785             av_strlcat(str, "]", size);
3787             av_strlcat(str, hostname, size);
3792         /* Not an IPv6 address, just output the plain string. */
3793         av_strlcat(str, hostname, size);
3796         av_strlcatf(str, size, ":%d", port);
3799         int len = strlen(str);
3802         vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
/* Forward a packet from a source context into a chained muxer: copy it
 * (local_pkt init is in a listing gap), retarget the stream index and
 * rescale pts/dts from the source stream's time base to the destination
 * stream's before writing. */
3808 int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
3809                      AVFormatContext *src)
3814     local_pkt.stream_index = dst_stream;
3815     if (pkt->pts != AV_NOPTS_VALUE)
3816         local_pkt.pts = av_rescale_q(pkt->pts,
3817                                      src->streams[pkt->stream_index]->time_base,
3818                                      dst->streams[dst_stream]->time_base);
3819     if (pkt->dts != AV_NOPTS_VALUE)
3820         local_pkt.dts = av_rescale_q(pkt->dts,
3821                                      src->streams[pkt->stream_index]->time_base,
3822                                      dst->streams[dst_stream]->time_base);
3823     return av_write_frame(dst, &local_pkt);
/* Parse comma/whitespace-separated key=value pairs (values optionally
 * double-quoted, with an escape handled on lines this listing omits).
 * For each key the callback supplies the destination buffer; dest_end
 * reserves one byte so the value can be NUL-terminated. */
3826 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3829     const char *ptr = str;
3831     /* Parse key=value pairs. */
3834         char *dest = NULL, *dest_end;
3835         int key_len, dest_len = 0;
3837         /* Skip whitespace and potential commas. */
3838         while (*ptr && (isspace(*ptr) || *ptr == ','))
3845         if (!(ptr = strchr(key, '=')))
3848         key_len = ptr - key;
3850         callback_get_buf(context, key, key_len, &dest, &dest_len);
3851         dest_end = dest + dest_len - 1;
/* quoted value: copy until the closing quote */
3855             while (*ptr && *ptr != '\"') {
3859                         if (dest && dest < dest_end)
3863                     if (dest && dest < dest_end)
/* unquoted value: copy until whitespace or comma */
3871             for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
3872                 if (dest && dest < dest_end)
/* Linear search for the stream whose id matches; the not-found return
 * value is on a line outside this listing. */
3880 int ff_find_stream_index(AVFormatContext *s, int id)
3883     for (i = 0; i < s->nb_streams; i++) {
3884         if (s->streams[i]->id == id)
/* Resolve a relative URL against a base URL into buf: handles
 * server-absolute paths ("/x" against "proto://host/..."), rel being
 * already absolute, and leading "../" components popped off the base
 * path one directory at a time. */
3890 void ff_make_absolute_url(char *buf, int size, const char *base,
3894     /* Absolute path, relative to the current server */
3895     if (base && strstr(base, "://") && rel[0] == '/') {
3897             av_strlcpy(buf, base, size);
3898         sep = strstr(buf, "://");
/* keep only proto://host, drop the base's own path */
3901             sep = strchr(sep, '/');
3905         av_strlcat(buf, rel, size);
3908     /* If rel actually is an absolute url, just copy it */
3909     if (!base || strstr(rel, "://") || rel[0] == '/') {
3910         av_strlcpy(buf, rel, size);
3914         av_strlcpy(buf, base, size);
3915     /* Remove the file name from the base url */
3916     sep = strrchr(buf, '/');
3921     while (av_strstart(rel, "../", NULL) && sep) {
3922         /* Remove the path delimiter at the end */
3924         sep = strrchr(buf, '/');
3925         /* If the next directory name to pop off is "..", break here */
3926         if (!strcmp(sep ? &sep[1] : buf, "..")) {
3927             /* Readd the slash we just removed */
3928             av_strlcat(buf, "/", size);
3931         /* Cut off the directory name */
3938     av_strlcat(buf, rel, size);
/* Convert an ISO-8601 date string ("YYYY-MM-DDThh:mm:ss", dashes allowed
 * with surrounding spaces since strptime skips whitespace at literals)
 * to a Unix timestamp; the HAVE_STRPTIME #if/#else directives guarding
 * the fallback branch are on lines this listing omits. */
3941 int64_t ff_iso8601_to_unix_time(const char *datestr)
3944     struct tm time = {0};
3945     strptime(datestr, "%Y - %m - %dT%T", &time);
3946     return mktime(&time);
3948     av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert "
3949                                  "the date string.\n");
/* Can this muxer store codec_id?  Ask the muxer's query_codec hook first,
 * then fall back to its tag table, then to its default codec ids;
 * otherwise report AVERROR_PATCHWELCOME (unknown). */
3954 int avformat_query_codec(AVOutputFormat *ofmt, enum CodecID codec_id, int std_compliance)
3957         if (ofmt->query_codec)
3958             return ofmt->query_codec(codec_id, std_compliance);
3959         else if (ofmt->codec_tag)
3960             return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
3961         else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
3962                  codec_id == ofmt->subtitle_codec)
3965     return AVERROR_PATCHWELCOME;
/* Global network init: mark networking as initialised, then run the
 * platform setup (error path continues past this listing's view). */
3968 int avformat_network_init(void)
3972     ff_network_inited_globally = 1;
3973     if ((ret = ff_network_init()) < 0)
3980 int avformat_network_deinit(void)