2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "avio_internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavcodec/bytestream.h"
29 #include "libavutil/opt.h"
30 #include "libavutil/dict.h"
31 #include "libavutil/pixdesc.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/avstring.h"
36 #include "libavutil/mathematics.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/time.h"
40 #include "audiointerleave.h"
52 * various utility functions for use within Libav
55 unsigned avformat_version(void)
57 return LIBAVFORMAT_VERSION_INT;
60 const char *avformat_configuration(void)
62 return LIBAV_CONFIGURATION;
65 const char *avformat_license(void)
67 #define LICENSE_PREFIX "libavformat license: "
68 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* NOTE(review): this excerpt embeds original line numbers and the
 * numbering is non-contiguous -- frac_init's comment delimiters, braces
 * and body are elided here.  Kept byte-identical; comments only.
 * From the visible doc lines, frac_init presumably initializes an AVFrac
 * so that f = val + (num / den) + 0.5 -- verify against the full file. */
71 /* fraction handling */
74 * f = val + (num / den) + 0.5.
76 * 'num' is normalized so that it is such as 0 <= num < den.
78 * @param f fractional number
79 * @param val integer value
80 * @param num must be >= 0
81 * @param den must be >= 1
83 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * only the signature and one branch of frac_add survive.  Kept
 * byte-identical; comments only.  Presumably adds incr to the numerator
 * and renormalizes so 0 <= num < den -- verify against the full file. */
96 * Fractional addition to f: f = f + (incr / f->den).
98 * @param f fractional number
99 * @param incr increment, can be positive or negative
101 static void frac_add(AVFrac *f, int64_t incr)
114 } else if (num >= den) {
121 /** head of registered input format linked list */
122 static AVInputFormat *first_iformat = NULL;
123 /** head of registered output format linked list */
124 static AVOutputFormat *first_oformat = NULL;
126 AVInputFormat *av_iformat_next(AVInputFormat *f)
128 if(f) return f->next;
129 else return first_iformat;
132 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
134 if(f) return f->next;
135 else return first_oformat;
138 void av_register_input_format(AVInputFormat *format)
142 while (*p != NULL) p = &(*p)->next;
147 void av_register_output_format(AVOutputFormat *format)
151 while (*p != NULL) p = &(*p)->next;
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * the local declarations (ext1, p, q), copy loop body and returns of
 * av_match_ext are missing here.  Kept byte-identical; comments only.
 * Visible logic: take the last '.'-suffix of filename and compare it
 * case-insensitively against entries of a comma-separated list. */
156 int av_match_ext(const char *filename, const char *extensions)
164 ext = strrchr(filename, '.');
170 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
173 if (!av_strcasecmp(ext1, ext))
/**
 * Check whether name matches, case-insensitively, one entry of the
 * comma-separated list names.
 * @return 1 on match, 0 otherwise (including NULL arguments)
 */
static int match_format(const char *name, const char *names)
{
    const char *p;
    int len, namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        /* compare against the longer of the two lengths so that a mere
         * common prefix does not count as a match */
        len = FFMAX(p - names, namelen);
        if (!av_strncasecmp(name, names, len))
            return 1;
        names = p + 1;
    }
    return !av_strcasecmp(name, names);
}
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * braces, score bookkeeping and the returns of av_guess_format are
 * missing here.  Kept byte-identical; comments only.  Visible logic:
 * special-case image sequences, then scan registered output formats
 * scoring matches on name, mime type and filename extension. */
201 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
202 const char *mime_type)
204 AVOutputFormat *fmt = NULL, *fmt_found;
205 int score_max, score;
207 /* specific test for image sequences */
208 #if CONFIG_IMAGE2_MUXER
209 if (!short_name && filename &&
210 av_filename_number_test(filename) &&
211 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
212 return av_guess_format("image2", NULL, NULL);
215 /* Find the proper file type. */
218 while ((fmt = av_oformat_next(fmt))) {
220 if (fmt->name && short_name && !av_strcasecmp(fmt->name, short_name))
222 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
224 if (filename && fmt->extensions &&
225 av_match_ext(filename, fmt->extensions)) {
228 if (score > score_max) {
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * some braces and the image2 branch closing are missing here.  Kept
 * byte-identical; comments only.  Visible logic: pick the default codec
 * for the requested media type from the output format, with a special
 * case guessing an image codec for the image2 muxers. */
236 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
237 const char *filename, const char *mime_type, enum AVMediaType type){
238 if(type == AVMEDIA_TYPE_VIDEO){
239 enum CodecID codec_id= CODEC_ID_NONE;
241 #if CONFIG_IMAGE2_MUXER
242 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
243 codec_id= ff_guess_image2_codec(filename);
246 if(codec_id == CODEC_ID_NONE)
247 codec_id= fmt->video_codec;
249 }else if(type == AVMEDIA_TYPE_AUDIO)
250 return fmt->audio_codec;
251 else if (type == AVMEDIA_TYPE_SUBTITLE)
252 return fmt->subtitle_codec;
254 return CODEC_ID_NONE;
257 AVInputFormat *av_find_input_format(const char *short_name)
259 AVInputFormat *fmt = NULL;
260 while ((fmt = av_iformat_next(fmt))) {
261 if (match_format(short_name, fmt->name))
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * error checks and returns of av_get_packet are missing here.  Kept
 * byte-identical; comments only.  Visible logic: allocate a packet of
 * the requested size, record the current stream position, read into it
 * and shrink the packet to the number of bytes actually read. */
268 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
270 int ret= av_new_packet(pkt, size);
275 pkt->pos= avio_tell(s);
277 ret= avio_read(s, pkt->data, size);
281 av_shrink_packet(pkt, ret);
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * the empty-packet fast path condition and error handling are missing
 * here.  Kept byte-identical; comments only.  Visible logic: grow the
 * existing packet, append up to size freshly-read bytes after the old
 * payload, then shrink to what was actually read. */
286 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
291 return av_get_packet(s, pkt, size);
292 old_size = pkt->size;
293 ret = av_grow_packet(pkt, size);
296 ret = avio_read(s, pkt->data + old_size, size);
297 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
/**
 * Check whether filename contains a valid frame-number pattern by
 * attempting to expand it with frame number 1.
 * @return 1 if the pattern expands successfully, 0 otherwise
 */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
}
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * braces, the id3 flag setup, per-format extension scoring and the final
 * return are missing here.  Kept byte-identical; comments only.
 * Visible logic: skip a leading ID3v2 tag, score every registered input
 * format via read_probe() or extension match, keep the best scorer, then
 * fall back to extension-only guesses for huge-id3 and mp3 cases. */
308 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
310 AVProbeData lpd = *pd;
311 AVInputFormat *fmt1 = NULL, *fmt;
314 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
315 int id3len = ff_id3v2_tag_len(lpd.buf);
316 if (lpd.buf_size > id3len + 16) {
318 lpd.buf_size -= id3len;
324 while ((fmt1 = av_iformat_next(fmt1))) {
325 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
328 if (fmt1->read_probe) {
329 score = fmt1->read_probe(&lpd);
330 } else if (fmt1->extensions) {
331 if (av_match_ext(lpd.filename, fmt1->extensions)) {
335 if (score > *score_max) {
338 }else if (score == *score_max)
342 /* a hack for files with huge id3v2 tags -- try to guess by file extension. */
343 if (!fmt && is_opened && *score_max < AVPROBE_SCORE_MAX/4) {
344 while ((fmt = av_iformat_next(fmt)))
345 if (fmt->extensions && av_match_ext(lpd.filename, fmt->extensions)) {
346 *score_max = AVPROBE_SCORE_MAX/4;
351 if (!fmt && id3 && *score_max < AVPROBE_SCORE_MAX/4-1) {
352 while ((fmt = av_iformat_next(fmt)))
353 if (fmt->extensions && av_match_ext("mp3", fmt->extensions)) {
354 *score_max = AVPROBE_SCORE_MAX/4-1;
362 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
364 return av_probe_input_format2(pd, is_opened, &score);
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * braces, the table terminator and the return are missing here.  Kept
 * byte-identical; comments only.  Visible logic: probe the stream's
 * buffered data, then map the detected demuxer name to a codec id/type
 * on the stream via a static name->codec table. */
367 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
369 static const struct {
370 const char *name; enum CodecID id; enum AVMediaType type;
372 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
373 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
374 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
375 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
376 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
377 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
378 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
379 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
382 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
386 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
387 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
388 for (i = 0; fmt_id_type[i].name; i++) {
389 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
390 st->codec->codec_id = fmt_id_type[i].id;
391 st->codec->codec_type = fmt_id_type[i].type;
/************************************************************/
/* input media file */

/** size of probe buffer, for guessing file type from file contents */
#define PROBE_BUF_MIN 2048
#define PROBE_BUF_MAX (1<<20)
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * braces, goto-fail cleanup and some loop bookkeeping are missing here.
 * Kept byte-identical; comments only.  Visible logic: read doubling
 * probe windows from pb (PROBE_BUF_MIN up to max_probe_size), run the
 * format prober on each window, then rewind pb by reusing the probe
 * buffer so no seek is required. */
406 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
407 const char *filename, void *logctx,
408 unsigned int offset, unsigned int max_probe_size)
410 AVProbeData pd = { filename ? filename : "", NULL, -offset };
411 unsigned char *buf = NULL;
412 int ret = 0, probe_size;
414 if (!max_probe_size) {
415 max_probe_size = PROBE_BUF_MAX;
416 } else if (max_probe_size > PROBE_BUF_MAX) {
417 max_probe_size = PROBE_BUF_MAX;
418 } else if (max_probe_size < PROBE_BUF_MIN) {
419 return AVERROR(EINVAL);
422 if (offset >= max_probe_size) {
423 return AVERROR(EINVAL);
426 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
427 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
428 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
429 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
431 if (probe_size < offset) {
435 /* read probe data */
436 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
437 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
438 /* fail if error was not end of file, otherwise, lower score */
439 if (ret != AVERROR_EOF) {
444 ret = 0; /* error was end of file, nothing read */
447 pd.buf = &buf[offset];
449 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
451 /* guess file format */
452 *fmt = av_probe_input_format2(&pd, 1, &score);
454 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
455 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
457 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
463 return AVERROR_INVALIDDATA;
466 /* rewind. reuse probe buffer to avoid seeking */
467 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * braces and the custom-pb / no-probe conditions are incomplete here.
 * Kept byte-identical; comments only.  Visible logic: if a caller-
 * supplied pb exists, mark custom IO and probe it; for NOFILE formats
 * skip opening; otherwise open the URL with avio_open2() and probe. */
473 /* open input file and probe the format if necessary */
474 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
477 AVProbeData pd = {filename, NULL, 0};
480 s->flags |= AVFMT_FLAG_CUSTOM_IO;
482 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
483 else if (s->iformat->flags & AVFMT_NOFILE)
484 return AVERROR(EINVAL);
488 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
489 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
492 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
493 &s->interrupt_callback, options)) < 0)
497 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
500 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
501 AVPacketList **plast_pktl){
502 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
507 (*plast_pktl)->next = pktl;
509 *packet_buffer = pktl;
511 /* add the packet in the buffered packet list */
517 static void queue_attached_pictures(AVFormatContext *s)
520 for (i = 0; i < s->nb_streams; i++)
521 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
522 s->streams[i]->discard < AVDISCARD_ALL) {
523 AVPacket copy = s->streams[i]->attached_pic;
524 copy.destruct = NULL;
525 add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end);
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * braces, several error paths and the fail: label are missing here.
 * Kept byte-identical; comments only.  Visible logic: allocate/apply
 * options, open and probe input, validate NEEDNUMBER filenames,
 * allocate demuxer private data, read ID3v2 metadata, call the
 * demuxer's read_header, queue attached pictures, record data_offset. */
529 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
531 AVFormatContext *s = *ps;
533 AVDictionary *tmp = NULL;
534 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
536 if (!s && !(s = avformat_alloc_context()))
537 return AVERROR(ENOMEM);
542 av_dict_copy(&tmp, *options, 0);
544 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
547 if ((ret = init_input(s, filename, &tmp)) < 0)
550 /* check filename in case an image number is expected */
551 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
552 if (!av_filename_number_test(filename)) {
553 ret = AVERROR(EINVAL);
558 s->duration = s->start_time = AV_NOPTS_VALUE;
559 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
561 /* allocate private data */
562 if (s->iformat->priv_data_size > 0) {
563 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
564 ret = AVERROR(ENOMEM);
567 if (s->iformat->priv_class) {
568 *(const AVClass**)s->priv_data = s->iformat->priv_class;
569 av_opt_set_defaults(s->priv_data);
570 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
575 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
577 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
579 if (s->iformat->read_header)
580 if ((ret = s->iformat->read_header(s)) < 0)
583 if (id3v2_extra_meta &&
584 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
586 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
588 queue_attached_pictures(s);
590 if (s->pb && !s->data_offset)
591 s->data_offset = avio_tell(s->pb);
593 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
596 av_dict_free(options);
603 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
605 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
607 avformat_free_context(s);
612 /*******************************************************/
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * the NULL-pkt (end-of-probing) branch and several braces are missing
 * here.  Kept byte-identical; comments only.  Visible logic: accumulate
 * packet payloads into st->probe_data and re-run format probing at
 * power-of-two buffer growth points until a codec id is determined. */
614 static void probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
616 if(st->codec->codec_id == CODEC_ID_PROBE){
617 AVProbeData *pd = &st->probe_data;
618 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
622 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
623 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
624 pd->buf_size += pkt->size;
625 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
627 st->probe_packets = 0;
630 if (!st->probe_packets ||
631 av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
632 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
633 if(st->codec->codec_id != CODEC_ID_PROBE){
636 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * braces, the raw-buffer dequeue path and several returns are missing
 * here.  Kept byte-identical; comments only.  Visible logic: serve
 * packets from the raw packet buffer when possible, otherwise call the
 * demuxer's read_packet, apply forced codec ids, drop corrupt packets
 * when requested, and feed still-probing streams via probe_codec(). */
642 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
648 AVPacketList *pktl = s->raw_packet_buffer;
652 st = s->streams[pkt->stream_index];
653 if (st->codec->codec_id != CODEC_ID_PROBE || !st->probe_packets ||
654 s->raw_packet_buffer_remaining_size < pkt->size) {
656 if (st->probe_packets) {
657 probe_codec(s, st, NULL);
659 pd = &st->probe_data;
662 s->raw_packet_buffer = pktl->next;
663 s->raw_packet_buffer_remaining_size += pkt->size;
670 ret= s->iformat->read_packet(s, pkt);
672 if (!pktl || ret == AVERROR(EAGAIN))
674 for (i = 0; i < s->nb_streams; i++) {
676 if (st->probe_packets) {
677 probe_codec(s, st, NULL);
683 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
684 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
685 av_log(s, AV_LOG_WARNING,
686 "Dropped corrupted packet (stream = %d)\n",
692 st= s->streams[pkt->stream_index];
694 switch(st->codec->codec_type){
695 case AVMEDIA_TYPE_VIDEO:
696 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
698 case AVMEDIA_TYPE_AUDIO:
699 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
701 case AVMEDIA_TYPE_SUBTITLE:
702 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
706 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
710 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
711 s->raw_packet_buffer_remaining_size -= pkt->size;
713 probe_codec(s, st, pkt);
#if FF_API_READ_PACKET
/**
 * Deprecated public entry point kept for API compatibility;
 * forwards directly to ff_read_packet().
 */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    return ff_read_packet(s, pkt);
}
#endif
725 /**********************************************************/
728 * Get the number of samples of an audio frame. Return -1 on error.
730 static int get_audio_frame_size(AVCodecContext *enc, int size, int mux)
734 /* give frame_size priority if demuxing */
735 if (!mux && enc->frame_size > 1)
736 return enc->frame_size;
738 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
741 /* fallback to using frame_size if muxing */
742 if (enc->frame_size > 1)
743 return enc->frame_size;
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * the *pnum/*pden defaults, some braces and the default case are missing
 * here.  Kept byte-identical; comments only.  Visible logic: for video,
 * derive the duration fraction from r_frame_rate, the stream time base,
 * or the codec time base (scaled by repeat_pict); for audio, derive it
 * from the frame size and sample rate. */
750 * Return the frame duration in seconds. Return 0 if not available.
752 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
753 AVCodecParserContext *pc, AVPacket *pkt)
759 switch(st->codec->codec_type) {
760 case AVMEDIA_TYPE_VIDEO:
761 if (st->r_frame_rate.num) {
762 *pnum = st->r_frame_rate.den;
763 *pden = st->r_frame_rate.num;
764 } else if(st->time_base.num*1000LL > st->time_base.den) {
765 *pnum = st->time_base.num;
766 *pden = st->time_base.den;
767 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
768 *pnum = st->codec->time_base.num;
769 *pden = st->codec->time_base.den;
770 if (pc && pc->repeat_pict) {
771 *pnum = (*pnum) * (1 + pc->repeat_pict);
773 //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
774 //Thus if we have no parser in such case leave duration undefined.
775 if(st->codec->ticks_per_frame>1 && !pc){
780 case AVMEDIA_TYPE_AUDIO:
781 frame_size = get_audio_frame_size(st->codec, pkt->size, 0);
782 if (frame_size <= 0 || st->codec->sample_rate <= 0)
785 *pden = st->codec->sample_rate;
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * return statements, several case labels and the default path are
 * missing here.  Kept byte-identical; comments only.  Visible logic:
 * report whether a codec produces only intra-coded (key) frames --
 * all audio plus a whitelist of video codec ids. */
792 static int is_intra_only(AVCodecContext *enc){
793 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
795 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
796 switch(enc->codec_id){
798 case CODEC_ID_MJPEGB:
800 case CODEC_ID_PRORES:
801 case CODEC_ID_RAWVIDEO:
802 case CODEC_ID_DVVIDEO:
803 case CODEC_ID_HUFFYUV:
804 case CODEC_ID_FFVHUFF:
809 case CODEC_ID_JPEG2000:
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * braces and the early return are incomplete here.  Kept byte-identical;
 * comments only.  Visible logic: once the first DTS for a stream becomes
 * known, shift the timestamps of already-buffered packets of that stream
 * by first_dts and initialize st->start_time. */
818 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
819 int64_t dts, int64_t pts)
821 AVStream *st= s->streams[stream_index];
822 AVPacketList *pktl= s->packet_buffer;
824 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
827 st->first_dts= dts - st->cur_dts;
830 for(; pktl; pktl= pktl->next){
831 if(pktl->pkt.stream_index != stream_index)
833 //FIXME think more about this check
834 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
835 pktl->pkt.pts += st->first_dts;
837 if(pktl->pkt.dts != AV_NOPTS_VALUE)
838 pktl->pkt.dts += st->first_dts;
840 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
841 st->start_time= pktl->pkt.pts;
843 if (st->start_time == AV_NOPTS_VALUE)
844 st->start_time = pts;
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * the cur_dts back-stepping loop body and several braces are missing
 * here.  Kept byte-identical; comments only.  Visible logic: back-fill
 * missing dts/pts/duration on already-buffered packets of the stream
 * using a constant packet duration, then fix up first_dts/cur_dts. */
847 static void update_initial_durations(AVFormatContext *s, AVStream *st,
848 int stream_index, int duration)
850 AVPacketList *pktl= s->packet_buffer;
853 if(st->first_dts != AV_NOPTS_VALUE){
854 cur_dts= st->first_dts;
855 for(; pktl; pktl= pktl->next){
856 if(pktl->pkt.stream_index == stream_index){
857 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
862 pktl= s->packet_buffer;
863 st->first_dts = cur_dts;
864 }else if(st->cur_dts)
867 for(; pktl; pktl= pktl->next){
868 if(pktl->pkt.stream_index != stream_index)
870 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
871 && !pktl->pkt.duration){
872 pktl->pkt.dts= cur_dts;
873 if(!st->codec->has_b_frames)
874 pktl->pkt.pts= cur_dts;
876 if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
877 pktl->pkt.duration = duration;
881 if(st->first_dts == AV_NOPTS_VALUE)
882 st->cur_dts= cur_dts;
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * braces, several conditions and intermediate statements are missing
 * here, and the timestamp logic is highly order-dependent -- kept
 * byte-identical, comments only.  Visible logic: fill in missing
 * pts/dts/duration on a demuxed packet using frame durations, parser
 * sync info (dts_ref_dts_delta / pts_dts_delta), B-frame delay
 * reordering via st->pts_buffer, and mark intra-only packets as key. */
885 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
886 AVCodecParserContext *pc, AVPacket *pkt)
888 int num, den, presentation_delayed, delay, i;
891 if (s->flags & AVFMT_FLAG_NOFILLIN)
894 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
895 pkt->dts= AV_NOPTS_VALUE;
897 /* do we have a video B-frame ? */
898 delay= st->codec->has_b_frames;
899 presentation_delayed = 0;
901 /* XXX: need has_b_frame, but cannot get it if the codec is
904 pc && pc->pict_type != AV_PICTURE_TYPE_B)
905 presentation_delayed = 1;
907 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
908 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
909 pkt->dts -= 1LL<<st->pts_wrap_bits;
912 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
913 // we take the conservative approach and discard both
914 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
915 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
916 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
917 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
920 if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
921 compute_frame_duration(&num, &den, st, pc, pkt);
923 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
925 if(pkt->duration != 0 && s->packet_buffer)
926 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
930 /* correct timestamps with byte offset if demuxers only have timestamps
931 on packet boundaries */
932 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
933 /* this will estimate bitrate based on this frame's duration and size */
934 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
935 if(pkt->pts != AV_NOPTS_VALUE)
937 if(pkt->dts != AV_NOPTS_VALUE)
941 if (pc && pc->dts_sync_point >= 0) {
942 // we have synchronization info from the parser
943 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
945 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
946 if (pkt->dts != AV_NOPTS_VALUE) {
947 // got DTS from the stream, update reference timestamp
948 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
949 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
950 } else if (st->reference_dts != AV_NOPTS_VALUE) {
951 // compute DTS based on reference timestamp
952 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
953 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
955 if (pc->dts_sync_point > 0)
956 st->reference_dts = pkt->dts; // new reference
960 /* This may be redundant, but it should not hurt. */
961 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
962 presentation_delayed = 1;
964 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
965 /* interpolate PTS and DTS if they are not present */
966 //We skip H264 currently because delay and has_b_frames are not reliably set
967 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
968 if (presentation_delayed) {
969 /* DTS = decompression timestamp */
970 /* PTS = presentation timestamp */
971 if (pkt->dts == AV_NOPTS_VALUE)
972 pkt->dts = st->last_IP_pts;
973 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
974 if (pkt->dts == AV_NOPTS_VALUE)
975 pkt->dts = st->cur_dts;
977 /* this is tricky: the dts must be incremented by the duration
978 of the frame we are displaying, i.e. the last I- or P-frame */
979 if (st->last_IP_duration == 0)
980 st->last_IP_duration = pkt->duration;
981 if(pkt->dts != AV_NOPTS_VALUE)
982 st->cur_dts = pkt->dts + st->last_IP_duration;
983 st->last_IP_duration = pkt->duration;
984 st->last_IP_pts= pkt->pts;
985 /* cannot compute PTS if not present (we can compute it only
986 by knowing the future */
987 } else if (pkt->pts != AV_NOPTS_VALUE ||
988 pkt->dts != AV_NOPTS_VALUE ||
990 st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
991 int duration = pkt->duration;
992 if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
993 compute_frame_duration(&num, &den, st, pc, pkt);
995 duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den,
996 den * (int64_t)st->time_base.num,
998 if (duration != 0 && s->packet_buffer) {
999 update_initial_durations(s, st, pkt->stream_index,
1005 if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
1007 /* presentation is not delayed : PTS and DTS are the same */
1008 if (pkt->pts == AV_NOPTS_VALUE)
1009 pkt->pts = pkt->dts;
1010 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
1012 if (pkt->pts == AV_NOPTS_VALUE)
1013 pkt->pts = st->cur_dts;
1014 pkt->dts = pkt->pts;
1015 if (pkt->pts != AV_NOPTS_VALUE)
1016 st->cur_dts = pkt->pts + duration;
1021 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1022 st->pts_buffer[0]= pkt->pts;
1023 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1024 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1025 if(pkt->dts == AV_NOPTS_VALUE)
1026 pkt->dts= st->pts_buffer[0];
1027 if(st->codec->codec_id == CODEC_ID_H264){ // we skipped it above so we try here
1028 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1030 if(pkt->dts > st->cur_dts)
1031 st->cur_dts = pkt->dts;
1034 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
1037 if(is_intra_only(st->codec))
1038 pkt->flags |= AV_PKT_FLAG_KEY;
1040 pkt->convergence_duration = pc->convergence_duration;
1043 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1046 AVPacketList *pktl = *pkt_buf;
1047 *pkt_buf = pktl->next;
1048 av_free_packet(&pktl->pkt);
1051 *pkt_buf_end = NULL;
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * the doc-comment delimiters, data/size advancing, rounding arguments of
 * av_rescale_q_rnd and loop-exit handling are missing here.  Kept
 * byte-identical; comments only.  Visible logic: run the stream parser
 * over a packet (or flush with a NULL packet), set duration/pts/dts/key
 * flag on each parsed sub-packet, index keyframes, and append the
 * sub-packets to s->parse_queue. */
1055 * Parse a packet, add all split parts to parse_queue
1057 * @param pkt packet to parse, NULL when flushing the parser at end of stream
1059 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1061 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1062 AVStream *st = s->streams[stream_index];
1063 uint8_t *data = pkt ? pkt->data : NULL;
1064 int size = pkt ? pkt->size : 0;
1065 int ret = 0, got_output = 0;
1068 av_init_packet(&flush_pkt);
1073 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1076 av_init_packet(&out_pkt);
1077 len = av_parser_parse2(st->parser, st->codec,
1078 &out_pkt.data, &out_pkt.size, data, size,
1079 pkt->pts, pkt->dts, pkt->pos);
1081 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1082 /* increment read pointer */
1086 got_output = !!out_pkt.size;
1091 /* set the duration */
1092 out_pkt.duration = 0;
1093 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1094 if (st->codec->sample_rate > 0) {
1095 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1096 (AVRational){ 1, st->codec->sample_rate },
1100 } else if (st->codec->time_base.num != 0 &&
1101 st->codec->time_base.den != 0) {
1102 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1103 st->codec->time_base,
1108 out_pkt.stream_index = st->index;
1109 out_pkt.pts = st->parser->pts;
1110 out_pkt.dts = st->parser->dts;
1111 out_pkt.pos = st->parser->pos;
1113 if (st->parser->key_frame == 1 ||
1114 (st->parser->key_frame == -1 &&
1115 st->parser->pict_type == AV_PICTURE_TYPE_I))
1116 out_pkt.flags |= AV_PKT_FLAG_KEY;
1118 compute_pkt_fields(s, st, st->parser, &out_pkt);
1120 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1121 out_pkt.flags & AV_PKT_FLAG_KEY) {
1122 ff_reduce_index(s, st->index);
1123 av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
1124 0, 0, AVINDEX_KEYFRAME);
1127 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1128 out_pkt.destruct = pkt->destruct;
1129 pkt->destruct = NULL;
1131 if ((ret = av_dup_packet(&out_pkt)) < 0)
1134 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1135 av_free_packet(&out_pkt);
1136 ret = AVERROR(ENOMEM);
1142 /* end of the stream => close and free the parser */
1143 if (pkt == &flush_pkt) {
1144 av_parser_close(st->parser);
1149 av_free_packet(pkt);
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * the output-packet parameter, copy of pktl->pkt and free of the node
 * are missing here.  Kept byte-identical; comments only.  Visible logic:
 * pop the head packet off a packet list, updating head and, when the
 * list becomes empty, the tail pointer. */
1153 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1154 AVPacketList **pkt_buffer_end,
1158 av_assert0(*pkt_buffer);
1161 *pkt_buffer = pktl->next;
1163 *pkt_buffer_end = NULL;
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * braces, some error returns and the final statements are missing here.
 * Kept byte-identical; comments only.  Visible logic: loop reading raw
 * packets, lazily create a parser per stream, either emit the packet
 * directly (filling fields and indexing keyframes) or feed it through
 * parse_packet(); flush all parsers at EOF and finally serve packets
 * from s->parse_queue. */
1168 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1170 int ret = 0, i, got_packet = 0;
1172 av_init_packet(pkt);
1174 while (!got_packet && !s->parse_queue) {
1178 /* read next packet */
1179 ret = ff_read_packet(s, &cur_pkt);
1181 if (ret == AVERROR(EAGAIN))
1183 /* flush the parsers */
1184 for(i = 0; i < s->nb_streams; i++) {
1186 if (st->parser && st->need_parsing)
1187 parse_packet(s, NULL, st->index);
1189 /* all remaining packets are now in parse_queue =>
1190 * really terminate parsing */
1194 st = s->streams[cur_pkt.stream_index];
1196 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1197 cur_pkt.dts != AV_NOPTS_VALUE &&
1198 cur_pkt.pts < cur_pkt.dts) {
1199 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1200 cur_pkt.stream_index,
1205 if (s->debug & FF_FDEBUG_TS)
1206 av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1207 cur_pkt.stream_index,
1214 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1215 st->parser = av_parser_init(st->codec->codec_id);
1217 /* no parser available: just output the raw packets */
1218 st->need_parsing = AVSTREAM_PARSE_NONE;
1219 } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1220 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1221 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1222 st->parser->flags |= PARSER_FLAG_ONCE;
1226 if (!st->need_parsing || !st->parser) {
1227 /* no parsing needed: we just output the packet as is */
1229 compute_pkt_fields(s, st, NULL, pkt);
1230 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1231 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1232 ff_reduce_index(s, st->index);
1233 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1236 } else if (st->discard < AVDISCARD_ALL) {
1237 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1241 av_free_packet(&cur_pkt);
1245 if (!got_packet && s->parse_queue)
1246 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1248 if(s->debug & FF_FDEBUG_TS)
1249 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * braces, the genpts loop structure and EOF handling are incomplete
 * here.  Kept byte-identical; comments only.  Visible logic: without
 * AVFMT_FLAG_GENPTS, serve buffered packets or read directly; with it,
 * buffer packets and fill in missing pts from the dts of a later
 * packet of the same stream (wrap-aware comparison). */
1260 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1262 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1266 return s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
1267 &s->packet_buffer_end,
1269 read_frame_internal(s, pkt);
1273 AVPacketList *pktl = s->packet_buffer;
1276 AVPacket *next_pkt = &pktl->pkt;
1278 if (next_pkt->dts != AV_NOPTS_VALUE) {
1279 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1280 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1281 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1282 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
1283 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1284 next_pkt->pts = pktl->pkt.dts;
1288 pktl = s->packet_buffer;
1291 /* read packet from packet buffer, if there is data */
1292 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1293 next_pkt->dts != AV_NOPTS_VALUE && !eof))
1294 return read_from_packet_buffer(&s->packet_buffer,
1295 &s->packet_buffer_end, pkt);
1298 ret = read_frame_internal(s, pkt);
1300 if (pktl && ret != AVERROR(EAGAIN)) {
1307 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1308 &s->packet_buffer_end)) < 0)
1309 return AVERROR(ENOMEM);
1313 /* XXX: suppress the packet queue */
1314 static void flush_packet_queue(AVFormatContext *s)
1316 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1317 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1318 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1320 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1323 /*******************************************************/
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * braces, the loop variable and the video-stream return are missing
 * here.  Kept byte-identical; comments only.  Visible logic: prefer a
 * non-attached-picture video stream, else the first audio stream, else
 * stream 0. */
1326 int av_find_default_stream_index(AVFormatContext *s)
1328 int first_audio_index = -1;
1332 if (s->nb_streams <= 0)
1334 for(i = 0; i < s->nb_streams; i++) {
1336 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1337 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1340 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1341 first_audio_index = i;
1343 return first_audio_index >= 0 ? first_audio_index : 0;
/* NOTE(review): excerpt is elided (non-contiguous embedded numbering);
 * doc-comment delimiters, braces and some per-stream resets are missing
 * here.  Kept byte-identical; comments only.  Visible logic: drop all
 * queued packets, close each stream's parser and reset its timestamp
 * state (last_IP_pts, cur_dts, reference_dts, probe_packets,
 * pts_buffer). */
1347 * Flush the frame reader.
1349 void ff_read_frame_flush(AVFormatContext *s)
1354 flush_packet_queue(s);
1356 /* for each stream, reset read state */
1357 for(i = 0; i < s->nb_streams; i++) {
1361 av_parser_close(st->parser);
1364 st->last_IP_pts = AV_NOPTS_VALUE;
1365 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1366 st->reference_dts = AV_NOPTS_VALUE;
1368 st->probe_packets = MAX_PROBE_PACKETS;
1370 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1371 st->pts_buffer[j]= AV_NOPTS_VALUE;
/* After a successful seek, set every stream's cur_dts to @timestamp, which
 * is expressed in @ref_st's time base; each stream's value is rescaled into
 * its own time base (64-bit intermediate to avoid overflow). */
1375 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1379 for(i = 0; i < s->nb_streams; i++) {
1380 AVStream *st = s->streams[i];
1382 st->cur_dts = av_rescale(timestamp,
1383 st->time_base.den * (int64_t)ref_st->time_base.num,
1384 st->time_base.num * (int64_t)ref_st->time_base.den);
/* Keep the seek index of @stream_index within s->max_index_size bytes:
 * once the entry count reaches the budget, halve it by keeping only every
 * second entry (entry i <- entry 2i), coarsening seek granularity. */
1388 void ff_reduce_index(AVFormatContext *s, int stream_index)
1390 AVStream *st= s->streams[stream_index];
1391 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1393 if((unsigned)st->nb_index_entries >= max_entries){
1395 for(i=0; 2*i<st->nb_index_entries; i++)
1396 st->index_entries[i]= st->index_entries[2*i];
1397 st->nb_index_entries= i;
/* Insert (or update) an entry in a timestamp-sorted seek index.
 * The array is grown with av_fast_realloc(); the insertion position is
 * found with ff_index_search_timestamp(). If an entry with the same
 * timestamp already exists it is updated in place (keeping the larger
 * min_distance when pos matches); otherwise later entries are shifted up.
 * Returns the entry's index (error-return paths are on elided lines). */
1401 int ff_add_index_entry(AVIndexEntry **index_entries,
1402 int *nb_index_entries,
1403 unsigned int *index_entries_allocated_size,
1404 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1406 AVIndexEntry *entries, *ie;
/* Guard against overflow of (nb+1) * sizeof(AVIndexEntry). */
1409 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1412 entries = av_fast_realloc(*index_entries,
1413 index_entries_allocated_size,
1414 (*nb_index_entries + 1) *
1415 sizeof(AVIndexEntry));
1419 *index_entries= entries;
1421 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
/* Not found (index < 0, presumably): append at the end. */
1424 index= (*nb_index_entries)++;
1425 ie= &entries[index];
1426 assert(index==0 || ie[-1].timestamp < timestamp);
1428 ie= &entries[index];
1429 if(ie->timestamp != timestamp){
1430 if(ie->timestamp <= timestamp)
/* Shift the tail up by one slot to make room for the new entry. */
1432 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1433 (*nb_index_entries)++;
1434 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1435 distance= ie->min_distance;
1439 ie->timestamp = timestamp;
1440 ie->min_distance= distance;
/* Public wrapper: add a seek-index entry to @st by delegating to
 * ff_add_index_entry() with the stream's own index storage. */
1447 int av_add_index_entry(AVStream *st,
1448 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1450 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1451 &st->index_entries_allocated_size, pos,
1452 timestamp, size, distance, flags);
/* Binary-search a sorted AVIndexEntry array for @wanted_timestamp.
 * AVSEEK_FLAG_BACKWARD selects the entry at or before the timestamp,
 * otherwise at or after; unless AVSEEK_FLAG_ANY is set the result is then
 * walked to the nearest keyframe entry in the chosen direction.
 * (Search-bound bookkeeping lines are elided in this view.) */
1455 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1456 int64_t wanted_timestamp, int flags)
1464 //optimize appending index entries at the end
1465 if(b && entries[b-1].timestamp < wanted_timestamp)
1470 timestamp = entries[m].timestamp;
1471 if(timestamp >= wanted_timestamp)
1473 if(timestamp <= wanted_timestamp)
1476 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1478 if(!(flags & AVSEEK_FLAG_ANY)){
/* Step towards the closest keyframe entry in the seek direction. */
1479 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1480 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Public wrapper around ff_index_search_timestamp() using @st's index. */
1489 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1492 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1493 wanted_timestamp, flags);
/* Seek to @target_ts on @stream_index using the demuxer's read_timestamp()
 * callback and binary search (ff_gen_search()). Existing index entries are
 * used to narrow the initial [pos_min, pos_max] / [ts_min, ts_max] search
 * window before the search. On success the file position is updated and
 * every stream's cur_dts is refreshed via ff_update_cur_dts(). */
1496 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1498 AVInputFormat *avif= s->iformat;
1499 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1500 int64_t ts_min, ts_max, ts;
1505 if (stream_index < 0)
1508 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1511 ts_min= AV_NOPTS_VALUE;
1512 pos_limit= -1; //gcc falsely says it may be uninitialized
1514 st= s->streams[stream_index];
1515 if(st->index_entries){
/* Lower bound: nearest index entry at or before target_ts. */
1518 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1519 index= FFMAX(index, 0);
1520 e= &st->index_entries[index];
1522 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1524 ts_min= e->timestamp;
1525 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* Upper bound: nearest index entry at or after target_ts. */
1531 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1532 assert(index < st->nb_index_entries);
1534 e= &st->index_entries[index];
1535 assert(e->timestamp >= target_ts);
1537 ts_max= e->timestamp;
1538 pos_limit= pos_max - e->min_distance;
1539 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1540 pos_max,pos_limit, ts_max);
1544 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
/* Commit: reposition the byte stream and propagate the found timestamp. */
1549 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1552 ff_update_cur_dts(s, st, ts);
/* Generic timestamp search over a byte range, used by ff_seek_frame_binary().
 * Maintains the invariant ts_min <= target_ts <= ts_max over the window
 * [pos_min, pos_max]; each iteration picks a probe position by linear
 * interpolation, falling back to bisection and then linear stepping when
 * the window stops shrinking. @read_timestamp must return the timestamp of
 * the first frame at/after the given position (it may adjust *pos).
 * Returns the chosen byte position and stores the timestamp in *ts_ret.
 * NOTE(review): several loop-control lines (no_change updates, error
 * returns) are elided in this view. */
1557 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1558 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1559 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1560 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1563 int64_t start_pos, filesize;
1566 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* Establish the lower bound from the start of the data, if not cached. */
1568 if(ts_min == AV_NOPTS_VALUE){
1569 pos_min = s->data_offset;
1570 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1571 if (ts_min == AV_NOPTS_VALUE)
/* Establish the upper bound by probing backwards from end of file. */
1575 if(ts_max == AV_NOPTS_VALUE){
1577 filesize = avio_size(s->pb);
1578 pos_max = filesize - 1;
1581 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1583 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1584 if (ts_max == AV_NOPTS_VALUE)
/* Extend ts_max to the last timestamp actually present in the file. */
1588 int64_t tmp_pos= pos_max + 1;
1589 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1590 if(tmp_ts == AV_NOPTS_VALUE)
1594 if(tmp_pos >= filesize)
1600 if(ts_min > ts_max){
1602 }else if(ts_min == ts_max){
/* Main narrowing loop: shrink [pos_min, pos_limit] around target_ts. */
1607 while (pos_min < pos_limit) {
1608 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1609 pos_min, pos_max, ts_min, ts_max);
1610 assert(pos_limit <= pos_max);
1613 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1614 // interpolate position (better than dichotomy)
1615 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1616 + pos_min - approximate_keyframe_distance;
1617 }else if(no_change==1){
1618 // bisection, if interpolation failed to change min or max pos last time
1619 pos = (pos_min + pos_limit)>>1;
1621 /* linear search if bisection failed, can only happen if there
1622 are very few or no keyframes between min/max */
1627 else if(pos > pos_limit)
1631 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1636 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1637 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1638 pos_limit, start_pos, no_change);
1639 if(ts == AV_NOPTS_VALUE){
1640 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1643 assert(ts != AV_NOPTS_VALUE);
/* Tighten whichever bound the probe result allows. */
1644 if (target_ts <= ts) {
1645 pos_limit = start_pos - 1;
1649 if (target_ts >= ts) {
/* Pick the side of the final window that honours AVSEEK_FLAG_BACKWARD. */
1655 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1656 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1658 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
/* NOTE(review): reads &pos_min here while logging suggests pos_max was
 * intended — appears in the original too; verify against full source. */
1660 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1661 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1662 pos, ts_min, target_ts, ts_max);
/* Byte-position seek (AVSEEK_FLAG_BYTE): clamp @pos into
 * [data_offset, filesize-1] and reposition the byte stream there. */
1667 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1668 int64_t pos_min, pos_max;
1670 pos_min = s->data_offset;
1671 pos_max = avio_size(s->pb) - 1;
1673 if (pos < pos_min) pos= pos_min;
1674 else if(pos > pos_max) pos= pos_max;
1676 avio_seek(s->pb, pos, SEEK_SET);
/* Index-based generic seek: look up @timestamp in the stream's seek index.
 * If the target lies beyond the last indexed entry, read packets forward
 * (building the index as a side effect) until a keyframe past @timestamp is
 * seen, then retry the lookup. Finally seek to the entry's byte position
 * and update cur_dts for all streams. */
1681 static int seek_frame_generic(AVFormatContext *s,
1682 int stream_index, int64_t timestamp, int flags)
1689 st = s->streams[stream_index];
1691 index = av_index_search_timestamp(st, timestamp, flags);
1693 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1696 if(index < 0 || index==st->nb_index_entries-1){
/* Position at the last indexed point (or data start) before scanning. */
1699 if(st->nb_index_entries){
1700 assert(st->index_entries);
1701 ie= &st->index_entries[st->nb_index_entries-1];
1702 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1704 ff_update_cur_dts(s, st, ie->timestamp);
1706 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
/* Scan forward packet by packet to extend the index past @timestamp. */
1712 read_status = av_read_frame(s, &pkt);
1713 } while (read_status == AVERROR(EAGAIN));
1714 if (read_status < 0)
1716 av_free_packet(&pkt);
1717 if(stream_index == pkt.stream_index){
1718 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1722 index = av_index_search_timestamp(st, timestamp, flags);
1727 ff_read_frame_flush(s);
/* Prefer the demuxer's own read_seek if it can satisfy the request. */
1728 if (s->iformat->read_seek){
1729 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1732 ie = &st->index_entries[index];
1733 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1735 ff_update_cur_dts(s, st, ie->timestamp);
/* Dispatch a seek request to the most capable mechanism available:
 * byte seek (AVSEEK_FLAG_BYTE), the demuxer's read_seek(), binary search
 * via read_timestamp(), or the generic index-based seek — in that order.
 * A negative @stream_index selects the default stream and rescales
 * @timestamp from AV_TIME_BASE into that stream's time base. */
1740 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1741 int64_t timestamp, int flags)
1746 if (flags & AVSEEK_FLAG_BYTE) {
1747 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1749 ff_read_frame_flush(s);
1750 return seek_frame_byte(s, stream_index, timestamp, flags);
1753 if(stream_index < 0){
1754 stream_index= av_find_default_stream_index(s);
1755 if(stream_index < 0)
1758 st= s->streams[stream_index];
1759 /* timestamp for default must be expressed in AV_TIME_BASE units */
1760 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1763 /* first, we try the format specific seek */
1764 if (s->iformat->read_seek) {
1765 ff_read_frame_flush(s);
1766 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1773 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1774 ff_read_frame_flush(s);
1775 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1776 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1777 ff_read_frame_flush(s);
1778 return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public seek entry point: delegate to seek_frame_internal() and, on
 * success, re-queue attached pictures (cover art) that the flush dropped. */
1784 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1786 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1789 queue_attached_pictures(s);
/* Range seek API: find a position with timestamp in [min_ts, max_ts],
 * preferring @ts. Uses the demuxer's read_seek2() when available;
 * otherwise falls back to av_seek_frame(), choosing the seek direction by
 * whichever bound @ts is closer to. */
1794 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1796 if(min_ts > ts || max_ts < ts)
1799 if (s->iformat->read_seek2) {
1801 ff_read_frame_flush(s);
1802 ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1805 queue_attached_pictures(s);
1809 if(s->iformat->read_timestamp){
1810 //try to seek via read_timestamp()
1813 //Fallback to old API if new is not implemented but old is
1814 //Note the old has somewhat different semantics
1815 if(s->iformat->read_seek || 1)
1816 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1818 // try some generic seek like seek_frame_generic() but with new ts semantics
1821 /*******************************************************/
1824 * Return TRUE if the stream has accurate duration in any stream.
1826 * @return TRUE if the stream has accurate duration for at least one component.
/* True when either any individual stream or the container itself carries a
 * known (non-AV_NOPTS_VALUE) duration. */
1828 static int has_duration(AVFormatContext *ic)
1833 for(i = 0;i < ic->nb_streams; i++) {
1834 st = ic->streams[i];
1835 if (st->duration != AV_NOPTS_VALUE)
1838 if (ic->duration != AV_NOPTS_VALUE)
1844 * Estimate the stream timings from the one of each components.
1846 * Also computes the global bitrate if possible.
/* Derive container-level start_time, duration and (when the file size is
 * known) average bit_rate from the per-stream values, all rescaled to
 * AV_TIME_BASE units. */
1848 static void update_stream_timings(AVFormatContext *ic)
1850 int64_t start_time, start_time1, end_time, end_time1;
1851 int64_t duration, duration1, filesize;
1855 start_time = INT64_MAX;
1856 end_time = INT64_MIN;
1857 duration = INT64_MIN;
1858 for(i = 0;i < ic->nb_streams; i++) {
1859 st = ic->streams[i];
1860 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1861 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1862 start_time = FFMIN(start_time, start_time1);
1863 if (st->duration != AV_NOPTS_VALUE) {
1864 end_time1 = start_time1
1865 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1866 end_time = FFMAX(end_time, end_time1);
1869 if (st->duration != AV_NOPTS_VALUE) {
1870 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1871 duration = FFMAX(duration, duration1);
1874 if (start_time != INT64_MAX) {
1875 ic->start_time = start_time;
/* Span end-start may exceed the longest individual stream duration. */
1876 if (end_time != INT64_MIN)
1877 duration = FFMAX(duration, end_time - start_time);
1879 if (duration != INT64_MIN) {
1880 ic->duration = duration;
1881 if (ic->pb && (filesize = avio_size(ic->pb)) > 0) {
1882 /* compute the bitrate */
1883 ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
1884 (double)ic->duration;
/* First compute container-level timings, then copy them (rescaled into
 * each stream's time base) into every stream that still lacks a
 * start_time of its own. */
1889 static void fill_all_stream_timings(AVFormatContext *ic)
1894 update_stream_timings(ic);
1895 for(i = 0;i < ic->nb_streams; i++) {
1896 st = ic->streams[i];
1897 if (st->start_time == AV_NOPTS_VALUE) {
1898 if(ic->start_time != AV_NOPTS_VALUE)
1899 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1900 if(ic->duration != AV_NOPTS_VALUE)
1901 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Last-resort duration estimate: if the container has no duration, derive
 * one from the total bit rate (summed from the streams when unset) and the
 * file size: duration = 8 * filesize / bit_rate, per stream time base. */
1906 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1908 int64_t filesize, duration;
1912 /* if bit_rate is already set, we believe it */
1913 if (ic->bit_rate <= 0) {
1915 for(i=0;i<ic->nb_streams;i++) {
1916 st = ic->streams[i];
1917 if (st->codec->bit_rate > 0)
1918 bit_rate += st->codec->bit_rate;
1920 ic->bit_rate = bit_rate;
1923 /* if duration is already set, we believe it */
1924 if (ic->duration == AV_NOPTS_VALUE &&
1925 ic->bit_rate != 0) {
1926 filesize = ic->pb ? avio_size(ic->pb) : 0;
1928 for(i = 0; i < ic->nb_streams; i++) {
1929 st = ic->streams[i];
1930 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1931 if (st->duration == AV_NOPTS_VALUE)
1932 st->duration = duration;
1938 #define DURATION_MAX_READ_SIZE 250000
1939 #define DURATION_MAX_RETRY 3
1941 /* only usable for MPEG-PS streams */
/* Estimate per-stream durations by reading packets near the end of the
 * file and taking the last PTS seen, minus each stream's start time
 * (start_time or first_dts). Retries with a progressively larger tail
 * window (DURATION_MAX_READ_SIZE << retry) until a PTS is found or
 * DURATION_MAX_RETRY is exhausted, then restores the old file offset and
 * resets the per-stream read state it disturbed. */
1942 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1944 AVPacket pkt1, *pkt = &pkt1;
1946 int read_size, i, ret;
1948 int64_t filesize, offset, duration;
1951 /* flush packet queue */
1952 flush_packet_queue(ic);
1954 for (i=0; i<ic->nb_streams; i++) {
1955 st = ic->streams[i];
1956 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1957 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
1960 av_parser_close(st->parser);
1965 /* estimate the end time (duration) */
1966 /* XXX: may need to support wrapping */
1967 filesize = ic->pb ? avio_size(ic->pb) : 0;
1968 end_time = AV_NOPTS_VALUE;
/* Seek to a window near EOF and scan packets from there. */
1970 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
1974 avio_seek(ic->pb, offset, SEEK_SET);
1977 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
1981 ret = ff_read_packet(ic, pkt);
1982 } while(ret == AVERROR(EAGAIN));
1985 read_size += pkt->size;
1986 st = ic->streams[pkt->stream_index];
1987 if (pkt->pts != AV_NOPTS_VALUE &&
1988 (st->start_time != AV_NOPTS_VALUE ||
1989 st->first_dts != AV_NOPTS_VALUE)) {
1990 duration = end_time = pkt->pts;
1991 if (st->start_time != AV_NOPTS_VALUE)
1992 duration -= st->start_time;
1994 duration -= st->first_dts;
/* Negative difference implies a PTS wrap; add one wrap period. */
1996 duration += 1LL<<st->pts_wrap_bits;
1998 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1999 st->duration = duration;
2002 av_free_packet(pkt);
2004 }while( end_time==AV_NOPTS_VALUE
2005 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2006 && ++retry <= DURATION_MAX_RETRY);
2008 fill_all_stream_timings(ic);
/* Restore the caller's read position and clear state we perturbed. */
2010 avio_seek(ic->pb, old_offset, SEEK_SET);
2011 for (i=0; i<ic->nb_streams; i++) {
2013 st->cur_dts= st->first_dts;
2014 st->last_IP_pts = AV_NOPTS_VALUE;
2015 st->reference_dts = AV_NOPTS_VALUE;
/* Choose the best available timing-estimation strategy: exact PTS scan for
 * seekable MPEG-PS/TS, per-stream timings when at least one stream has a
 * duration, otherwise the bitrate-based guess; then log the results. */
2019 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2023 /* get the file size, if possible */
2024 if (ic->iformat->flags & AVFMT_NOFILE) {
2027 file_size = avio_size(ic->pb);
2028 file_size = FFMAX(0, file_size);
2031 if ((!strcmp(ic->iformat->name, "mpeg") ||
2032 !strcmp(ic->iformat->name, "mpegts")) &&
2033 file_size && ic->pb->seekable) {
2034 /* get accurate estimate from the PTSes */
2035 estimate_timings_from_pts(ic, old_offset);
2036 } else if (has_duration(ic)) {
2037 /* at least one component has timings - we use them for all
2039 fill_all_stream_timings(ic);
2041 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2042 /* less precise: use bitrate info */
2043 estimate_timings_from_bit_rate(ic);
2045 update_stream_timings(ic);
/* Debug dump of the per-stream and global timings, in seconds. */
2049 AVStream av_unused *st;
2050 for(i = 0;i < ic->nb_streams; i++) {
2051 st = ic->streams[i];
2052 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2053 (double) st->start_time / AV_TIME_BASE,
2054 (double) st->duration / AV_TIME_BASE);
2056 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2057 (double) ic->start_time / AV_TIME_BASE,
2058 (double) ic->duration / AV_TIME_BASE,
2059 ic->bit_rate / 1000);
/* True when enough codec parameters are known to expose the stream:
 * a codec id plus, per media type, sample_rate/channels (and sample_fmt
 * once a decoder ran) for audio, or pix_fmt (once a decoder ran) for
 * video. Used by avformat_find_stream_info() to decide when to stop. */
2063 static int has_codec_parameters(AVStream *st)
2065 AVCodecContext *avctx = st->codec;
2067 switch (avctx->codec_type) {
2068 case AVMEDIA_TYPE_AUDIO:
2069 val = avctx->sample_rate && avctx->channels;
2070 if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2073 case AVMEDIA_TYPE_VIDEO:
2075 if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
2082 return avctx->codec_id != CODEC_ID_NONE && val != 0;
/* H.264 can reorder frames heavily; require at least 6 decoded frames
 * before trusting its decode delay. All other codecs need no guessing. */
2085 static int has_decode_delay_been_guessed(AVStream *st)
2087 return st->codec->codec_id != CODEC_ID_H264 ||
2088 st->info->nb_decoded_frames >= 6;
2091 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/* Open a decoder for @st if needed (single-threaded, so H.264 extradata
 * extraction works) and decode @avpkt until the stream's codec parameters
 * and decode delay are established. st->info->found_decoder records the
 * outcome: 1 = decoder opened, -1 = open/lookup failed (skip next time). */
2092 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2095 int got_picture = 1, ret = 0;
2097 AVPacket pkt = *avpkt;
2099 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2100 AVDictionary *thread_opt = NULL;
2102 codec = st->codec->codec ? st->codec->codec :
2103 avcodec_find_decoder(st->codec->codec_id);
2106 st->info->found_decoder = -1;
2110 /* force thread count to 1 since the h264 decoder will not extract SPS
2111 * and PPS to extradata during multi-threaded decoding */
2112 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2113 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2115 av_dict_free(&thread_opt);
2117 st->info->found_decoder = -1;
2120 st->info->found_decoder = 1;
2121 } else if (!st->info->found_decoder)
2122 st->info->found_decoder = 1;
2124 if (st->info->found_decoder < 0)
/* Keep decoding while input remains (or the decoder is being flushed via
 * a NULL-data packet) and parameters are still incomplete. */
2127 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2129 (!has_codec_parameters(st) ||
2130 !has_decode_delay_been_guessed(st) ||
2131 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2133 avcodec_get_frame_defaults(&picture);
2134 switch(st->codec->codec_type) {
2135 case AVMEDIA_TYPE_VIDEO:
2136 ret = avcodec_decode_video2(st->codec, &picture,
2137 &got_picture, &pkt);
2139 case AVMEDIA_TYPE_AUDIO:
2140 ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
2147 st->info->nb_decoded_frames++;
/* Linear search of a CODEC_ID_NONE-terminated tag table for @id; the
 * match/return lines are elided here — presumably returns the tag, or 0
 * when not found (confirm against the full source). */
2156 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2158 while (tags->id != CODEC_ID_NONE) {
/* Map a fourcc @tag to a CodecID: first an exact match over the table,
 * then a case-insensitive retry (avpriv_toupper4), else CODEC_ID_NONE. */
2166 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2169 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2170 if(tag == tags[i].tag)
2173 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2174 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2177 return CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for @id, returning the first
 * table's tag that matches (return lines elided in this view). */
2180 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2183 for(i=0; tags && tags[i]; i++){
2184 int tag= ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for @tag; return the first
 * non-NONE CodecID found, else CODEC_ID_NONE. */
2190 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2193 for(i=0; tags && tags[i]; i++){
2194 enum CodecID id= ff_codec_get_id(tags[i], tag);
2195 if(id!=CODEC_ID_NONE) return id;
2197 return CODEC_ID_NONE;
/* Fill in missing chapter end times: a chapter without an end is capped by
 * the start of the next chapter that begins after it, or by the file's
 * total duration (start_time + duration, rescaled into the chapter's time
 * base); if no bound exists the chapter is made zero-length. */
2200 static void compute_chapters_end(AVFormatContext *s)
2203 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2205 for (i = 0; i < s->nb_chapters; i++)
2206 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2207 AVChapter *ch = s->chapters[i];
2208 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
/* Clamp to the earliest later-starting chapter. */
2211 for (j = 0; j < s->nb_chapters; j++) {
2212 AVChapter *ch1 = s->chapters[j];
2213 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2214 if (j != i && next_start > ch->start && next_start < end)
2217 ch->end = (end == INT64_MAX) ? ch->start : end;
/* Enumerate candidate standard frame rates as timebase denominators over
 * 1001*12: indices below 60*12 give i/1.001 fps in 1/12 fps steps; the
 * tail indices give the exact rates 24, 30, 60, 12 and 15 fps. */
2221 static int get_std_framerate(int i){
2222 if(i<60*12) return i*1001;
2223 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2227 * Is the time base unreliable.
2228 * This is a heuristic to balance between quick acceptance of the values in
2229 * the headers vs. some extra checks.
2230 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2231 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2232 * And there are "variable" fps files this needs to detect as well.
/* Flags a timebase as suspect when it implies > ~100 fps or < 5 fps, or
 * when the codec (MPEG-2, H.264) is known to misreport frame rates. */
2234 static int tb_unreliable(AVCodecContext *c){
2235 if( c->time_base.den >= 101L*c->time_base.num
2236 || c->time_base.den < 5L*c->time_base.num
2237 /* || c->codec_tag == AV_RL32("DIVX")
2238 || c->codec_tag == AV_RL32("XVID")*/
2239 || c->codec_id == CODEC_ID_MPEG2VIDEO
2240 || c->codec_id == CODEC_ID_H264
/* Probe the input: read packets (buffering them for later av_read_frame()
 * calls), optionally decode frames, and fill in codec parameters, frame
 * rates and timings for every stream. Stops when all streams have full
 * parameters, when probesize/max_analyze_duration is exceeded, or at EOF.
 * @options, if set, holds one AVDictionary per original stream and is
 * passed through to avcodec_open2(). Returns >= 0 on success. */
2246 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2248 int i, count, ret, read_size, j;
2250 AVPacket pkt1, *pkt;
2251 int64_t old_offset = avio_tell(ic->pb);
2252 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
/* Pass 1: set up parsers and try opening decoders up front — sometimes
 * opening alone yields the missing parameters. */
2254 for(i=0;i<ic->nb_streams;i++) {
2256 AVDictionary *thread_opt = NULL;
2257 st = ic->streams[i];
2259 //only for the split stuff
2260 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2261 st->parser = av_parser_init(st->codec->codec_id);
2262 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2263 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2266 codec = st->codec->codec ? st->codec->codec :
2267 avcodec_find_decoder(st->codec->codec_id);
2269 /* force thread count to 1 since the h264 decoder will not extract SPS
2270 * and PPS to extradata during multi-threaded decoding */
2271 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2273 /* Ensure that subtitle_header is properly set. */
2274 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2275 && codec && !st->codec->codec)
2276 avcodec_open2(st->codec, codec, options ? &options[i]
2279 //try to just open decoders, in case this is enough to get parameters
2280 if (!has_codec_parameters(st)) {
2281 if (codec && !st->codec->codec)
2282 avcodec_open2(st->codec, codec, options ? &options[i]
2286 av_dict_free(&thread_opt);
2289 for (i=0; i<ic->nb_streams; i++) {
2290 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
/* Main probing loop (loop header elided in this view). */
2296 if (ff_check_interrupt(&ic->interrupt_callback)){
2298 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2302 /* check if one codec still needs to be handled */
2303 for(i=0;i<ic->nb_streams;i++) {
2304 int fps_analyze_framecount = 20;
2306 st = ic->streams[i];
2307 if (!has_codec_parameters(st))
2309 /* if the timebase is coarse (like the usual millisecond precision
2310 of mkv), we need to analyze more frames to reliably arrive at
2312 if (av_q2d(st->time_base) > 0.0005)
2313 fps_analyze_framecount *= 2;
2314 if (ic->fps_probe_size >= 0)
2315 fps_analyze_framecount = ic->fps_probe_size;
2316 /* variable fps and no guess at the real fps */
2317 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2318 && st->info->duration_count < fps_analyze_framecount
2319 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2321 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2323 if (st->first_dts == AV_NOPTS_VALUE &&
2324 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2325 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2328 if (i == ic->nb_streams) {
2329 /* NOTE: if the format has no header, then we need to read
2330 some packets to get most of the streams, so we cannot
2332 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2333 /* if we found the info for all the codecs, we can stop */
2335 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2339 /* we did not get all the codec info, but we read too much data */
2340 if (read_size >= ic->probesize) {
2342 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2346 /* NOTE: a new stream can be added there if no header in file
2347 (AVFMTCTX_NOHEADER) */
2348 ret = read_frame_internal(ic, &pkt1);
2349 if (ret == AVERROR(EAGAIN))
/* EOF: flush decoders with an empty packet to drain remaining frames. */
2354 AVPacket empty_pkt = { 0 };
2356 av_init_packet(&empty_pkt);
2358 ret = -1; /* we could not have all the codec parameters before EOF */
2359 for(i=0;i<ic->nb_streams;i++) {
2360 st = ic->streams[i];
2362 /* flush the decoders */
2363 if (st->info->found_decoder == 1) {
2365 err = try_decode_frame(st, &empty_pkt,
2366 (options && i < orig_nb_streams) ?
2367 &options[i] : NULL);
2368 } while (err > 0 && !has_codec_parameters(st));
2372 av_log(ic, AV_LOG_WARNING,
2373 "decoding for stream %d failed\n", st->index);
2374 } else if (!has_codec_parameters(st)) {
2376 avcodec_string(buf, sizeof(buf), st->codec, 0);
2377 av_log(ic, AV_LOG_WARNING,
2378 "Could not find codec parameters (%s)\n", buf);
/* Buffer the packet so later av_read_frame() calls still see it. */
2386 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2387 if ((ret = av_dup_packet(pkt)) < 0)
2388 goto find_stream_info_err;
2390 read_size += pkt->size;
2392 st = ic->streams[pkt->stream_index];
2393 if (st->codec_info_nb_frames>1) {
2394 if (av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2395 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2398 st->info->codec_info_duration += pkt->duration;
/* Frame-rate statistics: accumulate DTS deltas against each candidate
 * standard frame rate (see get_std_framerate()). */
2401 int64_t last = st->info->last_dts;
2403 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2404 int64_t duration= pkt->dts - last;
2405 double dur= duration * av_q2d(st->time_base);
2407 if (st->info->duration_count < 2)
2408 memset(st->info->duration_error, 0, sizeof(st->info->duration_error));
2409 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) {
2410 int framerate= get_std_framerate(i);
2411 int ticks= lrintf(dur*framerate/(1001*12));
2412 double error = dur - (double)ticks*1001*12 / framerate;
2413 st->info->duration_error[i] += error*error;
2415 st->info->duration_count++;
2416 // ignore the first 4 values, they might have some random jitter
2417 if (st->info->duration_count > 3)
2418 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2420 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2421 st->info->last_dts = pkt->dts;
/* Extract extradata (e.g. SPS/PPS) via the parser's split callback. */
2423 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2424 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2425 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2426 st->codec->extradata_size= i;
2427 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2428 if (!st->codec->extradata)
2429 return AVERROR(ENOMEM);
2430 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2431 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2435 /* if still no information, we try to open the codec and to
2436 decompress the frame. We try to avoid that in most cases as
2437 it takes longer and uses more memory. For MPEG-4, we need to
2438 decompress for QuickTime.
2440 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2441 least one frame of codec data, this makes sure the codec initializes
2442 the channel configuration and does not only trust the values from the container.
2444 try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2446 st->codec_info_nb_frames++;
2450 // close codecs which were opened in try_decode_frame()
2451 for(i=0;i<ic->nb_streams;i++) {
2452 st = ic->streams[i];
2453 avcodec_close(st->codec);
/* Post-processing: derive avg/r frame rates and audio dispositions. */
2455 for(i=0;i<ic->nb_streams;i++) {
2456 st = ic->streams[i];
2457 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2458 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2459 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2460 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2461 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2462 // the check for tb_unreliable() is not completely correct, since this is not about handling
2463 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2464 // ipmovie.c produces.
2465 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num)
2466 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2467 if (st->info->duration_count && !st->r_frame_rate.num
2468 && tb_unreliable(st->codec)) {
/* Pick the standard frame rate with the least accumulated error. */
2470 double best_error= 2*av_q2d(st->time_base);
2471 best_error = best_error*best_error*st->info->duration_count*1000*12*30;
2473 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) {
2474 double error = st->info->duration_error[j] * get_std_framerate(j);
2475 if(error < best_error){
2477 num = get_std_framerate(j);
2480 // do not increase frame rate by more than 1 % in order to match a standard rate.
2481 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2482 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2484 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2485 if(!st->codec->bits_per_coded_sample)
2486 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2487 // set stream disposition based on audio service type
2488 switch (st->codec->audio_service_type) {
2489 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2490 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2491 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2492 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2493 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2494 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2495 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2496 st->disposition = AV_DISPOSITION_COMMENT; break;
2497 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2498 st->disposition = AV_DISPOSITION_KARAOKE; break;
2503 estimate_timings(ic, old_offset);
2505 compute_chapters_end(ic);
2507 find_stream_info_err:
/* Cleanup: reset forced thread counts and free the probing scratch info. */
2508 for (i=0; i < ic->nb_streams; i++) {
2509 if (ic->streams[i]->codec)
2510 ic->streams[i]->codec->thread_count = 0;
2511 av_freep(&ic->streams[i]->info);
/* Return the first AVProgram containing stream index @s, or (presumably,
 * on an elided line) NULL when no program references it. */
2516 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2520 for (i = 0; i < ic->nb_programs; i++)
2521 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2522 if (ic->programs[i]->stream_index[j] == s)
2523 return ic->programs[i];
/* Select the "best" stream of @type: restrict candidates to the program of
 * @related_stream when given, skip wrong-type and impaired-disposition
 * streams, require a decoder when @decoder_ret is wanted, and prefer the
 * stream with the most probed frames (codec_info_nb_frames). Falls back to
 * scanning all streams if the related program yields nothing. Returns the
 * stream index, or AVERROR_STREAM_NOT_FOUND / AVERROR_DECODER_NOT_FOUND. */
2527 int av_find_best_stream(AVFormatContext *ic,
2528 enum AVMediaType type,
2529 int wanted_stream_nb,
2531 AVCodec **decoder_ret,
2534 int i, nb_streams = ic->nb_streams;
2535 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2536 unsigned *program = NULL;
2537 AVCodec *decoder = NULL, *best_decoder = NULL;
2539 if (related_stream >= 0 && wanted_stream_nb < 0) {
2540 AVProgram *p = find_program_from_stream(ic, related_stream);
2542 program = p->stream_index;
2543 nb_streams = p->nb_stream_indexes;
2546 for (i = 0; i < nb_streams; i++) {
2547 int real_stream_index = program ? program[i] : i;
2548 AVStream *st = ic->streams[real_stream_index];
2549 AVCodecContext *avctx = st->codec;
2550 if (avctx->codec_type != type)
2552 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2554 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2557 decoder = avcodec_find_decoder(st->codec->codec_id);
2560 ret = AVERROR_DECODER_NOT_FOUND;
2564 if (best_count >= st->codec_info_nb_frames)
2566 best_count = st->codec_info_nb_frames;
2567 ret = real_stream_index;
2568 best_decoder = decoder;
2569 if (program && i == nb_streams - 1 && ret < 0) {
2571 nb_streams = ic->nb_streams;
2572 i = 0; /* no related stream found, try again with everything */
2576 *decoder_ret = best_decoder;
2580 /*******************************************************/
/* Resume playback of a paused (network) stream: prefer the demuxer's
 * read_play callback, otherwise un-pause the AVIOContext; AVERROR(ENOSYS)
 * when neither mechanism is available. */
2582 int av_read_play(AVFormatContext *s)
2584 if (s->iformat->read_play)
2585 return s->iformat->read_play(s);
2587 return avio_pause(s->pb, 0);
2588 return AVERROR(ENOSYS);
/* Pause a (network) stream; mirror image of av_read_play(): demuxer
 * read_pause callback first, then avio_pause(pb, 1), else ENOSYS. */
2591 int av_read_pause(AVFormatContext *s)
2593 if (s->iformat->read_pause)
2594 return s->iformat->read_pause(s);
2596 return avio_pause(s->pb, 1);
2597 return AVERROR(ENOSYS);
/* Free an AVFormatContext and everything it owns: per-stream data
 * (parser, attached picture, metadata, index entries, codec context
 * extradata/subtitle header, priv_data), programs, chapters, context
 * metadata and the stream array itself. Does NOT close any AVIOContext. */
2600 void avformat_free_context(AVFormatContext *s)
2606 if (s->iformat && s->iformat->priv_class && s->priv_data)
2607 av_opt_free(s->priv_data);
2609 for(i=0;i<s->nb_streams;i++) {
2610 /* free all data in a stream component */
2613 av_parser_close(st->parser);
2615 if (st->attached_pic.data)
2616 av_free_packet(&st->attached_pic);
2617 av_dict_free(&st->metadata);
2618 av_free(st->index_entries);
2619 av_free(st->codec->extradata);
2620 av_free(st->codec->subtitle_header);
2622 av_free(st->priv_data);
/* Programs are released back-to-front; each owns its metadata dict and
 * its stream_index array. */
2626 for(i=s->nb_programs-1; i>=0; i--) {
2627 av_dict_free(&s->programs[i]->metadata);
2628 av_freep(&s->programs[i]->stream_index);
2629 av_freep(&s->programs[i]);
2631 av_freep(&s->programs);
2632 av_freep(&s->priv_data);
2633 while(s->nb_chapters--) {
2634 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2635 av_free(s->chapters[s->nb_chapters]);
2637 av_freep(&s->chapters);
2638 av_dict_free(&s->metadata);
2639 av_freep(&s->streams);
/* Deprecated (guarded by FF_API_CLOSE_INPUT_FILE) compatibility wrapper
 * around avformat_close_input(). */
2643 #if FF_API_CLOSE_INPUT_FILE
2644 void av_close_input_file(AVFormatContext *s)
2646 avformat_close_input(&s);
/* Close an opened input: flush the packet queue, run the demuxer's
 * read_close hook, free the context, and NULL the caller's pointer.
 * The AVIOContext is left alone for AVFMT_NOFILE demuxers and for
 * user-supplied (AVFMT_FLAG_CUSTOM_IO) I/O contexts. */
2650 void avformat_close_input(AVFormatContext **ps)
2652 AVFormatContext *s = *ps;
2653 AVIOContext *pb = (s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2655 flush_packet_queue(s);
2656 if (s->iformat->read_close)
2657 s->iformat->read_close(s);
2658 avformat_free_context(s);
/* Append a new AVStream to s and return it (NULL on allocation failure in
 * the dropped error paths). The stream gets a codec context pre-configured
 * for codec c, AV_NOPTS_VALUE timestamps, an MPEG-like default time base
 * (90 kHz, 33-bit pts), and a cleared pts reorder buffer. */
2664 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
/* Guard the realloc below against nb_streams * sizeof overflow. */
2670 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2672 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2675 s->streams = streams;
2677 st = av_mallocz(sizeof(AVStream));
2680 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2685 st->codec = avcodec_alloc_context3(c);
2687 /* no default bitrate if decoding */
2688 st->codec->bit_rate = 0;
2690 st->index = s->nb_streams;
2691 st->start_time = AV_NOPTS_VALUE;
2692 st->duration = AV_NOPTS_VALUE;
2693 /* we set the current DTS to 0 so that formats without any timestamps
2694 but durations get some timestamps, formats with some unknown
2695 timestamps have their first few packets buffered and the
2696 timestamps corrected before they are returned to the user */
2698 st->first_dts = AV_NOPTS_VALUE;
2699 st->probe_packets = MAX_PROBE_PACKETS;
2701 /* default pts setting is MPEG-like */
2702 avpriv_set_pts_info(st, 33, 1, 90000);
2703 st->last_IP_pts = AV_NOPTS_VALUE;
2704 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2705 st->pts_buffer[i]= AV_NOPTS_VALUE;
2706 st->reference_dts = AV_NOPTS_VALUE;
2708 st->sample_aspect_ratio = (AVRational){0,1};
2710 s->streams[s->nb_streams++] = st;
/* Find the program with the given id, or create, register and return a new
 * one (discard defaults to AVDISCARD_NONE). NULL-return error handling is
 * in lines dropped from this excerpt. */
2714 AVProgram *av_new_program(AVFormatContext *ac, int id)
2716 AVProgram *program=NULL;
2719 av_dlog(ac, "new_program: id=0x%04x\n", id);
2721 for(i=0; i<ac->nb_programs; i++)
2722 if(ac->programs[i]->id == id)
2723 program = ac->programs[i];
2726 program = av_mallocz(sizeof(AVProgram));
2729 dynarray_add(&ac->programs, &ac->nb_programs, program);
2730 program->discard = AVDISCARD_NONE;
/* Find the chapter with the given id or create and register a new one,
 * then (re)set its title metadata, time base, start and (dropped line)
 * end. Returns the chapter. */
2737 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2739 AVChapter *chapter = NULL;
2742 for(i=0; i<s->nb_chapters; i++)
2743 if(s->chapters[i]->id == id)
2744 chapter = s->chapters[i];
2747 chapter= av_mallocz(sizeof(AVChapter));
2750 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2752 av_dict_set(&chapter->metadata, "title", title, 0);
2754 chapter->time_base= time_base;
2755 chapter->start = start;
2761 /************************************************************/
2762 /* output media file */
/* Validate the stream's codec_tag against the muxer's codec_tag tables.
 * Returns non-zero (OK) / zero (mismatch) per the rules spelled out in the
 * comment below. */
2764 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
2766 const AVCodecTag *avctag;
2768 enum CodecID id = CODEC_ID_NONE;
2769 unsigned int tag = 0;
2772 * Check that tag + id is in the table
2773 * If neither is in the table -> OK
2774 * If tag is in the table with another id -> FAIL
2775 * If id is in the table with another tag -> FAIL unless strict < normal
2777 for (n = 0; s->oformat->codec_tag[n]; n++) {
2778 avctag = s->oformat->codec_tag[n];
2779 while (avctag->id != CODEC_ID_NONE) {
/* Tag comparison is case-insensitive via avpriv_toupper4(). */
2780 if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
2782 if (id == st->codec->codec_id)
2785 if (avctag->id == st->codec->codec_id)
2790 if (id != CODEC_ID_NONE)
2792 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
/* Allocate the muxer's private data, apply user options, sanity-check all
 * streams (sample rate, time base, dimensions, aspect ratio, codec tags),
 * write the container header and initialise per-stream PTS generation.
 * Returns 0 on success, a negative AVERROR otherwise (error unwinding is
 * largely in lines dropped from this excerpt). */
2797 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
2801 AVDictionary *tmp = NULL;
/* Work on a copy of the options dict so the caller's dict can be used to
 * report unrecognised options. */
2804 av_dict_copy(&tmp, *options, 0);
2805 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
2808 // some sanity checks
2809 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
2810 av_log(s, AV_LOG_ERROR, "no streams\n");
2811 ret = AVERROR(EINVAL);
2815 for(i=0;i<s->nb_streams;i++) {
2818 switch (st->codec->codec_type) {
2819 case AVMEDIA_TYPE_AUDIO:
2820 if(st->codec->sample_rate<=0){
2821 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2822 ret = AVERROR(EINVAL);
2825 if(!st->codec->block_align)
2826 st->codec->block_align = st->codec->channels *
2827 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2829 case AVMEDIA_TYPE_VIDEO:
2830 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2831 av_log(s, AV_LOG_ERROR, "time base not set\n");
2832 ret = AVERROR(EINVAL);
2835 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2836 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2837 ret = AVERROR(EINVAL);
2840 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2841 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
2842 "(%d/%d) and encoder layer (%d/%d)\n",
2843 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2844 st->codec->sample_aspect_ratio.num,
2845 st->codec->sample_aspect_ratio.den);
2846 ret = AVERROR(EINVAL);
2852 if(s->oformat->codec_tag){
2853 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
2854 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi, we override it here
2855 st->codec->codec_tag= 0;
2857 if(st->codec->codec_tag){
2858 if (!validate_codec_tag(s, st)) {
2860 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
2861 av_log(s, AV_LOG_ERROR,
2862 "Tag %s/0x%08x incompatible with output codec id '%d'\n",
2863 tagbuf, st->codec->codec_tag, st->codec->codec_id);
2864 ret = AVERROR_INVALIDDATA;
2868 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2871 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2872 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2873 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2876 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2877 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2878 if (!s->priv_data) {
2879 ret = AVERROR(ENOMEM);
2882 if (s->oformat->priv_class) {
2883 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2884 av_opt_set_defaults(s->priv_data);
2885 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
2890 /* set muxer identification string */
2891 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2892 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
2895 if(s->oformat->write_header){
2896 ret = s->oformat->write_header(s);
2901 /* init PTS generation */
2902 for(i=0;i<s->nb_streams;i++) {
2903 int64_t den = AV_NOPTS_VALUE;
2906 switch (st->codec->codec_type) {
2907 case AVMEDIA_TYPE_AUDIO:
2908 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2910 case AVMEDIA_TYPE_VIDEO:
2911 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2916 if (den != AV_NOPTS_VALUE) {
2918 ret = AVERROR_INVALIDDATA;
2921 frac_init(&st->pts, 0, 0, den);
2926 av_dict_free(options);
2935 //FIXME merge with compute_pkt_fields
/* Fill in / validate pkt timestamps before muxing: derive duration when
 * missing, synthesize pts for delay-free encoders, reconstruct dts from
 * pts through the reorder buffer, enforce monotonically increasing dts and
 * pts >= dts, and advance the stream's fractional pts accumulator.
 * Returns 0 or AVERROR(EINVAL) on invalid timestamps. */
2936 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
/* delay = number of reordered (B) frames the encoder may emit. */
2937 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2938 int num, den, frame_size, i;
2940 av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n",
2941 pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2943 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2944 return AVERROR(EINVAL);*/
2946 /* duration field */
2947 if (pkt->duration == 0) {
2948 compute_frame_duration(&num, &den, st, NULL, pkt);
2950 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
2954 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2957 //XXX/FIXME this is a temporary hack until all encoders output pts
2958 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2960 // pkt->pts= st->cur_dts;
2961 pkt->pts= st->pts.val;
2964 //calculate dts from pts
2965 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2966 st->pts_buffer[0]= pkt->pts;
2967 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2968 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
/* One bubble-sort pass keeps pts_buffer ordered; the smallest entry is
 * the dts of the current packet. */
2969 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2970 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2972 pkt->dts= st->pts_buffer[0];
2975 if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE &&
2976 ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
2977 st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
2978 av_log(s, AV_LOG_ERROR,
2979 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
2980 st->index, st->cur_dts, pkt->dts);
2981 return AVERROR(EINVAL);
2983 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2984 av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index);
2985 return AVERROR(EINVAL);
2988 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2989 st->cur_dts= pkt->dts;
2990 st->pts.val= pkt->dts;
2993 switch (st->codec->codec_type) {
2994 case AVMEDIA_TYPE_AUDIO:
2995 frame_size = get_audio_frame_size(st->codec, pkt->size, 1);
2997 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2998 likely equal to the encoder delay, but it would be better if we
2999 had the real timestamps from the encoder */
3000 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3001 frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3004 case AVMEDIA_TYPE_VIDEO:
3005 frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Write a packet straight to the muxer without interleaving. AVFMT_ALLOW_FLUSH
 * muxers get the packet (possibly NULL) unconditionally; otherwise the
 * timestamps are validated/completed first via compute_pkt_fields2(). */
3013 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3018 if (s->oformat->flags & AVFMT_ALLOW_FLUSH)
3019 return s->oformat->write_packet(s, pkt);
3023 ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
/* Timestamp errors are fatal only for muxers that need timestamps. */
3025 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3028 ret= s->oformat->write_packet(s, pkt);
3031 s->streams[pkt->stream_index]->nb_frames++;
/* Insert a copy of pkt into the context's packet buffer, keeping the list
 * ordered according to the supplied compare() callback. The search starts
 * at the stream's last buffered packet (packets of one stream are already
 * in order) and short-circuits to the list tail when the new packet
 * compares after the current last element. */
3035 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3036 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3038 AVPacketList **next_point, *this_pktl;
3040 this_pktl = av_mallocz(sizeof(AVPacketList));
3041 this_pktl->pkt= *pkt;
3042 pkt->destruct= NULL; // do not free original but only the copy
3043 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
3045 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3046 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
3048 next_point = &s->packet_buffer;
3051 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3052 while(!compare(s, &(*next_point)->pkt, pkt)){
3053 next_point= &(*next_point)->next;
3057 next_point = &(s->packet_buffer_end->next);
3060 assert(!*next_point);
3062 s->packet_buffer_end= this_pktl;
3065 this_pktl->next= *next_point;
3067 s->streams[pkt->stream_index]->last_in_packet_buffer=
3068 *next_point= this_pktl;
/* Ordering predicate for dts-based interleaving: compare the two packets'
 * dts in their respective stream time bases, breaking exact ties by
 * stream index so the ordering is total and stable. */
3071 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3073 AVStream *st = s->streams[ pkt ->stream_index];
3074 AVStream *st2= s->streams[ next->stream_index];
3075 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3079 return pkt->stream_index < next->stream_index;
/* Buffer pkt (when non-NULL) ordered by dts and pop the head packet into
 * *out once every stream has at least one buffered packet — or, when
 * flush is set, as long as anything is buffered. Returns 1 with a packet
 * in *out, else 0 with *out cleared. */
3083 int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3084 AVPacket *pkt, int flush)
3091 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
3094 for(i=0; i < s->nb_streams; i++)
3095 stream_count+= !!s->streams[i]->last_in_packet_buffer;
3097 if(stream_count && (s->nb_streams == stream_count || flush)){
3098 pktl= s->packet_buffer;
3101 s->packet_buffer= pktl->next;
3102 if(!s->packet_buffer)
3103 s->packet_buffer_end= NULL;
/* Keep the stream's tail pointer consistent when its last packet leaves
 * the buffer. */
3105 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3106 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3110 av_init_packet(out);
/* Deprecated public alias (FF_API_INTERLEAVE_PACKET) for
 * ff_interleave_packet_per_dts(). */
3115 #if FF_API_INTERLEAVE_PACKET
3116 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3117 AVPacket *pkt, int flush)
3119 return ff_interleave_packet_per_dts(s, out, pkt, flush);
/**
3124 * Interleave an AVPacket correctly so it can be muxed.
 * Dispatches to the muxer's own interleave_packet callback when it has
 * one, otherwise falls back to dts-based interleaving.
3125 * @param out the interleaved packet will be output here
3126 * @param in the input packet
3127 * @param flush 1 if no further packets are available as input and all
3128 * remaining packets should be output
3129 * @return 1 if a packet was output, 0 if no packet could be output,
3130 * < 0 if an error occurred
 */
3132 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3133 if (s->oformat->interleave_packet) {
3134 int ret = s->oformat->interleave_packet(s, out, in, flush);
3139 return ff_interleave_packet_per_dts(s, out, in, flush);
/* Write a packet through the interleaving queue: timestamps are completed
 * with compute_pkt_fields2(), then interleave_packet() is drained and each
 * emitted packet is handed to the muxer's write_packet. A NULL pkt flushes
 * the queue. */
3142 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3146 AVStream *st= s->streams[ pkt->stream_index];
3148 //FIXME/XXX/HACK drop zero sized packets
3149 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3152 av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
3153 pkt->size, pkt->dts, pkt->pts);
3154 if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3157 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3158 return AVERROR(EINVAL);
3160 av_dlog(s, "av_interleaved_write_frame FLUSH\n");
3166 int ret= interleave_packet(s, &opkt, pkt, flush);
3167 if(ret<=0) //FIXME cleanup needed for ret<0 ?
3170 ret= s->oformat->write_packet(s, &opkt);
3172 s->streams[opkt.stream_index]->nb_frames++;
3174 av_free_packet(&opkt);
/* Finish muxing: drain the interleaving queue through write_packet, call
 * the muxer's write_trailer, then release per-stream and muxer private
 * data. Must be the last call on an output context before freeing it. */
3182 int av_write_trailer(AVFormatContext *s)
3188 ret= interleave_packet(s, &pkt, NULL, 1);
3189 if(ret<0) //FIXME cleanup needed for ret<0 ?
3194 ret= s->oformat->write_packet(s, &pkt);
3196 s->streams[pkt.stream_index]->nb_frames++;
3198 av_free_packet(&pkt);
3204 if(s->oformat->write_trailer)
3205 ret = s->oformat->write_trailer(s);
3207 for(i=0;i<s->nb_streams;i++) {
3208 av_freep(&s->streams[i]->priv_data);
3209 av_freep(&s->streams[i]->index_entries);
3211 if (s->oformat->priv_class)
3212 av_opt_free(s->priv_data);
3213 av_freep(&s->priv_data);
/* Add stream idx to the program with id progid, validating the index and
 * skipping the append when the stream is already listed. Grows the
 * program's stream_index array via av_realloc. */
3217 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3220 AVProgram *program=NULL;
3223 if (idx >= ac->nb_streams) {
3224 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3228 for(i=0; i<ac->nb_programs; i++){
3229 if(ac->programs[i]->id != progid)
3231 program = ac->programs[i];
3232 for(j=0; j<program->nb_stream_indexes; j++)
3233 if(program->stream_index[j] == idx)
/* realloc into a temporary so the original array survives failure. */
3236 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3239 program->stream_index = tmp;
3240 program->stream_index[program->nb_stream_indexes++] = idx;
3245 static void print_fps(double d, const char *postfix){
3246 uint64_t v= lrintf(d*100);
3247 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3248 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3249 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/* Pretty-print a metadata dictionary under the given indent, skipping the
 * dump entirely when the dict holds nothing but a "language" entry (that
 * key is shown inline elsewhere) and omitting "language" from the list. */
3252 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3254 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3255 AVDictionaryEntry *tag=NULL;
3257 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3258 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3259 if(strcmp("language", tag->key))
3260 av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value);
3265 /* "user interface" functions */
/* Print one "Stream #index.i" description line: codec string, optional id
 * and language tags, aspect-ratio info, frame-rate/time-base figures for
 * video, disposition flags, then the stream metadata. */
3266 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3269 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3270 AVStream *st = ic->streams[i];
3271 int g = av_gcd(st->time_base.num, st->time_base.den);
3272 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3273 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3274 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
3275 /* the pid is an important information, so we display it */
3276 /* XXX: add a generic system */
3277 if (flags & AVFMT_SHOW_IDS)
3278 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3280 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3281 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3282 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* Only show PAR/DAR when the stream-level aspect ratio is set and differs
 * from the codec-level one. */
3283 if (st->sample_aspect_ratio.num && // default
3284 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3285 AVRational display_aspect_ratio;
3286 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3287 st->codec->width*st->sample_aspect_ratio.num,
3288 st->codec->height*st->sample_aspect_ratio.den,
3290 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
3291 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3292 display_aspect_ratio.num, display_aspect_ratio.den);
3294 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3295 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3296 print_fps(av_q2d(st->avg_frame_rate), "fps");
3297 if(st->r_frame_rate.den && st->r_frame_rate.num)
3298 print_fps(av_q2d(st->r_frame_rate), "tbr");
3299 if(st->time_base.den && st->time_base.num)
3300 print_fps(1/av_q2d(st->time_base), "tbn");
3301 if(st->codec->time_base.den && st->codec->time_base.num)
3302 print_fps(1/av_q2d(st->codec->time_base), "tbc");
3304 if (st->disposition & AV_DISPOSITION_DEFAULT)
3305 av_log(NULL, AV_LOG_INFO, " (default)");
3306 if (st->disposition & AV_DISPOSITION_DUB)
3307 av_log(NULL, AV_LOG_INFO, " (dub)");
3308 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3309 av_log(NULL, AV_LOG_INFO, " (original)");
3310 if (st->disposition & AV_DISPOSITION_COMMENT)
3311 av_log(NULL, AV_LOG_INFO, " (comment)");
3312 if (st->disposition & AV_DISPOSITION_LYRICS)
3313 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3314 if (st->disposition & AV_DISPOSITION_KARAOKE)
3315 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3316 if (st->disposition & AV_DISPOSITION_FORCED)
3317 av_log(NULL, AV_LOG_INFO, " (forced)");
3318 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3319 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3320 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3321 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3322 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3323 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3324 av_log(NULL, AV_LOG_INFO, "\n");
3325 dump_metadata(NULL, st->metadata, " ");
/* Print a human-readable description of the whole format context:
 * container name, metadata, duration/start/bitrate line, chapters,
 * programs with their streams, and finally any stream not belonging to a
 * program (tracked via the "printed" scratch array). */
3328 void av_dump_format(AVFormatContext *ic,
3334 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3335 if (ic->nb_streams && !printed)
3338 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3339 is_output ? "Output" : "Input",
3341 is_output ? ic->oformat->name : ic->iformat->name,
3342 is_output ? "to" : "from", url);
3343 dump_metadata(NULL, ic->metadata, " ");
3345 av_log(NULL, AV_LOG_INFO, " Duration: ");
3346 if (ic->duration != AV_NOPTS_VALUE) {
3347 int hours, mins, secs, us;
3348 secs = ic->duration / AV_TIME_BASE;
3349 us = ic->duration % AV_TIME_BASE;
3354 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3355 (100 * us) / AV_TIME_BASE);
3357 av_log(NULL, AV_LOG_INFO, "N/A");
3359 if (ic->start_time != AV_NOPTS_VALUE) {
3361 av_log(NULL, AV_LOG_INFO, ", start: ");
3362 secs = ic->start_time / AV_TIME_BASE;
3363 us = abs(ic->start_time % AV_TIME_BASE);
3364 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3365 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3367 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3369 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3371 av_log(NULL, AV_LOG_INFO, "N/A");
3373 av_log(NULL, AV_LOG_INFO, "\n");
3375 for (i = 0; i < ic->nb_chapters; i++) {
3376 AVChapter *ch = ic->chapters[i];
3377 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3378 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3379 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3381 dump_metadata(NULL, ch->metadata, " ");
3383 if(ic->nb_programs) {
3384 int j, k, total = 0;
3385 for(j=0; j<ic->nb_programs; j++) {
3386 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3388 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3389 name ? name->value : "");
3390 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3391 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3392 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3393 printed[ic->programs[j]->stream_index[k]] = 1;
3395 total += ic->programs[j]->nb_stream_indexes;
3397 if (total < ic->nb_streams)
3398 av_log(NULL, AV_LOG_INFO, " No Program\n");
3400 for(i=0;i<ic->nb_streams;i++)
3402 dump_stream_format(ic, i, index, is_output);
/* ABI-compatibility shim: re-export av_gettime() under the LIBAVFORMAT_54
 * symbol version for shared builds that support symbol versioning. */
3407 #if FF_API_AV_GETTIME && CONFIG_SHARED && HAVE_SYMVER
3408 FF_SYMVER(int64_t, av_gettime, (void), "LIBAVFORMAT_54")
3410 return av_gettime();
/* Current time as NTP-epoch microseconds: wall clock truncated to
 * millisecond precision, shifted by NTP_OFFSET_US (1900 vs 1970 epoch). */
3414 uint64_t ff_ntp_time(void)
3416 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/* Expand a "%d"-style pattern in path, substituting the frame number, and
 * write the result to buf (at most buf_size bytes). Fails when no %d is
 * found or the result does not fit; much of the scanning loop is in lines
 * dropped from this excerpt.
 * NOTE(review): isdigit(*p) is called on a plain char — should be cast to
 * unsigned char per <ctype.h> rules; confirm against the full source. */
3419 int av_get_frame_filename(char *buf, int buf_size,
3420 const char *path, int number)
3423 char *q, buf1[20], c;
3424 int nd, len, percentd_found;
3436 while (isdigit(*p)) {
3437 nd = nd * 10 + *p++ - '0';
3440 } while (isdigit(c));
3449 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3451 if ((q - buf + len) > buf_size - 1)
3453 memcpy(q, buf1, len);
3461 if ((q - buf) < buf_size - 1)
3465 if (!percentd_found)
/* Shared hex-dump worker: 16 bytes per row, hex column then a printable
 * ASCII column. Output goes to av_log when f is NULL, else to the FILE. */
3474 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3478 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3480 for(i=0;i<size;i+=16) {
3487 PRINT(" %02x", buf[i+j]);
3492 for(j=0;j<len;j++) {
/* Non-printable bytes are replaced in the ASCII column. */
3494 if (c < ' ' || c > '~')
/* Public wrapper: hex-dump to a stdio FILE. */
3503 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3505 hex_dump_internal(NULL, f, 0, buf, size);
/* Public wrapper: hex-dump through av_log at the given level. */
3508 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3510 hex_dump_internal(avcl, NULL, level, buf, size);
/* Shared packet-dump worker: stream index, keyframe flag, duration, dts,
 * pts (converted to seconds via time_base), size, and optionally the
 * payload as a hex dump. Destination selection mirrors hex_dump_internal. */
3513 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3516 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3517 PRINT("stream #%d:\n", pkt->stream_index);
3518 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3519 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3520 /* DTS is _always_ valid after av_read_frame() */
3522 if (pkt->dts == AV_NOPTS_VALUE)
3525 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3526 /* PTS may not be known if B-frames are present. */
3528 if (pkt->pts == AV_NOPTS_VALUE)
3531 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3533 PRINT(" size=%d\n", pkt->size);
3536 av_hex_dump(f, pkt->data, pkt->size);
/* Public wrapper: dump a packet to a FILE using the stream's time base. */
3539 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3541 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
/* Public wrapper: dump a packet through av_log at the given level. */
3544 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3547 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/* Split a URL into protocol, authorization (user[:pass]), hostname, port
 * and path components. Any output buffer may be sized 0 to skip that
 * component; port_ptr is set to -1 when no port is present. Handles
 * bracketed numeric IPv6 hosts. */
3550 void av_url_split(char *proto, int proto_size,
3551 char *authorization, int authorization_size,
3552 char *hostname, int hostname_size,
3554 char *path, int path_size,
3557 const char *p, *ls, *at, *col, *brk;
/* Initialise every requested output to "empty" before parsing. */
3559 if (port_ptr) *port_ptr = -1;
3560 if (proto_size > 0) proto[0] = 0;
3561 if (authorization_size > 0) authorization[0] = 0;
3562 if (hostname_size > 0) hostname[0] = 0;
3563 if (path_size > 0) path[0] = 0;
3565 /* parse protocol */
3566 if ((p = strchr(url, ':'))) {
3567 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3572 /* no protocol means plain filename */
3573 av_strlcpy(path, url, path_size);
3577 /* separate path from hostname */
3578 ls = strchr(p, '/');
3580 ls = strchr(p, '?');
3582 av_strlcpy(path, ls, path_size);
3584 ls = &p[strlen(p)]; // XXX
3586 /* the rest is hostname, use that to parse auth/port */
3588 /* authorization (user[:pass]@hostname) */
3589 if ((at = strchr(p, '@')) && at < ls) {
3590 av_strlcpy(authorization, p,
3591 FFMIN(authorization_size, at + 1 - p));
3592 p = at + 1; /* skip '@' */
3595 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3597 av_strlcpy(hostname, p + 1,
3598 FFMIN(hostname_size, brk - p));
3599 if (brk[1] == ':' && port_ptr)
3600 *port_ptr = atoi(brk + 2);
3601 } else if ((col = strchr(p, ':')) && col < ls) {
3602 av_strlcpy(hostname, p,
3603 FFMIN(col + 1 - p, hostname_size));
3604 if (port_ptr) *port_ptr = atoi(col + 1);
3606 av_strlcpy(hostname, p,
3607 FFMIN(ls + 1 - p, hostname_size));
/**
 * Encode the first s bytes of src as hexadecimal text into buff and
 * return buff. Exactly 2*s characters are written; the result is NOT
 * NUL-terminated. lowercase selects 'a'-'f' digits instead of 'A'-'F'.
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    static const char hex_table_uc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'A', 'B',
                                           'C', 'D', 'E', 'F' };
    static const char hex_table_lc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'a', 'b',
                                           'c', 'd', 'e', 'f' };
    const char *digits = lowercase ? hex_table_lc : hex_table_uc;
    int i;

    for (i = 0; i < s; i++) {
        /* high nibble first, then low nibble */
        buff[2 * i]     = digits[src[i] >> 4];
        buff[2 * i + 1] = digits[src[i] & 0xF];
    }

    return buff;
}
/* Decode a whitespace-tolerant hex string p into data (when data is
 * non-NULL) and return the number of bytes; the pairing/accumulation
 * logic is in lines dropped from this excerpt. */
3632 int ff_hex_to_data(uint8_t *data, const char *p)
3639 p += strspn(p, SPACE_CHARS);
3642 c = toupper((unsigned char) *p++);
3643 if (c >= '0' && c <= '9')
3645 else if (c >= 'A' && c <= 'F')
/* Set a stream's time base to pts_num/pts_den reduced to lowest terms
 * (warning when reduction was needed), rejecting non-positive results,
 * and record the pts wrap bit count. */
3660 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
3661 unsigned int pts_num, unsigned int pts_den)
3664 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
3665 if(new_tb.num != pts_num)
3666 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
3668 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
3670 if(new_tb.num <= 0 || new_tb.den <= 0) {
3671 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase for st:%d\n", s->index);
3674 s->time_base = new_tb;
3675 s->pts_wrap_bits = pts_wrap_bits;
/* Assemble a URL "proto://[auth@]host[:port]<fmt...>" into str. Numeric
 * IPv6 hostnames are detected with getaddrinfo(AI_NUMERICHOST) and
 * wrapped in brackets when the network stack is compiled in. The trailing
 * path is appended from the printf-style fmt/varargs. */
3678 int ff_url_join(char *str, int size, const char *proto,
3679 const char *authorization, const char *hostname,
3680 int port, const char *fmt, ...)
3683 struct addrinfo hints = { 0 }, *ai;
3688 av_strlcatf(str, size, "%s://", proto);
3689 if (authorization && authorization[0])
3690 av_strlcatf(str, size, "%s@", authorization);
3691 #if CONFIG_NETWORK && defined(AF_INET6)
3692 /* Determine if hostname is a numerical IPv6 address,
3693 * properly escape it within [] in that case. */
3694 hints.ai_flags = AI_NUMERICHOST;
3695 if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
3696 if (ai->ai_family == AF_INET6) {
3697 av_strlcat(str, "[", size);
3698 av_strlcat(str, hostname, size);
3699 av_strlcat(str, "]", size);
3701 av_strlcat(str, hostname, size);
3706 /* Not an IPv6 address, just output the plain string. */
3707 av_strlcat(str, hostname, size);
3710 av_strlcatf(str, size, ":%d", port);
3713 int len = strlen(str);
3716 vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
/* Forward a packet from a source context into a chained (nested) muxer:
 * copy the packet, retarget the stream index, rescale pts/dts from the
 * source stream's time base to the destination stream's, then write. */
3722 int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
3723 AVFormatContext *src)
3728 local_pkt.stream_index = dst_stream;
3729 if (pkt->pts != AV_NOPTS_VALUE)
3730 local_pkt.pts = av_rescale_q(pkt->pts,
3731 src->streams[pkt->stream_index]->time_base,
3732 dst->streams[dst_stream]->time_base);
3733 if (pkt->dts != AV_NOPTS_VALUE)
3734 local_pkt.dts = av_rescale_q(pkt->dts,
3735 src->streams[pkt->stream_index]->time_base,
3736 dst->streams[dst_stream]->time_base);
3737 return av_write_frame(dst, &local_pkt);
/* Parse a comma-separated list of key=value pairs (values optionally
 * double-quoted with backslash escapes). For each key the callback
 * supplies the destination buffer; characters are copied only while the
 * buffer has room, leaving space for a terminator. */
3740 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3743 const char *ptr = str;
3745 /* Parse key=value pairs. */
3748 char *dest = NULL, *dest_end;
3749 int key_len, dest_len = 0;
3751 /* Skip whitespace and potential commas. */
3752 while (*ptr && (isspace(*ptr) || *ptr == ','))
3759 if (!(ptr = strchr(key, '=')))
3762 key_len = ptr - key;
3764 callback_get_buf(context, key, key_len, &dest, &dest_len);
3765 dest_end = dest + dest_len - 1;
/* Quoted value: copy until the closing quote, honoring escapes
 * (escape handling lines are dropped from this excerpt). */
3769 while (*ptr && *ptr != '\"') {
3773 if (dest && dest < dest_end)
3777 if (dest && dest < dest_end)
/* Unquoted value: copy until whitespace or comma. */
3785 for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
3786 if (dest && dest < dest_end)
/* Linear search for the stream whose id matches; the found/not-found
 * return statements are in lines dropped from this excerpt. */
3794 int ff_find_stream_index(AVFormatContext *s, int id)
3797 for (i = 0; i < s->nb_streams; i++) {
3798 if (s->streams[i]->id == id)
/* Resolve rel against base into buf: server-absolute paths replace
 * everything after the host, fully-qualified URLs are copied verbatim,
 * and relative paths are appended after stripping the base's file name
 * and collapsing leading "../" components. */
3804 void ff_make_absolute_url(char *buf, int size, const char *base,
3808 /* Absolute path, relative to the current server */
3809 if (base && strstr(base, "://") && rel[0] == '/') {
3811 av_strlcpy(buf, base, size);
3812 sep = strstr(buf, "://");
3815 sep = strchr(sep, '/');
3819 av_strlcat(buf, rel, size);
3822 /* If rel actually is an absolute url, just copy it */
3823 if (!base || strstr(rel, "://") || rel[0] == '/') {
3824 av_strlcpy(buf, rel, size);
3828 av_strlcpy(buf, base, size);
3829 /* Remove the file name from the base url */
3830 sep = strrchr(buf, '/');
3835 while (av_strstart(rel, "../", NULL) && sep) {
3836 /* Remove the path delimiter at the end */
3838 sep = strrchr(buf, '/');
3839 /* If the next directory name to pop off is "..", break here */
3840 if (!strcmp(sep ? &sep[1] : buf, "..")) {
3841 /* Readd the slash we just removed */
3842 av_strlcat(buf, "/", size);
3845 /* Cut off the directory name */
3852 av_strlcat(buf, rel, size);
/* Convert an ISO-8601 date string to a Unix timestamp via strptime,
 * accepting both "YYYY-MM-DD hh:mm:ss" and the 'T'-separated form.
 * Platforms without strptime log a warning instead (the surrounding
 * HAVE_STRPTIME preprocessor guards are dropped from this excerpt). */
3855 int64_t ff_iso8601_to_unix_time(const char *datestr)
3858 struct tm time1 = {0}, time2 = {0};
3860 ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
3861 ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
3863 return av_timegm(&time2);
3865 return av_timegm(&time1);
3867 av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert "
3868 "the date string.\n");
/* Ask whether a muxer can store a given codec: delegate to the format's
 * query_codec hook when present, else consult its codec_tag tables, else
 * accept the format's declared default video/audio/subtitle codecs;
 * AVERROR_PATCHWELCOME when the answer cannot be determined. */
3873 int avformat_query_codec(AVOutputFormat *ofmt, enum CodecID codec_id, int std_compliance)
3876 if (ofmt->query_codec)
3877 return ofmt->query_codec(codec_id, std_compliance);
3878 else if (ofmt->codec_tag)
3879 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
3880 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
3881 codec_id == ofmt->subtitle_codec)
3884 return AVERROR_PATCHWELCOME;
/* Globally initialise networking (ff_network_init) and remember that the
 * library did it, so deinit knows it is safe. */
3887 int avformat_network_init(void)
3891 ff_network_inited_globally = 1;
3892 if ((ret = ff_network_init()) < 0)
/* Undo avformat_network_init(); the body is in lines dropped from this
 * excerpt. */
3899 int avformat_network_deinit(void)
/* Attach an AV_PKT_DATA_PARAM_CHANGE side-data blob to pkt describing the
 * non-zero parameter changes (channel count, channel layout, sample rate,
 * video dimensions). The blob starts with a little-endian flags word,
 * followed by the values in the same order as the flag tests below.
 * Returns 0, AVERROR(EINVAL) on a bad packet, or AVERROR(ENOMEM). */
3908 int ff_add_param_change(AVPacket *pkt, int32_t channels,
3909 uint64_t channel_layout, int32_t sample_rate,
3910 int32_t width, int32_t height)
3916 return AVERROR(EINVAL);
3919 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
3921 if (channel_layout) {
3923 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
3927 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
3929 if (width || height) {
3931 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
3933 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
3935 return AVERROR(ENOMEM);
3936 bytestream_put_le32(&data, flags);
3938 bytestream_put_le32(&data, channels);
3940 bytestream_put_le64(&data, channel_layout);
3942 bytestream_put_le32(&data, sample_rate);
3943 if (width || height) {
3944 bytestream_put_le32(&data, width);
3945 bytestream_put_le32(&data, height);
/* Expose the RIFF/BMP video codec-tag table to library users. */
3950 const struct AVCodecTag *avformat_get_riff_video_tags(void)
3952 return ff_codec_bmp_tags;
/* Expose the RIFF/WAV audio codec-tag table to library users. */
3954 const struct AVCodecTag *avformat_get_riff_audio_tags(void)
3956 return ff_codec_wav_tags;