2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "avio_internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavcodec/raw.h"
29 #include "libavcodec/bytestream.h"
30 #include "libavutil/avassert.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/dict.h"
33 #include "libavutil/pixdesc.h"
36 #include "libavutil/avassert.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/time.h"
41 #include "libavutil/timestamp.h"
43 #include "audiointerleave.h"
55 * various utility functions for use within FFmpeg
58 unsigned avformat_version(void)
60 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
61 return LIBAVFORMAT_VERSION_INT;
64 const char *avformat_configuration(void)
66 return FFMPEG_CONFIGURATION;
69 const char *avformat_license(void)
71 #define LICENSE_PREFIX "libavformat license: "
72 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Base used to stamp packets with "relative" timestamps before the stream's
 * real start time is known; such timestamps are later shifted by first_dts. */
#define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))

/* Return nonzero if ts lies in the relative-timestamp range, i.e. within
 * 2^48 ticks below RELATIVE_TS_BASE or above. */
static int is_relative(int64_t ts) {
    return ts > (RELATIVE_TS_BASE - (1LL<<48));
}
81 /* fraction handling */
84 * f = val + (num / den) + 0.5.
86 * 'num' is normalized so that it is such as 0 <= num < den.
88 * @param f fractional number
89 * @param val integer value
90 * @param num must be >= 0
91 * @param den must be >= 1
93 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
106 * Fractional addition to f: f = f + (incr / f->den).
108 * @param f fractional number
109 * @param incr increment, can be positive or negative
111 static void frac_add(AVFrac *f, int64_t incr)
124 } else if (num >= den) {
131 /** head of registered input format linked list */
132 static AVInputFormat *first_iformat = NULL;
133 /** head of registered output format linked list */
134 static AVOutputFormat *first_oformat = NULL;
136 AVInputFormat *av_iformat_next(AVInputFormat *f)
138 if(f) return f->next;
139 else return first_iformat;
142 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
144 if(f) return f->next;
145 else return first_oformat;
148 void av_register_input_format(AVInputFormat *format)
152 while (*p != NULL) p = &(*p)->next;
157 void av_register_output_format(AVOutputFormat *format)
161 while (*p != NULL) p = &(*p)->next;
/**
 * Return 1 if the case-insensitive extension of filename matches one of
 * the comma-separated entries in extensions, 0 otherwise.
 */
int av_match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if (!filename)
        return 0;

    ext = strrchr(filename, '.');
    if (ext) {
        ext++;
        p = extensions;
        for (;;) {
            /* copy the next comma-separated candidate, bounded by ext1 */
            q = ext1;
            while (*p != '\0' && *p != ',' && q - ext1 < sizeof(ext1) - 1)
                *q++ = *p++;
            *q = '\0';
            if (!av_strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++;
        }
    }
    return 0;
}
/**
 * Return 1 if name matches one of the comma-separated names in 'names'
 * (case-insensitive), 0 otherwise.
 */
static int match_format(const char *name, const char *names)
{
    const char *p;
    int len, namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        /* compare against the longer of the two so "mp4" != "mp4a" */
        len = FFMAX(p - names, namelen);
        if (!av_strncasecmp(name, names, len))
            return 1;
        names = p + 1;
    }
    return !av_strcasecmp(name, names);
}
211 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
212 const char *mime_type)
214 AVOutputFormat *fmt = NULL, *fmt_found;
215 int score_max, score;
217 /* specific test for image sequences */
218 #if CONFIG_IMAGE2_MUXER
219 if (!short_name && filename &&
220 av_filename_number_test(filename) &&
221 ff_guess_image2_codec(filename) != AV_CODEC_ID_NONE) {
222 return av_guess_format("image2", NULL, NULL);
225 /* Find the proper file type. */
228 while ((fmt = av_oformat_next(fmt))) {
230 if (fmt->name && short_name && match_format(short_name, fmt->name))
232 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
234 if (filename && fmt->extensions &&
235 av_match_ext(filename, fmt->extensions)) {
238 if (score > score_max) {
246 enum AVCodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
247 const char *filename, const char *mime_type, enum AVMediaType type){
248 if(type == AVMEDIA_TYPE_VIDEO){
249 enum AVCodecID codec_id= AV_CODEC_ID_NONE;
251 #if CONFIG_IMAGE2_MUXER
252 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
253 codec_id= ff_guess_image2_codec(filename);
256 if(codec_id == AV_CODEC_ID_NONE)
257 codec_id= fmt->video_codec;
259 }else if(type == AVMEDIA_TYPE_AUDIO)
260 return fmt->audio_codec;
261 else if (type == AVMEDIA_TYPE_SUBTITLE)
262 return fmt->subtitle_codec;
264 return AV_CODEC_ID_NONE;
267 AVInputFormat *av_find_input_format(const char *short_name)
269 AVInputFormat *fmt = NULL;
270 while ((fmt = av_iformat_next(fmt))) {
271 if (match_format(short_name, fmt->name))
277 int ffio_limit(AVIOContext *s, int size)
280 int64_t remaining= s->maxsize - avio_tell(s);
281 if(remaining < size){
282 int64_t newsize= avio_size(s);
283 if(!s->maxsize || s->maxsize<newsize)
284 s->maxsize= newsize - !newsize;
285 remaining= s->maxsize - avio_tell(s);
286 remaining= FFMAX(remaining, 0);
289 if(s->maxsize>=0 && remaining+1 < size){
290 av_log(0, AV_LOG_ERROR, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
297 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
300 int orig_size = size;
301 size= ffio_limit(s, size);
303 ret= av_new_packet(pkt, size);
308 pkt->pos= avio_tell(s);
310 ret= avio_read(s, pkt->data, size);
314 av_shrink_packet(pkt, ret);
315 if (pkt->size < orig_size)
316 pkt->flags |= AV_PKT_FLAG_CORRUPT;
321 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
326 return av_get_packet(s, pkt, size);
327 old_size = pkt->size;
328 ret = av_grow_packet(pkt, size);
331 ret = avio_read(s, pkt->data + old_size, size);
332 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
/**
 * Return 1 if filename contains a valid frame-number pattern (e.g. %d),
 * 0 otherwise.
 */
int av_filename_number_test(const char *filename)
{
    char buf[20];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
}
343 AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
345 AVProbeData lpd = *pd;
346 AVInputFormat *fmt1 = NULL, *fmt;
347 int score, nodat = 0, score_max=0;
349 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
350 int id3len = ff_id3v2_tag_len(lpd.buf);
351 if (lpd.buf_size > id3len + 16) {
353 lpd.buf_size -= id3len;
359 while ((fmt1 = av_iformat_next(fmt1))) {
360 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
363 if (fmt1->read_probe) {
364 score = fmt1->read_probe(&lpd);
365 if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
366 score = FFMAX(score, nodat ? AVPROBE_SCORE_MAX/4-1 : 1);
367 } else if (fmt1->extensions) {
368 if (av_match_ext(lpd.filename, fmt1->extensions)) {
372 if (score > score_max) {
375 }else if (score == score_max)
378 *score_ret= score_max;
383 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
386 AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
387 if(score_ret > *score_max){
388 *score_max= score_ret;
394 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
396 return av_probe_input_format2(pd, is_opened, &score);
399 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
401 static const struct {
402 const char *name; enum AVCodecID id; enum AVMediaType type;
404 { "aac" , AV_CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
405 { "ac3" , AV_CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
406 { "dts" , AV_CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
407 { "eac3" , AV_CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
408 { "h264" , AV_CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
409 { "loas" , AV_CODEC_ID_AAC_LATM , AVMEDIA_TYPE_AUDIO },
410 { "m4v" , AV_CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
411 { "mp3" , AV_CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
412 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
416 AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
420 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
421 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
422 for (i = 0; fmt_id_type[i].name; i++) {
423 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
424 st->codec->codec_id = fmt_id_type[i].id;
425 st->codec->codec_type = fmt_id_type[i].type;
433 /************************************************************/
434 /* input media file */
436 int av_demuxer_open(AVFormatContext *ic){
439 if (ic->iformat->read_header) {
440 err = ic->iformat->read_header(ic);
445 if (ic->pb && !ic->data_offset)
446 ic->data_offset = avio_tell(ic->pb);
452 /** size of probe buffer, for guessing file type from file contents */
453 #define PROBE_BUF_MIN 2048
454 #define PROBE_BUF_MAX (1<<20)
456 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
457 const char *filename, void *logctx,
458 unsigned int offset, unsigned int max_probe_size)
460 AVProbeData pd = { filename ? filename : "", NULL, -offset };
461 unsigned char *buf = NULL;
462 int ret = 0, probe_size;
464 if (!max_probe_size) {
465 max_probe_size = PROBE_BUF_MAX;
466 } else if (max_probe_size > PROBE_BUF_MAX) {
467 max_probe_size = PROBE_BUF_MAX;
468 } else if (max_probe_size < PROBE_BUF_MIN) {
469 return AVERROR(EINVAL);
472 if (offset >= max_probe_size) {
473 return AVERROR(EINVAL);
476 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
477 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
478 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
479 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
482 if (probe_size < offset) {
486 /* read probe data */
487 buftmp = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
490 return AVERROR(ENOMEM);
493 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
494 /* fail if error was not end of file, otherwise, lower score */
495 if (ret != AVERROR_EOF) {
500 ret = 0; /* error was end of file, nothing read */
503 pd.buf = &buf[offset];
505 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
507 /* guess file format */
508 *fmt = av_probe_input_format2(&pd, 1, &score);
510 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
511 av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
513 av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
519 return AVERROR_INVALIDDATA;
522 /* rewind. reuse probe buffer to avoid seeking */
523 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
529 /* open input file and probe the format if necessary */
530 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
533 AVProbeData pd = {filename, NULL, 0};
536 s->flags |= AVFMT_FLAG_CUSTOM_IO;
538 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
539 else if (s->iformat->flags & AVFMT_NOFILE)
540 av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
541 "will be ignored with AVFMT_NOFILE format.\n");
545 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
546 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
549 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
550 &s->interrupt_callback, options)) < 0)
554 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
557 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
558 AVPacketList **plast_pktl){
559 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
564 (*plast_pktl)->next = pktl;
566 *packet_buffer = pktl;
568 /* add the packet in the buffered packet list */
574 static void queue_attached_pictures(AVFormatContext *s)
577 for (i = 0; i < s->nb_streams; i++)
578 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
579 s->streams[i]->discard < AVDISCARD_ALL) {
580 AVPacket copy = s->streams[i]->attached_pic;
581 copy.destruct = NULL;
582 add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end);
586 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
588 AVFormatContext *s = *ps;
590 AVDictionary *tmp = NULL;
591 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
593 if (!s && !(s = avformat_alloc_context()))
594 return AVERROR(ENOMEM);
596 av_log(0, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
597 return AVERROR(EINVAL);
603 av_dict_copy(&tmp, *options, 0);
605 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
608 if ((ret = init_input(s, filename, &tmp)) < 0)
611 /* check filename in case an image number is expected */
612 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
613 if (!av_filename_number_test(filename)) {
614 ret = AVERROR(EINVAL);
619 s->duration = s->start_time = AV_NOPTS_VALUE;
620 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
622 /* allocate private data */
623 if (s->iformat->priv_data_size > 0) {
624 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
625 ret = AVERROR(ENOMEM);
628 if (s->iformat->priv_class) {
629 *(const AVClass**)s->priv_data = s->iformat->priv_class;
630 av_opt_set_defaults(s->priv_data);
631 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
636 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
638 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
640 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
641 if ((ret = s->iformat->read_header(s)) < 0)
644 if (id3v2_extra_meta &&
645 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
647 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
649 queue_attached_pictures(s);
651 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
652 s->data_offset = avio_tell(s->pb);
654 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
657 av_dict_free(options);
664 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
666 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
668 avformat_free_context(s);
673 /*******************************************************/
675 static void probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
677 if(st->request_probe>0){
678 AVProbeData *pd = &st->probe_data;
680 av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
684 uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
688 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
689 pd->buf_size += pkt->size;
690 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
693 st->probe_packets = 0;
696 end= s->raw_packet_buffer_remaining_size <= 0
697 || st->probe_packets<=0;
699 if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
700 int score= set_codec_from_probe_data(s, st, pd);
701 if( (st->codec->codec_id != AV_CODEC_ID_NONE && score > AVPROBE_SCORE_MAX/4)
705 st->request_probe= -1;
706 if(st->codec->codec_id != AV_CODEC_ID_NONE){
707 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
709 av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
715 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
721 AVPacketList *pktl = s->raw_packet_buffer;
725 st = s->streams[pkt->stream_index];
726 if(st->request_probe <= 0){
727 s->raw_packet_buffer = pktl->next;
728 s->raw_packet_buffer_remaining_size += pkt->size;
735 ret= s->iformat->read_packet(s, pkt);
737 if (!pktl || ret == AVERROR(EAGAIN))
739 for (i = 0; i < s->nb_streams; i++) {
741 if (st->probe_packets) {
742 probe_codec(s, st, NULL);
744 av_assert0(st->request_probe <= 0);
749 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
750 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
751 av_log(s, AV_LOG_WARNING,
752 "Dropped corrupted packet (stream = %d)\n",
758 if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
759 av_packet_merge_side_data(pkt);
761 if(pkt->stream_index >= (unsigned)s->nb_streams){
762 av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
766 st= s->streams[pkt->stream_index];
768 switch(st->codec->codec_type){
769 case AVMEDIA_TYPE_VIDEO:
770 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
772 case AVMEDIA_TYPE_AUDIO:
773 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
775 case AVMEDIA_TYPE_SUBTITLE:
776 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
780 if(!pktl && st->request_probe <= 0)
783 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
784 s->raw_packet_buffer_remaining_size -= pkt->size;
786 probe_codec(s, st, pkt);
#if FF_API_READ_PACKET
/** Deprecated public wrapper around ff_read_packet(). */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    return ff_read_packet(s, pkt);
}
#endif
798 /**********************************************************/
800 static int determinable_frame_size(AVCodecContext *avctx)
802 if (/*avctx->codec_id == AV_CODEC_ID_AAC ||*/
803 avctx->codec_id == AV_CODEC_ID_MP1 ||
804 avctx->codec_id == AV_CODEC_ID_MP2 ||
805 avctx->codec_id == AV_CODEC_ID_MP3/* ||
806 avctx->codec_id == AV_CODEC_ID_CELT*/)
812 * Get the number of samples of an audio frame. Return -1 on error.
814 static int get_audio_frame_size(AVCodecContext *enc, int size, int mux)
818 /* give frame_size priority if demuxing */
819 if (!mux && enc->frame_size > 1)
820 return enc->frame_size;
822 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
825 /* fallback to using frame_size if muxing */
826 if (enc->frame_size > 1)
827 return enc->frame_size;
834 * Return the frame duration in seconds. Return 0 if not available.
836 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
837 AVCodecParserContext *pc, AVPacket *pkt)
843 switch(st->codec->codec_type) {
844 case AVMEDIA_TYPE_VIDEO:
845 if (st->r_frame_rate.num && !pc) {
846 *pnum = st->r_frame_rate.den;
847 *pden = st->r_frame_rate.num;
848 } else if(st->time_base.num*1000LL > st->time_base.den) {
849 *pnum = st->time_base.num;
850 *pden = st->time_base.den;
851 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
852 *pnum = st->codec->time_base.num;
853 *pden = st->codec->time_base.den;
854 if (pc && pc->repeat_pict) {
855 *pnum = (*pnum) * (1 + pc->repeat_pict);
857 //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
858 //Thus if we have no parser in such case leave duration undefined.
859 if(st->codec->ticks_per_frame>1 && !pc){
864 case AVMEDIA_TYPE_AUDIO:
865 frame_size = get_audio_frame_size(st->codec, pkt->size, 0);
866 if (frame_size <= 0 || st->codec->sample_rate <= 0)
869 *pden = st->codec->sample_rate;
876 static int is_intra_only(AVCodecContext *enc){
877 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
879 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
880 switch(enc->codec_id){
881 case AV_CODEC_ID_MJPEG:
882 case AV_CODEC_ID_MJPEGB:
883 case AV_CODEC_ID_LJPEG:
884 case AV_CODEC_ID_PRORES:
885 case AV_CODEC_ID_RAWVIDEO:
886 case AV_CODEC_ID_V210:
887 case AV_CODEC_ID_DVVIDEO:
888 case AV_CODEC_ID_HUFFYUV:
889 case AV_CODEC_ID_FFVHUFF:
890 case AV_CODEC_ID_ASV1:
891 case AV_CODEC_ID_ASV2:
892 case AV_CODEC_ID_VCR1:
893 case AV_CODEC_ID_DNXHD:
894 case AV_CODEC_ID_JPEG2000:
895 case AV_CODEC_ID_MDEC:
896 case AV_CODEC_ID_UTVIDEO:
904 static int has_decode_delay_been_guessed(AVStream *st)
906 if(st->codec->codec_id != AV_CODEC_ID_H264) return 1;
907 #if CONFIG_H264_DECODER
908 if(st->codec->has_b_frames &&
909 avpriv_h264_has_num_reorder_frames(st->codec) == st->codec->has_b_frames)
912 if(st->codec->has_b_frames<3)
913 return st->nb_decoded_frames >= 7;
914 else if(st->codec->has_b_frames<4)
915 return st->nb_decoded_frames >= 18;
917 return st->nb_decoded_frames >= 20;
920 static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
924 if (pktl == s->parse_queue_end)
925 return s->packet_buffer;
929 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
930 int64_t dts, int64_t pts)
932 AVStream *st= s->streams[stream_index];
933 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
935 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))
938 st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
941 if (is_relative(pts))
942 pts += st->first_dts - RELATIVE_TS_BASE;
944 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
945 if(pktl->pkt.stream_index != stream_index)
947 if(is_relative(pktl->pkt.pts))
948 pktl->pkt.pts += st->first_dts - RELATIVE_TS_BASE;
950 if(is_relative(pktl->pkt.dts))
951 pktl->pkt.dts += st->first_dts - RELATIVE_TS_BASE;
953 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
954 st->start_time= pktl->pkt.pts;
956 if (st->start_time == AV_NOPTS_VALUE)
957 st->start_time = pts;
960 static void update_initial_durations(AVFormatContext *s, AVStream *st,
961 int stream_index, int duration)
963 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
964 int64_t cur_dts= RELATIVE_TS_BASE;
966 if(st->first_dts != AV_NOPTS_VALUE){
967 cur_dts= st->first_dts;
968 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
969 if(pktl->pkt.stream_index == stream_index){
970 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
975 if(pktl && pktl->pkt.dts != st->first_dts) {
976 av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s in que\n", av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts));
980 av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in ques\n", av_ts2str(st->first_dts));
983 pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
984 st->first_dts = cur_dts;
985 }else if(st->cur_dts != RELATIVE_TS_BASE)
988 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
989 if(pktl->pkt.stream_index != stream_index)
991 if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
992 && !pktl->pkt.duration){
993 pktl->pkt.dts= cur_dts;
994 if(!st->codec->has_b_frames)
995 pktl->pkt.pts= cur_dts;
996 // if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
997 pktl->pkt.duration = duration;
1000 cur_dts = pktl->pkt.dts + pktl->pkt.duration;
1003 st->cur_dts= cur_dts;
1006 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
1007 AVCodecParserContext *pc, AVPacket *pkt)
1009 int num, den, presentation_delayed, delay, i;
1012 if (s->flags & AVFMT_FLAG_NOFILLIN)
1015 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
1016 pkt->dts= AV_NOPTS_VALUE;
1018 if (st->codec->codec_id != AV_CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
1019 //FIXME Set low_delay = 0 when has_b_frames = 1
1020 st->codec->has_b_frames = 1;
1022 /* do we have a video B-frame ? */
1023 delay= st->codec->has_b_frames;
1024 presentation_delayed = 0;
1026 /* XXX: need has_b_frame, but cannot get it if the codec is
1029 pc && pc->pict_type != AV_PICTURE_TYPE_B)
1030 presentation_delayed = 1;
1032 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > pkt->pts && st->pts_wrap_bits<63){
1033 pkt->dts -= 1LL<<st->pts_wrap_bits;
1036 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
1037 // we take the conservative approach and discard both
1038 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
1039 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
1040 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1041 pkt->dts= AV_NOPTS_VALUE;
1044 if (pkt->duration == 0) {
1045 compute_frame_duration(&num, &den, st, pc, pkt);
1047 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
1050 if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
1051 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1053 /* correct timestamps with byte offset if demuxers only have timestamps
1054 on packet boundaries */
1055 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1056 /* this will estimate bitrate based on this frame's duration and size */
1057 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1058 if(pkt->pts != AV_NOPTS_VALUE)
1060 if(pkt->dts != AV_NOPTS_VALUE)
1064 if (pc && pc->dts_sync_point >= 0) {
1065 // we have synchronization info from the parser
1066 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1068 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1069 if (pkt->dts != AV_NOPTS_VALUE) {
1070 // got DTS from the stream, update reference timestamp
1071 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1072 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1073 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1074 // compute DTS based on reference timestamp
1075 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1076 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1078 if (pc->dts_sync_point > 0)
1079 st->reference_dts = pkt->dts; // new reference
1083 /* This may be redundant, but it should not hurt. */
1084 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1085 presentation_delayed = 1;
1087 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
1088 // presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
1089 /* interpolate PTS and DTS if they are not present */
1090 //We skip H264 currently because delay and has_b_frames are not reliably set
1091 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != AV_CODEC_ID_H264){
1092 if (presentation_delayed) {
1093 /* DTS = decompression timestamp */
1094 /* PTS = presentation timestamp */
1095 if (pkt->dts == AV_NOPTS_VALUE)
1096 pkt->dts = st->last_IP_pts;
1097 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1098 if (pkt->dts == AV_NOPTS_VALUE)
1099 pkt->dts = st->cur_dts;
1101 /* this is tricky: the dts must be incremented by the duration
1102 of the frame we are displaying, i.e. the last I- or P-frame */
1103 if (st->last_IP_duration == 0)
1104 st->last_IP_duration = pkt->duration;
1105 if(pkt->dts != AV_NOPTS_VALUE)
1106 st->cur_dts = pkt->dts + st->last_IP_duration;
1107 st->last_IP_duration = pkt->duration;
1108 st->last_IP_pts= pkt->pts;
1109 /* cannot compute PTS if not present (we can compute it only
1110 by knowing the future */
1111 } else if (pkt->pts != AV_NOPTS_VALUE ||
1112 pkt->dts != AV_NOPTS_VALUE ||
1114 int duration = pkt->duration;
1116 if(pkt->pts != AV_NOPTS_VALUE && duration){
1117 int64_t old_diff= FFABS(st->cur_dts - duration - pkt->pts);
1118 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1119 if( old_diff < new_diff && old_diff < (duration>>3)
1120 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO
1121 && (!strcmp(s->iformat->name, "mpeg") ||
1122 !strcmp(s->iformat->name, "mpegts"))){
1123 pkt->pts += duration;
1124 av_log(s, AV_LOG_WARNING, "Adjusting PTS forward\n");
1125 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%s size:%d\n",
1126 // pkt->stream_index, old_diff, new_diff, pkt->duration, av_ts2str(st->cur_dts), pkt->size);
1130 /* presentation is not delayed : PTS and DTS are the same */
1131 if (pkt->pts == AV_NOPTS_VALUE)
1132 pkt->pts = pkt->dts;
1133 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
1135 if (pkt->pts == AV_NOPTS_VALUE)
1136 pkt->pts = st->cur_dts;
1137 pkt->dts = pkt->pts;
1138 if (pkt->pts != AV_NOPTS_VALUE)
1139 st->cur_dts = pkt->pts + duration;
1143 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
1144 st->pts_buffer[0]= pkt->pts;
1145 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1146 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1147 if(pkt->dts == AV_NOPTS_VALUE)
1148 pkt->dts= st->pts_buffer[0];
1149 if(st->codec->codec_id == AV_CODEC_ID_H264){ // we skipped it above so we try here
1150 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1152 if(pkt->dts > st->cur_dts)
1153 st->cur_dts = pkt->dts;
1156 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1157 // presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1160 if(is_intra_only(st->codec))
1161 pkt->flags |= AV_PKT_FLAG_KEY;
1163 pkt->convergence_duration = pc->convergence_duration;
1166 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1169 AVPacketList *pktl = *pkt_buf;
1170 *pkt_buf = pktl->next;
1171 av_free_packet(&pktl->pkt);
1174 *pkt_buf_end = NULL;
1178 * Parse a packet, add all split parts to parse_queue
1180 * @param pkt packet to parse, NULL when flushing the parser at end of stream
1182 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1184 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1185 AVStream *st = s->streams[stream_index];
1186 uint8_t *data = pkt ? pkt->data : NULL;
1187 int size = pkt ? pkt->size : 0;
1188 int ret = 0, got_output = 0;
1191 av_init_packet(&flush_pkt);
1194 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1195 // preserve 0-size sync packets
1196 compute_pkt_fields(s, st, st->parser, pkt);
1199 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1202 av_init_packet(&out_pkt);
1203 len = av_parser_parse2(st->parser, st->codec,
1204 &out_pkt.data, &out_pkt.size, data, size,
1205 pkt->pts, pkt->dts, pkt->pos);
1207 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1209 /* increment read pointer */
1213 got_output = !!out_pkt.size;
1218 /* set the duration */
1219 out_pkt.duration = 0;
1220 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1221 if (st->codec->sample_rate > 0) {
1222 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1223 (AVRational){ 1, st->codec->sample_rate },
1227 } else if (st->codec->time_base.num != 0 &&
1228 st->codec->time_base.den != 0) {
1229 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1230 st->codec->time_base,
1235 out_pkt.stream_index = st->index;
1236 out_pkt.pts = st->parser->pts;
1237 out_pkt.dts = st->parser->dts;
1238 out_pkt.pos = st->parser->pos;
1240 if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
1241 out_pkt.pos = st->parser->frame_offset;
1243 if (st->parser->key_frame == 1 ||
1244 (st->parser->key_frame == -1 &&
1245 st->parser->pict_type == AV_PICTURE_TYPE_I))
1246 out_pkt.flags |= AV_PKT_FLAG_KEY;
1248 if(st->parser->key_frame == -1 && st->parser->pict_type==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
1249 out_pkt.flags |= AV_PKT_FLAG_KEY;
1251 compute_pkt_fields(s, st, st->parser, &out_pkt);
1253 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1254 out_pkt.destruct = pkt->destruct;
1255 pkt->destruct = NULL;
1257 if ((ret = av_dup_packet(&out_pkt)) < 0)
1260 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1261 av_free_packet(&out_pkt);
1262 ret = AVERROR(ENOMEM);
1268 /* end of the stream => close and free the parser */
1269 if (pkt == &flush_pkt) {
1270 av_parser_close(st->parser);
1275 av_free_packet(pkt);
1279 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1280 AVPacketList **pkt_buffer_end,
1284 av_assert0(*pkt_buffer);
1287 *pkt_buffer = pktl->next;
1289 *pkt_buffer_end = NULL;
/* Read the next frame from the demuxer, running packets through the
 * per-stream parser when st->need_parsing is set.  Parsed sub-packets are
 * queued in s->parse_queue; non-parsed packets are returned directly after
 * compute_pkt_fields().  On EOF the parsers are flushed so their buffered
 * data is emitted before parsing terminates. */
1294 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1296     int ret = 0, i, got_packet = 0;
1298     av_init_packet(pkt);
/* Loop until either a packet was produced or the parse queue has data. */
1300     while (!got_packet && !s->parse_queue) {
1304         /* read next packet */
1305         ret = ff_read_packet(s, &cur_pkt);
1307             if (ret == AVERROR(EAGAIN))
1309             /* flush the parsers */
1310             for(i = 0; i < s->nb_streams; i++) {
1312                 if (st->parser && st->need_parsing)
/* NULL packet signals the parser flush (drain) path. */
1313                     parse_packet(s, NULL, st->index);
1315             /* all remaining packets are now in parse_queue =>
1316              * really terminate parsing */
1320         st = s->streams[cur_pkt.stream_index];
/* Sanity-check demuxer output: pts must never precede dts. */
1322         if (cur_pkt.pts != AV_NOPTS_VALUE &&
1323             cur_pkt.dts != AV_NOPTS_VALUE &&
1324             cur_pkt.pts < cur_pkt.dts) {
1325             av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1326                    cur_pkt.stream_index,
1327                    av_ts2str(cur_pkt.pts),
1328                    av_ts2str(cur_pkt.dts),
1331         if (s->debug & FF_FDEBUG_TS)
1332             av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1333                    cur_pkt.stream_index,
1334                    av_ts2str(cur_pkt.pts),
1335                    av_ts2str(cur_pkt.dts),
/* Lazily create the parser on first use, unless parsing is disabled. */
1340         if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1341             st->parser = av_parser_init(st->codec->codec_id);
1343                 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1344                        "%s, packets or times may be invalid.\n",
1345                        avcodec_get_name(st->codec->codec_id));
1346                 /* no parser available: just output the raw packets */
1347                 st->need_parsing = AVSTREAM_PARSE_NONE;
1348             } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1349                 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1350             } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1351                 st->parser->flags |= PARSER_FLAG_ONCE;
1352             } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
1353                 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
1357         if (!st->need_parsing || !st->parser) {
1358             /* no parsing needed: we just output the packet as is */
1360             compute_pkt_fields(s, st, NULL, pkt);
/* Generic-index formats: record keyframe positions for later seeking. */
1361             if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1362                 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1363                 ff_reduce_index(s, st->index);
1364                 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1367         } else if (st->discard < AVDISCARD_ALL) {
1368             if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
/* Stream fully discarded: drop the packet without parsing. */
1372             av_free_packet(&cur_pkt);
1374         if (pkt->flags & AV_PKT_FLAG_KEY)
1375             st->skip_to_keyframe = 0;
1376         if (st->skip_to_keyframe) {
1377             av_free_packet(&cur_pkt);
1382     if (!got_packet && s->parse_queue)
1383         ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1385     if(s->debug & FF_FDEBUG_TS)
1386         av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1388            av_ts2str(pkt->pts),
1389            av_ts2str(pkt->dts),
/* Public API: return the next frame of the stream in pkt.
 * Without AVFMT_FLAG_GENPTS this is a thin wrapper around
 * read_frame_internal() / the packet buffer.  With GENPTS, packets are
 * buffered so a missing pts can be reconstructed from the dts of later
 * packets of the same stream (B-frame reordering).  Relative timestamps
 * (RELATIVE_TS_BASE offset) are converted back before returning. */
1397 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1399     const int genpts = s->flags & AVFMT_FLAG_GENPTS;
/* Fast path: no pts generation requested — serve buffered or fresh packets. */
1405         ret = s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
1406                                                          &s->packet_buffer_end,
1408                                  read_frame_internal(s, pkt);
1413         AVPacketList *pktl = s->packet_buffer;
1416             AVPacket *next_pkt = &pktl->pkt;
1418             if (next_pkt->dts != AV_NOPTS_VALUE) {
1419                 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1420                 // last dts seen for this stream. if any of packets following
1421                 // current one had no dts, we will set this to AV_NOPTS_VALUE.
1422                 int64_t last_dts = next_pkt->dts;
/* Scan forward through the buffer for a later dts that resolves the pts. */
1423                 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1424                     if (pktl->pkt.stream_index == next_pkt->stream_index &&
/* av_compare_mod handles timestamp wraparound within pts_wrap_bits. */
1425                         (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1426                         if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1427                             next_pkt->pts = pktl->pkt.dts;
1429                         if (last_dts != AV_NOPTS_VALUE) {
1430                             // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1431                             last_dts = pktl->pkt.dts;
1436                 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1437                     // Fixing the last reference frame had none pts issue (For MXF etc).
1438                     // We only do this when
1440                     // 2. we are not able to resolve a pts value for current packet.
1441                     // 3. the packets for this stream at the end of the files had valid dts.
1442                     next_pkt->pts = last_dts + next_pkt->duration;
1444                 pktl = s->packet_buffer;
1447             /* read packet from packet buffer, if there is data */
1448             if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1449                   next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1450                 ret = read_from_packet_buffer(&s->packet_buffer,
1451                                                &s->packet_buffer_end, pkt);
1456         ret = read_frame_internal(s, pkt);
1458             if (pktl && ret != AVERROR(EAGAIN)) {
/* Buffer the fresh packet so a later pass can resolve missing pts values. */
1465         if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1466                                         &s->packet_buffer_end)) < 0)
1467             return AVERROR(ENOMEM);
1472     st = s->streams[pkt->stream_index];
/* Inject pending skip_samples as side data (e.g. encoder priming). */
1473     if (st->skip_samples) {
1474         uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
/* NOTE(review): p is not checked for NULL before AV_WL32 — allocation
 * failure would dereference NULL; confirm against the full source. */
1475         AV_WL32(p, st->skip_samples);
1476         av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d\n", st->skip_samples);
1477         st->skip_samples = 0;
1480     if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
1481         ff_reduce_index(s, st->index);
1482         av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
/* Strip the internal relative-timestamp offset before handing to the user. */
1485     if (is_relative(pkt->dts))
1486         pkt->dts -= RELATIVE_TS_BASE;
1487     if (is_relative(pkt->pts))
1488         pkt->pts -= RELATIVE_TS_BASE;
/* Drop all buffered packets (parse queue, generic packet buffer and raw
 * packet buffer) and reset the raw-buffer size budget. */
1493 /* XXX: suppress the packet queue */
1494 static void flush_packet_queue(AVFormatContext *s)
1496     free_packet_buffer(&s->parse_queue,       &s->parse_queue_end);
1497     free_packet_buffer(&s->packet_buffer,     &s->packet_buffer_end);
1498     free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1500     s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1503 /*******************************************************/
/* Choose a default stream for seeking: prefer a video stream that is not an
 * attached picture; otherwise fall back to the first audio stream, and
 * finally to stream 0. */
1506 int av_find_default_stream_index(AVFormatContext *s)
1508     int first_audio_index = -1;
1512     if (s->nb_streams <= 0)
1514     for(i = 0; i < s->nb_streams; i++) {
1516         if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
/* Cover art / attached pictures are not suitable seek references. */
1517             !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1520         if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1521             first_audio_index = i;
1523     return first_audio_index >= 0 ? first_audio_index : 0;
/* Reset all demuxer read state after a seek: drop queued packets, close
 * per-stream parsers, and invalidate cached timestamps so they are
 * re-derived from the new read position. */
1527  * Flush the frame reader.
1529 void ff_read_frame_flush(AVFormatContext *s)
1534     flush_packet_queue(s);
1536     /* for each stream, reset read state */
1537     for(i = 0; i < s->nb_streams; i++) {
1541             av_parser_close(st->parser);
1544         st->last_IP_pts = AV_NOPTS_VALUE;
/* Streams with no known first_dts keep relative timestamps internally. */
1545         if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1546         else                                st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1548         st->reference_dts = AV_NOPTS_VALUE;
1549         st->probe_packets = MAX_PROBE_PACKETS;
1551         for(j=0; j<MAX_REORDER_DELAY+1; j++)
1552             st->pts_buffer[j]= AV_NOPTS_VALUE;
/* After a seek, set every stream's cur_dts to `timestamp` (expressed in
 * ref_st's time base), rescaled into each stream's own time base. */
1556 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1560     for(i = 0; i < s->nb_streams; i++) {
1561         AVStream *st = s->streams[i];
/* Cross-multiply with int64_t factors to avoid intermediate overflow. */
1563         st->cur_dts = av_rescale(timestamp,
1564                                  st->time_base.den * (int64_t)ref_st->time_base.num,
1565                                  st->time_base.num * (int64_t)ref_st->time_base.den);
/* Keep the seek index within s->max_index_size bytes: when the entry count
 * reaches the limit, halve it by keeping only every second entry. */
1569 void ff_reduce_index(AVFormatContext *s, int stream_index)
1571     AVStream *st= s->streams[stream_index];
1572     unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1574     if((unsigned)st->nb_index_entries >= max_entries){
1576         for(i=0; 2*i<st->nb_index_entries; i++)
1577             st->index_entries[i]= st->index_entries[2*i];
1578         st->nb_index_entries= i;
/* Insert an index entry, keeping index_entries sorted by timestamp.
 * An existing entry with the same timestamp is updated in place (its
 * min_distance is never reduced); otherwise entries are shifted to make
 * room.  Returns the entry's index, or a negative error code. */
1582 int ff_add_index_entry(AVIndexEntry **index_entries,
1583                        int *nb_index_entries,
1584                        unsigned int *index_entries_allocated_size,
1585                        int64_t pos, int64_t timestamp, int size, int distance, int flags)
1587     AVIndexEntry *entries, *ie;
/* Guard the (count+1)*sizeof multiplication against overflow. */
1590     if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1593     if(timestamp == AV_NOPTS_VALUE)
1594         return AVERROR(EINVAL);
1596     if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1597         timestamp -= RELATIVE_TS_BASE;
1599     entries = av_fast_realloc(*index_entries,
1600                               index_entries_allocated_size,
1601                               (*nb_index_entries + 1) *
1602                               sizeof(AVIndexEntry));
1606     *index_entries= entries;
/* Locate the insertion point for the new timestamp. */
1608     index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
/* Append case: timestamp is larger than everything already present. */
1611         index= (*nb_index_entries)++;
1612         ie= &entries[index];
1613         assert(index==0 || ie[-1].timestamp < timestamp);
1615         ie= &entries[index];
1616         if(ie->timestamp != timestamp){
1617             if(ie->timestamp <= timestamp)
/* Shift the tail one slot right to open a gap at `index`. */
1619             memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1620             (*nb_index_entries)++;
1621         }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1622             distance= ie->min_distance;
1626     ie->timestamp = timestamp;
1627     ie->min_distance= distance;
/* Public wrapper: add an index entry to a stream's own index arrays via
 * ff_add_index_entry(). */
1634 int av_add_index_entry(AVStream *st,
1635                        int64_t pos, int64_t timestamp, int size, int distance, int flags)
1637     return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1638                               &st->index_entries_allocated_size, pos,
1639                               timestamp, size, distance, flags);
/* Binary-search a sorted index for wanted_timestamp.  AVSEEK_FLAG_BACKWARD
 * selects the entry at or before the target, otherwise at or after; unless
 * AVSEEK_FLAG_ANY is set, the result is then stepped to the nearest
 * keyframe entry in the search direction. */
1642 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1643                               int64_t wanted_timestamp, int flags)
1651     //optimize appending index entries at the end
1652     if(b && entries[b-1].timestamp < wanted_timestamp)
1657         timestamp = entries[m].timestamp;
1658         if(timestamp >= wanted_timestamp)
1660         if(timestamp <= wanted_timestamp)
1663     m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1665     if(!(flags & AVSEEK_FLAG_ANY)){
/* Walk toward a keyframe entry; may run off either end (caller checks). */
1666         while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1667             m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Public wrapper: search a stream's own index with
 * ff_index_search_timestamp(). */
1676 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1679     return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1680                                      wanted_timestamp, flags);
/* Seek to target_ts using the demuxer's read_timestamp() via a binary
 * search (ff_gen_search).  Cached index entries, when present, are used to
 * narrow the initial [pos_min, pos_max] search window.  On success the file
 * position is set and all stream cur_dts values are updated. */
1683 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1685     AVInputFormat *avif= s->iformat;
1686     int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1687     int64_t ts_min, ts_max, ts;
1692     if (stream_index < 0)
1695     av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1698     ts_min= AV_NOPTS_VALUE;
1699     pos_limit= -1; //gcc falsely says it may be uninitialized
1701     st= s->streams[stream_index];
1702     if(st->index_entries){
/* Lower bound: last index entry at or before the target. */
1705         index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1706         index= FFMAX(index, 0);
1707         e= &st->index_entries[index];
1709         if(e->timestamp <= target_ts || e->pos == e->min_distance){
1711             ts_min= e->timestamp;
1712             av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1713                     pos_min, av_ts2str(ts_min));
/* Upper bound: first index entry at or after the target. */
1718         index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1719         assert(index < st->nb_index_entries);
1721             e= &st->index_entries[index];
1722             assert(e->timestamp >= target_ts);
1724             ts_max= e->timestamp;
1725             pos_limit= pos_max - e->min_distance;
1726             av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
1727                     pos_max, pos_limit, av_ts2str(ts_max));
1731     pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1736     if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1739     ff_read_frame_flush(s);
1740     ff_update_cur_dts(s, st, ts);
/* Generic timestamp search over a byte range.  Establishes [ts_min, ts_max]
 * bounds by probing with read_timestamp() when the caller did not supply
 * them, then narrows the window with interpolation (falling back to
 * bisection, then linear stepping) until pos_min reaches pos_limit.
 * Returns the chosen byte position and stores its timestamp in *ts_ret. */
1745 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1746                       int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1747                       int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1748                       int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1751     int64_t start_pos, filesize;
1754     av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
/* Derive the lower timestamp bound from the start of the data. */
1756     if(ts_min == AV_NOPTS_VALUE){
1757         pos_min = s->data_offset;
1758         ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1759         if (ts_min == AV_NOPTS_VALUE)
1763     if(ts_min >= target_ts){
/* Derive the upper bound by probing backward from the end of file. */
1768     if(ts_max == AV_NOPTS_VALUE){
1770         filesize = avio_size(s->pb);
1771         pos_max = filesize - 1;
1774             ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1776         }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1777         if (ts_max == AV_NOPTS_VALUE)
/* Extend ts_max to the last timestamp actually present in the file. */
1781             int64_t tmp_pos= pos_max + 1;
1782             int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1783             if(tmp_ts == AV_NOPTS_VALUE)
1787             if(tmp_pos >= filesize)
1793     if(ts_max <= target_ts){
1798     if(ts_min > ts_max){
1800     }else if(ts_min == ts_max){
/* Main narrowing loop: invariant ts_min <= target_ts <= ts_max. */
1805     while (pos_min < pos_limit) {
1806         av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1807                 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1808         assert(pos_limit <= pos_max);
1811             int64_t approximate_keyframe_distance= pos_max - pos_limit;
1812             // interpolate position (better than dichotomy)
1813             pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1814                 + pos_min - approximate_keyframe_distance;
1815         }else if(no_change==1){
1816             // bisection, if interpolation failed to change min or max pos last time
1817             pos = (pos_min + pos_limit)>>1;
1819             /* linear search if bisection failed, can only happen if there
1820                are very few or no keyframes between min/max */
1825         else if(pos > pos_limit)
1829         ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1834         av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1835                 pos_min, pos, pos_max,
1836                 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1837                 pos_limit, start_pos, no_change);
1838         if(ts == AV_NOPTS_VALUE){
1839             av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1842         assert(ts != AV_NOPTS_VALUE);
1843         if (target_ts <= ts) {
1844             pos_limit = start_pos - 1;
1848         if (target_ts >= ts) {
/* Pick the side of the final interval according to the seek direction. */
1854     pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1855     ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min  : ts_max;
1858     ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
/* NOTE(review): this second probe passes &pos_min where &pos_max might be
 * expected (it follows a pos_min increment) — confirm against full source. */
1860     ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1861     av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1862             pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
/* Byte-position seek: clamp pos to [data_offset, filesize-1] and move the
 * I/O context there.  The stream_index and flags parameters are unused in
 * the visible body. */
1868 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1869     int64_t pos_min, pos_max;
1871     pos_min = s->data_offset;
1872     pos_max = avio_size(s->pb) - 1;
1874     if     (pos < pos_min) pos= pos_min;
1875     else if(pos > pos_max) pos= pos_max;
1877     avio_seek(s->pb, pos, SEEK_SET);
/* Generic seek for formats with AVFMT_GENERIC_INDEX: look the timestamp up
 * in the stream's index; if the target lies beyond the indexed range, read
 * frames forward (building the index as a side effect) until it is covered,
 * then seek to the matching index entry. */
1882 static int seek_frame_generic(AVFormatContext *s,
1883                               int stream_index, int64_t timestamp, int flags)
1890     st = s->streams[stream_index];
1892     index = av_index_search_timestamp(st, timestamp, flags);
1894     if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1897     if(index < 0 || index==st->nb_index_entries-1){
/* Target past the indexed range: resume reading from the last known point. */
1901         if(st->nb_index_entries){
1902             assert(st->index_entries);
1903             ie= &st->index_entries[st->nb_index_entries-1];
1904             if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1906             ff_update_cur_dts(s, st, ie->timestamp);
1908             if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1914                 read_status = av_read_frame(s, &pkt);
1915             } while (read_status == AVERROR(EAGAIN));
1916             if (read_status < 0)
1918             av_free_packet(&pkt);
1919             if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1920                 if(pkt.flags & AV_PKT_FLAG_KEY)
/* Bail out on streams that appear to have no keyframes past the target
 * (CDG is exempted: it legitimately has sparse keyframes). */
1922                 if(nonkey++ > 1000 && st->codec->codec_id != AV_CODEC_ID_CDGRAPHICS){
1923                     av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1928         index = av_index_search_timestamp(st, timestamp, flags);
1933     ff_read_frame_flush(s);
1934     AV_NOWARN_DEPRECATED(
1935     if (s->iformat->read_seek){
1936         if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1940     ie = &st->index_entries[index];
1941     if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1943     ff_update_cur_dts(s, st, ie->timestamp);
/* Dispatch a seek request to the right strategy, in priority order:
 * byte seek (AVSEEK_FLAG_BYTE), the demuxer's own read_seek(), binary
 * search over read_timestamp(), and finally the generic index-based seek.
 * A negative stream_index is resolved to the default stream, with the
 * timestamp rescaled from AV_TIME_BASE into that stream's time base. */
1948 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1949                                int64_t timestamp, int flags)
1954     if (flags & AVSEEK_FLAG_BYTE) {
1955         if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1957         ff_read_frame_flush(s);
1958         return seek_frame_byte(s, stream_index, timestamp, flags);
1961     if(stream_index < 0){
1962         stream_index= av_find_default_stream_index(s);
1963         if(stream_index < 0)
1966         st= s->streams[stream_index];
1967         /* timestamp for default must be expressed in AV_TIME_BASE units */
1968         timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1971     /* first, we try the format specific seek */
1972     AV_NOWARN_DEPRECATED(
1973     if (s->iformat->read_seek) {
1974         ff_read_frame_flush(s);
1975         ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
/* Fallback strategies when the demuxer has no native seek. */
1983     if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1984         ff_read_frame_flush(s);
1985         return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1986     } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1987         ff_read_frame_flush(s);
1988         return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public API: seek, then re-queue attached pictures (cover art) so they are
 * delivered again after the position change. */
1994 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1996     int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1999         queue_attached_pictures(s);
/* Public API: seek to ts within [min_ts, max_ts].  Prefers the demuxer's
 * read_seek2(); otherwise emulates the range semantics on top of the old
 * av_seek_frame() API, retrying in the opposite direction if the first
 * attempt fails and the range allows it. */
2004 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
2006     if(min_ts > ts || max_ts < ts)
2009     if (s->iformat->read_seek2) {
2011         ff_read_frame_flush(s);
2012         ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
2015             queue_attached_pictures(s);
2019     if(s->iformat->read_timestamp){
2020         //try to seek via read_timestamp()
2023     //Fallback to old API if new is not implemented but old is
2024     //Note the old has somewat different sematics
2025     AV_NOWARN_DEPRECATED(
2026     if (s->iformat->read_seek || 1) {
/* Seek backward when ts sits closer to max_ts than to min_ts. */
2027         int dir = (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0);
2028         int ret = av_seek_frame(s, stream_index, ts, flags | dir);
2029         if (ret<0 && ts != min_ts && max_ts != ts) {
2030             ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
/* Last resort: flip the direction and try the original target again. */
2032                 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
2038     // try some generic seek like seek_frame_generic() but with new ts semantics
2041 /*******************************************************/
/* True if at least one stream — or the container itself — carries a known
 * duration. */
2044  * Return TRUE if the stream has accurate duration in any stream.
2046  * @return TRUE if the stream has accurate duration for at least one component.
2048 static int has_duration(AVFormatContext *ic)
2053     for(i = 0;i < ic->nb_streams; i++) {
2054         st = ic->streams[i];
2055         if (st->duration != AV_NOPTS_VALUE)
2058     if (ic->duration != AV_NOPTS_VALUE)
/* Derive the container-level start_time and duration from the per-stream
 * values (all rescaled to AV_TIME_BASE), and compute the global bitrate
 * from the file size when a duration is known.  Subtitle/data streams are
 * tracked separately so an outlier text start time does not skew the
 * primary start time. */
2064  * Estimate the stream timings from the one of each components.
2066  * Also computes the global bitrate if possible.
2068 static void update_stream_timings(AVFormatContext *ic)
2070     int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2071     int64_t duration, duration1, filesize;
2075     start_time = INT64_MAX;
2076     start_time_text = INT64_MAX;
2077     end_time = INT64_MIN;
2078     duration = INT64_MIN;
2079     for(i = 0;i < ic->nb_streams; i++) {
2080         st = ic->streams[i];
2081         if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2082             start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
2083             if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
2084                 if (start_time1 < start_time_text)
2085                     start_time_text = start_time1;
2087                 start_time = FFMIN(start_time, start_time1);
2088             if (st->duration != AV_NOPTS_VALUE) {
2089                 end_time1 = start_time1
2090                           + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2091                 end_time = FFMAX(end_time, end_time1);
2094         if (st->duration != AV_NOPTS_VALUE) {
2095             duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2096             duration = FFMAX(duration, duration1);
/* Accept the text-stream start time only when it is not a clear outlier
 * (more than one second before the primary streams). */
2099     if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2100         start_time = start_time_text;
2101     else if(start_time > start_time_text)
2102         av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);
2104     if (start_time != INT64_MAX) {
2105         ic->start_time = start_time;
2106         if (end_time != INT64_MIN)
2107             duration = FFMAX(duration, end_time - start_time);
2109     if (duration != INT64_MIN && ic->duration == AV_NOPTS_VALUE) {
2110         ic->duration = duration;
2112     if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2113         /* compute the bitrate */
2114         ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
2115             (double)ic->duration;
/* Propagate the container-level start_time/duration down to any stream that
 * has no start time of its own, rescaling into each stream's time base. */
2119 static void fill_all_stream_timings(AVFormatContext *ic)
2124     update_stream_timings(ic);
2125     for(i = 0;i < ic->nb_streams; i++) {
2126         st = ic->streams[i];
2127         if (st->start_time == AV_NOPTS_VALUE) {
2128             if(ic->start_time != AV_NOPTS_VALUE)
2129                 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2130             if(ic->duration != AV_NOPTS_VALUE)
2131                 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Least accurate duration estimate: sum the per-stream bitrates if the
 * container bitrate is unknown, then derive each stream's duration from
 * filesize * 8 / bit_rate.  Existing bit_rate/duration values are trusted
 * and left untouched. */
2136 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2138     int64_t filesize, duration;
2142     /* if bit_rate is already set, we believe it */
2143     if (ic->bit_rate <= 0) {
2145         for(i=0;i<ic->nb_streams;i++) {
2146             st = ic->streams[i];
2147             if (st->codec->bit_rate > 0)
2148                 bit_rate += st->codec->bit_rate;
2150         ic->bit_rate = bit_rate;
2153     /* if duration is already set, we believe it */
2154     if (ic->duration == AV_NOPTS_VALUE &&
2155         ic->bit_rate != 0) {
2156         filesize = ic->pb ? avio_size(ic->pb) : 0;
2158             for(i = 0; i < ic->nb_streams; i++) {
2159                 st = ic->streams[i];
/* duration (stream tb) = 8*filesize / bit_rate, rescaled via av_rescale. */
2160                 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2161                 if (st->duration == AV_NOPTS_VALUE)
2162                     st->duration = duration;
/* Window size (bytes) read from the end of file per attempt, and how many
 * times the window may be doubled before giving up. */
2168 #define DURATION_MAX_READ_SIZE 250000
2169 #define DURATION_MAX_RETRY 3
2171 /* only usable for MPEG-PS streams */
/* Estimate per-stream durations by reading packets near the end of the file
 * and taking the last pts seen for each stream, minus its start time
 * (compensating for one pts wraparound).  The original read position is
 * restored afterwards. */
2172 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2174     AVPacket pkt1, *pkt = &pkt1;
2176     int read_size, i, ret;
2178     int64_t filesize, offset, duration;
2181     /* flush packet queue */
2182     flush_packet_queue(ic);
2184     for (i=0; i<ic->nb_streams; i++) {
2185         st = ic->streams[i];
2186         if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2187             av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2190             av_parser_close(st->parser);
2195     /* estimate the end time (duration) */
2196     /* XXX: may need to support wrapping */
2197     filesize = ic->pb ? avio_size(ic->pb) : 0;
2198     end_time = AV_NOPTS_VALUE;
/* Seek into the tail window; the window doubles on each retry. */
2200         offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2204         avio_seek(ic->pb, offset, SEEK_SET);
2207             if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2211                 ret = ff_read_packet(ic, pkt);
2212             } while(ret == AVERROR(EAGAIN));
2215             read_size += pkt->size;
2216             st = ic->streams[pkt->stream_index];
2217             if (pkt->pts != AV_NOPTS_VALUE &&
2218                 (st->start_time != AV_NOPTS_VALUE ||
2219                  st->first_dts  != AV_NOPTS_VALUE)) {
2220                 duration = end_time = pkt->pts;
2221                 if (st->start_time != AV_NOPTS_VALUE)
2222                     duration -= st->start_time;
2224                     duration -= st->first_dts;
/* A negative span means the timestamp wrapped; undo one wrap period. */
2226                     duration += 1LL<<st->pts_wrap_bits;
2228                     if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
2229                         st->duration = duration;
2232             av_free_packet(pkt);
2234     }while(   end_time==AV_NOPTS_VALUE
2235            && filesize > (DURATION_MAX_READ_SIZE<<retry)
2236            && ++retry <= DURATION_MAX_RETRY);
2238     fill_all_stream_timings(ic);
/* Restore the caller's read position and invalidate cached read state. */
2240     avio_seek(ic->pb, old_offset, SEEK_SET);
2241     for (i=0; i<ic->nb_streams; i++) {
2243         st->cur_dts= st->first_dts;
2244         st->last_IP_pts = AV_NOPTS_VALUE;
2245         st->reference_dts = AV_NOPTS_VALUE;
/* Top-level duration estimation dispatcher.  MPEG-PS/TS on a seekable input
 * gets the accurate end-of-file pts scan; otherwise per-stream durations
 * are used when available, and bitrate extrapolation is the last resort.
 * Records which method was used in duration_estimation_method. */
2249 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2253     /* get the file size, if possible */
2254     if (ic->iformat->flags & AVFMT_NOFILE) {
2257         file_size = avio_size(ic->pb);
2258         file_size = FFMAX(0, file_size);
2261     if ((!strcmp(ic->iformat->name, "mpeg") ||
2262          !strcmp(ic->iformat->name, "mpegts")) &&
2263         file_size && ic->pb->seekable) {
2264         /* get accurate estimate from the PTSes */
2265         estimate_timings_from_pts(ic, old_offset);
2266         ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
2267     } else if (has_duration(ic)) {
2268         /* at least one component has timings - we use them for all
2270         fill_all_stream_timings(ic);
2271         ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
2273         av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2274         /* less precise: use bitrate info */
2275         estimate_timings_from_bit_rate(ic);
2276         ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE;
2278     update_stream_timings(ic);
/* Debug-only dump of the resulting timings. */
2282         AVStream av_unused *st;
2283         for(i = 0;i < ic->nb_streams; i++) {
2284             st = ic->streams[i];
2285             av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2286                     (double) st->start_time / AV_TIME_BASE,
2287                     (double) st->duration / AV_TIME_BASE);
2289         av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2290                 (double) ic->start_time / AV_TIME_BASE,
2291                 (double) ic->duration / AV_TIME_BASE,
2292                 ic->bit_rate / 1000);
/* Check whether a stream's codec context carries enough parameters for the
 * stream to be considered fully probed.  On failure, *errmsg_ptr (if given)
 * is pointed at a static string naming the missing parameter. */
2296 static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
2298     AVCodecContext *avctx = st->codec;
2300 #define FAIL(errmsg) do {                                         \
2302             *errmsg_ptr = errmsg;                                 \
2306     switch (avctx->codec_type) {
2307     case AVMEDIA_TYPE_AUDIO:
2308         if (!avctx->frame_size && determinable_frame_size(avctx))
2309             FAIL("unspecified sample size");
/* sample_fmt is only required once a decoder has actually been probed. */
2310         if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2311             FAIL("unspecified sample format");
2312         if (!avctx->sample_rate)
2313             FAIL("unspecified sample rate");
2314         if (!avctx->channels)
2315             FAIL("unspecified number of channels");
2317     case AVMEDIA_TYPE_VIDEO:
2319             FAIL("unspecified size");
2320         if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
2321             FAIL("unspecified pixel format");
2323     case AVMEDIA_TYPE_DATA:
/* Pure data streams with no codec are always acceptable. */
2324         if(avctx->codec_id == AV_CODEC_ID_NONE) return 1;
2327     if (avctx->codec_id == AV_CODEC_ID_NONE)
2328         FAIL("unknown codec");
/* Probe helper: open the stream's decoder (single-threaded, so H.264 can
 * still extract SPS/PPS into extradata) and decode packets until the codec
 * parameters are filled in.  st->info->found_decoder records the outcome:
 * 1 on success, -1 when no decoder could be found or opened. */
2332 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
2333 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2336     int got_picture = 1, ret = 0;
2338     AVPacket pkt = *avpkt;
2340     if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2341         AVDictionary *thread_opt = NULL;
2343         codec = st->codec->codec ? st->codec->codec :
2344                                    avcodec_find_decoder(st->codec->codec_id);
2347             st->info->found_decoder = -1;
2351         /* force thread count to 1 since the h264 decoder will not extract SPS
2352          *  and PPS to extradata during multi-threaded decoding */
2353         av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2354         ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2356             av_dict_free(&thread_opt);
2358             st->info->found_decoder = -1;
2361         st->info->found_decoder = 1;
2362     } else if (!st->info->found_decoder)
2363         st->info->found_decoder = 1;
2365     if (st->info->found_decoder < 0)
/* Keep decoding while data remains and the probe still needs information
 * (missing codec params, unknown decode delay, or channel-config codecs
 * that need a first frame). */
2368     while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2370            (!has_codec_parameters(st, NULL)   ||
2371            !has_decode_delay_been_guessed(st) ||
2372            (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2374         avcodec_get_frame_defaults(&picture);
2375         switch(st->codec->codec_type) {
2376         case AVMEDIA_TYPE_VIDEO:
2377             ret = avcodec_decode_video2(st->codec, &picture,
2378                                         &got_picture, &pkt);
2380         case AVMEDIA_TYPE_AUDIO:
2381             ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
2388                 st->nb_decoded_frames++;
/* Drain finished: a NULL flush packet produced no further output. */
2394     if(!pkt.data && !got_picture)
/* Linear scan of a codec-tag table for the tag matching `id`.
 * NOTE(review): the loop body and return are not visible in this view. */
2399 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
2401     while (tags->id != AV_CODEC_ID_NONE) {
/* Look a fourcc tag up in one codec-tag table: first an exact match, then a
 * case-insensitive match via avpriv_toupper4().  Returns AV_CODEC_ID_NONE
 * when not found. */
2409 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2412     for(i=0; tags[i].id != AV_CODEC_ID_NONE;i++) {
2413         if(tag == tags[i].tag)
2416     for(i=0; tags[i].id != AV_CODEC_ID_NONE; i++) {
2417         if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2420     return AV_CODEC_ID_NONE;
/* Search a NULL-terminated list of codec-tag tables for the tag of `id`. */
2423 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum AVCodecID id)
2426     for(i=0; tags && tags[i]; i++){
2427         int tag= ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of codec-tag tables for the codec id of a
 * fourcc tag; AV_CODEC_ID_NONE when no table contains it. */
2433 enum AVCodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2436     for(i=0; tags && tags[i]; i++){
2437         enum AVCodecID id= ff_codec_get_id(tags[i], tag);
2438         if(id!=AV_CODEC_ID_NONE) return id;
2440     return AV_CODEC_ID_NONE;
/* Fill in missing chapter end times: each open-ended chapter ends at the
 * start of the next chapter that begins after it, or at the file's end time
 * when there is none.  A chapter with no usable bound collapses to zero
 * length (end == start). */
2443 static void compute_chapters_end(AVFormatContext *s)
2446     int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2448     for (i = 0; i < s->nb_chapters; i++)
2449         if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2450             AVChapter *ch = s->chapters[i];
2451             int64_t   end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2454             for (j = 0; j < s->nb_chapters; j++) {
2455                 AVChapter *ch1 = s->chapters[j];
2456                 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2457                 if (j != i && next_start > ch->start && next_start < end)
2460             ch->end = (end == INT64_MAX) ? ch->start : end;
/* Enumerate standard frame rates (in units of 1/(1001*12) s) used by the
 * fps-guessing code: indices below 60*12 map to n*1001 steps (covering the
 * NTSC family), the remainder to a fixed table of common integer rates. */
2464 static int get_std_framerate(int i){
2465     if(i<60*12) return (i+1)*1001;
2466     else        return ((const int[]){24,30,60,12,15,48})[i-60*12]*1000*12;
/* Heuristic: a codec time base is treated as unreliable when it implies an
 * implausible frame rate (>= ~101 fps or < 5 fps) or belongs to codecs known
 * to misuse the field (MPEG-2 field-repeat flags, H.264).  Callers then
 * measure the rate from packet durations instead. */
2470  * Is the time base unreliable.
2471  * This is a heuristic to balance between quick acceptance of the values in
2472  * the headers vs. some extra checks.
2473  * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2474  * MPEG-2 commonly misuses field repeat flags to store different framerates.
2475  * And there are "variable" fps files this needs to detect as well.
2477 static int tb_unreliable(AVCodecContext *c){
2478     if(   c->time_base.den >= 101L*c->time_base.num
2479        || c->time_base.den <    5L*c->time_base.num
2480 /*       || c->codec_tag == AV_RL32("DIVX")
2481        || c->codec_tag == AV_RL32("XVID")*/
2482        || c->codec_id == AV_CODEC_ID_MPEG2VIDEO
2483        || c->codec_id == AV_CODEC_ID_H264
/* Deprecated compatibility shim (guarded by FF_API_FORMAT_PARAMETERS):
 * forwards to avformat_find_stream_info() with no options. */
2489 #if FF_API_FORMAT_PARAMETERS
2490 int av_find_stream_info(AVFormatContext *ic)
2492     return avformat_find_stream_info(ic, NULL);
/* Probe the input: read packets (buffering them unless AVFMT_FLAG_NOBUFFER),
 * optionally open/flush decoders, and fill in codec parameters, frame rates
 * and durations for every stream.  Returns >=0 on success, <0 on error.
 * NOTE(review): this extraction is missing many interior lines (gaps in the
 * embedded original line numbers) -- braces, loop headers and statements are
 * absent; verify every change against upstream libavformat/utils.c. */
2496 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2498 int i, count, ret, read_size, j;
2500 AVPacket pkt1, *pkt;
2501 int64_t old_offset = avio_tell(ic->pb);
2502 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2503 int flush_codecs = ic->probesize > 0;
2506 av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
/* Pass 1: per existing stream, initialize a parser (unless NOPARSE) and try
 * opening the decoder so basic parameters become available without decoding. */
2508 for(i=0;i<ic->nb_streams;i++) {
2510 AVDictionary *thread_opt = NULL;
2511 st = ic->streams[i];
2513 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2514 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2515 /* if(!st->time_base.num)
2517 if(!st->codec->time_base.num)
2518 st->codec->time_base= st->time_base;
2520 //only for the split stuff
2521 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2522 st->parser = av_parser_init(st->codec->codec_id);
2524 if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
2525 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2526 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
2527 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
2529 } else if (st->need_parsing) {
2530 av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
2531 "%s, packets or times may be invalid.\n",
2532 avcodec_get_name(st->codec->codec_id));
2535 codec = st->codec->codec ? st->codec->codec :
2536 avcodec_find_decoder(st->codec->codec_id);
2538 /* force thread count to 1 since the h264 decoder will not extract SPS
2539 * and PPS to extradata during multi-threaded decoding */
2540 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2542 /* Ensure that subtitle_header is properly set. */
2543 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2544 && codec && !st->codec->codec)
2545 avcodec_open2(st->codec, codec, options ? &options[i]
2548 //try to just open decoders, in case this is enough to get parameters
2549 if (!has_codec_parameters(st, NULL)) {
2550 if (codec && !st->codec->codec)
2551 avcodec_open2(st->codec, codec, options ? &options[i]
2555 av_dict_free(&thread_opt);
/* Reset the DTS bookkeeping used for frame-rate estimation below. */
2558 for (i=0; i<ic->nb_streams; i++) {
2559 #if FF_API_R_FRAME_RATE
2560 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2562 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2563 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
/* Main probe loop: read packets until all streams have parameters, the
 * probesize / max_analyze_duration budget is exhausted, or EOF/interrupt.
 * (The loop header itself is among the lines missing from this extraction.) */
2569 if (ff_check_interrupt(&ic->interrupt_callback)){
2571 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2575 /* check if one codec still needs to be handled */
2576 for(i=0;i<ic->nb_streams;i++) {
2577 int fps_analyze_framecount = 20;
2579 st = ic->streams[i];
2580 if (!has_codec_parameters(st, NULL))
2582 /* if the timebase is coarse (like the usual millisecond precision
2583 of mkv), we need to analyze more frames to reliably arrive at
2585 if (av_q2d(st->time_base) > 0.0005)
2586 fps_analyze_framecount *= 2;
2587 if (ic->fps_probe_size >= 0)
2588 fps_analyze_framecount = ic->fps_probe_size;
2589 /* variable fps and no guess at the real fps */
2590 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2591 && st->info->duration_count < fps_analyze_framecount
2592 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2594 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2596 if (st->first_dts == AV_NOPTS_VALUE &&
2597 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2598 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2601 if (i == ic->nb_streams) {
2602 /* NOTE: if the format has no header, then we need to read
2603 some packets to get most of the streams, so we cannot
2605 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2606 /* if we found the info for all the codecs, we can stop */
2608 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2613 /* we did not get all the codec info, but we read too much data */
2614 if (read_size >= ic->probesize) {
2616 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2617 for (i = 0; i < ic->nb_streams; i++)
2618 if (!ic->streams[i]->r_frame_rate.num &&
2619 ic->streams[i]->info->duration_count <= 1)
2620 av_log(ic, AV_LOG_WARNING,
2621 "Stream #%d: not enough frames to estimate rate; "
2622 "consider increasing probesize\n", i);
2626 /* NOTE: a new stream can be added there if no header in file
2627 (AVFMTCTX_NOHEADER) */
2628 ret = read_frame_internal(ic, &pkt1);
2629 if (ret == AVERROR(EAGAIN))
/* Buffer the packet (so it can be returned later by av_read_frame())
 * unless AVFMT_FLAG_NOBUFFER is set. */
2637 if (ic->flags & AVFMT_FLAG_NOBUFFER) {
2640 pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
2641 &ic->packet_buffer_end);
2642 if ((ret = av_dup_packet(pkt)) < 0)
2643 goto find_stream_info_err;
2646 read_size += pkt->size;
2648 st = ic->streams[pkt->stream_index];
2649 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2650 /* check for non-increasing dts */
2651 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2652 st->info->fps_last_dts >= pkt->dts) {
2653 av_log(ic, AV_LOG_DEBUG, "Non-increasing DTS in stream %d: "
2654 "packet %d with DTS %"PRId64", packet %d with DTS "
2655 "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2656 st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2657 st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
2660 /* update stored dts values */
2661 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2662 st->info->fps_first_dts = pkt->dts;
2663 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2665 st->info->fps_last_dts = pkt->dts;
2666 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2668 if (st->codec_info_nb_frames>1) {
2670 if (st->time_base.den > 0)
2671 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
2672 if (st->avg_frame_rate.num > 0)
2673 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), AV_TIME_BASE_Q));
2675 if (t >= ic->max_analyze_duration) {
2676 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached at %"PRId64"\n", ic->max_analyze_duration, t);
2679 st->info->codec_info_duration += pkt->duration;
/* Accumulate per-standard-framerate timing error statistics used later to
 * round the estimated rate to a "standard" one. */
2681 #if FF_API_R_FRAME_RATE
2683 int64_t last = st->info->last_dts;
2685 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2686 double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2687 int64_t duration= pkt->dts - last;
2689 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2690 // av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2691 for (i=0; i<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); i++) {
2692 int framerate= get_std_framerate(i);
2693 double sdts= dts*framerate/(1001*12);
2695 int ticks= lrintf(sdts+j*0.5);
2696 double error= sdts - ticks + j*0.5;
2697 st->info->duration_error[j][0][i] += error;
2698 st->info->duration_error[j][1][i] += error*error;
2701 st->info->duration_count++;
2702 // ignore the first 4 values, they might have some random jitter
2703 if (st->info->duration_count > 3)
2704 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2706 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2707 st->info->last_dts = pkt->dts;
/* Extract extradata (e.g. SPS/PPS) via the parser's split callback.
 * NOTE(review): returning ENOMEM here without the usual cleanup path looks
 * suspicious, but surrounding lines are missing -- confirm upstream. */
2710 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2711 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2712 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2713 st->codec->extradata_size= i;
2714 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2715 if (!st->codec->extradata)
2716 return AVERROR(ENOMEM);
2717 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2718 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2722 /* if still no information, we try to open the codec and to
2723 decompress the frame. We try to avoid that in most cases as
2724 it takes longer and uses more memory. For MPEG-4, we need to
2725 decompress for QuickTime.
2727 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2728 least one frame of codec data, this makes sure the codec initializes
2729 the channel configuration and does not only trust the values from the container.
2731 try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2733 st->codec_info_nb_frames++;
/* EOF path: flush the opened decoders with empty packets so delayed frames
 * still contribute codec parameters. */
2738 AVPacket empty_pkt = { 0 };
2740 av_init_packet(&empty_pkt);
2742 ret = -1; /* we could not have all the codec parameters before EOF */
2743 for(i=0;i<ic->nb_streams;i++) {
2746 st = ic->streams[i];
2748 /* flush the decoders */
2749 if (st->info->found_decoder == 1) {
2751 err = try_decode_frame(st, &empty_pkt,
2752 (options && i < orig_nb_streams) ?
2753 &options[i] : NULL);
2754 } while (err > 0 && !has_codec_parameters(st, NULL));
2757 av_log(ic, AV_LOG_INFO,
2758 "decoding for stream %d failed\n", st->index);
2762 if (!has_codec_parameters(st, &errmsg)) {
2764 avcodec_string(buf, sizeof(buf), st->codec, 0);
2765 av_log(ic, AV_LOG_WARNING,
2766 "Could not find codec parameters for stream %d (%s): %s\n"
2767 "Consider increasing the value for the 'analyzeduration' and 'probesize' options\n",
2775 // close codecs which were opened in try_decode_frame()
2776 for(i=0;i<ic->nb_streams;i++) {
2777 st = ic->streams[i];
2778 avcodec_close(st->codec);
/* Post-processing: fix raw-video tags, estimate average/real frame rates,
 * and derive audio dispositions from the service type. */
2780 for(i=0;i<ic->nb_streams;i++) {
2781 st = ic->streams[i];
2782 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2783 if(st->codec->codec_id == AV_CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
2784 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2785 if(ff_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
2786 st->codec->codec_tag= tag;
2789 /* estimate average framerate if not set by demuxer */
2790 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration) {
2792 double best_error = 0.01;
2794 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2795 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2796 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2798 /* round guessed framerate to a "standard" framerate if it's
2799 * within 1% of the original estimate*/
2800 for (j = 1; j < MAX_STD_TIMEBASES; j++) {
2801 AVRational std_fps = { get_std_framerate(j), 12*1001 };
2802 double error = fabs(av_q2d(st->avg_frame_rate) / av_q2d(std_fps) - 1);
2804 if (error < best_error) {
2806 best_fps = std_fps.num;
2810 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2811 best_fps, 12*1001, INT_MAX);
2814 // the check for tb_unreliable() is not completely correct, since this is not about handling
2815 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2816 // ipmovie.c produces.
2817 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
2818 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2819 if (st->info->duration_count && !st->r_frame_rate.num
2820 && tb_unreliable(st->codec)) {
2822 double best_error= 0.01;
2824 for (j=0; j<FF_ARRAY_ELEMS(st->info->duration_error[0][0]); j++) {
2827 if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
2829 if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
2832 int n= st->info->duration_count;
2833 double a= st->info->duration_error[k][0][j] / n;
2834 double error= st->info->duration_error[k][1][j]/n - a*a;
2836 if(error < best_error && best_error> 0.000000001){
2838 num = get_std_framerate(j);
2841 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
2844 // do not increase frame rate by more than 1 % in order to match a standard rate.
2845 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2846 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2849 if (!st->r_frame_rate.num){
2850 if( st->codec->time_base.den * (int64_t)st->time_base.num
2851 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2852 st->r_frame_rate.num = st->codec->time_base.den;
2853 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2855 st->r_frame_rate.num = st->time_base.den;
2856 st->r_frame_rate.den = st->time_base.num;
2859 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2860 if(!st->codec->bits_per_coded_sample)
2861 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2862 // set stream disposition based on audio service type
2863 switch (st->codec->audio_service_type) {
2864 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2865 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2866 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2867 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2868 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2869 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2870 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2871 st->disposition = AV_DISPOSITION_COMMENT; break;
2872 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2873 st->disposition = AV_DISPOSITION_KARAOKE; break;
2879 estimate_timings(ic, old_offset);
2881 compute_chapters_end(ic);
/* Common exit: free per-stream probe info and reset thread counts. */
2883 find_stream_info_err:
2884 for (i=0; i < ic->nb_streams; i++) {
2885 if (ic->streams[i]->codec)
2886 ic->streams[i]->codec->thread_count = 0;
2887 av_freep(&ic->streams[i]->info);
2890 av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
/* Find the next program (after 'last', or the first if last==NULL) that
 * contains stream index 's'.  Returns the matching AVProgram, or presumably
 * NULL when none matches (the tail of this function is missing from this
 * extraction -- verify upstream). */
2894 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
2898 for (i = 0; i < ic->nb_programs; i++) {
2899 if (ic->programs[i] == last) {
2903 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2904 if (ic->programs[i]->stream_index[j] == s)
2905 return ic->programs[i];
/* Select the "best" stream of the given media type: prefers streams with a
 * findable decoder and the highest codec_info_nb_frames, skipping
 * hearing/visually-impaired dispositions; restricts the search to the
 * program related to 'related_stream' when one is given, retrying over all
 * streams if that program yields nothing.  Returns the stream index, or
 * AVERROR_STREAM_NOT_FOUND / AVERROR_DECODER_NOT_FOUND.
 * NOTE(review): interior lines are missing from this extraction (e.g. the
 * decoder_ret handling around line 2940); verify upstream before editing. */
2911 int av_find_best_stream(AVFormatContext *ic,
2912 enum AVMediaType type,
2913 int wanted_stream_nb,
2915 AVCodec **decoder_ret,
2918 int i, nb_streams = ic->nb_streams;
2919 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2920 unsigned *program = NULL;
2921 AVCodec *decoder = NULL, *best_decoder = NULL;
2923 if (related_stream >= 0 && wanted_stream_nb < 0) {
2924 AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
2926 program = p->stream_index;
2927 nb_streams = p->nb_stream_indexes;
2930 for (i = 0; i < nb_streams; i++) {
2931 int real_stream_index = program ? program[i] : i;
2932 AVStream *st = ic->streams[real_stream_index];
2933 AVCodecContext *avctx = st->codec;
2934 if (avctx->codec_type != type)
2936 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2938 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2941 decoder = avcodec_find_decoder(st->codec->codec_id);
2944 ret = AVERROR_DECODER_NOT_FOUND;
2948 if (best_count >= st->codec_info_nb_frames)
2950 best_count = st->codec_info_nb_frames;
2951 ret = real_stream_index;
2952 best_decoder = decoder;
2953 if (program && i == nb_streams - 1 && ret < 0) {
/* no related stream found: widen the search to every stream and restart */
2955 nb_streams = ic->nb_streams;
2956 i = 0; /* no related stream found, try again with everything */
2960 *decoder_ret = best_decoder;
2964 /*******************************************************/
/* Resume a paused network stream: prefer the demuxer's read_play callback,
 * otherwise un-pause the protocol layer via avio_pause(); ENOSYS when
 * neither path is available.  (A guard line between 2969 and 2971 is
 * missing from this extraction -- verify upstream.) */
2966 int av_read_play(AVFormatContext *s)
2968 if (s->iformat->read_play)
2969 return s->iformat->read_play(s);
2971 return avio_pause(s->pb, 0);
2972 return AVERROR(ENOSYS);
/* Pause a network stream: mirror image of av_read_play() -- demuxer
 * read_pause callback first, then avio_pause(pb, 1), else ENOSYS.
 * (A guard line between 2978 and 2980 is missing from this extraction.) */
2975 int av_read_pause(AVFormatContext *s)
2977 if (s->iformat->read_pause)
2978 return s->iformat->read_pause(s);
2980 return avio_pause(s->pb, 1);
2981 return AVERROR(ENOSYS);
/* Free an AVFormatContext and everything it owns: per-stream parsers,
 * attached pictures, metadata, index entries, codec contexts and private
 * data; then programs, chapters, context metadata and the context itself.
 * Does NOT close any AVIOContext (that is avformat_close_input's job).
 * NOTE(review): several lines (NULL check, st assignment, final av_free(s))
 * are missing from this extraction -- verify upstream before editing. */
2984 void avformat_free_context(AVFormatContext *s)
2990 if (s->iformat && s->iformat->priv_class && s->priv_data)
2991 av_opt_free(s->priv_data);
2993 for(i=0;i<s->nb_streams;i++) {
2994 /* free all data in a stream component */
2997 av_parser_close(st->parser);
2999 if (st->attached_pic.data)
3000 av_free_packet(&st->attached_pic);
3001 av_dict_free(&st->metadata);
3002 av_freep(&st->index_entries);
3003 av_freep(&st->codec->extradata);
3004 av_freep(&st->codec->subtitle_header);
3005 av_freep(&st->codec);
3006 av_freep(&st->priv_data);
3007 av_freep(&st->info);
3010 for(i=s->nb_programs-1; i>=0; i--) {
3011 av_dict_free(&s->programs[i]->metadata);
3012 av_freep(&s->programs[i]->stream_index);
3013 av_freep(&s->programs[i]);
3015 av_freep(&s->programs);
3016 av_freep(&s->priv_data);
3017 while(s->nb_chapters--) {
3018 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
3019 av_freep(&s->chapters[s->nb_chapters]);
3021 av_freep(&s->chapters);
3022 av_dict_free(&s->metadata);
3023 av_freep(&s->streams);
/* Deprecated wrapper (FF_API_CLOSE_INPUT_FILE): forwards to
 * avformat_close_input(), which also NULLs the caller's pointer. */
3027 #if FF_API_CLOSE_INPUT_FILE
3028 void av_close_input_file(AVFormatContext *s)
3030 avformat_close_input(&s);
/* Close an input context: flush buffered packets, call the demuxer's
 * read_close, free the context, NULL *ps, and close the AVIOContext unless
 * the format is AVFMT_NOFILE or the I/O was user-supplied
 * (AVFMT_FLAG_CUSTOM_IO).  NOTE(review): the lines that NULL *ps and call
 * avio_close(pb) are missing from this extraction -- verify upstream. */
3034 void avformat_close_input(AVFormatContext **ps)
3036 AVFormatContext *s = *ps;
3037 AVIOContext *pb = (s->iformat && (s->iformat->flags & AVFMT_NOFILE)) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
3039 flush_packet_queue(s);
3040 if (s->iformat && (s->iformat->read_close))
3041 s->iformat->read_close(s);
3042 avformat_free_context(s);
/* Deprecated wrapper (FF_API_NEW_STREAM): creates a stream via
 * avformat_new_stream() (presumably assigning 'id' to st->id in lines
 * missing from this extraction -- verify upstream). */
3048 #if FF_API_NEW_STREAM
3049 AVStream *av_new_stream(AVFormatContext *s, int id)
3051 AVStream *st = avformat_new_stream(s, NULL);
/* Append a new AVStream to 's': grows the streams array, allocates the
 * stream, its info struct and codec context (defaults taken from codec 'c'),
 * and initializes timestamps/defaults (90kHz MPEG-like time base, NOPTS
 * sentinels, relative cur_dts for demuxing).  Returns the stream, or NULL
 * on allocation failure (error-path lines are missing from this extraction). */
3058 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
3064 if (s->nb_streams >= INT_MAX/sizeof(*streams))
3066 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
3069 s->streams = streams;
3071 st = av_mallocz(sizeof(AVStream));
3074 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
3078 st->info->last_dts = AV_NOPTS_VALUE;
3080 st->codec = avcodec_alloc_context3(c);
3082 /* no default bitrate if decoding */
3083 st->codec->bit_rate = 0;
3085 st->index = s->nb_streams;
3086 st->start_time = AV_NOPTS_VALUE;
3087 st->duration = AV_NOPTS_VALUE;
3088 /* we set the current DTS to 0 so that formats without any timestamps
3089 but durations get some timestamps, formats with some unknown
3090 timestamps have their first few packets buffered and the
3091 timestamps corrected before they are returned to the user */
3092 st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
3093 st->first_dts = AV_NOPTS_VALUE;
3094 st->probe_packets = MAX_PROBE_PACKETS;
3096 /* default pts setting is MPEG-like */
3097 avpriv_set_pts_info(st, 33, 1, 90000);
3098 st->last_IP_pts = AV_NOPTS_VALUE;
3099 for(i=0; i<MAX_REORDER_DELAY+1; i++)
3100 st->pts_buffer[i]= AV_NOPTS_VALUE;
3101 st->reference_dts = AV_NOPTS_VALUE;
3103 st->sample_aspect_ratio = (AVRational){0,1};
3105 s->streams[s->nb_streams++] = st;
/* Return the program with the given id, creating and registering a new one
 * (discard = AVDISCARD_NONE) if it does not exist yet.  NOTE(review): the
 * NULL-check after av_mallocz and the id assignment/return are among the
 * lines missing from this extraction -- verify upstream. */
3109 AVProgram *av_new_program(AVFormatContext *ac, int id)
3111 AVProgram *program=NULL;
3114 av_dlog(ac, "new_program: id=0x%04x\n", id);
3116 for(i=0; i<ac->nb_programs; i++)
3117 if(ac->programs[i]->id == id)
3118 program = ac->programs[i];
3121 program = av_mallocz(sizeof(AVProgram));
3124 dynarray_add(&ac->programs, &ac->nb_programs, program);
3125 program->discard = AVDISCARD_NONE;
/* Return the chapter with the given id, creating one if needed, then update
 * its title metadata, time base, start (and presumably end -- that line is
 * missing from this extraction) timestamps. */
3132 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
3134 AVChapter *chapter = NULL;
3137 for(i=0; i<s->nb_chapters; i++)
3138 if(s->chapters[i]->id == id)
3139 chapter = s->chapters[i];
3142 chapter= av_mallocz(sizeof(AVChapter));
3145 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3147 av_dict_set(&chapter->metadata, "title", title, 0);
3149 chapter->time_base= time_base;
3150 chapter->start = start;
3156 /************************************************************/
3157 /* output media file */
/* Allocate an output AVFormatContext, guessing the muxer from 'format' or
 * from the filename extension when 'oformat' is NULL; allocates and
 * default-initializes the muxer's private data and copies the filename.
 * Returns 0 / negative AVERROR; on failure frees the context and (presumably)
 * NULLs *avctx -- the surrounding control-flow lines are missing from this
 * extraction, verify upstream. */
3159 int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
3160 const char *format, const char *filename)
3162 AVFormatContext *s = avformat_alloc_context();
3171 oformat = av_guess_format(format, NULL, NULL);
3173 av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
3174 ret = AVERROR(EINVAL);
3178 oformat = av_guess_format(NULL, filename, NULL);
3180 ret = AVERROR(EINVAL);
3181 av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
3188 s->oformat = oformat;
3189 if (s->oformat->priv_data_size > 0) {
3190 s->priv_data = av_mallocz(s->oformat->priv_data_size);
3193 if (s->oformat->priv_class) {
3194 *(const AVClass**)s->priv_data= s->oformat->priv_class;
3195 av_opt_set_defaults(s->priv_data);
3198 s->priv_data = NULL;
3201 av_strlcpy(s->filename, filename, sizeof(s->filename));
3205 av_log(s, AV_LOG_ERROR, "Out of memory\n");
3206 ret = AVERROR(ENOMEM);
3208 avformat_free_context(s);
/* Deprecated wrapper (FF_API_ALLOC_OUTPUT_CONTEXT): same as
 * avformat_alloc_output_context2() but returns NULL instead of an error
 * code. */
3212 #if FF_API_ALLOC_OUTPUT_CONTEXT
3213 AVFormatContext *avformat_alloc_output_context(const char *format,
3214 AVOutputFormat *oformat, const char *filename)
3216 AVFormatContext *avctx;
3217 int ret = avformat_alloc_output_context2(&avctx, oformat, format, filename);
3218 return ret < 0 ? NULL : avctx;
/* Check the stream's (codec_tag, codec_id) pair against the muxer's codec_tag
 * tables.  Policy per the comment below: unknown tag+id is OK; a tag bound to
 * a different id fails; an id bound to a different tag fails unless strict
 * compliance is below normal.  NOTE(review): several lines (loop body
 * closure, tag/id capture, final returns) are missing from this extraction
 * -- verify upstream before editing. */
3222 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
3224 const AVCodecTag *avctag;
3226 enum AVCodecID id = AV_CODEC_ID_NONE;
3227 unsigned int tag = 0;
/*
3230 * Check that tag + id is in the table
3231 * If neither is in the table -> OK
3232 * If tag is in the table with another id -> FAIL
3233 * If id is in the table with another tag -> FAIL unless strict < normal
*/
3235 for (n = 0; s->oformat->codec_tag[n]; n++) {
3236 avctag = s->oformat->codec_tag[n];
3237 while (avctag->id != AV_CODEC_ID_NONE) {
3238 if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
3240 if (id == st->codec->codec_id)
3243 if (avctag->id == st->codec->codec_id)
3248 if (id != AV_CODEC_ID_NONE)
3250 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
/* Validate streams and write the muxer header: applies caller options,
 * sanity-checks per-stream parameters (sample rate, time base, dimensions,
 * aspect ratio), validates/derives codec tags, allocates muxer private data
 * if still unset, stamps the "encoder" metadata (unless bitexact), calls the
 * muxer's write_header, and initializes per-stream PTS fraction state.
 * Returns 0 or a negative AVERROR.  NOTE(review): many interior lines
 * (braces, 'continue'/'break's, the fail: label, den==0 check) are missing
 * from this extraction -- verify upstream before editing. */
3255 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
3259 AVDictionary *tmp = NULL;
3262 av_dict_copy(&tmp, *options, 0);
3263 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
3265 if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
3266 (ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3269 // some sanity checks
3270 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
3271 av_log(s, AV_LOG_ERROR, "no streams\n");
3272 ret = AVERROR(EINVAL);
/* Per-stream validation pass. */
3276 for(i=0;i<s->nb_streams;i++) {
3279 switch (st->codec->codec_type) {
3280 case AVMEDIA_TYPE_AUDIO:
3281 if(st->codec->sample_rate<=0){
3282 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
3283 ret = AVERROR(EINVAL);
3286 if(!st->codec->block_align)
3287 st->codec->block_align = st->codec->channels *
3288 av_get_bits_per_sample(st->codec->codec_id) >> 3;
3290 case AVMEDIA_TYPE_VIDEO:
3291 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
3292 av_log(s, AV_LOG_ERROR, "time base not set\n");
3293 ret = AVERROR(EINVAL);
3296 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
3297 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
3298 ret = AVERROR(EINVAL);
3301 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)
3302 && FFABS(av_q2d(st->sample_aspect_ratio) - av_q2d(st->codec->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
3304 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
3305 "(%d/%d) and encoder layer (%d/%d)\n",
3306 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3307 st->codec->sample_aspect_ratio.num,
3308 st->codec->sample_aspect_ratio.den);
3309 ret = AVERROR(EINVAL);
/* Codec-tag validation / derivation against the muxer's tag tables. */
3315 if(s->oformat->codec_tag){
3316 if( st->codec->codec_tag
3317 && st->codec->codec_id == AV_CODEC_ID_RAWVIDEO
3318 && (av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 || av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) ==MKTAG('r', 'a', 'w', ' '))
3319 && !validate_codec_tag(s, st)){
3320 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi/mov, we override it here
3321 st->codec->codec_tag= 0;
3323 if(st->codec->codec_tag){
3324 if (!validate_codec_tag(s, st)) {
3325 char tagbuf[32], cortag[32];
3326 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
3327 av_get_codec_tag_string(cortag, sizeof(cortag), av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id));
3328 av_log(s, AV_LOG_ERROR,
3329 "Tag %s/0x%08x incompatible with output codec id '%d' (%s)\n",
3330 tagbuf, st->codec->codec_tag, st->codec->codec_id, cortag);
3331 ret = AVERROR_INVALIDDATA;
3335 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
3338 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
3339 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
3340 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
3343 if (!s->priv_data && s->oformat->priv_data_size > 0) {
3344 s->priv_data = av_mallocz(s->oformat->priv_data_size);
3345 if (!s->priv_data) {
3346 ret = AVERROR(ENOMEM);
3349 if (s->oformat->priv_class) {
3350 *(const AVClass**)s->priv_data= s->oformat->priv_class;
3351 av_opt_set_defaults(s->priv_data);
3352 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
3357 /* set muxer identification string */
3358 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
3359 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
3362 if(s->oformat->write_header){
3363 ret = s->oformat->write_header(s);
3368 /* init PTS generation */
3369 for(i=0;i<s->nb_streams;i++) {
3370 int64_t den = AV_NOPTS_VALUE;
3373 switch (st->codec->codec_type) {
3374 case AVMEDIA_TYPE_AUDIO:
3375 den = (int64_t)st->time_base.num * st->codec->sample_rate;
3377 case AVMEDIA_TYPE_VIDEO:
3378 den = (int64_t)st->time_base.num * st->codec->time_base.den;
3383 if (den != AV_NOPTS_VALUE) {
3385 ret = AVERROR_INVALIDDATA;
3388 frac_init(&st->pts, 0, 0, den);
3393 av_dict_free(options);
/* Muxing-side timestamp fixup: fills in a missing packet duration, derives
 * pts/dts when possible (including dts-from-pts via the reorder buffer),
 * enforces monotonically increasing dts and pts>=dts, and advances the
 * per-stream fractional PTS counter (st->pts) used when encoders supply no
 * timestamps.  Returns 0 or AVERROR(EINVAL) on invalid timestamps.
 * NOTE(review): interior lines (e.g. the kf_seen/AVFMT_NOTIMESTAMPS branch
 * around 3418-3430, 'break's in the switch) are missing from this
 * extraction -- verify upstream before editing. */
3402 //FIXME merge with compute_pkt_fields
3403 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
3404 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
3405 int num, den, frame_size, i;
3407 av_dlog(s, "compute_pkt_fields2: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
3408 av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), delay, pkt->size, pkt->stream_index);
3410 /* duration field */
3411 if (pkt->duration == 0) {
3412 compute_frame_duration(&num, &den, st, NULL, pkt);
3414 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
3418 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
3421 //XXX/FIXME this is a temporary hack until all encoders output pts
3422 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
3425 av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
3429 // pkt->pts= st->cur_dts;
3430 pkt->pts= st->pts.val;
3433 //calculate dts from pts
3434 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
3435 st->pts_buffer[0]= pkt->pts;
3436 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
3437 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
3438 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
3439 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
3441 pkt->dts= st->pts_buffer[0];
/* Reject non-monotonic dts (strictly increasing unless the muxer allows
 * equal timestamps via AVFMT_TS_NONSTRICT). */
3444 if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE &&
3445 ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
3446 st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
3447 av_log(s, AV_LOG_ERROR,
3448 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
3449 st->index, av_ts2str(st->cur_dts), av_ts2str(pkt->dts));
3450 return AVERROR(EINVAL);
3452 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3453 av_log(s, AV_LOG_ERROR, "pts (%s) < dts (%s) in stream %d\n",
3454 av_ts2str(pkt->pts), av_ts2str(pkt->dts), st->index);
3455 return AVERROR(EINVAL);
3458 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%s dts2:%s\n", av_ts2str(pkt->pts), av_ts2str(pkt->dts));
3459 st->cur_dts= pkt->dts;
3460 st->pts.val= pkt->dts;
/* Advance the fractional PTS counter by this packet's duration. */
3463 switch (st->codec->codec_type) {
3464 case AVMEDIA_TYPE_AUDIO:
3465 frame_size = get_audio_frame_size(st->codec, pkt->size, 1);
3467 /* HACK/FIXME, we skip the initial 0 size packets as they are most
3468 likely equal to the encoder delay, but it would be better if we
3469 had the real timestamps from the encoder */
3470 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3471 frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3474 case AVMEDIA_TYPE_VIDEO:
3475 frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Write one packet directly (no interleaving): NULL packets flush muxers
 * that support AVFMT_ALLOW_FLUSH; otherwise timestamps are fixed up via
 * compute_pkt_fields2() (errors ignored only for AVFMT_NOTIMESTAMPS muxers)
 * before calling the muxer's write_packet and bumping nb_frames.
 * NOTE(review): guard/return lines between the visible statements are
 * missing from this extraction -- verify upstream. */
3483 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3488 if (s->oformat->flags & AVFMT_ALLOW_FLUSH)
3489 return s->oformat->write_packet(s, pkt);
3493 ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
3495 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3498 ret= s->oformat->write_packet(s, pkt);
3501 s->streams[pkt->stream_index]->nb_frames++;
/* Insert a packet into the context-wide interleaving buffer, keeping it
 * sorted per 'compare'.  Takes ownership of the packet's data (duplicates
 * non-malloc'd payloads, NULLs pkt->destruct).  When max_chunk_size /
 * max_chunk_duration are set, packets are grouped into chunks and new chunks
 * are flagged with CHUNK_START so the sort does not split them.
 * NOTE(review): several lines (NULL check after av_mallocz, chunk branch
 * heads, list-tail updates) are missing from this extraction -- verify
 * upstream before editing. */
3505 #define CHUNK_START 0x1000
3507 int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3508 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3510 AVPacketList **next_point, *this_pktl;
3511 AVStream *st= s->streams[pkt->stream_index];
3512 int chunked= s->max_chunk_size || s->max_chunk_duration;
3514 this_pktl = av_mallocz(sizeof(AVPacketList));
3516 return AVERROR(ENOMEM);
3517 this_pktl->pkt= *pkt;
3518 pkt->destruct= NULL; // do not free original but only the copy
3519 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
3521 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3522 next_point = &(st->last_in_packet_buffer->next);
3524 next_point = &s->packet_buffer;
/* Chunked interleaving: accumulate into the current chunk until either the
 * size or duration budget would be exceeded, then start a new chunk. */
3529 uint64_t max= av_rescale_q(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base);
3530 if( st->interleaver_chunk_size + pkt->size <= s->max_chunk_size-1U
3531 && st->interleaver_chunk_duration + pkt->duration <= max-1U){
3532 st->interleaver_chunk_size += pkt->size;
3533 st->interleaver_chunk_duration += pkt->duration;
3536 st->interleaver_chunk_size =
3537 st->interleaver_chunk_duration = 0;
3538 this_pktl->pkt.flags |= CHUNK_START;
/* Find the insertion point: scan forward while the new packet sorts after
 * existing entries (never splitting a chunk), or append at the tail when it
 * sorts after the current last packet. */
3542 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3544 && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
3545 || !compare(s, &(*next_point)->pkt, pkt))){
3546 next_point= &(*next_point)->next;
3551 next_point = &(s->packet_buffer_end->next);
3554 assert(!*next_point);
3556 s->packet_buffer_end= this_pktl;
3559 this_pktl->next= *next_point;
3561 s->streams[pkt->stream_index]->last_in_packet_buffer=
3562 *next_point= this_pktl;
/* Interleaving comparator: orders packets by dts across time bases via
 * av_compare_ts(); when audio_preload is set and exactly one of the two
 * packets is audio, the audio packet's effective timestamp is shifted
 * earlier by the preload (exact integer path below an overflow threshold,
 * rescaled path otherwise).  Ties break on stream index.
 * NOTE(review): the overflow-threshold branch line between 3574 and 3576 is
 * missing from this extraction -- verify upstream. */
3566 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3568 AVStream *st = s->streams[ pkt ->stream_index];
3569 AVStream *st2= s->streams[ next->stream_index];
3570 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3572 if(s->audio_preload && ((st->codec->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codec->codec_type == AVMEDIA_TYPE_AUDIO))){
3573 int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3574 int64_t ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO);
3576 ts= ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
3577 -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
3580 comp= (ts>ts2) - (ts<ts2);
3584 return pkt->stream_index < next->stream_index;
/* DTS-based interleaver: buffer the incoming packet, then emit the earliest
 * buffered packet once every stream has something buffered (or on flush, or
 * when only non-interleaved subtitle streams are starving and the buffered
 * dts spread exceeds 20 seconds).  Returns 1 with a packet in *out, 0 when
 * nothing can be emitted yet, <0 on error.  NOTE(review): loop heads,
 * 'flush=1' assignments and several returns are missing from this
 * extraction -- verify upstream before editing. */
3588 int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3589 AVPacket *pkt, int flush)
3592 int stream_count=0, noninterleaved_count=0;
3593 int64_t delta_dts_max = 0;
3597 ret = ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
3602 for(i=0; i < s->nb_streams; i++) {
3603 if (s->streams[i]->last_in_packet_buffer) {
3605 } else if(s->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
3606 ++noninterleaved_count;
/* If every stream except subtitles has data and the buffered dts spread is
 * huge, force a flush rather than buffering indefinitely. */
3610 if (s->nb_streams == stream_count) {
3613 for(i=0; i < s->nb_streams; i++) {
3614 if (s->streams[i]->last_in_packet_buffer) {
3616 av_rescale_q(s->streams[i]->last_in_packet_buffer->pkt.dts,
3617 s->streams[i]->time_base,
3619 av_rescale_q(s->packet_buffer->pkt.dts,
3620 s->streams[s->packet_buffer->pkt.stream_index]->time_base,
3622 delta_dts_max= FFMAX(delta_dts_max, delta_dts);
3625 if(s->nb_streams == stream_count+noninterleaved_count &&
3626 delta_dts_max > 20*AV_TIME_BASE) {
3627 av_log(s, AV_LOG_DEBUG, "flushing with %d noninterleaved\n", noninterleaved_count);
/* Pop the head of the buffer and hand it to the caller. */
3631 if(stream_count && flush){
3632 pktl= s->packet_buffer;
3635 s->packet_buffer= pktl->next;
3636 if(!s->packet_buffer)
3637 s->packet_buffer_end= NULL;
3639 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3640 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3644 av_init_packet(out);
/* Deprecated public wrapper (FF_API_INTERLEAVE_PACKET): forwards to
 * ff_interleave_packet_per_dts(). */
3649 #if FF_API_INTERLEAVE_PACKET
3650 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
3651 AVPacket *pkt, int flush)
3653 return ff_interleave_packet_per_dts(s, out, pkt, flush);
/*
3658 * Interleave an AVPacket correctly so it can be muxed.
3659 * @param out the interleaved packet will be output here
3660 * @param in the input packet
3661 * @param flush 1 if no further packets are available as input and all
3662 * remaining packets should be output
3663 * @return 1 if a packet was output, 0 if no packet could be output,
3664 * < 0 if an error occurred
 */
/* Dispatch: use the muxer's own interleave_packet callback when provided,
 * otherwise fall back to dts-based interleaving.  NOTE(review): the lines
 * between 3668 and 3673 (presumably freeing 'in' on callback error) are
 * missing from this extraction -- verify upstream. */
3666 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3667 if (s->oformat->interleave_packet) {
3668 int ret = s->oformat->interleave_packet(s, out, in, flush);
3673 return ff_interleave_packet_per_dts(s, out, in, flush);
/* Public entry point: validate timestamps, interleave the packet, and write
 * every packet the interleaver releases. A NULL pkt flushes the buffer.
 * NOTE(review): several interior lines are elided in this view. */
3676 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3680         AVStream *st= s->streams[ pkt->stream_index];
3682         //FIXME/XXX/HACK drop zero sized packets
3683         if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3686         av_dlog(s, "av_interleaved_write_frame size:%d dts:%s pts:%s\n",
3687                 pkt->size, av_ts2str(pkt->dts), av_ts2str(pkt->pts));
     /* timestamp errors are fatal unless the muxer declares it does not
      * need timestamps */
3688         if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3691         if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3692             return AVERROR(EINVAL);
3694         av_dlog(s, "av_interleaved_write_frame FLUSH\n");
     /* drain loop: write each packet the interleaver releases */
3700         int ret= interleave_packet(s, &opkt, pkt, flush);
3701         if(ret<=0) //FIXME cleanup needed for ret<0 ?
3704         ret= s->oformat->write_packet(s, &opkt);
3706             s->streams[opkt.stream_index]->nb_frames++;
3708         av_free_packet(&opkt);
     /* surface any pending I/O error from the underlying AVIOContext */
3713         if(s->pb && s->pb->error)
3714             return s->pb->error;
/* Flush all buffered packets, call the muxer's write_trailer hook, and free
 * per-stream private data. Returns the pending I/O error, if any.
 * NOTE(review): interior lines are elided in this view. */
3718 int av_write_trailer(AVFormatContext *s)
     /* drain the interleaving buffer with flush=1 */
3724         ret= interleave_packet(s, &pkt, NULL, 1);
3725         if(ret<0) //FIXME cleanup needed for ret<0 ?
3730         ret= s->oformat->write_packet(s, &pkt);
3732             s->streams[pkt.stream_index]->nb_frames++;
3734         av_free_packet(&pkt);
3738     if(s->pb && s->pb->error)
3742     if(s->oformat->write_trailer)
3743         ret = s->oformat->write_trailer(s);
3748         ret = s->pb ? s->pb->error : 0;
     /* release per-stream private data and index entries */
3749     for(i=0;i<s->nb_streams;i++) {
3750         av_freep(&s->streams[i]->priv_data);
3751         av_freep(&s->streams[i]->index_entries);
3753     if (s->oformat->priv_class)
3754         av_opt_free(s->priv_data);
3755     av_freep(&s->priv_data);
/* Query the muxer for the last output timestamp; ENOSYS when the output
 * format does not implement get_output_timestamp. */
3759 int av_get_output_timestamp(struct AVFormatContext *s, int stream,
3760                             int64_t *dts, int64_t *wall)
3762     if (!s->oformat || !s->oformat->get_output_timestamp)
3763         return AVERROR(ENOSYS);
3764     s->oformat->get_output_timestamp(s, stream, dts, wall);
/* Add stream index 'idx' to the program with id 'progid', ignoring
 * out-of-range indices and duplicates already present in the program. */
3768 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3771     AVProgram *program=NULL;
3774     if (idx >= ac->nb_streams) {
3775         av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3779     for(i=0; i<ac->nb_programs; i++){
3780         if(ac->programs[i]->id != progid)
3782         program = ac->programs[i];
         /* skip if the stream is already part of this program */
3783         for(j=0; j<program->nb_stream_indexes; j++)
3784             if(program->stream_index[j] == idx)
         /* grow the index array by one entry; tmp guards against realloc
          * failure clobbering the original pointer */
3787         tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3790         program->stream_index = tmp;
3791         program->stream_index[program->nb_stream_indexes++] = idx;
/* Log a rate with adaptive precision: two decimals for fractional rates,
 * integer for whole rates, and "<n>k" for exact multiples of 1000. */
3796 static void print_fps(double d, const char *postfix){
3797     uint64_t v= lrintf(d*100);
3798     if     (v% 100      ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3799     else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3800     else                  av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/* Pretty-print a metadata dictionary, one "key: value" line per entry.
 * Skipped entirely when the only entry is "language" (shown elsewhere).
 * Control characters in values are filtered; CR becomes a space and LF
 * continues the value on an aligned new line. */
3803 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3805     if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3806         AVDictionaryEntry *tag=NULL;
3808         av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3809         while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3810             if(strcmp("language", tag->key)){
3811                 const char *p = tag->value;
3812                 av_log(ctx, AV_LOG_INFO, "%s  %-16s: ", indent, tag->key);
                 /* copy the value up to the first control character */
3815                     size_t len = strcspn(p, "\x8\xa\xb\xc\xd");
3816                     av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3817                     av_log(ctx, AV_LOG_INFO, "%s", tmp);
3819                     if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3820                     if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s  %-16s: ", indent, "");
3823                 av_log(ctx, AV_LOG_INFO, "\n");
3829 /* "user interface" functions */
/* Print a one-line human-readable description of stream i of 'ic'
 * ("Stream #index:i ... : codec, SAR/DAR, fps/tbr/tbn/tbc, dispositions"),
 * followed by the stream's metadata. */
3830 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3833     int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3834     AVStream *st = ic->streams[i];
3835     int g = av_gcd(st->time_base.num, st->time_base.den);
3836     AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3837     avcodec_string(buf, sizeof(buf), st->codec, is_output);
3838     av_log(NULL, AV_LOG_INFO, "    Stream #%d:%d", index, i);
3839     /* the pid is an important information, so we display it */
3840     /* XXX: add a generic system */
3841     if (flags & AVFMT_SHOW_IDS)
3842         av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3844         av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3845     av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3846     av_log(NULL, AV_LOG_INFO, ": %s", buf);
     /* show SAR/DAR only when the stream-level SAR differs from the
      * codec-level one */
3847     if (st->sample_aspect_ratio.num && // default
3848         av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3849         AVRational display_aspect_ratio;
3850         av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3851                   st->codec->width*st->sample_aspect_ratio.num,
3852                   st->codec->height*st->sample_aspect_ratio.den,
3854         av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3855                  st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3856                  display_aspect_ratio.num, display_aspect_ratio.den);
     /* video streams additionally report the various rate bases */
3858     if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3859         if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3860             print_fps(av_q2d(st->avg_frame_rate), "fps");
3861 #if FF_API_R_FRAME_RATE
3862         if(st->r_frame_rate.den && st->r_frame_rate.num)
3863             print_fps(av_q2d(st->r_frame_rate), "tbr");
3865         if(st->time_base.den && st->time_base.num)
3866             print_fps(1/av_q2d(st->time_base), "tbn");
3867         if(st->codec->time_base.den && st->codec->time_base.num)
3868             print_fps(1/av_q2d(st->codec->time_base), "tbc");
     /* append one tag per set disposition flag */
3870     if (st->disposition & AV_DISPOSITION_DEFAULT)
3871         av_log(NULL, AV_LOG_INFO, " (default)");
3872     if (st->disposition & AV_DISPOSITION_DUB)
3873         av_log(NULL, AV_LOG_INFO, " (dub)");
3874     if (st->disposition & AV_DISPOSITION_ORIGINAL)
3875         av_log(NULL, AV_LOG_INFO, " (original)");
3876     if (st->disposition & AV_DISPOSITION_COMMENT)
3877         av_log(NULL, AV_LOG_INFO, " (comment)");
3878     if (st->disposition & AV_DISPOSITION_LYRICS)
3879         av_log(NULL, AV_LOG_INFO, " (lyrics)");
3880     if (st->disposition & AV_DISPOSITION_KARAOKE)
3881         av_log(NULL, AV_LOG_INFO, " (karaoke)");
3882     if (st->disposition & AV_DISPOSITION_FORCED)
3883         av_log(NULL, AV_LOG_INFO, " (forced)");
3884     if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3885         av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3886     if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3887         av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3888     if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3889         av_log(NULL, AV_LOG_INFO, " (clean effects)");
3890     av_log(NULL, AV_LOG_INFO, "\n");
3891     dump_metadata(NULL, st->metadata, "    ");
/* Print a human-readable summary of the whole format context: container,
 * metadata, duration/start/bitrate, chapters, programs, and every stream.
 * Streams already printed as part of a program are tracked in 'printed'
 * so they are not listed twice.
 * NOTE(review): interior lines are elided in this view. */
3894 void av_dump_format(AVFormatContext *ic,
3900     uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3901     if (ic->nb_streams && !printed)
3904     av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3905            is_output ? "Output" : "Input",
3907            is_output ? ic->oformat->name : ic->iformat->name,
3908            is_output ? "to" : "from", url);
3909     dump_metadata(NULL, ic->metadata, "  ");
3911         av_log(NULL, AV_LOG_INFO, "  Duration: ");
3912         if (ic->duration != AV_NOPTS_VALUE) {
3913             int hours, mins, secs, us;
3914             secs = ic->duration / AV_TIME_BASE;
3915             us = ic->duration % AV_TIME_BASE;
             /* duration printed as HH:MM:SS.cc (centiseconds) */
3920             av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3921                    (100 * us) / AV_TIME_BASE);
3923             av_log(NULL, AV_LOG_INFO, "N/A");
3925         if (ic->start_time != AV_NOPTS_VALUE) {
3927             av_log(NULL, AV_LOG_INFO, ", start: ");
3928             secs = ic->start_time / AV_TIME_BASE;
             /* abs() keeps the microsecond part positive for negative
              * start times */
3929             us = abs(ic->start_time % AV_TIME_BASE);
3930             av_log(NULL, AV_LOG_INFO, "%d.%06d",
3931                    secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3933         av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3935             av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3937             av_log(NULL, AV_LOG_INFO, "N/A");
3939         av_log(NULL, AV_LOG_INFO, "\n");
     /* chapters: start/end expressed in seconds via the chapter time base */
3941     for (i = 0; i < ic->nb_chapters; i++) {
3942         AVChapter *ch = ic->chapters[i];
3943         av_log(NULL, AV_LOG_INFO, "    Chapter #%d.%d: ", index, i);
3944         av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3945         av_log(NULL, AV_LOG_INFO, "end %f\n",   ch->end   * av_q2d(ch->time_base));
3947         dump_metadata(NULL, ch->metadata, "    ");
     /* programs: list each program's streams and mark them as printed */
3949     if(ic->nb_programs) {
3950         int j, k, total = 0;
3951         for(j=0; j<ic->nb_programs; j++) {
3952             AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3954             av_log(NULL, AV_LOG_INFO, "  Program %d %s\n", ic->programs[j]->id,
3955                    name ? name->value : "");
3956             dump_metadata(NULL, ic->programs[j]->metadata, "    ");
3957             for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3958                 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3959                 printed[ic->programs[j]->stream_index[k]] = 1;
3961             total += ic->programs[j]->nb_stream_indexes;
3963         if (total < ic->nb_streams)
3964             av_log(NULL, AV_LOG_INFO, "  No Program\n");
     /* finally, streams not covered by any program */
3966     for(i=0;i<ic->nb_streams;i++)
3968             dump_stream_format(ic, i, index, is_output);
3973 #if FF_API_AV_GETTIME && CONFIG_SHARED && HAVE_SYMVER
/* Versioned-symbol compatibility shim: keeps the old LIBAVFORMAT_54
 * av_gettime symbol working by forwarding to the libavutil function. */
3974 FF_SYMVER(int64_t, av_gettime, (void), "LIBAVFORMAT_54")
3976     return av_gettime();
/* Current time in NTP epoch microseconds: wall-clock time truncated to
 * millisecond precision, shifted by the 1900->1970 epoch offset. */
3980 uint64_t ff_ntp_time(void)
3982   return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/* Expand a "%0Nd"-style pattern in 'path' with 'number' into 'buf'.
 * Fails when more than one %d is present, when none is present, or when
 * the result would overflow buf_size.
 * NOTE(review): interior lines are elided in this view. */
3985 int av_get_frame_filename(char *buf, int buf_size,
3986                           const char *path, int number)
3989     char *q, buf1[20], c;
3990     int nd, len, percentd_found;
     /* parse the optional zero-padding width after '%' */
4002             while (isdigit(*p)) {
4003                 nd = nd * 10 + *p++ - '0';
4006             } while (isdigit(c));
             /* render the number with the requested width */
4015                 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
4017                 if ((q - buf + len) > buf_size - 1)
4019                 memcpy(q, buf1, len);
4027         if ((q - buf) < buf_size - 1)
     /* exactly one %d must have been substituted */
4031     if (!percentd_found)
/* Dump 'size' bytes as 16-byte rows of hex plus a printable-ASCII column.
 * Output goes to FILE *f when non-NULL, otherwise to av_log at 'level'. */
4040 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
4044 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
4046     for(i=0;i<size;i+=16) {
4053                 PRINT(" %02x", buf[i+j]);
         /* ASCII column: non-printable bytes are replaced */
4058         for(j=0;j<len;j++) {
4060             if (c < ' ' || c > '~')
/* Public wrappers around hex_dump_internal: one targeting a stdio FILE,
 * one targeting av_log. */
4069 void av_hex_dump(FILE *f, uint8_t *buf, int size)
4071     hex_dump_internal(NULL, f, 0, buf, size);
4074 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
4076     hex_dump_internal(avcl, NULL, level, buf, size);
/* Dump an AVPacket's header fields (stream index, keyframe flag, duration,
 * dts, pts in seconds, size) and optionally hex-dump its payload.
 * Output goes to FILE *f when non-NULL, otherwise to av_log at 'level'. */
4079 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
4082 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
4083     PRINT("stream #%d:\n", pkt->stream_index);
4084     PRINT("  keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
4085     PRINT("  duration=%0.3f\n", pkt->duration * av_q2d(time_base));
4086     /* DTS is _always_ valid after av_read_frame() */
4088     if (pkt->dts == AV_NOPTS_VALUE)
4091         PRINT("%0.3f", pkt->dts * av_q2d(time_base));
4092     /* PTS may not be known if B-frames are present. */
4094     if (pkt->pts == AV_NOPTS_VALUE)
4097         PRINT("%0.3f", pkt->pts * av_q2d(time_base));
4099     PRINT("  size=%d\n", pkt->size);
4102         av_hex_dump(f, pkt->data, pkt->size);
/* Public wrappers around pkt_dump_internal. The plain variants assume
 * AV_TIME_BASE timestamps; the *2 variants use the stream's time base. */
4106 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
4108     AVRational tb = { 1, AV_TIME_BASE };
4109     pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
4113 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
4115     pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
4119 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
4121     AVRational tb = { 1, AV_TIME_BASE };
4122     pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
4126 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
4129     pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/* Split a URL of the form proto://user:pass@host:port/path?query into its
 * components. Any output pointer's buffer may be sized 0 to skip that
 * component; *port_ptr is set to -1 when no port is present.
 * NOTE(review): interior lines are elided in this view. */
4132 void av_url_split(char *proto, int proto_size,
4133                   char *authorization, int authorization_size,
4134                   char *hostname, int hostname_size,
4136                   char *path, int path_size,
4139     const char *p, *ls, *ls2, *at, *col, *brk;
     /* initialize all outputs to empty / -1 */
4141     if (port_ptr)               *port_ptr = -1;
4142     if (proto_size > 0)         proto[0] = 0;
4143     if (authorization_size > 0) authorization[0] = 0;
4144     if (hostname_size > 0)      hostname[0] = 0;
4145     if (path_size > 0)          path[0] = 0;
4147     /* parse protocol */
4148     if ((p = strchr(url, ':'))) {
4149         av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
4154         /* no protocol means plain filename */
4155         av_strlcpy(path, url, path_size);
4159     /* separate path from hostname */
4160     ls = strchr(p, '/');
4161     ls2 = strchr(p, '?');
     /* the path starts at the earlier of '/' or '?' */
4165             ls = FFMIN(ls, ls2);
4167         av_strlcpy(path, ls, path_size);
4169         ls = &p[strlen(p)]; // XXX
4171     /* the rest is hostname, use that to parse auth/port */
4173         /* authorization (user[:pass]@hostname) */
4174         if ((at = strchr(p, '@')) && at < ls) {
4175             av_strlcpy(authorization, p,
4176                        FFMIN(authorization_size, at + 1 - p));
4177             p = at + 1; /* skip '@' */
         /* bracketed IPv6 literal: [addr]:port */
4180         if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
4182             av_strlcpy(hostname, p + 1,
4183                        FFMIN(hostname_size, brk - p));
4184             if (brk[1] == ':' && port_ptr)
4185                 *port_ptr = atoi(brk + 2);
4186         } else if ((col = strchr(p, ':')) && col < ls) {
4187             av_strlcpy(hostname, p,
4188                        FFMIN(col + 1 - p, hostname_size));
4189             if (port_ptr) *port_ptr = atoi(col + 1);
4191             av_strlcpy(hostname, p,
4192                        FFMIN(ls + 1 - p, hostname_size));
/* Encode 's' bytes of 'src' into 'buff' as hex text (two chars per byte),
 * upper- or lowercase per 'lowercase'. Caller must provide at least 2*s
 * bytes (plus terminator, added on an elided line). */
4196 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
4199     static const char hex_table_uc[16] = { '0', '1', '2', '3',
4202                                            'C', 'D', 'E', 'F' };
4203     static const char hex_table_lc[16] = { '0', '1', '2', '3',
4206                                            'c', 'd', 'e', 'f' };
4207     const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
4209     for(i = 0; i < s; i++) {
4210         buff[i * 2]     = hex_table[src[i] >> 4];
4211         buff[i * 2 + 1] = hex_table[src[i] & 0xF];
/* Decode a whitespace-tolerant hex string 'p' into 'data'; digits are
 * parsed case-insensitively via toupper.
 * NOTE(review): interior lines are elided in this view. */
4217 int ff_hex_to_data(uint8_t *data, const char *p)
4224         p += strspn(p, SPACE_CHARS);
4227         c = toupper((unsigned char) *p++);
4228         if (c >= '0' && c <= '9')
4230         else if (c >= 'A' && c <= 'F')
4245 #if FF_API_SET_PTS_INFO
/* Deprecated public wrapper; forwards unchanged to avpriv_set_pts_info. */
4246 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
4247                      unsigned int pts_num, unsigned int pts_den)
4249     avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
/* Set a stream's time base to pts_num/pts_den (reduced) and its timestamp
 * wrap width. Invalid (non-positive) time bases are logged and ignored. */
4253 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
4254                          unsigned int pts_num, unsigned int pts_den)
     /* av_reduce returns nonzero when the exact reduced fraction fits */
4257     if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
4258         if(new_tb.num != pts_num)
4259             av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
4261         av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
4263     if(new_tb.num <= 0 || new_tb.den <= 0) {
4264         av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase %d/%d for st:%d\n", new_tb.num, new_tb.den, s->index);
4267     s->time_base = new_tb;
     /* keep the decoder context's packet timebase in sync */
4268     av_codec_set_pkt_timebase(s->codec, new_tb);
4269     s->pts_wrap_bits = pts_wrap_bits;
/* Assemble a URL "proto://auth@host:port<fmt...>" into 'str' (size bytes).
 * Numeric IPv6 hostnames are wrapped in brackets when networking support
 * is compiled in. The trailing fmt/... is printf-style.
 * NOTE(review): interior lines are elided in this view. */
4272 int ff_url_join(char *str, int size, const char *proto,
4273                 const char *authorization, const char *hostname,
4274                 int port, const char *fmt, ...)
4277     struct addrinfo hints = { 0 }, *ai;
4282         av_strlcatf(str, size, "%s://", proto);
4283     if (authorization && authorization[0])
4284         av_strlcatf(str, size, "%s@", authorization);
4285 #if CONFIG_NETWORK && defined(AF_INET6)
4286     /* Determine if hostname is a numerical IPv6 address,
4287      * properly escape it within [] in that case. */
4288     hints.ai_flags = AI_NUMERICHOST;
4289     if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
4290         if (ai->ai_family == AF_INET6) {
4291             av_strlcat(str, "[", size);
4292             av_strlcat(str, hostname, size);
4293             av_strlcat(str, "]", size);
4295             av_strlcat(str, hostname, size);
4300         /* Not an IPv6 address, just output the plain string. */
4301         av_strlcat(str, hostname, size);
4304         av_strlcatf(str, size, ":%d", port);
     /* append the formatted tail (typically the path) */
4307         int len = strlen(str);
4310         vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
/* Forward a packet from a source context into a chained (nested) muxer:
 * retarget the stream index and rescale pts/dts into the destination
 * stream's time base, then write it. */
4316 int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
4317                      AVFormatContext *src)
4322     local_pkt.stream_index = dst_stream;
4323     if (pkt->pts != AV_NOPTS_VALUE)
4324         local_pkt.pts = av_rescale_q(pkt->pts,
4325                                      src->streams[pkt->stream_index]->time_base,
4326                                      dst->streams[dst_stream]->time_base);
4327     if (pkt->dts != AV_NOPTS_VALUE)
4328         local_pkt.dts = av_rescale_q(pkt->dts,
4329                                      src->streams[pkt->stream_index]->time_base,
4330                                      dst->streams[dst_stream]->time_base);
4331     return av_write_frame(dst, &local_pkt);
/* Parse a comma-separated list of key=value pairs (values optionally in
 * double quotes, with escapes on elided lines), invoking callback_get_buf
 * for each key to obtain the destination buffer for its value.
 * NOTE(review): interior lines are elided in this view. */
4334 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
4337     const char *ptr = str;
4339     /* Parse key=value pairs. */
4342         char *dest = NULL, *dest_end;
4343         int key_len, dest_len = 0;
4345         /* Skip whitespace and potential commas. */
4346         while (*ptr && (isspace(*ptr) || *ptr == ','))
4353         if (!(ptr = strchr(key, '=')))
4356         key_len = ptr - key;
         /* let the caller pick the value buffer for this key */
4358         callback_get_buf(context, key, key_len, &dest, &dest_len);
4359         dest_end = dest + dest_len - 1;
         /* quoted value: copy until the closing quote */
4363             while (*ptr && *ptr != '\"') {
4367                     if (dest && dest < dest_end)
4371                     if (dest && dest < dest_end)
         /* unquoted value: copy until whitespace or comma */
4379             for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
4380                 if (dest && dest < dest_end)
/* Linear search for the stream whose 'id' matches; returns its index
 * (not-found return value is on an elided line). */
4388 int ff_find_stream_index(AVFormatContext *s, int id)
4391     for (i = 0; i < s->nb_streams; i++) {
4392         if (s->streams[i]->id == id)
/* Resolve 'rel' against 'base' into 'buf': handles already-absolute URLs,
 * server-root-relative paths ("/..."), and "../" path popping.
 * NOTE(review): interior lines are elided in this view. */
4398 void ff_make_absolute_url(char *buf, int size, const char *base,
4402     /* Absolute path, relative to the current server */
4403     if (base && strstr(base, "://") && rel[0] == '/') {
4405             av_strlcpy(buf, base, size);
4406         sep = strstr(buf, "://");
         /* cut the base after the host part, then append rel */
4409             sep = strchr(sep, '/');
4413         av_strlcat(buf, rel, size);
4416     /* If rel actually is an absolute url, just copy it */
4417     if (!base || strstr(rel, "://") || rel[0] == '/') {
4418         av_strlcpy(buf, rel, size);
4422     av_strlcpy(buf, base, size);
4423     /* Remove the file name from the base url */
4424     sep = strrchr(buf, '/');
     /* pop one trailing directory per leading "../" in rel */
4429     while (av_strstart(rel, "../", NULL) && sep) {
4430         /* Remove the path delimiter at the end */
4432         sep = strrchr(buf, '/');
4433         /* If the next directory name to pop off is "..", break here */
4434         if (!strcmp(sep ? &sep[1] : buf, "..")) {
4435             /* Readd the slash we just removed */
4436             av_strlcat(buf, "/", size);
4439         /* Cut off the directory name */
4446     av_strlcat(buf, rel, size);
/* Parse an ISO-8601-style date string ("YYYY-MM-DD hh:mm:ss" or the
 * 'T'-separated form) into a Unix timestamp via av_timegm (UTC).
 * The spaces around '-' in the strptime patterns make the separators
 * tolerant of surrounding whitespace. */
4449 int64_t ff_iso8601_to_unix_time(const char *datestr)
4452     struct tm time1 = {0}, time2 = {0};
4454     ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
4455     ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
     /* prefer the 'T'-separated parse when it consumed more input */
4457         return av_timegm(&time2);
4459         return av_timegm(&time1);
     /* fallback path for systems without strptime */
4461     av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert "
4462            "the date string.\n");
/* Ask whether 'ofmt' can mux 'codec_id': delegate to the muxer's
 * query_codec hook, else check the codec tag table, else compare against
 * the format's default video/audio/subtitle codecs. */
4467 int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance)
4470         if (ofmt->query_codec)
4471             return ofmt->query_codec(codec_id, std_compliance);
4472         else if (ofmt->codec_tag)
4473             return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4474         else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
4475                  codec_id == ofmt->subtitle_codec)
4478     return AVERROR_PATCHWELCOME;
/* Global network (de)initialization. The flag records that the
 * application called init explicitly, so per-open implicit init can be
 * skipped elsewhere. */
4481 int avformat_network_init(void)
4485     ff_network_inited_globally = 1;
4486     if ((ret = ff_network_init()) < 0)
4493 int avformat_network_deinit(void)
/* Attach an AV_PKT_DATA_PARAM_CHANGE side-data blob to 'pkt' describing
 * which stream parameters changed: a little-endian flags word followed by
 * the new values, in the fixed order channels, channel_layout,
 * sample_rate, width+height. */
4502 int ff_add_param_change(AVPacket *pkt, int32_t channels,
4503                         uint64_t channel_layout, int32_t sample_rate,
4504                         int32_t width, int32_t height)
4510         return AVERROR(EINVAL);
4513         flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4515     if (channel_layout) {
4517         flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4521         flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4523     if (width || height) {
4525         flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
4527     data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4529         return AVERROR(ENOMEM);
     /* serialize: flags first, then each present field in declaration order */
4530     bytestream_put_le32(&data, flags);
4532         bytestream_put_le32(&data, channels);
4534         bytestream_put_le64(&data, channel_layout);
4536         bytestream_put_le32(&data, sample_rate);
4537     if (width || height) {
4538         bytestream_put_le32(&data, width);
4539         bytestream_put_le32(&data, height);
/* Accessors exposing the internal RIFF codec-tag tables (BMP for video,
 * WAV for audio) to library users. */
4544 const struct AVCodecTag *avformat_get_riff_video_tags(void)
4546     return ff_codec_bmp_tags;
4548 const struct AVCodecTag *avformat_get_riff_audio_tags(void)
4550     return ff_codec_wav_tags;
/* Pick the most trustworthy sample aspect ratio: the stream-level value
 * wins when valid, otherwise the frame-level value (which itself defaults
 * to the codec-level one). Each candidate is reduced and invalidated to
 * {0,1} when non-positive. */
4553 AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4555     AVRational undef = {0, 1};
4556     AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4557     AVRational codec_sample_aspect_ratio  = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
4558     AVRational frame_sample_aspect_ratio  = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
4560     av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4561                stream_sample_aspect_ratio.num,  stream_sample_aspect_ratio.den, INT_MAX);
4562     if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4563         stream_sample_aspect_ratio = undef;
4565     av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4566                frame_sample_aspect_ratio.num,  frame_sample_aspect_ratio.den, INT_MAX);
4567     if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4568         frame_sample_aspect_ratio = undef;
4570     if (stream_sample_aspect_ratio.num)
4571         return stream_sample_aspect_ratio;
4573         return frame_sample_aspect_ratio;
/* Match 'st' against a stream specifier string:
 *   "<index>"        — exact stream index
 *   "v"/"a"/"s"/"d"/"t"[:n] — media type, optionally the n-th of that type
 *   "p:<id>"[:n]     — program by id, optionally the n-th stream within it
 *   "#<id>"          — stream id (e.g. MPEG-TS PID)
 *   ""               — matches everything
 * Returns >0 on match, 0 on no match, AVERROR(EINVAL) on a bad specifier.
 * NOTE(review): some interior lines are elided in this view. */
4576 int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,
4579     if (*spec <= '9' && *spec >= '0') /* opt:index */
4580         return strtol(spec, NULL, 0) == st->index;
4581     else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
4582              *spec == 't') { /* opt:[vasdt] */
4583         enum AVMediaType type;
4586         case 'v': type = AVMEDIA_TYPE_VIDEO;      break;
4587         case 'a': type = AVMEDIA_TYPE_AUDIO;      break;
4588         case 's': type = AVMEDIA_TYPE_SUBTITLE;   break;
4589         case 'd': type = AVMEDIA_TYPE_DATA;       break;
4590         case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
4591         default:  av_assert0(0);
4593         if (type != st->codec->codec_type)
4595         if (*spec++ == ':') { /* possibly followed by :index */
             /* count down through streams of the requested type to find
              * the index-th one */
4596             int i, index = strtol(spec, NULL, 0);
4597             for (i = 0; i < s->nb_streams; i++)
4598                 if (s->streams[i]->codec->codec_type == type && index-- == 0)
4599                    return i == st->index;
4603     } else if (*spec == 'p' && *(spec + 1) == ':') {
4607         prog_id = strtol(spec, &endptr, 0);
4608         for (i = 0; i < s->nb_programs; i++) {
4609             if (s->programs[i]->id != prog_id)
             /* optional :n selects one stream within the program */
4612             if (*endptr++ == ':') {
4613                 int stream_idx = strtol(endptr, NULL, 0);
4614                 return stream_idx >= 0 &&
4615                     stream_idx < s->programs[i]->nb_stream_indexes &&
4616                     st->index == s->programs[i]->stream_index[stream_idx];
             /* otherwise match any stream belonging to the program */
4619             for (j = 0; j < s->programs[i]->nb_stream_indexes; j++)
4620                 if (st->index == s->programs[i]->stream_index[j])
4624     } else if (*spec == '#') {
4627         sid = strtol(spec + 1, &endptr, 0);
4629             return st->id == sid;
4630     } else if (!*spec) /* empty specifier, matches everything */
4633     av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
4634     return AVERROR(EINVAL);