2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libavcodec/opt.h"
24 #include "libavutil/avstring.h"
34 * @file libavformat/utils.c
35 * various utility functions for use within FFmpeg
38 unsigned avformat_version(void)
40 return LIBAVFORMAT_VERSION_INT;
43 /* fraction handling */
46 * f = val + (num / den) + 0.5.
48 * 'num' is normalized so that it is such as 0 <= num < den.
50 * @param f fractional number
51 * @param val integer value
52 * @param num must be >= 0
53 * @param den must be >= 1
55 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
68 * Fractional addition to f: f = f + (incr / f->den).
70 * @param f fractional number
71 * @param incr increment, can be positive or negative
73 static void av_frac_add(AVFrac *f, int64_t incr)
86 } else if (num >= den) {
93 /** head of registered input format linked list */
94 AVInputFormat *first_iformat = NULL;
95 /** head of registered output format linked list */
96 AVOutputFormat *first_oformat = NULL;
98 AVInputFormat *av_iformat_next(AVInputFormat *f)
100 if(f) return f->next;
101 else return first_iformat;
104 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
106 if(f) return f->next;
107 else return first_oformat;
110 void av_register_input_format(AVInputFormat *format)
114 while (*p != NULL) p = &(*p)->next;
119 void av_register_output_format(AVOutputFormat *format)
123 while (*p != NULL) p = &(*p)->next;
/**
 * Check whether the file name's extension matches one of a
 * comma-separated list of extensions (case-insensitive).
 *
 * The extension is everything after the last '.' in filename.
 * This copy of the function had its loop scaffolding and declarations
 * dropped; restored per upstream FFmpeg.
 *
 * @param filename   file name to test; NULL yields 0
 * @param extensions comma-separated extension list, e.g. "mp3,wav"
 * @return 1 on match, 0 otherwise
 */
int match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if (!filename)
        return 0;

    ext = strrchr(filename, '.');
    if (ext) {
        ext++;
        p = extensions;
        for (;;) {
            /* copy the next list entry, truncated to the local buffer */
            q = ext1;
            while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
                *q++ = *p++;
            *q = '\0';
            if (!strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++;
        }
    }
    return 0;
}
155 AVOutputFormat *guess_format(const char *short_name, const char *filename,
156 const char *mime_type)
158 AVOutputFormat *fmt, *fmt_found;
159 int score_max, score;
161 /* specific test for image sequences */
162 #ifdef CONFIG_IMAGE2_MUXER
163 if (!short_name && filename &&
164 av_filename_number_test(filename) &&
165 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
166 return guess_format("image2", NULL, NULL);
169 /* Find the proper file type. */
173 while (fmt != NULL) {
175 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
177 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
179 if (filename && fmt->extensions &&
180 match_ext(filename, fmt->extensions)) {
183 if (score > score_max) {
192 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
193 const char *mime_type)
195 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
198 AVOutputFormat *stream_fmt;
199 char stream_format_name[64];
201 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
202 stream_fmt = guess_format(stream_format_name, NULL, NULL);
211 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
212 const char *filename, const char *mime_type, enum CodecType type){
213 if(type == CODEC_TYPE_VIDEO){
214 enum CodecID codec_id= CODEC_ID_NONE;
216 #ifdef CONFIG_IMAGE2_MUXER
217 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
218 codec_id= av_guess_image2_codec(filename);
221 if(codec_id == CODEC_ID_NONE)
222 codec_id= fmt->video_codec;
224 }else if(type == CODEC_TYPE_AUDIO)
225 return fmt->audio_codec;
227 return CODEC_ID_NONE;
230 AVInputFormat *av_find_input_format(const char *short_name)
233 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
234 if (!strcmp(fmt->name, short_name))
240 /* memory handling */
242 void av_destruct_packet(AVPacket *pkt)
245 pkt->data = NULL; pkt->size = 0;
248 void av_init_packet(AVPacket *pkt)
250 pkt->pts = AV_NOPTS_VALUE;
251 pkt->dts = AV_NOPTS_VALUE;
254 pkt->convergence_duration = 0;
256 pkt->stream_index = 0;
257 pkt->destruct= av_destruct_packet_nofree;
260 int av_new_packet(AVPacket *pkt, int size)
263 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
264 return AVERROR(ENOMEM);
265 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
267 return AVERROR(ENOMEM);
268 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
273 pkt->destruct = av_destruct_packet;
277 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
279 int ret= av_new_packet(pkt, size);
284 pkt->pos= url_ftell(s);
286 ret= get_buffer(s, pkt->data, size);
295 int av_dup_packet(AVPacket *pkt)
297 if (pkt->destruct != av_destruct_packet) {
299 /* We duplicate the packet and don't forget to add the padding again. */
300 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
301 return AVERROR(ENOMEM);
302 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
304 return AVERROR(ENOMEM);
306 memcpy(data, pkt->data, pkt->size);
307 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
309 pkt->destruct = av_destruct_packet;
/**
 * Check whether filename contains a valid frame-number pattern
 * (e.g. "img%03d.png") by attempting to expand it once.
 *
 * The scratch-buffer declaration was missing from this copy; restored
 * per upstream FFmpeg.
 *
 * @return 1 if the pattern expands successfully, 0 otherwise
 */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
}
320 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
322 AVInputFormat *fmt1, *fmt;
326 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
327 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
330 if (fmt1->read_probe) {
331 score = fmt1->read_probe(pd);
332 } else if (fmt1->extensions) {
333 if (match_ext(pd->filename, fmt1->extensions)) {
337 if (score > *score_max) {
340 }else if (score == *score_max)
346 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
348 return av_probe_input_format2(pd, is_opened, &score);
351 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
354 fmt = av_probe_input_format2(pd, 1, &score);
357 if (!strcmp(fmt->name, "mp3")) {
358 st->codec->codec_id = CODEC_ID_MP3;
359 st->codec->codec_type = CODEC_TYPE_AUDIO;
360 } else if (!strcmp(fmt->name, "ac3")) {
361 st->codec->codec_id = CODEC_ID_AC3;
362 st->codec->codec_type = CODEC_TYPE_AUDIO;
363 } else if (!strcmp(fmt->name, "mpegvideo")) {
364 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
365 st->codec->codec_type = CODEC_TYPE_VIDEO;
366 } else if (!strcmp(fmt->name, "m4v")) {
367 st->codec->codec_id = CODEC_ID_MPEG4;
368 st->codec->codec_type = CODEC_TYPE_VIDEO;
369 } else if (!strcmp(fmt->name, "h264")) {
370 st->codec->codec_id = CODEC_ID_H264;
371 st->codec->codec_type = CODEC_TYPE_VIDEO;
377 /************************************************************/
378 /* input media file */
381 * Open a media file from an IO stream. 'fmt' must be specified.
383 static const char* format_to_name(void* ptr)
385 AVFormatContext* fc = (AVFormatContext*) ptr;
386 if(fc->iformat) return fc->iformat->name;
387 else if(fc->oformat) return fc->oformat->name;
391 #define OFFSET(x) offsetof(AVFormatContext,x)
392 #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
393 //these names are too long to be readable
394 #define E AV_OPT_FLAG_ENCODING_PARAM
395 #define D AV_OPT_FLAG_DECODING_PARAM
397 static const AVOption options[]={
398 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
399 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
400 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
401 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
402 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
403 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
404 {"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
405 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
406 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
407 {"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
408 {"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D},
409 {"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
410 {"fdebug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, DEFAULT, 0, INT_MAX, E|D, "fdebug"},
411 {"ts", NULL, 0, FF_OPT_TYPE_CONST, FF_FDEBUG_TS, INT_MIN, INT_MAX, E|D, "fdebug"},
419 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
421 static void avformat_get_context_defaults(AVFormatContext *s)
423 memset(s, 0, sizeof(AVFormatContext));
425 s->av_class = &av_format_context_class;
427 av_opt_set_defaults(s);
430 AVFormatContext *av_alloc_format_context(void)
433 ic = av_malloc(sizeof(AVFormatContext));
435 avformat_get_context_defaults(ic);
436 ic->av_class = &av_format_context_class;
440 int av_open_input_stream(AVFormatContext **ic_ptr,
441 ByteIOContext *pb, const char *filename,
442 AVInputFormat *fmt, AVFormatParameters *ap)
446 AVFormatParameters default_ap;
450 memset(ap, 0, sizeof(default_ap));
453 if(!ap->prealloced_context)
454 ic = av_alloc_format_context();
458 err = AVERROR(ENOMEM);
463 ic->duration = AV_NOPTS_VALUE;
464 ic->start_time = AV_NOPTS_VALUE;
465 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
467 /* allocate private data */
468 if (fmt->priv_data_size > 0) {
469 ic->priv_data = av_mallocz(fmt->priv_data_size);
470 if (!ic->priv_data) {
471 err = AVERROR(ENOMEM);
475 ic->priv_data = NULL;
478 if (ic->iformat->read_header) {
479 err = ic->iformat->read_header(ic, ap);
484 if (pb && !ic->data_offset)
485 ic->data_offset = url_ftell(ic->pb);
492 av_freep(&ic->priv_data);
493 for(i=0;i<ic->nb_streams;i++) {
494 AVStream *st = ic->streams[i];
496 av_free(st->priv_data);
497 av_free(st->codec->extradata);
507 /** size of probe buffer, for guessing file type from file contents */
508 #define PROBE_BUF_MIN 2048
509 #define PROBE_BUF_MAX (1<<20)
511 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
514 AVFormatParameters *ap)
517 AVProbeData probe_data, *pd = &probe_data;
518 ByteIOContext *pb = NULL;
522 pd->filename = filename;
527 /* guess format if no file can be opened */
528 fmt = av_probe_input_format(pd, 0);
531 /* Do not open file if the format does not need it. XXX: specific
532 hack needed to handle RTSP/TCP */
533 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
534 /* if no file needed do not try to open one */
535 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
539 url_setbufsize(pb, buf_size);
542 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
543 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
544 /* read probe data */
545 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
546 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
547 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
548 if (url_fseek(pb, 0, SEEK_SET) < 0) {
550 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
556 /* guess file format */
557 fmt = av_probe_input_format2(pd, 1, &score);
562 /* if still no format found, error */
568 /* check filename in case an image number is expected */
569 if (fmt->flags & AVFMT_NEEDNUMBER) {
570 if (!av_filename_number_test(filename)) {
571 err = AVERROR_NUMEXPECTED;
575 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
588 /*******************************************************/
590 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
591 AVPacketList **plast_pktl){
592 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
597 (*plast_pktl)->next = pktl;
599 *packet_buffer = pktl;
601 /* add the packet in the buffered packet list */
607 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
613 AVPacketList *pktl = s->raw_packet_buffer;
617 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
618 s->raw_packet_buffer = pktl->next;
625 ret= s->iformat->read_packet(s, pkt);
628 st= s->streams[pkt->stream_index];
630 switch(st->codec->codec_type){
631 case CODEC_TYPE_VIDEO:
632 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
634 case CODEC_TYPE_AUDIO:
635 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
637 case CODEC_TYPE_SUBTITLE:
638 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
642 if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
645 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
647 if(st->codec->codec_id == CODEC_ID_PROBE){
648 AVProbeData *pd = &st->probe_data;
650 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
651 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
652 pd->buf_size += pkt->size;
653 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
655 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
656 set_codec_from_probe_data(st, pd, 1);
657 if(st->codec->codec_id != CODEC_ID_PROBE){
666 /**********************************************************/
669 * Get the number of samples of an audio frame. Return -1 on error.
671 static int get_audio_frame_size(AVCodecContext *enc, int size)
675 if(enc->codec_id == CODEC_ID_VORBIS)
678 if (enc->frame_size <= 1) {
679 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
681 if (bits_per_sample) {
682 if (enc->channels == 0)
684 frame_size = (size << 3) / (bits_per_sample * enc->channels);
686 /* used for example by ADPCM codecs */
687 if (enc->bit_rate == 0)
689 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
692 frame_size = enc->frame_size;
699 * Return the frame duration in seconds. Return 0 if not available.
701 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
702 AVCodecParserContext *pc, AVPacket *pkt)
708 switch(st->codec->codec_type) {
709 case CODEC_TYPE_VIDEO:
710 if(st->time_base.num*1000LL > st->time_base.den){
711 *pnum = st->time_base.num;
712 *pden = st->time_base.den;
713 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
714 *pnum = st->codec->time_base.num;
715 *pden = st->codec->time_base.den;
716 if (pc && pc->repeat_pict) {
718 *pnum = (*pnum) * (2 + pc->repeat_pict);
722 case CODEC_TYPE_AUDIO:
723 frame_size = get_audio_frame_size(st->codec, pkt->size);
727 *pden = st->codec->sample_rate;
734 static int is_intra_only(AVCodecContext *enc){
735 if(enc->codec_type == CODEC_TYPE_AUDIO){
737 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
738 switch(enc->codec_id){
740 case CODEC_ID_MJPEGB:
742 case CODEC_ID_RAWVIDEO:
743 case CODEC_ID_DVVIDEO:
744 case CODEC_ID_HUFFYUV:
745 case CODEC_ID_FFVHUFF:
756 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
757 int64_t dts, int64_t pts)
759 AVStream *st= s->streams[stream_index];
760 AVPacketList *pktl= s->packet_buffer;
762 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
765 st->first_dts= dts - st->cur_dts;
768 for(; pktl; pktl= pktl->next){
769 if(pktl->pkt.stream_index != stream_index)
771 //FIXME think more about this check
772 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
773 pktl->pkt.pts += st->first_dts;
775 if(pktl->pkt.dts != AV_NOPTS_VALUE)
776 pktl->pkt.dts += st->first_dts;
778 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
779 st->start_time= pktl->pkt.pts;
781 if (st->start_time == AV_NOPTS_VALUE)
782 st->start_time = pts;
785 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
787 AVPacketList *pktl= s->packet_buffer;
790 if(st->first_dts != AV_NOPTS_VALUE){
791 cur_dts= st->first_dts;
792 for(; pktl; pktl= pktl->next){
793 if(pktl->pkt.stream_index == pkt->stream_index){
794 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
796 cur_dts -= pkt->duration;
799 pktl= s->packet_buffer;
800 st->first_dts = cur_dts;
801 }else if(st->cur_dts)
804 for(; pktl; pktl= pktl->next){
805 if(pktl->pkt.stream_index != pkt->stream_index)
807 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
808 && !pktl->pkt.duration){
809 pktl->pkt.dts= cur_dts;
810 if(!st->codec->has_b_frames)
811 pktl->pkt.pts= cur_dts;
812 cur_dts += pkt->duration;
813 pktl->pkt.duration= pkt->duration;
817 if(st->first_dts == AV_NOPTS_VALUE)
818 st->cur_dts= cur_dts;
821 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
822 AVCodecParserContext *pc, AVPacket *pkt)
824 int num, den, presentation_delayed, delay, i;
827 /* do we have a video B-frame ? */
828 delay= st->codec->has_b_frames;
829 presentation_delayed = 0;
830 /* XXX: need has_b_frame, but cannot get it if the codec is
833 pc && pc->pict_type != FF_B_TYPE)
834 presentation_delayed = 1;
836 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
837 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
838 pkt->dts -= 1LL<<st->pts_wrap_bits;
841 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
842 // we take the conservative approach and discard both
843 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
844 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
845 av_log(s, AV_LOG_ERROR, "invalid dts/pts combination\n");
846 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
849 if (pkt->duration == 0) {
850 compute_frame_duration(&num, &den, st, pc, pkt);
852 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
854 if(pkt->duration != 0 && s->packet_buffer)
855 update_initial_durations(s, st, pkt);
859 /* correct timestamps with byte offset if demuxers only have timestamps
860 on packet boundaries */
861 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
862 /* this will estimate bitrate based on this frame's duration and size */
863 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
864 if(pkt->pts != AV_NOPTS_VALUE)
866 if(pkt->dts != AV_NOPTS_VALUE)
870 /* This may be redundant, but it should not hurt. */
871 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
872 presentation_delayed = 1;
874 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
875 /* interpolate PTS and DTS if they are not present */
876 if(delay==0 || (delay==1 && pc)){
877 if (presentation_delayed) {
878 /* DTS = decompression timestamp */
879 /* PTS = presentation timestamp */
880 if (pkt->dts == AV_NOPTS_VALUE)
881 pkt->dts = st->last_IP_pts;
882 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
883 if (pkt->dts == AV_NOPTS_VALUE)
884 pkt->dts = st->cur_dts;
886 /* this is tricky: the dts must be incremented by the duration
887 of the frame we are displaying, i.e. the last I- or P-frame */
888 if (st->last_IP_duration == 0)
889 st->last_IP_duration = pkt->duration;
890 if(pkt->dts != AV_NOPTS_VALUE)
891 st->cur_dts = pkt->dts + st->last_IP_duration;
892 st->last_IP_duration = pkt->duration;
893 st->last_IP_pts= pkt->pts;
894 /* cannot compute PTS if not present (we can compute it only
895 by knowing the future */
896 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
897 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
898 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
899 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
900 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
901 pkt->pts += pkt->duration;
902 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
906 /* presentation is not delayed : PTS and DTS are the same */
907 if(pkt->pts == AV_NOPTS_VALUE)
909 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
910 if(pkt->pts == AV_NOPTS_VALUE)
911 pkt->pts = st->cur_dts;
913 if(pkt->pts != AV_NOPTS_VALUE)
914 st->cur_dts = pkt->pts + pkt->duration;
918 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
919 st->pts_buffer[0]= pkt->pts;
920 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
921 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
922 if(pkt->dts == AV_NOPTS_VALUE)
923 pkt->dts= st->pts_buffer[0];
925 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
927 if(pkt->dts > st->cur_dts)
928 st->cur_dts = pkt->dts;
931 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
934 if(is_intra_only(st->codec))
935 pkt->flags |= PKT_FLAG_KEY;
938 /* keyframe computation */
939 if (pc->pict_type == FF_I_TYPE)
940 pkt->flags |= PKT_FLAG_KEY;
944 void av_destruct_packet_nofree(AVPacket *pkt)
946 pkt->data = NULL; pkt->size = 0;
949 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
957 /* select current input stream component */
960 if (!st->need_parsing || !st->parser) {
961 /* no parsing needed: we just output the packet as is */
962 /* raw data support */
964 compute_pkt_fields(s, st, NULL, pkt);
967 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
968 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
969 s->cur_ptr, s->cur_len,
970 s->cur_pkt.pts, s->cur_pkt.dts);
971 s->cur_pkt.pts = AV_NOPTS_VALUE;
972 s->cur_pkt.dts = AV_NOPTS_VALUE;
973 /* increment read pointer */
977 /* return packet if any */
980 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
982 pkt->stream_index = st->index;
983 pkt->pts = st->parser->pts;
984 pkt->dts = st->parser->dts;
985 pkt->destruct = av_destruct_packet_nofree;
986 compute_pkt_fields(s, st, st->parser, pkt);
988 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
989 ff_reduce_index(s, st->index);
990 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
991 0, 0, AVINDEX_KEYFRAME);
998 av_free_packet(&s->cur_pkt);
1002 /* read next packet */
1003 ret = av_read_packet(s, &s->cur_pkt);
1005 if (ret == AVERROR(EAGAIN))
1007 /* return the last frames, if any */
1008 for(i = 0; i < s->nb_streams; i++) {
1010 if (st->parser && st->need_parsing) {
1011 av_parser_parse(st->parser, st->codec,
1012 &pkt->data, &pkt->size,
1014 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
1019 /* no more packets: really terminate parsing */
1023 if(s->cur_pkt.pts != AV_NOPTS_VALUE &&
1024 s->cur_pkt.dts != AV_NOPTS_VALUE &&
1025 s->cur_pkt.pts < s->cur_pkt.dts){
1026 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1027 s->cur_pkt.stream_index,
1031 // av_free_packet(&s->cur_pkt);
1035 st = s->streams[s->cur_pkt.stream_index];
1036 if(s->debug & FF_FDEBUG_TS)
1037 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1038 s->cur_pkt.stream_index,
1045 s->cur_ptr = s->cur_pkt.data;
1046 s->cur_len = s->cur_pkt.size;
1047 if (st->need_parsing && !st->parser) {
1048 st->parser = av_parser_init(st->codec->codec_id);
1050 /* no parser available: just output the raw packets */
1051 st->need_parsing = AVSTREAM_PARSE_NONE;
1052 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1053 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1055 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1056 st->parser->next_frame_offset=
1057 st->parser->cur_offset= s->cur_pkt.pos;
1062 if(s->debug & FF_FDEBUG_TS)
1063 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1073 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1077 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1080 pktl = s->packet_buffer;
1082 AVPacket *next_pkt= &pktl->pkt;
1084 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1085 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1086 if( pktl->pkt.stream_index == next_pkt->stream_index
1087 && next_pkt->dts < pktl->pkt.dts
1088 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1089 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1090 next_pkt->pts= pktl->pkt.dts;
1094 pktl = s->packet_buffer;
1097 if( next_pkt->pts != AV_NOPTS_VALUE
1098 || next_pkt->dts == AV_NOPTS_VALUE
1100 /* read packet from packet buffer, if there is data */
1102 s->packet_buffer = pktl->next;
1108 int ret= av_read_frame_internal(s, pkt);
1110 if(pktl && ret != AVERROR(EAGAIN)){
1117 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1118 &s->packet_buffer_end)) < 0)
1119 return AVERROR(ENOMEM);
1121 assert(!s->packet_buffer);
1122 return av_read_frame_internal(s, pkt);
1127 /* XXX: suppress the packet queue */
1128 static void flush_packet_queue(AVFormatContext *s)
1133 pktl = s->packet_buffer;
1136 s->packet_buffer = pktl->next;
1137 av_free_packet(&pktl->pkt);
1142 /*******************************************************/
1145 int av_find_default_stream_index(AVFormatContext *s)
1147 int first_audio_index = -1;
1151 if (s->nb_streams <= 0)
1153 for(i = 0; i < s->nb_streams; i++) {
1155 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1158 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1159 first_audio_index = i;
1161 return first_audio_index >= 0 ? first_audio_index : 0;
1165 * Flush the frame reader.
1167 static void av_read_frame_flush(AVFormatContext *s)
1172 flush_packet_queue(s);
1174 /* free previous packet */
1176 if (s->cur_st->parser)
1177 av_free_packet(&s->cur_pkt);
1184 /* for each stream, reset read state */
1185 for(i = 0; i < s->nb_streams; i++) {
1189 av_parser_close(st->parser);
1192 st->last_IP_pts = AV_NOPTS_VALUE;
1193 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1197 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1200 for(i = 0; i < s->nb_streams; i++) {
1201 AVStream *st = s->streams[i];
1203 st->cur_dts = av_rescale(timestamp,
1204 st->time_base.den * (int64_t)ref_st->time_base.num,
1205 st->time_base.num * (int64_t)ref_st->time_base.den);
1209 void ff_reduce_index(AVFormatContext *s, int stream_index)
1211 AVStream *st= s->streams[stream_index];
1212 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1214 if((unsigned)st->nb_index_entries >= max_entries){
1216 for(i=0; 2*i<st->nb_index_entries; i++)
1217 st->index_entries[i]= st->index_entries[2*i];
1218 st->nb_index_entries= i;
1222 int av_add_index_entry(AVStream *st,
1223 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1225 AVIndexEntry *entries, *ie;
1228 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1231 entries = av_fast_realloc(st->index_entries,
1232 &st->index_entries_allocated_size,
1233 (st->nb_index_entries + 1) *
1234 sizeof(AVIndexEntry));
1238 st->index_entries= entries;
1240 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1243 index= st->nb_index_entries++;
1244 ie= &entries[index];
1245 assert(index==0 || ie[-1].timestamp < timestamp);
1247 ie= &entries[index];
1248 if(ie->timestamp != timestamp){
1249 if(ie->timestamp <= timestamp)
1251 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1252 st->nb_index_entries++;
1253 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1254 distance= ie->min_distance;
1258 ie->timestamp = timestamp;
1259 ie->min_distance= distance;
1266 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1269 AVIndexEntry *entries= st->index_entries;
1270 int nb_entries= st->nb_index_entries;
1279 timestamp = entries[m].timestamp;
1280 if(timestamp >= wanted_timestamp)
1282 if(timestamp <= wanted_timestamp)
1285 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1287 if(!(flags & AVSEEK_FLAG_ANY)){
1288 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1289 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1300 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1301 AVInputFormat *avif= s->iformat;
1302 int64_t pos_min, pos_max, pos, pos_limit;
1303 int64_t ts_min, ts_max, ts;
1307 if (stream_index < 0)
1311 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1315 ts_min= AV_NOPTS_VALUE;
1316 pos_limit= -1; //gcc falsely says it may be uninitialized
1318 st= s->streams[stream_index];
1319 if(st->index_entries){
1322 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1323 index= FFMAX(index, 0);
1324 e= &st->index_entries[index];
1326 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1328 ts_min= e->timestamp;
1330 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1337 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1338 assert(index < st->nb_index_entries);
1340 e= &st->index_entries[index];
1341 assert(e->timestamp >= target_ts);
1343 ts_max= e->timestamp;
1344 pos_limit= pos_max - e->min_distance;
1346 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1347 pos_max,pos_limit, ts_max);
1352 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1357 url_fseek(s->pb, pos, SEEK_SET);
1359 av_update_cur_dts(s, st, ts);
/* Generic binary/interpolation search for the byte position whose timestamp
 * matches target_ts, using the demuxer-supplied read_timestamp() callback.
 * Missing min/max bounds (AV_NOPTS_VALUE) are probed from the file first.
 * Returns the found position; the matching timestamp goes to *ts_ret.
 * NOTE(review): this listing has gaps; some statements are not shown here. */
1364 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1366 int64_t start_pos, filesize;
1370 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* establish the lower bound from the first timestamp after the data offset */
1373 if(ts_min == AV_NOPTS_VALUE){
1374 pos_min = s->data_offset;
1375 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1376 if (ts_min == AV_NOPTS_VALUE)
/* establish the upper bound by scanning backwards from end of file until a
 * timestamp is found */
1380 if(ts_max == AV_NOPTS_VALUE){
1382 filesize = url_fsize(s->pb);
1383 pos_max = filesize - 1;
1386 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1388 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1389 if (ts_max == AV_NOPTS_VALUE)
/* extend pos_max/ts_max to cover the very last timestamped packet */
1393 int64_t tmp_pos= pos_max + 1;
1394 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1395 if(tmp_ts == AV_NOPTS_VALUE)
1399 if(tmp_pos >= filesize)
1405 if(ts_min > ts_max){
1407 }else if(ts_min == ts_max){
/* main search loop: narrow [pos_min, pos_limit] until they meet */
1412 while (pos_min < pos_limit) {
1414 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1418 assert(pos_limit <= pos_max);
1421 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1422 // interpolate position (better than dichotomy)
1423 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1424 + pos_min - approximate_keyframe_distance;
1425 }else if(no_change==1){
1426 // bisection, if interpolation failed to change min or max pos last time
1427 pos = (pos_min + pos_limit)>>1;
1429 /* linear search if bisection failed, can only happen if there
1430 are very few or no keyframes between min/max */
1435 else if(pos > pos_limit)
1439 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1445 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1447 if(ts == AV_NOPTS_VALUE){
1448 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1451 assert(ts != AV_NOPTS_VALUE);
/* shrink the interval from whichever side the probe landed on */
1452 if (target_ts <= ts) {
1453 pos_limit = start_pos - 1;
1457 if (target_ts >= ts) {
/* pick the boundary matching the seek direction */
1463 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1464 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
/* NOTE(review): both probes below pass &pos_min; the second looks like it
 * was meant to probe pos_max — confirm against the full source. */
1467 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1469 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1470 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1471 pos, ts_min, target_ts, ts_max);
/* Seek to an absolute byte position (AVSEEK_FLAG_BYTE path): clamp pos to
 * [data_offset, filesize-1], seek the ByteIOContext there and update the
 * stream's current DTS. */
1477 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1478 int64_t pos_min, pos_max;
1482 if (stream_index < 0)
1485 st= s->streams[stream_index];
1488 pos_min = s->data_offset;
1489 pos_max = url_fsize(s->pb) - 1;
/* clamp the requested byte position into the valid range */
1491 if (pos < pos_min) pos= pos_min;
1492 else if(pos > pos_max) pos= pos_max;
1494 url_fseek(s->pb, pos, SEEK_SET);
1497 av_update_cur_dts(s, st, ts);
/* Generic index-based seek: look the timestamp up in the stream's index; if
 * it is not indexed yet, read forward from the last indexed position (or the
 * file start) building the index until a keyframe past the target is seen,
 * then retry the lookup and seek to the found entry's byte position. */
1502 static int av_seek_frame_generic(AVFormatContext *s,
1503 int stream_index, int64_t timestamp, int flags)
1509 st = s->streams[stream_index];
1511 index = av_index_search_timestamp(st, timestamp, flags);
/* target not (fully) covered by the index yet: extend it by reading */
1513 if(index < 0 || index==st->nb_index_entries-1){
1517 if(st->nb_index_entries){
1518 assert(st->index_entries);
/* resume reading from the last already-indexed packet */
1519 ie= &st->index_entries[st->nb_index_entries-1];
1520 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1522 av_update_cur_dts(s, st, ie->timestamp);
1524 if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
1528 int ret = av_read_frame(s, &pkt);
1531 av_free_packet(&pkt);
/* stop once a keyframe beyond the requested timestamp was indexed */
1532 if(stream_index == pkt.stream_index){
1533 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1537 index = av_index_search_timestamp(st, timestamp, flags);
1542 av_read_frame_flush(s);
1543 if (s->iformat->read_seek){
1544 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1547 ie = &st->index_entries[index];
1548 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1550 av_update_cur_dts(s, st, ie->timestamp);
/* Public seek entry point. Dispatch order: byte seek if requested, then the
 * demuxer's own read_seek(), then binary search via read_timestamp(), and
 * finally the generic index-based fallback. A negative stream_index selects
 * a default stream and rescales the AV_TIME_BASE timestamp to it. */
1555 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1560 av_read_frame_flush(s);
1562 if(flags & AVSEEK_FLAG_BYTE)
1563 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1565 if(stream_index < 0){
1566 stream_index= av_find_default_stream_index(s);
1567 if(stream_index < 0)
1570 st= s->streams[stream_index];
1571 /* timestamp for default must be expressed in AV_TIME_BASE units */
1572 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1575 /* first, we try the format specific seek */
1576 if (s->iformat->read_seek)
1577 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1584 if(s->iformat->read_timestamp)
1585 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1587 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1590 /*******************************************************/
1593 * Returns TRUE if at least one stream has an accurate duration.
1595 * @return TRUE if the stream has accurate duration for at least one component.
/* Scan all streams for a known (non-AV_NOPTS_VALUE) duration. */
1597 static int av_has_duration(AVFormatContext *ic)
1602 for(i = 0;i < ic->nb_streams; i++) {
1603 st = ic->streams[i];
1604 if (st->duration != AV_NOPTS_VALUE)
1611 * Estimate the container-level timings from those of the individual streams.
1613 * Also computes the global bitrate if possible.
1615 static void av_update_stream_timings(AVFormatContext *ic)
1617 int64_t start_time, start_time1, end_time, end_time1;
1618 int64_t duration, duration1;
/* sentinels so any real value replaces them on first comparison */
1622 start_time = INT64_MAX;
1623 end_time = INT64_MIN;
1624 duration = INT64_MIN;
1625 for(i = 0;i < ic->nb_streams; i++) {
1626 st = ic->streams[i];
/* all per-stream values are rescaled to AV_TIME_BASE before comparing */
1627 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1628 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1629 if (start_time1 < start_time)
1630 start_time = start_time1;
1631 if (st->duration != AV_NOPTS_VALUE) {
1632 end_time1 = start_time1
1633 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1634 if (end_time1 > end_time)
1635 end_time = end_time1;
1638 if (st->duration != AV_NOPTS_VALUE) {
1639 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1640 if (duration1 > duration)
1641 duration = duration1;
1644 if (start_time != INT64_MAX) {
1645 ic->start_time = start_time;
1646 if (end_time != INT64_MIN) {
/* span of all streams may exceed the longest single stream */
1647 if (end_time - start_time > duration)
1648 duration = end_time - start_time;
1651 if (duration != INT64_MIN) {
1652 ic->duration = duration;
1653 if (ic->file_size > 0) {
1654 /* compute the bitrate */
1655 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1656 (double)ic->duration;
/* Propagate the container-level start_time/duration down to streams that do
 * not have their own values, rescaling from AV_TIME_BASE to each stream's
 * time base. */
1661 static void fill_all_stream_timings(AVFormatContext *ic)
1666 av_update_stream_timings(ic);
1667 for(i = 0;i < ic->nb_streams; i++) {
1668 st = ic->streams[i];
/* only fill streams that have no timing info of their own */
1669 if (st->start_time == AV_NOPTS_VALUE) {
1670 if(ic->start_time != AV_NOPTS_VALUE)
1671 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1672 if(ic->duration != AV_NOPTS_VALUE)
1673 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Least precise timing estimate: derive the total duration from the file
 * size and the (summed per-stream) bitrate. Existing bit_rate/duration
 * values are trusted and left untouched. */
1678 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1680 int64_t filesize, duration;
1684 /* if bit_rate is already set, we believe it */
1685 if (ic->bit_rate == 0) {
1687 for(i=0;i<ic->nb_streams;i++) {
1688 st = ic->streams[i];
1689 bit_rate += st->codec->bit_rate;
1691 ic->bit_rate = bit_rate;
1694 /* if duration is already set, we believe it */
1695 if (ic->duration == AV_NOPTS_VALUE &&
1696 ic->bit_rate != 0 &&
1697 ic->file_size != 0) {
1698 filesize = ic->file_size;
1700 for(i = 0; i < ic->nb_streams; i++) {
1701 st = ic->streams[i];
/* duration = 8*filesize / bit_rate, expressed in the stream's time base */
1702 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1703 if (st->duration == AV_NOPTS_VALUE)
1704 st->duration = duration;
1710 #define DURATION_MAX_READ_SIZE 250000
1712 /* only usable for MPEG-PS streams */
/* Estimate start time and duration by reading real packet PTS values: a
 * bounded read from the file start yields each stream's start_time, and a
 * bounded read near the end of the file yields the end time. The demuxer
 * state (parsers, packet queue, file position, per-stream DTS) is reset
 * around the probing. */
1713 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1715 AVPacket pkt1, *pkt = &pkt1;
1717 int read_size, i, ret;
1719 int64_t filesize, offset, duration;
1721 /* free previous packet */
1722 if (ic->cur_st && ic->cur_st->parser)
1723 av_free_packet(&ic->cur_pkt);
1726 /* flush packet queue */
1727 flush_packet_queue(ic);
1729 for(i=0;i<ic->nb_streams;i++) {
1730 st = ic->streams[i];
1732 av_parser_close(st->parser);
1737 /* we read the first packets to get the first PTS (not fully
1738 accurate, but it is enough now) */
1739 url_fseek(ic->pb, 0, SEEK_SET);
/* cap the amount of data probed for start times */
1742 if (read_size >= DURATION_MAX_READ_SIZE)
1744 /* if all info is available, we can stop */
1745 for(i = 0;i < ic->nb_streams; i++) {
1746 st = ic->streams[i];
1747 if (st->start_time == AV_NOPTS_VALUE)
1750 if (i == ic->nb_streams)
1753 ret = av_read_packet(ic, pkt);
1756 read_size += pkt->size;
1757 st = ic->streams[pkt->stream_index];
/* first PTS seen on a stream becomes its start_time */
1758 if (pkt->pts != AV_NOPTS_VALUE) {
1759 if (st->start_time == AV_NOPTS_VALUE)
1760 st->start_time = pkt->pts;
1762 av_free_packet(pkt);
1765 /* estimate the end time (duration) */
1766 /* XXX: may need to support wrapping */
1767 filesize = ic->file_size;
1768 offset = filesize - DURATION_MAX_READ_SIZE;
1772 url_fseek(ic->pb, offset, SEEK_SET);
1775 if (read_size >= DURATION_MAX_READ_SIZE)
1778 ret = av_read_packet(ic, pkt);
1781 read_size += pkt->size;
1782 st = ic->streams[pkt->stream_index];
/* latest PTS near EOF minus start_time gives the stream duration */
1783 if (pkt->pts != AV_NOPTS_VALUE &&
1784 st->start_time != AV_NOPTS_VALUE) {
1785 end_time = pkt->pts;
1786 duration = end_time - st->start_time;
1788 if (st->duration == AV_NOPTS_VALUE ||
1789 st->duration < duration)
1790 st->duration = duration;
1793 av_free_packet(pkt);
1796 fill_all_stream_timings(ic);
/* restore the caller's file position and reset per-stream decode state */
1798 url_fseek(ic->pb, old_offset, SEEK_SET);
1799 for(i=0; i<ic->nb_streams; i++){
1801 st->cur_dts= st->first_dts;
1802 st->last_IP_pts = AV_NOPTS_VALUE;
/* Top-level timing estimation dispatcher: prefer accurate PTS probing for
 * seekable MPEG-PS/TS files, otherwise use per-stream durations if any
 * exist, otherwise fall back to the bitrate-based guess. */
1806 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1810 /* get the file size, if possible */
1811 if (ic->iformat->flags & AVFMT_NOFILE) {
1814 file_size = url_fsize(ic->pb);
1818 ic->file_size = file_size;
1820 if ((!strcmp(ic->iformat->name, "mpeg") ||
1821 !strcmp(ic->iformat->name, "mpegts")) &&
1822 file_size && !url_is_streamed(ic->pb)) {
1823 /* get accurate estimate from the PTSes */
1824 av_estimate_timings_from_pts(ic, old_offset);
1825 } else if (av_has_duration(ic)) {
1826 /* at least one component has timings - we use them for all
1828 fill_all_stream_timings(ic);
1830 /* less precise: use bitrate info */
1831 av_estimate_timings_from_bit_rate(ic);
1833 av_update_stream_timings(ic);
/* debug dump of the resulting timings (guarded in the full source) */
1839 for(i = 0;i < ic->nb_streams; i++) {
1840 st = ic->streams[i];
1841 printf("%d: start_time: %0.3f duration: %0.3f\n",
1842 i, (double)st->start_time / AV_TIME_BASE,
1843 (double)st->duration / AV_TIME_BASE);
1845 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1846 (double)ic->start_time / AV_TIME_BASE,
1847 (double)ic->duration / AV_TIME_BASE,
1848 ic->bit_rate / 1000);
/* Return non-zero once enough codec parameters are known for this stream to
 * be usable (sample rate/channels/format for audio, width/pix_fmt for
 * video, plus a known codec_id). Vorbis/AAC additionally need frame_size. */
1853 static int has_codec_parameters(AVCodecContext *enc)
1856 switch(enc->codec_type) {
1857 case CODEC_TYPE_AUDIO:
1858 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
/* these codecs cannot be considered complete without a frame size */
1859 if(!enc->frame_size &&
1860 (enc->codec_id == CODEC_ID_VORBIS ||
1861 enc->codec_id == CODEC_ID_AAC))
1864 case CODEC_TYPE_VIDEO:
1865 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1871 return enc->codec_id != CODEC_ID_NONE && val != 0;
/* Open the stream's decoder if needed and decode one frame of the given
 * data, so that the decoder can fill in missing codec parameters (used by
 * av_find_stream_info() as a last resort). Returns the decode result. */
1874 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1878 int got_picture, data_size, ret=0;
1881 if(!st->codec->codec){
1882 codec = avcodec_find_decoder(st->codec->codec_id);
1885 ret = avcodec_open(st->codec, codec);
/* only decode if the parameters are still incomplete */
1890 if(!has_codec_parameters(st->codec)){
1891 switch(st->codec->codec_type) {
1892 case CODEC_TYPE_VIDEO:
1893 ret = avcodec_decode_video(st->codec, &picture,
1894 &got_picture, data, size);
1896 case CODEC_TYPE_AUDIO:
/* audio output buffer must hold at least a full decoded frame */
1897 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1898 samples = av_malloc(data_size);
1901 ret = avcodec_decode_audio2(st->codec, samples,
1902 &data_size, data, size);
/* Look up the container tag (fourcc) for a codec id in a CODEC_ID_NONE-
 * terminated tag table. NOTE(review): body mostly missing from this
 * extract; presumably returns 0 when the id is not found — confirm. */
1913 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1915 while (tags->id != CODEC_ID_NONE) {
/* Reverse lookup: map a container tag (fourcc) to a codec id. A first pass
 * matches exactly; a second pass retries case-insensitively byte by byte. */
1923 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1926 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1927 if(tag == tags[i].tag)
/* second pass: case-insensitive comparison of all four tag bytes */
1930 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1931 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1932 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1933 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1934 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1937 return CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for the tag matching a codec
 * id; delegates per-table lookup to codec_get_tag(). */
1940 unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
1943 for(i=0; tags && tags[i]; i++){
1944 int tag= codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for the codec id matching a
 * tag; returns CODEC_ID_NONE if no table contains it. */
1950 enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
1953 for(i=0; tags && tags[i]; i++){
1954 enum CodecID id= codec_get_id(tags[i], tag);
1955 if(id!=CODEC_ID_NONE) return id;
1957 return CODEC_ID_NONE;
/* Fill in missing chapter end times: each open-ended chapter ends where the
 * next one starts; the final chapter ends at the overall stream end
 * (start_time + duration, rescaled into the chapter's time base). */
1960 static void compute_chapters_end(AVFormatContext *s)
1964 for (i=0; i+1<s->nb_chapters; i++)
1965 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
/* chapters must be ordered and share a time base for this to be valid */
1966 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1967 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1968 s->chapters[i]->end = s->chapters[i+1]->start;
1971 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1972 assert(s->start_time != AV_NOPTS_VALUE);
1973 assert(s->duration > 0);
1974 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1976 s->chapters[i]->time_base);
1980 /* absolute maximum size we read until we abort */
1981 #define MAX_READ_SIZE 5000000
1983 #define MAX_STD_TIMEBASES (60*12+5)
/* Candidate frame rate for index i, in units of 1/(12*1001) Hz: indices
 * below 60*12 enumerate i*1001 (i.e. i/12 fps in NTSC-friendly steps); the
 * last five entries cover the common exact rates 24/30/60/12/15 fps. */
1984 static int get_std_framerate(int i){
1985 if(i<60*12) return i*1001;
1986 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
1990 * Is the time base unreliable?
1991 * This is a heuristic to balance between quick acceptance of the values in
1992 * the headers vs. some extra checks.
1993 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1994 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1995 * And there are "variable" fps files this needs to detect as well.
1997 static int tb_unreliable(AVCodecContext *c){
/* suspicious if implied fps > ~100 or < 5, or the codec is MPEG-2 */
1998 if( c->time_base.den >= 101L*c->time_base.num
1999 || c->time_base.den < 5L*c->time_base.num
2000 /* || c->codec_tag == ff_get_fourcc("DIVX")
2001 || c->codec_tag == ff_get_fourcc("XVID")*/
2002 || c->codec_id == CODEC_ID_MPEG2VIDEO)
/* Probe the input by reading (and buffering) packets until every stream has
 * usable codec parameters, a reliable frame rate and a first DTS, or until
 * MAX_READ_SIZE / max_analyze_duration is exceeded. Collects inter-packet
 * duration statistics to estimate the real frame rate, extracts extradata
 * via parser split(), and decodes frames as a last resort. Buffered packets
 * are kept so a later av_read_frame() returns them.
 * NOTE(review): this listing has gaps; several statements are not shown. */
2007 int av_find_stream_info(AVFormatContext *ic)
2009 int i, count, ret, read_size, j;
2011 AVPacket pkt1, *pkt;
2012 int64_t last_dts[MAX_STREAMS];
2013 int duration_count[MAX_STREAMS]={0};
/* per-stream squared-error accumulator for each candidate frame rate */
2014 double (*duration_error)[MAX_STD_TIMEBASES];
2015 int64_t old_offset = url_ftell(ic->pb);
2016 int64_t codec_info_duration[MAX_STREAMS]={0};
2017 int codec_info_nb_frames[MAX_STREAMS]={0};
2019 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2020 if (!duration_error) return AVERROR(ENOMEM);
/* set up default time bases and packet parsers for each stream */
2022 for(i=0;i<ic->nb_streams;i++) {
2023 st = ic->streams[i];
2024 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2025 /* if(!st->time_base.num)
2027 if(!st->codec->time_base.num)
2028 st->codec->time_base= st->time_base;
2030 //only for the split stuff
2032 st->parser = av_parser_init(st->codec->codec_id);
2033 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2034 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2039 for(i=0;i<MAX_STREAMS;i++){
2040 last_dts[i]= AV_NOPTS_VALUE;
/* main probing loop (loop construct itself not visible in this extract) */
2046 /* check if one codec still needs to be handled */
2047 for(i=0;i<ic->nb_streams;i++) {
2048 st = ic->streams[i];
2049 if (!has_codec_parameters(st->codec))
2051 /* variable fps and no guess at the real fps */
2052 if( tb_unreliable(st->codec)
2053 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2055 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2057 if(st->first_dts == AV_NOPTS_VALUE)
2060 if (i == ic->nb_streams) {
2061 /* NOTE: if the format has no header, then we need to read
2062 some packets to get most of the streams, so we cannot
2064 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2065 /* if we found the info for all the codecs, we can stop */
2070 /* we did not get all the codec info, but we read too much data */
2071 if (read_size >= MAX_READ_SIZE) {
2076 /* NOTE: a new stream can be added there if no header in file
2077 (AVFMTCTX_NOHEADER) */
2078 ret = av_read_frame_internal(ic, &pkt1);
2081 ret = -1; /* we could not have all the codec parameters before EOF */
2082 for(i=0;i<ic->nb_streams;i++) {
2083 st = ic->streams[i];
2084 if (!has_codec_parameters(st->codec)){
2086 avcodec_string(buf, sizeof(buf), st->codec, 0);
2087 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
/* buffer the packet so av_read_frame() can return it later */
2095 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2096 if(av_dup_packet(pkt) < 0) {
2097 av_free(duration_error);
2098 return AVERROR(ENOMEM);
2101 read_size += pkt->size;
2103 st = ic->streams[pkt->stream_index];
2104 if(codec_info_nb_frames[st->index]>1)
2105 codec_info_duration[st->index] += pkt->duration;
2106 if (pkt->duration != 0)
2107 codec_info_nb_frames[st->index]++;
/* collect DTS-delta statistics to score candidate frame rates */
2110 int index= pkt->stream_index;
2111 int64_t last= last_dts[index];
2112 int64_t duration= pkt->dts - last;
2114 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2115 double dur= duration * av_q2d(st->time_base);
2117 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2118 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2119 if(duration_count[index] < 2)
2120 memset(duration_error[index], 0, sizeof(*duration_error));
2121 for(i=1; i<MAX_STD_TIMEBASES; i++){
2122 int framerate= get_std_framerate(i);
2123 int ticks= lrintf(dur*framerate/(1001*12));
2124 double error= dur - ticks*1001*12/(double)framerate;
2125 duration_error[index][i] += error*error;
2127 duration_count[index]++;
2129 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2130 last_dts[pkt->stream_index]= pkt->dts;
/* extract extradata from the bitstream via the parser's split() hook */
2132 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2133 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2135 st->codec->extradata_size= i;
2136 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2137 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2138 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2142 /* if still no information, we try to open the codec and to
2143 decompress the frame. We try to avoid that in most cases as
2144 it takes longer and uses more memory. For MPEG-4, we need to
2145 decompress for QuickTime. */
2146 if (!has_codec_parameters(st->codec) /*&&
2147 (st->codec->codec_id == CODEC_ID_FLV1 ||
2148 st->codec->codec_id == CODEC_ID_H264 ||
2149 st->codec->codec_id == CODEC_ID_H263 ||
2150 st->codec->codec_id == CODEC_ID_H261 ||
2151 st->codec->codec_id == CODEC_ID_VORBIS ||
2152 st->codec->codec_id == CODEC_ID_MJPEG ||
2153 st->codec->codec_id == CODEC_ID_PNG ||
2154 st->codec->codec_id == CODEC_ID_PAM ||
2155 st->codec->codec_id == CODEC_ID_PGM ||
2156 st->codec->codec_id == CODEC_ID_PGMYUV ||
2157 st->codec->codec_id == CODEC_ID_PBM ||
2158 st->codec->codec_id == CODEC_ID_PPM ||
2159 st->codec->codec_id == CODEC_ID_SHORTEN ||
2160 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2161 try_decode_frame(st, pkt->data, pkt->size);
/* stop analyzing this input once max_analyze_duration is reached */
2163 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2169 // close codecs which were opened in try_decode_frame()
2170 for(i=0;i<ic->nb_streams;i++) {
2171 st = ic->streams[i];
2172 if(st->codec->codec)
2173 avcodec_close(st->codec);
/* post-processing: pick frame rates, default codec tags, bits/sample */
2175 for(i=0;i<ic->nb_streams;i++) {
2176 st = ic->streams[i];
2177 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2178 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2179 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2181 if(duration_count[i]
2182 && tb_unreliable(st->codec) /*&&
2183 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2184 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
/* choose the candidate frame rate with the smallest accumulated error */
2185 double best_error= 2*av_q2d(st->time_base);
2186 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2188 for(j=1; j<MAX_STD_TIMEBASES; j++){
2189 double error= duration_error[i][j] * get_std_framerate(j);
2190 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2191 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2192 if(error < best_error){
2194 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
2199 if (!st->r_frame_rate.num){
/* fall back to the slower of codec time base and stream time base */
2200 if( st->codec->time_base.den * (int64_t)st->time_base.num
2201 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2202 st->r_frame_rate.num = st->codec->time_base.den;
2203 st->r_frame_rate.den = st->codec->time_base.num;
2205 st->r_frame_rate.num = st->time_base.den;
2206 st->r_frame_rate.den = st->time_base.num;
2209 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2210 if(!st->codec->bits_per_coded_sample)
2211 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2215 av_estimate_timings(ic, old_offset);
2217 compute_chapters_end(ic);
2220 /* correct DTS for B-frame streams with no timestamps */
2221 for(i=0;i<ic->nb_streams;i++) {
2222 st = ic->streams[i];
2223 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2225 ppktl = &ic->packet_buffer;
2227 if(ppkt1->stream_index != i)
2229 if(ppkt1->pkt->dts < 0)
2231 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2233 ppkt1->pkt->dts -= delta;
2238 st->cur_dts -= delta;
2244 av_free(duration_error);
2249 /*******************************************************/
/* Resume a paused (network) stream: use the demuxer's read_play() if
 * provided, otherwise un-pause at the protocol layer. */
2251 int av_read_play(AVFormatContext *s)
2253 if (s->iformat->read_play)
2254 return s->iformat->read_play(s);
2256 return av_url_read_fpause(s->pb, 0);
2257 return AVERROR(ENOSYS);
/* Pause a (network) stream: use the demuxer's read_pause() if provided,
 * otherwise pause at the protocol layer. */
2260 int av_read_pause(AVFormatContext *s)
2262 if (s->iformat->read_pause)
2263 return s->iformat->read_pause(s);
2265 return av_url_read_fpause(s->pb, 1);
2266 return AVERROR(ENOSYS);
/* Free an input AVFormatContext and everything it owns — streams (parsers,
 * index entries, extradata, codec contexts), programs, chapters, buffered
 * packets and demuxer private data — without touching the I/O context. */
2269 void av_close_input_stream(AVFormatContext *s)
2274 /* free previous packet */
2275 if (s->cur_st && s->cur_st->parser)
2276 av_free_packet(&s->cur_pkt);
2278 if (s->iformat->read_close)
2279 s->iformat->read_close(s);
2280 for(i=0;i<s->nb_streams;i++) {
2281 /* free all data in a stream component */
2284 av_parser_close(st->parser);
2286 av_free(st->index_entries);
2287 av_free(st->codec->extradata);
2289 av_free(st->filename);
2290 av_free(st->priv_data);
/* free programs in reverse order, then the array itself */
2293 for(i=s->nb_programs-1; i>=0; i--) {
2294 av_freep(&s->programs[i]->provider_name);
2295 av_freep(&s->programs[i]->name);
2296 av_freep(&s->programs[i]->stream_index);
2297 av_freep(&s->programs[i]);
2299 av_freep(&s->programs);
2300 flush_packet_queue(s);
2301 av_freep(&s->priv_data);
2302 while(s->nb_chapters--) {
2303 av_free(s->chapters[s->nb_chapters]->title);
2304 av_free(s->chapters[s->nb_chapters]);
2306 av_freep(&s->chapters);
/* Close an input file: tear down the context, then (for formats that own a
 * file) close the byte I/O context captured before the context is freed. */
2310 void av_close_input_file(AVFormatContext *s)
2312 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2313 av_close_input_stream(s);
/* Allocate a new AVStream, attach it to the context and initialize it with
 * sane defaults (90 kHz MPEG-like time base, unknown timestamps/duration).
 * 'id' is the format-specific stream id. Returns NULL on failure or when
 * MAX_STREAMS is reached. */
2318 AVStream *av_new_stream(AVFormatContext *s, int id)
2323 if (s->nb_streams >= MAX_STREAMS)
2326 st = av_mallocz(sizeof(AVStream));
2330 st->codec= avcodec_alloc_context();
2332 /* no default bitrate if decoding */
2333 st->codec->bit_rate = 0;
2335 st->index = s->nb_streams;
2337 st->start_time = AV_NOPTS_VALUE;
2338 st->duration = AV_NOPTS_VALUE;
2339 /* we set the current DTS to 0 so that formats without any timestamps
2340 but durations get some timestamps, formats with some unknown
2341 timestamps have their first few packets buffered and the
2342 timestamps corrected before they are returned to the user */
2344 st->first_dts = AV_NOPTS_VALUE;
2346 /* default pts setting is MPEG-like */
2347 av_set_pts_info(st, 33, 1, 90000);
2348 st->last_IP_pts = AV_NOPTS_VALUE;
2349 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2350 st->pts_buffer[i]= AV_NOPTS_VALUE;
2352 st->sample_aspect_ratio = (AVRational){0,1};
2354 s->streams[s->nb_streams++] = st;
/* Return the AVProgram with the given id, creating and registering a new
 * one (with discard defaulting to AVDISCARD_NONE) if it does not exist. */
2358 AVProgram *av_new_program(AVFormatContext *ac, int id)
2360 AVProgram *program=NULL;
2364 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
/* reuse an existing program with the same id if present */
2367 for(i=0; i<ac->nb_programs; i++)
2368 if(ac->programs[i]->id == id)
2369 program = ac->programs[i];
2372 program = av_mallocz(sizeof(AVProgram));
2375 dynarray_add(&ac->programs, &ac->nb_programs, program);
2376 program->discard = AVDISCARD_NONE;
/* Replace a program's provider and display names with fresh copies of the
 * given strings; both must be set or both NULL (asserted). */
2383 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2385 assert(!provider_name == !name);
2387 av_free(program->provider_name);
2388 av_free(program-> name);
2389 program->provider_name = av_strdup(provider_name);
2390 program-> name = av_strdup( name);
/* Return the chapter with the given id, creating and registering it if
 * needed, then (re)set its title, time base and start/end times. */
2394 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2396 AVChapter *chapter = NULL;
/* reuse an existing chapter with the same id if present */
2399 for(i=0; i<s->nb_chapters; i++)
2400 if(s->chapters[i]->id == id)
2401 chapter = s->chapters[i];
2404 chapter= av_mallocz(sizeof(AVChapter));
2407 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
/* free any previous title before installing the copy of the new one */
2409 av_free(chapter->title);
2410 chapter->title = av_strdup(title);
2412 chapter->time_base= time_base;
2413 chapter->start = start;
2419 /************************************************************/
2420 /* output media file */
/* Allocate the muxer's private data (if the output format declares any) and
 * forward the format parameters to its set_parameters() hook. */
2422 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2426 if (s->oformat->priv_data_size > 0) {
2427 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2429 return AVERROR(ENOMEM);
2431 s->priv_data = NULL;
2433 if (s->oformat->set_parameters) {
2434 ret = s->oformat->set_parameters(s, ap);
/* Validate stream parameters (sample rate, time base, dimensions, aspect
 * ratio, codec tags), allocate muxer private data if still missing, call
 * the muxer's write_header() hook and initialize per-stream fractional PTS
 * generation. */
2441 int av_write_header(AVFormatContext *s)
2446 // some sanity checks
2447 for(i=0;i<s->nb_streams;i++) {
2450 switch (st->codec->codec_type) {
2451 case CODEC_TYPE_AUDIO:
2452 if(st->codec->sample_rate<=0){
2453 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
/* derive block_align for fixed-size PCM-style codecs if unset */
2456 if(!st->codec->block_align)
2457 st->codec->block_align = st->codec->channels *
2458 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2460 case CODEC_TYPE_VIDEO:
2461 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2462 av_log(s, AV_LOG_ERROR, "time base not set\n");
2465 if(st->codec->width<=0 || st->codec->height<=0){
2466 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2469 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2470 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
/* reconcile the stream's codec tag with the container's tag tables */
2476 if(s->oformat->codec_tag){
2477 if(st->codec->codec_tag){
2479 //check that tag + id is in the table
2480 //if neither is in the table -> OK
2481 //if tag is in the table with another id -> FAIL
2482 //if id is in the table with another tag -> FAIL unless strict < ?
2484 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2488 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2489 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2491 return AVERROR(ENOMEM);
2494 if(s->oformat->write_header){
2495 ret = s->oformat->write_header(s);
2500 /* init PTS generation */
2501 for(i=0;i<s->nb_streams;i++) {
2502 int64_t den = AV_NOPTS_VALUE;
2505 switch (st->codec->codec_type) {
2506 case CODEC_TYPE_AUDIO:
2507 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2509 case CODEC_TYPE_VIDEO:
2510 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2515 if (den != AV_NOPTS_VALUE) {
2517 return AVERROR_INVALIDDATA;
2518 av_frac_init(&st->pts, 0, 0, den);
2524 //FIXME merge with compute_pkt_fields
/* Fill in / sanity-check a packet's timing fields before muxing: compute a
 * missing duration, derive dts from pts through the B-frame reorder buffer,
 * enforce monotone dts and pts>=dts, and advance the stream's fractional
 * PTS counter. Returns negative on a timestamp error. */
2525 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2526 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2527 int num, den, frame_size, i;
2529 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2531 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2534 /* duration field */
2535 if (pkt->duration == 0) {
2536 compute_frame_duration(&num, &den, st, NULL, pkt);
2538 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
/* with no B-frame delay, dts can stand in for a missing pts */
2542 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2545 //XXX/FIXME this is a temporary hack until all encoders output pts
2546 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2548 // pkt->pts= st->cur_dts;
2549 pkt->pts= st->pts.val;
2552 //calculate dts from pts
2553 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
/* push pts into the reorder buffer; its minimum is the next dts */
2554 st->pts_buffer[0]= pkt->pts;
2555 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2556 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2557 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2558 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2560 pkt->dts= st->pts_buffer[0];
2563 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2564 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2567 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2568 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2572 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2573 st->cur_dts= pkt->dts;
2574 st->pts.val= pkt->dts;
/* advance the fractional PTS by this packet's duration in time_base units */
2577 switch (st->codec->codec_type) {
2578 case CODEC_TYPE_AUDIO:
2579 frame_size = get_audio_frame_size(st->codec, pkt->size);
2581 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2582 likely equal to the encoder delay, but it would be better if we
2583 had the real timestamps from the encoder */
2584 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2585 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2588 case CODEC_TYPE_VIDEO:
2589 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Write a packet to the output without interleaving: fix up its timestamp
 * fields, then pass it straight to the muxer's write_packet() hook; report
 * I/O errors from the byte context. */
2597 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2599 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
/* timestamp problems are only fatal for formats that need timestamps */
2601 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2604 ret= s->oformat->write_packet(s, pkt);
2606 ret= url_ferror(s->pb);
/* Default interleaving: buffer incoming packets in a list kept sorted by
 * dts (compared across streams via their time bases) and emit the head
 * packet once every stream has contributed at least one packet, or when
 * flushing. Returns 1 with a packet in *out, else 0. */
2610 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2611 AVPacketList *pktl, **next_point, *this_pktl;
2613 int streams[MAX_STREAMS];
2616 AVStream *st= s->streams[ pkt->stream_index];
2618 // assert(pkt->destruct != av_destruct_packet); //FIXME
2620 this_pktl = av_mallocz(sizeof(AVPacketList));
2621 this_pktl->pkt= *pkt;
/* take ownership: steal a non-shared packet, duplicate a shared one */
2622 if(pkt->destruct == av_destruct_packet)
2623 pkt->destruct= NULL; // not shared -> must keep original from being freed
2625 av_dup_packet(&this_pktl->pkt); //shared -> must dup
/* insert into the buffer list at the dts-sorted position */
2627 next_point = &s->packet_buffer;
2629 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2630 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2631 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2632 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2634 next_point= &(*next_point)->next;
2636 this_pktl->next= *next_point;
2637 *next_point= this_pktl;
/* count how many distinct streams have buffered packets */
2640 memset(streams, 0, sizeof(streams));
2641 pktl= s->packet_buffer;
2643 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2644 if(streams[ pktl->pkt.stream_index ] == 0)
2646 streams[ pktl->pkt.stream_index ]++;
/* safe to emit once every stream is represented (or we are flushing) */
2650 if(stream_count && (s->nb_streams == stream_count || flush)){
2651 pktl= s->packet_buffer;
2654 s->packet_buffer= pktl->next;
2658 av_init_packet(out);
2664 * Interleaves an AVPacket correctly so it can be muxed.
2665 * @param out the interleaved packet will be output here
2666 * @param in the input packet
2667 * @param flush 1 if no further packets are available as input and all
2668 * remaining packets should be output
2669 * @return 1 if a packet was output, 0 if no packet could be output,
2670 * < 0 if an error occurred
/* Dispatch to the muxer's own interleave_packet() hook when present,
 * otherwise use the generic dts-based interleaver. */
2672 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2673 if(s->oformat->interleave_packet)
2674 return s->oformat->interleave_packet(s, out, in, flush);
2676 return av_interleave_packet_per_dts(s, out, in, flush);
/* Write a packet with interleaving: fix up timestamp fields, feed the
 * packet into the interleaver and write out every packet it releases,
 * stopping when it yields no more (or on error). */
2679 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2680 AVStream *st= s->streams[ pkt->stream_index];
2682 //FIXME/XXX/HACK drop zero sized packets
2683 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2686 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2687 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2690 if(pkt->dts == AV_NOPTS_VALUE)
/* drain every packet the interleaver is ready to release */
2695 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2696 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2699 ret= s->oformat->write_packet(s, &opkt);
2701 av_free_packet(&opkt);
2706 if(url_ferror(s->pb))
2707 return url_ferror(s->pb);
2711 int av_write_trailer(AVFormatContext *s)
2717 ret= av_interleave_packet(s, &pkt, NULL, 1);
2718 if(ret<0) //FIXME cleanup needed for ret<0 ?
2723 ret= s->oformat->write_packet(s, &pkt);
2725 av_free_packet(&pkt);
2729 if(url_ferror(s->pb))
2733 if(s->oformat->write_trailer)
2734 ret = s->oformat->write_trailer(s);
2737 ret=url_ferror(s->pb);
2738 for(i=0;i<s->nb_streams;i++)
2739 av_freep(&s->streams[i]->priv_data);
2740 av_freep(&s->priv_data);
2744 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2747 AVProgram *program=NULL;
2750 for(i=0; i<ac->nb_programs; i++){
2751 if(ac->programs[i]->id != progid)
2753 program = ac->programs[i];
2754 for(j=0; j<program->nb_stream_indexes; j++)
2755 if(program->stream_index[j] == idx)
2758 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2761 program->stream_index = tmp;
2762 program->stream_index[program->nb_stream_indexes++] = idx;
2767 /* "user interface" functions */
2768 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2771 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2772 AVStream *st = ic->streams[i];
2773 int g = ff_gcd(st->time_base.num, st->time_base.den);
2774 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2775 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2776 /* the pid is an important information, so we display it */
2777 /* XXX: add a generic system */
2778 if (flags & AVFMT_SHOW_IDS)
2779 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2780 if (strlen(st->language) > 0)
2781 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2782 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2783 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2784 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2785 if(st->r_frame_rate.den && st->r_frame_rate.num)
2786 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2787 /* else if(st->time_base.den && st->time_base.num)
2788 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
2790 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2792 av_log(NULL, AV_LOG_INFO, "\n");
2795 void dump_format(AVFormatContext *ic,
2802 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2803 is_output ? "Output" : "Input",
2805 is_output ? ic->oformat->name : ic->iformat->name,
2806 is_output ? "to" : "from", url);
2808 av_log(NULL, AV_LOG_INFO, " Duration: ");
2809 if (ic->duration != AV_NOPTS_VALUE) {
2810 int hours, mins, secs, us;
2811 secs = ic->duration / AV_TIME_BASE;
2812 us = ic->duration % AV_TIME_BASE;
2817 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2818 (100 * us) / AV_TIME_BASE);
2820 av_log(NULL, AV_LOG_INFO, "N/A");
2822 if (ic->start_time != AV_NOPTS_VALUE) {
2824 av_log(NULL, AV_LOG_INFO, ", start: ");
2825 secs = ic->start_time / AV_TIME_BASE;
2826 us = ic->start_time % AV_TIME_BASE;
2827 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2828 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2830 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2832 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2834 av_log(NULL, AV_LOG_INFO, "N/A");
2836 av_log(NULL, AV_LOG_INFO, "\n");
2838 if(ic->nb_programs) {
2840 for(j=0; j<ic->nb_programs; j++) {
2841 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2842 ic->programs[j]->name ? ic->programs[j]->name : "");
2843 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2844 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2847 for(i=0;i<ic->nb_streams;i++)
2848 dump_stream_format(ic, i, index, is_output);
/**
 * Thin compatibility wrapper around av_parse_video_frame_size().
 * @return the return value of av_parse_video_frame_size()
 */
int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
{
    return av_parse_video_frame_size(width_ptr, height_ptr, str);
}
2856 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2858 AVRational frame_rate;
2859 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2860 *frame_rate_num= frame_rate.num;
2861 *frame_rate_den= frame_rate.den;
2865 int64_t av_gettime(void)
2868 gettimeofday(&tv,NULL);
2869 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/**
 * Parse datestr as either an absolute date/time or, when duration is
 * nonzero, as a time span.
 *
 * Accepted date forms: "[YYYY-MM-DD][{T| }HH:MM:SS[.m...]][zZ]"
 * (a trailing 'z'/'Z' selects UTC); duration forms: "[-]HH:MM:SS[.m...]"
 * or "[-]S+[.m...]".
 *
 * @return microseconds since the epoch (or the duration in microseconds),
 *         INT64_MIN if datestr cannot be parsed
 */
int64_t parse_date(const char *datestr, int duration)
{
    const char *p;
    int64_t t;
    struct tm dt;
    int i;
    static const char * const date_fmt[] = {
        "%Y-%m-%d",
        "%Y%m%d",
    };
    static const char * const time_fmt[] = {
        "%H:%M:%S",
        "%H%M%S",
    };
    const char *q;
    int is_utc, len;
    char lastch;
    int negative = 0;

    time_t now = time(0);

    len = strlen(datestr);
    if (len > 0)
        lastch = datestr[len - 1];
    else
        lastch = '\0';
    is_utc = (lastch == 'z' || lastch == 'Z');

    memset(&dt, 0, sizeof(dt));

    p = datestr;
    q = NULL;
    if (!duration) {
        /* parse the year-month-day part */
        for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
            q = small_strptime(p, date_fmt[i], &dt);
            if (q)
                break;
        }

        /* if the year-month-day part is missing, then take the
         * current year-month-day time */
        if (!q) {
            if (is_utc)
                dt = *gmtime(&now);
            else
                dt = *localtime(&now);
            dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
        } else {
            p = q;
        }

        if (*p == 'T' || *p == 't' || *p == ' ')
            p++;

        /* parse the hour-minute-second part */
        for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
            q = small_strptime(p, time_fmt[i], &dt);
            if (q)
                break;
        }
    } else {
        /* parse datestr as a duration */
        if (p[0] == '-') {
            negative = 1;
            ++p;
        }
        /* parse datestr as HH:MM:SS */
        q = small_strptime(p, time_fmt[0], &dt);
        if (!q) {
            /* parse datestr as S+ */
            dt.tm_sec = strtol(p, (char **)&q, 10);
            if (q == p)
                /* the parsing didn't succeed */
                return INT64_MIN;
            dt.tm_min  = 0;
            dt.tm_hour = 0;
        }
    }

    /* Now we have all the fields that we can get */
    if (!q)
        return INT64_MIN;

    if (duration) {
        t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
    } else {
        dt.tm_isdst = -1; /* unknown */
        if (is_utc)
            t = mktimegm(&dt);
        else
            t = mktime(&dt);
    }

    t *= 1000000;

    /* parse the .m... (fractional seconds) part */
    if (*q == '.') {
        int val, n;
        q++;
        for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
            if (!isdigit(*q))
                break;
            val += n * (*q - '0');
        }
        t += val;
    }
    return negative ? -t : t;
}
/**
 * Look up the tag tag1 in a "?key1=val1&key2=val2"-style info string.
 * The value of each visited tag is copied into arg (with '+' decoded
 * to ' '); on a match, arg holds that tag's value.
 * @return 1 if the tag was found, 0 otherwise
 */
int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
{
    const char *cur = info;
    char key[128];

    if (*cur == '?')
        cur++;

    for (;;) {
        char *out = key;

        /* copy the key up to '=', '&' or end of string */
        while (*cur != '\0' && *cur != '=' && *cur != '&') {
            if (out - key < sizeof(key) - 1)
                *out++ = *cur;
            cur++;
        }
        *out = '\0';

        out = arg;
        if (*cur == '=') {
            cur++;
            /* copy the value, decoding '+' as a space */
            while (*cur != '&' && *cur != '\0') {
                if (out - arg < arg_size - 1)
                    *out++ = (*cur == '+') ? ' ' : *cur;
                cur++;
            }
            *out = '\0';
        }
        if (!strcmp(key, tag1))
            return 1;
        if (*cur != '&')
            break;
        cur++;
    }
    return 0;
}
/**
 * Build a frame filename from a numbered-sequence pattern.
 *
 * Expands exactly one "%d"-style directive (an optional zero-padding
 * width is allowed, e.g. "%04d") in path with number; "%%" emits a
 * literal '%'.
 *
 * @param buf      destination buffer
 * @param buf_size size of the destination buffer
 * @param path     pattern, must contain exactly one "%[0-9]*d" directive
 * @param number   frame number to substitute
 * @return 0 if OK, -1 on format error (no or multiple "%d", unknown
 *         directive, or truncation); buf is NUL-terminated either way
 */
int av_get_frame_filename(char *buf, int buf_size,
                          const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q = buf;
    p = path;
    percentd_found = 0;
    for (;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            do {
                nd = 0;
                /* cast to unsigned char: passing a plain (possibly
                 * negative) char to isdigit() is undefined behavior
                 * (CERT STR37-C) */
                while (isdigit((unsigned char)*p)) {
                    nd = nd * 10 + *p++ - '0';
                }
                c = *p++;
            } while (isdigit((unsigned char)c));

            switch (c) {
            case '%':
                goto addchar;
            case 'd':
                if (percentd_found)
                    goto fail; /* only one %d directive is allowed */
                percentd_found = 1;
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;
            }
        } else {
        addchar:
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;
fail:
    *q = '\0';
    return -1;
}
/**
 * Hex dump of buf, 16 bytes per row with an ASCII column; output goes
 * to the FILE f when non-NULL, otherwise to av_log(avcl, level, ...).
 */
static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
{
    int len, i, j, c;
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)

    for (i = 0; i < size; i += 16) {
        len = size - i;
        if (len > 16)
            len = 16;
        /* offset column */
        PRINT("%08x ", i);
        /* hex column, padded so the ASCII column always lines up */
        for (j = 0; j < 16; j++) {
            if (j < len)
                PRINT(" %02x", buf[i + j]);
            else
                PRINT("   ");
        }
        PRINT(" ");
        /* printable-ASCII column; non-printables become '.' */
        for (j = 0; j < len; j++) {
            c = buf[i + j];
            if (c < ' ' || c > '~')
                c = '.';
            PRINT("%c", c);
        }
        PRINT("\n");
    }
#undef PRINT
}
/**
 * Hex dump buf to the stream f (see hex_dump_internal()).
 */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
{
    hex_dump_internal(NULL, f, 0, buf, size);
}
3115 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3117 hex_dump_internal(avcl, NULL, level, buf, size);
3120 //FIXME needs to know the time_base
3121 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3123 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3124 PRINT("stream #%d:\n", pkt->stream_index);
3125 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3126 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3127 /* DTS is _always_ valid after av_read_frame() */
3129 if (pkt->dts == AV_NOPTS_VALUE)
3132 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3133 /* PTS may not be known if B-frames are present. */
3135 if (pkt->pts == AV_NOPTS_VALUE)
3138 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3140 PRINT(" size=%d\n", pkt->size);
3143 av_hex_dump(f, pkt->data, pkt->size);
3146 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3148 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
3151 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3153 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
/**
 * Split a URL of the form
 *   proto://user[:pass]@host[:port]/path or proto://host?query
 * into its components. Any output buffer may be sized 0 to skip that
 * component; port_ptr may be NULL. Missing components are returned as
 * empty strings (port as -1).
 */
void url_split(char *proto, int proto_size,
               char *authorization, int authorization_size,
               char *hostname, int hostname_size,
               int *port_ptr,
               char *path, int path_size,
               const char *url)
{
    const char *p, *ls, *at, *col, *brk;

    if (port_ptr)               *port_ptr = -1;
    if (proto_size > 0)         proto[0] = 0;
    if (authorization_size > 0) authorization[0] = 0;
    if (hostname_size > 0)      hostname[0] = 0;
    if (path_size > 0)          path[0] = 0;

    /* parse protocol */
    if ((p = strchr(url, ':'))) {
        av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
        p++; /* skip ':' */
        if (*p == '/') p++;
        if (*p == '/') p++;
    } else {
        /* no protocol means plain filename */
        av_strlcpy(path, url, path_size);
        return;
    }

    /* separate path from hostname */
    ls = strchr(p, '/');
    if (!ls)
        ls = strchr(p, '?');
    if (ls)
        av_strlcpy(path, ls, path_size);
    else
        ls = &p[strlen(p)]; // XXX

    /* the rest is hostname, use that to parse auth/port */
    if (ls != p) {
        /* authorization (user[:pass]@hostname) */
        if ((at = strchr(p, '@')) && at < ls) {
            av_strlcpy(authorization, p,
                       FFMIN(authorization_size, at + 1 - p));
            p = at + 1; /* skip '@' */
        }

        if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
            /* [host]:port — bracketed (e.g. IPv6) host */
            av_strlcpy(hostname, p + 1,
                       FFMIN(hostname_size, brk - p));
            if (brk[1] == ':' && port_ptr)
                *port_ptr = atoi(brk + 2);
        } else if ((col = strchr(p, ':')) && col < ls) {
            /* host:port */
            av_strlcpy(hostname, p,
                       FFMIN(col + 1 - p, hostname_size));
            if (port_ptr) *port_ptr = atoi(col + 1);
        } else {
            /* host only */
            av_strlcpy(hostname, p,
                       FFMIN(ls + 1 - p, hostname_size));
        }
    }
}
/**
 * Write the uppercase hexadecimal representation of the s bytes at src
 * into buff (2*s characters, NOT NUL-terminated).
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
{
    static const char hex_digits[] = "0123456789ABCDEF";
    int i;

    for (i = 0; i < s; i++) {
        buff[2 * i]     = hex_digits[src[i] >> 4];
        buff[2 * i + 1] = hex_digits[src[i] & 0x0F];
    }
    return buff;
}
3233 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3234 int pts_num, int pts_den)
3236 unsigned int gcd= ff_gcd(pts_num, pts_den);
3237 s->pts_wrap_bits = pts_wrap_bits;
3238 s->time_base.num = pts_num/gcd;
3239 s->time_base.den = pts_den/gcd;
3242 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);