2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libavcodec/opt.h"
24 #include "libavutil/avstring.h"
34 * @file libavformat/utils.c
35 * various utility functions for use within FFmpeg
/* Return libavformat's compile-time version (LIBAVFORMAT_VERSION_INT).
 * NOTE(review): this excerpt is a line-numbered listing with lines elided
 * (e.g. the surrounding braces); the code text below is kept verbatim. */
38 unsigned avformat_version(void)
40 return LIBAVFORMAT_VERSION_INT;
/* AVFrac helpers: per the surviving doxygen, av_frac_init() sets
 * f = val + num/den (+0.5 rounding), with num normalized into [0, den);
 * av_frac_add() performs f = f + incr/f->den.
 * NOTE(review): both function bodies are almost entirely elided in this
 * excerpt — only the signatures and one normalization branch survive. */
43 /* fraction handling */
46 * f = val + (num / den) + 0.5.
48 * 'num' is normalized so that it is such as 0 <= num < den.
50 * @param f fractional number
51 * @param val integer value
52 * @param num must be >= 0
53 * @param den must be >= 1
55 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
68 * Fractional addition to f: f = f + (incr / f->den).
70 * @param f fractional number
71 * @param incr increment, can be positive or negative
73 static void av_frac_add(AVFrac *f, int64_t incr)
/* surviving normalization branch of av_frac_add (body lines missing) */
86 } else if (num >= den) {
/* (De)muxer registry: two singly linked lists headed by first_iformat /
 * first_oformat. The *_next() iterators return the head when passed NULL,
 * otherwise the successor; av_register_*_format() appends at the tail by
 * walking to the terminating NULL next-pointer.
 * NOTE(review): braces and the tail-link assignments are elided here. */
93 /** head of registered input format linked list */
94 AVInputFormat *first_iformat = NULL;
95 /** head of registered output format linked list */
96 AVOutputFormat *first_oformat = NULL;
98 AVInputFormat *av_iformat_next(AVInputFormat *f)
100 if(f) return f->next;
101 else return first_iformat;
104 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
106 if(f) return f->next;
107 else return first_oformat;
110 void av_register_input_format(AVInputFormat *format)
114 while (*p != NULL) p = &(*p)->next;
119 void av_register_output_format(AVOutputFormat *format)
123 while (*p != NULL) p = &(*p)->next;
/* Output-format guessing.
 * match_ext(): case-insensitive match of the filename's extension (text
 * after the last '.') against a comma-separated extension list.
 * guess_format(): scores each registered muxer by short name, MIME type
 * and extension, keeping the best; image sequences are special-cased to
 * the "image2" muxer when CONFIG_IMAGE2_MUXER is set.
 * guess_stream_format(): retries guess_format() with "<name>_stream".
 * av_guess_codec(): picks the default codec ID of the muxer for the given
 * media type (with an image2 filename-based override for video).
 * av_find_input_format(): linear search of the demuxer list by short name.
 * NOTE(review): loop bodies, score bookkeeping and returns are partially
 * elided in this excerpt; text below is verbatim. */
128 int match_ext(const char *filename, const char *extensions)
136 ext = strrchr(filename, '.');
142 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
145 if (!strcasecmp(ext1, ext))
155 AVOutputFormat *guess_format(const char *short_name, const char *filename,
156 const char *mime_type)
158 AVOutputFormat *fmt, *fmt_found;
159 int score_max, score;
161 /* specific test for image sequences */
162 #ifdef CONFIG_IMAGE2_MUXER
163 if (!short_name && filename &&
164 av_filename_number_test(filename) &&
165 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
166 return guess_format("image2", NULL, NULL);
169 /* Find the proper file type. */
173 while (fmt != NULL) {
175 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
177 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
179 if (filename && fmt->extensions &&
180 match_ext(filename, fmt->extensions)) {
183 if (score > score_max) {
192 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
193 const char *mime_type)
195 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
198 AVOutputFormat *stream_fmt;
199 char stream_format_name[64];
201 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
202 stream_fmt = guess_format(stream_format_name, NULL, NULL);
211 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
212 const char *filename, const char *mime_type, enum CodecType type){
213 if(type == CODEC_TYPE_VIDEO){
214 enum CodecID codec_id= CODEC_ID_NONE;
216 #ifdef CONFIG_IMAGE2_MUXER
217 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
218 codec_id= av_guess_image2_codec(filename);
221 if(codec_id == CODEC_ID_NONE)
222 codec_id= fmt->video_codec;
224 }else if(type == CODEC_TYPE_AUDIO)
225 return fmt->audio_codec;
227 return CODEC_ID_NONE;
230 AVInputFormat *av_find_input_format(const char *short_name)
233 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
234 if (!strcmp(fmt->name, short_name))
/* AVPacket memory helpers.
 * av_destruct_packet(): frees packet data (the av_free call is elided
 * here) and clears data/size. av_init_packet(): resets fields to sane
 * defaults (pts/dts unset, nofree destructor). av_new_packet(): allocates
 * size + FF_INPUT_BUFFER_PADDING_SIZE bytes — the (unsigned) comparison
 * on line 262 is an overflow guard for size + padding — and zeroes the
 * padding. av_get_packet(): av_new_packet() then get_buffer() from the
 * ByteIOContext, recording the stream position. av_dup_packet(): makes a
 * non-refcounted packet own its data by reallocating with padding.
 * av_filename_number_test(): true if filename contains a frame-number
 * pattern accepted by av_get_frame_filename().
 * NOTE(review): braces, NULL checks after av_malloc, and several
 * assignments are elided in this listing. */
240 /* memory handling */
242 void av_destruct_packet(AVPacket *pkt)
245 pkt->data = NULL; pkt->size = 0;
248 void av_init_packet(AVPacket *pkt)
250 pkt->pts = AV_NOPTS_VALUE;
251 pkt->dts = AV_NOPTS_VALUE;
255 pkt->stream_index = 0;
256 pkt->destruct= av_destruct_packet_nofree;
259 int av_new_packet(AVPacket *pkt, int size)
/* overflow guard: wraps iff size + padding overflows */
262 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
263 return AVERROR(ENOMEM);
264 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
266 return AVERROR(ENOMEM);
267 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
272 pkt->destruct = av_destruct_packet;
276 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
278 int ret= av_new_packet(pkt, size);
283 pkt->pos= url_ftell(s);
285 ret= get_buffer(s, pkt->data, size);
294 int av_dup_packet(AVPacket *pkt)
296 if (pkt->destruct != av_destruct_packet) {
298 /* We duplicate the packet and don't forget to add the padding again. */
299 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
300 return AVERROR(ENOMEM);
301 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
303 return AVERROR(ENOMEM);
305 memcpy(data, pkt->data, pkt->size);
306 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
308 pkt->destruct = av_destruct_packet;
313 int av_filename_number_test(const char *filename)
316 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
/* Input-format probing.
 * av_probe_input_format2(): walks the demuxer list, scoring each via its
 * read_probe() callback or, failing that, an extension match; the
 * is_opened/AVFMT_NOFILE test skips demuxers whose file-open state does
 * not match. Returns the best-scoring demuxer above *score_max.
 * set_codec_from_probe_data(): re-probes buffered data for a stream whose
 * codec is CODEC_ID_PROBE and maps a handful of known demuxer names
 * (mp3/ac3/mpegvideo/h264) to concrete codec IDs and media types.
 * NOTE(review): score bookkeeping and returns are elided here. */
319 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
321 AVInputFormat *fmt1, *fmt;
325 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
326 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
329 if (fmt1->read_probe) {
330 score = fmt1->read_probe(pd);
331 } else if (fmt1->extensions) {
332 if (match_ext(pd->filename, fmt1->extensions)) {
336 if (score > *score_max) {
339 }else if (score == *score_max)
345 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
347 return av_probe_input_format2(pd, is_opened, &score);
350 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
353 fmt = av_probe_input_format2(pd, 1, &score);
356 if (!strcmp(fmt->name, "mp3")) {
357 st->codec->codec_id = CODEC_ID_MP3;
358 st->codec->codec_type = CODEC_TYPE_AUDIO;
359 } else if (!strcmp(fmt->name, "ac3")) {
360 st->codec->codec_id = CODEC_ID_AC3;
361 st->codec->codec_type = CODEC_TYPE_AUDIO;
362 } else if (!strcmp(fmt->name, "mpegvideo")) {
363 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
364 st->codec->codec_type = CODEC_TYPE_VIDEO;
365 } else if (!strcmp(fmt->name, "h264")) {
366 st->codec->codec_id = CODEC_ID_H264;
367 st->codec->codec_type = CODEC_TYPE_VIDEO;
/* AVFormatContext AVClass plumbing: format_to_name() yields the current
 * (de)muxer name for logging; the options[] table exposes context fields
 * (probesize, muxrate, fflags, analyzeduration, ...) via the AVOption
 * system; avformat_get_context_defaults()/av_alloc_format_context()
 * zero-initialize a context, attach the class and apply option defaults.
 * NOTE(review): the table is truncated (trailing entries and the
 * terminator are missing from this listing).
 * NOTE(review): the "track" option's help string carries a stray leading
 * space (" set the track number") — user-visible typo; fixing it would
 * change a runtime string, so it is only flagged here. */
373 /************************************************************/
374 /* input media file */
377 * Open a media file from an IO stream. 'fmt' must be specified.
379 static const char* format_to_name(void* ptr)
381 AVFormatContext* fc = (AVFormatContext*) ptr;
382 if(fc->iformat) return fc->iformat->name;
383 else if(fc->oformat) return fc->oformat->name;
387 #define OFFSET(x) offsetof(AVFormatContext,x)
388 #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
389 //these names are too long to be readable
390 #define E AV_OPT_FLAG_ENCODING_PARAM
391 #define D AV_OPT_FLAG_DECODING_PARAM
393 static const AVOption options[]={
394 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
395 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
396 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
397 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
398 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
399 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
400 {"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
401 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
402 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
403 {"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
404 {"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D},
405 {"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
406 {"fdebug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, DEFAULT, 0, INT_MAX, E|D, "fdebug"},
407 {"ts", NULL, 0, FF_OPT_TYPE_CONST, FF_FDEBUG_TS, INT_MIN, INT_MAX, E|D, "fdebug"},
415 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
417 static void avformat_get_context_defaults(AVFormatContext *s)
419 memset(s, 0, sizeof(AVFormatContext));
421 s->av_class = &av_format_context_class;
423 av_opt_set_defaults(s);
426 AVFormatContext *av_alloc_format_context(void)
429 ic = av_malloc(sizeof(AVFormatContext));
431 avformat_get_context_defaults(ic);
432 ic->av_class = &av_format_context_class;
/* Opening input.
 * av_open_input_stream(): given an already-chosen demuxer 'fmt', allocate
 * (or reuse a preallocated) AVFormatContext, allocate the demuxer's
 * private data, call its read_header(), and record data_offset. On error
 * the visible cleanup path frees priv_data and per-stream allocations.
 * av_open_input_file(): probe the format — first by name only, then (if
 * the format needs a file) by reading progressively larger buffers
 * (PROBE_BUF_MIN..PROBE_BUF_MAX, doubling, with a lowered score threshold
 * until the max size), rewinding between attempts — validate
 * AVFMT_NEEDNUMBER filenames, then delegate to av_open_input_stream().
 * NOTE(review): error-path labels, several NULL checks and returns are
 * elided in this listing; code text below is verbatim. */
436 int av_open_input_stream(AVFormatContext **ic_ptr,
437 ByteIOContext *pb, const char *filename,
438 AVInputFormat *fmt, AVFormatParameters *ap)
442 AVFormatParameters default_ap;
446 memset(ap, 0, sizeof(default_ap));
449 if(!ap->prealloced_context)
450 ic = av_alloc_format_context();
454 err = AVERROR(ENOMEM);
459 ic->duration = AV_NOPTS_VALUE;
460 ic->start_time = AV_NOPTS_VALUE;
461 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
463 /* allocate private data */
464 if (fmt->priv_data_size > 0) {
465 ic->priv_data = av_mallocz(fmt->priv_data_size);
466 if (!ic->priv_data) {
467 err = AVERROR(ENOMEM);
471 ic->priv_data = NULL;
474 if (ic->iformat->read_header) {
475 err = ic->iformat->read_header(ic, ap);
480 if (pb && !ic->data_offset)
481 ic->data_offset = url_ftell(ic->pb);
488 av_freep(&ic->priv_data);
489 for(i=0;i<ic->nb_streams;i++) {
490 AVStream *st = ic->streams[i];
492 av_free(st->priv_data);
493 av_free(st->codec->extradata);
503 /** size of probe buffer, for guessing file type from file contents */
504 #define PROBE_BUF_MIN 2048
505 #define PROBE_BUF_MAX (1<<20)
507 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
510 AVFormatParameters *ap)
513 AVProbeData probe_data, *pd = &probe_data;
514 ByteIOContext *pb = NULL;
518 pd->filename = filename;
523 /* guess format if no file can be opened */
524 fmt = av_probe_input_format(pd, 0);
527 /* Do not open file if the format does not need it. XXX: specific
528 hack needed to handle RTSP/TCP */
529 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
530 /* if no file needed do not try to open one */
531 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
535 url_setbufsize(pb, buf_size);
538 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
539 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
540 /* read probe data */
541 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
542 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
543 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
544 if (url_fseek(pb, 0, SEEK_SET) < 0) {
/* seek failed: reopen the file from scratch to rewind */
546 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
552 /* guess file format */
553 fmt = av_probe_input_format2(pd, 1, &score);
558 /* if still no format found, error */
564 /* check filename in case an image number is expected */
565 if (fmt->flags & AVFMT_NEEDNUMBER) {
566 if (!av_filename_number_test(filename)) {
567 err = AVERROR_NUMEXPECTED;
571 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
/* Raw packet reading.
 * add_to_pktbuf(): append a packet to a singly linked AVPacketList,
 * maintaining a tail pointer for O(1) append.
 * av_read_packet(): first drain the raw_packet_buffer for streams whose
 * codec id is already resolved; otherwise call the demuxer's
 * read_packet(), apply forced codec ids from the context, and — for
 * streams still in CODEC_ID_PROBE — buffer the packet and accumulate its
 * bytes into st->probe_data, re-running format probing whenever the
 * buffered size crosses a power of two (the av_log2 comparison).
 * NOTE(review): returns, breaks and some branches are elided here. */
584 /*******************************************************/
586 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
587 AVPacketList **plast_pktl){
588 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
593 (*plast_pktl)->next = pktl;
595 *packet_buffer = pktl;
597 /* add the packet in the buffered packet list */
603 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
609 AVPacketList *pktl = s->raw_packet_buffer;
613 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
614 s->raw_packet_buffer = pktl->next;
621 ret= s->iformat->read_packet(s, pkt);
624 st= s->streams[pkt->stream_index];
626 switch(st->codec->codec_type){
627 case CODEC_TYPE_VIDEO:
628 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
630 case CODEC_TYPE_AUDIO:
631 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
633 case CODEC_TYPE_SUBTITLE:
634 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
638 if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
641 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
643 if(st->codec->codec_id == CODEC_ID_PROBE){
644 AVProbeData *pd = &st->probe_data;
646 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
647 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
648 pd->buf_size += pkt->size;
649 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* re-probe only when buffered size crosses a power-of-two boundary */
651 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
652 set_codec_from_probe_data(st, pd, 1);
653 if(st->codec->codec_id != CODEC_ID_PROBE){
/* Frame-duration helpers.
 * get_audio_frame_size(): number of samples in an audio packet — from
 * bits_per_sample and channel count for PCM-like codecs, from bit_rate
 * otherwise (e.g. ADPCM), or the codec's fixed frame_size; -1 on error
 * per the surviving doxygen.
 * compute_frame_duration(): duration of a packet as a num/den fraction of
 * the stream time base; for video, the stream or codec time base is used
 * (scaled by repeat_pict from the parser), for audio it is
 * frame_size / sample_rate.
 * is_intra_only(): true for audio and for a fixed list of intra-only
 * video codec IDs (the list is truncated in this excerpt).
 * NOTE(review): early returns and default branches are elided here. */
662 /**********************************************************/
665 * Get the number of samples of an audio frame. Return -1 on error.
667 static int get_audio_frame_size(AVCodecContext *enc, int size)
671 if(enc->codec_id == CODEC_ID_VORBIS)
674 if (enc->frame_size <= 1) {
675 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
677 if (bits_per_sample) {
678 if (enc->channels == 0)
680 frame_size = (size << 3) / (bits_per_sample * enc->channels);
682 /* used for example by ADPCM codecs */
683 if (enc->bit_rate == 0)
685 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
688 frame_size = enc->frame_size;
695 * Return the frame duration in seconds. Return 0 if not available.
697 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
698 AVCodecParserContext *pc, AVPacket *pkt)
704 switch(st->codec->codec_type) {
705 case CODEC_TYPE_VIDEO:
706 if(st->time_base.num*1000LL > st->time_base.den){
707 *pnum = st->time_base.num;
708 *pden = st->time_base.den;
709 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
710 *pnum = st->codec->time_base.num;
711 *pden = st->codec->time_base.den;
712 if (pc && pc->repeat_pict) {
714 *pnum = (*pnum) * (2 + pc->repeat_pict);
718 case CODEC_TYPE_AUDIO:
719 frame_size = get_audio_frame_size(st->codec, pkt->size);
723 *pden = st->codec->sample_rate;
730 static int is_intra_only(AVCodecContext *enc){
731 if(enc->codec_type == CODEC_TYPE_AUDIO){
733 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
734 switch(enc->codec_id){
736 case CODEC_ID_MJPEGB:
738 case CODEC_ID_RAWVIDEO:
739 case CODEC_ID_DVVIDEO:
740 case CODEC_ID_HUFFYUV:
741 case CODEC_ID_FFVHUFF:
/* Initial timestamp/duration fixups applied once the first DTS of a
 * stream becomes known.
 * update_initial_timestamps(): sets st->first_dts from the first observed
 * dts relative to cur_dts, then shifts pts/dts of already-buffered
 * packets of the same stream by that offset and derives start_time.
 * update_initial_durations(): walks the packet buffer filling in missing
 * dts/pts/duration for packets that have none, spacing them by the
 * current packet's duration (pts only when the codec has no B-frames).
 * NOTE(review): early returns, break statements and some assignments are
 * elided in this listing; logic above is inferred from the surviving
 * lines — confirm against the full source before relying on details. */
752 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
753 int64_t dts, int64_t pts)
755 AVStream *st= s->streams[stream_index];
756 AVPacketList *pktl= s->packet_buffer;
758 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
761 st->first_dts= dts - st->cur_dts;
764 for(; pktl; pktl= pktl->next){
765 if(pktl->pkt.stream_index != stream_index)
767 //FIXME think more about this check
768 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
769 pktl->pkt.pts += st->first_dts;
771 if(pktl->pkt.dts != AV_NOPTS_VALUE)
772 pktl->pkt.dts += st->first_dts;
774 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
775 st->start_time= pktl->pkt.pts;
777 if (st->start_time == AV_NOPTS_VALUE)
778 st->start_time = pts;
781 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
783 AVPacketList *pktl= s->packet_buffer;
786 if(st->first_dts != AV_NOPTS_VALUE){
787 cur_dts= st->first_dts;
788 for(; pktl; pktl= pktl->next){
789 if(pktl->pkt.stream_index == pkt->stream_index){
790 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
792 cur_dts -= pkt->duration;
795 pktl= s->packet_buffer;
796 st->first_dts = cur_dts;
797 }else if(st->cur_dts)
800 for(; pktl; pktl= pktl->next){
801 if(pktl->pkt.stream_index != pkt->stream_index)
803 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
804 && !pktl->pkt.duration){
805 pktl->pkt.dts= cur_dts;
806 if(!st->codec->has_b_frames)
807 pktl->pkt.pts= cur_dts;
808 cur_dts += pkt->duration;
809 pktl->pkt.duration= pkt->duration;
813 if(st->first_dts == AV_NOPTS_VALUE)
814 st->cur_dts= cur_dts;
/* compute_pkt_fields(): fill in missing pts/dts/duration on a demuxed
 * packet. Visible steps: undo pts_wrap_bits wraparound when dts > pts;
 * derive duration from compute_frame_duration(); byte-offset timestamp
 * correction for AVSTREAM_PARSE_TIMESTAMPS parsers; detect presentation
 * delay (B-frames); interpolate dts from last_IP_pts/cur_dts when
 * delayed, or pts = dts when not; reorder pts through st->pts_buffer to
 * recover dts for B-frame streams; finally mark keyframes for intra-only
 * codecs and parser-reported I-frames.
 * NOTE(review): several conditions, closing braces and else-branches are
 * elided in this listing — treat the summary above as approximate. */
817 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
818 AVCodecParserContext *pc, AVPacket *pkt)
820 int num, den, presentation_delayed, delay, i;
823 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
824 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
825 pkt->dts -= 1LL<<st->pts_wrap_bits;
828 if (pkt->duration == 0) {
829 compute_frame_duration(&num, &den, st, pc, pkt);
831 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
833 if(pkt->duration != 0 && s->packet_buffer)
834 update_initial_durations(s, st, pkt);
838 /* correct timestamps with byte offset if demuxers only have timestamps
839 on packet boundaries */
840 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
841 /* this will estimate bitrate based on this frame's duration and size */
842 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
843 if(pkt->pts != AV_NOPTS_VALUE)
845 if(pkt->dts != AV_NOPTS_VALUE)
849 /* do we have a video B-frame ? */
850 delay= st->codec->has_b_frames;
851 presentation_delayed = 0;
852 /* XXX: need has_b_frame, but cannot get it if the codec is
855 pc && pc->pict_type != FF_B_TYPE)
856 presentation_delayed = 1;
857 /* This may be redundant, but it should not hurt. */
858 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
859 presentation_delayed = 1;
861 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
862 /* interpolate PTS and DTS if they are not present */
863 if(delay==0 || (delay==1 && pc)){
864 if (presentation_delayed) {
865 /* DTS = decompression timestamp */
866 /* PTS = presentation timestamp */
867 if (pkt->dts == AV_NOPTS_VALUE)
868 pkt->dts = st->last_IP_pts;
869 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
870 if (pkt->dts == AV_NOPTS_VALUE)
871 pkt->dts = st->cur_dts;
873 /* this is tricky: the dts must be incremented by the duration
874 of the frame we are displaying, i.e. the last I- or P-frame */
875 if (st->last_IP_duration == 0)
876 st->last_IP_duration = pkt->duration;
877 if(pkt->dts != AV_NOPTS_VALUE)
878 st->cur_dts = pkt->dts + st->last_IP_duration;
879 st->last_IP_duration = pkt->duration;
880 st->last_IP_pts= pkt->pts;
881 /* cannot compute PTS if not present (we can compute it only
882 by knowing the future */
883 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
884 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
885 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
886 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
887 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
888 pkt->pts += pkt->duration;
889 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
893 /* presentation is not delayed : PTS and DTS are the same */
894 if(pkt->pts == AV_NOPTS_VALUE)
896 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
897 if(pkt->pts == AV_NOPTS_VALUE)
898 pkt->pts = st->cur_dts;
900 if(pkt->pts != AV_NOPTS_VALUE)
901 st->cur_dts = pkt->pts + pkt->duration;
905 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
906 st->pts_buffer[0]= pkt->pts;
907 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
908 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
909 if(pkt->dts == AV_NOPTS_VALUE)
910 pkt->dts= st->pts_buffer[0];
912 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
914 if(pkt->dts > st->cur_dts)
915 st->cur_dts = pkt->dts;
918 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
921 if(is_intra_only(st->codec))
922 pkt->flags |= PKT_FLAG_KEY;
925 /* keyframe computation */
926 if (pc->pict_type == FF_I_TYPE)
927 pkt->flags |= PKT_FLAG_KEY;
/* av_destruct_packet_nofree(): destructor for packets that do not own
 * their data — clears the pointers without freeing. */
931 void av_destruct_packet_nofree(AVPacket *pkt)
933 pkt->data = NULL; pkt->size = 0;
/* av_read_frame_internal(): produce one complete frame. If the current
 * stream needs no parsing, the raw packet is emitted after
 * compute_pkt_fields(); otherwise remaining bytes of s->cur_pkt are fed
 * through av_parser_parse() until a full frame is assembled (adding a
 * generic index entry for keyframes when AVFMT_GENERIC_INDEX). When the
 * current packet is exhausted, av_read_packet() fetches the next one,
 * lazily creating the stream's parser; at EOF the parsers are flushed to
 * emit any final buffered frames. A warning is logged for packets with
 * pts < dts.
 * NOTE(review): loop structure, returns and several assignments are
 * elided in this listing — the flow above is reconstructed from the
 * surviving lines. */
936 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
944 /* select current input stream component */
947 if (!st->need_parsing || !st->parser) {
948 /* no parsing needed: we just output the packet as is */
949 /* raw data support */
951 compute_pkt_fields(s, st, NULL, pkt);
954 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
955 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
956 s->cur_ptr, s->cur_len,
957 s->cur_pkt.pts, s->cur_pkt.dts);
958 s->cur_pkt.pts = AV_NOPTS_VALUE;
959 s->cur_pkt.dts = AV_NOPTS_VALUE;
960 /* increment read pointer */
964 /* return packet if any */
967 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
969 pkt->stream_index = st->index;
970 pkt->pts = st->parser->pts;
971 pkt->dts = st->parser->dts;
972 pkt->destruct = av_destruct_packet_nofree;
973 compute_pkt_fields(s, st, st->parser, pkt);
975 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
976 ff_reduce_index(s, st->index);
977 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
978 0, 0, AVINDEX_KEYFRAME);
985 av_free_packet(&s->cur_pkt);
989 /* read next packet */
990 ret = av_read_packet(s, &s->cur_pkt);
992 if (ret == AVERROR(EAGAIN))
994 /* return the last frames, if any */
995 for(i = 0; i < s->nb_streams; i++) {
997 if (st->parser && st->need_parsing) {
998 av_parser_parse(st->parser, st->codec,
999 &pkt->data, &pkt->size,
1001 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
1006 /* no more packets: really terminate parsing */
1010 if(s->cur_pkt.pts != AV_NOPTS_VALUE &&
1011 s->cur_pkt.dts != AV_NOPTS_VALUE &&
1012 s->cur_pkt.pts < s->cur_pkt.dts){
1013 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1014 s->cur_pkt.stream_index,
1018 // av_free_packet(&s->cur_pkt);
1022 st = s->streams[s->cur_pkt.stream_index];
1023 if(s->debug & FF_FDEBUG_TS)
1024 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1025 s->cur_pkt.stream_index,
1032 s->cur_ptr = s->cur_pkt.data;
1033 s->cur_len = s->cur_pkt.size;
1034 if (st->need_parsing && !st->parser) {
1035 st->parser = av_parser_init(st->codec->codec_id);
1037 /* no parser available: just output the raw packets */
1038 st->need_parsing = AVSTREAM_PARSE_NONE;
1039 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1040 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1042 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1043 st->parser->next_frame_offset=
1044 st->parser->cur_offset= s->cur_pkt.pos;
1049 if(s->debug & FF_FDEBUG_TS)
1050 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
/* av_read_frame(): public frame reader. With AVFMT_FLAG_GENPTS it scans
 * the packet buffer to synthesize a missing pts from the dts of a later
 * non-B packet of the same stream, emitting a buffered packet once its
 * pts is known (or dts is absent); otherwise frames from
 * av_read_frame_internal() are buffered via add_to_pktbuf() +
 * av_dup_packet(). Without genpts it delegates straight to
 * av_read_frame_internal().
 * flush_packet_queue(): frees the whole buffered packet list.
 * NOTE(review): loop heads, returns and the genpts fallthrough are
 * partially elided in this listing. */
1060 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1064 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1067 pktl = s->packet_buffer;
1069 AVPacket *next_pkt= &pktl->pkt;
1071 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1072 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1073 if( pktl->pkt.stream_index == next_pkt->stream_index
1074 && next_pkt->dts < pktl->pkt.dts
1075 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1076 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1077 next_pkt->pts= pktl->pkt.dts;
1081 pktl = s->packet_buffer;
1084 if( next_pkt->pts != AV_NOPTS_VALUE
1085 || next_pkt->dts == AV_NOPTS_VALUE
1087 /* read packet from packet buffer, if there is data */
1089 s->packet_buffer = pktl->next;
1095 int ret= av_read_frame_internal(s, pkt);
1097 if(pktl && ret != AVERROR(EAGAIN)){
1104 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1105 &s->packet_buffer_end)) < 0)
1106 return AVERROR(ENOMEM);
1108 assert(!s->packet_buffer);
1109 return av_read_frame_internal(s, pkt);
1114 /* XXX: suppress the packet queue */
1115 static void flush_packet_queue(AVFormatContext *s)
1120 pktl = s->packet_buffer;
1123 s->packet_buffer = pktl->next;
1124 av_free_packet(&pktl->pkt);
/* av_find_default_stream_index(): index of the first video stream, else
 * the first audio stream, else 0; (visible guard rejects empty contexts).
 * av_read_frame_flush(): reset the reader for seeking — flush the packet
 * queue, free the in-flight cur_pkt when a parser holds it, close each
 * stream's parser and reset last_IP_pts/cur_dts.
 * NOTE(review): several returns, resets and the cur_st handling are
 * elided in this listing. */
1129 /*******************************************************/
1132 int av_find_default_stream_index(AVFormatContext *s)
1134 int first_audio_index = -1;
1138 if (s->nb_streams <= 0)
1140 for(i = 0; i < s->nb_streams; i++) {
1142 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1145 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1146 first_audio_index = i;
1148 return first_audio_index >= 0 ? first_audio_index : 0;
1152 * Flush the frame reader.
1154 static void av_read_frame_flush(AVFormatContext *s)
1159 flush_packet_queue(s);
1161 /* free previous packet */
1163 if (s->cur_st->parser)
1164 av_free_packet(&s->cur_pkt);
1171 /* for each stream, reset read state */
1172 for(i = 0; i < s->nb_streams; i++) {
1176 av_parser_close(st->parser);
1179 st->last_IP_pts = AV_NOPTS_VALUE;
1180 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
/* av_update_cur_dts(): after a seek, set every stream's cur_dts to the
 * given timestamp rescaled from ref_st's time base to that stream's.
 * ff_reduce_index(): when a stream's index reaches the max_index_size
 * budget, halve it by keeping every second entry. */
1184 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1187 for(i = 0; i < s->nb_streams; i++) {
1188 AVStream *st = s->streams[i];
1190 st->cur_dts = av_rescale(timestamp,
1191 st->time_base.den * (int64_t)ref_st->time_base.num,
1192 st->time_base.num * (int64_t)ref_st->time_base.den);
1196 void ff_reduce_index(AVFormatContext *s, int stream_index)
1198 AVStream *st= s->streams[stream_index];
1199 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1201 if((unsigned)st->nb_index_entries >= max_entries){
1203 for(i=0; 2*i<st->nb_index_entries; i++)
1204 st->index_entries[i]= st->index_entries[2*i];
1205 st->nb_index_entries= i;
/* Seek-index maintenance.
 * av_add_index_entry(): insert an entry sorted by timestamp, growing the
 * array with av_fast_realloc() (with an overflow guard on the entry
 * count); an existing entry with the same timestamp is updated rather
 * than duplicated, keeping the larger min_distance.
 * av_index_search_timestamp(): binary search over the entries; with
 * AVSEEK_FLAG_BACKWARD the lower bound is chosen, and unless
 * AVSEEK_FLAG_ANY the result is stepped to the nearest keyframe entry.
 * NOTE(review): loop bounds, returns and some assignments are elided in
 * this listing. */
1209 int av_add_index_entry(AVStream *st,
1210 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1212 AVIndexEntry *entries, *ie;
1215 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1218 entries = av_fast_realloc(st->index_entries,
1219 &st->index_entries_allocated_size,
1220 (st->nb_index_entries + 1) *
1221 sizeof(AVIndexEntry));
1225 st->index_entries= entries;
1227 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1230 index= st->nb_index_entries++;
1231 ie= &entries[index];
1232 assert(index==0 || ie[-1].timestamp < timestamp);
1234 ie= &entries[index];
1235 if(ie->timestamp != timestamp){
1236 if(ie->timestamp <= timestamp)
1238 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1239 st->nb_index_entries++;
1240 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1241 distance= ie->min_distance;
1245 ie->timestamp = timestamp;
1246 ie->min_distance= distance;
1253 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1256 AVIndexEntry *entries= st->index_entries;
1257 int nb_entries= st->nb_index_entries;
1266 timestamp = entries[m].timestamp;
1267 if(timestamp >= wanted_timestamp)
1269 if(timestamp <= wanted_timestamp)
1272 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1274 if(!(flags & AVSEEK_FLAG_ANY)){
1275 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1276 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Timestamp-based binary seeking.
 * av_seek_frame_binary(): seed the [pos_min,pos_max]/[ts_min,ts_max]
 * search interval from cached index entries when available, then call
 * av_gen_search() and seek the ByteIOContext to the resulting position,
 * updating all streams' cur_dts.
 * av_gen_search(): generic search driven by the demuxer's
 * read_timestamp() callback — establish bounds from data_offset and
 * end-of-file (stepping back until a timestamp is readable), then narrow
 * by linear interpolation of the target timestamp, falling back to
 * bisection and finally linear search when positions stop changing;
 * returns the position whose timestamp brackets target_ts per the
 * BACKWARD flag.
 * NOTE(review): step sizes, several returns and the no_change update are
 * elided in this listing; consult the full source for exact control
 * flow. */
1287 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1288 AVInputFormat *avif= s->iformat;
1289 int64_t pos_min, pos_max, pos, pos_limit;
1290 int64_t ts_min, ts_max, ts;
1294 if (stream_index < 0)
1298 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1302 ts_min= AV_NOPTS_VALUE;
1303 pos_limit= -1; //gcc falsely says it may be uninitialized
1305 st= s->streams[stream_index];
1306 if(st->index_entries){
1309 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1310 index= FFMAX(index, 0);
1311 e= &st->index_entries[index];
1313 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1315 ts_min= e->timestamp;
1317 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1324 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1325 assert(index < st->nb_index_entries);
1327 e= &st->index_entries[index];
1328 assert(e->timestamp >= target_ts);
1330 ts_max= e->timestamp;
1331 pos_limit= pos_max - e->min_distance;
1333 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1334 pos_max,pos_limit, ts_max);
1339 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1344 url_fseek(s->pb, pos, SEEK_SET);
1346 av_update_cur_dts(s, st, ts);
1351 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1353 int64_t start_pos, filesize;
1357 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1360 if(ts_min == AV_NOPTS_VALUE){
1361 pos_min = s->data_offset;
1362 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1363 if (ts_min == AV_NOPTS_VALUE)
1367 if(ts_max == AV_NOPTS_VALUE){
1369 filesize = url_fsize(s->pb);
1370 pos_max = filesize - 1;
1373 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1375 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1376 if (ts_max == AV_NOPTS_VALUE)
1380 int64_t tmp_pos= pos_max + 1;
1381 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1382 if(tmp_ts == AV_NOPTS_VALUE)
1386 if(tmp_pos >= filesize)
1392 if(ts_min > ts_max){
1394 }else if(ts_min == ts_max){
1399 while (pos_min < pos_limit) {
1401 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1405 assert(pos_limit <= pos_max);
1408 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1409 // interpolate position (better than dichotomy)
1410 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1411 + pos_min - approximate_keyframe_distance;
1412 }else if(no_change==1){
1413 // bisection, if interpolation failed to change min or max pos last time
1414 pos = (pos_min + pos_limit)>>1;
1416 /* linear search if bisection failed, can only happen if there
1417 are very few or no keyframes between min/max */
1422 else if(pos > pos_limit)
1426 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1432 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1434 if(ts == AV_NOPTS_VALUE){
1435 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1438 assert(ts != AV_NOPTS_VALUE);
1439 if (target_ts <= ts) {
1440 pos_limit = start_pos - 1;
1444 if (target_ts >= ts) {
1450 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1451 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1454 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1456 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1457 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1458 pos, ts_min, target_ts, ts_max);
/**
 * Seek to a raw byte position, clamped to [data_offset, filesize-1].
 * NOTE(review): elided extraction — some body lines are missing.
 */
1464 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1465 int64_t pos_min, pos_max;
1469 if (stream_index < 0)
1472 st= s->streams[stream_index];
1475 pos_min = s->data_offset;
1476 pos_max = url_fsize(s->pb) - 1;
/* clamp the requested position into the valid data range */
1478 if (pos < pos_min) pos= pos_min;
1479 else if(pos > pos_max) pos= pos_max;
1481 url_fseek(s->pb, pos, SEEK_SET);
1484 av_update_cur_dts(s, st, ts);
/**
 * Generic seek using the stream's index entries; if the target lies past
 * the last index entry, reads packets forward to extend the index.
 * NOTE(review): elided extraction — some body lines are missing.
 */
1489 static int av_seek_frame_generic(AVFormatContext *s,
1490 int stream_index, int64_t timestamp, int flags)
1496 st = s->streams[stream_index];
1498 index = av_index_search_timestamp(st, timestamp, flags);
/* target not covered by the index: seek to the last known entry (or file
   start) and read packets until the index reaches the timestamp */
1500 if(index < 0 || index==st->nb_index_entries-1){
1504 if(st->nb_index_entries){
1505 assert(st->index_entries);
1506 ie= &st->index_entries[st->nb_index_entries-1];
1507 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1509 av_update_cur_dts(s, st, ie->timestamp);
1511 if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
1515 int ret = av_read_frame(s, &pkt);
1518 av_free_packet(&pkt);
1519 if(stream_index == pkt.stream_index){
1520 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1524 index = av_index_search_timestamp(st, timestamp, flags);
1529 av_read_frame_flush(s);
/* prefer the demuxer's own read_seek if it succeeds */
1530 if (s->iformat->read_seek){
1531 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1534 ie = &st->index_entries[index];
1535 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1537 av_update_cur_dts(s, st, ie->timestamp);
/**
 * Public seek entry point: dispatches to byte seek, the format's own
 * read_seek, binary timestamp search, or the generic index-based seek.
 * NOTE(review): elided extraction — some body lines are missing.
 */
1542 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1547 av_read_frame_flush(s);
1549 if(flags & AVSEEK_FLAG_BYTE)
1550 return av_seek_frame_byte(s, stream_index, timestamp, flags);
/* no stream given: pick a default one and rescale the AV_TIME_BASE
   timestamp into that stream's time base */
1552 if(stream_index < 0){
1553 stream_index= av_find_default_stream_index(s);
1554 if(stream_index < 0)
1557 st= s->streams[stream_index];
1558 /* timestamp for default must be expressed in AV_TIME_BASE units */
1559 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1562 /* first, we try the format specific seek */
1563 if (s->iformat->read_seek)
1564 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1571 if(s->iformat->read_timestamp)
1572 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1574 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1577 /*******************************************************/
1580 * Returns TRUE if the stream has accurate duration in any stream.
1582 * @return TRUE if the stream has accurate duration for at least one component.
/* scans all streams; a single stream with a known duration suffices */
1584 static int av_has_duration(AVFormatContext *ic)
1589 for(i = 0;i < ic->nb_streams; i++) {
1590 st = ic->streams[i];
1591 if (st->duration != AV_NOPTS_VALUE)
1598 * Estimate the stream timings from the one of each components.
1600 * Also computes the global bitrate if possible.
/* Aggregates per-stream start_time/duration (rescaled to AV_TIME_BASE)
   into the container-level ic->start_time, ic->duration and ic->bit_rate.
   NOTE(review): elided extraction — some body lines are missing. */
1602 static void av_update_stream_timings(AVFormatContext *ic)
1604 int64_t start_time, start_time1, end_time, end_time1;
1605 int64_t duration, duration1;
/* sentinels: min over start times, max over end times/durations */
1609 start_time = INT64_MAX;
1610 end_time = INT64_MIN;
1611 duration = INT64_MIN;
1612 for(i = 0;i < ic->nb_streams; i++) {
1613 st = ic->streams[i];
1614 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1615 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1616 if (start_time1 < start_time)
1617 start_time = start_time1;
1618 if (st->duration != AV_NOPTS_VALUE) {
1619 end_time1 = start_time1
1620 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1621 if (end_time1 > end_time)
1622 end_time = end_time1;
1625 if (st->duration != AV_NOPTS_VALUE) {
1626 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1627 if (duration1 > duration)
1628 duration = duration1;
/* only commit values that were actually found (sentinel unchanged = unknown) */
1631 if (start_time != INT64_MAX) {
1632 ic->start_time = start_time;
1633 if (end_time != INT64_MIN) {
1634 if (end_time - start_time > duration)
1635 duration = end_time - start_time;
1638 if (duration != INT64_MIN) {
1639 ic->duration = duration;
1640 if (ic->file_size > 0) {
1641 /* compute the bitrate */
1642 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1643 (double)ic->duration;
/**
 * Fill in missing per-stream start_time/duration from the container-level
 * values computed by av_update_stream_timings(), rescaled back into each
 * stream's own time base.
 */
1648 static void fill_all_stream_timings(AVFormatContext *ic)
1653 av_update_stream_timings(ic);
1654 for(i = 0;i < ic->nb_streams; i++) {
1655 st = ic->streams[i];
/* only streams with no start_time of their own are filled in */
1656 if (st->start_time == AV_NOPTS_VALUE) {
1657 if(ic->start_time != AV_NOPTS_VALUE)
1658 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1659 if(ic->duration != AV_NOPTS_VALUE)
1660 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/**
 * Estimate durations from the total bitrate and file size:
 * duration ~= 8 * filesize / bit_rate.  Existing bit_rate/duration
 * values are trusted and left alone.
 * NOTE(review): elided extraction — some body lines are missing.
 */
1665 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1667 int64_t filesize, duration;
1671 /* if bit_rate is already set, we believe it */
1672 if (ic->bit_rate == 0) {
/* otherwise sum the per-stream codec bitrates */
1674 for(i=0;i<ic->nb_streams;i++) {
1675 st = ic->streams[i];
1676 bit_rate += st->codec->bit_rate;
1678 ic->bit_rate = bit_rate;
1681 /* if duration is already set, we believe it */
1682 if (ic->duration == AV_NOPTS_VALUE &&
1683 ic->bit_rate != 0 &&
1684 ic->file_size != 0) {
1685 filesize = ic->file_size;
1687 for(i = 0; i < ic->nb_streams; i++) {
1688 st = ic->streams[i];
/* duration in stream time-base units: 8*filesize/bit_rate seconds */
1689 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1690 if (st->duration == AV_NOPTS_VALUE)
1691 st->duration = duration;
/* byte budget for each of the two probe passes (start and end of file) */
1697 #define DURATION_MAX_READ_SIZE 250000
1699 /* only usable for MPEG-PS streams */
/**
 * Estimate stream timings by reading packets: first PTSes from the start
 * of the file, then PTSes near the end to derive durations.  Restores the
 * file position to old_offset when done.
 * NOTE(review): elided extraction — some body lines are missing.
 */
1700 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1702 AVPacket pkt1, *pkt = &pkt1;
1704 int read_size, i, ret;
1706 int64_t filesize, offset, duration;
1708 /* free previous packet */
1709 if (ic->cur_st && ic->cur_st->parser)
1710 av_free_packet(&ic->cur_pkt);
1713 /* flush packet queue */
1714 flush_packet_queue(ic);
/* close any per-stream parsers before raw packet reading */
1716 for(i=0;i<ic->nb_streams;i++) {
1717 st = ic->streams[i];
1719 av_parser_close(st->parser);
1724 /* we read the first packets to get the first PTS (not fully
1725 accurate, but it is enough now) */
1726 url_fseek(ic->pb, 0, SEEK_SET);
1729 if (read_size >= DURATION_MAX_READ_SIZE)
1731 /* if all info is available, we can stop */
1732 for(i = 0;i < ic->nb_streams; i++) {
1733 st = ic->streams[i];
1734 if (st->start_time == AV_NOPTS_VALUE)
1737 if (i == ic->nb_streams)
1740 ret = av_read_packet(ic, pkt);
1743 read_size += pkt->size;
1744 st = ic->streams[pkt->stream_index];
1745 if (pkt->pts != AV_NOPTS_VALUE) {
/* first PTS seen on this stream becomes its start_time */
1746 if (st->start_time == AV_NOPTS_VALUE)
1747 st->start_time = pkt->pts;
1749 av_free_packet(pkt);
1752 /* estimate the end time (duration) */
1753 /* XXX: may need to support wrapping */
1754 filesize = ic->file_size;
1755 offset = filesize - DURATION_MAX_READ_SIZE;
1759 url_fseek(ic->pb, offset, SEEK_SET);
1762 if (read_size >= DURATION_MAX_READ_SIZE)
1765 ret = av_read_packet(ic, pkt);
1768 read_size += pkt->size;
1769 st = ic->streams[pkt->stream_index];
1770 if (pkt->pts != AV_NOPTS_VALUE &&
1771 st->start_time != AV_NOPTS_VALUE) {
/* keep the largest (end_time - start_time) seen near EOF as duration */
1772 end_time = pkt->pts;
1773 duration = end_time - st->start_time;
1775 if (st->duration == AV_NOPTS_VALUE ||
1776 st->duration < duration)
1777 st->duration = duration;
1780 av_free_packet(pkt);
1783 fill_all_stream_timings(ic);
/* restore original position and reset per-stream decode state */
1785 url_fseek(ic->pb, old_offset, SEEK_SET);
1786 for(i=0; i<ic->nb_streams; i++){
1788 st->cur_dts= st->first_dts;
1789 st->last_IP_pts = AV_NOPTS_VALUE;
/**
 * Top-level timing estimation: chooses PTS probing (MPEG-PS/TS only),
 * per-stream timings, or bitrate-based estimation.
 * NOTE(review): elided extraction — some body lines are missing; the
 * printf lines below are presumably inside a debug-only #if block in the
 * original (TODO confirm).
 */
1793 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1797 /* get the file size, if possible */
1798 if (ic->iformat->flags & AVFMT_NOFILE) {
1801 file_size = url_fsize(ic->pb);
1805 ic->file_size = file_size;
1807 if ((!strcmp(ic->iformat->name, "mpeg") ||
1808 !strcmp(ic->iformat->name, "mpegts")) &&
1809 file_size && !url_is_streamed(ic->pb)) {
1810 /* get accurate estimate from the PTSes */
1811 av_estimate_timings_from_pts(ic, old_offset);
1812 } else if (av_has_duration(ic)) {
1813 /* at least one component has timings - we use them for all
1815 fill_all_stream_timings(ic);
1817 /* less precise: use bitrate info */
1818 av_estimate_timings_from_bit_rate(ic);
1820 av_update_stream_timings(ic);
1826 for(i = 0;i < ic->nb_streams; i++) {
1827 st = ic->streams[i];
1828 printf("%d: start_time: %0.3f duration: %0.3f\n",
1829 i, (double)st->start_time / AV_TIME_BASE,
1830 (double)st->duration / AV_TIME_BASE);
1832 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1833 (double)ic->start_time / AV_TIME_BASE,
1834 (double)ic->duration / AV_TIME_BASE,
1835 ic->bit_rate / 1000);
/**
 * Check whether a codec context has enough parameters filled in to be
 * considered fully probed (sample rate/channels/format for audio,
 * width/pixel format for video), plus a known codec_id.
 * NOTE(review): elided extraction — some body lines are missing.
 */
1840 static int has_codec_parameters(AVCodecContext *enc)
1843 switch(enc->codec_type) {
1844 case CODEC_TYPE_AUDIO:
1845 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
/* Vorbis/AAC additionally need a known frame_size */
1846 if(!enc->frame_size &&
1847 (enc->codec_id == CODEC_ID_VORBIS ||
1848 enc->codec_id == CODEC_ID_AAC))
1851 case CODEC_TYPE_VIDEO:
1852 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1858 return enc->codec_id != CODEC_ID_NONE && val != 0;
/**
 * Open a decoder (if not already open) and decode one frame, as a
 * last-resort way to fill in missing codec parameters during probing.
 * NOTE(review): elided extraction — some body lines are missing.
 */
1861 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1865 int got_picture, data_size, ret=0;
1868 if(!st->codec->codec){
1869 codec = avcodec_find_decoder(st->codec->codec_id);
1872 ret = avcodec_open(st->codec, codec);
/* only decode if parameters are still missing */
1877 if(!has_codec_parameters(st->codec)){
1878 switch(st->codec->codec_type) {
1879 case CODEC_TYPE_VIDEO:
1880 ret = avcodec_decode_video(st->codec, &picture,
1881 &got_picture, data, size);
1883 case CODEC_TYPE_AUDIO:
/* output buffer must hold at least one maximum-size audio frame */
1884 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1885 samples = av_malloc(data_size);
1888 ret = avcodec_decode_audio2(st->codec, samples,
1889 &data_size, data, size);
/**
 * Look up the fourcc/tag for a codec id in a CODEC_ID_NONE-terminated
 * tag table.  NOTE(review): the body is almost entirely missing from
 * this elided extraction.
 */
1900 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1902 while (tags->id != CODEC_ID_NONE) {
/**
 * Look up a codec id by tag: first an exact match pass, then a
 * case-insensitive pass comparing the four tag bytes individually.
 * NOTE(review): elided extraction — some body lines are missing.
 */
1910 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1913 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1914 if(tag == tags[i].tag)
/* second pass: byte-wise case-insensitive fourcc comparison */
1917 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1918 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1919 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1920 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1921 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1924 return CODEC_ID_NONE;
/**
 * Search a NULL-terminated list of tag tables for the tag matching id.
 * NOTE(review): elided extraction — the return statements are missing.
 */
1927 unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
1930 for(i=0; tags && tags[i]; i++){
1931 int tag= codec_get_tag(tags[i], id);
/**
 * Search a NULL-terminated list of tag tables for the codec id matching
 * tag; CODEC_ID_NONE if not found in any table.
 */
1937 enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
1940 for(i=0; tags && tags[i]; i++){
1941 enum CodecID id= codec_get_id(tags[i], tag);
1942 if(id!=CODEC_ID_NONE) return id;
1944 return CODEC_ID_NONE;
/**
 * Fill in missing chapter end times: each chapter ends where the next
 * one starts; the final chapter ends at start_time + duration.
 * NOTE(review): elided extraction — some body lines are missing.
 */
1947 static void compute_chapters_end(AVFormatContext *s)
1951 for (i=0; i+1<s->nb_chapters; i++)
1952 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
/* chapters are assumed sorted and share a time base with their successor */
1953 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1954 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1955 s->chapters[i]->end = s->chapters[i+1]->start;
1958 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1959 assert(s->start_time != AV_NOPTS_VALUE);
1960 assert(s->duration > 0);
1961 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1963 s->chapters[i]->time_base);
1967 /* absolute maximum size we read until we abort */
1968 #define MAX_READ_SIZE 5000000
/* number of standard timebases probed by the framerate guesser below */
1970 #define MAX_STD_TIMEBASES (60*12+5)
/**
 * Map index i to a candidate standard framerate numerator (in units of
 * 1/(1001*12) s): i < 60*12 covers i/1.001 fps in 1/12 steps; the last
 * five entries cover exact 24/30/60/12/15 fps.
 */
1971 static int get_std_framerate(int i){
1972 if(i<60*12) return i*1001;
1973 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
1977 * Is the time base unreliable.
1978 * This is a heuristic to balance between quick acceptance of the values in
1979 * the headers vs. some extra checks.
1980 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1981 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1982 * And there are "variable" fps files this needs to detect as well.
/* returns nonzero when the codec time base should not be trusted:
   implied fps above ~101 or below 5, or MPEG-2 video */
1984 static int tb_unreliable(AVCodecContext *c){
1985 if( c->time_base.den >= 101L*c->time_base.num
1986 || c->time_base.den < 5L*c->time_base.num
1987 /* || c->codec_tag == ff_get_fourcc("DIVX")
1988 || c->codec_tag == ff_get_fourcc("XVID")*/
1989 || c->codec_id == CODEC_ID_MPEG2VIDEO)
/**
 * Probe the input by reading packets until every stream has complete
 * codec parameters (or limits are hit), estimating framerates from DTS
 * deltas, extracting extradata via the parser's split callback, and
 * finally estimating timings and chapter ends.
 *
 * NOTE(review): this chunk is an elided extraction — many lines are
 * missing from the body, so the loop structure shown here is incomplete.
 */
1994 int av_find_stream_info(AVFormatContext *ic)
1996 int i, count, ret, read_size, j;
1998 AVPacket pkt1, *pkt;
1999 int64_t last_dts[MAX_STREAMS];
2000 int duration_count[MAX_STREAMS]={0};
2001 double (*duration_error)[MAX_STD_TIMEBASES];
2002 offset_t old_offset = url_ftell(ic->pb);
2003 int64_t codec_info_duration[MAX_STREAMS]={0};
2004 int codec_info_nb_frames[MAX_STREAMS]={0};
/* per-stream, per-candidate-framerate squared-error accumulator */
2006 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2007 if (!duration_error) return AVERROR(ENOMEM);
2009 for(i=0;i<ic->nb_streams;i++) {
2010 st = ic->streams[i];
2011 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2012 /* if(!st->time_base.num)
2014 if(!st->codec->time_base.num)
2015 st->codec->time_base= st->time_base;
2017 //only for the split stuff
2019 st->parser = av_parser_init(st->codec->codec_id);
2020 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2021 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2026 for(i=0;i<MAX_STREAMS;i++){
2027 last_dts[i]= AV_NOPTS_VALUE;
2033 /* check if one codec still needs to be handled */
2034 for(i=0;i<ic->nb_streams;i++) {
2035 st = ic->streams[i];
2036 if (!has_codec_parameters(st->codec))
2038 /* variable fps and no guess at the real fps */
2039 if( tb_unreliable(st->codec)
2040 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2042 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2044 if(st->first_dts == AV_NOPTS_VALUE)
2047 if (i == ic->nb_streams) {
2048 /* NOTE: if the format has no header, then we need to read
2049 some packets to get most of the streams, so we cannot
2051 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2052 /* if we found the info for all the codecs, we can stop */
2057 /* we did not get all the codec info, but we read too much data */
2058 if (read_size >= MAX_READ_SIZE) {
2063 /* NOTE: a new stream can be added there if no header in file
2064 (AVFMTCTX_NOHEADER) */
2065 ret = av_read_frame_internal(ic, &pkt1);
2068 ret = -1; /* we could not have all the codec parameters before EOF */
2069 for(i=0;i<ic->nb_streams;i++) {
2070 st = ic->streams[i];
2071 if (!has_codec_parameters(st->codec)){
2073 avcodec_string(buf, sizeof(buf), st->codec, 0);
2074 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
/* keep the packet buffered so av_read_frame() can replay it later */
2082 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2083 if(av_dup_packet(pkt) < 0) {
2084 av_free(duration_error);
2085 return AVERROR(ENOMEM);
2088 read_size += pkt->size;
2090 st = ic->streams[pkt->stream_index];
2091 if(codec_info_nb_frames[st->index]>1)
2092 codec_info_duration[st->index] += pkt->duration;
2093 if (pkt->duration != 0)
2094 codec_info_nb_frames[st->index]++;
/* accumulate DTS-delta error against each candidate standard framerate */
2097 int index= pkt->stream_index;
2098 int64_t last= last_dts[index];
2099 int64_t duration= pkt->dts - last;
2101 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2102 double dur= duration * av_q2d(st->time_base);
2104 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2105 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2106 if(duration_count[index] < 2)
2107 memset(duration_error[index], 0, sizeof(*duration_error));
2108 for(i=1; i<MAX_STD_TIMEBASES; i++){
2109 int framerate= get_std_framerate(i);
2110 int ticks= lrintf(dur*framerate/(1001*12));
2111 double error= dur - ticks*1001*12/(double)framerate;
2112 duration_error[index][i] += error*error;
2114 duration_count[index]++;
2116 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2117 last_dts[pkt->stream_index]= pkt->dts;
/* extract extradata (e.g. codec headers) via the parser split callback */
2119 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2120 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2122 st->codec->extradata_size= i;
2123 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2124 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2125 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2129 /* if still no information, we try to open the codec and to
2130 decompress the frame. We try to avoid that in most cases as
2131 it takes longer and uses more memory. For MPEG-4, we need to
2132 decompress for QuickTime. */
2133 if (!has_codec_parameters(st->codec) /*&&
2134 (st->codec->codec_id == CODEC_ID_FLV1 ||
2135 st->codec->codec_id == CODEC_ID_H264 ||
2136 st->codec->codec_id == CODEC_ID_H263 ||
2137 st->codec->codec_id == CODEC_ID_H261 ||
2138 st->codec->codec_id == CODEC_ID_VORBIS ||
2139 st->codec->codec_id == CODEC_ID_MJPEG ||
2140 st->codec->codec_id == CODEC_ID_PNG ||
2141 st->codec->codec_id == CODEC_ID_PAM ||
2142 st->codec->codec_id == CODEC_ID_PGM ||
2143 st->codec->codec_id == CODEC_ID_PGMYUV ||
2144 st->codec->codec_id == CODEC_ID_PBM ||
2145 st->codec->codec_id == CODEC_ID_PPM ||
2146 st->codec->codec_id == CODEC_ID_SHORTEN ||
2147 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2148 try_decode_frame(st, pkt->data, pkt->size);
/* stop probing once enough stream time has been analyzed */
2150 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2156 // close codecs which were opened in try_decode_frame()
2157 for(i=0;i<ic->nb_streams;i++) {
2158 st = ic->streams[i];
2159 if(st->codec->codec)
2160 avcodec_close(st->codec);
2162 for(i=0;i<ic->nb_streams;i++) {
2163 st = ic->streams[i];
2164 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2165 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
2166 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
/* pick r_frame_rate: the candidate framerate with the smallest
   accumulated error, if it beats a threshold */
2168 if(duration_count[i]
2169 && tb_unreliable(st->codec) /*&&
2170 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2171 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2172 double best_error= 2*av_q2d(st->time_base);
2173 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2175 for(j=1; j<MAX_STD_TIMEBASES; j++){
2176 double error= duration_error[i][j] * get_std_framerate(j);
2177 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2178 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2179 if(error < best_error){
2181 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
2186 if (!st->r_frame_rate.num){
/* fall back to the larger of codec time base and stream time base */
2187 if( st->codec->time_base.den * (int64_t)st->time_base.num
2188 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2189 st->r_frame_rate.num = st->codec->time_base.den;
2190 st->r_frame_rate.den = st->codec->time_base.num;
2192 st->r_frame_rate.num = st->time_base.den;
2193 st->r_frame_rate.den = st->time_base.num;
2196 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2197 if(!st->codec->bits_per_sample)
2198 st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
2202 av_estimate_timings(ic, old_offset);
2204 compute_chapters_end(ic);
2207 /* correct DTS for B-frame streams with no timestamps */
2208 for(i=0;i<ic->nb_streams;i++) {
2209 st = ic->streams[i];
2210 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2212 ppktl = &ic->packet_buffer;
2214 if(ppkt1->stream_index != i)
2216 if(ppkt1->pkt->dts < 0)
2218 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2220 ppkt1->pkt->dts -= delta;
2225 st->cur_dts -= delta;
2231 av_free(duration_error);
2236 /*******************************************************/
/**
 * Resume a paused network stream: prefer the demuxer's read_play hook,
 * otherwise un-pause the underlying protocol; ENOSYS if neither exists.
 */
2238 int av_read_play(AVFormatContext *s)
2240 if (s->iformat->read_play)
2241 return s->iformat->read_play(s);
2243 return av_url_read_fpause(s->pb, 0);
2244 return AVERROR(ENOSYS);
/**
 * Pause a network stream: prefer the demuxer's read_pause hook,
 * otherwise pause the underlying protocol; ENOSYS if neither exists.
 */
2247 int av_read_pause(AVFormatContext *s)
2249 if (s->iformat->read_pause)
2250 return s->iformat->read_pause(s);
2252 return av_url_read_fpause(s->pb, 1);
2253 return AVERROR(ENOSYS);
/**
 * Free all demuxer state attached to the context: pending packet,
 * per-stream data (parsers, index, extradata), programs, packet queue,
 * private data and chapters.
 * NOTE(review): elided extraction — some body lines are missing.
 */
2256 void av_close_input_stream(AVFormatContext *s)
2261 /* free previous packet */
2262 if (s->cur_st && s->cur_st->parser)
2263 av_free_packet(&s->cur_pkt);
2265 if (s->iformat->read_close)
2266 s->iformat->read_close(s);
2267 for(i=0;i<s->nb_streams;i++) {
2268 /* free all data in a stream component */
2271 av_parser_close(st->parser);
2273 av_free(st->index_entries);
2274 av_free(st->codec->extradata);
2276 av_free(st->filename);
2277 av_free(st->priv_data);
/* free programs back-to-front, each with its owned strings and index array */
2280 for(i=s->nb_programs-1; i>=0; i--) {
2281 av_freep(&s->programs[i]->provider_name);
2282 av_freep(&s->programs[i]->name);
2283 av_freep(&s->programs[i]->stream_index);
2284 av_freep(&s->programs[i]);
2286 av_freep(&s->programs);
2287 flush_packet_queue(s);
2288 av_freep(&s->priv_data);
2289 while(s->nb_chapters--) {
2290 av_free(s->chapters[s->nb_chapters]->title);
2291 av_free(s->chapters[s->nb_chapters]);
2293 av_freep(&s->chapters);
/**
 * Close an input file: grab the ByteIOContext first (NULL for AVFMT_NOFILE
 * formats, which own no I/O context), then tear down the stream state.
 */
2297 void av_close_input_file(AVFormatContext *s)
2299 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2300 av_close_input_stream(s);
/**
 * Allocate and register a new AVStream on the context with sane defaults
 * (MPEG-like 90 kHz pts, unknown start/duration, empty pts buffer).
 * NOTE(review): elided extraction — some body lines (e.g. the NULL
 * checks and st->id assignment) are missing.
 */
2305 AVStream *av_new_stream(AVFormatContext *s, int id)
2310 if (s->nb_streams >= MAX_STREAMS)
2313 st = av_mallocz(sizeof(AVStream));
2317 st->codec= avcodec_alloc_context();
2319 /* no default bitrate if decoding */
2320 st->codec->bit_rate = 0;
2322 st->index = s->nb_streams;
2324 st->start_time = AV_NOPTS_VALUE;
2325 st->duration = AV_NOPTS_VALUE;
2326 /* we set the current DTS to 0 so that formats without any timestamps
2327 but durations get some timestamps, formats with some unknown
2328 timestamps have their first few packets buffered and the
2329 timestamps corrected before they are returned to the user */
2331 st->first_dts = AV_NOPTS_VALUE;
2333 /* default pts setting is MPEG-like */
2334 av_set_pts_info(st, 33, 1, 90000);
2335 st->last_IP_pts = AV_NOPTS_VALUE;
2336 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2337 st->pts_buffer[i]= AV_NOPTS_VALUE;
2339 st->sample_aspect_ratio = (AVRational){0,1};
2341 s->streams[s->nb_streams++] = st;
/**
 * Return the program with the given id, creating and registering a new
 * one (with discard = AVDISCARD_NONE) if it does not exist yet.
 * NOTE(review): elided extraction — some body lines are missing.
 */
2345 AVProgram *av_new_program(AVFormatContext *ac, int id)
2347 AVProgram *program=NULL;
2351 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
/* reuse an existing program with the same id if present */
2354 for(i=0; i<ac->nb_programs; i++)
2355 if(ac->programs[i]->id == id)
2356 program = ac->programs[i];
2359 program = av_mallocz(sizeof(AVProgram));
2362 dynarray_add(&ac->programs, &ac->nb_programs, program);
2363 program->discard = AVDISCARD_NONE;
/**
 * Replace a program's provider_name and name with copies of the given
 * strings; both must be NULL or both non-NULL (asserted).  Previous
 * values are freed.
 */
2370 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2372 assert(!provider_name == !name)
2374 av_free(program->provider_name);
2375 av_free(program-> name);
2376 program->provider_name = av_strdup(provider_name);
2377 program-> name = av_strdup( name);
/**
 * Create or update the chapter with the given id: (re)sets its title,
 * time base, and start; registers it in s->chapters if new.
 * NOTE(review): elided extraction — some body lines (e.g. setting
 * chapter->end and the return) are missing.
 */
2381 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2383 AVChapter *chapter = NULL;
/* reuse an existing chapter with the same id if present */
2386 for(i=0; i<s->nb_chapters; i++)
2387 if(s->chapters[i]->id == id)
2388 chapter = s->chapters[i];
2391 chapter= av_mallocz(sizeof(AVChapter));
2394 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2396 av_free(chapter->title);
2397 chapter->title = av_strdup(title);
2399 chapter->time_base= time_base;
2400 chapter->start = start;
2406 /************************************************************/
2407 /* output media file */
/**
 * Allocate the muxer's private data and forward the format parameters
 * to its set_parameters hook, if any.
 * NOTE(review): elided extraction — some body lines are missing.
 */
2409 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2413 if (s->oformat->priv_data_size > 0) {
2414 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2416 return AVERROR(ENOMEM);
2418 s->priv_data = NULL;
2420 if (s->oformat->set_parameters) {
2421 ret = s->oformat->set_parameters(s, ap);
/**
 * Validate per-stream parameters, fix up codec tags, allocate muxer
 * private data if needed, call the muxer's write_header, and initialize
 * per-stream fractional PTS generation.
 * NOTE(review): elided extraction — some body lines are missing.
 */
2428 int av_write_header(AVFormatContext *s)
2433 // some sanity checks
2434 for(i=0;i<s->nb_streams;i++) {
2437 switch (st->codec->codec_type) {
2438 case CODEC_TYPE_AUDIO:
2439 if(st->codec->sample_rate<=0){
2440 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2444 case CODEC_TYPE_VIDEO:
2445 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2446 av_log(s, AV_LOG_ERROR, "time base not set\n");
2449 if(st->codec->width<=0 || st->codec->height<=0){
2450 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2456 if(s->oformat->codec_tag){
2457 if(st->codec->codec_tag){
2459 //check that tag + id is in the table
2460 //if neither is in the table -> OK
2461 //if tag is in the table with another id -> FAIL
2462 //if id is in the table with another tag -> FAIL unless strict < ?
2464 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2468 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2469 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2471 return AVERROR(ENOMEM);
2474 if(s->oformat->write_header){
2475 ret = s->oformat->write_header(s);
2480 /* init PTS generation */
2481 for(i=0;i<s->nb_streams;i++) {
2482 int64_t den = AV_NOPTS_VALUE;
2485 switch (st->codec->codec_type) {
2486 case CODEC_TYPE_AUDIO:
/* audio pts advances by frame_size samples per packet */
2487 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2489 case CODEC_TYPE_VIDEO:
2490 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2495 if (den != AV_NOPTS_VALUE) {
2497 return AVERROR_INVALIDDATA;
2498 av_frac_init(&st->pts, 0, 0, den);
2504 //FIXME merge with compute_pkt_fields
/**
 * Fill in missing duration/pts/dts on an outgoing packet, enforce
 * timestamp monotonicity, and advance the stream's fractional pts.
 * NOTE(review): elided extraction — some body lines are missing.
 */
2505 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2506 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2507 int num, den, frame_size, i;
2509 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2511 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2514 /* duration field */
2515 if (pkt->duration == 0) {
2516 compute_frame_duration(&num, &den, st, NULL, pkt);
2518 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
/* no B-frames: dts can stand in for a missing pts */
2522 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2525 //XXX/FIXME this is a temporary hack until all encoders output pts
2526 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2528 // pkt->pts= st->cur_dts;
2529 pkt->pts= st->pts.val;
2532 //calculate dts from pts
2533 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
/* reorder buffer: the smallest buffered pts becomes this packet's dts */
2534 st->pts_buffer[0]= pkt->pts;
2535 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2536 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2537 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2538 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2540 pkt->dts= st->pts_buffer[0];
2543 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2544 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2547 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2548 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2552 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2553 st->cur_dts= pkt->dts;
2554 st->pts.val= pkt->dts;
/* advance the fractional pts counter by one frame's worth */
2557 switch (st->codec->codec_type) {
2558 case CODEC_TYPE_AUDIO:
2559 frame_size = get_audio_frame_size(st->codec, pkt->size);
2561 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2562 likely equal to the encoder delay, but it would be better if we
2563 had the real timestamps from the encoder */
2564 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2565 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2568 case CODEC_TYPE_VIDEO:
2569 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/**
 * Write one packet directly to the muxer (no interleaving): compute
 * missing timestamp fields, call write_packet, report I/O errors.
 * NOTE(review): elided extraction — some body lines are missing.
 */
2577 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2579 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
/* timestamp problems are only fatal for formats that need timestamps */
2581 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2584 ret= s->oformat->write_packet(s, pkt);
2586 ret= url_ferror(s->pb);
/**
 * Interleave packets by dts: buffer the incoming packet in dts order in
 * s->packet_buffer, and emit the head packet once every stream has at
 * least one buffered packet (or when flushing).
 * NOTE(review): elided extraction — some body lines are missing.
 */
2590 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2591 AVPacketList *pktl, **next_point, *this_pktl;
2593 int streams[MAX_STREAMS];
2596 AVStream *st= s->streams[ pkt->stream_index];
2598 // assert(pkt->destruct != av_destruct_packet); //FIXME
2600 this_pktl = av_mallocz(sizeof(AVPacketList));
2601 this_pktl->pkt= *pkt;
/* take ownership: either steal the buffer or duplicate shared data */
2602 if(pkt->destruct == av_destruct_packet)
2603 pkt->destruct= NULL; // not shared -> must keep original from being freed
2605 av_dup_packet(&this_pktl->pkt); //shared -> must dup
/* insert into the buffer list sorted by dts (cross-multiplied time bases) */
2607 next_point = &s->packet_buffer;
2609 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2610 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2611 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2612 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2614 next_point= &(*next_point)->next;
2616 this_pktl->next= *next_point;
2617 *next_point= this_pktl;
/* count how many distinct streams have buffered packets */
2620 memset(streams, 0, sizeof(streams));
2621 pktl= s->packet_buffer;
2623 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2624 if(streams[ pktl->pkt.stream_index ] == 0)
2626 streams[ pktl->pkt.stream_index ]++;
2630 if(stream_count && (s->nb_streams == stream_count || flush)){
2631 pktl= s->packet_buffer;
2634 s->packet_buffer= pktl->next;
2638 av_init_packet(out);
2644 * Interleaves an AVPacket correctly so it can be muxed.
2645 * @param out the interleaved packet will be output here
2646 * @param in the input packet
2647 * @param flush 1 if no further packets are available as input and all
2648 * remaining packets should be output
2649 * @return 1 if a packet was output, 0 if no packet could be output,
2650 * < 0 if an error occurred
/* dispatch: muxer-specific interleaver if provided, else dts-based */
2652 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2653 if(s->oformat->interleave_packet)
2654 return s->oformat->interleave_packet(s, out, in, flush);
2656 return av_interleave_packet_per_dts(s, out, in, flush);
/**
 * Public muxing entry point with interleaving: fix up packet fields,
 * feed the packet to the interleaver, and write out every packet the
 * interleaver releases.
 * NOTE(review): elided extraction — some body lines are missing.
 */
2659 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2660 AVStream *st= s->streams[ pkt->stream_index];
2662 //FIXME/XXX/HACK drop zero sized packets
2663 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2666 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2667 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2670 if(pkt->dts == AV_NOPTS_VALUE)
/* drain the interleaver until it has nothing more to emit */
2675 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2676 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2679 ret= s->oformat->write_packet(s, &opkt);
2681 av_free_packet(&opkt);
2686 if(url_ferror(s->pb))
2687 return url_ferror(s->pb);
/**
 * Finish muxing: flush all remaining interleaved packets, call the
 * muxer's write_trailer, and free per-stream and muxer private data.
 * NOTE(review): elided extraction — some body lines are missing.
 */
2691 int av_write_trailer(AVFormatContext *s)
/* flush the interleaver (flush=1) until it is empty */
2697 ret= av_interleave_packet(s, &pkt, NULL, 1);
2698 if(ret<0) //FIXME cleanup needed for ret<0 ?
2703 ret= s->oformat->write_packet(s, &pkt);
2705 av_free_packet(&pkt);
2709 if(url_ferror(s->pb))
2713 if(s->oformat->write_trailer)
2714 ret = s->oformat->write_trailer(s);
2717 ret=url_ferror(s->pb);
2718 for(i=0;i<s->nb_streams;i++)
2719 av_freep(&s->streams[i]->priv_data);
2720 av_freep(&s->priv_data);
/**
 * Add a stream index to the program with the given id, growing its
 * stream_index array; a duplicate index is not added twice.
 * NOTE(review): elided extraction — some body lines are missing.
 */
2724 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2727 AVProgram *program=NULL;
2730 for(i=0; i<ac->nb_programs; i++){
2731 if(ac->programs[i]->id != progid)
2733 program = ac->programs[i];
/* skip if the index is already registered for this program */
2734 for(j=0; j<program->nb_stream_indexes; j++)
2735 if(program->stream_index[j] == idx)
2738 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2741 program->stream_index = tmp;
2742 program->stream_index[program->nb_stream_indexes++] = idx;
2747 /* "user interface" functions */
/**
 * Logs a one-line human-readable description of stream 'i' of 'ic'
 * (" Stream #index.i[0xid](lang): codec description ...").
 *
 * @param ic        format context owning the stream
 * @param i         stream index within ic->streams
 * @param index     file index printed before the stream index
 * @param is_output nonzero when ic is a muxing context
 *
 * NOTE(review): elided excerpt — the local 'buf' declaration and some
 * closing braces are not visible.
 */
2748 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2751 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2752 AVStream *st = ic->streams[i];
     /* reduce the time base for display; NOTE(review): if num and den were
      * both 0, g would be 0 and the divisions below would be undefined —
      * presumably callers guarantee a valid time base */
2753 int g = ff_gcd(st->time_base.num, st->time_base.den);
2754 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2755 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2756 /* the pid is an important information, so we display it */
2757 /* XXX: add a generic system */
2758 if (flags & AVFMT_SHOW_IDS)
2759 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2760 if (strlen(st->language) > 0)
2761 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
     /* the reduced time base is only emitted at debug verbosity */
2762 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2763 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2764 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
     /* prefer the real frame rate (tb(r)) when it is known */
2765 if(st->r_frame_rate.den && st->r_frame_rate.num)
2766 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2767 /* else if(st->time_base.den && st->time_base.num)
2768 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
2770 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2772 av_log(NULL, AV_LOG_INFO, "\n");
/**
 * Logs a human-readable summary of a media file: format name, duration,
 * start time, bitrate, programs and per-stream details.
 *
 * NOTE(review): elided excerpt — the remaining parameters of the signature
 * (file index, url, is_output) and several braces/else keywords are not
 * visible; comments cover the visible lines only.
 */
2775 void dump_format(AVFormatContext *ic,
2782 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2783 is_output ? "Output" : "Input",
2785 is_output ? ic->oformat->name : ic->iformat->name,
2786 is_output ? "to" : "from", url);
2788 av_log(NULL, AV_LOG_INFO, " Duration: ");
2789 if (ic->duration != AV_NOPTS_VALUE) {
2790 int hours, mins, secs, us;
     /* split the AV_TIME_BASE-scaled duration into seconds + remainder */
2791 secs = ic->duration / AV_TIME_BASE;
2792 us = ic->duration % AV_TIME_BASE;
     /* hundredths of a second are derived from the microsecond remainder */
2797 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2798 (100 * us) / AV_TIME_BASE);
2800 av_log(NULL, AV_LOG_INFO, "N/A");
2802 if (ic->start_time != AV_NOPTS_VALUE) {
2804 av_log(NULL, AV_LOG_INFO, ", start: ");
2805 secs = ic->start_time / AV_TIME_BASE;
2806 us = ic->start_time % AV_TIME_BASE;
     /* print start time as seconds.microseconds */
2807 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2808 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2810 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2812 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2814 av_log(NULL, AV_LOG_INFO, "N/A");
2816 av_log(NULL, AV_LOG_INFO, "\n");
     /* when programs are present, group the streams by program */
2818 if(ic->nb_programs) {
2820 for(j=0; j<ic->nb_programs; j++) {
2821 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2822 ic->programs[j]->name ? ic->programs[j]->name : "");
2823 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2824 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
     /* otherwise dump every stream in declaration order */
2827 for(i=0;i<ic->nb_streams;i++)
2828 dump_stream_format(ic, i, index, is_output);
/**
 * Parses a frame-size string (e.g. "WIDTHxHEIGHT" or an abbreviation)
 * into separate width/height integers. Thin compatibility wrapper.
 *
 * @return the result of av_parse_video_frame_size()
 */
int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
{
    return av_parse_video_frame_size(width_ptr, height_ptr, str);
}
2836 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2838 AVRational frame_rate;
2839 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2840 *frame_rate_num= frame_rate.num;
2841 *frame_rate_den= frame_rate.den;
2846 * Gets the current time in microseconds.
2848 int64_t av_gettime(void)
2851 gettimeofday(&tv,NULL);
2852 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/**
 * Parses 'datestr' and returns a value in microseconds: an absolute time
 * when 'duration' is 0, or a relative duration when 'duration' is nonzero.
 *
 * NOTE(review): elided excerpt — variable declarations, the contents of
 * the format tables and many branch bodies are not visible; comments
 * cover only the visible lines.
 */
2855 int64_t parse_date(const char *datestr, int duration)
     /* accepted date and time layouts (table contents elided) */
2861 static const char * const date_fmt[] = {
2865 static const char * const time_fmt[] = {
2875 time_t now = time(0);
2877 len = strlen(datestr);
     /* NOTE(review): reads the last character — presumably guarded against
      * an empty string by elided code; confirm */
2879 lastch = datestr[len - 1];
     /* a trailing 'z'/'Z' marks the timestamp as UTC */
2882 is_utc = (lastch == 'z' || lastch == 'Z');
2884 memset(&dt, 0, sizeof(dt));
2889 /* parse the year-month-day part */
2890 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2891 q = small_strptime(p, date_fmt[i], &dt);
2897 /* if the year-month-day part is missing, then take the
2898 * current year-month-day time */
     /* NOTE(review): localtime() uses static storage — not thread-safe */
2903 dt = *localtime(&now);
2905 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
     /* skip the date/time separator */
2910 if (*p == 'T' || *p == 't' || *p == ' ')
2913 /* parse the hour-minute-second part */
2914 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2915 q = small_strptime(p, time_fmt[i], &dt);
2921 /* parse datestr as a duration */
2926 /* parse datestr as HH:MM:SS */
2927 q = small_strptime(p, time_fmt[0], &dt);
2929 /* parse datestr as S+ */
2930 dt.tm_sec = strtol(p, (char **)&q, 10);
2932 /* the parsing didn't succeed */
2939 /* Now we have all the fields that we can get */
     /* durations reduce to a plain number of seconds */
2945 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2947 dt.tm_isdst = -1; /* unknown */
2957 /* parse the .m... part */
     /* up to 6 fractional digits, scaled to microseconds */
2961 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2964 val += n * (*q - '0');
2968 return negative ? -t : t;
/**
 * Searches 'info' (an URL-style "?tag1=val1&tag2=val2" string) for the tag
 * named 'tag1' and, when found, copies its value into 'arg' (which holds
 * 'arg_size' bytes).
 *
 * NOTE(review): elided excerpt — declarations, the actual copy statements
 * and the return paths are not visible; comments cover the visible lines
 * only.
 */
2971 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
     /* scan the tag name up to '=', '&' or end of string (copy is bounded
      * by the local 'tag' buffer, declaration elided) */
2981 while (*p != '\0' && *p != '=' && *p != '&') {
2982 if ((q - tag) < sizeof(tag) - 1)
     /* scan the value up to the next '&', bounded by arg_size */
2990 while (*p != '&' && *p != '\0') {
2991 if ((q - arg) < arg_size - 1) {
     /* stop once the requested tag has been matched */
3001 if (!strcmp(tag, tag1))
/**
 * Builds a frame filename from a printf-like pattern: the "%d" or "%0Nd"
 * placeholder in 'path' is replaced by 'number' and the result is written
 * to 'buf' (of 'buf_size' bytes).
 *
 * NOTE(review): elided excerpt — the main scanning loop, copy statements
 * and the error returns are only partly visible.
 */
3010 int av_get_frame_filename(char *buf, int buf_size,
3011 const char *path, int number)
3014 char *q, buf1[20], c;
3015 int nd, len, percentd_found;
     /* accumulate the zero-pad width N of "%0Nd";
      * NOTE(review): isdigit() on a plain char — casting to unsigned char
      * would be safer for bytes outside the ASCII range */
3027 while (isdigit(*p)) {
3028 nd = nd * 10 + *p++ - '0';
3031 } while (isdigit(c));
     /* render the number with the requested zero padding */
3040 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
     /* bail out if the expansion would overflow buf */
3042 if ((q - buf + len) > buf_size - 1)
3044 memcpy(q, buf1, len);
3052 if ((q - buf) < buf_size - 1)
     /* a pattern without any %d placeholder is an error */
3056 if (!percentd_found)
/**
 * Writes a hex dump of 'buf' (16 bytes per row: hex byte column plus a
 * printable-ASCII column) either to the stdio stream 'f' or, when 'f' is
 * NULL, to av_log with the given context and level.
 *
 * NOTE(review): elided excerpt — row offsets, padding, the #undef and the
 * closing braces are not visible; comments cover the visible lines only.
 */
3065 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
     /* route output either to av_log (f == NULL) or to the stdio stream */
3068 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3070 for(i=0;i<size;i+=16) {
3077 PRINT(" %02x", buf[i+j]);
     /* ASCII column: bytes outside the printable range ' '..'~' are
      * substituted (substitution line elided) */
3082 for(j=0;j<len;j++) {
3084 if (c < ' ' || c > '~')
/**
 * Public entry point: hex-dumps 'size' bytes of 'buf' to the stdio
 * stream 'f' via hex_dump_internal (no log context, level 0).
 */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
{
    hex_dump_internal(NULL, f, 0, buf, size);
}
3098 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3100 hex_dump_internal(avcl, NULL, level, buf, size);
3103 //FIXME needs to know the time_base
/**
 * Logs the fields of an AVPacket (stream index, keyframe flag, duration,
 * dts, pts, size) and optionally a hex dump of its payload, either to the
 * stdio stream 'f' or, when 'f' is NULL, to av_log.
 *
 * NOTE(review): elided excerpt — the "dts="/"pts=" labels, the NOPTS
 * branches, the dump_payload guard and the #undef are not visible.
 */
3104 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
     /* route output either to av_log (f == NULL) or to the stdio stream */
3106 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3107 PRINT("stream #%d:\n", pkt->stream_index);
3108 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
     /* timestamps are rendered in seconds assuming AV_TIME_BASE units
      * (see FIXME above: the real stream time_base is not known here) */
3109 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3110 /* DTS is _always_ valid after av_read_frame() */
3112 if (pkt->dts == AV_NOPTS_VALUE)
3115 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3116 /* PTS may not be known if B-frames are present. */
3118 if (pkt->pts == AV_NOPTS_VALUE)
3121 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3123 PRINT(" size=%d\n", pkt->size);
     /* optionally dump the raw payload bytes */
3126 av_hex_dump(f, pkt->data, pkt->size);
3129 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3131 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
3134 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3136 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
/**
 * Splits an URL of the form proto://user:pass@host:port/path into its
 * components. Each component is copied into its caller-provided buffer
 * with av_strlcpy, bounded by the corresponding size argument.
 *
 * NOTE(review): elided excerpt — the final parameters of the signature
 * (the port pointer and the url itself) and several braces/else keywords
 * are not visible; comments cover the visible lines only.
 */
3139 void url_split(char *proto, int proto_size,
3140 char *authorization, int authorization_size,
3141 char *hostname, int hostname_size,
3143 char *path, int path_size,
3146 const char *p, *ls, *at, *col, *brk;
     /* initialize every output to "not present" */
3148 if (port_ptr) *port_ptr = -1;
3149 if (proto_size > 0) proto[0] = 0;
3150 if (authorization_size > 0) authorization[0] = 0;
3151 if (hostname_size > 0) hostname[0] = 0;
3152 if (path_size > 0) path[0] = 0;
3154 /* parse protocol */
3155 if ((p = strchr(url, ':'))) {
3156 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3161 /* no protocol means plain filename */
3162 av_strlcpy(path, url, path_size);
3166 /* separate path from hostname */
3167 ls = strchr(p, '/');
     /* a query string with no '/' also starts the "path" component */
3169 ls = strchr(p, '?');
3171 av_strlcpy(path, ls, path_size);
     /* no path at all: treat end-of-string as the path boundary */
3173 ls = &p[strlen(p)]; // XXX
3175 /* the rest is hostname, use that to parse auth/port */
3177 /* authorization (user[:pass]@hostname) */
3178 if ((at = strchr(p, '@')) && at < ls) {
3179 av_strlcpy(authorization, p,
3180 FFMIN(authorization_size, at + 1 - p));
3181 p = at + 1; /* skip '@' */
     /* "[...]" brackets delimit a numeric IPv6 host, optional :port after */
3184 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3186 av_strlcpy(hostname, p + 1,
3187 FFMIN(hostname_size, brk - p));
3188 if (brk[1] == ':' && port_ptr)
3189 *port_ptr = atoi(brk + 2);
     /* plain host:port form */
3190 } else if ((col = strchr(p, ':')) && col < ls) {
3191 av_strlcpy(hostname, p,
3192 FFMIN(col + 1 - p, hostname_size));
3193 if (port_ptr) *port_ptr = atoi(col + 1);
     /* bare hostname, no port */
3195 av_strlcpy(hostname, p,
3196 FFMIN(ls + 1 - p, hostname_size));
/**
 * Writes the uppercase hexadecimal representation of 's' bytes from 'src'
 * into 'buff': two characters per input byte, with NO terminating NUL
 * appended (the caller must terminate or size the buffer accordingly).
 *
 * @param buff destination buffer, must hold at least 2*s characters
 * @param src  input bytes
 * @param s    number of input bytes to convert
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
{
    static const char digits[] = "0123456789ABCDEF";
    char *w = buff;
    int i;

    for (i = 0; i < s; i++) {
        *w++ = digits[src[i] >> 4];
        *w++ = digits[src[i] & 0x0F];
    }
    return buff;
}
/**
 * Sets the stream's pts wrap bits and time base; the time base is stored
 * as pts_num/pts_den reduced by their greatest common divisor.
 *
 * NOTE(review): elided excerpt — the condition guarding the debug log and
 * the closing brace are beyond this view. Also note that gcd would be 0
 * if both pts_num and pts_den were 0, making the divisions below
 * undefined; presumably callers always pass a valid time base — confirm.
 */
3216 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3217 int pts_num, int pts_den)
3219 unsigned int gcd= ff_gcd(pts_num, pts_den);
3220 s->pts_wrap_bits = pts_wrap_bits;
     /* store the reduced fraction as the stream time base */
3221 s->time_base.num = pts_num/gcd;
3222 s->time_base.den = pts_den/gcd;
3225 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);