2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libavcodec/opt.h"
25 #include "libavutil/avstring.h"
35 * @file libavformat/utils.c
36 * various utility functions for use within FFmpeg
39 unsigned avformat_version(void)
41 return LIBAVFORMAT_VERSION_INT;
44 const char *avformat_configuration(void)
46 return FFMPEG_CONFIGURATION;
49 const char *avformat_license(void)
51 #define LICENSE_PREFIX "libavformat license: "
52 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/*
 * NOTE(review): fragmentary excerpt of a numbered listing -- interior source
 * lines (including most of both function bodies) are missing. Comments here
 * describe only what the visible fragments show; confirm against full source.
 * The AVFrac helpers appear to maintain an exact rational position
 * val + num/den -- presumably for sample-accurate timestamp tracking (TODO confirm).
 */
55 /* fraction handling */
58 * f = val + (num / den) + 0.5.
60 * 'num' is normalized so that it is such as 0 <= num < den.
62 * @param f fractional number
63 * @param val integer value
64 * @param num must be >= 0
65 * @param den must be >= 1
67 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
80 * Fractional addition to f: f = f + (incr / f->den).
82 * @param f fractional number
83 * @param incr increment, can be positive or negative
85 static void av_frac_add(AVFrac *f, int64_t incr)
98 } else if (num >= den) {
/*
 * File-scope heads of the two singly linked registration lists.
 * The av_register_*_format() fragments below append at the tail by walking
 * the ->next chain, so list order is registration order.
 */
105 /** head of registered input format linked list */
106 AVInputFormat *first_iformat = NULL;
107 /** head of registered output format linked list */
108 AVOutputFormat *first_oformat = NULL;
110 AVInputFormat *av_iformat_next(AVInputFormat *f)
112 if(f) return f->next;
113 else return first_iformat;
116 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
118 if(f) return f->next;
119 else return first_oformat;
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing.
 * Both functions walk a pointer-to-pointer `p` to the list tail and
 * (presumably, in the missing lines) link `format` there -- TODO confirm
 * against full source.
 */
122 void av_register_input_format(AVInputFormat *format)
126 while (*p != NULL) p = &(*p)->next;
131 void av_register_output_format(AVOutputFormat *format)
135 while (*p != NULL) p = &(*p)->next;
/*
 * NOTE(review): fragmentary excerpt -- interior lines (and the matching
 * #endif) are missing. match_ext() is a pre-major-53 compatibility wrapper
 * around av_match_ext(). av_match_ext() takes the substring after the last
 * '.' in filename and compares it case-insensitively against each entry of
 * a comma-separated `extensions` list, copying entries into a bounded local
 * buffer ext1 first.
 */
140 #if LIBAVFORMAT_VERSION_MAJOR < 53
141 int match_ext(const char *filename, const char *extensions)
143 return av_match_ext(filename, extensions);
147 int av_match_ext(const char *filename, const char *extensions)
155 ext = strrchr(filename, '.');
161 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
164 if (!strcasecmp(ext1, ext))
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing.
 * Case-insensitively matches `name` against a comma-separated list `names`;
 * FFMAX(p - names, namelen) ensures a list entry that is merely a prefix of
 * `name` (or vice versa) does not match. The final strcasecmp handles the
 * last (or only) entry, which has no trailing comma.
 */
174 static int match_format(const char *name, const char *names)
182 namelen = strlen(name);
183 while ((p = strchr(names, ','))) {
184 len = FFMAX(p - names, namelen);
185 if (!strncasecmp(name, names, len))
189 return !strcasecmp(name, names);
/*
 * NOTE(review): fragmentary excerpt -- interior lines (including the scoring
 * increments, the list advance, and the matching #endif lines) are missing.
 * guess_format() is the pre-major-53 compatibility wrapper. av_guess_format()
 * walks the registered output-format list and scores each muxer by short
 * name, MIME type and filename extension, keeping the highest scorer; a
 * numbered image-sequence filename is special-cased to the "image2" muxer
 * when the image2 muxer is compiled in.
 */
192 #if LIBAVFORMAT_VERSION_MAJOR < 53
193 AVOutputFormat *guess_format(const char *short_name, const char *filename,
194 const char *mime_type)
196 return av_guess_format(short_name, filename, mime_type);
200 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
201 const char *mime_type)
203 AVOutputFormat *fmt, *fmt_found;
204 int score_max, score;
206 /* specific test for image sequences */
207 #if CONFIG_IMAGE2_MUXER
208 if (!short_name && filename &&
209 av_filename_number_test(filename) &&
210 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
211 return av_guess_format("image2", NULL, NULL);
214 /* Find the proper file type. */
218 while (fmt != NULL) {
220 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
222 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
224 if (filename && fmt->extensions &&
225 av_match_ext(filename, fmt->extensions)) {
228 if (score > score_max) {
237 #if LIBAVFORMAT_VERSION_MAJOR < 53
238 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
239 const char *mime_type)
241 AVOutputFormat *fmt = av_guess_format(short_name, filename, mime_type);
244 AVOutputFormat *stream_fmt;
245 char stream_format_name[64];
247 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
248 stream_fmt = av_guess_format(stream_format_name, NULL, NULL);
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing.
 * Picks the default codec ID for a given stream type from the muxer's
 * defaults: for video, the image2/image2pipe muxers get a codec guessed
 * from the filename, otherwise fmt->video_codec; for audio,
 * fmt->audio_codec; anything else falls through to CODEC_ID_NONE.
 */
258 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
259 const char *filename, const char *mime_type, enum CodecType type){
260 if(type == CODEC_TYPE_VIDEO){
261 enum CodecID codec_id= CODEC_ID_NONE;
263 #if CONFIG_IMAGE2_MUXER
264 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
265 codec_id= av_guess_image2_codec(filename);
268 if(codec_id == CODEC_ID_NONE)
269 codec_id= fmt->video_codec;
271 }else if(type == CODEC_TYPE_AUDIO)
272 return fmt->audio_codec;
274 return CODEC_ID_NONE;
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing.
 * Linear search of the registered demuxer list for one whose (comma-
 * separated) name list matches short_name.
 */
277 AVInputFormat *av_find_input_format(const char *short_name)
280 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
281 if (match_format(short_name, fmt->name))
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing.
 * av_get_packet(): allocates a packet of `size`, records the current byte
 * position, reads up to `size` bytes from the IO context and shrinks the
 * packet to what was actually read.
 * av_filename_number_test(): true iff the filename contains a printf-style
 * frame-number pattern that av_get_frame_filename() can expand.
 */
287 /* memory handling */
290 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
292 int ret= av_new_packet(pkt, size);
297 pkt->pos= url_ftell(s);
299 ret= get_buffer(s, pkt->data, size);
303 av_shrink_packet(pkt, ret);
309 int av_filename_number_test(const char *filename)
312 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing.
 * Walks all registered demuxers, scoring each via read_probe() when present,
 * otherwise by extension match; skips formats whose AVFMT_NOFILE flag does
 * not agree with is_opened. Returns the best scorer and updates *score_max
 * in place. av_probe_input_format() is the public wrapper with a default
 * score threshold (initialisation of `score` is in the missing lines).
 */
315 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
317 AVInputFormat *fmt1, *fmt;
321 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
322 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
325 if (fmt1->read_probe) {
326 score = fmt1->read_probe(pd);
327 } else if (fmt1->extensions) {
328 if (av_match_ext(pd->filename, fmt1->extensions)) {
332 if (score > *score_max) {
335 }else if (score == *score_max)
341 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
343 return av_probe_input_format2(pd, is_opened, &score);
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing.
 * Re-probes a stream whose codec could not be identified at open time
 * (CODEC_ID_PROBE) using accumulated packet data, then maps the detected
 * container/elementary-stream name to a concrete codec id and type via the
 * strcmp chain below.
 */
346 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
349 fmt = av_probe_input_format2(pd, 1, &score);
352 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
353 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
354 if (!strcmp(fmt->name, "mp3")) {
355 st->codec->codec_id = CODEC_ID_MP3;
356 st->codec->codec_type = CODEC_TYPE_AUDIO;
357 } else if (!strcmp(fmt->name, "ac3")) {
358 st->codec->codec_id = CODEC_ID_AC3;
359 st->codec->codec_type = CODEC_TYPE_AUDIO;
360 } else if (!strcmp(fmt->name, "eac3")) {
361 st->codec->codec_id = CODEC_ID_EAC3;
362 st->codec->codec_type = CODEC_TYPE_AUDIO;
363 } else if (!strcmp(fmt->name, "mpegvideo")) {
364 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
365 st->codec->codec_type = CODEC_TYPE_VIDEO;
366 } else if (!strcmp(fmt->name, "m4v")) {
367 st->codec->codec_id = CODEC_ID_MPEG4;
368 st->codec->codec_type = CODEC_TYPE_VIDEO;
369 } else if (!strcmp(fmt->name, "h264")) {
370 st->codec->codec_id = CODEC_ID_H264;
371 st->codec->codec_type = CODEC_TYPE_VIDEO;
372 } else if (!strcmp(fmt->name, "dts")) {
373 st->codec->codec_id = CODEC_ID_DTS;
374 st->codec->codec_type = CODEC_TYPE_AUDIO;
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing, including
 * the success return and the label(s) the error path jumps to.
 * Opens a demuxer on an already-open IO stream: allocates the context
 * (unless ap->prealloced_context), allocates fmt->priv_data, calls the
 * demuxer's read_header(), records the data offset, and on failure frees
 * priv_data plus per-stream allocations (goto-style cleanup).
 */
380 /************************************************************/
381 /* input media file */
384 * Open a media file from an IO stream. 'fmt' must be specified.
386 int av_open_input_stream(AVFormatContext **ic_ptr,
387 ByteIOContext *pb, const char *filename,
388 AVInputFormat *fmt, AVFormatParameters *ap)
392 AVFormatParameters default_ap;
396 memset(ap, 0, sizeof(default_ap));
399 if(!ap->prealloced_context)
400 ic = avformat_alloc_context();
404 err = AVERROR(ENOMEM);
409 ic->duration = AV_NOPTS_VALUE;
410 ic->start_time = AV_NOPTS_VALUE;
411 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
413 /* allocate private data */
414 if (fmt->priv_data_size > 0) {
415 ic->priv_data = av_mallocz(fmt->priv_data_size);
416 if (!ic->priv_data) {
417 err = AVERROR(ENOMEM);
421 ic->priv_data = NULL;
424 if (ic->iformat->read_header) {
425 err = ic->iformat->read_header(ic, ap);
430 if (pb && !ic->data_offset)
431 ic->data_offset = url_ftell(ic->pb);
433 #if LIBAVFORMAT_VERSION_MAJOR < 53
434 ff_metadata_demux_compat(ic);
437 ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
444 av_freep(&ic->priv_data);
445 for(i=0;i<ic->nb_streams;i++) {
446 AVStream *st = ic->streams[i];
448 av_free(st->priv_data);
449 av_free(st->codec->extradata);
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing, including
 * probe-buffer cleanup and the error-path labels.
 * Opens a media file by name: probes the format first without opening the
 * file, then (unless the format is AVFMT_NOFILE) opens it and probes with
 * exponentially growing buffer sizes from PROBE_BUF_MIN to PROBE_BUF_MAX,
 * rewinding (or reopening, for unseekable streams) between attempts, and
 * finally delegates to av_open_input_stream().
 */
459 /** size of probe buffer, for guessing file type from file contents */
460 #define PROBE_BUF_MIN 2048
461 #define PROBE_BUF_MAX (1<<20)
463 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
466 AVFormatParameters *ap)
469 AVProbeData probe_data, *pd = &probe_data;
470 ByteIOContext *pb = NULL;
471 void *logctx= ap && ap->prealloced_context ? *ic_ptr : NULL;
475 pd->filename = filename;
480 /* guess format if no file can be opened */
481 fmt = av_probe_input_format(pd, 0);
484 /* Do not open file if the format does not need it. XXX: specific
485 hack needed to handle RTSP/TCP */
486 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
487 /* if no file needed do not try to open one */
488 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
492 url_setbufsize(pb, buf_size);
495 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
496 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
497 /* read probe data */
498 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
499 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
501 if ((int)pd->buf_size < 0) {
506 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
507 if (url_fseek(pb, 0, SEEK_SET) < 0) {
509 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
515 /* guess file format */
516 fmt = av_probe_input_format2(pd, 1, &score);
518 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
519 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
521 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
527 /* if still no format found, error */
533 /* check filename in case an image number is expected */
534 if (fmt->flags & AVFMT_NEEDNUMBER) {
535 if (!av_filename_number_test(filename)) {
536 err = AVERROR_NUMEXPECTED;
540 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
548 if (ap && ap->prealloced_context)
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing (NULL
 * check on the allocation, the pkt copy into pktl, the return).
 * Appends a copy of *pkt to a singly linked packet list, using *plast_pktl
 * as a tail pointer so the append is O(1).
 */
555 /*******************************************************/
557 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
558 AVPacketList **plast_pktl){
559 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
564 (*plast_pktl)->next = pktl;
566 *packet_buffer = pktl;
568 /* add the packet in the buffered packet list */
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing (the main
 * retry loop, break statements after each switch case, returns).
 * Reads the next raw packet: first drains the raw-packet buffer for streams
 * whose codec is already identified (or whose probe budget is exhausted);
 * otherwise calls the demuxer's read_packet(), applies any user-forced
 * codec ids, and for CODEC_ID_PROBE streams accumulates packet data into
 * probe_data, re-running format detection each time the buffer size crosses
 * a power of two (the av_log2 comparison).
 */
574 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
580 AVPacketList *pktl = s->raw_packet_buffer;
584 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
585 !s->streams[pkt->stream_index]->probe_packets ||
586 s->raw_packet_buffer_remaining_size < pkt->size){
587 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
590 s->raw_packet_buffer = pktl->next;
591 s->raw_packet_buffer_remaining_size += pkt->size;
598 ret= s->iformat->read_packet(s, pkt);
600 if (!pktl || ret == AVERROR(EAGAIN))
602 for (i = 0; i < s->nb_streams; i++)
603 s->streams[i]->probe_packets = 0;
606 st= s->streams[pkt->stream_index];
608 switch(st->codec->codec_type){
609 case CODEC_TYPE_VIDEO:
610 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
612 case CODEC_TYPE_AUDIO:
613 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
615 case CODEC_TYPE_SUBTITLE:
616 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
620 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
624 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
625 s->raw_packet_buffer_remaining_size -= pkt->size;
627 if(st->codec->codec_id == CODEC_ID_PROBE){
628 AVProbeData *pd = &st->probe_data;
629 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
632 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
633 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
634 pd->buf_size += pkt->size;
635 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
637 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
638 set_codec_from_probe_data(s, st, pd, 1);
639 if(st->codec->codec_id != CODEC_ID_PROBE){
642 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing (error
 * returns for the zero-channel / zero-bitrate cases, the final return).
 * Computes the number of samples in an audio packet of `size` bytes:
 * from bits-per-sample and channel count when the codec reports a constant
 * sample size, from bit rate and sample rate otherwise (e.g. some ADPCM),
 * or directly from enc->frame_size for fixed-frame codecs. Vorbis is
 * special-cased near the top (handling in missing lines -- TODO confirm).
 */
649 /**********************************************************/
652 * Get the number of samples of an audio frame. Return -1 on error.
654 static int get_audio_frame_size(AVCodecContext *enc, int size)
658 if(enc->codec_id == CODEC_ID_VORBIS)
661 if (enc->frame_size <= 1) {
662 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
664 if (bits_per_sample) {
665 if (enc->channels == 0)
667 frame_size = (size << 3) / (bits_per_sample * enc->channels);
669 /* used for example by ADPCM codecs */
670 if (enc->bit_rate == 0)
672 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
675 frame_size = enc->frame_size;
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing (the
 * *pnum/*pden=0 initialisation and the audio success branch are partly cut).
 * Returns the per-frame duration as the rational *pnum / *pden: for video,
 * the stream or codec time base (whichever looks sane via the *1000 check),
 * scaled by (1 + repeat_pict) when the parser reports field repeats; for
 * audio, samples-per-frame over sample_rate.
 */
682 * Return the frame duration in seconds. Return 0 if not available.
684 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
685 AVCodecParserContext *pc, AVPacket *pkt)
691 switch(st->codec->codec_type) {
692 case CODEC_TYPE_VIDEO:
693 if(st->time_base.num*1000LL > st->time_base.den){
694 *pnum = st->time_base.num;
695 *pden = st->time_base.den;
696 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
697 *pnum = st->codec->time_base.num;
698 *pden = st->codec->time_base.den;
699 if (pc && pc->repeat_pict) {
700 *pnum = (*pnum) * (1 + pc->repeat_pict);
704 case CODEC_TYPE_AUDIO:
705 frame_size = get_audio_frame_size(st->codec, pkt->size);
709 *pden = st->codec->sample_rate;
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing (several
 * case labels, the return 1 / return 0 statements).
 * Predicate: true for codecs whose every frame is a keyframe -- all audio,
 * plus the listed intra-only video codecs (MJPEG family, rawvideo, DV,
 * HuffYUV, JPEG 2000, ...).
 */
716 static int is_intra_only(AVCodecContext *enc){
717 if(enc->codec_type == CODEC_TYPE_AUDIO){
719 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
720 switch(enc->codec_id){
722 case CODEC_ID_MJPEGB:
724 case CODEC_ID_RAWVIDEO:
725 case CODEC_ID_DVVIDEO:
726 case CODEC_ID_HUFFYUV:
727 case CODEC_ID_FFVHUFF:
732 case CODEC_ID_JPEG2000:
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing.
 * Once the first valid DTS for a stream is known, back-computes
 * st->first_dts and retroactively shifts the pts/dts of packets already
 * queued in s->packet_buffer for that stream, then seeds st->start_time.
 * The early return skips the work when first_dts is already set or the
 * inputs are unusable.
 */
740 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
741 int64_t dts, int64_t pts)
743 AVStream *st= s->streams[stream_index];
744 AVPacketList *pktl= s->packet_buffer;
746 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
749 st->first_dts= dts - st->cur_dts;
752 for(; pktl; pktl= pktl->next){
753 if(pktl->pkt.stream_index != stream_index)
755 //FIXME think more about this check
756 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
757 pktl->pkt.pts += st->first_dts;
759 if(pktl->pkt.dts != AV_NOPTS_VALUE)
760 pktl->pkt.dts += st->first_dts;
762 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
763 st->start_time= pktl->pkt.pts;
765 if (st->start_time == AV_NOPTS_VALUE)
766 st->start_time = pts;
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing (break
 * statements, the cur_dts=0 else-branch body, the second-loop guard).
 * When a packet finally arrives with a usable duration, walks the buffered
 * packets of the same stream that still lack timestamps and fills in
 * monotonically increasing dts (and pts, absent B-frames) derived from
 * first_dts/cur_dts and the new packet's duration.
 */
769 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
771 AVPacketList *pktl= s->packet_buffer;
774 if(st->first_dts != AV_NOPTS_VALUE){
775 cur_dts= st->first_dts;
776 for(; pktl; pktl= pktl->next){
777 if(pktl->pkt.stream_index == pkt->stream_index){
778 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
780 cur_dts -= pkt->duration;
783 pktl= s->packet_buffer;
784 st->first_dts = cur_dts;
785 }else if(st->cur_dts)
788 for(; pktl; pktl= pktl->next){
789 if(pktl->pkt.stream_index != pkt->stream_index)
791 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
792 && !pktl->pkt.duration){
793 pktl->pkt.dts= cur_dts;
794 if(!st->codec->has_b_frames)
795 pktl->pkt.pts= cur_dts;
796 cur_dts += pkt->duration;
797 pktl->pkt.duration= pkt->duration;
801 if(st->first_dts == AV_NOPTS_VALUE)
802 st->cur_dts= cur_dts;
/*
 * NOTE(review): fragmentary excerpt -- many interior lines are missing
 * (variable initialisations, several closing braces and conditions).
 * Central timestamp reconstruction for demuxed packets: detects B-frame
 * delay, handles pts-wrap, estimates missing durations via
 * compute_frame_duration(), applies parser synchronisation info
 * (dts_sync_point/dts_ref_dts_delta), interpolates missing pts/dts from
 * cur_dts/last_IP_*, reorders pts through the pts_buffer to derive dts,
 * and finally sets PKT_FLAG_KEY from intra-only codecs or parser keyframe
 * info. H.264 is repeatedly special-cased because delay/has_b_frames are
 * not reliable for it at this point.
 */
805 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
806 AVCodecParserContext *pc, AVPacket *pkt)
808 int num, den, presentation_delayed, delay, i;
811 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == FF_B_TYPE)
812 //FIXME Set low_delay = 0 when has_b_frames = 1
813 st->codec->has_b_frames = 1;
815 /* do we have a video B-frame ? */
816 delay= st->codec->has_b_frames;
817 presentation_delayed = 0;
818 /* XXX: need has_b_frame, but cannot get it if the codec is
821 pc && pc->pict_type != FF_B_TYPE)
822 presentation_delayed = 1;
824 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
825 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
826 pkt->dts -= 1LL<<st->pts_wrap_bits;
829 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
830 // we take the conservative approach and discard both
831 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
832 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
833 av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n");
834 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
837 if (pkt->duration == 0) {
838 compute_frame_duration(&num, &den, st, pc, pkt);
840 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
842 if(pkt->duration != 0 && s->packet_buffer)
843 update_initial_durations(s, st, pkt);
847 /* correct timestamps with byte offset if demuxers only have timestamps
848 on packet boundaries */
849 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
850 /* this will estimate bitrate based on this frame's duration and size */
851 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
852 if(pkt->pts != AV_NOPTS_VALUE)
854 if(pkt->dts != AV_NOPTS_VALUE)
858 if (pc && pc->dts_sync_point >= 0) {
859 // we have synchronization info from the parser
860 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
862 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
863 if (pkt->dts != AV_NOPTS_VALUE) {
864 // got DTS from the stream, update reference timestamp
865 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
866 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
867 } else if (st->reference_dts != AV_NOPTS_VALUE) {
868 // compute DTS based on reference timestamp
869 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
870 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
872 if (pc->dts_sync_point > 0)
873 st->reference_dts = pkt->dts; // new reference
877 /* This may be redundant, but it should not hurt. */
878 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
879 presentation_delayed = 1;
881 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
882 /* interpolate PTS and DTS if they are not present */
883 //We skip H264 currently because delay and has_b_frames are not reliably set
884 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
885 if (presentation_delayed) {
886 /* DTS = decompression timestamp */
887 /* PTS = presentation timestamp */
888 if (pkt->dts == AV_NOPTS_VALUE)
889 pkt->dts = st->last_IP_pts;
890 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
891 if (pkt->dts == AV_NOPTS_VALUE)
892 pkt->dts = st->cur_dts;
894 /* this is tricky: the dts must be incremented by the duration
895 of the frame we are displaying, i.e. the last I- or P-frame */
896 if (st->last_IP_duration == 0)
897 st->last_IP_duration = pkt->duration;
898 if(pkt->dts != AV_NOPTS_VALUE)
899 st->cur_dts = pkt->dts + st->last_IP_duration;
900 st->last_IP_duration = pkt->duration;
901 st->last_IP_pts= pkt->pts;
902 /* cannot compute PTS if not present (we can compute it only
903 by knowing the future */
904 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
905 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
906 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
907 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
908 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
909 pkt->pts += pkt->duration;
910 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
914 /* presentation is not delayed : PTS and DTS are the same */
915 if(pkt->pts == AV_NOPTS_VALUE)
917 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
918 if(pkt->pts == AV_NOPTS_VALUE)
919 pkt->pts = st->cur_dts;
921 if(pkt->pts != AV_NOPTS_VALUE)
922 st->cur_dts = pkt->pts + pkt->duration;
926 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
927 st->pts_buffer[0]= pkt->pts;
928 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
929 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
930 if(pkt->dts == AV_NOPTS_VALUE)
931 pkt->dts= st->pts_buffer[0];
932 if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here
933 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
935 if(pkt->dts > st->cur_dts)
936 st->cur_dts = pkt->dts;
939 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
942 if(is_intra_only(st->codec))
943 pkt->flags |= PKT_FLAG_KEY;
946 /* keyframe computation */
947 if (pc->key_frame == 1)
948 pkt->flags |= PKT_FLAG_KEY;
949 else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
950 pkt->flags |= PKT_FLAG_KEY;
953 pkt->convergence_duration = pc->convergence_duration;
/*
 * NOTE(review): fragmentary excerpt -- many interior lines are missing
 * (the outer for(;;) loop, returns, several closing braces).
 * Frame-level read loop: if the current stream needs no parsing, the raw
 * packet is handed out as-is; otherwise remaining bytes of the current
 * packet are fed to av_parser_parse2() until a complete frame is produced,
 * with timestamps taken from the parser. When the current packet is
 * exhausted, the next raw packet is fetched via av_read_packet(); EOF
 * triggers a final flush of each stream's parser. Generic-index formats
 * also get keyframe index entries here.
 */
957 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
965 /* select current input stream component */
968 if (!st->need_parsing || !st->parser) {
969 /* no parsing needed: we just output the packet as is */
970 /* raw data support */
971 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
972 compute_pkt_fields(s, st, NULL, pkt);
974 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
975 (pkt->flags & PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
976 ff_reduce_index(s, st->index);
977 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
980 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
981 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
982 st->cur_ptr, st->cur_len,
983 st->cur_pkt.pts, st->cur_pkt.dts,
985 st->cur_pkt.pts = AV_NOPTS_VALUE;
986 st->cur_pkt.dts = AV_NOPTS_VALUE;
987 /* increment read pointer */
991 /* return packet if any */
995 pkt->stream_index = st->index;
996 pkt->pts = st->parser->pts;
997 pkt->dts = st->parser->dts;
998 pkt->pos = st->parser->pos;
999 pkt->destruct = NULL;
1000 compute_pkt_fields(s, st, st->parser, pkt);
1002 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
1003 ff_reduce_index(s, st->index);
1004 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1005 0, 0, AVINDEX_KEYFRAME);
1012 av_free_packet(&st->cur_pkt);
1017 /* read next packet */
1018 ret = av_read_packet(s, &cur_pkt);
1020 if (ret == AVERROR(EAGAIN))
1022 /* return the last frames, if any */
1023 for(i = 0; i < s->nb_streams; i++) {
1025 if (st->parser && st->need_parsing) {
1026 av_parser_parse2(st->parser, st->codec,
1027 &pkt->data, &pkt->size,
1029 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
1035 /* no more packets: really terminate parsing */
1038 st = s->streams[cur_pkt.stream_index];
1039 st->cur_pkt= cur_pkt;
1041 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1042 st->cur_pkt.dts != AV_NOPTS_VALUE &&
1043 st->cur_pkt.pts < st->cur_pkt.dts){
1044 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1045 st->cur_pkt.stream_index,
1049 // av_free_packet(&st->cur_pkt);
1053 if(s->debug & FF_FDEBUG_TS)
1054 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1055 st->cur_pkt.stream_index,
1059 st->cur_pkt.duration,
1063 st->cur_ptr = st->cur_pkt.data;
1064 st->cur_len = st->cur_pkt.size;
1065 if (st->need_parsing && !st->parser) {
1066 st->parser = av_parser_init(st->codec->codec_id);
1068 /* no parser available: just output the raw packets */
1069 st->need_parsing = AVSTREAM_PARSE_NONE;
1070 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1071 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1073 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1074 st->parser->next_frame_offset=
1075 st->parser->cur_offset= st->cur_pkt.pos;
1080 if(s->debug & FF_FDEBUG_TS)
1081 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing (the
 * outer loop, the buffered-packet return, closing braces).
 * Public read API: with AVFMT_FLAG_GENPTS, fills a missing pts on a
 * buffered packet by scanning later same-stream packets for the first dts
 * larger than its dts (next-frame dts == this frame's pts for B-frame-free
 * ordering); otherwise returns buffered packets directly, or reads via
 * av_read_frame_internal(), buffering a duplicate when genpts needs
 * lookahead.
 */
1092 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1096 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1099 pktl = s->packet_buffer;
1101 AVPacket *next_pkt= &pktl->pkt;
1103 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1104 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1105 if( pktl->pkt.stream_index == next_pkt->stream_index
1106 && next_pkt->dts < pktl->pkt.dts
1107 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1108 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1109 next_pkt->pts= pktl->pkt.dts;
1113 pktl = s->packet_buffer;
1116 if( next_pkt->pts != AV_NOPTS_VALUE
1117 || next_pkt->dts == AV_NOPTS_VALUE
1119 /* read packet from packet buffer, if there is data */
1121 s->packet_buffer = pktl->next;
1127 int ret= av_read_frame_internal(s, pkt);
1129 if(pktl && ret != AVERROR(EAGAIN)){
1136 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1137 &s->packet_buffer_end)) < 0)
1138 return AVERROR(ENOMEM);
1140 assert(!s->packet_buffer);
1141 return av_read_frame_internal(s, pkt);
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing (the
 * first while condition, the av_free(pktl) calls).
 * Frees every packet in both the demuxed packet buffer and the raw packet
 * buffer, resets the tail pointers and restores the raw-buffer byte budget.
 */
1146 /* XXX: suppress the packet queue */
1147 static void flush_packet_queue(AVFormatContext *s)
1152 pktl = s->packet_buffer;
1155 s->packet_buffer = pktl->next;
1156 av_free_packet(&pktl->pkt);
1159 while(s->raw_packet_buffer){
1160 pktl = s->raw_packet_buffer;
1161 s->raw_packet_buffer = pktl->next;
1162 av_free_packet(&pktl->pkt);
1165 s->packet_buffer_end=
1166 s->raw_packet_buffer_end= NULL;
1167 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing (the
 * -1 return for zero streams, `st` assignment, the video return).
 * Chooses a default stream: the first video stream if any (returned in the
 * missing branch -- TODO confirm), otherwise the first audio stream,
 * otherwise stream 0.
 */
1170 /*******************************************************/
1173 int av_find_default_stream_index(AVFormatContext *s)
1175 int first_audio_index = -1;
1179 if (s->nb_streams <= 0)
1181 for(i = 0; i < s->nb_streams; i++) {
1183 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1186 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1187 first_audio_index = i;
1189 return first_audio_index >= 0 ? first_audio_index : 0;
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing.
 * Seek support: drops all buffered packets and resets per-stream parser
 * state, timestamps (last_IP_pts, cur_dts, reference_dts), the probe
 * budget, and the pts reorder buffer.
 */
1193 * Flush the frame reader.
1195 void av_read_frame_flush(AVFormatContext *s)
1200 flush_packet_queue(s);
1204 /* for each stream, reset read state */
1205 for(i = 0; i < s->nb_streams; i++) {
1209 av_parser_close(st->parser);
1211 av_free_packet(&st->cur_pkt);
1213 st->last_IP_pts = AV_NOPTS_VALUE;
1214 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1215 st->reference_dts = AV_NOPTS_VALUE;
1220 st->probe_packets = MAX_PROBE_PACKETS;
1222 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1223 st->pts_buffer[j]= AV_NOPTS_VALUE;
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing.
 * After a seek, propagates `timestamp` (expressed in ref_st's time base)
 * to every stream's cur_dts, rescaled into each stream's own time base.
 */
1227 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1230 for(i = 0; i < s->nb_streams; i++) {
1231 AVStream *st = s->streams[i];
1233 st->cur_dts = av_rescale(timestamp,
1234 st->time_base.den * (int64_t)ref_st->time_base.num,
1235 st->time_base.num * (int64_t)ref_st->time_base.den);
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing.
 * Caps index memory: when the entry count reaches
 * max_index_size/sizeof(AVIndexEntry), halves the index by keeping every
 * second entry (compacted in place).
 */
1239 void ff_reduce_index(AVFormatContext *s, int stream_index)
1241 AVStream *st= s->streams[stream_index];
1242 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1244 if((unsigned)st->nb_index_entries >= max_entries){
1246 for(i=0; 2*i<st->nb_index_entries; i++)
1247 st->index_entries[i]= st->index_entries[2*i];
1248 st->nb_index_entries= i;
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing (the -1
 * returns, remaining field assignments, the final return of the index).
 * Inserts an index entry keeping the list sorted by timestamp: grows the
 * array with av_fast_realloc (with an overflow guard), binary-searches for
 * the insertion point, appends / memmoves / or updates an existing entry
 * with equal timestamp (never reducing min_distance).
 */
1252 int av_add_index_entry(AVStream *st,
1253 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1255 AVIndexEntry *entries, *ie;
1258 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1261 entries = av_fast_realloc(st->index_entries,
1262 &st->index_entries_allocated_size,
1263 (st->nb_index_entries + 1) *
1264 sizeof(AVIndexEntry));
1268 st->index_entries= entries;
1270 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1273 index= st->nb_index_entries++;
1274 ie= &entries[index];
1275 assert(index==0 || ie[-1].timestamp < timestamp);
1277 ie= &entries[index];
1278 if(ie->timestamp != timestamp){
1279 if(ie->timestamp <= timestamp)
1281 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1282 st->nb_index_entries++;
1283 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1284 distance= ie->min_distance;
1288 ie->timestamp = timestamp;
1289 ie->min_distance= distance;
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing (the
 * binary-search loop header with a/b bounds, the final return).
 * Binary search over the sorted index for wanted_timestamp; direction of
 * rounding chosen by AVSEEK_FLAG_BACKWARD, and unless AVSEEK_FLAG_ANY is
 * set the result is stepped to the nearest keyframe entry.
 */
1296 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1299 AVIndexEntry *entries= st->index_entries;
1300 int nb_entries= st->nb_index_entries;
1309 timestamp = entries[m].timestamp;
1310 if(timestamp >= wanted_timestamp)
1312 if(timestamp <= wanted_timestamp)
1315 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1317 if(!(flags & AVSEEK_FLAG_ANY)){
1318 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1319 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/*
 * NOTE(review): fragmentary excerpt -- interior lines are missing (flush
 * of the frame reader, error returns, pos_min/pos_max assignments from the
 * cached entries).
 * Timestamp-based seek using the demuxer's read_timestamp(): seeds the
 * [ts_min, ts_max] / [pos_min, pos_max] search window from cached index
 * entries when available, runs av_gen_search(), seeks the IO context to
 * the found byte position and updates all streams' cur_dts.
 */
1330 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1331 AVInputFormat *avif= s->iformat;
1332 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1333 int64_t ts_min, ts_max, ts;
1338 if (stream_index < 0)
1342 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1346 ts_min= AV_NOPTS_VALUE;
1347 pos_limit= -1; //gcc falsely says it may be uninitialized
1349 st= s->streams[stream_index];
1350 if(st->index_entries){
1353 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1354 index= FFMAX(index, 0);
1355 e= &st->index_entries[index];
1357 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1359 ts_min= e->timestamp;
1361 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1368 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1369 assert(index < st->nb_index_entries);
1371 e= &st->index_entries[index];
1372 assert(e->timestamp >= target_ts);
1374 ts_max= e->timestamp;
1375 pos_limit= pos_max - e->min_distance;
1377 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1378 pos_max,pos_limit, ts_max);
1383 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1388 if ((ret = url_fseek(s->pb, pos, SEEK_SET)) < 0)
1391 av_update_cur_dts(s, st, ts);
/*
 * NOTE(review): fragmentary excerpt -- many interior lines are missing
 * (the step-doubling for the window seed, no_change bookkeeping, loop
 * closing braces, the final *ts_ret store and return).
 * Generic binary/interpolation search for the byte position whose decoded
 * timestamp brackets target_ts: seeds missing window bounds by reading the
 * first/last timestamps of the file, then iterates -- linear interpolation
 * first, bisection when interpolation stalls, linear stepping as last
 * resort -- narrowing [pos_min, pos_limit] until it converges, finally
 * picking the min or max side per AVSEEK_FLAG_BACKWARD. Note the tail
 * reads ts_max via &pos_min: pos_min has just been advanced past the found
 * position, so this reads the *next* timestamp -- intentional, not a typo.
 */
1396 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1398 int64_t start_pos, filesize;
1402 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1405 if(ts_min == AV_NOPTS_VALUE){
1406 pos_min = s->data_offset;
1407 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1408 if (ts_min == AV_NOPTS_VALUE)
1412 if(ts_max == AV_NOPTS_VALUE){
1414 filesize = url_fsize(s->pb);
1415 pos_max = filesize - 1;
1418 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1420 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1421 if (ts_max == AV_NOPTS_VALUE)
1425 int64_t tmp_pos= pos_max + 1;
1426 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1427 if(tmp_ts == AV_NOPTS_VALUE)
1431 if(tmp_pos >= filesize)
1437 if(ts_min > ts_max){
1439 }else if(ts_min == ts_max){
1444 while (pos_min < pos_limit) {
1446 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1450 assert(pos_limit <= pos_max);
1453 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1454 // interpolate position (better than dichotomy)
1455 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1456 + pos_min - approximate_keyframe_distance;
1457 }else if(no_change==1){
1458 // bisection, if interpolation failed to change min or max pos last time
1459 pos = (pos_min + pos_limit)>>1;
1461 /* linear search if bisection failed, can only happen if there
1462 are very few or no keyframes between min/max */
1467 else if(pos > pos_limit)
1471 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1477 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1478 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
1479 start_pos, no_change);
1481 if(ts == AV_NOPTS_VALUE){
1482 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1485 assert(ts != AV_NOPTS_VALUE);
1486 if (target_ts <= ts) {
1487 pos_limit = start_pos - 1;
1491 if (target_ts >= ts) {
1497 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1498 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1501 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1503 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1504 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1505 pos, ts_min, target_ts, ts_max);
/**
 * Seek to a raw byte position, clamped to [data_offset, filesize-1].
 * Used when AVSEEK_FLAG_BYTE is set; 'pos' is a byte offset, not a timestamp.
 */
1511 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1512 int64_t pos_min, pos_max;
1516 if (stream_index < 0)
1519 st= s->streams[stream_index];
1522 pos_min = s->data_offset;
1523 pos_max = url_fsize(s->pb) - 1;
/* clamp the requested position into the valid byte range */
1525 if (pos < pos_min) pos= pos_min;
1526 else if(pos > pos_max) pos= pos_max;
1528 url_fseek(s->pb, pos, SEEK_SET);
1531 av_update_cur_dts(s, st, ts);
/**
 * Generic index-based seek: consult the stream's index; if the target is
 * past the last index entry, read packets forward (indexing as we go)
 * until the timestamp is reached, then seek to the chosen index entry.
 * NOTE(review): intermediate source lines are elided in this listing.
 */
1536 static int av_seek_frame_generic(AVFormatContext *s,
1537 int stream_index, int64_t timestamp, int flags)
1544 st = s->streams[stream_index];
1546 index = av_index_search_timestamp(st, timestamp, flags);
/* target not (yet) covered by the index: scan forward from the last entry */
1548 if(index < 0 || index==st->nb_index_entries-1){
1552 if(st->nb_index_entries){
1553 assert(st->index_entries);
1554 ie= &st->index_entries[st->nb_index_entries-1];
1555 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1557 av_update_cur_dts(s, st, ie->timestamp);
1559 if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
/* read frames (retrying on EAGAIN) until we pass the target timestamp */
1565 ret = av_read_frame(s, &pkt);
1566 }while(ret == AVERROR(EAGAIN));
1569 av_free_packet(&pkt);
1570 if(stream_index == pkt.stream_index){
1571 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1575 index = av_index_search_timestamp(st, timestamp, flags);
1580 av_read_frame_flush(s);
/* prefer the demuxer's own read_seek if it can handle the request */
1581 if (s->iformat->read_seek){
1582 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1585 ie = &st->index_entries[index];
1586 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1588 av_update_cur_dts(s, st, ie->timestamp);
/**
 * Public seek entry point. Dispatches, in order: byte seek (AVSEEK_FLAG_BYTE),
 * the demuxer's read_seek, binary search via read_timestamp, and finally the
 * generic index-based seek.
 */
1593 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1598 av_read_frame_flush(s);
1600 if(flags & AVSEEK_FLAG_BYTE)
1601 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1603 if(stream_index < 0){
1604 stream_index= av_find_default_stream_index(s);
1605 if(stream_index < 0)
1608 st= s->streams[stream_index];
1609 /* timestamp for default must be expressed in AV_TIME_BASE units */
1610 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1613 /* first, we try the format specific seek */
1614 if (s->iformat->read_seek)
1615 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1622 if(s->iformat->read_timestamp)
1623 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1625 return av_seek_frame_generic(s, stream_index, timestamp, flags);
/**
 * New-style seek API with a [min_ts, max_ts] tolerance window around ts.
 * Prefers the demuxer's read_seek2; otherwise falls back to av_seek_frame(),
 * choosing the seek direction from which side of the window ts is closer to.
 */
1628 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1630 if(min_ts > ts || max_ts < ts)
1633 av_read_frame_flush(s);
1635 if (s->iformat->read_seek2)
1636 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1638 if(s->iformat->read_timestamp){
1639 //try to seek via read_timestamp()
1642 //Fallback to old API if new is not implemented but old is
1643 //Note the old has somewhat different semantics
1644 if(s->iformat->read_seek || 1)
1645 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1647 // try some generic seek like av_seek_frame_generic() but with new ts semantics
1650 /*******************************************************/
1653 * Returns TRUE if the stream has accurate duration in any stream.
1655 * @return TRUE if the stream has accurate duration for at least one component.
/* Return nonzero if at least one stream has a known (non-AV_NOPTS_VALUE)
 * duration. */
1657 static int av_has_duration(AVFormatContext *ic)
1662 for(i = 0;i < ic->nb_streams; i++) {
1663 st = ic->streams[i];
1664 if (st->duration != AV_NOPTS_VALUE)
1671 * Estimate the stream timings from the one of each components.
1673 * Also computes the global bitrate if possible.
/**
 * Derive the container-level start_time, duration and (when file_size is
 * known) bit_rate from the per-stream values, rescaled to AV_TIME_BASE.
 */
1675 static void av_update_stream_timings(AVFormatContext *ic)
1677 int64_t start_time, start_time1, end_time, end_time1;
1678 int64_t duration, duration1;
/* sentinels: min over start times, max over end times/durations */
1682 start_time = INT64_MAX;
1683 end_time = INT64_MIN;
1684 duration = INT64_MIN;
1685 for(i = 0;i < ic->nb_streams; i++) {
1686 st = ic->streams[i];
1687 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1688 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1689 if (start_time1 < start_time)
1690 start_time = start_time1;
1691 if (st->duration != AV_NOPTS_VALUE) {
1692 end_time1 = start_time1
1693 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1694 if (end_time1 > end_time)
1695 end_time = end_time1;
1698 if (st->duration != AV_NOPTS_VALUE) {
1699 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1700 if (duration1 > duration)
1701 duration = duration1;
1704 if (start_time != INT64_MAX) {
1705 ic->start_time = start_time;
1706 if (end_time != INT64_MIN) {
/* prefer the global end-start span over the largest single duration */
1707 if (end_time - start_time > duration)
1708 duration = end_time - start_time;
1711 if (duration != INT64_MIN) {
1712 ic->duration = duration;
1713 if (ic->file_size > 0) {
1714 /* compute the bitrate */
1715 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1716 (double)ic->duration;
/**
 * Propagate the container-level start_time/duration (computed by
 * av_update_stream_timings) down to any stream that lacks its own values,
 * rescaled into that stream's time base.
 */
1721 static void fill_all_stream_timings(AVFormatContext *ic)
1726 av_update_stream_timings(ic);
1727 for(i = 0;i < ic->nb_streams; i++) {
1728 st = ic->streams[i];
1729 if (st->start_time == AV_NOPTS_VALUE) {
1730 if(ic->start_time != AV_NOPTS_VALUE)
1731 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1732 if(ic->duration != AV_NOPTS_VALUE)
1733 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/**
 * Least-accurate fallback: sum the per-stream codec bitrates if the global
 * bitrate is unset, then estimate each stream's duration from
 * file_size * 8 / bit_rate.
 */
1738 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1740 int64_t filesize, duration;
1744 /* if bit_rate is already set, we believe it */
1745 if (ic->bit_rate == 0) {
1747 for(i=0;i<ic->nb_streams;i++) {
1748 st = ic->streams[i];
1749 bit_rate += st->codec->bit_rate;
1751 ic->bit_rate = bit_rate;
1754 /* if duration is already set, we believe it */
1755 if (ic->duration == AV_NOPTS_VALUE &&
1756 ic->bit_rate != 0 &&
1757 ic->file_size != 0) {
1758 filesize = ic->file_size;
1760 for(i = 0; i < ic->nb_streams; i++) {
1761 st = ic->streams[i];
/* duration = 8*filesize/bit_rate, expressed in the stream's time base */
1762 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1763 if (st->duration == AV_NOPTS_VALUE)
1764 st->duration = duration;
1770 #define DURATION_MAX_READ_SIZE 250000
1772 /* only usable for MPEG-PS streams */
/**
 * Estimate stream durations by reading packets near the end of the file and
 * comparing their PTS against the recorded start times. Restores the file
 * position to old_offset afterwards. Only used for MPEG-PS/TS style streams.
 * NOTE(review): intermediate source lines are elided in this listing.
 */
1773 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1775 AVPacket pkt1, *pkt = &pkt1;
1777 int read_size, i, ret;
1778 int64_t end_time, start_time[MAX_STREAMS];
1779 int64_t filesize, offset, duration;
1783 /* flush packet queue */
1784 flush_packet_queue(ic);
/* record each stream's start time (or first DTS as a substitute) */
1786 for(i=0;i<ic->nb_streams;i++) {
1787 st = ic->streams[i];
1788 if(st->start_time != AV_NOPTS_VALUE){
1789 start_time[i]= st->start_time;
1790 }else if(st->first_dts != AV_NOPTS_VALUE){
1791 start_time[i]= st->first_dts;
1793 av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n");
1796 av_parser_close(st->parser);
1798 av_free_packet(&st->cur_pkt);
1802 /* estimate the end time (duration) */
1803 /* XXX: may need to support wrapping */
1804 filesize = ic->file_size;
1805 offset = filesize - DURATION_MAX_READ_SIZE;
1809 url_fseek(ic->pb, offset, SEEK_SET);
1812 if (read_size >= DURATION_MAX_READ_SIZE)
1816 ret = av_read_packet(ic, pkt);
1817 }while(ret == AVERROR(EAGAIN));
1820 read_size += pkt->size;
1821 st = ic->streams[pkt->stream_index];
1822 if (pkt->pts != AV_NOPTS_VALUE &&
1823 start_time[pkt->stream_index] != AV_NOPTS_VALUE) {
1824 end_time = pkt->pts;
1825 duration = end_time - start_time[pkt->stream_index];
/* keep the largest duration observed for this stream */
1827 if (st->duration == AV_NOPTS_VALUE ||
1828 st->duration < duration)
1829 st->duration = duration;
1832 av_free_packet(pkt);
1835 fill_all_stream_timings(ic);
/* rewind to where the caller left the file */
1837 url_fseek(ic->pb, old_offset, SEEK_SET);
1838 for(i=0; i<ic->nb_streams; i++){
1840 st->cur_dts= st->first_dts;
1841 st->last_IP_pts = AV_NOPTS_VALUE;
/**
 * Top-level timing estimation dispatcher: use end-of-file PTS scanning for
 * seekable MPEG-PS/TS, per-stream durations when any are known, and the
 * bitrate heuristic as a last resort.
 */
1845 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1849 /* get the file size, if possible */
1850 if (ic->iformat->flags & AVFMT_NOFILE) {
1853 file_size = url_fsize(ic->pb);
1857 ic->file_size = file_size;
1859 if ((!strcmp(ic->iformat->name, "mpeg") ||
1860 !strcmp(ic->iformat->name, "mpegts")) &&
1861 file_size && !url_is_streamed(ic->pb)) {
1862 /* get accurate estimate from the PTSes */
1863 av_estimate_timings_from_pts(ic, old_offset);
1864 } else if (av_has_duration(ic)) {
1865 /* at least one component has timings - we use them for all
1867 fill_all_stream_timings(ic);
1869 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
1870 /* less precise: use bitrate info */
1871 av_estimate_timings_from_bit_rate(ic);
1873 av_update_stream_timings(ic);
/* debug dump of the resulting per-stream and global timings */
1879 for(i = 0;i < ic->nb_streams; i++) {
1880 st = ic->streams[i];
1881 printf("%d: start_time: %0.3f duration: %0.3f\n",
1882 i, (double)st->start_time / AV_TIME_BASE,
1883 (double)st->duration / AV_TIME_BASE);
1885 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1886 (double)ic->start_time / AV_TIME_BASE,
1887 (double)ic->duration / AV_TIME_BASE,
1888 ic->bit_rate / 1000);
/**
 * Return nonzero when enough codec parameters are known for this stream
 * (sample rate/channels/format for audio — plus frame_size for codecs that
 * need it — width/pix_fmt for video), and a codec id has been identified.
 */
1893 static int has_codec_parameters(AVCodecContext *enc)
1896 switch(enc->codec_type) {
1897 case CODEC_TYPE_AUDIO:
1898 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
/* these codecs additionally require a known frame_size */
1899 if(!enc->frame_size &&
1900 (enc->codec_id == CODEC_ID_VORBIS ||
1901 enc->codec_id == CODEC_ID_AAC ||
1902 enc->codec_id == CODEC_ID_MP3 ||
1903 enc->codec_id == CODEC_ID_SPEEX))
1906 case CODEC_TYPE_VIDEO:
1907 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1913 return enc->codec_id != CODEC_ID_NONE && val != 0;
/**
 * Open the stream's decoder (if needed) and decode one packet, in the hope
 * of filling in codec parameters that the headers did not provide.
 */
1916 static int try_decode_frame(AVStream *st, AVPacket *avpkt)
1920 int got_picture, data_size, ret=0;
1923 if(!st->codec->codec){
1924 codec = avcodec_find_decoder(st->codec->codec_id);
1927 ret = avcodec_open(st->codec, codec);
/* only decode if parameters are still missing — decoding is expensive */
1932 if(!has_codec_parameters(st->codec)){
1933 switch(st->codec->codec_type) {
1934 case CODEC_TYPE_VIDEO:
1935 avcodec_get_frame_defaults(&picture);
1936 ret = avcodec_decode_video2(st->codec, &picture,
1937 &got_picture, avpkt);
1939 case CODEC_TYPE_AUDIO:
1940 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1941 samples = av_malloc(data_size);
1944 ret = avcodec_decode_audio3(st->codec, samples,
/* Look up the container tag for a codec id in a CODEC_ID_NONE-terminated
 * tag table. (Body largely elided in this listing.) */
1956 unsigned int ff_codec_get_tag(const AVCodecTag *tags, int id)
1958 while (tags->id != CODEC_ID_NONE) {
/**
 * Map a container tag to a codec id: first an exact match pass, then a
 * case-insensitive pass over each of the tag's four bytes.
 */
1966 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1969 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1970 if(tag == tags[i].tag)
/* second pass: compare byte-by-byte, ignoring case */
1973 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1974 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1975 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1976 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1977 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1980 return CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for the tag matching 'id'. */
1983 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
1986 for(i=0; tags && tags[i]; i++){
1987 int tag= ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for the codec id matching
 * 'tag'; CODEC_ID_NONE if no table resolves it. */
1993 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
1996 for(i=0; tags && tags[i]; i++){
1997 enum CodecID id= ff_codec_get_id(tags[i], tag);
1998 if(id!=CODEC_ID_NONE) return id;
2000 return CODEC_ID_NONE;
/**
 * Fill in missing chapter end times: each open-ended chapter ends where the
 * next one starts; the final chapter ends at start_time + duration.
 */
2003 static void compute_chapters_end(AVFormatContext *s)
2007 for (i=0; i+1<s->nb_chapters; i++)
2008 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2009 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
2010 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
2011 s->chapters[i]->end = s->chapters[i+1]->start;
/* last chapter: derive its end from the container duration */
2014 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
2015 assert(s->start_time != AV_NOPTS_VALUE);
2016 assert(s->duration > 0);
2017 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
2019 s->chapters[i]->time_base);
2023 #define MAX_STD_TIMEBASES (60*12+5)
2024 static int get_std_framerate(int i){
2025 if(i<60*12) return i*1001;
2026 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2030 * Is the time base unreliable.
2031 * This is a heuristic to balance between quick acceptance of the values in
2032 * the headers vs. some extra checks.
2033 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2034 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2035 * And there are "variable" fps files this needs to detect as well.
/* Heuristic: nonzero when the codec time base should not be trusted
 * (implausibly coarse/fine, or a codec known to misreport it). */
2037 static int tb_unreliable(AVCodecContext *c){
2038 if( c->time_base.den >= 101L*c->time_base.num
2039 || c->time_base.den < 5L*c->time_base.num
2040 /* || c->codec_tag == AV_RL32("DIVX")
2041 || c->codec_tag == AV_RL32("XVID")*/
2042 || c->codec_id == CODEC_ID_MPEG2VIDEO
2043 || c->codec_id == CODEC_ID_H264
/**
 * Read packets from the input until enough information is gathered to fill
 * every stream's codec parameters, estimate frame rates from observed packet
 * durations, then derive overall timings and chapter ends.
 * NOTE(review): this listing elides many intermediate source lines; the
 * visible statements are not contiguous — do not infer complete control flow.
 */
2049 int av_find_stream_info(AVFormatContext *ic)
2051 int i, count, ret, read_size, j;
2053 AVPacket pkt1, *pkt;
2054 int64_t last_dts[MAX_STREAMS];
2055 int64_t duration_gcd[MAX_STREAMS]={0};
2056 int duration_count[MAX_STREAMS]={0};
2057 double (*duration_error)[MAX_STD_TIMEBASES];
2058 int64_t old_offset = url_ftell(ic->pb);
2059 int64_t codec_info_duration[MAX_STREAMS]={0};
2060 int codec_info_nb_frames[MAX_STREAMS]={0};
/* heap-allocated: MAX_STREAMS * MAX_STD_TIMEBASES doubles is too big for stack */
2062 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2063 if (!duration_error) return AVERROR(ENOMEM);
2065 for(i=0;i<ic->nb_streams;i++) {
2066 st = ic->streams[i];
2067 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2068 /* if(!st->time_base.num)
2070 if(!st->codec->time_base.num)
2071 st->codec->time_base= st->time_base;
2073 //only for the split stuff
2075 st->parser = av_parser_init(st->codec->codec_id);
2076 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2077 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2082 for(i=0;i<MAX_STREAMS;i++){
2083 last_dts[i]= AV_NOPTS_VALUE;
/* main probing loop (loop construct elided in this listing) */
2089 if(url_interrupt_cb()){
2090 ret= AVERROR(EINTR);
2091 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2095 /* check if one codec still needs to be handled */
2096 for(i=0;i<ic->nb_streams;i++) {
2097 st = ic->streams[i];
2098 if (!has_codec_parameters(st->codec))
2100 /* variable fps and no guess at the real fps */
2101 if( tb_unreliable(st->codec)
2102 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2104 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2106 if(st->first_dts == AV_NOPTS_VALUE)
2109 if (i == ic->nb_streams) {
2110 /* NOTE: if the format has no header, then we need to read
2111 some packets to get most of the streams, so we cannot
2113 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2114 /* if we found the info for all the codecs, we can stop */
2116 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2120 /* we did not get all the codec info, but we read too much data */
2121 if (read_size >= ic->probesize) {
2123 av_log(ic, AV_LOG_WARNING, "MAX_READ_SIZE:%d reached\n", ic->probesize);
2127 /* NOTE: a new stream can be added there if no header in file
2128 (AVFMTCTX_NOHEADER) */
2129 ret = av_read_frame_internal(ic, &pkt1);
2130 if(ret == AVERROR(EAGAIN))
2134 ret = -1; /* we could not have all the codec parameters before EOF */
2135 for(i=0;i<ic->nb_streams;i++) {
2136 st = ic->streams[i];
2137 if (!has_codec_parameters(st->codec)){
2139 avcodec_string(buf, sizeof(buf), st->codec, 0);
2140 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
/* keep the probed packet so the caller can still consume it later */
2148 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2149 if(av_dup_packet(pkt) < 0) {
2150 av_free(duration_error);
2151 return AVERROR(ENOMEM);
2154 read_size += pkt->size;
2156 st = ic->streams[pkt->stream_index];
2157 if(codec_info_nb_frames[st->index]>1) {
2158 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration){
2159 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2162 codec_info_duration[st->index] += pkt->duration;
2164 if (pkt->duration != 0)
2165 codec_info_nb_frames[st->index]++;
/* collect DTS deltas to estimate the real frame rate */
2168 int index= pkt->stream_index;
2169 int64_t last= last_dts[index];
2170 int64_t duration= pkt->dts - last;
2172 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2173 double dur= duration * av_q2d(st->time_base);
2175 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2176 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2177 if(duration_count[index] < 2)
2178 memset(duration_error[index], 0, sizeof(*duration_error));
/* accumulate squared error against each standard frame-rate candidate */
2179 for(i=1; i<MAX_STD_TIMEBASES; i++){
2180 int framerate= get_std_framerate(i);
2181 int ticks= lrintf(dur*framerate/(1001*12));
2182 double error= dur - ticks*1001*12/(double)framerate;
2183 duration_error[index][i] += error*error;
2185 duration_count[index]++;
2186 // ignore the first 4 values, they might have some random jitter
2187 if (duration_count[index] > 3)
2188 duration_gcd[index] = av_gcd(duration_gcd[index], duration);
2190 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2191 last_dts[pkt->stream_index]= pkt->dts;
/* harvest extradata (e.g. SPS/PPS style headers) via the parser's split() */
2193 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2194 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2196 st->codec->extradata_size= i;
2197 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2198 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2199 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2203 /* if still no information, we try to open the codec and to
2204 decompress the frame. We try to avoid that in most cases as
2205 it takes longer and uses more memory. For MPEG-4, we need to
2206 decompress for QuickTime. */
2207 if (!has_codec_parameters(st->codec))
2208 try_decode_frame(st, pkt);
2213 // close codecs which were opened in try_decode_frame()
2214 for(i=0;i<ic->nb_streams;i++) {
2215 st = ic->streams[i];
2216 if(st->codec->codec)
2217 avcodec_close(st->codec);
/* post-processing: derive frame rates from the collected statistics */
2219 for(i=0;i<ic->nb_streams;i++) {
2220 st = ic->streams[i];
2221 if(codec_info_nb_frames[i]>2 && !st->avg_frame_rate.num)
2222 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2223 (codec_info_nb_frames[i]-2)*(int64_t)st->time_base.den,
2224 codec_info_duration[i] *(int64_t)st->time_base.num, 60000);
2225 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2226 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2227 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2229 // the check for tb_unreliable() is not completely correct, since this is not about handling
2230 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2231 // ipmovie.c produces.
2232 if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1)
2233 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX);
2234 if(duration_count[i]
2235 && tb_unreliable(st->codec) /*&&
2236 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2237 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2239 double best_error= 2*av_q2d(st->time_base);
2240 best_error= best_error*best_error*duration_count[i]*1000*12*30;
/* pick the standard frame rate with the smallest accumulated error */
2242 for(j=1; j<MAX_STD_TIMEBASES; j++){
2243 double error= duration_error[i][j] * get_std_framerate(j);
2244 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2245 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2246 if(error < best_error){
2248 num = get_std_framerate(j);
2251 // do not increase frame rate by more than 1 % in order to match a standard rate.
2252 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2253 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2256 if (!st->r_frame_rate.num){
2257 if( st->codec->time_base.den * (int64_t)st->time_base.num
2258 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2259 st->r_frame_rate.num = st->codec->time_base.den;
2260 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2262 st->r_frame_rate.num = st->time_base.den;
2263 st->r_frame_rate.den = st->time_base.num;
2266 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2267 if(!st->codec->bits_per_coded_sample)
2268 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2272 av_estimate_timings(ic, old_offset);
2274 compute_chapters_end(ic);
2277 /* correct DTS for B-frame streams with no timestamps */
2278 for(i=0;i<ic->nb_streams;i++) {
2279 st = ic->streams[i];
2280 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2282 ppktl = &ic->packet_buffer;
2284 if(ppkt1->stream_index != i)
2286 if(ppkt1->pkt->dts < 0)
2288 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2290 ppkt1->pkt->dts -= delta;
2295 st->cur_dts -= delta;
2301 av_free(duration_error);
2306 /*******************************************************/
/* Resume a paused network stream: use the demuxer's read_play hook if
 * present, else the protocol-level pause(0) fallback. */
2308 int av_read_play(AVFormatContext *s)
2310 if (s->iformat->read_play)
2311 return s->iformat->read_play(s);
2313 return av_url_read_fpause(s->pb, 0);
2314 return AVERROR(ENOSYS);
/* Pause a network stream: use the demuxer's read_pause hook if present,
 * else the protocol-level pause(1) fallback. */
2317 int av_read_pause(AVFormatContext *s)
2319 if (s->iformat->read_pause)
2320 return s->iformat->read_pause(s);
2322 return av_url_read_fpause(s->pb, 1);
2323 return AVERROR(ENOSYS);
/**
 * Free everything owned by the context except the I/O layer: demuxer private
 * data, all streams (parsers, index, extradata, metadata), programs,
 * chapters, the packet queue and the context metadata.
 */
2326 void av_close_input_stream(AVFormatContext *s)
2331 if (s->iformat->read_close)
2332 s->iformat->read_close(s);
2333 for(i=0;i<s->nb_streams;i++) {
2334 /* free all data in a stream component */
2337 av_parser_close(st->parser);
2338 av_free_packet(&st->cur_pkt);
2340 av_metadata_free(&st->metadata);
2341 av_free(st->index_entries);
2342 av_free(st->codec->extradata);
/* legacy pre-metadata-API fields, only present before lavf 53 */
2344 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2345 av_free(st->filename);
2347 av_free(st->priv_data);
2350 for(i=s->nb_programs-1; i>=0; i--) {
2351 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2352 av_freep(&s->programs[i]->provider_name);
2353 av_freep(&s->programs[i]->name);
2355 av_metadata_free(&s->programs[i]->metadata);
2356 av_freep(&s->programs[i]->stream_index);
2357 av_freep(&s->programs[i]);
2359 av_freep(&s->programs);
2360 flush_packet_queue(s);
2361 av_freep(&s->priv_data);
2362 while(s->nb_chapters--) {
2363 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2364 av_free(s->chapters[s->nb_chapters]->title);
2366 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2367 av_free(s->chapters[s->nb_chapters]);
2369 av_freep(&s->chapters);
2370 av_metadata_free(&s->metadata);
/* Close the context and then its I/O (unless the format is AVFMT_NOFILE,
 * in which case there is no ByteIOContext to close). */
2374 void av_close_input_file(AVFormatContext *s)
2376 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2377 av_close_input_stream(s);
/**
 * Allocate a new AVStream, initialize its codec context and timestamp
 * bookkeeping to sane defaults, and append it to the context.
 * Returns NULL when MAX_STREAMS is reached or allocation fails.
 */
2382 AVStream *av_new_stream(AVFormatContext *s, int id)
2387 if (s->nb_streams >= MAX_STREAMS)
2390 st = av_mallocz(sizeof(AVStream));
2394 st->codec= avcodec_alloc_context();
2396 /* no default bitrate if decoding */
2397 st->codec->bit_rate = 0;
2399 st->index = s->nb_streams;
2401 st->start_time = AV_NOPTS_VALUE;
2402 st->duration = AV_NOPTS_VALUE;
2403 /* we set the current DTS to 0 so that formats without any timestamps
2404 but durations get some timestamps, formats with some unknown
2405 timestamps have their first few packets buffered and the
2406 timestamps corrected before they are returned to the user */
2408 st->first_dts = AV_NOPTS_VALUE;
2409 st->probe_packets = MAX_PROBE_PACKETS;
2411 /* default pts setting is MPEG-like */
2412 av_set_pts_info(st, 33, 1, 90000);
2413 st->last_IP_pts = AV_NOPTS_VALUE;
2414 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2415 st->pts_buffer[i]= AV_NOPTS_VALUE;
2416 st->reference_dts = AV_NOPTS_VALUE;
2418 st->sample_aspect_ratio = (AVRational){0,1};
2420 s->streams[s->nb_streams++] = st;
/**
 * Find the program with the given id, creating and registering a new one
 * if it does not exist yet.
 */
2424 AVProgram *av_new_program(AVFormatContext *ac, int id)
2426 AVProgram *program=NULL;
2430 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2433 for(i=0; i<ac->nb_programs; i++)
2434 if(ac->programs[i]->id == id)
2435 program = ac->programs[i];
2438 program = av_mallocz(sizeof(AVProgram));
2441 dynarray_add(&ac->programs, &ac->nb_programs, program);
2442 program->discard = AVDISCARD_NONE;
/**
 * Find or create the chapter with the given id, then set its title,
 * time base, start and end.
 */
2449 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2451 AVChapter *chapter = NULL;
2454 for(i=0; i<s->nb_chapters; i++)
2455 if(s->chapters[i]->id == id)
2456 chapter = s->chapters[i];
2459 chapter= av_mallocz(sizeof(AVChapter));
2462 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
/* legacy title field, only present before lavf 53 */
2464 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2465 av_free(chapter->title);
2467 av_metadata_set(&chapter->metadata, "title", title);
2469 chapter->time_base= time_base;
2470 chapter->start = start;
2476 /************************************************************/
2477 /* output media file */
/**
 * Allocate the muxer's private data and hand the caller's parameters to the
 * muxer's set_parameters hook, if it has one.
 */
2479 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2483 if (s->oformat->priv_data_size > 0) {
2484 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2486 return AVERROR(ENOMEM);
2488 s->priv_data = NULL;
2490 if (s->oformat->set_parameters) {
2491 ret = s->oformat->set_parameters(s, ap);
/**
 * Validate per-stream parameters, resolve codec tags, write the container
 * header via the muxer, and initialize per-stream PTS generation.
 * NOTE(review): intermediate source lines are elided in this listing.
 */
2498 int av_write_header(AVFormatContext *s)
2503 // some sanity checks
2504 if (s->nb_streams == 0) {
2505 av_log(s, AV_LOG_ERROR, "no streams\n");
2509 for(i=0;i<s->nb_streams;i++) {
2512 switch (st->codec->codec_type) {
2513 case CODEC_TYPE_AUDIO:
2514 if(st->codec->sample_rate<=0){
2515 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
/* derive block_align for fixed-size PCM-style codecs */
2518 if(!st->codec->block_align)
2519 st->codec->block_align = st->codec->channels *
2520 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2522 case CODEC_TYPE_VIDEO:
2523 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2524 av_log(s, AV_LOG_ERROR, "time base not set\n");
2527 if(st->codec->width<=0 || st->codec->height<=0){
2528 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2531 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2532 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2538 if(s->oformat->codec_tag){
2539 if(st->codec->codec_tag){
2541 //check that tag + id is in the table
2542 //if neither is in the table -> OK
2543 //if tag is in the table with another id -> FAIL
2544 //if id is in the table with another tag -> FAIL unless strict < ?
2546 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2549 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2550 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2551 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2554 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2555 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2557 return AVERROR(ENOMEM);
2560 #if LIBAVFORMAT_VERSION_MAJOR < 53
2561 ff_metadata_mux_compat(s);
2564 if(s->oformat->write_header){
2565 ret = s->oformat->write_header(s);
2570 /* init PTS generation */
2571 for(i=0;i<s->nb_streams;i++) {
2572 int64_t den = AV_NOPTS_VALUE;
2575 switch (st->codec->codec_type) {
2576 case CODEC_TYPE_AUDIO:
2577 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2579 case CODEC_TYPE_VIDEO:
2580 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2585 if (den != AV_NOPTS_VALUE) {
2587 return AVERROR_INVALIDDATA;
2588 av_frac_init(&st->pts, 0, 0, den);
2594 //FIXME merge with compute_pkt_fields
/**
 * Fill in missing packet fields (duration, pts, dts) before muxing, check
 * timestamp monotonicity, and advance the stream's fractional PTS counter.
 * NOTE(review): intermediate source lines are elided in this listing.
 */
2595 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
2596 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2597 int num, den, frame_size, i;
2599 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2601 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2604 /* duration field */
2605 if (pkt->duration == 0) {
2606 compute_frame_duration(&num, &den, st, NULL, pkt);
2608 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
/* no B-frames: a lone dts can serve as the pts */
2612 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2615 //XXX/FIXME this is a temporary hack until all encoders output pts
2616 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2618 // pkt->pts= st->cur_dts;
2619 pkt->pts= st->pts.val;
2622 //calculate dts from pts
2623 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2624 st->pts_buffer[0]= pkt->pts;
2625 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2626 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
/* partial insertion sort keeps pts_buffer ordered; smallest becomes dts */
2627 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2628 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2630 pkt->dts= st->pts_buffer[0];
2633 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2634 av_log(s, AV_LOG_ERROR,
2635 "st:%d error, non monotone timestamps %"PRId64" >= %"PRId64"\n",
2636 st->index, st->cur_dts, pkt->dts);
2639 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2640 av_log(s, AV_LOG_ERROR, "st:%d error, pts < dts\n", st->index);
2644 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2645 st->cur_dts= pkt->dts;
2646 st->pts.val= pkt->dts;
/* advance the fractional PTS counter by this packet's duration */
2649 switch (st->codec->codec_type) {
2650 case CODEC_TYPE_AUDIO:
2651 frame_size = get_audio_frame_size(st->codec, pkt->size);
2653 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2654 likely equal to the encoder delay, but it would be better if we
2655 had the real timestamps from the encoder */
2656 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2657 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2660 case CODEC_TYPE_VIDEO:
2661 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/**
 * Write one packet directly (no interleaving): fix up its timestamp fields,
 * then hand it to the muxer's write_packet and report any I/O error.
 */
2669 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2671 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
2673 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2676 ret= s->oformat->write_packet(s, pkt);
2678 ret= url_ferror(s->pb);
/**
 * Insert a copy of pkt into the muxer's interleaving queue
 * (s->packet_buffer), keeping the queue ordered according to the
 * compare callback, and update this stream's last_in_packet_buffer.
 *
 * @param s       muxing context owning the packet buffer
 * @param pkt     packet to enqueue; payload ownership moves to the
 *                queued copy (pkt->destruct is cleared below)
 * @param compare ordering predicate between two queued packets
 */
void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
                              int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
    AVPacketList **next_point, *this_pktl;

    /* NOTE(review): av_mallocz result is not checked before the
     * dereference below — confirm the intended OOM policy. */
    this_pktl = av_mallocz(sizeof(AVPacketList));
    this_pktl->pkt= *pkt;
    pkt->destruct= NULL;             // do not free original but only the copy
    av_dup_packet(&this_pktl->pkt);  // duplicate the packet if it uses non-alloced memory

    /* Start scanning after this stream's most recently queued packet
     * (the list is already ordered up to there), else from the head. */
    if(s->streams[pkt->stream_index]->last_in_packet_buffer){
        next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
        next_point = &s->packet_buffer;

    /* If the new packet sorts before the current tail, walk the list to
     * find the insertion point; otherwise it belongs after the tail. */
    if(compare(s, &s->packet_buffer_end->pkt, pkt)){
        while(!compare(s, &(*next_point)->pkt, pkt)){
            next_point= &(*next_point)->next;
        next_point = &(s->packet_buffer_end->next);
    assert(!*next_point);

    s->packet_buffer_end= this_pktl;

    /* Link the new node in and remember it as this stream's newest entry. */
    this_pktl->next= *next_point;

    s->streams[pkt->stream_index]->last_in_packet_buffer=
    *next_point= this_pktl;
/**
 * Ordering predicate for packet interleaving: compares the dts of two
 * packets from (possibly) different streams by cross-multiplying each
 * dts with the other stream's time base, avoiding any division.
 *
 * @return non-zero if next's dts is strictly greater than pkt's
 *         when both are expressed on the common scale
 */
int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
    AVStream *st = s->streams[ pkt ->stream_index];
    AVStream *st2= s->streams[ next->stream_index];
    /* Cross factors: next->dts*left and pkt->dts*right share one scale. */
    int64_t left = st2->time_base.num * (int64_t)st ->time_base.den;
    int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;

    /* pkt may carry no dts at all. */
    if (pkt->dts == AV_NOPTS_VALUE)

    /* NOTE(review): the products below can exceed int64 range for very
     * large dts values — acknowledged by the FIXME. */
    return next->dts * left > pkt->dts * right; //FIXME this can overflow
/**
 * Default dts-based interleaving: queue the incoming packet (if any),
 * then release the queue head once every stream has at least one
 * buffered packet — or unconditionally when flushing — so that the
 * emitted stream is interleaved by dts.
 *
 * @param out   receives the released packet, if any
 * @param pkt   new input packet (queued via ff_interleave_add_packet)
 * @param flush non-zero to drain even if some streams have no data
 */
int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
        ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);

    /* Count streams that currently have at least one queued packet. */
    for(i=0; i < s->nb_streams; i++)
        stream_count+= !!s->streams[i]->last_in_packet_buffer;

    /* Emit only when every stream can contribute (or when flushing),
     * otherwise cross-stream ordering cannot be guaranteed yet. */
    if(stream_count && (s->nb_streams == stream_count || flush)){
        pktl= s->packet_buffer;

        /* Unlink the head node from the queue... */
        s->packet_buffer= pktl->next;
        if(!s->packet_buffer)
            s->packet_buffer_end= NULL;

        /* ...and clear the per-stream tail pointer if it pointed at it. */
        if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
            s->streams[out->stream_index]->last_in_packet_buffer= NULL;

        av_init_packet(out);
2762 * Interleaves an AVPacket correctly so it can be muxed.
2763 * @param out the interleaved packet will be output here
2764 * @param in the input packet
2765 * @param flush 1 if no further packets are available as input and all
2766 * remaining packets should be output
2767 * @return 1 if a packet was output, 0 if no packet could be output,
2768 * < 0 if an error occurred
2770 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2771 if(s->oformat->interleave_packet)
2772 return s->oformat->interleave_packet(s, out, in, flush);
2774 return av_interleave_packet_per_dts(s, out, in, flush);
/**
 * Write a packet to an output media file ensuring correct interleaving:
 * the packet is buffered internally and written out (possibly together
 * with previously buffered packets) once the interleaving logic
 * releases it.
 */
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
    AVStream *st= s->streams[ pkt->stream_index];

    //FIXME/XXX/HACK drop zero sized packets
    if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)

    //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
    /* Fill in missing pts/dts; only fatal if the muxer needs timestamps. */
    if(compute_pkt_fields2(s, st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))

    /* dts-based interleaving requires a valid dts on every packet. */
    if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))

        /* Drain every packet the interleaving logic is ready to release. */
        int ret= av_interleave_packet(s, &opkt, pkt, 0);
        if(ret<=0) //FIXME cleanup needed for ret<0 ?

        ret= s->oformat->write_packet(s, &opkt);

        av_free_packet(&opkt);

    /* Report any pending byte-stream I/O error. */
    if(url_ferror(s->pb))
        return url_ferror(s->pb);
/**
 * Finish writing an output media file: flush every packet still held
 * by the interleaving logic, write the muxer's trailer (if it has
 * one), and free per-stream and muxer private data.
 */
int av_write_trailer(AVFormatContext *s)
        /* flush=1 drains the interleaving queue completely. */
        ret= av_interleave_packet(s, &pkt, NULL, 1);
        if(ret<0) //FIXME cleanup needed for ret<0 ?

        ret= s->oformat->write_packet(s, &pkt);

        av_free_packet(&pkt);

    if(url_ferror(s->pb))

    if(s->oformat->write_trailer)
        ret = s->oformat->write_trailer(s);

        ret=url_ferror(s->pb);
    /* Release private data owned by the streams and the muxer itself. */
    for(i=0;i<s->nb_streams;i++)
        av_freep(&s->streams[i]->priv_data);
    av_freep(&s->priv_data);
/**
 * Add stream idx to the program identified by progid, growing that
 * program's stream_index array by one. A duplicate entry or an
 * out-of-range idx leaves the program unchanged.
 */
void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
    AVProgram *program=NULL;

    /* Reject out-of-range stream indexes up front. */
    if (idx >= ac->nb_streams) {
        /* NOTE(review): idx is unsigned but the format uses %d — confirm. */
        av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);

    /* Find the program with the matching id. */
    for(i=0; i<ac->nb_programs; i++){
        if(ac->programs[i]->id != progid)
        program = ac->programs[i];
        /* Nothing to do if the stream is already part of the program. */
        for(j=0; j<program->nb_stream_indexes; j++)
            if(program->stream_index[j] == idx)

        /* Grow via a temporary so the old array survives a failed realloc. */
        tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
        program->stream_index = tmp;
        program->stream_index[program->nb_stream_indexes++] = idx;
2870 static void print_fps(double d, const char *postfix){
2871 uint64_t v= lrintf(d*100);
2872 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
2873 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
2874 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/* Log every metadata tag except "language" under a "Metadata:" header,
 * prefixed with indent; skipped entirely when "language" is the only
 * tag present. */
static void dump_metadata(void *ctx, AVMetadata *m, const char *indent)
    if(m && !(m->count == 1 && av_metadata_get(m, "language", NULL, 0))){
        AVMetadataTag *tag=NULL;

        av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
        /* Empty key + IGNORE_SUFFIX iterates over all tags. */
        while((tag=av_metadata_get(m, "", tag, AV_METADATA_IGNORE_SUFFIX))) {
            if(strcmp("language", tag->key))
                av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value);
2890 /* "user interface" functions */
/* Log a one-line human-readable description of stream i of ic
 * ("Stream #index.i[id](lang): codec string, PAR/DAR, rates"),
 * followed by the stream's metadata. */
static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
    int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
    AVStream *st = ic->streams[i];
    /* Reduce the time base for the debug-level num/den display below. */
    int g = av_gcd(st->time_base.num, st->time_base.den);
    AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
    avcodec_string(buf, sizeof(buf), st->codec, is_output);
    av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
    /* the pid is an important information, so we display it */
    /* XXX: add a generic system */
    if (flags & AVFMT_SHOW_IDS)
        av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
        av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
    av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
    av_log(NULL, AV_LOG_INFO, ": %s", buf);
    /* Show PAR/DAR only when the stream-level aspect ratio is set and
     * differs from the codec-level one. */
    if (st->sample_aspect_ratio.num && // default
        av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
        AVRational display_aspect_ratio;
        av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
                  st->codec->width*st->sample_aspect_ratio.num,
                  st->codec->height*st->sample_aspect_ratio.den,
        av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
               st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
               display_aspect_ratio.num, display_aspect_ratio.den);
    /* For video, print the various rates: average fps, real base frame
     * rate (tbr), stream time base (tbn) and codec time base (tbc). */
    if(st->codec->codec_type == CODEC_TYPE_VIDEO){
        if(st->avg_frame_rate.den && st->avg_frame_rate.num)
            print_fps(av_q2d(st->avg_frame_rate), "fps");
        if(st->r_frame_rate.den && st->r_frame_rate.num)
            print_fps(av_q2d(st->r_frame_rate), "tbr");
        if(st->time_base.den && st->time_base.num)
            print_fps(1/av_q2d(st->time_base), "tbn");
        if(st->codec->time_base.den && st->codec->time_base.num)
            print_fps(1/av_q2d(st->codec->time_base), "tbc");
    av_log(NULL, AV_LOG_INFO, "\n");
    dump_metadata(NULL, st->metadata, "    ");
/**
 * Log a human-readable summary of the container: format name, URL,
 * global metadata, duration, start time, bitrate, programs, and one
 * line per stream (grouped under their program when programs exist).
 */
void dump_format(AVFormatContext *ic,
    /* Tracks which streams were already printed under a program. */
    uint8_t *printed = av_mallocz(ic->nb_streams);
    if (ic->nb_streams && !printed)

    av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
           is_output ? "Output" : "Input",
           is_output ? ic->oformat->name : ic->iformat->name,
           is_output ? "to" : "from", url);
    dump_metadata(NULL, ic->metadata, " ");

    av_log(NULL, AV_LOG_INFO, " Duration: ");
    if (ic->duration != AV_NOPTS_VALUE) {
        int hours, mins, secs, us;
        /* Split the AV_TIME_BASE duration into H:M:S plus centiseconds. */
        secs = ic->duration / AV_TIME_BASE;
        us = ic->duration % AV_TIME_BASE;
        av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
               (100 * us) / AV_TIME_BASE);
        av_log(NULL, AV_LOG_INFO, "N/A");
    if (ic->start_time != AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_INFO, ", start: ");
        secs = ic->start_time / AV_TIME_BASE;
        us = ic->start_time % AV_TIME_BASE;
        av_log(NULL, AV_LOG_INFO, "%d.%06d",
               secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
    av_log(NULL, AV_LOG_INFO, ", bitrate: ");
        av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
        av_log(NULL, AV_LOG_INFO, "N/A");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* When programs exist, list streams grouped by program and mark
     * them, so only unclaimed streams are listed afterwards. */
    if(ic->nb_programs) {
        int j, k, total = 0;
        for(j=0; j<ic->nb_programs; j++) {
            AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
            av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
                   name ? name->value : "");
            dump_metadata(NULL, ic->programs[j]->metadata, " ");
            for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
                dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
                printed[ic->programs[j]->stream_index[k]] = 1;
            total += ic->programs[j]->nb_stream_indexes;
        if (total < ic->nb_streams)
            av_log(NULL, AV_LOG_INFO, " No Program\n");
    for(i=0;i<ic->nb_streams;i++)
        dump_stream_format(ic, i, index, is_output);
3004 #if LIBAVFORMAT_VERSION_MAJOR < 53
/** Deprecated compatibility wrapper around av_parse_video_frame_size(). */
int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
    return av_parse_video_frame_size(width_ptr, height_ptr, str);
/**
 * Deprecated compatibility wrapper: parse arg as a frame rate and
 * return it through a separate numerator/denominator pair.
 */
int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
    AVRational frame_rate;
    int ret = av_parse_video_frame_rate(&frame_rate, arg);
    *frame_rate_num= frame_rate.num;
    *frame_rate_den= frame_rate.den;
3020 int64_t av_gettime(void)
3023 gettimeofday(&tv,NULL);
3024 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/**
 * Parse datestr either as an absolute date/time ("now", or a
 * year-month-day part optionally followed by an hour-minute-second
 * part, with a trailing 'Z'/'z' meaning UTC) or, when duration is
 * non-zero, as a duration ("HH:MM:SS[.m...]" or a plain number of
 * seconds).
 * @return the value in microseconds
 */
int64_t parse_date(const char *datestr, int duration)
    /* Accepted layouts for the date and the time part respectively. */
    static const char * const date_fmt[] = {
    static const char * const time_fmt[] = {
    time_t now = time(0);

    len = strlen(datestr);
    /* NOTE(review): datestr[len - 1] underflows for an empty string —
     * confirm an elided guard handles len == 0. */
    lastch = datestr[len - 1];
    /* A trailing 'Z'/'z' marks the timestamp as UTC. */
    is_utc = (lastch == 'z' || lastch == 'Z');

    memset(&dt, 0, sizeof(dt));

    if (!strncasecmp(datestr, "now", len))
        return (int64_t) now * 1000000;

    /* parse the year-month-day part */
    for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
        q = small_strptime(p, date_fmt[i], &dt);

    /* if the year-month-day part is missing, then take the
     * current year-month-day time */
        dt = *localtime(&now);

        dt.tm_hour = dt.tm_min = dt.tm_sec = 0;

    /* Skip the ISO-style separator between date and time. */
    if (*p == 'T' || *p == 't' || *p == ' ')

    /* parse the hour-minute-second part */
    for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
        q = small_strptime(p, time_fmt[i], &dt);

    /* parse datestr as a duration */
        /* parse datestr as HH:MM:SS */
        q = small_strptime(p, time_fmt[0], &dt);
        /* parse datestr as S+ */
        dt.tm_sec = strtol(p, (char **)&q, 10);
        /* the parsing didn't succeed */

    /* Now we have all the fields that we can get */

    /* For durations no calendar conversion is needed. */
    t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;

    dt.tm_isdst = -1; /* unknown */

    /* parse the .m... part (fractional seconds, microsecond precision) */
    for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
        val += n * (*q - '0');

    return negative ? -t : t;
/**
 * Look up a tag in a URL-style options string of the form
 * "?tag1=val1&tag2=val2..." (the leading '?' is optional).
 * '+' in a value decodes to a space.
 *
 * @param arg      buffer receiving the tag's value (written only when
 *                 the matched tag carries a '=value' part)
 * @param arg_size size of arg in bytes
 * @param tag1     tag name to search for
 * @param info     the options string to scan
 * @return 1 if the tag was found, 0 otherwise
 */
int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
{
    char name[128];
    const char *src = info;

    if (*src == '?')
        src++;
    for (;;) {
        char *dst = name;

        /* Collect the tag name up to '=', '&' or the end of info. */
        while (*src != '\0' && *src != '=' && *src != '&') {
            if ((dst - name) < sizeof(name) - 1)
                *dst++ = *src;
            src++;
        }
        *dst = '\0';

        /* Collect the value, decoding '+' as a space. */
        dst = arg;
        if (*src == '=') {
            src++;
            while (*src != '&' && *src != '\0') {
                if ((dst - arg) < arg_size - 1) {
                    if (*src == '+')
                        *dst++ = ' ';
                    else
                        *dst++ = *src;
                }
                src++;
            }
            *dst = '\0';
        }

        if (!strcmp(name, tag1))
            return 1;
        if (*src != '&')
            break;
        src++;
    }
    return 0;
}
/**
 * Expand a numbered-sequence filename template.
 * Replaces the single "%d" (or zero-padded "%0Nd") conversion in path
 * with number; "%%" emits a literal '%'. Exactly one "%d" must be
 * present.
 *
 * @param buf      destination buffer
 * @param buf_size size of buf in bytes
 * @param path     template, e.g. "img%03d.png"
 * @param number   frame number to substitute
 * @return 0 on success, -1 if the template is invalid (no "%d", more
 *         than one, or an unknown conversion) or the result is
 *         truncated
 */
int av_get_frame_filename(char *buf, int buf_size,
                          const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q = buf;
    p = path;
    percentd_found = 0;
    for(;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            do {
                nd = 0;
                /* cast: isdigit() on a plain (possibly negative) char is UB */
                while (isdigit((unsigned char)*p)) {
                    nd = nd * 10 + *p++ - '0';
                }
                c = *p++;
            } while (isdigit((unsigned char)c));

            switch(c) {
            case '%':
                goto addchar;
            case 'd':
                /* only one %d conversion is allowed per template */
                if (percentd_found)
                    goto fail;
                percentd_found = 1;
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;
            }
        } else {
        addchar:
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;
 fail:
    *q = '\0';
    return -1;
}
/* Dump size bytes of buf as a hex + ASCII listing, 16 bytes per line,
 * either through av_log (when f is NULL) or to stdio stream f. */
static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
/* Route output to av_log or fprintf depending on the destination. */
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)

    for(i=0;i<size;i+=16) {
            PRINT(" %02x", buf[i+j]);
        /* ASCII column: substitute non-printable bytes. */
        for(j=0;j<len;j++) {
            if (c < ' ' || c > '~')
/** Hex-dump buf to the stdio stream f. */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
    hex_dump_internal(NULL, f, 0, buf, size);
/** Hex-dump buf through av_log on context avcl at the given level. */
void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
    hex_dump_internal(avcl, NULL, level, buf, size);
//FIXME needs to know the time_base
/* Dump an AVPacket's header fields (keyframe flag, duration, dts, pts,
 * size) and optionally its payload as hex, either through av_log
 * (f == NULL) or to stream f. Timestamps are scaled by AV_TIME_BASE. */
static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
/* Route output to av_log or fprintf depending on the destination. */
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
    PRINT("stream #%d:\n", pkt->stream_index);
    PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
    PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
    /* DTS is _always_ valid after av_read_frame() */
    if (pkt->dts == AV_NOPTS_VALUE)
        PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
    /* PTS may not be known if B-frames are present. */
    if (pkt->pts == AV_NOPTS_VALUE)
        PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
    PRINT(" size=%d\n", pkt->size);
        av_hex_dump(f, pkt->data, pkt->size);
/** Dump pkt (optionally with payload) to the stdio stream f. */
void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
    pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
/** Dump pkt (optionally with payload) through av_log at the given level. */
void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
    pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
/**
 * Split a URL of the form proto://user:pass@host:port/path into its
 * components. Components that are absent are returned as empty
 * strings; *port_ptr is set to -1 when no port is present. Any output
 * buffer may be sized 0 to skip that component.
 */
void url_split(char *proto, int proto_size,
               char *authorization, int authorization_size,
               char *hostname, int hostname_size,
               char *path, int path_size,
    const char *p, *ls, *at, *col, *brk;

    /* Default every output so absent components read as empty/-1. */
    if (port_ptr) *port_ptr = -1;
    if (proto_size > 0) proto[0] = 0;
    if (authorization_size > 0) authorization[0] = 0;
    if (hostname_size > 0) hostname[0] = 0;
    if (path_size > 0) path[0] = 0;

    /* parse protocol */
    if ((p = strchr(url, ':'))) {
        av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
        /* no protocol means plain filename */
        av_strlcpy(path, url, path_size);

    /* separate path from hostname */
    ls = strchr(p, '/');
        ls = strchr(p, '?');
        av_strlcpy(path, ls, path_size);
        ls = &p[strlen(p)]; // XXX

    /* the rest is hostname, use that to parse auth/port */
        /* authorization (user[:pass]@hostname) */
        if ((at = strchr(p, '@')) && at < ls) {
            av_strlcpy(authorization, p,
                       FFMIN(authorization_size, at + 1 - p));
            p = at + 1; /* skip '@' */

        /* bracketed host, e.g. an IPv6 literal: [host]:port */
        if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
            av_strlcpy(hostname, p + 1,
                       FFMIN(hostname_size, brk - p));
            if (brk[1] == ':' && port_ptr)
                *port_ptr = atoi(brk + 2);
        } else if ((col = strchr(p, ':')) && col < ls) {
            /* plain host:port */
            av_strlcpy(hostname, p,
                       FFMIN(col + 1 - p, hostname_size));
            if (port_ptr) *port_ptr = atoi(col + 1);
            /* host only, no port */
            av_strlcpy(hostname, p,
                       FFMIN(ls + 1 - p, hostname_size));
/**
 * Render the bytes of src as uppercase hexadecimal into buff.
 * Exactly 2*s characters are written; no NUL terminator is appended —
 * the caller must provide (and terminate) a buffer of at least 2*s
 * bytes.
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
{
    static const char hex_table[16] = { '0', '1', '2', '3',
                                        '4', '5', '6', '7',
                                        '8', '9', 'A', 'B',
                                        'C', 'D', 'E', 'F' };
    char *out = buff;
    int i;

    for (i = 0; i < s; i++) {
        *out++ = hex_table[src[i] >> 4];  /* high nibble first */
        *out++ = hex_table[src[i] & 0xF];
    }
    return buff;
}
/**
 * Set the stream time base to pts_num/pts_den, reduced to lowest terms
 * with both parts clipped to fit an int; a degenerate result leaves
 * the time base zeroed (unset).
 * @param pts_wrap_bits number of bits the timestamps wrap after
 */
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
                     unsigned int pts_num, unsigned int pts_den)
    s->pts_wrap_bits = pts_wrap_bits;

    /* NOTE(review): a non-zero av_reduce() return presumably signals an
     * exact reduction — verify against libavutil/rational.h. */
    if(av_reduce(&s->time_base.num, &s->time_base.den, pts_num, pts_den, INT_MAX)){
        if(s->time_base.num != pts_num)
            av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/s->time_base.num);
        av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);

    /* Mark the time base as unset if either part reduced to zero. */
    if(!s->time_base.num || !s->time_base.den)
        s->time_base.num= s->time_base.den= 0;