2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libavcodec/opt.h"
25 #include "libavutil/avstring.h"
35 * @file libavformat/utils.c
36 * various utility functions for use within FFmpeg
/** Return the LIBAVFORMAT_VERSION_INT this library was compiled with. */
unsigned avformat_version(void)
    return LIBAVFORMAT_VERSION_INT;
/** Return the build-time ./configure command line (FFMPEG_CONFIGURATION). */
const char *avformat_configuration(void)
    return FFMPEG_CONFIGURATION;
/** Return the license name of libavformat. */
const char *avformat_license(void)
#define LICENSE_PREFIX "libavformat license: "
    /* The prefixed literal keeps a greppable string in the binary; the
     * pointer arithmetic skips past the prefix so callers get only the
     * license name (FFMPEG_LICENSE). */
    return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
55 /* fraction handling */
58 * f = val + (num / den) + 0.5.
60 * 'num' is normalized so that it is such as 0 <= num < den.
62 * @param f fractional number
63 * @param val integer value
64 * @param num must be >= 0
65 * @param den must be >= 1
/* Initialize f so that f = val + num/den + 0.5, with num normalized
 * into [0, den).  (Body not visible in this excerpt.) */
static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)

/**
 * Fractional addition to f: f = f + (incr / f->den).
 *
 * @param f fractional number
 * @param incr increment, can be positive or negative
 */
static void av_frac_add(AVFrac *f, int64_t incr)
    /* renormalize the fractional part back into [0, den) after adding */
    } else if (num >= den) {
/** head of registered input format linked list */
AVInputFormat *first_iformat = NULL;
/** head of registered output format linked list */
/* Both lists are appended to by the av_register_*_format() functions and
 * walked by av_iformat_next()/av_oformat_next(). */
AVOutputFormat *first_oformat = NULL;
/** Iterate the registered input formats: returns the format after f,
 *  or the list head when f is NULL. */
AVInputFormat *av_iformat_next(AVInputFormat *f)
    if(f) return f->next;
    else  return first_iformat;
/** Iterate the registered output formats: returns the format after f,
 *  or the list head when f is NULL. */
AVOutputFormat *av_oformat_next(AVOutputFormat *f)
    if(f) return f->next;
    else  return first_oformat;
/** Append 'format' to the global input-format list (not thread safe). */
void av_register_input_format(AVInputFormat *format)
    /* walk to the tail link of the singly linked list; p presumably starts
     * at &first_iformat — declaration not visible in this excerpt */
    while (*p != NULL) p = &(*p)->next;
/** Append 'format' to the global output-format list (not thread safe). */
void av_register_output_format(AVOutputFormat *format)
    /* walk to the tail link of the singly linked list; p presumably starts
     * at &first_oformat — declaration not visible in this excerpt */
    while (*p != NULL) p = &(*p)->next;
#if LIBAVFORMAT_VERSION_MAJOR < 53
/** Deprecated pre-major-53 alias; forwards to av_match_ext(). */
int match_ext(const char *filename, const char *extensions)
    return av_match_ext(filename, extensions);
/** Return 1 if filename's extension matches one entry of the
 *  comma-separated 'extensions' list (case-insensitive), else 0. */
int av_match_ext(const char *filename, const char *extensions)
    /* extension = text after the last '.' */
    ext = strrchr(filename, '.');
    /* copy one comma-separated candidate into ext1[], bounded to avoid overflow */
    while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
    if (!strcasecmp(ext1, ext))
/** Return nonzero if 'name' matches one entry of the comma-separated
 *  'names' list (case-insensitive, whole-token match). */
static int match_format(const char *name, const char *names)
    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        /* FFMAX guards against 'name' being only a prefix of the list entry
         * (and vice versa) matching by accident */
        len = FFMAX(p - names, namelen);
        if (!strncasecmp(name, names, len))
    /* last (or only) entry after the final comma */
    return !strcasecmp(name, names);
#if LIBAVFORMAT_VERSION_MAJOR < 53
/** Deprecated pre-major-53 alias; forwards to av_guess_format(). */
AVOutputFormat *guess_format(const char *short_name, const char *filename,
                             const char *mime_type)
    return av_guess_format(short_name, filename, mime_type);
/**
 * Guess the output (muxer) format.  Each registered muxer is scored on
 * matches against short_name, mime_type and the filename extension; the
 * highest-scoring one is returned.  Any parameter may be NULL.
 */
AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
                                const char *mime_type)
    AVOutputFormat *fmt, *fmt_found;
    int score_max, score;
    /* specific test for image sequences */
#if CONFIG_IMAGE2_MUXER
    /* a numbered filename with a known image codec maps to the image2 muxer */
    if (!short_name && filename &&
        av_filename_number_test(filename) &&
        av_guess_image2_codec(filename) != CODEC_ID_NONE) {
        return av_guess_format("image2", NULL, NULL);
    /* Find the proper file type. */
    while (fmt != NULL) {
        if (fmt->name && short_name && !strcmp(fmt->name, short_name))
        if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
        if (filename && fmt->extensions &&
            av_match_ext(filename, fmt->extensions)) {
        /* keep the best-scoring candidate */
        if (score > score_max) {
#if LIBAVFORMAT_VERSION_MAJOR < 53
/** Deprecated: guess the format, then prefer its "<name>_stream" variant
 *  (e.g. a streamable flavour of the muxer) when one is registered. */
AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
                                    const char *mime_type)
    AVOutputFormat *fmt = av_guess_format(short_name, filename, mime_type);
    AVOutputFormat *stream_fmt;
    char stream_format_name[64];
    snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
    stream_fmt = av_guess_format(stream_format_name, NULL, NULL);
/**
 * Guess the default codec id for muxer 'fmt' and media 'type'.
 * Video: the muxer's default video codec, except for image2/image2pipe
 * where the codec is derived from the filename; audio: the muxer's
 * default audio codec; anything else: CODEC_ID_NONE.
 */
enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
                            const char *filename, const char *mime_type, enum CodecType type){
    if(type == CODEC_TYPE_VIDEO){
        enum CodecID codec_id= CODEC_ID_NONE;
#if CONFIG_IMAGE2_MUXER
        if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
            codec_id= av_guess_image2_codec(filename);
        if(codec_id == CODEC_ID_NONE)
            codec_id= fmt->video_codec;
    }else if(type == CODEC_TYPE_AUDIO)
        return fmt->audio_codec;
    return CODEC_ID_NONE;
/** Find a registered input format whose name list matches 'short_name',
 *  or NULL if none does. */
AVInputFormat *av_find_input_format(const char *short_name)
    /* linear scan of the registered demuxer list */
    for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
        if (match_format(short_name, fmt->name))
287 /* memory handling */
/** Allocate a packet of 'size' bytes and fill it from the byte stream.
 *  On a short read the packet is shrunk to the bytes actually read. */
int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
    int ret= av_new_packet(pkt, size);
    /* record the byte offset the packet was read from */
    pkt->pos= url_ftell(s);
    ret= get_buffer(s, pkt->data, size);
    /* short read: trim the packet rather than leave trailing garbage */
    av_shrink_packet(pkt, ret);
/** Return 1 if 'filename' contains a valid frame-number pattern
 *  (e.g. "%d"), as checked by av_get_frame_filename(). */
int av_filename_number_test(const char *filename)
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
/**
 * Probe the registered demuxers against 'pd' and return the best match
 * scoring above *score_max; *score_max is updated to the winning score.
 * 'is_opened' selects between formats that do/do not need a real file.
 */
static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
    AVInputFormat *fmt1, *fmt;
    for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
        /* skip formats whose AVFMT_NOFILE flag disagrees with is_opened */
        if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
        if (fmt1->read_probe) {
            /* format supplies its own content probe */
            score = fmt1->read_probe(pd);
        } else if (fmt1->extensions) {
            /* fall back to filename-extension matching */
            if (av_match_ext(pd->filename, fmt1->extensions)) {
        if (score > *score_max) {
        }else if (score == *score_max)
/** Convenience wrapper around av_probe_input_format2() with a local score. */
AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
    return av_probe_input_format2(pd, is_opened, &score);
/**
 * Content-probe a stream whose codec is still CODEC_ID_PROBE and, when a
 * known elementary format is detected, set the stream's codec id/type
 * from a small demuxer-name -> codec mapping.
 */
static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
    fmt = av_probe_input_format2(pd, 1, &score);
    av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
           pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
    /* map detected demuxer name to the corresponding codec */
    if (!strcmp(fmt->name, "mp3")) {
        st->codec->codec_id = CODEC_ID_MP3;
        st->codec->codec_type = CODEC_TYPE_AUDIO;
    } else if (!strcmp(fmt->name, "ac3")) {
        st->codec->codec_id = CODEC_ID_AC3;
        st->codec->codec_type = CODEC_TYPE_AUDIO;
    } else if (!strcmp(fmt->name, "eac3")) {
        st->codec->codec_id = CODEC_ID_EAC3;
        st->codec->codec_type = CODEC_TYPE_AUDIO;
    } else if (!strcmp(fmt->name, "mpegvideo")) {
        st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
        st->codec->codec_type = CODEC_TYPE_VIDEO;
    } else if (!strcmp(fmt->name, "m4v")) {
        st->codec->codec_id = CODEC_ID_MPEG4;
        st->codec->codec_type = CODEC_TYPE_VIDEO;
    } else if (!strcmp(fmt->name, "h264")) {
        st->codec->codec_id = CODEC_ID_H264;
        st->codec->codec_type = CODEC_TYPE_VIDEO;
    } else if (!strcmp(fmt->name, "dts")) {
        st->codec->codec_id = CODEC_ID_DTS;
        st->codec->codec_type = CODEC_TYPE_AUDIO;
380 /************************************************************/
381 /* input media file */
 * Open a media file from an IO stream. 'fmt' must be specified.
/**
 * Allocate (unless ap->prealloced_context) and initialize an
 * AVFormatContext for 'pb', run the demuxer's read_header(), and return
 * it through *ic_ptr.  On failure the partially built context, its
 * private data and per-stream allocations are freed.
 */
int av_open_input_stream(AVFormatContext **ic_ptr,
                         ByteIOContext *pb, const char *filename,
                         AVInputFormat *fmt, AVFormatParameters *ap)
    AVFormatParameters default_ap;
    memset(ap, 0, sizeof(default_ap));
    if(!ap->prealloced_context)
        ic = avformat_alloc_context();
        err = AVERROR(ENOMEM);
    ic->duration = AV_NOPTS_VALUE;
    ic->start_time = AV_NOPTS_VALUE;
    av_strlcpy(ic->filename, filename, sizeof(ic->filename));
    /* allocate private data */
    if (fmt->priv_data_size > 0) {
        ic->priv_data = av_mallocz(fmt->priv_data_size);
        if (!ic->priv_data) {
            err = AVERROR(ENOMEM);
        ic->priv_data = NULL;
    /* let the demuxer parse the container header */
    if (ic->iformat->read_header) {
        err = ic->iformat->read_header(ic, ap);
    /* remember where payload data starts, for later seeking */
    if (pb && !ic->data_offset)
        ic->data_offset = url_ftell(ic->pb);
#if LIBAVFORMAT_VERSION_MAJOR < 53
    ff_metadata_demux_compat(ic);
    ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
    /* error path: unwind everything allocated above */
    av_freep(&ic->priv_data);
    for(i=0;i<ic->nb_streams;i++) {
        AVStream *st = ic->streams[i];
        av_free(st->priv_data);
        av_free(st->codec->extradata);
/** size of probe buffer, for guessing file type from file contents */
/* Probing starts at PROBE_BUF_MIN bytes and doubles up to PROBE_BUF_MAX
 * until a format scores high enough (see av_open_input_file()). */
#define PROBE_BUF_MIN 2048
#define PROBE_BUF_MAX (1<<20)
/**
 * Open 'filename', detect its container format (by name probe first, then
 * by reading progressively larger chunks of the file contents) and hand
 * off to av_open_input_stream().
 */
int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
                       AVFormatParameters *ap)
    AVProbeData probe_data, *pd = &probe_data;
    ByteIOContext *pb = NULL;
    /* use the caller's preallocated context for logging, if any */
    void *logctx= ap && ap->prealloced_context ? *ic_ptr : NULL;
    pd->filename = filename;
    /* guess format if no file can be opened */
    fmt = av_probe_input_format(pd, 0);
    /* Do not open file if the format does not need it. XXX: specific
       hack needed to handle RTSP/TCP */
    if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
        /* if no file needed do not try to open one */
        if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
        url_setbufsize(pb, buf_size);
        /* retry content probing with a doubling buffer until a format
         * clears the score threshold (threshold drops to 0 on last try) */
        for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
            int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
            /* read probe data */
            pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
            pd->buf_size = get_buffer(pb, pd->buf, probe_size);
            if ((int)pd->buf_size < 0) {
            /* zero the padding so probes may safely over-read */
            memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
            /* rewind; if the stream is unseekable, reopen from scratch */
            if (url_fseek(pb, 0, SEEK_SET) < 0) {
                if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
            /* guess file format */
            fmt = av_probe_input_format2(pd, 1, &score);
            if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
                av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
                av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
    /* if still no format found, error */
    /* check filename in case an image number is expected */
    if (fmt->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            err = AVERROR_NUMEXPECTED;
    err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
    if (ap && ap->prealloced_context)
555 /*******************************************************/
/** Append a copy of *pkt to the packet list, updating both the head
 *  (*packet_buffer) and the cached tail (*plast_pktl); returns the
 *  stored packet. */
static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
                               AVPacketList **plast_pktl){
    AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
    /* link at the tail if the list is non-empty, else become the head */
    (*plast_pktl)->next = pktl;
    *packet_buffer = pktl;
    /* add the packet in the buffered packet list */
/**
 * Read the next raw packet.  Packets for streams still in codec-probing
 * state (CODEC_ID_PROBE) are buffered in s->raw_packet_buffer and their
 * payload accumulated in the stream's probe_data until the codec can be
 * identified via set_codec_from_probe_data().
 */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
    AVPacketList *pktl = s->raw_packet_buffer;
    /* return a buffered packet once its stream no longer needs probing
     * (codec known, probe budget exhausted, or probe buffer full) */
    if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
       !s->streams[pkt->stream_index]->probe_packets ||
       s->raw_packet_buffer_remaining_size < pkt->size){
        AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
        s->raw_packet_buffer = pktl->next;
        s->raw_packet_buffer_remaining_size += pkt->size;
    ret= s->iformat->read_packet(s, pkt);
    /* EOF/error: stop probing all streams */
    if (!pktl || ret == AVERROR(EAGAIN))
    for (i = 0; i < s->nb_streams; i++)
        s->streams[i]->probe_packets = 0;
    st= s->streams[pkt->stream_index];
    /* honor caller-forced codec ids, per media type */
    switch(st->codec->codec_type){
    case CODEC_TYPE_VIDEO:
        if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
    case CODEC_TYPE_AUDIO:
        if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
    case CODEC_TYPE_SUBTITLE:
        if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
    if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
    /* stream still unidentified: keep the packet for later delivery */
    add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
    s->raw_packet_buffer_remaining_size -= pkt->size;
    if(st->codec->codec_id == CODEC_ID_PROBE){
        AVProbeData *pd = &st->probe_data;
        av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
        /* append this packet's payload to the probe buffer (zero padding) */
        pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
        memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
        pd->buf_size += pkt->size;
        memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
        /* re-probe only when the buffer size crosses a power of two */
        if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
            set_codec_from_probe_data(s, st, pd, 1);
            if(st->codec->codec_id != CODEC_ID_PROBE){
                av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
649 /**********************************************************/
 * Get the number of samples of an audio frame. Return -1 on error.
static int get_audio_frame_size(AVCodecContext *enc, int size)
    /* Vorbis has no fixed frame size usable here */
    if(enc->codec_id == CODEC_ID_VORBIS)
    if (enc->frame_size <= 1) {
        int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
        if (bits_per_sample) {
            if (enc->channels == 0)
            /* constant bits/sample: samples = bits / (bps * channels) */
            frame_size = (size << 3) / (bits_per_sample * enc->channels);
            /* used for example by ADPCM codecs */
            if (enc->bit_rate == 0)
            /* derive duration from bitrate: samples = bytes*8*rate/bitrate */
            frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
        /* codec has a fixed frame size */
        frame_size = enc->frame_size;
 * Return the frame duration in seconds. Return 0 if not available.
/* Outputs the duration of one frame as the rational *pnum / *pden. */
static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
                                   AVCodecParserContext *pc, AVPacket *pkt)
    switch(st->codec->codec_type) {
    case CODEC_TYPE_VIDEO:
        /* prefer the stream time base when it is plausibly a frame rate
         * (den/num < 1000); otherwise fall back to the codec time base */
        if(st->time_base.num*1000LL > st->time_base.den){
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
            *pnum = st->codec->time_base.num;
            *pden = st->codec->time_base.den;
            /* repeated fields/frames (e.g. pulldown) lengthen the duration */
            if (pc && pc->repeat_pict) {
                *pnum = (*pnum) * (1 + pc->repeat_pict);
    case CODEC_TYPE_AUDIO:
        /* duration = samples / sample_rate */
        frame_size = get_audio_frame_size(st->codec, pkt->size);
        *pden = st->codec->sample_rate;
/** Return nonzero when the codec produces only intra (key) frames, so
 *  every packet may be flagged PKT_FLAG_KEY. */
static int is_intra_only(AVCodecContext *enc){
    /* all audio codecs are treated as intra-only */
    if(enc->codec_type == CODEC_TYPE_AUDIO){
    }else if(enc->codec_type == CODEC_TYPE_VIDEO){
        /* whitelist of intra-only video codecs */
        switch(enc->codec_id){
        case CODEC_ID_MJPEGB:
        case CODEC_ID_RAWVIDEO:
        case CODEC_ID_DVVIDEO:
        case CODEC_ID_HUFFYUV:
        case CODEC_ID_FFVHUFF:
        case CODEC_ID_JPEG2000:
/**
 * Once the first reliable dts of a stream is known, derive st->first_dts
 * and retroactively shift the timestamps of already-buffered packets of
 * that stream; also initialize st->start_time.
 */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts)
    AVStream *st= s->streams[stream_index];
    AVPacketList *pktl= s->packet_buffer;
    /* nothing to do if first_dts already set or we have no usable dts */
    if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
    st->first_dts= dts - st->cur_dts;
    /* shift every buffered packet of this stream by the computed offset */
    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != stream_index)
        //FIXME think more about this check
        if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
            pktl->pkt.pts += st->first_dts;
        if(pktl->pkt.dts != AV_NOPTS_VALUE)
            pktl->pkt.dts += st->first_dts;
        if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time= pktl->pkt.pts;
    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
/**
 * Fill in missing dts/pts/duration of buffered packets of pkt's stream by
 * extrapolating from pkt->duration, and back-date st->first_dts so the
 * leading timestamp-less packets get consistent values.
 */
static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
    AVPacketList *pktl= s->packet_buffer;
    if(st->first_dts != AV_NOPTS_VALUE){
        cur_dts= st->first_dts;
        /* walk back over leading timestamp-less packets to find the
         * dts the very first packet should have had */
        for(; pktl; pktl= pktl->next){
            if(pktl->pkt.stream_index == pkt->stream_index){
                if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
                cur_dts -= pkt->duration;
        pktl= s->packet_buffer;
        st->first_dts = cur_dts;
    }else if(st->cur_dts)
    /* forward pass: assign extrapolated timestamps/durations */
    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != pkt->stream_index)
        if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
           && !pktl->pkt.duration){
            pktl->pkt.dts= cur_dts;
            /* without B-frames pts follows dts directly */
            if(!st->codec->has_b_frames)
                pktl->pkt.pts= cur_dts;
            cur_dts += pkt->duration;
            pktl->pkt.duration= pkt->duration;
    if(st->first_dts == AV_NOPTS_VALUE)
        st->cur_dts= cur_dts;
/**
 * Fill in missing packet fields (pts/dts/duration/key flag) using the
 * stream state, the parser context (may be NULL) and heuristics for
 * wrapped or inconsistent timestamps.  Updates st->cur_dts and the
 * reorder buffer st->pts_buffer as a side effect.
 */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
    int num, den, presentation_delayed, delay, i;
    if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
        pkt->dts= AV_NOPTS_VALUE;
    if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == FF_B_TYPE)
        //FIXME Set low_delay = 0 when has_b_frames = 1
        st->codec->has_b_frames = 1;
    /* do we have a video B-frame ? */
    delay= st->codec->has_b_frames;
    presentation_delayed = 0;
    /* XXX: need has_b_frame, but cannot get it if the codec is
        pc && pc->pict_type != FF_B_TYPE)
        presentation_delayed = 1;
    /* dts > pts can only come from timestamp wraparound; unwrap dts */
    if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
       /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
        pkt->dts -= 1LL<<st->pts_wrap_bits;
    // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
    // we take the conservative approach and discard both
    // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
    if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
        av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n");
        pkt->dts= pkt->pts= AV_NOPTS_VALUE;
    /* derive a duration from the frame rate / sample count if missing */
    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, pc, pkt);
            pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
    if(pkt->duration != 0 && s->packet_buffer)
        update_initial_durations(s, st, pkt);
    /* correct timestamps with byte offset if demuxers only have timestamps
       on packet boundaries */
    if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if(pkt->pts != AV_NOPTS_VALUE)
        if(pkt->dts != AV_NOPTS_VALUE)
    if (pc && pc->dts_sync_point >= 0) {
        // we have synchronization info from the parser
        int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
        int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
        if (pkt->dts != AV_NOPTS_VALUE) {
            // got DTS from the stream, update reference timestamp
            st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
            pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
        } else if (st->reference_dts != AV_NOPTS_VALUE) {
            // compute DTS based on reference timestamp
            pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
            pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
        if (pc->dts_sync_point > 0)
            st->reference_dts = pkt->dts; // new reference
    /* This may be redundant, but it should not hurt. */
    if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
        presentation_delayed = 1;
//    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
    /* interpolate PTS and DTS if they are not present */
    //We skip H264 currently because delay and has_b_frames are not reliably set
    if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->cur_dts;
            /* this is tricky: the dts must be incremented by the duration
               of the frame we are displaying, i.e. the last I- or P-frame */
            if (st->last_IP_duration == 0)
                st->last_IP_duration = pkt->duration;
            if(pkt->dts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts + st->last_IP_duration;
            st->last_IP_duration = pkt->duration;
            st->last_IP_pts= pkt->pts;
            /* cannot compute PTS if not present (we can compute it only
               by knowing the future */
        } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
            /* detect pts off by exactly one frame duration and correct it */
            if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
                int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
                int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
                if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
                    pkt->pts += pkt->duration;
//                    av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
            /* presentation is not delayed : PTS and DTS are the same */
            if(pkt->pts == AV_NOPTS_VALUE)
            update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
            if(pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = st->cur_dts;
            if(pkt->pts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->pts + pkt->duration;
    /* reorder-buffer insertion sort: smallest pending pts becomes the dts */
    if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
        st->pts_buffer[0]= pkt->pts;
        for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
        if(pkt->dts == AV_NOPTS_VALUE)
            pkt->dts= st->pts_buffer[0];
        if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
        if(pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;
//    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
    /* intra-only codecs: every packet is a keyframe */
    if(is_intra_only(st->codec))
        pkt->flags |= PKT_FLAG_KEY;
    /* keyframe computation */
    if (pc->key_frame == 1)
        pkt->flags |= PKT_FLAG_KEY;
    else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
        pkt->flags |= PKT_FLAG_KEY;
    pkt->convergence_duration = pc->convergence_duration;
/**
 * Read the next frame: pulls raw packets with av_read_packet(), feeds
 * streams that need parsing through their AVCodecParserContext, and
 * finalizes each output packet via compute_pkt_fields().  Also maintains
 * the generic index for AVFMT_GENERIC_INDEX demuxers.
 */
static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
    /* select current input stream component */
    if (!st->need_parsing || !st->parser) {
        /* no parsing needed: we just output the packet as is */
        /* raw data support */
        *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
        compute_pkt_fields(s, st, NULL, pkt);
        if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
            (pkt->flags & PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
            ff_reduce_index(s, st->index);
            av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
    } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
        /* feed the pending payload through the parser */
        len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
                               st->cur_ptr, st->cur_len,
                               st->cur_pkt.pts, st->cur_pkt.dts,
        /* timestamps are consumed only once per source packet */
        st->cur_pkt.pts = AV_NOPTS_VALUE;
        st->cur_pkt.dts = AV_NOPTS_VALUE;
        /* increment read pointer */
        /* return packet if any */
        pkt->stream_index = st->index;
        pkt->pts = st->parser->pts;
        pkt->dts = st->parser->dts;
        pkt->pos = st->parser->pos;
        /* data is owned by the parser's buffer, not the packet */
        pkt->destruct = NULL;
        compute_pkt_fields(s, st, st->parser, pkt);
        if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
            ff_reduce_index(s, st->index);
            av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
                               0, 0, AVINDEX_KEYFRAME);
    av_free_packet(&st->cur_pkt);
    /* read next packet */
    ret = av_read_packet(s, &cur_pkt);
    if (ret == AVERROR(EAGAIN))
    /* return the last frames, if any */
    for(i = 0; i < s->nb_streams; i++) {
        if (st->parser && st->need_parsing) {
            /* flush the parser with a NULL/zero input */
            av_parser_parse2(st->parser, st->codec,
                             &pkt->data, &pkt->size,
                             AV_NOPTS_VALUE, AV_NOPTS_VALUE,
    /* no more packets: really terminate parsing */
    st = s->streams[cur_pkt.stream_index];
    st->cur_pkt= cur_pkt;
    /* warn about (but keep) packets with pts earlier than dts */
    if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
       st->cur_pkt.dts != AV_NOPTS_VALUE &&
       st->cur_pkt.pts < st->cur_pkt.dts){
        av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
               st->cur_pkt.stream_index,
//        av_free_packet(&st->cur_pkt);
    if(s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
               st->cur_pkt.stream_index,
               st->cur_pkt.duration,
    st->cur_ptr = st->cur_pkt.data;
    st->cur_len = st->cur_pkt.size;
    /* lazily create the parser on first packet of the stream */
    if (st->need_parsing && !st->parser) {
        st->parser = av_parser_init(st->codec->codec_id);
            /* no parser available: just output the raw packets */
            st->need_parsing = AVSTREAM_PARSE_NONE;
        }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
            st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
    if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
        st->parser->next_frame_offset=
        st->parser->cur_offset= st->cur_pkt.pos;
    if(s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
/**
 * Public frame reader.  Without AVFMT_FLAG_GENPTS it is a thin wrapper
 * over av_read_frame_internal(); with it, packets are buffered until a
 * pts can be generated from a later packet's dts of the same stream.
 */
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
    const int genpts= s->flags & AVFMT_FLAG_GENPTS;
    pktl = s->packet_buffer;
    AVPacket *next_pkt= &pktl->pkt;
    if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
        /* look ahead in the buffer for a dts that can serve as this
         * packet's missing pts */
        while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
            if(   pktl->pkt.stream_index == next_pkt->stream_index
               && next_pkt->dts < pktl->pkt.dts
               && pktl->pkt.pts != pktl->pkt.dts //not b frame
               /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
                next_pkt->pts= pktl->pkt.dts;
        pktl = s->packet_buffer;
    /* deliver the head packet once its pts is known (or unknowable) */
    if(   next_pkt->pts != AV_NOPTS_VALUE
       || next_pkt->dts == AV_NOPTS_VALUE
    /* read packet from packet buffer, if there is data */
    s->packet_buffer = pktl->next;
    int ret= av_read_frame_internal(s, pkt);
    if(pktl && ret != AVERROR(EAGAIN)){
    /* buffer the packet for later pts generation */
    if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
                                   &s->packet_buffer_end)) < 0)
        return AVERROR(ENOMEM);
    assert(!s->packet_buffer);
    return av_read_frame_internal(s, pkt);
/* XXX: suppress the packet queue */
/** Free every packet in both the demuxed and the raw packet buffers and
 *  reset the raw-buffer byte budget. */
static void flush_packet_queue(AVFormatContext *s)
    pktl = s->packet_buffer;
    s->packet_buffer = pktl->next;
    av_free_packet(&pktl->pkt);
    while(s->raw_packet_buffer){
        pktl = s->raw_packet_buffer;
        s->raw_packet_buffer = pktl->next;
        av_free_packet(&pktl->pkt);
    /* both lists are now empty; clear the cached tail pointers too */
    s->packet_buffer_end=
    s->raw_packet_buffer_end= NULL;
    s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1173 /*******************************************************/
/** Pick a default stream: the first video stream if any, else the first
 *  audio stream, else stream 0 (negative on no streams). */
int av_find_default_stream_index(AVFormatContext *s)
    int first_audio_index = -1;
    if (s->nb_streams <= 0)
    for(i = 0; i < s->nb_streams; i++) {
        if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
        if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
            first_audio_index = i;
    /* no video stream found: fall back to audio, then stream 0 */
    return first_audio_index >= 0 ? first_audio_index : 0;
 * Flush the frame reader.
/* Drops all buffered packets and resets per-stream parser/timestamp state
 * (used around seeks). */
void av_read_frame_flush(AVFormatContext *s)
    flush_packet_queue(s);
    /* for each stream, reset read state */
    for(i = 0; i < s->nb_streams; i++) {
        av_parser_close(st->parser);
        av_free_packet(&st->cur_pkt);
        st->last_IP_pts = AV_NOPTS_VALUE;
        st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
        st->reference_dts = AV_NOPTS_VALUE;
        st->probe_packets = MAX_PROBE_PACKETS;
        /* empty the pts reorder buffer */
        for(j=0; j<MAX_REORDER_DELAY+1; j++)
            st->pts_buffer[j]= AV_NOPTS_VALUE;
/** After a seek, set every stream's cur_dts to 'timestamp' (expressed in
 *  ref_st's time base), rescaled into each stream's own time base. */
void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
    for(i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        /* convert: ref_st time base -> st time base */
        st->cur_dts = av_rescale(timestamp,
                                 st->time_base.den * (int64_t)ref_st->time_base.num,
                                 st->time_base.num * (int64_t)ref_st->time_base.den);
/** Keep the seek index within s->max_index_size bytes by discarding every
 *  second entry once the limit is reached. */
void ff_reduce_index(AVFormatContext *s, int stream_index)
    AVStream *st= s->streams[stream_index];
    unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
    if((unsigned)st->nb_index_entries >= max_entries){
        /* halve the index by keeping only even-numbered entries */
        for(i=0; 2*i<st->nb_index_entries; i++)
            st->index_entries[i]= st->index_entries[2*i];
        st->nb_index_entries= i;
/**
 * Insert an entry into the stream's sorted seek index (growing the array
 * as needed).  An entry with an equal timestamp is updated in place;
 * returns the entry's index.
 */
int av_add_index_entry(AVStream *st,
                       int64_t pos, int64_t timestamp, int size, int distance, int flags)
    AVIndexEntry *entries, *ie;
    /* overflow guard before computing the new allocation size */
    if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
    entries = av_fast_realloc(st->index_entries,
                              &st->index_entries_allocated_size,
                              (st->nb_index_entries + 1) *
                              sizeof(AVIndexEntry));
    st->index_entries= entries;
    /* find the insertion point that keeps the index sorted by timestamp */
    index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
    index= st->nb_index_entries++;
    ie= &entries[index];
    assert(index==0 || ie[-1].timestamp < timestamp);
    ie= &entries[index];
    if(ie->timestamp != timestamp){
        if(ie->timestamp <= timestamp)
        /* shift later entries to make room for the new one */
        memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
        st->nb_index_entries++;
    }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
        distance= ie->min_distance;
    ie->timestamp = timestamp;
    ie->min_distance= distance;
/**
 * Binary-search the seek index for wanted_timestamp; AVSEEK_FLAG_BACKWARD
 * selects the entry at-or-before, otherwise at-or-after.  Unless
 * AVSEEK_FLAG_ANY is set, the result is moved to the nearest keyframe
 * entry.  Returns the entry index (or an out-of-range value if none).
 */
int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
    AVIndexEntry *entries= st->index_entries;
    int nb_entries= st->nb_index_entries;
    /* classic bisection: narrow [a, b] around wanted_timestamp */
    timestamp = entries[m].timestamp;
    if(timestamp >= wanted_timestamp)
    if(timestamp <= wanted_timestamp)
    m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
    if(!(flags & AVSEEK_FLAG_ANY)){
        /* step to the closest keyframe in the requested direction */
        while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
            m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/**
 * Seek to target_ts using the demuxer's read_timestamp() via binary
 * search (av_gen_search()).  Cached index entries are used to narrow the
 * initial [pos_min, pos_max] search window before searching.
 */
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
    AVInputFormat *avif= s->iformat;
    int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
    int64_t ts_min, ts_max, ts;
    if (stream_index < 0)
    av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
    ts_min= AV_NOPTS_VALUE;
    pos_limit= -1; //gcc falsely says it may be uninitialized
    st= s->streams[stream_index];
    if(st->index_entries){
        /* lower bound from the nearest index entry at or before target_ts */
        index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
        index= FFMAX(index, 0);
        e= &st->index_entries[index];
        if(e->timestamp <= target_ts || e->pos == e->min_distance){
            ts_min= e->timestamp;
            av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
        /* upper bound from the nearest index entry at or after target_ts */
        index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
        assert(index < st->nb_index_entries);
            e= &st->index_entries[index];
            assert(e->timestamp >= target_ts);
            ts_max= e->timestamp;
            pos_limit= pos_max - e->min_distance;
            av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
                   pos_max,pos_limit, ts_max);
    pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
    /* position the stream and propagate the reached dts to all streams */
    if ((ret = url_fseek(s->pb, pos, SEEK_SET)) < 0)
    av_update_cur_dts(s, st, ts);
/**
 * Generic seek: binary search over file positions, refined by linear
 * interpolation, using read_timestamp() as the probe function.
 * On return *ts_ret holds the timestamp at the chosen position.
 * NOTE(review): this listing is elided; statements between the numbered
 * lines are not visible here, comments describe only what is shown.
 */
1399 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1401 int64_t start_pos, filesize;
1405 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* establish the lower bound from the first timestamp after data_offset */
1408 if(ts_min == AV_NOPTS_VALUE){
1409 pos_min = s->data_offset;
1410 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1411 if (ts_min == AV_NOPTS_VALUE)
/* establish the upper bound: probe backwards from EOF until a
   timestamp is found */
1415 if(ts_max == AV_NOPTS_VALUE){
1417 filesize = url_fsize(s->pb);
1418 pos_max = filesize - 1;
1421 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1423 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1424 if (ts_max == AV_NOPTS_VALUE)
/* extend ts_max to the last timestamp that still begins before EOF */
1428 int64_t tmp_pos= pos_max + 1;
1429 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1430 if(tmp_ts == AV_NOPTS_VALUE)
1434 if(tmp_pos >= filesize)
1440 if(ts_min > ts_max){
1442 }else if(ts_min == ts_max){
/* main refinement loop: narrow the interval around target_ts */
1447 while (pos_min < pos_limit) {
1449 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1453 assert(pos_limit <= pos_max);
1456 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1457 // interpolate position (better than dichotomy)
1458 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1459 + pos_min - approximate_keyframe_distance;
1460 }else if(no_change==1){
1461 // bisection, if interpolation failed to change min or max pos last time
1462 pos = (pos_min + pos_limit)>>1;
1464 /* linear search if bisection failed, can only happen if there
1465 are very few or no keyframes between min/max */
1470 else if(pos > pos_limit)
1474 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1480 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1481 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
1482 start_pos, no_change);
1484 if(ts == AV_NOPTS_VALUE){
1485 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1488 assert(ts != AV_NOPTS_VALUE);
/* shrink the interval depending on which side of target_ts we landed */
1489 if (target_ts <= ts) {
1490 pos_limit = start_pos - 1;
1494 if (target_ts >= ts) {
/* choose the bound requested by AVSEEK_FLAG_BACKWARD */
1500 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1501 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
/* re-probe timestamps around the chosen position (debug output) */
1504 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1506 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1507 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1508 pos, ts_min, target_ts, ts_max);
/**
 * Seek to a raw byte position (AVSEEK_FLAG_BYTE path), clamped to
 * [data_offset, filesize - 1].
 */
1514 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1515 int64_t pos_min, pos_max;
1519 if (stream_index < 0)
1522 st= s->streams[stream_index];
1525 pos_min = s->data_offset;
1526 pos_max = url_fsize(s->pb) - 1;
/* clamp the requested byte offset into the valid data range */
1528 if (pos < pos_min) pos= pos_min;
1529 else if(pos > pos_max) pos= pos_max;
1531 url_fseek(s->pb, pos, SEEK_SET);
1534 av_update_cur_dts(s, st, ts);
/**
 * Generic fallback seek: use the stream's index if it covers the target,
 * otherwise read frames forward to grow the index until it does, then
 * seek to the matching index entry.
 */
1539 static int av_seek_frame_generic(AVFormatContext *s,
1540 int stream_index, int64_t timestamp, int flags)
1547 st = s->streams[stream_index];
1549 index = av_index_search_timestamp(st, timestamp, flags);
/* target lies before the first index entry: no point scanning forward */
1551 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1554 if(index < 0 || index==st->nb_index_entries-1){
/* index does not (yet) cover the target: resume at the last indexed
   position (or data start) and read frames to extend the index */
1558 if(st->nb_index_entries){
1559 assert(st->index_entries);
1560 ie= &st->index_entries[st->nb_index_entries-1];
1561 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1563 av_update_cur_dts(s, st, ie->timestamp);
1565 if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
1571 ret = av_read_frame(s, &pkt);
1572 }while(ret == AVERROR(EAGAIN));
1575 av_free_packet(&pkt);
/* stop once a keyframe past the target timestamp is seen */
1576 if(stream_index == pkt.stream_index){
1577 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1581 index = av_index_search_timestamp(st, timestamp, flags);
1586 av_read_frame_flush(s);
1587 if (s->iformat->read_seek){
1588 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1591 ie = &st->index_entries[index];
1592 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1594 av_update_cur_dts(s, st, ie->timestamp);
/**
 * Public seek entry point. Dispatch order: byte seek if requested, then
 * the demuxer's own read_seek(), then binary search via read_timestamp(),
 * finally the generic index-based fallback.
 */
1599 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1604 av_read_frame_flush(s);
1606 if(flags & AVSEEK_FLAG_BYTE)
1607 return av_seek_frame_byte(s, stream_index, timestamp, flags);
/* stream_index < 0: pick a default stream and convert the timestamp
   from AV_TIME_BASE units into that stream's time base */
1609 if(stream_index < 0){
1610 stream_index= av_find_default_stream_index(s);
1611 if(stream_index < 0)
1614 st= s->streams[stream_index];
1615 /* timestamp for default must be expressed in AV_TIME_BASE units */
1616 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1619 /* first, we try the format specific seek */
1620 if (s->iformat->read_seek)
1621 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1628 if(s->iformat->read_timestamp)
1629 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1631 return av_seek_frame_generic(s, stream_index, timestamp, flags);
/**
 * New-style seek API: seek to ts while staying within [min_ts, max_ts].
 * Prefers the demuxer's read_seek2(); otherwise falls back to the old
 * av_seek_frame() API, picking AVSEEK_FLAG_BACKWARD when ts is closer
 * to min_ts than to max_ts.
 */
1634 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1636 if(min_ts > ts || max_ts < ts)
1639 av_read_frame_flush(s);
1641 if (s->iformat->read_seek2)
1642 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1644 if(s->iformat->read_timestamp){
1645 //try to seek via read_timestamp()
1648 //Fall back to the old API if the new one is not implemented but the old is
1649 //Note the old API has somewhat different semantics
1650 if(s->iformat->read_seek || 1)
1651 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1653 // try some generic seek like av_seek_frame_generic() but with new ts semantics
1656 /*******************************************************/
 * Tell whether any stream in the context has a known duration.
 *
 * @return TRUE if at least one stream has its duration field set
1663 static int av_has_duration(AVFormatContext *ic)
1668 for(i = 0;i < ic->nb_streams; i++) {
1669 st = ic->streams[i];
1670 if (st->duration != AV_NOPTS_VALUE)
 * Estimate the container's global start_time/duration from the
 * per-stream values (all rescaled to AV_TIME_BASE), and derive the
 * global bitrate from file_size/duration when possible.
1681 static void av_update_stream_timings(AVFormatContext *ic)
1683 int64_t start_time, start_time1, end_time, end_time1;
1684 int64_t duration, duration1;
1688 start_time = INT64_MAX;
1689 end_time = INT64_MIN;
1690 duration = INT64_MIN;
/* scan all streams: earliest start, latest end, longest duration */
1691 for(i = 0;i < ic->nb_streams; i++) {
1692 st = ic->streams[i];
1693 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1694 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1695 if (start_time1 < start_time)
1696 start_time = start_time1;
1697 if (st->duration != AV_NOPTS_VALUE) {
1698 end_time1 = start_time1
1699 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1700 if (end_time1 > end_time)
1701 end_time = end_time1;
1704 if (st->duration != AV_NOPTS_VALUE) {
1705 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1706 if (duration1 > duration)
1707 duration = duration1;
1710 if (start_time != INT64_MAX) {
1711 ic->start_time = start_time;
1712 if (end_time != INT64_MIN) {
/* span (end - start) wins over the longest single-stream duration */
1713 if (end_time - start_time > duration)
1714 duration = end_time - start_time;
1717 if (duration != INT64_MIN) {
1718 ic->duration = duration;
1719 if (ic->file_size > 0) {
1720 /* compute the bitrate */
1721 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1722 (double)ic->duration;
/**
 * Propagate the container-level start_time/duration (computed by
 * av_update_stream_timings()) into every stream that lacks its own,
 * rescaled into each stream's time base.
 */
1727 static void fill_all_stream_timings(AVFormatContext *ic)
1732 av_update_stream_timings(ic);
1733 for(i = 0;i < ic->nb_streams; i++) {
1734 st = ic->streams[i];
1735 if (st->start_time == AV_NOPTS_VALUE) {
1736 if(ic->start_time != AV_NOPTS_VALUE)
1737 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1738 if(ic->duration != AV_NOPTS_VALUE)
1739 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/**
 * Least precise duration estimate: duration = 8 * file_size / bit_rate.
 * The global bitrate itself is summed from the per-stream codec
 * bitrates when not already known.
 */
1744 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1746 int64_t filesize, duration;
1750 /* if bit_rate is already set, we believe it */
1751 if (ic->bit_rate == 0) {
1753 for(i=0;i<ic->nb_streams;i++) {
1754 st = ic->streams[i];
1755 bit_rate += st->codec->bit_rate;
1757 ic->bit_rate = bit_rate;
1760 /* if duration is already set, we believe it */
1761 if (ic->duration == AV_NOPTS_VALUE &&
1762 ic->bit_rate != 0 &&
1763 ic->file_size != 0) {
1764 filesize = ic->file_size;
1766 for(i = 0; i < ic->nb_streams; i++) {
1767 st = ic->streams[i];
/* duration in the stream's own time base: 8*filesize/bit_rate seconds */
1768 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1769 if (st->duration == AV_NOPTS_VALUE)
1770 st->duration = duration;
/* bytes read from the end of the file per retry, and retry count,
   when probing for the last timestamps */
1776 #define DURATION_MAX_READ_SIZE 250000
1777 #define DURATION_MAX_RETRY 3
1779 /* only usable for MPEG-PS streams */
/**
 * Estimate durations by reading packets near the end of the file and
 * subtracting each stream's start time from the last PTS seen; widens
 * the probed tail window up to DURATION_MAX_RETRY times.
 */
1780 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1782 AVPacket pkt1, *pkt = &pkt1;
1784 int read_size, i, ret;
1785 int64_t end_time, start_time[MAX_STREAMS];
1786 int64_t filesize, offset, duration;
1791 /* flush packet queue */
1792 flush_packet_queue(ic);
/* record each stream's start time (or first DTS as a fallback) */
1794 for(i=0;i<ic->nb_streams;i++) {
1795 st = ic->streams[i];
1796 if(st->start_time != AV_NOPTS_VALUE){
1797 start_time[i]= st->start_time;
1798 }else if(st->first_dts != AV_NOPTS_VALUE){
1799 start_time[i]= st->first_dts;
1801 av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n");
1804 av_parser_close(st->parser);
1806 av_free_packet(&st->cur_pkt);
1810 /* estimate the end time (duration) */
1811 /* XXX: may need to support wrapping */
1812 filesize = ic->file_size;
1813 end_time = AV_NOPTS_VALUE;
1815 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
1819 url_fseek(ic->pb, offset, SEEK_SET);
1822 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
1826 ret = av_read_packet(ic, pkt);
1827 }while(ret == AVERROR(EAGAIN));
1830 read_size += pkt->size;
1831 st = ic->streams[pkt->stream_index];
1832 if (pkt->pts != AV_NOPTS_VALUE &&
1833 start_time[pkt->stream_index] != AV_NOPTS_VALUE) {
1834 end_time = pkt->pts;
1835 duration = end_time - start_time[pkt->stream_index];
/* compensate a wrapped timestamp counter */
1837 duration += 1LL<<st->pts_wrap_bits;
1839 if (st->duration == AV_NOPTS_VALUE ||
1840 st->duration < duration)
1841 st->duration = duration;
1844 av_free_packet(pkt);
1846 }while( end_time==AV_NOPTS_VALUE
1847 && filesize > (DURATION_MAX_READ_SIZE<<retry)
1848 && ++retry <= DURATION_MAX_RETRY);
1850 fill_all_stream_timings(ic);
/* restore the original file position and reset decode state */
1852 url_fseek(ic->pb, old_offset, SEEK_SET);
1853 for(i=0; i<ic->nb_streams; i++){
1855 st->cur_dts= st->first_dts;
1856 st->last_IP_pts = AV_NOPTS_VALUE;
/**
 * Choose the best available timing-estimation strategy:
 * PTS probing for seekable MPEG-PS/TS, per-stream timings when any
 * stream has a duration, otherwise the bitrate heuristic.
 */
1860 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1864 /* get the file size, if possible */
1865 if (ic->iformat->flags & AVFMT_NOFILE) {
1868 file_size = url_fsize(ic->pb);
1872 ic->file_size = file_size;
1874 if ((!strcmp(ic->iformat->name, "mpeg") ||
1875 !strcmp(ic->iformat->name, "mpegts")) &&
1876 file_size && !url_is_streamed(ic->pb)) {
1877 /* get accurate estimate from the PTSes */
1878 av_estimate_timings_from_pts(ic, old_offset);
1879 } else if (av_has_duration(ic)) {
1880 /* at least one component has timings - we use them for all
1882 fill_all_stream_timings(ic);
1884 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
1885 /* less precise: use bitrate info */
1886 av_estimate_timings_from_bit_rate(ic);
1888 av_update_stream_timings(ic);
/* debug dump of the resulting per-stream and global timings */
1894 for(i = 0;i < ic->nb_streams; i++) {
1895 st = ic->streams[i];
1896 printf("%d: start_time: %0.3f duration: %0.3f\n",
1897 i, (double)st->start_time / AV_TIME_BASE,
1898 (double)st->duration / AV_TIME_BASE);
1900 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1901 (double)ic->start_time / AV_TIME_BASE,
1902 (double)ic->duration / AV_TIME_BASE,
1903 ic->bit_rate / 1000);
/**
 * Return non-zero when enc carries enough parameters to be usable
 * (sample rate/channels/format for audio, width/pix_fmt for video).
 * Some audio codecs additionally require frame_size to be known.
 */
1908 static int has_codec_parameters(AVCodecContext *enc)
1911 switch(enc->codec_type) {
1912 case CODEC_TYPE_AUDIO:
1913 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
/* these codecs only reveal frame_size after decoding a frame */
1914 if(!enc->frame_size &&
1915 (enc->codec_id == CODEC_ID_VORBIS ||
1916 enc->codec_id == CODEC_ID_AAC ||
1917 enc->codec_id == CODEC_ID_MP3 ||
1918 enc->codec_id == CODEC_ID_SPEEX))
1921 case CODEC_TYPE_VIDEO:
1922 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1928 return enc->codec_id != CODEC_ID_NONE && val != 0;
/**
 * Open a decoder for st (if not already open) and decode one packet,
 * in the hope that decoding fills in the still-missing codec
 * parameters (used by av_find_stream_info()).
 */
1931 static int try_decode_frame(AVStream *st, AVPacket *avpkt)
1935 int got_picture, data_size, ret=0;
1938 if(!st->codec->codec){
1939 codec = avcodec_find_decoder(st->codec->codec_id);
1942 ret = avcodec_open(st->codec, codec);
/* only decode if the parameters are still incomplete */
1947 if(!has_codec_parameters(st->codec)){
1948 switch(st->codec->codec_type) {
1949 case CODEC_TYPE_VIDEO:
1950 avcodec_get_frame_defaults(&picture);
1951 ret = avcodec_decode_video2(st->codec, &picture,
1952 &got_picture, avpkt);
1954 case CODEC_TYPE_AUDIO:
/* audio output buffer must hold at least a full decoded frame */
1955 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1956 samples = av_malloc(data_size);
1959 ret = avcodec_decode_audio3(st->codec, samples,
1971 unsigned int ff_codec_get_tag(const AVCodecTag *tags, int id)
1973 while (tags->id != CODEC_ID_NONE) {
1981 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1984 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1985 if(tag == tags[i].tag)
1988 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1989 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1990 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1991 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1992 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1995 return CODEC_ID_NONE;
1998 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2001 for(i=0; tags && tags[i]; i++){
2002 int tag= ff_codec_get_tag(tags[i], id);
2008 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2011 for(i=0; tags && tags[i]; i++){
2012 enum CodecID id= ff_codec_get_id(tags[i], tag);
2013 if(id!=CODEC_ID_NONE) return id;
2015 return CODEC_ID_NONE;
/**
 * Fill in missing chapter end times: each open-ended chapter ends where
 * the next one starts; the final one ends at start_time + duration.
 */
2018 static void compute_chapters_end(AVFormatContext *s)
2022 for (i=0; i+1<s->nb_chapters; i++)
2023 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2024 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
2025 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
2026 s->chapters[i]->end = s->chapters[i+1]->start;
/* last chapter: derive its end from the container's global timing */
2029 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
2030 assert(s->start_time != AV_NOPTS_VALUE);
2031 assert(s->duration > 0);
2032 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
2034 s->chapters[i]->time_base);
2038 #define MAX_STD_TIMEBASES (60*12+5)
/**
 * Map a standard-timebase index to a frame rate expressed in units of
 * 1/(1001*12) Hz: indices below 60*12 cover n/1.001 rates, the last
 * five entries cover the common exact rates 24, 30, 60, 12 and 15 fps.
 */
static int get_std_framerate(int i)
{
    if (i < 60*12)
        return i * 1001;

    static const int exact_fps[] = { 24, 30, 60, 12, 15 };
    return exact_fps[i - 60*12] * 1000 * 12;
}
 * Is the time base unreliable?
 * This is a heuristic to balance between quick acceptance of the values in
 * the headers vs. some extra checks.
 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
 * MPEG-2 commonly misuses field repeat flags to store different framerates.
 * And there are "variable" fps files this needs to detect as well.
2052 static int tb_unreliable(AVCodecContext *c){
/* suspicious when the nominal rate is >= 100.x fps or < 5 fps,
   or for codecs known to misreport their time base */
2053 if( c->time_base.den >= 101L*c->time_base.num
2054 || c->time_base.den < 5L*c->time_base.num
2055 /* || c->codec_tag == AV_RL32("DIVX")
2056 || c->codec_tag == AV_RL32("XVID")*/
2057 || c->codec_id == CODEC_ID_MPEG2VIDEO
2058 || c->codec_id == CODEC_ID_H264
/**
 * Read packets from the input to fill in missing stream parameters
 * (codec parameters, frame rate, durations). Packets read here are
 * buffered so they are returned again by later av_read_frame() calls.
 * NOTE(review): this listing is elided; statements between the numbered
 * lines are missing, comments describe only the visible code.
 */
2064 int av_find_stream_info(AVFormatContext *ic)
2066 int i, count, ret, read_size, j;
2068 AVPacket pkt1, *pkt;
2069 int64_t last_dts[MAX_STREAMS];
2070 int64_t duration_gcd[MAX_STREAMS]={0};
2071 int duration_count[MAX_STREAMS]={0};
2072 double (*duration_error)[MAX_STD_TIMEBASES];
2073 int64_t old_offset = url_ftell(ic->pb);
2074 int64_t codec_info_duration[MAX_STREAMS]={0};
2075 int codec_info_nb_frames[MAX_STREAMS]={0};
2077 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2078 if (!duration_error) return AVERROR(ENOMEM);
/* per-stream setup: time base, parser, and an early decoder-open
   attempt in case opening alone supplies the parameters */
2080 for(i=0;i<ic->nb_streams;i++) {
2081 st = ic->streams[i];
2082 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2083 /* if(!st->time_base.num)
2085 if(!st->codec->time_base.num)
2086 st->codec->time_base= st->time_base;
2088 //only for the split stuff
2090 st->parser = av_parser_init(st->codec->codec_id);
2091 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2092 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2095 assert(!st->codec->codec);
2096 //try to just open decoders, in case this is enough to get parameters
2097 if(!has_codec_parameters(st->codec)){
2098 AVCodec *codec = avcodec_find_decoder(st->codec->codec_id);
2100 avcodec_open(st->codec, codec);
2104 for(i=0;i<MAX_STREAMS;i++){
2105 last_dts[i]= AV_NOPTS_VALUE;
/* main probing loop */
2111 if(url_interrupt_cb()){
2112 ret= AVERROR(EINTR);
2113 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2117 /* check if one codec still needs to be handled */
2118 for(i=0;i<ic->nb_streams;i++) {
2119 st = ic->streams[i];
2120 if (!has_codec_parameters(st->codec))
2122 /* variable fps and no guess at the real fps */
2123 if( tb_unreliable(st->codec)
2124 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2126 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2128 if(st->first_dts == AV_NOPTS_VALUE)
2131 if (i == ic->nb_streams) {
2132 /* NOTE: if the format has no header, then we need to read
2133 some packets to get most of the streams, so we cannot
2135 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2136 /* if we found the info for all the codecs, we can stop */
2138 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2142 /* we did not get all the codec info, but we read too much data */
2143 if (read_size >= ic->probesize) {
2145 av_log(ic, AV_LOG_WARNING, "MAX_READ_SIZE:%d reached\n", ic->probesize);
2149 /* NOTE: a new stream can be added there if no header in file
2150 (AVFMTCTX_NOHEADER) */
2151 ret = av_read_frame_internal(ic, &pkt1);
2152 if(ret == AVERROR(EAGAIN))
/* EOF reached before all parameters were found: report what is missing */
2156 ret = -1; /* we could not have all the codec parameters before EOF */
2157 for(i=0;i<ic->nb_streams;i++) {
2158 st = ic->streams[i];
2159 if (!has_codec_parameters(st->codec)){
2161 avcodec_string(buf, sizeof(buf), st->codec, 0);
2162 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
/* buffer the packet so av_read_frame() will deliver it later */
2170 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2171 if(av_dup_packet(pkt) < 0) {
2172 av_free(duration_error);
2173 return AVERROR(ENOMEM);
2176 read_size += pkt->size;
2178 st = ic->streams[pkt->stream_index];
2179 if(codec_info_nb_frames[st->index]>1) {
2180 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration){
2181 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2184 codec_info_duration[st->index] += pkt->duration;
2186 if (pkt->duration != 0)
2187 codec_info_nb_frames[st->index]++;
/* frame-rate estimation: accumulate inter-packet DTS deltas and the
   per-standard-timebase error used to pick the best match later */
2190 int index= pkt->stream_index;
2191 int64_t last= last_dts[index];
2192 int64_t duration= pkt->dts - last;
2194 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2195 double dur= duration * av_q2d(st->time_base);
2197 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2198 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2199 if(duration_count[index] < 2)
2200 memset(duration_error[index], 0, sizeof(*duration_error));
2201 for(i=1; i<MAX_STD_TIMEBASES; i++){
2202 int framerate= get_std_framerate(i);
2203 int ticks= lrintf(dur*framerate/(1001*12));
2204 double error= dur - ticks*1001*12/(double)framerate;
2205 duration_error[index][i] += error*error;
2207 duration_count[index]++;
2208 // ignore the first 4 values, they might have some random jitter
2209 if (duration_count[index] > 3)
2210 duration_gcd[index] = av_gcd(duration_gcd[index], duration);
2212 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2213 last_dts[pkt->stream_index]= pkt->dts;
/* let the parser extract extradata from the bitstream if needed */
2215 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2216 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2218 st->codec->extradata_size= i;
2219 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2220 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2221 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2225 /* if still no information, we try to open the codec and to
2226 decompress the frame. We try to avoid that in most cases as
2227 it takes longer and uses more memory. For MPEG-4, we need to
2228 decompress for QuickTime. */
2229 if (!has_codec_parameters(st->codec))
2230 try_decode_frame(st, pkt);
2235 // close codecs which were opened in try_decode_frame()
2236 for(i=0;i<ic->nb_streams;i++) {
2237 st = ic->streams[i];
2238 if(st->codec->codec)
2239 avcodec_close(st->codec);
/* post-processing: derive avg/r_frame_rate and codec tags per stream */
2241 for(i=0;i<ic->nb_streams;i++) {
2242 st = ic->streams[i];
2243 if(codec_info_nb_frames[i]>2 && !st->avg_frame_rate.num)
2244 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2245 (codec_info_nb_frames[i]-2)*(int64_t)st->time_base.den,
2246 codec_info_duration[i] *(int64_t)st->time_base.num, 60000);
2247 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2248 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2249 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2251 // the check for tb_unreliable() is not completely correct, since this is not about handling
2252 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2253 // ipmovie.c produces.
2254 if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1)
2255 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX);
2256 if(duration_count[i]
2257 && tb_unreliable(st->codec) /*&&
2258 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2259 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
/* pick the standard frame rate with the smallest accumulated error */
2261 double best_error= 2*av_q2d(st->time_base);
2262 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2264 for(j=1; j<MAX_STD_TIMEBASES; j++){
2265 double error= duration_error[i][j] * get_std_framerate(j);
2266 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2267 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2268 if(error < best_error){
2270 num = get_std_framerate(j);
2273 // do not increase frame rate by more than 1 % in order to match a standard rate.
2274 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2275 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2278 if (!st->r_frame_rate.num){
2279 if( st->codec->time_base.den * (int64_t)st->time_base.num
2280 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2281 st->r_frame_rate.num = st->codec->time_base.den;
2282 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2284 st->r_frame_rate.num = st->time_base.den;
2285 st->r_frame_rate.den = st->time_base.num;
2288 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2289 if(!st->codec->bits_per_coded_sample)
2290 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2294 av_estimate_timings(ic, old_offset);
2296 compute_chapters_end(ic);
2299 /* correct DTS for B-frame streams with no timestamps */
2300 for(i=0;i<ic->nb_streams;i++) {
2301 st = ic->streams[i];
2302 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2304 ppktl = &ic->packet_buffer;
2306 if(ppkt1->stream_index != i)
2308 if(ppkt1->pkt->dts < 0)
2310 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2312 ppkt1->pkt->dts -= delta;
2317 st->cur_dts -= delta;
2323 av_free(duration_error);
2328 /*******************************************************/
/**
 * Start/resume playback: prefer the demuxer's read_play() callback,
 * otherwise un-pause the underlying protocol.
 */
2330 int av_read_play(AVFormatContext *s)
2332 if (s->iformat->read_play)
2333 return s->iformat->read_play(s);
2335 return av_url_read_fpause(s->pb, 0);
2336 return AVERROR(ENOSYS);
/**
 * Pause playback: prefer the demuxer's read_pause() callback,
 * otherwise pause the underlying protocol.
 */
2339 int av_read_pause(AVFormatContext *s)
2341 if (s->iformat->read_pause)
2342 return s->iformat->read_pause(s);
2344 return av_url_read_fpause(s->pb, 1);
2345 return AVERROR(ENOSYS);
/**
 * Free an AVFormatContext and everything it owns (streams, programs,
 * chapters, metadata, queued packets) without closing the underlying
 * ByteIOContext.
 */
2348 void av_close_input_stream(AVFormatContext *s)
2353 if (s->iformat->read_close)
2354 s->iformat->read_close(s);
2355 for(i=0;i<s->nb_streams;i++) {
2356 /* free all data in a stream component */
2359 av_parser_close(st->parser);
2360 av_free_packet(&st->cur_pkt);
2362 av_metadata_free(&st->metadata);
2363 av_free(st->index_entries);
2364 av_free(st->codec->extradata);
/* pre-53 ABI kept the deprecated filename field on AVStream */
2366 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2367 av_free(st->filename);
2369 av_free(st->priv_data);
2372 for(i=s->nb_programs-1; i>=0; i--) {
2373 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2374 av_freep(&s->programs[i]->provider_name);
2375 av_freep(&s->programs[i]->name);
2377 av_metadata_free(&s->programs[i]->metadata);
2378 av_freep(&s->programs[i]->stream_index);
2379 av_freep(&s->programs[i]);
2381 av_freep(&s->programs);
2382 flush_packet_queue(s);
2383 av_freep(&s->priv_data);
2384 while(s->nb_chapters--) {
2385 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2386 av_free(s->chapters[s->nb_chapters]->title);
2388 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2389 av_free(s->chapters[s->nb_chapters]);
2391 av_freep(&s->chapters);
2392 av_metadata_free(&s->metadata);
/**
 * Close an input file: free the context via av_close_input_stream(),
 * then close the I/O context unless the format is AVFMT_NOFILE.
 * The pb pointer is saved first because s is freed by the call below.
 */
2396 void av_close_input_file(AVFormatContext *s)
2398 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2399 av_close_input_stream(s);
/**
 * Allocate and register a new AVStream on s with sensible defaults
 * (MPEG-like 90kHz time base, all timestamps unset).
 *
 * @param id format-specific stream id stored on the new stream
 * @return the new stream, or NULL on failure / too many streams
 */
2404 AVStream *av_new_stream(AVFormatContext *s, int id)
2409 if (s->nb_streams >= MAX_STREAMS)
2412 st = av_mallocz(sizeof(AVStream));
2416 st->codec= avcodec_alloc_context();
2418 /* no default bitrate if decoding */
2419 st->codec->bit_rate = 0;
2421 st->index = s->nb_streams;
2423 st->start_time = AV_NOPTS_VALUE;
2424 st->duration = AV_NOPTS_VALUE;
2425 /* we set the current DTS to 0 so that formats without any timestamps
2426 but durations get some timestamps, formats with some unknown
2427 timestamps have their first few packets buffered and the
2428 timestamps corrected before they are returned to the user */
2430 st->first_dts = AV_NOPTS_VALUE;
2431 st->probe_packets = MAX_PROBE_PACKETS;
2433 /* default pts setting is MPEG-like */
2434 av_set_pts_info(st, 33, 1, 90000);
2435 st->last_IP_pts = AV_NOPTS_VALUE;
2436 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2437 st->pts_buffer[i]= AV_NOPTS_VALUE;
2438 st->reference_dts = AV_NOPTS_VALUE;
2440 st->sample_aspect_ratio = (AVRational){0,1};
2442 s->streams[s->nb_streams++] = st;
/**
 * Return the program with the given id, creating and registering a new
 * one if it does not exist yet.
 */
2446 AVProgram *av_new_program(AVFormatContext *ac, int id)
2448 AVProgram *program=NULL;
2452 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
/* reuse an existing program with the same id */
2455 for(i=0; i<ac->nb_programs; i++)
2456 if(ac->programs[i]->id == id)
2457 program = ac->programs[i];
2460 program = av_mallocz(sizeof(AVProgram));
2463 dynarray_add(&ac->programs, &ac->nb_programs, program);
2464 program->discard = AVDISCARD_NONE;
/**
 * Return the chapter with the given id, creating and registering a new
 * one if needed, then (re)set its title, time base and start/end.
 */
2471 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2473 AVChapter *chapter = NULL;
/* reuse an existing chapter with the same id */
2476 for(i=0; i<s->nb_chapters; i++)
2477 if(s->chapters[i]->id == id)
2478 chapter = s->chapters[i];
2481 chapter= av_mallocz(sizeof(AVChapter));
2484 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2486 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2487 av_free(chapter->title)
2489 av_metadata_set(&chapter->metadata, "title", title);
2491 chapter->time_base= time_base;
2492 chapter->start = start;
2498 /************************************************************/
2499 /* output media file */
/**
 * Allocate the muxer's private data and pass the user parameters to
 * the output format's set_parameters() callback if it has one.
 */
2501 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2505 if (s->oformat->priv_data_size > 0) {
2506 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2508 return AVERROR(ENOMEM);
2510 s->priv_data = NULL;
2512 if (s->oformat->set_parameters) {
2513 ret = s->oformat->set_parameters(s, ap);
/**
 * Validate stream parameters, fix up codec tags, write the container
 * header via the muxer's write_header(), and initialize per-stream PTS
 * generation.
 */
2520 int av_write_header(AVFormatContext *s)
2525 // some sanity checks
2526 if (s->nb_streams == 0) {
2527 av_log(s, AV_LOG_ERROR, "no streams\n");
2531 for(i=0;i<s->nb_streams;i++) {
2534 switch (st->codec->codec_type) {
2535 case CODEC_TYPE_AUDIO:
2536 if(st->codec->sample_rate<=0){
2537 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2540 if(!st->codec->block_align)
2541 st->codec->block_align = st->codec->channels *
2542 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2544 case CODEC_TYPE_VIDEO:
2545 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2546 av_log(s, AV_LOG_ERROR, "time base not set\n");
2549 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2550 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2553 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2554 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
/* reconcile the user-supplied codec tag with the container's table */
2560 if(s->oformat->codec_tag){
2561 if(st->codec->codec_tag){
2563 //check that tag + id is in the table
2564 //if neither is in the table -> OK
2565 //if tag is in the table with another id -> FAIL
2566 //if id is in the table with another tag -> FAIL unless strict < ?
2568 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2571 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2572 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2573 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2576 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2577 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2579 return AVERROR(ENOMEM);
2582 #if LIBAVFORMAT_VERSION_MAJOR < 53
2583 ff_metadata_mux_compat(s);
2586 /* set muxer identification string */
2587 if (!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2591 if (!(m = av_mallocz(sizeof(AVMetadata))))
2592 return AVERROR(ENOMEM);
2593 av_metadata_set2(&m, "encoder", LIBAVFORMAT_IDENT, 0);
2594 metadata_conv(&m, s->oformat->metadata_conv, NULL);
2595 if ((t = av_metadata_get(m, "", NULL, AV_METADATA_IGNORE_SUFFIX)))
2596 av_metadata_set2(&s->metadata, t->key, t->value, 0);
2597 av_metadata_free(&m);
2600 if(s->oformat->write_header){
2601 ret = s->oformat->write_header(s);
2606 /* init PTS generation */
2607 for(i=0;i<s->nb_streams;i++) {
2608 int64_t den = AV_NOPTS_VALUE;
2611 switch (st->codec->codec_type) {
2612 case CODEC_TYPE_AUDIO:
2613 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2615 case CODEC_TYPE_VIDEO:
2616 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2621 if (den != AV_NOPTS_VALUE) {
2623 return AVERROR_INVALIDDATA;
2624 av_frac_init(&st->pts, 0, 0, den);
//FIXME merge with compute_pkt_fields
/**
 * Fill in missing pts/dts/duration on an outgoing packet and validate
 * timestamp monotonicity before muxing; also advances the stream's
 * fractional PTS counter.
 */
2631 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
2632 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2633 int num, den, frame_size, i;
2635 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2637 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2640 /* duration field */
2641 if (pkt->duration == 0) {
2642 compute_frame_duration(&num, &den, st, NULL, pkt);
2644 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
/* without B-frame delay, dts alone determines pts */
2648 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2651 //XXX/FIXME this is a temporary hack until all encoders output pts
2652 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2654 // pkt->pts= st->cur_dts;
2655 pkt->pts= st->pts.val;
2658 //calculate dts from pts
/* derive dts as the minimum of the pts reorder window */
2659 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2660 st->pts_buffer[0]= pkt->pts;
2661 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2662 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2663 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2664 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2666 pkt->dts= st->pts_buffer[0];
/* reject non-monotonically-increasing dts and pts < dts */
2669 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2670 av_log(s, AV_LOG_ERROR,
2671 "st:%d error, non monotone timestamps %"PRId64" >= %"PRId64"\n",
2672 st->index, st->cur_dts, pkt->dts);
2675 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2676 av_log(s, AV_LOG_ERROR, "st:%d error, pts < dts\n", st->index);
2680 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2681 st->cur_dts= pkt->dts;
2682 st->pts.val= pkt->dts;
/* advance the fractional PTS counter by one frame's worth of time */
2685 switch (st->codec->codec_type) {
2686 case CODEC_TYPE_AUDIO:
2687 frame_size = get_audio_frame_size(st->codec, pkt->size);
2689 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2690 likely equal to the encoder delay, but it would be better if we
2691 had the real timestamps from the encoder */
2692 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2693 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2696 case CODEC_TYPE_VIDEO:
2697 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
     /* Write a packet to the output, bypassing interleaving: the caller is
      * responsible for correct ordering.  Timestamp problems reported by
      * compute_pkt_fields2() are fatal unless the muxer advertises
      * AVFMT_NOTIMESTAMPS.  On success the muxer's write status (and then
      * any byte-stream error from url_ferror) becomes the return value. */
2705 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2707 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
2709 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2712 ret= s->oformat->write_packet(s, pkt);
2714 ret= url_ferror(s->pb);
     /* Insert a copy of *pkt into the format context's interleaving buffer,
      * at the position chosen by the 'compare' predicate, and update both the
      * global packet_buffer_end and the per-stream last_in_packet_buffer
      * shortcut pointers.  The original packet's data is either stolen
      * (destruct cleared) or duplicated so the caller may discard it. */
2718 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2719 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2721 AVPacketList **next_point, *this_pktl;
2723 this_pktl = av_mallocz(sizeof(AVPacketList));
     /* NOTE(review): av_mallocz result is dereferenced without a NULL check
      * — apparent OOM hazard; confirm against project policy. */
2724 this_pktl->pkt= *pkt;
2725 pkt->destruct= NULL; // do not free original but only the copy
2726 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
     /* Start searching after this stream's last buffered packet when
      * possible; packets of one stream are already in order. */
2728 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
2729 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
2731 next_point = &s->packet_buffer;
     /* Fast path: if the new packet sorts after the current tail, append;
      * otherwise walk the list to find the insertion point. */
2734 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
2735 while(!compare(s, &(*next_point)->pkt, pkt)){
2736 next_point= &(*next_point)->next;
2740 next_point = &(s->packet_buffer_end->next);
2743 assert(!*next_point);
2745 s->packet_buffer_end= this_pktl;
2748 this_pktl->next= *next_point;
2750 s->streams[pkt->stream_index]->last_in_packet_buffer=
2751 *next_point= this_pktl;
     /* Ordering predicate for dts-based interleaving: returns nonzero when
      * pkt's dts, rescaled into next's stream timebase (rounding down),
      * is strictly smaller than next's dts — i.e. pkt sorts before next. */
2754 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2756 AVStream *st = s->streams[ pkt ->stream_index];
2757 AVStream *st2= s->streams[ next->stream_index];
     /* Cross-multiplied timebases: a/b is the factor taking pkt's dts into
      * st2's timebase without floating point. */
2758 int64_t a= st2->time_base.num * (int64_t)st ->time_base.den;
2759 int64_t b= st ->time_base.num * (int64_t)st2->time_base.den;
2760 return av_rescale_rnd(pkt->dts, b, a, AV_ROUND_DOWN) < next->dts;
     /* Default interleaving strategy: buffer incoming packets sorted by dts
      * and release the head of the queue only once every stream has at least
      * one buffered packet (or when flushing at EOF).  Returns 1 with a
      * packet in *out, 0 when nothing can be output yet. */
2763 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2769 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
     /* Count streams that currently have something buffered. */
2772 for(i=0; i < s->nb_streams; i++)
2773 stream_count+= !!s->streams[i]->last_in_packet_buffer;
2775 if(stream_count && (s->nb_streams == stream_count || flush)){
2776 pktl= s->packet_buffer;
     /* Pop the head and fix up the tail / per-stream shortcut pointers. */
2779 s->packet_buffer= pktl->next;
2780 if(!s->packet_buffer)
2781 s->packet_buffer_end= NULL;
2783 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
2784 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
2788 av_init_packet(out);
2794 * Interleaves an AVPacket correctly so it can be muxed.
2795 * @param out the interleaved packet will be output here
2796 * @param in the input packet
2797 * @param flush 1 if no further packets are available as input and all
2798 *              remaining packets should be output
2799 * @return 1 if a packet was output, 0 if no packet could be output,
2800 *         < 0 if an error occurred
     /* Dispatch: prefer the muxer's own interleave_packet callback, fall
      * back to the generic dts-ordered implementation. */
2802 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2803 if(s->oformat->interleave_packet)
2804 return s->oformat->interleave_packet(s, out, in, flush);
2806 return av_interleave_packet_per_dts(s, out, in, flush);
     /* Interleave a packet and write any packets that become ready.
      * Unlike av_write_frame(), this owns the ordering: the packet goes
      * through av_interleave_packet() and may be buffered rather than
      * written immediately.  Loops (loop head on an elided line) until the
      * interleaver has nothing more to emit. */
2809 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2810 AVStream *st= s->streams[ pkt->stream_index];
2812 //FIXME/XXX/HACK drop zero sized packets
2813 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2816 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2817 if(compute_pkt_fields2(s, st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
     /* A muxer that needs timestamps cannot accept a packet without dts. */
2820 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2825 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2826 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2829 ret= s->oformat->write_packet(s, &opkt);
2831 av_free_packet(&opkt);
2836 if(url_ferror(s->pb))
2837 return url_ferror(s->pb);
     /* Finish muxing: drain the interleaving buffer (flush=1), call the
      * muxer's write_trailer hook if present, then free per-stream and
      * format private data.  Returns 0 or the first error encountered
      * (the surrounding loop/return lines are elided from this listing). */
2841 int av_write_trailer(AVFormatContext *s)
2847 ret= av_interleave_packet(s, &pkt, NULL, 1);
2848 if(ret<0) //FIXME cleanup needed for ret<0 ?
2853 ret= s->oformat->write_packet(s, &pkt);
2855 av_free_packet(&pkt);
2859 if(url_ferror(s->pb))
2863 if(s->oformat->write_trailer)
2864 ret = s->oformat->write_trailer(s);
2867 ret=url_ferror(s->pb);
     /* Release allocations owned by the muxer. */
2868 for(i=0;i<s->nb_streams;i++)
2869 av_freep(&s->streams[i]->priv_data);
2870 av_freep(&s->priv_data);
     /* Add stream index 'idx' to the program identified by 'progid'.
      * Validates the index, ignores duplicates, and grows the program's
      * stream_index array via a checked realloc (tmp guards against losing
      * the old pointer on failure). */
2874 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2877 AVProgram *program=NULL;
2880 if (idx >= ac->nb_streams) {
2881 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
2885 for(i=0; i<ac->nb_programs; i++){
2886 if(ac->programs[i]->id != progid)
2888 program = ac->programs[i];
     /* Already a member — nothing to do (jump target on elided line). */
2889 for(j=0; j<program->nb_stream_indexes; j++)
2890 if(program->stream_index[j] == idx)
2893 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2896 program->stream_index = tmp;
2897 program->stream_index[program->nb_stream_indexes++] = idx;
     /* Log a rate with adaptive precision: two decimals when fractional,
      * integral when a whole number, and in 'k' units for multiples of
      * 1000 (v holds the value scaled by 100 for the divisibility tests). */
2902 static void print_fps(double d, const char *postfix){
2903 uint64_t v= lrintf(d*100);
2904 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
2905 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
2906 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
     /* Log every metadata tag except "language" under a "Metadata:" header,
      * indented by 'indent'.  Skips the header entirely when the only tag
      * present is "language" (that one is printed inline elsewhere). */
2909 static void dump_metadata(void *ctx, AVMetadata *m, const char *indent)
2911 if(m && !(m->count == 1 && av_metadata_get(m, "language", NULL, 0))){
2912 AVMetadataTag *tag=NULL;
2914 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
2915 while((tag=av_metadata_get(m, "", tag, AV_METADATA_IGNORE_SUFFIX))) {
2916 if(strcmp("language", tag->key))
2917 av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value);
2922 /* "user interface" functions */
     /* Log a one-line human-readable description of stream i of 'ic'
      * (codec string, id, language, aspect ratios and the various frame/
      * tick rates), followed by the stream's metadata. */
2923 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2926 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2927 AVStream *st = ic->streams[i];
     /* gcd reduces the timebase before the debug print below. */
2928 int g = av_gcd(st->time_base.num, st->time_base.den);
2929 AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
2930 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2931 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2932 /* the pid is an important information, so we display it */
2933 /* XXX: add a generic system */
2934 if (flags & AVFMT_SHOW_IDS)
2935 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2937 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
2938 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2939 av_log(NULL, AV_LOG_INFO, ": %s", buf);
     /* Only print PAR/DAR when the stream-level SAR differs from the
      * codec-level one (otherwise avcodec_string already covered it). */
2940 if (st->sample_aspect_ratio.num && // default
2941 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
2942 AVRational display_aspect_ratio;
2943 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
2944 st->codec->width*st->sample_aspect_ratio.num,
2945 st->codec->height*st->sample_aspect_ratio.den,
2947 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
2948 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2949 display_aspect_ratio.num, display_aspect_ratio.den);
     /* fps = average frame rate, tbr = real base frame rate,
      * tbn = stream timebase, tbc = codec timebase (all as rates). */
2951 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2952 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
2953 print_fps(av_q2d(st->avg_frame_rate), "fps");
2954 if(st->r_frame_rate.den && st->r_frame_rate.num)
2955 print_fps(av_q2d(st->r_frame_rate), "tbr");
2956 if(st->time_base.den && st->time_base.num)
2957 print_fps(1/av_q2d(st->time_base), "tbn");
2958 if(st->codec->time_base.den && st->codec->time_base.num)
2959 print_fps(1/av_q2d(st->codec->time_base), "tbc");
2961 av_log(NULL, AV_LOG_INFO, "\n");
2962 dump_metadata(NULL, st->metadata, " ");
     /* Log a human-readable summary of the whole container: format name,
      * metadata, duration, start time, bitrate, then every program and
      * stream.  'printed' marks streams already shown under a program so
      * the trailing loop only dumps the leftovers. */
2965 void dump_format(AVFormatContext *ic,
2971 uint8_t *printed = av_mallocz(ic->nb_streams);
2972 if (ic->nb_streams && !printed)
2975 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2976 is_output ? "Output" : "Input",
2978 is_output ? ic->oformat->name : ic->iformat->name,
2979 is_output ? "to" : "from", url);
2980 dump_metadata(NULL, ic->metadata, " ");
2982 av_log(NULL, AV_LOG_INFO, " Duration: ");
2983 if (ic->duration != AV_NOPTS_VALUE) {
2984 int hours, mins, secs, us;
2985 secs = ic->duration / AV_TIME_BASE;
2986 us = ic->duration % AV_TIME_BASE;
     /* hours/mins derivation happens on elided lines. */
2991 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2992 (100 * us) / AV_TIME_BASE);
2994 av_log(NULL, AV_LOG_INFO, "N/A");
2996 if (ic->start_time != AV_NOPTS_VALUE) {
2998 av_log(NULL, AV_LOG_INFO, ", start: ");
2999 secs = ic->start_time / AV_TIME_BASE;
3000 us = ic->start_time % AV_TIME_BASE;
3001 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3002 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3004 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3006 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3008 av_log(NULL, AV_LOG_INFO, "N/A");
3010 av_log(NULL, AV_LOG_INFO, "\n");
     /* Group streams by program when programs exist. */
3012 if(ic->nb_programs) {
3013 int j, k, total = 0;
3014 for(j=0; j<ic->nb_programs; j++) {
3015 AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
3017 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3018 name ? name->value : "");
3019 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3020 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3021 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3022 printed[ic->programs[j]->stream_index[k]] = 1;
3024 total += ic->programs[j]->nb_stream_indexes;
3026 if (total < ic->nb_streams)
3027 av_log(NULL, AV_LOG_INFO, " No Program\n");
     /* Streams not attached to any program (guard on elided line checks
      * 'printed' — confirm against full source). */
3029 for(i=0;i<ic->nb_streams;i++)
3031 dump_stream_format(ic, i, index, is_output);
3036 #if LIBAVFORMAT_VERSION_MAJOR < 53
     /* Deprecated pre-53 API shims: both simply forward to the libavutil
      * video-size / frame-rate parsers.  Kept only for ABI compatibility. */
3037 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
3039 return av_parse_video_frame_size(width_ptr, height_ptr, str);
3042 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
3044 AVRational frame_rate;
3045 int ret = av_parse_video_frame_rate(&frame_rate, arg);
3046 *frame_rate_num= frame_rate.num;
3047 *frame_rate_den= frame_rate.den;
     /* Current wall-clock time in microseconds since the Unix epoch,
      * via gettimeofday(2). */
3052 int64_t av_gettime(void)
3055 gettimeofday(&tv,NULL);
3056 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
     /* Parse 'datestr' into microseconds.  With duration==0 the string is an
      * absolute date/time ("now", YYYY-MM-DD[ T]HH:MM:SS..., trailing 'Z'
      * for UTC) returned as epoch microseconds; with duration!=0 it is a
      * time span (HH:MM:SS or plain seconds, optional leading '-').
      * Fractions after '.' are parsed to microsecond precision.
      * NOTE(review): error-return lines are elided from this listing. */
3059 int64_t parse_date(const char *datestr, int duration)
3065 static const char * const date_fmt[] = {
3069 static const char * const time_fmt[] = {
3079 time_t now = time(0);
3081 len = strlen(datestr);
3083 lastch = datestr[len - 1];
3086 is_utc = (lastch == 'z' || lastch == 'Z');
3088 memset(&dt, 0, sizeof(dt));
3093 if (!strncasecmp(datestr, "now", len))
3094 return (int64_t) now * 1000000;
3096 /* parse the year-month-day part */
3097 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
3098 q = small_strptime(p, date_fmt[i], &dt);
3104 /* if the year-month-day part is missing, then take the
3105 * current year-month-day time */
3110 dt = *localtime(&now);
3112 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
3117 if (*p == 'T' || *p == 't' || *p == ' ')
3120 /* parse the hour-minute-second part */
3121 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
3122 q = small_strptime(p, time_fmt[i], &dt);
3128 /* parse datestr as a duration */
3133 /* parse datestr as HH:MM:SS */
3134 q = small_strptime(p, time_fmt[0], &dt);
3136 /* parse datestr as S+ */
3137 dt.tm_sec = strtol(p, (char **)&q, 10);
3139 /* the parsing didn't succeed */
3146 /* Now we have all the fields that we can get */
3152 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
3154 dt.tm_isdst = -1; /* unknown */
3164 /* parse the .m... part */
     /* Accumulate up to six fractional digits as microseconds. */
3168 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
3171 val += n * (*q - '0');
3175 return negative ? -t : t;
     /* Search a URL-style info string ("tag1=val&tag2=val...") for 'tag1'
      * and copy its value into arg (at most arg_size-1 chars plus NUL).
      * Returns nonzero when the tag was found.  The scanning loop and
      * unescaping details sit on elided lines. */
3178 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
     /* Copy the tag name up to '=', '&' or end of string, bounded. */
3188 while (*p != '\0' && *p != '=' && *p != '&') {
3189 if ((q - tag) < sizeof(tag) - 1)
     /* Copy the value up to the next '&', bounded by arg_size. */
3197 while (*p != '&' && *p != '\0') {
3198 if ((q - arg) < arg_size - 1) {
3208 if (!strcmp(tag, tag1))
     /* Expand a printf-like "%0Nd" (or "%d") pattern in 'path' with
      * 'number', writing the result into buf.  Exactly one %d-style
      * specifier must be present; otherwise the call fails (error paths
      * are on elided lines).  Returns 0 on success. */
3217 int av_get_frame_filename(char *buf, int buf_size,
3218 const char *path, int number)
3221 char *q, buf1[20], c;
3222 int nd, len, percentd_found;
     /* Parse the optional zero-pad width after '%'. */
3234 while (isdigit(*p)) {
3235 nd = nd * 10 + *p++ - '0';
3238 } while (isdigit(c));
3247 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3249 if ((q - buf + len) > buf_size - 1)
3251 memcpy(q, buf1, len);
     /* Copy ordinary characters, leaving room for the terminator. */
3259 if ((q - buf) < buf_size - 1)
3263 if (!percentd_found)
     /* Hex-dump 'size' bytes in 16-byte rows: hex values then a printable-
      * ASCII column ('.' for non-printables).  Output goes to FILE *f when
      * given, otherwise to av_log at 'level' — the PRINT macro folds the
      * two sinks together. */
3272 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3276 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3278 for(i=0;i<size;i+=16) {
3285 PRINT(" %02x", buf[i+j]);
3290 for(j=0;j<len;j++) {
3292 if (c < ' ' || c > '~')
     /* Public wrapper: hex dump to a stdio stream. */
3301 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3303 hex_dump_internal(NULL, f, 0, buf, size);
     /* Public wrapper: hex dump through av_log at the given level. */
3306 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3308 hex_dump_internal(avcl, NULL, level, buf, size);
3311 //FIXME needs to know the time_base
     /* Pretty-print an AVPacket's fields (stream index, keyframe flag,
      * duration, dts, pts, size) to a stdio stream or av_log, optionally
      * followed by a hex dump of the payload.  Timestamps are shown as
      * seconds assuming AV_TIME_BASE units — see the FIXME above. */
3312 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3315 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3316 PRINT("stream #%d:\n", pkt->stream_index);
3317 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3318 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3319 /* DTS is _always_ valid after av_read_frame() */
3321 if (pkt->dts == AV_NOPTS_VALUE)
3324 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3325 /* PTS may not be known if B-frames are present. */
3327 if (pkt->pts == AV_NOPTS_VALUE)
3330 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3332 PRINT(" size=%d\n", pkt->size);
3335 av_hex_dump(f, pkt->data, pkt->size);
     /* Public wrapper: packet dump to a stdio stream. */
3338 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3340 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
     /* Public wrapper: packet dump through av_log at the given level. */
3343 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3345 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
     /* Split a URL of the form proto://user:pass@host:port/path?query into
      * its components.  Any output buffer may be sized 0 to skip that
      * component; port_ptr gets -1 when no port is present.  Supports
      * bracketed IPv6 literals ([::1]:port). */
3348 void url_split(char *proto, int proto_size,
3349 char *authorization, int authorization_size,
3350 char *hostname, int hostname_size,
3352 char *path, int path_size,
3355 const char *p, *ls, *at, *col, *brk;
     /* Initialize every output to "empty" so partial URLs are safe. */
3357 if (port_ptr) *port_ptr = -1;
3358 if (proto_size > 0) proto[0] = 0;
3359 if (authorization_size > 0) authorization[0] = 0;
3360 if (hostname_size > 0) hostname[0] = 0;
3361 if (path_size > 0) path[0] = 0;
3363 /* parse protocol */
3364 if ((p = strchr(url, ':'))) {
3365 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3370 /* no protocol means plain filename */
3371 av_strlcpy(path, url, path_size);
3375 /* separate path from hostname */
3376 ls = strchr(p, '/');
3378 ls = strchr(p, '?');
3380 av_strlcpy(path, ls, path_size);
3382 ls = &p[strlen(p)]; // XXX
3384 /* the rest is hostname, use that to parse auth/port */
3386 /* authorization (user[:pass]@hostname) */
3387 if ((at = strchr(p, '@')) && at < ls) {
3388 av_strlcpy(authorization, p,
3389 FFMIN(authorization_size, at + 1 - p));
3390 p = at + 1; /* skip '@' */
     /* Bracketed IPv6 literal: host is between '[' and ']'. */
3393 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3395 av_strlcpy(hostname, p + 1,
3396 FFMIN(hostname_size, brk - p));
3397 if (brk[1] == ':' && port_ptr)
3398 *port_ptr = atoi(brk + 2);
3399 } else if ((col = strchr(p, ':')) && col < ls) {
3400 av_strlcpy(hostname, p,
3401 FFMIN(col + 1 - p, hostname_size));
3402 if (port_ptr) *port_ptr = atoi(col + 1);
3404 av_strlcpy(hostname, p,
3405 FFMIN(ls + 1 - p, hostname_size));
     /* Encode s bytes of src as uppercase hex into buff (2*s chars written;
      * caller provides the buffer and, on elided lines, the terminator /
      * return of buff). */
3409 char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
3412 static const char hex_table[16] = { '0', '1', '2', '3',
3415 'C', 'D', 'E', 'F' };
3417 for(i = 0; i < s; i++) {
3418 buff[i * 2] = hex_table[src[i] >> 4];
3419 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3425 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3426 unsigned int pts_num, unsigned int pts_den)
3428 s->pts_wrap_bits = pts_wrap_bits;
3430 if(av_reduce(&s->time_base.num, &s->time_base.den, pts_num, pts_den, INT_MAX)){
3431 if(s->time_base.num != pts_num)
3432 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/s->time_base.num);
3434 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
3436 if(!s->time_base.num || !s->time_base.den)
3437 s->time_base.num= s->time_base.den= 0;