/*
 * various utility functions for use within FFmpeg
 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
23 #include "libavcodec/internal.h"
24 #include "libavutil/opt.h"
27 #include "libavutil/avstring.h"
29 #include "audiointerleave.h"
43 * various utility functions for use within FFmpeg
46 unsigned avformat_version(void)
48 return LIBAVFORMAT_VERSION_INT;
51 const char *avformat_configuration(void)
53 return FFMPEG_CONFIGURATION;
56 const char *avformat_license(void)
58 #define LICENSE_PREFIX "libavformat license: "
59 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
62 /* fraction handling */
65 * f = val + (num / den) + 0.5.
67 * 'num' is normalized so that it is such as 0 <= num < den.
69 * @param f fractional number
70 * @param val integer value
71 * @param num must be >= 0
72 * @param den must be >= 1
74 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
87 * Fractional addition to f: f = f + (incr / f->den).
89 * @param f fractional number
90 * @param incr increment, can be positive or negative
92 static void av_frac_add(AVFrac *f, int64_t incr)
105 } else if (num >= den) {
/* Registry of available (de)muxers; av_register_input_format() /
 * av_register_output_format() append to these singly linked lists. */

/** head of registered input format linked list */
AVInputFormat *first_iformat = NULL;
/** head of registered output format linked list */
AVOutputFormat *first_oformat = NULL;
117 AVInputFormat *av_iformat_next(AVInputFormat *f)
119 if(f) return f->next;
120 else return first_iformat;
123 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
125 if(f) return f->next;
126 else return first_oformat;
129 void av_register_input_format(AVInputFormat *format)
133 while (*p != NULL) p = &(*p)->next;
138 void av_register_output_format(AVOutputFormat *format)
142 while (*p != NULL) p = &(*p)->next;
/* ASCII case-insensitive string equality helper (strcasecmp-like, returns 0
 * on match); avoids the non-ISO strcasecmp() so this unit stays portable. */
static int match_ext_casecmp(const char *a, const char *b)
{
    while (*a && *b &&
           tolower((unsigned char)*a) == tolower((unsigned char)*b)) {
        a++;
        b++;
    }
    return tolower((unsigned char)*a) - tolower((unsigned char)*b);
}

/**
 * Check whether filename's extension matches one of the listed extensions.
 *
 * @param filename   file name to test; may be NULL (returns 0)
 * @param extensions comma-separated list, e.g. "mp3,wav"; entries longer than
 *                   31 characters are truncated for the comparison
 * @return 1 on a case-insensitive match, 0 otherwise
 */
int av_match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if (!filename)
        return 0;

    ext = strrchr(filename, '.');
    if (ext) {
        ext++;
        p = extensions;
        for (;;) {
            /* copy the next comma-separated entry, bounded by ext1 */
            q = ext1;
            while (*p != '\0' && *p != ',' &&
                   (size_t)(q - ext1) < sizeof(ext1) - 1)
                *q++ = *p++;
            *q = '\0';
            if (!match_ext_casecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++;
        }
    }
    return 0;
}
/* strncasecmp()-like ASCII helper: compares at most n characters,
 * stopping at NUL; returns 0 on match. */
static int match_format_ncasecmp(const char *a, const char *b, size_t n)
{
    while (n--) {
        int ca = tolower((unsigned char)*a++);
        int cb = tolower((unsigned char)*b++);
        if (ca != cb)
            return ca - cb;
        if (ca == '\0')
            break;
    }
    return 0;
}

/**
 * Check whether name matches one entry of the comma-separated list names.
 *
 * @param name  short name to look for; may be NULL (returns 0)
 * @param names comma-separated list; may be NULL (returns 0)
 * @return 1 on a case-insensitive match, 0 otherwise
 */
static int match_format(const char *name, const char *names)
{
    const char *p;
    int len, namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        /* compare over the longer of the two lengths so a mere common
         * prefix does not count as a match */
        len = p - names > namelen ? (int)(p - names) : namelen;
        if (!match_format_ncasecmp(name, names, len))
            return 1;
        names = p + 1;
    }
    /* last (or only) entry in the list */
    return !match_format_ncasecmp(name, names, SIZE_MAX);
}
192 #if LIBAVFORMAT_VERSION_MAJOR < 53
193 AVOutputFormat *guess_format(const char *short_name, const char *filename,
194 const char *mime_type)
196 return av_guess_format(short_name, filename, mime_type);
/**
 * Return the registered output format that best matches the given short
 * name, filename and/or MIME type, or defer to the image2 muxer for
 * numbered image sequences.
 * NOTE(review): lines are missing from this extraction (score
 * initialisation, score increments and the scan-loop tail are not visible);
 * the surviving code is kept byte-for-byte.
 */
AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
                            const char *mime_type)
    AVOutputFormat *fmt, *fmt_found;
    int score_max, score;

    /* specific test for image sequences */
#if CONFIG_IMAGE2_MUXER
    if (!short_name && filename &&
        av_filename_number_test(filename) &&
        av_guess_image2_codec(filename) != CODEC_ID_NONE) {
        return av_guess_format("image2", NULL, NULL);
    /* Find the proper file type. */
    while (fmt != NULL) {
        /* presumably each hit below adds to 'score' — increments not visible */
        if (fmt->name && short_name && !strcmp(fmt->name, short_name))
        if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
        if (filename && fmt->extensions &&
            av_match_ext(filename, fmt->extensions)) {
        if (score > score_max) {
#if LIBAVFORMAT_VERSION_MAJOR < 53
/**
 * Deprecated pre-53 API: guess a format and then try to upgrade it to a
 * "<name>_stream" streaming variant via a second av_guess_format() lookup.
 * NOTE(review): null checks, the return statement and #endif were lost in
 * this extraction; code kept as-is.
 */
AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
                                    const char *mime_type)
    AVOutputFormat *fmt = av_guess_format(short_name, filename, mime_type);
    AVOutputFormat *stream_fmt;
    char stream_format_name[64];

    /* e.g. "asf" -> "asf_stream" */
    snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
    stream_fmt = av_guess_format(stream_format_name, NULL, NULL);
258 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
259 const char *filename, const char *mime_type, enum AVMediaType type){
260 if(type == AVMEDIA_TYPE_VIDEO){
261 enum CodecID codec_id= CODEC_ID_NONE;
263 #if CONFIG_IMAGE2_MUXER
264 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
265 codec_id= av_guess_image2_codec(filename);
268 if(codec_id == CODEC_ID_NONE)
269 codec_id= fmt->video_codec;
271 }else if(type == AVMEDIA_TYPE_AUDIO)
272 return fmt->audio_codec;
274 return CODEC_ID_NONE;
277 AVInputFormat *av_find_input_format(const char *short_name)
280 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
281 if (match_format(short_name, fmt->name))
#if LIBAVFORMAT_VERSION_MAJOR < 53 && CONFIG_SHARED && HAVE_SYMVER
/* ABI-compatibility shims: old shared-library users resolved the
 * av_*_packet symbols from libavformat's LIBAVFORMAT_52 version node;
 * these wrappers forward to the libavcodec implementations.
 * NOTE(review): several wrapper bodies and the closing #endif were lost
 * in this extraction; surviving lines kept as-is. */
FF_SYMVER(void, av_destruct_packet_nofree, (AVPacket *pkt), "LIBAVFORMAT_52")
    av_destruct_packet_nofree(pkt);
FF_SYMVER(void, av_destruct_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
    av_destruct_packet(pkt);
FF_SYMVER(int, av_new_packet, (AVPacket *pkt, int size), "LIBAVFORMAT_52")
    return av_new_packet(pkt, size);
FF_SYMVER(int, av_dup_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
    return av_dup_packet(pkt);
FF_SYMVER(void, av_free_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
FF_SYMVER(void, av_init_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
    /* warn once that the compat path is slower than a recompile */
    av_log(NULL, AV_LOG_WARNING, "Diverting av_*_packet function calls to libavcodec. Recompile to improve performance\n");
320 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
322 int ret= av_new_packet(pkt, size);
327 pkt->pos= url_ftell(s);
329 ret= get_buffer(s, pkt->data, size);
333 av_shrink_packet(pkt, ret);
/**
 * Check whether filename actually contains a number pattern usable by
 * image sequences (i.e. av_get_frame_filename() succeeds on it).
 *
 * @return 1 if a frame number can be substituted, 0 otherwise
 */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
}
/**
 * Probe all registered demuxers against pd and return the best match whose
 * score exceeds *score_max (updated in place). An ID3v2 tag at the start of
 * the buffer is skipped before probing.
 * NOTE(review): variable declarations, the buffer-advance after the ID3
 * skip and the loop tail are missing from this extraction; kept as-is.
 */
AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
    AVProbeData lpd = *pd;
    AVInputFormat *fmt1, *fmt;

    /* skip a leading ID3v2 tag so the real payload gets probed */
    if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
        int id3len = ff_id3v2_tag_len(lpd.buf);
        if (lpd.buf_size > id3len + 16) {
            lpd.buf_size -= id3len;

    for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
        /* only consider NOFILE demuxers when no file is opened, and vice versa */
        if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
        if (fmt1->read_probe) {
            score = fmt1->read_probe(&lpd);
        } else if (fmt1->extensions) {
            /* fall back to extension matching when no probe function exists */
            if (av_match_ext(lpd.filename, fmt1->extensions)) {
        if (score > *score_max) {
        }else if (score == *score_max)
380 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
382 return av_probe_input_format2(pd, is_opened, &score);
/**
 * Re-probe a stream whose codec is still CODEC_ID_PROBE and, when the
 * detected format is one of the known raw/elementary formats in the table
 * below, set the stream's codec id and type accordingly.
 * NOTE(review): the table terminator, loop-variable declaration and
 * return path are missing from this extraction; kept as-is.
 */
static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
    /* mapping from demuxer short name to codec id/type */
    static const struct {
        const char *name; enum CodecID id; enum AVMediaType type;
        { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
        { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
        { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
        { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
        { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
        { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
        { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
        { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
    AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
        av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
               pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
        for (i = 0; fmt_id_type[i].name; i++) {
            if (!strcmp(fmt->name, fmt_id_type[i].name)) {
                st->codec->codec_id = fmt_id_type[i].id;
                st->codec->codec_type = fmt_id_type[i].type;
417 /************************************************************/
418 /* input media file */
/**
 * Open a media file from an IO stream. 'fmt' must be specified.
 * Allocates the AVFormatContext (unless ap->prealloced_context), reads a
 * possible ID3v2 tag, calls the demuxer's read_header() and records the
 * data offset. On failure the error path frees private/stream data.
 * NOTE(review): many lines (declarations, error labels, several closing
 * braces and the return) are missing from this extraction; kept as-is.
 */
int av_open_input_stream(AVFormatContext **ic_ptr,
                         ByteIOContext *pb, const char *filename,
                         AVInputFormat *fmt, AVFormatParameters *ap)
    AVFormatParameters default_ap;
        /* use zeroed defaults when the caller passes no parameters */
        memset(ap, 0, sizeof(default_ap));

    if(!ap->prealloced_context)
        ic = avformat_alloc_context();
        err = AVERROR(ENOMEM);
    ic->duration = AV_NOPTS_VALUE;
    ic->start_time = AV_NOPTS_VALUE;
    av_strlcpy(ic->filename, filename, sizeof(ic->filename));

    /* allocate private data */
    if (fmt->priv_data_size > 0) {
        ic->priv_data = av_mallocz(fmt->priv_data_size);
        if (!ic->priv_data) {
            err = AVERROR(ENOMEM);
        ic->priv_data = NULL;

    // e.g. AVFMT_NOFILE formats will not have a ByteIOContext
        ff_id3v2_read(ic, ID3v2_DEFAULT_MAGIC);

    if (ic->iformat->read_header) {
        err = ic->iformat->read_header(ic, ap);

    if (pb && !ic->data_offset)
        ic->data_offset = url_ftell(ic->pb);

#if FF_API_OLD_METADATA
    ff_metadata_demux_compat(ic);

    ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;

    /* error path: release everything allocated above */
    av_freep(&ic->priv_data);
    for(i=0;i<ic->nb_streams;i++) {
        AVStream *st = ic->streams[i];
            av_free(st->priv_data);
            av_free(st->codec->extradata);
/** size of probe buffer, for guessing file type from file contents */
#define PROBE_BUF_MIN 2048
#define PROBE_BUF_MAX (1<<20)

/**
 * Probe a bytestream to determine the input format, growing the probe
 * buffer geometrically from PROBE_BUF_MIN up to max_probe_size until a
 * demuxer scores high enough, then rewinds the stream reusing the buffer.
 * NOTE(review): buffer lifetime/cleanup lines and several closing braces
 * are missing from this extraction; kept as-is.
 */
int ff_probe_input_buffer(ByteIOContext **pb, AVInputFormat **fmt,
                          const char *filename, void *logctx,
                          unsigned int offset, unsigned int max_probe_size)
    AVProbeData pd = { filename ? filename : "", NULL, -offset };
    unsigned char *buf = NULL;
    int ret = 0, probe_size;

    /* clamp max_probe_size into [PROBE_BUF_MIN, PROBE_BUF_MAX] */
    if (!max_probe_size) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size > PROBE_BUF_MAX) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size < PROBE_BUF_MIN) {
        return AVERROR(EINVAL);

    if (offset >= max_probe_size) {
        return AVERROR(EINVAL);

    for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt && ret >= 0;
        probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
        /* NOTE(review): this inner 'ret' shadows the outer one used in the
         * loop condition above — looks like a latent bug, verify upstream */
        int ret, score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
        int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;

        if (probe_size < offset) {

        /* read probe data */
        /* NOTE(review): av_realloc result is not checked and overwrites
         * 'buf' directly — NULL deref / leak on OOM */
        buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
        if ((ret = get_buffer(*pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
            /* fail if error was not end of file, otherwise, lower score */
            if (ret != AVERROR_EOF) {
            ret = 0;            /* error was end of file, nothing read */
        pd.buf = &buf[offset];

        memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);

        /* guess file format */
        *fmt = av_probe_input_format2(&pd, 1, &score);
            if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
                av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
                av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);

        return AVERROR_INVALIDDATA;

    /* rewind. reuse probe buffer to avoid seeking */
    if ((ret = ff_rewind_with_probe_data(*pb, buf, pd.buf_size)) < 0)
/**
 * Open a media file by name: probe the format (by name first, then by
 * content via ff_probe_input_buffer), open the underlying URL unless the
 * demuxer is AVFMT_NOFILE, and hand off to av_open_input_stream().
 * NOTE(review): the middle of the parameter list (demuxer/buf_size
 * arguments), error labels and closing braces are missing from this
 * extraction; kept as-is.
 */
int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
                       AVFormatParameters *ap)
    AVProbeData probe_data, *pd = &probe_data;
    ByteIOContext *pb = NULL;
    void *logctx= ap && ap->prealloced_context ? *ic_ptr : NULL;

    pd->filename = filename;

    /* guess format if no file can be opened */
    fmt = av_probe_input_format(pd, 0);

    /* Do not open file if the format does not need it. XXX: specific
       hack needed to handle RTSP/TCP */
    if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
        /* if no file needed do not try to open one */
        if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
        url_setbufsize(pb, buf_size);

    /* content-based probing when the name lookup found nothing */
    if (!fmt && (err = ff_probe_input_buffer(&pb, &fmt, filename, logctx, 0, logctx ? (*ic_ptr)->probesize : 0)) < 0) {

    /* if still no format found, error */
        err = AVERROR_INVALIDDATA;

    /* check filename in case an image number is expected */
    if (fmt->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            err = AVERROR_NUMEXPECTED;
    err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);

    if (ap && ap->prealloced_context)
637 /*******************************************************/
639 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
640 AVPacketList **plast_pktl){
641 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
646 (*plast_pktl)->next = pktl;
648 *packet_buffer = pktl;
650 /* add the packet in the buffered packet list */
/**
 * Read the next raw packet: either pop one from the raw packet buffer
 * (when stream probing has finished) or pull one from the demuxer,
 * applying forced codec ids and feeding CODEC_ID_PROBE streams' data into
 * their probe buffers until a codec can be detected.
 * NOTE(review): the outer for(;;) loop, several break/return statements
 * and closing braces are missing from this extraction; kept as-is.
 */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
        AVPacketList *pktl = s->raw_packet_buffer;
            /* serve buffered packets once probing for this stream is done
             * (or the probe budget is exhausted) */
            if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
               !s->streams[pkt->stream_index]->probe_packets ||
               s->raw_packet_buffer_remaining_size < pkt->size){
                AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
                s->raw_packet_buffer = pktl->next;
                s->raw_packet_buffer_remaining_size += pkt->size;

        ret= s->iformat->read_packet(s, pkt);
            if (!pktl || ret == AVERROR(EAGAIN))
            /* demuxer is done: stop probing all streams */
            for (i = 0; i < s->nb_streams; i++)
                s->streams[i]->probe_packets = 0;

        st= s->streams[pkt->stream_index];

        /* honor caller-forced codec ids */
        switch(st->codec->codec_type){
        case AVMEDIA_TYPE_VIDEO:
            if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
        case AVMEDIA_TYPE_AUDIO:
            if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
        case AVMEDIA_TYPE_SUBTITLE:
            if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;

        if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
        add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
        s->raw_packet_buffer_remaining_size -= pkt->size;

        if(st->codec->codec_id == CODEC_ID_PROBE){
            AVProbeData *pd = &st->probe_data;
            av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);

            /* NOTE(review): av_realloc result unchecked before the memcpy
             * below — NULL deref on OOM */
            pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
            memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
            pd->buf_size += pkt->size;
            memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);

            /* re-probe only when the buffer size crosses a power of two */
            if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
                //FIXME we dont reduce score to 0 for the case of running out of buffer space in bytes
                set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
                if(st->codec->codec_id != CODEC_ID_PROBE){
                    av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
732 /**********************************************************/
/**
 * Get the number of samples of an audio frame. Return -1 on error.
 * NOTE(review): the Vorbis special case, error returns and closing braces
 * are missing from this extraction; kept as-is.
 */
static int get_audio_frame_size(AVCodecContext *enc, int size)
    if(enc->codec_id == CODEC_ID_VORBIS)

    if (enc->frame_size <= 1) {
        int bits_per_sample = av_get_bits_per_sample(enc->codec_id);

        if (bits_per_sample) {
            if (enc->channels == 0)
            /* PCM-style codecs: derive sample count from byte size */
            frame_size = (size << 3) / (bits_per_sample * enc->channels);
            /* used for example by ADPCM codecs */
            if (enc->bit_rate == 0)
            frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
        /* codec has a fixed frame size */
        frame_size = enc->frame_size;
/**
 * Return the frame duration in seconds. Return 0 if not available.
 * Result is written as the rational *pnum / *pden.
 * NOTE(review): the initial *pnum/*pden reset, several closing braces and
 * the audio *pnum assignment are missing from this extraction; kept as-is.
 */
static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
                                   AVCodecParserContext *pc, AVPacket *pkt)
    switch(st->codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        /* prefer the container time base when it is plausible (>1ms tick) */
        if(st->time_base.num*1000LL > st->time_base.den){
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
            *pnum = st->codec->time_base.num;
            *pden = st->codec->time_base.den;
            if (pc && pc->repeat_pict) {
                /* stretch duration for repeated fields/frames */
                *pnum = (*pnum) * (1 + pc->repeat_pict);
            //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
            //Thus if we have no parser in such case leave duration undefined.
            if(st->codec->ticks_per_frame>1 && !pc){
    case AVMEDIA_TYPE_AUDIO:
        frame_size = get_audio_frame_size(st->codec, pkt->size);
        *pden = st->codec->sample_rate;
/**
 * Return 1 if the codec produces only intra-coded (self-contained) frames,
 * so every packet can be treated as a keyframe.
 * NOTE(review): the return statements, part of the codec-id case list and
 * the default/closing braces are missing from this extraction; kept as-is.
 */
static int is_intra_only(AVCodecContext *enc){
    if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
    }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
        switch(enc->codec_id){
        case CODEC_ID_MJPEGB:
        case CODEC_ID_RAWVIDEO:
        case CODEC_ID_DVVIDEO:
        case CODEC_ID_HUFFYUV:
        case CODEC_ID_FFVHUFF:
        case CODEC_ID_JPEG2000:
/**
 * Once the first DTS of a stream is known, shift the timestamps of all
 * already-buffered packets of that stream by first_dts and derive the
 * stream start_time.
 * NOTE(review): the early return and a couple of braces are missing from
 * this extraction; kept as-is.
 */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts)
    AVStream *st= s->streams[stream_index];
    AVPacketList *pktl= s->packet_buffer;

    /* only run once, and only when both dts and cur_dts are known */
    if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)

    st->first_dts= dts - st->cur_dts;

    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != stream_index)
        //FIXME think more about this check
        if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
            pktl->pkt.pts += st->first_dts;

        if(pktl->pkt.dts != AV_NOPTS_VALUE)
            pktl->pkt.dts += st->first_dts;

        if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time= pktl->pkt.pts;
    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
/**
 * Fill in missing dts/pts/duration of already-buffered packets of pkt's
 * stream by extrapolating with pkt->duration, working backwards from
 * first_dts (or forwards from a derived cur_dts).
 * NOTE(review): the cur_dts declaration, a break and the else-branch body
 * are missing from this extraction; kept as-is.
 */
static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
    AVPacketList *pktl= s->packet_buffer;

    if(st->first_dts != AV_NOPTS_VALUE){
        cur_dts= st->first_dts;
        /* walk back over undated buffered packets to find the real start */
        for(; pktl; pktl= pktl->next){
            if(pktl->pkt.stream_index == pkt->stream_index){
                if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
                cur_dts -= pkt->duration;
        pktl= s->packet_buffer;
        st->first_dts = cur_dts;
    }else if(st->cur_dts)

    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != pkt->stream_index)
        /* stamp packets that have no timing information at all */
        if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
           && !pktl->pkt.duration){
            pktl->pkt.dts= cur_dts;
            if(!st->codec->has_b_frames)
                pktl->pkt.pts= cur_dts;
            cur_dts += pkt->duration;
            pktl->pkt.duration= pkt->duration;
    if(st->first_dts == AV_NOPTS_VALUE)
        st->cur_dts= cur_dts;
/**
 * Fill in missing pts/dts/duration of a demuxed packet: estimate the frame
 * duration, undo timestamp wrap, use parser synchronization info when
 * available, interpolate pts/dts for non-delayed streams and maintain
 * st->cur_dts / the pts reorder buffer.
 * NOTE(review): this function has many lines missing in this extraction
 * (early returns, score/offset declarations, numerous closing braces);
 * the surviving statements are kept byte-for-byte.
 */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
    int num, den, presentation_delayed, delay, i;

    if (s->flags & AVFMT_FLAG_NOFILLIN)

    if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
        pkt->dts= AV_NOPTS_VALUE;

    if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == FF_B_TYPE)
        //FIXME Set low_delay = 0 when has_b_frames = 1
        st->codec->has_b_frames = 1;

    /* do we have a video B-frame ? */
    delay= st->codec->has_b_frames;
    presentation_delayed = 0;
    /* XXX: need has_b_frame, but cannot get it if the codec is
       not initialized */
        pc && pc->pict_type != FF_B_TYPE)
        presentation_delayed = 1;

    /* undo a single timestamp wrap when dts overtook pts */
    if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
       /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
        pkt->dts -= 1LL<<st->pts_wrap_bits;

    // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
    // we take the conservative approach and discard both
    // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
    if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
        av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
        pkt->dts= pkt->pts= AV_NOPTS_VALUE;

    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, pc, pkt);
            pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
            if(pkt->duration != 0 && s->packet_buffer)
                update_initial_durations(s, st, pkt);

    /* correct timestamps with byte offset if demuxers only have timestamps
       on packet boundaries */
    if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if(pkt->pts != AV_NOPTS_VALUE)
        if(pkt->dts != AV_NOPTS_VALUE)

    if (pc && pc->dts_sync_point >= 0) {
        // we have synchronization info from the parser
        int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
            int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
            if (pkt->dts != AV_NOPTS_VALUE) {
                // got DTS from the stream, update reference timestamp
                st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
            } else if (st->reference_dts != AV_NOPTS_VALUE) {
                // compute DTS based on reference timestamp
                pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
            if (pc->dts_sync_point > 0)
                st->reference_dts = pkt->dts; // new reference

    /* This may be redundant, but it should not hurt. */
    if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
        presentation_delayed = 1;

//    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
    /* interpolate PTS and DTS if they are not present */
    //We skip H264 currently because delay and has_b_frames are not reliably set
    if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->cur_dts;

            /* this is tricky: the dts must be incremented by the duration
               of the frame we are displaying, i.e. the last I- or P-frame */
            if (st->last_IP_duration == 0)
                st->last_IP_duration = pkt->duration;
            if(pkt->dts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts + st->last_IP_duration;
            st->last_IP_duration = pkt->duration;
            st->last_IP_pts= pkt->pts;
            /* cannot compute PTS if not present (we can compute it only
               by knowing the future */
        } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
            if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
                /* detect a pts that is one duration behind the expected value */
                int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
                int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
                if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
                    pkt->pts += pkt->duration;
//                    av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);

            /* presentation is not delayed : PTS and DTS are the same */
            if(pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = pkt->dts;
            update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
            if(pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = st->cur_dts;
            pkt->dts = pkt->pts;
            if(pkt->pts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->pts + pkt->duration;

    if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
        /* sort incoming pts into the reorder buffer; its minimum is a dts
         * candidate for streams that lack dts */
        st->pts_buffer[0]= pkt->pts;
        for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
        if(pkt->dts == AV_NOPTS_VALUE)
            pkt->dts= st->pts_buffer[0];
        if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
        if(pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;

//    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);

    if(is_intra_only(st->codec))
        pkt->flags |= AV_PKT_FLAG_KEY;

    /* keyframe computation */
    if (pc->key_frame == 1)
        pkt->flags |= AV_PKT_FLAG_KEY;
    else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
        pkt->flags |= AV_PKT_FLAG_KEY;

    pkt->convergence_duration = pc->convergence_duration;
/**
 * Produce the next fully-assembled frame: either forward the current raw
 * packet as-is (no parser needed), or feed raw packets through the
 * stream's parser until it emits a complete frame, filling timestamps via
 * compute_pkt_fields() and maintaining the generic index.
 * NOTE(review): the outer loop, stream-selection loop head, several
 * returns and many closing braces are missing from this extraction;
 * surviving lines kept as-is.
 */
static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
    av_init_packet(pkt);

    /* select current input stream component */
        if (!st->need_parsing || !st->parser) {
            /* no parsing needed: we just output the packet as is */
            /* raw data support */
            *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
            compute_pkt_fields(s, st, NULL, pkt);
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
        } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
            len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
                                   st->cur_ptr, st->cur_len,
                                   st->cur_pkt.pts, st->cur_pkt.dts,
            /* timestamps were consumed by the parser; do not reuse them */
            st->cur_pkt.pts = AV_NOPTS_VALUE;
            st->cur_pkt.dts = AV_NOPTS_VALUE;
            /* increment read pointer */

            /* return packet if any */
                pkt->stream_index = st->index;
                pkt->pts = st->parser->pts;
                pkt->dts = st->parser->dts;
                pkt->pos = st->parser->pos;
                if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
                    /* parser returned the packet unchanged: hand over ownership */
                    pkt->destruct= st->cur_pkt.destruct;
                    st->cur_pkt.destruct= NULL;
                    st->cur_pkt.data = NULL;
                    assert(st->cur_len == 0);
                    pkt->destruct = NULL;
                compute_pkt_fields(s, st, st->parser, pkt);

                if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
                    ff_reduce_index(s, st->index);
                    av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
                                       0, 0, AVINDEX_KEYFRAME);
            /* current packet fully consumed */
            av_free_packet(&st->cur_pkt);

        /* read next packet */
        ret = av_read_packet(s, &cur_pkt);
            if (ret == AVERROR(EAGAIN))
            /* return the last frames, if any */
            for(i = 0; i < s->nb_streams; i++) {
                if (st->parser && st->need_parsing) {
                    /* flush the parser with a NULL/empty input */
                    av_parser_parse2(st->parser, st->codec,
                                     &pkt->data, &pkt->size,
                                     AV_NOPTS_VALUE, AV_NOPTS_VALUE,
            /* no more packets: really terminate parsing */
        st = s->streams[cur_pkt.stream_index];
        st->cur_pkt= cur_pkt;

        if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
           st->cur_pkt.dts != AV_NOPTS_VALUE &&
           st->cur_pkt.pts < st->cur_pkt.dts){
            av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
                   st->cur_pkt.stream_index,
//            av_free_packet(&st->cur_pkt);

        if(s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
                st->cur_pkt.stream_index,
                st->cur_pkt.duration,

        st->cur_ptr = st->cur_pkt.data;
        st->cur_len = st->cur_pkt.size;
        if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codec->codec_id);
                /* no parser available: just output the raw packets */
                st->need_parsing = AVSTREAM_PARSE_NONE;
            }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
                st->parser->flags |= PARSER_FLAG_ONCE;

    if(s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
/**
 * Public frame reader: serve packets from the internal packet buffer
 * (generating missing pts from later dts when AVFMT_FLAG_GENPTS is set),
 * otherwise pull new frames via av_read_frame_internal(), buffering them
 * when pts generation requires look-ahead.
 * NOTE(review): loop heads, returns and closing braces are missing from
 * this extraction; surviving lines kept as-is.
 */
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
    const int genpts= s->flags & AVFMT_FLAG_GENPTS;

    pktl = s->packet_buffer;
        AVPacket *next_pkt= &pktl->pkt;

        if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
            int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
            /* scan later buffered packets for a dts that can serve as this
             * packet's pts (wrap-aware comparison) */
            while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
                if(   pktl->pkt.stream_index == next_pkt->stream_index
                   && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
                   && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
                    next_pkt->pts= pktl->pkt.dts;
            pktl = s->packet_buffer;

        if(   next_pkt->pts != AV_NOPTS_VALUE
           || next_pkt->dts == AV_NOPTS_VALUE
            /* read packet from packet buffer, if there is data */
            s->packet_buffer = pktl->next;

        int ret= av_read_frame_internal(s, pkt);
            if(pktl && ret != AVERROR(EAGAIN)){

        /* NOTE(review): add_to_pktbuf can return NULL on OOM, which is then
         * handed to av_dup_packet — verify av_dup_packet tolerates NULL */
        if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
                                       &s->packet_buffer_end)) < 0)
            return AVERROR(ENOMEM);
        assert(!s->packet_buffer);
        return av_read_frame_internal(s, pkt);
1246 /* XXX: suppress the packet queue */
1247 static void flush_packet_queue(AVFormatContext *s)
1252 pktl = s->packet_buffer;
1255 s->packet_buffer = pktl->next;
1256 av_free_packet(&pktl->pkt);
1259 while(s->raw_packet_buffer){
1260 pktl = s->raw_packet_buffer;
1261 s->raw_packet_buffer = pktl->next;
1262 av_free_packet(&pktl->pkt);
1265 s->packet_buffer_end=
1266 s->raw_packet_buffer_end= NULL;
1267 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1270 /*******************************************************/
1273 int av_find_default_stream_index(AVFormatContext *s)
1275 int first_audio_index = -1;
1279 if (s->nb_streams <= 0)
1281 for(i = 0; i < s->nb_streams; i++) {
1283 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1286 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1287 first_audio_index = i;
1289 return first_audio_index >= 0 ? first_audio_index : 0;
/**
 * Flush the frame reader.
 * Drops all buffered packets and resets every stream's parser and timing
 * state so reading can restart cleanly (e.g. after a seek).
 * NOTE(review): variable declarations, the st assignment and several
 * braces are missing from this extraction; kept as-is.
 */
void ff_read_frame_flush(AVFormatContext *s)
    flush_packet_queue(s);

    /* for each stream, reset read state */
    for(i = 0; i < s->nb_streams; i++) {

            av_parser_close(st->parser);
            av_free_packet(&st->cur_pkt);
        st->last_IP_pts = AV_NOPTS_VALUE;
        st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
        st->reference_dts = AV_NOPTS_VALUE;
        st->probe_packets = MAX_PROBE_PACKETS;

        /* invalidate the whole pts reorder buffer */
        for(j=0; j<MAX_REORDER_DELAY+1; j++)
            st->pts_buffer[j]= AV_NOPTS_VALUE;
1327 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1330 for(i = 0; i < s->nb_streams; i++) {
1331 AVStream *st = s->streams[i];
1333 st->cur_dts = av_rescale(timestamp,
1334 st->time_base.den * (int64_t)ref_st->time_base.num,
1335 st->time_base.num * (int64_t)ref_st->time_base.den);
1339 void ff_reduce_index(AVFormatContext *s, int stream_index)
1341 AVStream *st= s->streams[stream_index];
1342 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1344 if((unsigned)st->nb_index_entries >= max_entries){
1346 for(i=0; 2*i<st->nb_index_entries; i++)
1347 st->index_entries[i]= st->index_entries[2*i];
1348 st->nb_index_entries= i;
/**
 * Insert a new entry into a stream's sorted index (or update an existing
 * entry with the same timestamp), growing the entry array as needed.
 * NOTE(review): declarations (index), overflow/alloc failure returns, the
 * distance/else branches and the final field assignments/return are
 * partially missing from this extraction; kept as-is.
 */
int av_add_index_entry(AVStream *st,
                       int64_t pos, int64_t timestamp, int size, int distance, int flags)
    AVIndexEntry *entries, *ie;

    /* guard against (nb+1)*sizeof overflowing the allocation size */
    if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))

    entries = av_fast_realloc(st->index_entries,
                              &st->index_entries_allocated_size,
                              (st->nb_index_entries + 1) *
                              sizeof(AVIndexEntry));

    st->index_entries= entries;

    index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);

        /* timestamp is beyond all existing entries: append */
        index= st->nb_index_entries++;
        ie= &entries[index];
        assert(index==0 || ie[-1].timestamp < timestamp);
        ie= &entries[index];
        if(ie->timestamp != timestamp){
            if(ie->timestamp <= timestamp)
            /* shift the tail to make room for the new entry */
            memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
            st->nb_index_entries++;
        }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
            distance= ie->min_distance;

    ie->timestamp = timestamp;
    ie->min_distance= distance;
/*
 * Binary-search a stream's index for wanted_timestamp; honours
 * AVSEEK_FLAG_BACKWARD (round down vs. up) and AVSEEK_FLAG_ANY
 * (accept non-keyframe entries).
 * NOTE(review): excerpt has gaps — the a/b initialisation and the search
 * loop header are not visible, only the torso of the bisection.
 */
1396 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1399 AVIndexEntry *entries= st->index_entries;
1400 int nb_entries= st->nb_index_entries;
1407 //optimize appending index entries at the end
1408 if(b && entries[b-1].timestamp < wanted_timestamp)
1413 timestamp = entries[m].timestamp;
1414 if(timestamp >= wanted_timestamp)
1416 if(timestamp <= wanted_timestamp)
/* pick the lower (BACKWARD) or upper bound of the bisection */
1419 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1421 if(!(flags & AVSEEK_FLAG_ANY)){
/* walk away from non-keyframe entries unless ANY was requested */
1422 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1423 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/*
 * Seek using the demuxer's read_timestamp() callback: seed the search
 * bounds from any cached index entries, then delegate to av_gen_search()
 * and reposition the ByteIOContext at the result.
 * NOTE(review): excerpt has gaps; error returns and some assignments
 * (e.g. pos_min/pos_max setup) are not visible here.
 */
1434 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1435 AVInputFormat *avif= s->iformat;
1436 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1437 int64_t ts_min, ts_max, ts;
1442 if (stream_index < 0)
1446 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1450 ts_min= AV_NOPTS_VALUE;
1451 pos_limit= -1; //gcc falsely says it may be uninitialized
1453 st= s->streams[stream_index];
1454 if(st->index_entries){
/* lower bound: last index entry at or before the target */
1457 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1458 index= FFMAX(index, 0);
1459 e= &st->index_entries[index];
1461 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1463 ts_min= e->timestamp;
1465 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* upper bound: first index entry at or after the target */
1472 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1473 assert(index < st->nb_index_entries);
1475 e= &st->index_entries[index];
1476 assert(e->timestamp >= target_ts);
1478 ts_max= e->timestamp;
1479 pos_limit= pos_max - e->min_distance;
1481 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1482 pos_max,pos_limit, ts_max);
1487 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1492 if ((ret = url_fseek(s->pb, pos, SEEK_SET)) < 0)
/* make all streams agree on the new current DTS */
1495 av_update_cur_dts(s, st, ts);
/*
 * Generic timestamp search over a byte range: establishes ts/pos bounds
 * via read_timestamp() if the caller did not supply them, then narrows
 * [pos_min, pos_limit] with interpolation, falling back to bisection and
 * finally linear stepping. On success *ts_ret receives the timestamp at
 * the chosen position.
 * NOTE(review): excerpt has gaps; several loop headers, error returns and
 * the final pos_min assignment are not visible, so the visible statement
 * order is incomplete — do not restructure without the full file.
 */
1500 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1502 int64_t start_pos, filesize;
1506 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* no lower bound given: probe the first timestamp after data_offset */
1509 if(ts_min == AV_NOPTS_VALUE){
1510 pos_min = s->data_offset;
1511 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1512 if (ts_min == AV_NOPTS_VALUE)
/* no upper bound given: scan backwards from EOF in growing steps */
1516 if(ts_max == AV_NOPTS_VALUE){
1518 filesize = url_fsize(s->pb);
1519 pos_max = filesize - 1;
1522 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1524 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1525 if (ts_max == AV_NOPTS_VALUE)
/* extend ts_max to the last timestamp actually present in the file */
1529 int64_t tmp_pos= pos_max + 1;
1530 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1531 if(tmp_ts == AV_NOPTS_VALUE)
1535 if(tmp_pos >= filesize)
1541 if(ts_min > ts_max){
1543 }else if(ts_min == ts_max){
1548 while (pos_min < pos_limit) {
1550 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1554 assert(pos_limit <= pos_max);
1557 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1558 // interpolate position (better than dichotomy)
1559 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1560 + pos_min - approximate_keyframe_distance;
1561 }else if(no_change==1){
1562 // bisection, if interpolation failed to change min or max pos last time
1563 pos = (pos_min + pos_limit)>>1;
1565 /* linear search if bisection failed, can only happen if there
1566 are very few or no keyframes between min/max */
1571 else if(pos > pos_limit)
1575 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1581 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1582 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
1583 start_pos, no_change);
1585 if(ts == AV_NOPTS_VALUE){
1586 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1589 assert(ts != AV_NOPTS_VALUE);
/* shrink the interval around the target timestamp */
1590 if (target_ts <= ts) {
1591 pos_limit = start_pos - 1;
1595 if (target_ts >= ts) {
/* BACKWARD rounds down to pos_min/ts_min, otherwise up */
1601 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1602 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1605 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
/* NOTE(review): pos_min is presumably advanced between these two reads
 * on a line missing from this excerpt — confirm against the full file */
1607 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1608 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1609 pos, ts_min, target_ts, ts_max);
/*
 * Seek by raw byte position: clamp pos to [data_offset, filesize-1] and
 * reposition the ByteIOContext there.
 * NOTE(review): excerpt has gaps; the source of 'ts' passed to
 * av_update_cur_dts() is not visible here.
 */
1615 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1616 int64_t pos_min, pos_max;
1620 if (stream_index < 0)
1623 st= s->streams[stream_index];
1626 pos_min = s->data_offset;
1627 pos_max = url_fsize(s->pb) - 1;
/* clamp the requested byte position into the valid data range */
1629 if (pos < pos_min) pos= pos_min;
1630 else if(pos > pos_max) pos= pos_max;
1632 url_fseek(s->pb, pos, SEEK_SET);
1635 av_update_cur_dts(s, st, ts);
/*
 * Index-driven fallback seek: look the timestamp up in the stream's
 * index; if it lies beyond the indexed range, read packets forward
 * (growing the index as a side effect) until it is covered, then seek
 * to the matching index entry.
 * NOTE(review): excerpt has gaps; loop headers and some returns are not
 * visible here.
 */
1640 static int av_seek_frame_generic(AVFormatContext *s,
1641 int stream_index, int64_t timestamp, int flags)
1648 st = s->streams[stream_index];
1650 index = av_index_search_timestamp(st, timestamp, flags);
/* target precedes the first indexed entry: nothing to scan for */
1652 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1655 if(index < 0 || index==st->nb_index_entries-1){
/* resume reading from the last indexed position (or data start) */
1659 if(st->nb_index_entries){
1660 assert(st->index_entries);
1661 ie= &st->index_entries[st->nb_index_entries-1];
1662 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1664 av_update_cur_dts(s, st, ie->timestamp);
1666 if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
1672 ret = av_read_frame(s, &pkt);
1673 }while(ret == AVERROR(EAGAIN));
1676 av_free_packet(&pkt);
1677 if(stream_index == pkt.stream_index){
/* stop once a keyframe past the target has been seen */
1678 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1682 index = av_index_search_timestamp(st, timestamp, flags);
1687 ff_read_frame_flush(s);
1688 if (s->iformat->read_seek){
1689 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1692 ie = &st->index_entries[index];
1693 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1695 av_update_cur_dts(s, st, ie->timestamp);
/*
 * Public seek entry point. Dispatch order: byte seek if requested, then
 * the demuxer's own read_seek(), then binary search via read_timestamp(),
 * and finally the generic index-based seek.
 * NOTE(review): excerpt has gaps; the handling of read_seek()'s return
 * value is not fully visible here.
 */
1700 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1705 ff_read_frame_flush(s);
1707 if(flags & AVSEEK_FLAG_BYTE)
1708 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1710 if(stream_index < 0){
1711 stream_index= av_find_default_stream_index(s);
1712 if(stream_index < 0)
1715 st= s->streams[stream_index];
1716 /* timestamp for default must be expressed in AV_TIME_BASE units */
1717 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1720 /* first, we try the format specific seek */
1721 if (s->iformat->read_seek)
1722 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1729 if(s->iformat->read_timestamp)
1730 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1732 return av_seek_frame_generic(s, stream_index, timestamp, flags);
/*
 * New-style ranged seek API: prefers the demuxer's read_seek2(), and
 * otherwise maps the [min_ts, ts, max_ts] request onto the old
 * av_seek_frame() API, choosing BACKWARD when the target is closer to
 * min_ts than to max_ts.
 * NOTE(review): excerpt has gaps; the read_timestamp() branch body is
 * not visible here.
 */
1735 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
/* reject an inconsistent request: ts must lie inside [min_ts, max_ts] */
1737 if(min_ts > ts || max_ts < ts)
1740 ff_read_frame_flush(s);
1742 if (s->iformat->read_seek2)
1743 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1745 if(s->iformat->read_timestamp){
1746 //try to seek via read_timestamp()
1749 //Fallback to old API if new is not implemented but old is
1750 //Note the old has somewhat different semantics
1751 if(s->iformat->read_seek || 1)
1752 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1754 // try some generic seek like av_seek_frame_generic() but with new ts semantics
1757 /*******************************************************/
1760 * Return TRUE if at least one stream carries an accurate duration.
1762 * @return TRUE if any stream component has a known (non-AV_NOPTS_VALUE) duration.
1764 static int av_has_duration(AVFormatContext *ic)
1769 for(i = 0;i < ic->nb_streams; i++) {
1770 st = ic->streams[i];
/* a set duration on any stream is enough */
1771 if (st->duration != AV_NOPTS_VALUE)
1778 * Estimate the container-level timings (start_time, duration) from the
1780 * per-stream values, and derive the global bitrate from file_size when possible.
1782 static void av_update_stream_timings(AVFormatContext *ic)
1784 int64_t start_time, start_time1, end_time, end_time1;
1785 int64_t duration, duration1;
/* accumulate extrema over all streams, in AV_TIME_BASE units */
1789 start_time = INT64_MAX;
1790 end_time = INT64_MIN;
1791 duration = INT64_MIN;
1792 for(i = 0;i < ic->nb_streams; i++) {
1793 st = ic->streams[i];
1794 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1795 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1796 if (start_time1 < start_time)
1797 start_time = start_time1;
1798 if (st->duration != AV_NOPTS_VALUE) {
1799 end_time1 = start_time1
1800 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1801 if (end_time1 > end_time)
1802 end_time = end_time1;
/* track the longest single-stream duration independently of start times */
1805 if (st->duration != AV_NOPTS_VALUE) {
1806 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1807 if (duration1 > duration)
1808 duration = duration1;
1811 if (start_time != INT64_MAX) {
1812 ic->start_time = start_time;
1813 if (end_time != INT64_MIN) {
/* the earliest-start/latest-end span may exceed any one stream */
1814 if (end_time - start_time > duration)
1815 duration = end_time - start_time;
1818 if (duration != INT64_MIN) {
1819 ic->duration = duration;
1820 if (ic->file_size > 0) {
1821 /* compute the bitrate */
1822 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1823 (double)ic->duration;
/*
 * Copy the (freshly recomputed) container-level start_time/duration down
 * into every stream that lacks its own start_time, converting into each
 * stream's time base.
 */
1828 static void fill_all_stream_timings(AVFormatContext *ic)
1833 av_update_stream_timings(ic);
1834 for(i = 0;i < ic->nb_streams; i++) {
1835 st = ic->streams[i];
/* only fill in streams with no timing info of their own */
1836 if (st->start_time == AV_NOPTS_VALUE) {
1837 if(ic->start_time != AV_NOPTS_VALUE)
1838 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1839 if(ic->duration != AV_NOPTS_VALUE)
1840 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/*
 * Least precise timing estimate: sum the per-stream bitrates if the
 * global one is unset, then derive per-stream durations from
 * file_size * 8 / bit_rate.
 * NOTE(review): excerpt has gaps; some declarations/braces are missing.
 */
1845 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1847 int64_t filesize, duration;
1851 /* if bit_rate is already set, we believe it */
1852 if (ic->bit_rate == 0) {
1854 for(i=0;i<ic->nb_streams;i++) {
1855 st = ic->streams[i];
1856 bit_rate += st->codec->bit_rate;
1858 ic->bit_rate = bit_rate;
1861 /* if duration is already set, we believe it */
1862 if (ic->duration == AV_NOPTS_VALUE &&
1863 ic->bit_rate != 0 &&
1864 ic->file_size != 0) {
1865 filesize = ic->file_size;
1867 for(i = 0; i < ic->nb_streams; i++) {
1868 st = ic->streams[i];
/* duration = filesize*8 / bit_rate, expressed in st->time_base units */
1869 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1870 if (st->duration == AV_NOPTS_VALUE)
1871 st->duration = duration;
/* read-window size and retry budget for the end-of-file PTS scan below */
1877 #define DURATION_MAX_READ_SIZE 250000
1878 #define DURATION_MAX_RETRY 3
1880 /* only usable for MPEG-PS streams */
/*
 * Estimate stream durations by seeking near EOF and reading packet PTS
 * values, retrying with an exponentially larger window. Restores the
 * original file position afterwards.
 * NOTE(review): excerpt has gaps; loop headers and some branch bodies are
 * not visible here.
 */
1881 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1883 AVPacket pkt1, *pkt = &pkt1;
1885 int read_size, i, ret;
1887 int64_t filesize, offset, duration;
1892 /* flush packet queue */
1893 flush_packet_queue(ic);
1895 for (i=0; i<ic->nb_streams; i++) {
1896 st = ic->streams[i];
1897 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1898 av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n");
1901 av_parser_close(st->parser);
1903 av_free_packet(&st->cur_pkt);
1907 /* estimate the end time (duration) */
1908 /* XXX: may need to support wrapping */
1909 filesize = ic->file_size;
1910 end_time = AV_NOPTS_VALUE;
/* seek a window before EOF; the window doubles on each retry */
1912 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
1916 url_fseek(ic->pb, offset, SEEK_SET);
1919 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
1923 ret = av_read_packet(ic, pkt);
1924 }while(ret == AVERROR(EAGAIN));
1927 read_size += pkt->size;
1928 st = ic->streams[pkt->stream_index];
1929 if (pkt->pts != AV_NOPTS_VALUE &&
1930 (st->start_time != AV_NOPTS_VALUE ||
1931 st->first_dts != AV_NOPTS_VALUE)) {
1932 duration = end_time = pkt->pts;
1933 if (st->start_time != AV_NOPTS_VALUE) duration -= st->start_time;
1934 else duration -= st->first_dts;
/* compensate for a wrapped timestamp counter */
1936 duration += 1LL<<st->pts_wrap_bits;
1938 if (st->duration == AV_NOPTS_VALUE ||
1939 st->duration < duration)
1940 st->duration = duration;
1943 av_free_packet(pkt);
1945 }while( end_time==AV_NOPTS_VALUE
1946 && filesize > (DURATION_MAX_READ_SIZE<<retry)
1947 && ++retry <= DURATION_MAX_RETRY);
1949 fill_all_stream_timings(ic);
/* restore the caller's file position and reset decode state */
1951 url_fseek(ic->pb, old_offset, SEEK_SET);
1952 for (i=0; i<ic->nb_streams; i++) {
1954 st->cur_dts= st->first_dts;
1955 st->last_IP_pts = AV_NOPTS_VALUE;
/*
 * Top-level timing estimation dispatcher: PTS scan for seekable MPEG-PS/TS,
 * otherwise per-stream durations if any exist, otherwise a bitrate guess.
 * NOTE(review): excerpt has gaps; the DEBUG dump at the end appears to be
 * conditionally compiled in the full file.
 */
1959 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1963 /* get the file size, if possible */
1964 if (ic->iformat->flags & AVFMT_NOFILE) {
1967 file_size = url_fsize(ic->pb);
1971 ic->file_size = file_size;
/* only MPEG-PS/TS get the accurate end-of-file PTS scan, and only
 * when the input is a real, seekable file */
1973 if ((!strcmp(ic->iformat->name, "mpeg") ||
1974 !strcmp(ic->iformat->name, "mpegts")) &&
1975 file_size && !url_is_streamed(ic->pb)) {
1976 /* get accurate estimate from the PTSes */
1977 av_estimate_timings_from_pts(ic, old_offset);
1978 } else if (av_has_duration(ic)) {
1979 /* at least one component has timings - we use them for all
1981 fill_all_stream_timings(ic);
1983 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
1984 /* less precise: use bitrate info */
1985 av_estimate_timings_from_bit_rate(ic);
1987 av_update_stream_timings(ic);
1993 for(i = 0;i < ic->nb_streams; i++) {
1994 st = ic->streams[i];
1995 printf("%d: start_time: %0.3f duration: %0.3f\n",
1996 i, (double)st->start_time / AV_TIME_BASE,
1997 (double)st->duration / AV_TIME_BASE);
1999 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2000 (double)ic->start_time / AV_TIME_BASE,
2001 (double)ic->duration / AV_TIME_BASE,
2002 ic->bit_rate / 1000);
/*
 * Return nonzero once a codec context carries enough parameters to be
 * usable: sample rate/channels/sample format for audio (plus frame_size
 * for codecs that require it), width + pixel format for video.
 * NOTE(review): excerpt has gaps; the default case and some 'val'
 * assignments are not visible here.
 */
2007 static int has_codec_parameters(AVCodecContext *enc)
2010 switch(enc->codec_type) {
2011 case AVMEDIA_TYPE_AUDIO:
2012 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
/* these codecs need a known frame_size before they are usable */
2013 if(!enc->frame_size &&
2014 (enc->codec_id == CODEC_ID_VORBIS ||
2015 enc->codec_id == CODEC_ID_AAC ||
2016 enc->codec_id == CODEC_ID_MP1 ||
2017 enc->codec_id == CODEC_ID_MP2 ||
2018 enc->codec_id == CODEC_ID_MP3 ||
2019 enc->codec_id == CODEC_ID_SPEEX))
2022 case AVMEDIA_TYPE_VIDEO:
2023 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
2029 return enc->codec_id != CODEC_ID_NONE && val != 0;
/*
 * H.264 reorders frames, so its decode delay (has_b_frames) is only
 * trusted after a few frames have been seen; every other codec is
 * considered settled immediately.
 */
2032 static int has_decode_delay_been_guessed(AVStream *st)
2034 return st->codec->codec_id != CODEC_ID_H264 ||
2035 st->codec_info_nb_frames >= 4 + st->codec->has_b_frames;
/*
 * Decode a single packet during probing, solely to let the decoder fill
 * in parameters (dimensions, channel layout, ...) that the container did
 * not provide. Opens the decoder on demand.
 * NOTE(review): excerpt has gaps; the audio buffer free and the return
 * path are not visible here.
 */
2038 static int try_decode_frame(AVStream *st, AVPacket *avpkt)
2042 int got_picture, data_size, ret=0;
2045 if(!st->codec->codec){
2046 codec = avcodec_find_decoder(st->codec->codec_id);
2049 ret = avcodec_open(st->codec, codec);
/* only decode while parameters or the reorder delay are still unknown */
2054 if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st)){
2055 switch(st->codec->codec_type) {
2056 case AVMEDIA_TYPE_VIDEO:
2057 avcodec_get_frame_defaults(&picture);
2058 ret = avcodec_decode_video2(st->codec, &picture,
2059 &got_picture, avpkt);
2061 case AVMEDIA_TYPE_AUDIO:
2062 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
2063 samples = av_malloc(data_size);
2066 ret = avcodec_decode_audio3(st->codec, samples,
/*
 * Look up the container tag for a codec id in a CODEC_ID_NONE-terminated
 * table. NOTE(review): excerpt has gaps; the match/return lines are not
 * visible here.
 */
2078 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2080 while (tags->id != CODEC_ID_NONE) {
/*
 * Reverse lookup: map a container tag to a codec id. First pass matches
 * the tag exactly; second pass retries case-insensitively via
 * ff_toupper4(). Returns CODEC_ID_NONE if not found.
 */
2088 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2091 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2092 if(tag == tags[i].tag)
/* second pass: case-insensitive fourcc comparison */
2095 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2096 if (ff_toupper4(tag) == ff_toupper4(tags[i].tag))
2099 return CODEC_ID_NONE;
/*
 * Search a NULL-terminated list of tag tables for the given codec id.
 * NOTE(review): excerpt has gaps; the return statements are not visible.
 */
2102 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2105 for(i=0; tags && tags[i]; i++){
2106 int tag= ff_codec_get_tag(tags[i], id);
/*
 * Search a NULL-terminated list of tag tables for the given container
 * tag; returns the first matching codec id, or CODEC_ID_NONE.
 */
2112 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2115 for(i=0; tags && tags[i]; i++){
2116 enum CodecID id= ff_codec_get_id(tags[i], tag);
2117 if(id!=CODEC_ID_NONE) return id;
2119 return CODEC_ID_NONE;
/*
 * Fill in missing chapter end times: each open-ended chapter ends where
 * the next one starts, and the final chapter ends at start_time+duration
 * of the whole file (rescaled into the chapter's time base).
 */
2122 static void compute_chapters_end(AVFormatContext *s)
2126 for (i=0; i+1<s->nb_chapters; i++)
2127 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
/* requires chapters sorted by start and sharing a time base */
2128 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
2129 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
2130 s->chapters[i]->end = s->chapters[i+1]->start;
2133 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
2134 assert(s->start_time != AV_NOPTS_VALUE);
2135 assert(s->duration > 0);
/* last chapter runs to the end of the file */
2136 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
2138 s->chapters[i]->time_base);
/*
 * Map a small-integer index onto a candidate frame rate used by the
 * fps-guessing code, expressed as a numerator over the implicit
 * denominator 1001*12:
 *   - indices [0, 60*12) represent n/1.001 Hz rates (value n*1001);
 *   - the remaining indices select from a short table of exact rates
 *     (24, 30, 60, 12, 15 Hz), scaled by 1000*12.
 * The caller is expected to keep the index within range.
 */
static int get_std_framerate(int i)
{
    static const int exact_rates[] = { 24, 30, 60, 12, 15 };

    if (i < 60 * 12)
        return i * 1001;
    return exact_rates[i - 60 * 12] * 1000 * 12;
}
2148 * Is the time base unreliable.
2149 * This is a heuristic to balance between quick acceptance of the values in
2150 * the headers vs. some extra checks.
2151 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2152 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2153 * And there are "variable" fps files this needs to detect as well.
/* NOTE(review): excerpt has gaps; the closing condition and return of
 * this predicate are not visible here. */
2155 static int tb_unreliable(AVCodecContext *c){
/* time base implies >=101 fps or <5 fps: almost certainly bogus */
2156 if( c->time_base.den >= 101L*c->time_base.num
2157 || c->time_base.den < 5L*c->time_base.num
2158 /* || c->codec_tag == AV_RL32("DIVX")
2159 || c->codec_tag == AV_RL32("XVID")*/
2160 || c->codec_id == CODEC_ID_MPEG2VIDEO
2161 || c->codec_id == CODEC_ID_H264
/*
 * Probe the input by reading packets until every stream has usable codec
 * parameters (or the probe limits are hit), then derive frame rates and
 * timings. Read packets are buffered so they are not lost to the caller.
 * NOTE(review): this excerpt has many gaps (see the interleaved original
 * line numbers); loop headers, returns and several branch bodies are
 * missing, so the visible statement order is incomplete.
 */
2167 int av_find_stream_info(AVFormatContext *ic)
2169 int i, count, ret, read_size, j;
2171 AVPacket pkt1, *pkt;
2172 int64_t old_offset = url_ftell(ic->pb);
/* pass 1: sanitise per-stream state and open parsers/decoders */
2174 for(i=0;i<ic->nb_streams;i++) {
2176 st = ic->streams[i];
/* AAC containers often lie about these; force re-detection */
2177 if (st->codec->codec_id == CODEC_ID_AAC) {
2178 st->codec->sample_rate = 0;
2179 st->codec->frame_size = 0;
2180 st->codec->channels = 0;
2182 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
2183 /* if(!st->time_base.num)
2185 if(!st->codec->time_base.num)
2186 st->codec->time_base= st->time_base;
2188 //only for the split stuff
2189 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2190 st->parser = av_parser_init(st->codec->codec_id);
2191 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2192 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2195 assert(!st->codec->codec);
2196 codec = avcodec_find_decoder(st->codec->codec_id);
2198 /* Force decoding of at least one frame of codec data
2199 * this makes sure the codec initializes the channel configuration
2200 * and does not trust the values from the container.
2202 if (codec && codec->capabilities & CODEC_CAP_CHANNEL_CONF)
2203 st->codec->channels = 0;
2205 //try to just open decoders, in case this is enough to get parameters
2206 if(!has_codec_parameters(st->codec)){
2208 avcodec_open(st->codec, codec);
2212 for (i=0; i<ic->nb_streams; i++) {
2213 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
/* pass 2: main probe loop — read packets until all streams are settled */
2219 if(url_interrupt_cb()){
2220 ret= AVERROR(EINTR);
2221 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2225 /* check if one codec still needs to be handled */
2226 for(i=0;i<ic->nb_streams;i++) {
2227 st = ic->streams[i];
2228 if (!has_codec_parameters(st->codec))
2230 /* variable fps and no guess at the real fps */
2231 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2232 && st->info->duration_count<20 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2234 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2236 if(st->first_dts == AV_NOPTS_VALUE)
2239 if (i == ic->nb_streams) {
2240 /* NOTE: if the format has no header, then we need to read
2241 some packets to get most of the streams, so we cannot
2243 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2244 /* if we found the info for all the codecs, we can stop */
2246 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2250 /* we did not get all the codec info, but we read too much data */
2251 if (read_size >= ic->probesize) {
2253 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2257 /* NOTE: a new stream can be added there if no header in file
2258 (AVFMTCTX_NOHEADER) */
2259 ret = av_read_frame_internal(ic, &pkt1);
2260 if (ret < 0 && ret != AVERROR(EAGAIN)) {
2262 ret = -1; /* we could not have all the codec parameters before EOF */
2263 for(i=0;i<ic->nb_streams;i++) {
2264 st = ic->streams[i];
2265 if (!has_codec_parameters(st->codec)){
2267 avcodec_string(buf, sizeof(buf), st->codec, 0);
2268 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
2276 if (ret == AVERROR(EAGAIN))
/* keep the packet so the user still receives it after probing */
2279 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2280 if ((ret = av_dup_packet(pkt)) < 0)
2281 goto find_stream_info_err;
2283 read_size += pkt->size;
2285 st = ic->streams[pkt->stream_index];
2286 if (st->codec_info_nb_frames>1) {
2287 if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2288 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2291 st->info->codec_info_duration += pkt->duration;
/* collect DTS deltas to guess the real frame rate later */
2294 int64_t last = st->info->last_dts;
2295 int64_t duration= pkt->dts - last;
2297 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2298 double dur= duration * av_q2d(st->time_base);
2300 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2301 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2302 if (st->info->duration_count < 2)
2303 memset(st->info->duration_error, 0, sizeof(st->info->duration_error));
/* accumulate squared error against each standard frame rate */
2304 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) {
2305 int framerate= get_std_framerate(i);
2306 int ticks= lrintf(dur*framerate/(1001*12));
2307 double error= dur - ticks*1001*12/(double)framerate;
2308 st->info->duration_error[i] += error*error;
2310 st->info->duration_count++;
2311 // ignore the first 4 values, they might have some random jitter
2312 if (st->info->duration_count > 3)
2313 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2315 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2316 st->info->last_dts = pkt->dts;
/* let the parser split out extradata (e.g. headers) from the stream */
2318 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2319 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2321 st->codec->extradata_size= i;
2322 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2323 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2324 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2328 /* if still no information, we try to open the codec and to
2329 decompress the frame. We try to avoid that in most cases as
2330 it takes longer and uses more memory. For MPEG-4, we need to
2331 decompress for QuickTime. */
2332 if (!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st))
2333 try_decode_frame(st, pkt);
2335 st->codec_info_nb_frames++;
2339 // close codecs which were opened in try_decode_frame()
2340 for(i=0;i<ic->nb_streams;i++) {
2341 st = ic->streams[i];
2342 if(st->codec->codec)
2343 avcodec_close(st->codec);
/* pass 3: derive avg/r_frame_rate and per-codec defaults */
2345 for(i=0;i<ic->nb_streams;i++) {
2346 st = ic->streams[i];
2347 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2348 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2349 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2350 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2351 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2352 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2353 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2355 // the check for tb_unreliable() is not completely correct, since this is not about handling
2356 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2357 // ipmovie.c produces.
2358 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num)
2359 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2360 if (st->info->duration_count && !st->r_frame_rate.num
2361 && tb_unreliable(st->codec) /*&&
2362 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2363 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
/* pick the standard frame rate with the smallest accumulated error */
2365 double best_error= 2*av_q2d(st->time_base);
2366 best_error = best_error*best_error*st->info->duration_count*1000*12*30;
2368 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) {
2369 double error = st->info->duration_error[j] * get_std_framerate(j);
2370 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2371 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2372 if(error < best_error){
2374 num = get_std_framerate(j);
2377 // do not increase frame rate by more than 1 % in order to match a standard rate.
2378 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2379 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2382 if (!st->r_frame_rate.num){
/* fall back to the coarser of codec and stream time base */
2383 if( st->codec->time_base.den * (int64_t)st->time_base.num
2384 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2385 st->r_frame_rate.num = st->codec->time_base.den;
2386 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2388 st->r_frame_rate.num = st->time_base.den;
2389 st->r_frame_rate.den = st->time_base.num;
2392 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2393 if(!st->codec->bits_per_coded_sample)
2394 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2398 av_estimate_timings(ic, old_offset);
2400 compute_chapters_end(ic);
2403 /* correct DTS for B-frame streams with no timestamps */
2404 for(i=0;i<ic->nb_streams;i++) {
2405 st = ic->streams[i];
2406 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2408 ppktl = &ic->packet_buffer;
2410 if(ppkt1->stream_index != i)
2412 if(ppkt1->pkt->dts < 0)
2414 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2416 ppkt1->pkt->dts -= delta;
2421 st->cur_dts -= delta;
2427 find_stream_info_err:
2428 for (i=0; i < ic->nb_streams; i++)
2429 av_freep(&ic->streams[i]->info);
2433 /*******************************************************/
/*
 * Resume a paused network stream: demuxer hook first, then the protocol
 * pause callback; ENOSYS if neither is available.
 */
2435 int av_read_play(AVFormatContext *s)
2437 if (s->iformat->read_play)
2438 return s->iformat->read_play(s);
2440 return av_url_read_fpause(s->pb, 0);
2441 return AVERROR(ENOSYS);
/*
 * Pause a network stream: demuxer hook first, then the protocol pause
 * callback; ENOSYS if neither is available.
 */
2444 int av_read_pause(AVFormatContext *s)
2446 if (s->iformat->read_pause)
2447 return s->iformat->read_pause(s);
2449 return av_url_read_fpause(s->pb, 1);
2450 return AVERROR(ENOSYS);
/*
 * Free everything owned by an AVFormatContext except the underlying
 * ByteIOContext: demuxer private data, streams, programs, chapters,
 * queued packets and metadata.
 * NOTE(review): excerpt has gaps; some frees (e.g. the stream itself)
 * are not visible here.
 */
2453 void av_close_input_stream(AVFormatContext *s)
2458 if (s->iformat->read_close)
2459 s->iformat->read_close(s);
2460 for(i=0;i<s->nb_streams;i++) {
2461 /* free all data in a stream component */
2464 av_parser_close(st->parser);
2465 av_free_packet(&st->cur_pkt);
2467 av_metadata_free(&st->metadata);
2468 av_free(st->index_entries);
2469 av_free(st->codec->extradata);
2471 #if FF_API_OLD_METADATA
2472 av_free(st->filename);
2474 av_free(st->priv_data);
2478 for(i=s->nb_programs-1; i>=0; i--) {
2479 #if FF_API_OLD_METADATA
2480 av_freep(&s->programs[i]->provider_name);
2481 av_freep(&s->programs[i]->name);
2483 av_metadata_free(&s->programs[i]->metadata);
2484 av_freep(&s->programs[i]->stream_index);
2485 av_freep(&s->programs[i]);
2487 av_freep(&s->programs);
2488 flush_packet_queue(s);
2489 av_freep(&s->priv_data);
2490 while(s->nb_chapters--) {
2491 #if FF_API_OLD_METADATA
2492 av_free(s->chapters[s->nb_chapters]->title);
2494 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2495 av_free(s->chapters[s->nb_chapters]);
2497 av_freep(&s->chapters);
2498 av_metadata_free(&s->metadata);
/*
 * Close the context and, unless the demuxer is AVFMT_NOFILE, also the
 * underlying I/O context (saved before the context is torn down).
 */
2503 void av_close_input_file(AVFormatContext *s)
2505 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2506 av_close_input_stream(s);
/*
 * Allocate a new AVStream, append it to s->streams and initialise all
 * timestamp-related fields to "unknown" defaults.
 * NOTE(review): excerpt has gaps; allocation-failure cleanup paths are
 * not visible here.
 */
2511 AVStream *av_new_stream(AVFormatContext *s, int id)
2516 #if FF_API_MAX_STREAMS
2517 if (s->nb_streams >= MAX_STREAMS){
2518 av_log(s, AV_LOG_ERROR, "Too many streams\n");
/* guard the (count+1)*sizeof() computation against overflow */
2524 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2526 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2529 s->streams = streams;
2532 st = av_mallocz(sizeof(AVStream));
2535 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2540 st->codec= avcodec_alloc_context();
2542 /* no default bitrate if decoding */
2543 st->codec->bit_rate = 0;
2545 st->index = s->nb_streams;
2547 st->start_time = AV_NOPTS_VALUE;
2548 st->duration = AV_NOPTS_VALUE;
2549 /* we set the current DTS to 0 so that formats without any timestamps
2550 but durations get some timestamps, formats with some unknown
2551 timestamps have their first few packets buffered and the
2552 timestamps corrected before they are returned to the user */
2554 st->first_dts = AV_NOPTS_VALUE;
2555 st->probe_packets = MAX_PROBE_PACKETS;
2557 /* default pts setting is MPEG-like */
2558 av_set_pts_info(st, 33, 1, 90000);
2559 st->last_IP_pts = AV_NOPTS_VALUE;
2560 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2561 st->pts_buffer[i]= AV_NOPTS_VALUE;
2562 st->reference_dts = AV_NOPTS_VALUE;
2564 st->sample_aspect_ratio = (AVRational){0,1};
2566 s->streams[s->nb_streams++] = st;
/*
 * Find the program with the given id, or create and register a new one
 * (with discard defaulting to AVDISCARD_NONE).
 * NOTE(review): excerpt has gaps; the allocation-failure check and the
 * return are not visible here.
 */
2570 AVProgram *av_new_program(AVFormatContext *ac, int id)
2572 AVProgram *program=NULL;
2576 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
/* reuse an existing program with this id if present */
2579 for(i=0; i<ac->nb_programs; i++)
2580 if(ac->programs[i]->id == id)
2581 program = ac->programs[i];
2584 program = av_mallocz(sizeof(AVProgram));
2587 dynarray_add(&ac->programs, &ac->nb_programs, program);
2588 program->discard = AVDISCARD_NONE;
/*
 * Find the chapter with the given id, or create and register a new one;
 * then (re)set its title metadata, time base and start/end.
 * NOTE(review): excerpt has gaps; the end assignment and return are not
 * visible here.
 */
2595 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2597 AVChapter *chapter = NULL;
/* reuse an existing chapter with this id if present */
2600 for(i=0; i<s->nb_chapters; i++)
2601 if(s->chapters[i]->id == id)
2602 chapter = s->chapters[i];
2605 chapter= av_mallocz(sizeof(AVChapter));
2608 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2610 #if FF_API_OLD_METADATA
2611 av_free(chapter->title);
2613 av_metadata_set2(&chapter->metadata, "title", title, 0);
2615 chapter->time_base= time_base;
2616 chapter->start = start;
2622 /************************************************************/
2623 /* output media file */
/*
 * Allocate the muxer's private-data block and forward the (deprecated)
 * AVFormatParameters to its set_parameters() hook if it has one.
 */
2625 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2629 if (s->oformat->priv_data_size > 0) {
2630 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2632 return AVERROR(ENOMEM);
2634 s->priv_data = NULL;
2636 if (s->oformat->set_parameters) {
2637 ret = s->oformat->set_parameters(s, ap);
/*
 * Check whether the stream's (codec_tag, codec_id) pair is consistent
 * with the muxer's tag tables.
 * NOTE(review): excerpt has gaps; the return statements of the
 * individual outcomes are not visible here.
 */
2644 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
2646 const AVCodecTag *avctag;
2648 enum CodecID id = CODEC_ID_NONE;
2649 unsigned int tag = 0;
2652 * Check that tag + id is in the table
2653 * If neither is in the table -> OK
2654 * If tag is in the table with another id -> FAIL
2655 * If id is in the table with another tag -> FAIL unless strict < normal
2657 for (n = 0; s->oformat->codec_tag[n]; n++) {
2658 avctag = s->oformat->codec_tag[n];
2659 while (avctag->id != CODEC_ID_NONE) {
/* fourcc comparison is case-insensitive via ff_toupper4() */
2660 if (ff_toupper4(avctag->tag) == ff_toupper4(st->codec->codec_tag)) {
2662 if (id == st->codec->codec_id)
2665 if (avctag->id == st->codec->codec_id)
2670 if (id != CODEC_ID_NONE)
2672 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
/*
 * Validate all streams, fix up codec tags, emit the muxer identification
 * metadata, call the output format's write_header, and initialise
 * per-stream fractional PTS generation.
 * NOTE(review): heavily elided excerpt (non-contiguous original line
 * numbers): closing braces, some declarations (st, ret, tagbuf, m, t) and
 * several error paths are not visible -- verify against the full source.
 */
2677 int av_write_header(AVFormatContext *s)
2682 // some sanity checks
2683 if (s->nb_streams == 0) {
2684 av_log(s, AV_LOG_ERROR, "no streams\n");
2685 return AVERROR(EINVAL);
/* per-stream sanity checks */
2688 for(i=0;i<s->nb_streams;i++) {
2691 switch (st->codec->codec_type) {
2692 case AVMEDIA_TYPE_AUDIO:
2693 if(st->codec->sample_rate<=0){
2694 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2695 return AVERROR(EINVAL);
/* derive block_align from channels * bytes-per-sample when unset */
2697 if(!st->codec->block_align)
2698 st->codec->block_align = st->codec->channels *
2699 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2701 case AVMEDIA_TYPE_VIDEO:
2702 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2703 av_log(s, AV_LOG_ERROR, "time base not set\n");
2704 return AVERROR(EINVAL);
/* dimensions are mandatory unless the format opts out via AVFMT_NODIMENSIONS */
2706 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2707 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2708 return AVERROR(EINVAL);
2710 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2711 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2712 return AVERROR(EINVAL);
/* codec tag fix-up against the output format's tag tables */
2717 if(s->oformat->codec_tag){
2718 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
2719 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi, we override it here
2720 st->codec->codec_tag= 0;
2722 if(st->codec->codec_tag){
2723 if (!validate_codec_tag(s, st)) {
2725 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
2726 av_log(s, AV_LOG_ERROR,
2727 "Tag %s/0x%08x incompatible with output codec id '%d'\n",
2728 tagbuf, st->codec->codec_tag, st->codec->codec_id);
2729 return AVERROR_INVALIDDATA;
/* no tag set: pick the format's default for this codec id */
2732 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2735 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2736 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2737 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
/* allocate muxer private data if av_set_parameters() was not called */
2740 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2741 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2743 return AVERROR(ENOMEM);
2746 #if FF_API_OLD_METADATA
2747 ff_metadata_mux_compat(s);
2750 /* set muxer identification string */
2751 if (!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
/* build a temporary dict with the "encoder" tag, convert it to the
 * format's native metadata keys, then merge into s->metadata */
2755 if (!(m = av_mallocz(sizeof(AVMetadata))))
2756 return AVERROR(ENOMEM);
2757 av_metadata_set2(&m, "encoder", LIBAVFORMAT_IDENT, 0);
2758 metadata_conv(&m, s->oformat->metadata_conv, NULL);
2759 if ((t = av_metadata_get(m, "", NULL, AV_METADATA_IGNORE_SUFFIX)))
2760 av_metadata_set2(&s->metadata, t->key, t->value, 0);
2761 av_metadata_free(&m);
2764 if(s->oformat->write_header){
2765 ret = s->oformat->write_header(s);
2770 /* init PTS generation */
2771 for(i=0;i<s->nb_streams;i++) {
2772 int64_t den = AV_NOPTS_VALUE;
2775 switch (st->codec->codec_type) {
2776 case AVMEDIA_TYPE_AUDIO:
/* audio ticks per second in stream time base units */
2777 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2779 case AVMEDIA_TYPE_VIDEO:
2780 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2785 if (den != AV_NOPTS_VALUE) {
2787 return AVERROR_INVALIDDATA;
2788 av_frac_init(&st->pts, 0, 0, den);
/*
 * Fill in / sanity-check a packet's pts, dts and duration before muxing:
 * derives duration from the stream's frame duration when zero, infers
 * dts from pts through the B-frame reorder buffer, rejects
 * non-monotone dts and pts < dts, and advances the stream's fractional
 * PTS counter for the next packet.
 * NOTE(review): elided excerpt -- several returns/braces between the
 * visible lines are missing; confirm error codes against full source.
 */
2794 //FIXME merge with compute_pkt_fields
2795 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
/* reorder delay: at least 1 if the codec has/produces B frames */
2796 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2797 int num, den, frame_size, i;
2799 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2801 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2804 /* duration field */
2805 if (pkt->duration == 0) {
2806 compute_frame_duration(&num, &den, st, NULL, pkt);
/* rescale one frame duration into stream time base units */
2808 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
/* no reordering: a lone dts doubles as pts */
2812 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2815 //XXX/FIXME this is a temporary hack until all encoders output pts
2816 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2818 // pkt->pts= st->cur_dts;
2819 pkt->pts= st->pts.val;
2822 //calculate dts from pts
2823 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2824 st->pts_buffer[0]= pkt->pts;
/* seed the reorder buffer with extrapolated timestamps, then keep it
 * sorted; the minimum (slot 0) is the dts of the current packet */
2825 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2826 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
2827 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2828 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2830 pkt->dts= st->pts_buffer[0];
/* muxers require strictly increasing dts per stream */
2833 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2834 av_log(s, AV_LOG_ERROR,
2835 "st:%d error, non monotone timestamps %"PRId64" >= %"PRId64"\n",
2836 st->index, st->cur_dts, pkt->dts);
2839 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2840 av_log(s, AV_LOG_ERROR, "st:%d error, pts < dts\n", st->index);
2844 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2845 st->cur_dts= pkt->dts;
2846 st->pts.val= pkt->dts;
/* advance the fractional PTS counter by this packet's duration */
2849 switch (st->codec->codec_type) {
2850 case AVMEDIA_TYPE_AUDIO:
2851 frame_size = get_audio_frame_size(st->codec, pkt->size);
2853 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2854 likely equal to the encoder delay, but it would be better if we
2855 had the real timestamps from the encoder */
2856 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2857 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2860 case AVMEDIA_TYPE_VIDEO:
2861 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/*
 * Write a packet straight to the muxer (no interleaving): compute/check
 * timestamp fields first, then hand the packet to the output format and
 * report any byte-context error.
 * NOTE(review): elided excerpt -- intermediate returns/braces missing.
 */
2869 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2871 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
/* timestamp problems are fatal unless the format ignores timestamps */
2873 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2876 ret= s->oformat->write_packet(s, pkt);
2878 ret= url_ferror(s->pb);
/*
 * Insert a copy of *pkt into the context's packet_buffer, keeping the
 * list sorted according to the caller-supplied compare() predicate.
 * Ownership of the payload moves to the buffered copy (pkt->destruct is
 * cleared on the original). Also tracks, per stream, the last buffered
 * packet so later insertions can start after it.
 * NOTE(review): elided excerpt -- some braces/else lines are missing;
 * unchecked av_mallocz result is in the original, not introduced here.
 */
2882 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2883 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2885 AVPacketList **next_point, *this_pktl;
2887 this_pktl = av_mallocz(sizeof(AVPacketList));
2888 this_pktl->pkt= *pkt;
2889 pkt->destruct= NULL; // do not free original but only the copy
2890 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
/* start scanning after this stream's last buffered packet when possible */
2892 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
2893 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
2895 next_point = &s->packet_buffer;
/* fast path: if the new packet sorts after the current tail, append */
2898 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
2899 while(!compare(s, &(*next_point)->pkt, pkt)){
2900 next_point= &(*next_point)->next;
2904 next_point = &(s->packet_buffer_end->next);
2907 assert(!*next_point);
2909 s->packet_buffer_end= this_pktl;
/* splice the new node in and remember it as this stream's last packet */
2912 this_pktl->next= *next_point;
2914 s->streams[pkt->stream_index]->last_in_packet_buffer=
2915 *next_point= this_pktl;
/*
 * Ordering predicate for dts-based interleaving: returns nonzero when
 * pkt's dts, rescaled into next's time base (rounded down), is strictly
 * smaller than next's dts -- i.e. pkt should be emitted before next.
 */
2918 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2920 AVStream *st = s->streams[ pkt ->stream_index];
2921 AVStream *st2= s->streams[ next->stream_index];
/* cross-multiplied time bases: b/a converts pkt's tb into next's tb */
2922 int64_t a= st2->time_base.num * (int64_t)st ->time_base.den;
2923 int64_t b= st ->time_base.num * (int64_t)st2->time_base.den;
2924 return av_rescale_rnd(pkt->dts, b, a, AV_ROUND_DOWN) < next->dts;
/*
 * Default interleaving strategy: buffer incoming packets sorted by dts
 * and only release the head once every stream has at least one buffered
 * packet (or when flushing). Returns via *out; see caller contract in
 * av_interleave_packet().
 * NOTE(review): elided excerpt -- declarations (pktl, i, stream_count)
 * and some returns are not visible here.
 */
2927 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2933 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
/* count how many streams currently have something buffered */
2936 for(i=0; i < s->nb_streams; i++)
2937 stream_count+= !!s->streams[i]->last_in_packet_buffer;
/* emit the head only when all streams are represented, or on flush */
2939 if(stream_count && (s->nb_streams == stream_count || flush)){
2940 pktl= s->packet_buffer;
2943 s->packet_buffer= pktl->next;
2944 if(!s->packet_buffer)
2945 s->packet_buffer_end= NULL;
/* if the emitted node was a stream's last buffered packet, clear it */
2947 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
2948 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
/* nothing to emit yet */
2952 av_init_packet(out);
2958 * Interleave an AVPacket correctly so it can be muxed.
2959 * @param out the interleaved packet will be output here
2960 * @param in the input packet
2961 * @param flush 1 if no further packets are available as input and all
2962 * remaining packets should be output
2963 * @return 1 if a packet was output, 0 if no packet could be output,
2964 * < 0 if an error occurred
/* dispatch to the format's own interleaver when provided, otherwise
 * fall back to the generic per-dts implementation */
2966 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2967 if(s->oformat->interleave_packet)
2968 return s->oformat->interleave_packet(s, out, in, flush);
2970 return av_interleave_packet_per_dts(s, out, in, flush);
/*
 * Buffer a packet through the interleaver and write out every packet the
 * interleaver releases, in order. Zero-size audio packets are dropped
 * (see HACK comment); packets without dts are rejected unless the
 * format ignores timestamps.
 * NOTE(review): elided excerpt -- the drain loop's braces/returns are
 * only partially visible; confirm loop structure against full source.
 */
2973 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2974 AVStream *st= s->streams[ pkt->stream_index];
2976 //FIXME/XXX/HACK drop zero sized packets
2977 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
2980 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2981 if(compute_pkt_fields2(s, st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2984 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
/* drain: pull interleaved packets until the interleaver holds the rest back */
2989 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2990 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2993 ret= s->oformat->write_packet(s, &opkt);
2995 av_free_packet(&opkt);
3000 if(url_ferror(s->pb))
3001 return url_ferror(s->pb);
/*
 * Flush all packets still held by the interleaver, call the format's
 * write_trailer, and free per-stream and muxer private data.
 * NOTE(review): elided excerpt -- loop braces and some intermediate
 * returns are missing from view.
 */
3005 int av_write_trailer(AVFormatContext *s)
/* flush: interleave with in==NULL, flush=1 until nothing is returned */
3011 ret= av_interleave_packet(s, &pkt, NULL, 1);
3012 if(ret<0) //FIXME cleanup needed for ret<0 ?
3017 ret= s->oformat->write_packet(s, &pkt);
3019 av_free_packet(&pkt);
3023 if(url_ferror(s->pb))
3027 if(s->oformat->write_trailer)
3028 ret = s->oformat->write_trailer(s);
3031 ret=url_ferror(s->pb);
/* release resources owned by the muxing side */
3032 for(i=0;i<s->nb_streams;i++) {
3033 av_freep(&s->streams[i]->priv_data);
3034 av_freep(&s->streams[i]->index_entries);
3036 av_freep(&s->priv_data);
/*
 * Append stream index idx to the program with id progid, unless the
 * index is out of range or already present in that program.
 * NOTE(review): elided excerpt -- declarations (i, j, tmp), the realloc
 * failure branch and closing braces are missing from view.
 */
3040 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3043 AVProgram *program=NULL;
3046 if (idx >= ac->nb_streams) {
3047 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
/* locate the program; skip entries with a different id */
3051 for(i=0; i<ac->nb_programs; i++){
3052 if(ac->programs[i]->id != progid)
3054 program = ac->programs[i];
/* avoid duplicate stream indexes within the program */
3055 for(j=0; j<program->nb_stream_indexes; j++)
3056 if(program->stream_index[j] == idx)
/* grow the index array by one and append */
3059 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3062 program->stream_index = tmp;
3063 program->stream_index[program->nb_stream_indexes++] = idx;
/* Pretty-print a rate: two decimals when fractional, whole number when
 * integral, "k" suffix for exact multiples of 1000. */
3068 static void print_fps(double d, const char *postfix){
/* v is the rate in hundredths, used to test for exactness */
3069 uint64_t v= lrintf(d*100);
3070 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3071 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3072 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/*
 * Log all metadata tags of m at INFO level, one per line, prefixed by
 * indent. Skipped entirely when the only tag is "language" (that is
 * printed inline by the stream dump instead).
 */
3075 static void dump_metadata(void *ctx, AVMetadata *m, const char *indent)
3077 if(m && !(m->count == 1 && av_metadata_get(m, "language", NULL, 0))){
3078 AVMetadataTag *tag=NULL;
3080 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
/* iterate all tags; empty prefix + IGNORE_SUFFIX matches everything */
3081 while((tag=av_metadata_get(m, "", tag, AV_METADATA_IGNORE_SUFFIX))) {
3082 if(strcmp("language", tag->key))
3083 av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value);
3088 /* "user interface" functions */
/*
 * Print one "Stream #index.i" description line: codec string, optional
 * id/language, aspect ratios, and (for video) the fps/tbr/tbn/tbc rates,
 * followed by the stream's metadata.
 * NOTE(review): elided excerpt -- buf declaration, some braces and the
 * av_reduce() rounding argument are missing from view.
 */
3089 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3092 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3093 AVStream *st = ic->streams[i];
/* reduce the time base by its gcd for compact debug output below */
3094 int g = av_gcd(st->time_base.num, st->time_base.den);
3095 AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
3096 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3097 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
3098 /* the pid is an important information, so we display it */
3099 /* XXX: add a generic system */
3100 if (flags & AVFMT_SHOW_IDS)
3101 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3103 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3104 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3105 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* only show SAR/DAR when it differs from the codec-layer aspect ratio */
3106 if (st->sample_aspect_ratio.num && // default
3107 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3108 AVRational display_aspect_ratio;
3109 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3110 st->codec->width*st->sample_aspect_ratio.num,
3111 st->codec->height*st->sample_aspect_ratio.den,
3113 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
3114 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3115 display_aspect_ratio.num, display_aspect_ratio.den);
/* video rate summary: average fps, real frame rate, stream and codec
 * time base frequencies */
3117 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3118 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3119 print_fps(av_q2d(st->avg_frame_rate), "fps");
3120 if(st->r_frame_rate.den && st->r_frame_rate.num)
3121 print_fps(av_q2d(st->r_frame_rate), "tbr");
3122 if(st->time_base.den && st->time_base.num)
3123 print_fps(1/av_q2d(st->time_base), "tbn");
3124 if(st->codec->time_base.den && st->codec->time_base.num)
3125 print_fps(1/av_q2d(st->codec->time_base), "tbc");
3127 av_log(NULL, AV_LOG_INFO, "\n");
3128 dump_metadata(NULL, st->metadata, " ");
/*
 * Log a human-readable summary of the whole format context: container
 * name, metadata, duration, start time, bitrate, chapters, programs with
 * their streams, and finally any stream not claimed by a program.
 * "printed" marks streams already shown under a program.
 * NOTE(review): elided excerpt -- remaining signature parameters
 * (index, url, is_output per usage below), several braces and the
 * trailing av_free(printed) are missing from view.
 */
3131 void dump_format(AVFormatContext *ic,
/* one flag per stream: set once the stream has been dumped */
3137 uint8_t *printed = av_mallocz(ic->nb_streams);
3138 if (ic->nb_streams && !printed)
3141 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3142 is_output ? "Output" : "Input",
3144 is_output ? ic->oformat->name : ic->iformat->name,
3145 is_output ? "to" : "from", url);
3146 dump_metadata(NULL, ic->metadata, " ");
3148 av_log(NULL, AV_LOG_INFO, " Duration: ");
3149 if (ic->duration != AV_NOPTS_VALUE) {
3150 int hours, mins, secs, us;
/* split AV_TIME_BASE-unit duration into h:m:s.cc */
3151 secs = ic->duration / AV_TIME_BASE;
3152 us = ic->duration % AV_TIME_BASE;
3157 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3158 (100 * us) / AV_TIME_BASE);
3160 av_log(NULL, AV_LOG_INFO, "N/A");
3162 if (ic->start_time != AV_NOPTS_VALUE) {
3164 av_log(NULL, AV_LOG_INFO, ", start: ");
3165 secs = ic->start_time / AV_TIME_BASE;
/* abs() so a negative start time still prints positive microseconds */
3166 us = abs(ic->start_time % AV_TIME_BASE);
3167 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3168 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3170 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3172 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3174 av_log(NULL, AV_LOG_INFO, "N/A");
3176 av_log(NULL, AV_LOG_INFO, "\n");
/* chapters: start/end converted to seconds via each chapter's time base */
3178 for (i = 0; i < ic->nb_chapters; i++) {
3179 AVChapter *ch = ic->chapters[i];
3180 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3181 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3182 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3184 dump_metadata(NULL, ch->metadata, " ");
/* programs first: dump each program's streams and mark them printed */
3186 if(ic->nb_programs) {
3187 int j, k, total = 0;
3188 for(j=0; j<ic->nb_programs; j++) {
3189 AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
3191 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3192 name ? name->value : "");
3193 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3194 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3195 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3196 printed[ic->programs[j]->stream_index[k]] = 1;
3198 total += ic->programs[j]->nb_stream_indexes;
3200 if (total < ic->nb_streams)
3201 av_log(NULL, AV_LOG_INFO, " No Program\n");
/* then any stream not belonging to a program */
3203 for(i=0;i<ic->nb_streams;i++)
3205 dump_stream_format(ic, i, index, is_output);
3210 #if LIBAVFORMAT_VERSION_MAJOR < 53
3211 #include "libavcore/parseutils.h"
/* Deprecated (pre-version-53) wrapper: forwards to av_parse_video_size(). */
3213 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
3215 return av_parse_video_size(width_ptr, height_ptr, str);
/* Deprecated (pre-version-53) wrapper: parses a frame rate string via
 * av_parse_video_rate() and splits the result into num/den out-params.
 * NOTE(review): closing brace / final return of ret elided from view. */
3218 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
3220 AVRational frame_rate;
3221 int ret = av_parse_video_rate(&frame_rate, arg);
3222 *frame_rate_num= frame_rate.num;
3223 *frame_rate_den= frame_rate.den;
/* Current wall-clock time in microseconds since the Unix epoch,
 * from gettimeofday(). */
3228 int64_t av_gettime(void)
3231 gettimeofday(&tv,NULL);
3232 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/* Current time as NTP microseconds: wall clock truncated to millisecond
 * precision, shifted by the Unix->NTP epoch offset. */
3235 uint64_t ff_ntp_time(void)
3237 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/*
 * Parse datestr either as an absolute date/time (duration==0; supports
 * "now", optional date part, optional 'T'/' ' separator, trailing
 * 'Z'/'z' for UTC, and a fractional ".micro" suffix) or as a duration
 * (duration!=0; HH:MM:SS or a plain seconds count). Result is in
 * microseconds.
 * NOTE(review): heavily elided excerpt -- declarations (p, q, dt, t,
 * val, n, negative, ...), format-string tables and many braces/error
 * returns are missing from view; hedge all details against full source.
 */
3240 int64_t parse_date(const char *datestr, int duration)
3246 static const char * const date_fmt[] = {
3250 static const char * const time_fmt[] = {
3260 time_t now = time(0);
3262 len = strlen(datestr);
3264 lastch = datestr[len - 1];
/* trailing 'Z'/'z' selects UTC interpretation */
3267 is_utc = (lastch == 'z' || lastch == 'Z');
3269 memset(&dt, 0, sizeof(dt));
/* "now" short-circuits to the current time */
3274 if (!strncasecmp(datestr, "now", len))
3275 return (int64_t) now * 1000000;
3277 /* parse the year-month-day part */
3278 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
3279 q = small_strptime(p, date_fmt[i], &dt);
3285 /* if the year-month-day part is missing, then take the
3286 * current year-month-day time */
3291 dt = *localtime(&now);
3293 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
3298 if (*p == 'T' || *p == 't' || *p == ' ')
3301 /* parse the hour-minute-second part */
3302 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
3303 q = small_strptime(p, time_fmt[i], &dt);
3309 /* parse datestr as a duration */
3314 /* parse datestr as HH:MM:SS */
3315 q = small_strptime(p, time_fmt[0], &dt);
3317 /* parse datestr as S+ */
3318 dt.tm_sec = strtol(p, (char **)&q, 10);
3320 /* the parsing didn't succeed */
3327 /* Now we have all the fields that we can get */
3333 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
3335 dt.tm_isdst = -1; /* unknown */
3345 /* parse the .m... part */
/* up to six fractional digits -> microseconds */
3349 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
3352 val += n * (*q - '0');
3356 return negative ? -t : t;
/*
 * Scan an URL-style "tag1=val1&tag2=val2" string in info for tag1 and,
 * when found, copy its (decoded) value into arg (at most arg_size-1
 * chars). Returns nonzero on match.
 * NOTE(review): elided excerpt -- the tag/arg buffers, '+' and '%%'
 * decoding and the loop skeleton are missing from view.
 */
3359 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
/* copy the tag name up to '=' or '&', bounded by the tag buffer */
3369 while (*p != '\0' && *p != '=' && *p != '&') {
3370 if ((q - tag) < sizeof(tag) - 1)
/* copy the value up to '&', bounded by arg_size */
3378 while (*p != '&' && *p != '\0') {
3379 if ((q - arg) < arg_size - 1) {
3389 if (!strcmp(tag, tag1))
/*
 * Expand a single "%[0N]d" in path with number (zero-padded to the
 * optional width) into buf. Fails if no %d is present or buf is too
 * small; "%%" escapes a literal percent.
 * NOTE(review): elided excerpt -- the main copy loop, '%' handling and
 * error returns are missing from view.
 */
3398 int av_get_frame_filename(char *buf, int buf_size,
3399 const char *path, int number)
3402 char *q, buf1[20], c;
3403 int nd, len, percentd_found;
/* accumulate the optional zero-pad width after '%' */
3415 while (isdigit(*p)) {
3416 nd = nd * 10 + *p++ - '0';
3419 } while (isdigit(c));
/* render the number with the requested width, then bounds-check */
3428 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3430 if ((q - buf + len) > buf_size - 1)
3432 memcpy(q, buf1, len);
3440 if ((q - buf) < buf_size - 1)
/* exactly one %d is required for a valid frame filename pattern */
3444 if (!percentd_found)
/*
 * Hex-dump buf (16 bytes per row: hex column then printable-ASCII
 * column) either to FILE *f or, when f is NULL, to av_log at the given
 * level.
 * NOTE(review): elided excerpt -- row-offset printing, padding and the
 * #undef are missing from view.
 */
3453 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
/* route output to av_log when no FILE* is given */
3457 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3459 for(i=0;i<size;i+=16) {
3466 PRINT(" %02x", buf[i+j]);
/* ASCII column: non-printable bytes are substituted */
3471 for(j=0;j<len;j++) {
3473 if (c < ' ' || c > '~')
/* Public wrapper: hex-dump to a stdio FILE. */
3482 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3484 hex_dump_internal(NULL, f, 0, buf, size);
/* Public wrapper: hex-dump through av_log at the given level. */
3487 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3489 hex_dump_internal(avcl, NULL, level, buf, size);
/*
 * Dump an AVPacket's fields (stream index, keyframe flag, duration,
 * dts, pts, size) and optionally its payload, to FILE *f or to av_log
 * when f is NULL. Timestamps are printed as seconds assuming
 * AV_TIME_BASE units (see FIXME below).
 * NOTE(review): elided excerpt -- the "N/A" branches and #undef are
 * missing from view.
 */
3492 //FIXME needs to know the time_base
3493 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3496 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3497 PRINT("stream #%d:\n", pkt->stream_index);
3498 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3499 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3500 /* DTS is _always_ valid after av_read_frame() */
3502 if (pkt->dts == AV_NOPTS_VALUE)
3505 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3506 /* PTS may not be known if B-frames are present. */
3508 if (pkt->pts == AV_NOPTS_VALUE)
3511 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3513 PRINT(" size=%d\n", pkt->size);
/* payload dump is opt-in via dump_payload */
3516 av_hex_dump(f, pkt->data, pkt->size);
/* Public wrapper: dump a packet to a stdio FILE. */
3519 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3521 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
/* Public wrapper: dump a packet through av_log at the given level. */
3524 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3526 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
/* Deprecated (pre-version-53) alias kept for ABI compatibility:
 * forwards all arguments to av_url_split().
 * NOTE(review): port_ptr/url parameters and the #endif are elided from
 * view. */
3529 #if LIBAVFORMAT_VERSION_MAJOR < 53
3530 attribute_deprecated
3531 void ff_url_split(char *proto, int proto_size,
3532 char *authorization, int authorization_size,
3533 char *hostname, int hostname_size,
3535 char *path, int path_size,
3538 av_url_split(proto, proto_size,
3539 authorization, authorization_size,
3540 hostname, hostname_size,
/*
 * Split an URL into proto, authorization (user[:pass]), hostname, port
 * and path components. Every out buffer is cleared first; missing
 * components stay empty and *port_ptr stays -1. Supports bracketed
 * numeric IPv6 hosts ("[::1]:80").
 * NOTE(review): elided excerpt -- the port_ptr/url parameters in the
 * signature and a few intermediate lines are missing from view.
 */
3547 void av_url_split(char *proto, int proto_size,
3548 char *authorization, int authorization_size,
3549 char *hostname, int hostname_size,
3551 char *path, int path_size,
3554 const char *p, *ls, *at, *col, *brk;
/* initialise all outputs so absent components are well-defined */
3556 if (port_ptr) *port_ptr = -1;
3557 if (proto_size > 0) proto[0] = 0;
3558 if (authorization_size > 0) authorization[0] = 0;
3559 if (hostname_size > 0) hostname[0] = 0;
3560 if (path_size > 0) path[0] = 0;
3562 /* parse protocol */
3563 if ((p = strchr(url, ':'))) {
3564 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3569 /* no protocol means plain filename */
3570 av_strlcpy(path, url, path_size);
3574 /* separate path from hostname */
3575 ls = strchr(p, '/');
3577 ls = strchr(p, '?');
3579 av_strlcpy(path, ls, path_size);
3581 ls = &p[strlen(p)]; // XXX
3583 /* the rest is hostname, use that to parse auth/port */
3585 /* authorization (user[:pass]@hostname) */
3586 if ((at = strchr(p, '@')) && at < ls) {
3587 av_strlcpy(authorization, p,
3588 FFMIN(authorization_size, at + 1 - p));
3589 p = at + 1; /* skip '@' */
/* bracketed numeric IPv6 host, port follows "]:" */
3592 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3594 av_strlcpy(hostname, p + 1,
3595 FFMIN(hostname_size, brk - p));
3596 if (brk[1] == ':' && port_ptr)
3597 *port_ptr = atoi(brk + 2);
3598 } else if ((col = strchr(p, ':')) && col < ls) {
3599 av_strlcpy(hostname, p,
3600 FFMIN(col + 1 - p, hostname_size));
3601 if (port_ptr) *port_ptr = atoi(col + 1);
/* no port: everything up to ls is the hostname */
3603 av_strlcpy(hostname, p,
3604 FFMIN(ls + 1 - p, hostname_size));
/*
 * Write the s bytes of src into buff as 2*s hex characters (upper or
 * lower case per the lowercase flag). Caller provides a buffer of at
 * least 2*s (+ terminator, added outside the visible excerpt).
 * NOTE(review): the return statement is elided from view.
 */
3608 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
3611 static const char hex_table_uc[16] = { '0', '1', '2', '3',
3614 'C', 'D', 'E', 'F' };
3615 static const char hex_table_lc[16] = { '0', '1', '2', '3',
3618 'c', 'd', 'e', 'f' };
3619 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
/* high nibble then low nibble per input byte */
3621 for(i = 0; i < s; i++) {
3622 buff[i * 2] = hex_table[src[i] >> 4];
3623 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
/*
 * Decode a hex string p (whitespace-tolerant, case-insensitive) into
 * data, returning the number of bytes produced.
 * NOTE(review): elided excerpt -- the accumulation of nibbles into
 * bytes, termination and the return are missing from view.
 */
3629 int ff_hex_to_data(uint8_t *data, const char *p)
/* skip any leading whitespace between hex digits */
3636 p += strspn(p, SPACE_CHARS);
3639 c = toupper((unsigned char) *p++);
3640 if (c >= '0' && c <= '9')
3642 else if (c >= 'A' && c <= 'F')
/*
 * Set a stream's timestamp wrap bits and time base. The pts_num/pts_den
 * pair is reduced (and clipped to INT_MAX) via av_reduce; a debug/warning
 * message notes when a common factor was removed or the time base had to
 * be approximated. A degenerate result zeroes both fields.
 */
3657 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3658 unsigned int pts_num, unsigned int pts_den)
3660 s->pts_wrap_bits = pts_wrap_bits;
/* av_reduce returns nonzero when the reduction was exact */
3662 if(av_reduce(&s->time_base.num, &s->time_base.den, pts_num, pts_den, INT_MAX)){
3663 if(s->time_base.num != pts_num)
3664 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/s->time_base.num);
3666 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
/* invalid/zero time base: normalise both fields to 0 */
3668 if(!s->time_base.num || !s->time_base.den)
3669 s->time_base.num= s->time_base.den= 0;
/*
 * Assemble "proto://[auth@]host[:port]<fmt...>" into str (size bytes).
 * Numeric IPv6 hostnames are wrapped in brackets when network support
 * is compiled in. The trailing printf-style fmt/varargs are appended
 * last; returns the resulting string length (return elided from view).
 * NOTE(review): elided excerpt -- the initial str[0]=0, some #else/#endif
 * lines, the port>0 guard and va_end are missing from view.
 */
3672 int ff_url_join(char *str, int size, const char *proto,
3673 const char *authorization, const char *hostname,
3674 int port, const char *fmt, ...)
3677 struct addrinfo hints, *ai;
3682 av_strlcatf(str, size, "%s://", proto);
3683 if (authorization && authorization[0])
3684 av_strlcatf(str, size, "%s@", authorization);
3685 #if CONFIG_NETWORK && defined(AF_INET6)
3686 /* Determine if hostname is a numerical IPv6 address,
3687 * properly escape it within [] in that case. */
3688 memset(&hints, 0, sizeof(hints));
3689 hints.ai_flags = AI_NUMERICHOST;
3690 if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
3691 if (ai->ai_family == AF_INET6) {
3692 av_strlcat(str, "[", size);
3693 av_strlcat(str, hostname, size);
3694 av_strlcat(str, "]", size);
3696 av_strlcat(str, hostname, size);
3701 /* Not an IPv6 address, just output the plain string. */
3702 av_strlcat(str, hostname, size);
3705 av_strlcatf(str, size, ":%d", port);
/* append the caller's formatted suffix after the authority part */
3708 int len = strlen(str);
3711 vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
/*
 * Forward a packet from a source context into a chained (nested) muxer:
 * copy the packet, retarget its stream index, rescale pts/dts from the
 * source stream's time base into the destination stream's, and write it
 * with av_write_frame(). Returns av_write_frame()'s result.
 * NOTE(review): the local_pkt declaration/copy line is elided from view.
 */
3717 int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
3718 AVFormatContext *src)
3723 local_pkt.stream_index = dst_stream;
/* only rescale timestamps that are actually set */
3724 if (pkt->pts != AV_NOPTS_VALUE)
3725 local_pkt.pts = av_rescale_q(pkt->pts,
3726 src->streams[pkt->stream_index]->time_base,
3727 dst->streams[dst_stream]->time_base);
3728 if (pkt->dts != AV_NOPTS_VALUE)
3729 local_pkt.dts = av_rescale_q(pkt->dts,
3730 src->streams[pkt->stream_index]->time_base,
3731 dst->streams[dst_stream]->time_base);
3732 return av_write_frame(dst, &local_pkt);
3735 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3738 const char *ptr = str;
3740 /* Parse key=value pairs. */
3743 char *dest = NULL, *dest_end;
3744 int key_len, dest_len = 0;
3746 /* Skip whitespace and potential commas. */
3747 while (*ptr && (isspace(*ptr) || *ptr == ','))
3754 if (!(ptr = strchr(key, '=')))
3757 key_len = ptr - key;
3759 callback_get_buf(context, key, key_len, &dest, &dest_len);
3760 dest_end = dest + dest_len - 1;
3764 while (*ptr && *ptr != '\"') {
3768 if (dest && dest < dest_end)
3772 if (dest && dest < dest_end)
3780 for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
3781 if (dest && dest < dest_end)