/*
 * various utility functions for use within FFmpeg
 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
23 #include "libavcodec/opt.h"
25 #include "libavutil/avstring.h"
/**
 * @file libavformat/utils.c
 * various utility functions for use within FFmpeg
 */
39 unsigned avformat_version(void)
41 return LIBAVFORMAT_VERSION_INT;
44 /* fraction handling */
47 * f = val + (num / den) + 0.5.
49 * 'num' is normalized so that it is such as 0 <= num < den.
51 * @param f fractional number
52 * @param val integer value
53 * @param num must be >= 0
54 * @param den must be >= 1
56 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
69 * Fractional addition to f: f = f + (incr / f->den).
71 * @param f fractional number
72 * @param incr increment, can be positive or negative
74 static void av_frac_add(AVFrac *f, int64_t incr)
87 } else if (num >= den) {
94 /** head of registered input format linked list */
95 AVInputFormat *first_iformat = NULL;
96 /** head of registered output format linked list */
97 AVOutputFormat *first_oformat = NULL;
99 AVInputFormat *av_iformat_next(AVInputFormat *f)
101 if(f) return f->next;
102 else return first_iformat;
105 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
107 if(f) return f->next;
108 else return first_oformat;
111 void av_register_input_format(AVInputFormat *format)
115 while (*p != NULL) p = &(*p)->next;
120 void av_register_output_format(AVOutputFormat *format)
124 while (*p != NULL) p = &(*p)->next;
/**
 * Check whether filename's extension (text after the last '.') matches,
 * case-insensitively, one of the comma-separated entries in 'extensions'.
 *
 * @param filename   file name to test, may be NULL
 * @param extensions comma-separated list of extensions (without dots)
 * @return 1 on match, 0 otherwise
 */
int match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if(!filename)
        return 0;

    ext = strrchr(filename, '.');
    if (ext) {
        ext++;
        p = extensions;
        for(;;) {
            /* copy the next comma-separated candidate, bounded to ext1 */
            q = ext1;
            while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
                *q++ = *p++;
            *q = '\0';
            if (!strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++;
        }
    }
    return 0;
}
/**
 * Check whether 'name' matches, case-insensitively, one of the
 * comma-separated entries in 'names'. Comparison uses the longer of the
 * two lengths so "mp" does not match the entry "mp4".
 *
 * @return 1 on match, 0 otherwise (also if either argument is NULL)
 */
static int match_format(const char *name, const char *names)
{
    const char *p;
    size_t len, namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        /* compare over max(entry length, name length) to forbid prefix matches */
        len = (size_t)(p - names);
        if (namelen > len)
            len = namelen;
        if (!strncasecmp(name, names, len))
            return 1;
        names = p + 1;
    }
    /* last (or only) entry has no trailing comma */
    return !strcasecmp(name, names);
}
174 AVOutputFormat *guess_format(const char *short_name, const char *filename,
175 const char *mime_type)
177 AVOutputFormat *fmt, *fmt_found;
178 int score_max, score;
180 /* specific test for image sequences */
181 #if CONFIG_IMAGE2_MUXER
182 if (!short_name && filename &&
183 av_filename_number_test(filename) &&
184 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
185 return guess_format("image2", NULL, NULL);
188 /* Find the proper file type. */
192 while (fmt != NULL) {
194 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
196 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
198 if (filename && fmt->extensions &&
199 match_ext(filename, fmt->extensions)) {
202 if (score > score_max) {
211 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
212 const char *mime_type)
214 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
217 AVOutputFormat *stream_fmt;
218 char stream_format_name[64];
220 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
221 stream_fmt = guess_format(stream_format_name, NULL, NULL);
230 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
231 const char *filename, const char *mime_type, enum CodecType type){
232 if(type == CODEC_TYPE_VIDEO){
233 enum CodecID codec_id= CODEC_ID_NONE;
235 #if CONFIG_IMAGE2_MUXER
236 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
237 codec_id= av_guess_image2_codec(filename);
240 if(codec_id == CODEC_ID_NONE)
241 codec_id= fmt->video_codec;
243 }else if(type == CODEC_TYPE_AUDIO)
244 return fmt->audio_codec;
246 return CODEC_ID_NONE;
249 AVInputFormat *av_find_input_format(const char *short_name)
252 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
253 if (match_format(short_name, fmt->name))
259 /* memory handling */
262 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
264 int ret= av_new_packet(pkt, size);
269 pkt->pos= url_ftell(s);
271 ret= get_buffer(s, pkt->data, size);
275 av_shrink_packet(pkt, ret);
/** Return 1 if the filename template contains a valid frame number pattern. */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
}
287 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
289 AVInputFormat *fmt1, *fmt;
293 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
294 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
297 if (fmt1->read_probe) {
298 score = fmt1->read_probe(pd);
299 } else if (fmt1->extensions) {
300 if (match_ext(pd->filename, fmt1->extensions)) {
304 if (score > *score_max) {
307 }else if (score == *score_max)
313 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
315 return av_probe_input_format2(pd, is_opened, &score);
318 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
321 fmt = av_probe_input_format2(pd, 1, &score);
324 if (!strcmp(fmt->name, "mp3")) {
325 st->codec->codec_id = CODEC_ID_MP3;
326 st->codec->codec_type = CODEC_TYPE_AUDIO;
327 } else if (!strcmp(fmt->name, "ac3")) {
328 st->codec->codec_id = CODEC_ID_AC3;
329 st->codec->codec_type = CODEC_TYPE_AUDIO;
330 } else if (!strcmp(fmt->name, "mpegvideo")) {
331 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
332 st->codec->codec_type = CODEC_TYPE_VIDEO;
333 } else if (!strcmp(fmt->name, "m4v")) {
334 st->codec->codec_id = CODEC_ID_MPEG4;
335 st->codec->codec_type = CODEC_TYPE_VIDEO;
336 } else if (!strcmp(fmt->name, "h264")) {
337 st->codec->codec_id = CODEC_ID_H264;
338 st->codec->codec_type = CODEC_TYPE_VIDEO;
344 /************************************************************/
345 /* input media file */
348 * Open a media file from an IO stream. 'fmt' must be specified.
350 int av_open_input_stream(AVFormatContext **ic_ptr,
351 ByteIOContext *pb, const char *filename,
352 AVInputFormat *fmt, AVFormatParameters *ap)
356 AVFormatParameters default_ap;
360 memset(ap, 0, sizeof(default_ap));
363 if(!ap->prealloced_context)
364 ic = avformat_alloc_context();
368 err = AVERROR(ENOMEM);
373 ic->duration = AV_NOPTS_VALUE;
374 ic->start_time = AV_NOPTS_VALUE;
375 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
377 /* allocate private data */
378 if (fmt->priv_data_size > 0) {
379 ic->priv_data = av_mallocz(fmt->priv_data_size);
380 if (!ic->priv_data) {
381 err = AVERROR(ENOMEM);
385 ic->priv_data = NULL;
388 if (ic->iformat->read_header) {
389 err = ic->iformat->read_header(ic, ap);
394 if (pb && !ic->data_offset)
395 ic->data_offset = url_ftell(ic->pb);
397 #if LIBAVFORMAT_VERSION_MAJOR < 53
398 ff_metadata_demux_compat(ic);
406 av_freep(&ic->priv_data);
407 for(i=0;i<ic->nb_streams;i++) {
408 AVStream *st = ic->streams[i];
410 av_free(st->priv_data);
411 av_free(st->codec->extradata);
421 /** size of probe buffer, for guessing file type from file contents */
422 #define PROBE_BUF_MIN 2048
423 #define PROBE_BUF_MAX (1<<20)
425 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
428 AVFormatParameters *ap)
431 AVProbeData probe_data, *pd = &probe_data;
432 ByteIOContext *pb = NULL;
436 pd->filename = filename;
441 /* guess format if no file can be opened */
442 fmt = av_probe_input_format(pd, 0);
445 /* Do not open file if the format does not need it. XXX: specific
446 hack needed to handle RTSP/TCP */
447 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
448 /* if no file needed do not try to open one */
449 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
453 url_setbufsize(pb, buf_size);
456 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
457 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
458 /* read probe data */
459 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
460 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
461 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
462 if (url_fseek(pb, 0, SEEK_SET) < 0) {
464 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
470 /* guess file format */
471 fmt = av_probe_input_format2(pd, 1, &score);
476 /* if still no format found, error */
482 /* check filename in case an image number is expected */
483 if (fmt->flags & AVFMT_NEEDNUMBER) {
484 if (!av_filename_number_test(filename)) {
485 err = AVERROR_NUMEXPECTED;
489 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
502 /*******************************************************/
504 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
505 AVPacketList **plast_pktl){
506 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
511 (*plast_pktl)->next = pktl;
513 *packet_buffer = pktl;
515 /* add the packet in the buffered packet list */
521 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
527 AVPacketList *pktl = s->raw_packet_buffer;
531 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
532 s->raw_packet_buffer = pktl->next;
539 ret= s->iformat->read_packet(s, pkt);
542 st= s->streams[pkt->stream_index];
544 switch(st->codec->codec_type){
545 case CODEC_TYPE_VIDEO:
546 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
548 case CODEC_TYPE_AUDIO:
549 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
551 case CODEC_TYPE_SUBTITLE:
552 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
556 if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
559 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
561 if(st->codec->codec_id == CODEC_ID_PROBE){
562 AVProbeData *pd = &st->probe_data;
564 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
565 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
566 pd->buf_size += pkt->size;
567 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
569 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
570 set_codec_from_probe_data(st, pd, 1);
571 if(st->codec->codec_id != CODEC_ID_PROBE){
580 /**********************************************************/
583 * Get the number of samples of an audio frame. Return -1 on error.
585 static int get_audio_frame_size(AVCodecContext *enc, int size)
589 if(enc->codec_id == CODEC_ID_VORBIS)
592 if (enc->frame_size <= 1) {
593 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
595 if (bits_per_sample) {
596 if (enc->channels == 0)
598 frame_size = (size << 3) / (bits_per_sample * enc->channels);
600 /* used for example by ADPCM codecs */
601 if (enc->bit_rate == 0)
603 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
606 frame_size = enc->frame_size;
613 * Return the frame duration in seconds. Return 0 if not available.
615 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
616 AVCodecParserContext *pc, AVPacket *pkt)
622 switch(st->codec->codec_type) {
623 case CODEC_TYPE_VIDEO:
624 if(st->time_base.num*1000LL > st->time_base.den){
625 *pnum = st->time_base.num;
626 *pden = st->time_base.den;
627 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
628 *pnum = st->codec->time_base.num;
629 *pden = st->codec->time_base.den;
630 if (pc && pc->repeat_pict) {
631 *pnum = (*pnum) * (1 + pc->repeat_pict);
635 case CODEC_TYPE_AUDIO:
636 frame_size = get_audio_frame_size(st->codec, pkt->size);
640 *pden = st->codec->sample_rate;
647 static int is_intra_only(AVCodecContext *enc){
648 if(enc->codec_type == CODEC_TYPE_AUDIO){
650 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
651 switch(enc->codec_id){
653 case CODEC_ID_MJPEGB:
655 case CODEC_ID_RAWVIDEO:
656 case CODEC_ID_DVVIDEO:
657 case CODEC_ID_HUFFYUV:
658 case CODEC_ID_FFVHUFF:
663 case CODEC_ID_JPEG2000:
671 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
672 int64_t dts, int64_t pts)
674 AVStream *st= s->streams[stream_index];
675 AVPacketList *pktl= s->packet_buffer;
677 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
680 st->first_dts= dts - st->cur_dts;
683 for(; pktl; pktl= pktl->next){
684 if(pktl->pkt.stream_index != stream_index)
686 //FIXME think more about this check
687 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
688 pktl->pkt.pts += st->first_dts;
690 if(pktl->pkt.dts != AV_NOPTS_VALUE)
691 pktl->pkt.dts += st->first_dts;
693 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
694 st->start_time= pktl->pkt.pts;
696 if (st->start_time == AV_NOPTS_VALUE)
697 st->start_time = pts;
700 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
702 AVPacketList *pktl= s->packet_buffer;
705 if(st->first_dts != AV_NOPTS_VALUE){
706 cur_dts= st->first_dts;
707 for(; pktl; pktl= pktl->next){
708 if(pktl->pkt.stream_index == pkt->stream_index){
709 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
711 cur_dts -= pkt->duration;
714 pktl= s->packet_buffer;
715 st->first_dts = cur_dts;
716 }else if(st->cur_dts)
719 for(; pktl; pktl= pktl->next){
720 if(pktl->pkt.stream_index != pkt->stream_index)
722 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
723 && !pktl->pkt.duration){
724 pktl->pkt.dts= cur_dts;
725 if(!st->codec->has_b_frames)
726 pktl->pkt.pts= cur_dts;
727 cur_dts += pkt->duration;
728 pktl->pkt.duration= pkt->duration;
732 if(st->first_dts == AV_NOPTS_VALUE)
733 st->cur_dts= cur_dts;
736 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
737 AVCodecParserContext *pc, AVPacket *pkt)
739 int num, den, presentation_delayed, delay, i;
742 /* do we have a video B-frame ? */
743 delay= st->codec->has_b_frames;
744 presentation_delayed = 0;
745 /* XXX: need has_b_frame, but cannot get it if the codec is
748 pc && pc->pict_type != FF_B_TYPE)
749 presentation_delayed = 1;
751 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
752 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
753 pkt->dts -= 1LL<<st->pts_wrap_bits;
756 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
757 // we take the conservative approach and discard both
758 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
759 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
760 av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n");
761 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
764 if (pkt->duration == 0) {
765 compute_frame_duration(&num, &den, st, pc, pkt);
767 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
769 if(pkt->duration != 0 && s->packet_buffer)
770 update_initial_durations(s, st, pkt);
774 /* correct timestamps with byte offset if demuxers only have timestamps
775 on packet boundaries */
776 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
777 /* this will estimate bitrate based on this frame's duration and size */
778 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
779 if(pkt->pts != AV_NOPTS_VALUE)
781 if(pkt->dts != AV_NOPTS_VALUE)
785 if (pc && pc->dts_sync_point >= 0) {
786 // we have synchronization info from the parser
787 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
789 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
790 if (pkt->dts != AV_NOPTS_VALUE) {
791 // got DTS from the stream, update reference timestamp
792 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
793 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
794 } else if (st->reference_dts != AV_NOPTS_VALUE) {
795 // compute DTS based on reference timestamp
796 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
797 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
799 if (pc->dts_sync_point > 0)
800 st->reference_dts = pkt->dts; // new reference
804 /* This may be redundant, but it should not hurt. */
805 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
806 presentation_delayed = 1;
808 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
809 /* interpolate PTS and DTS if they are not present */
810 //We skip H264 currently because delay and has_b_frames are not reliably set
811 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
812 if (presentation_delayed) {
813 /* DTS = decompression timestamp */
814 /* PTS = presentation timestamp */
815 if (pkt->dts == AV_NOPTS_VALUE)
816 pkt->dts = st->last_IP_pts;
817 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
818 if (pkt->dts == AV_NOPTS_VALUE)
819 pkt->dts = st->cur_dts;
821 /* this is tricky: the dts must be incremented by the duration
822 of the frame we are displaying, i.e. the last I- or P-frame */
823 if (st->last_IP_duration == 0)
824 st->last_IP_duration = pkt->duration;
825 if(pkt->dts != AV_NOPTS_VALUE)
826 st->cur_dts = pkt->dts + st->last_IP_duration;
827 st->last_IP_duration = pkt->duration;
828 st->last_IP_pts= pkt->pts;
829 /* cannot compute PTS if not present (we can compute it only
830 by knowing the future */
831 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
832 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
833 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
834 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
835 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
836 pkt->pts += pkt->duration;
837 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
841 /* presentation is not delayed : PTS and DTS are the same */
842 if(pkt->pts == AV_NOPTS_VALUE)
844 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
845 if(pkt->pts == AV_NOPTS_VALUE)
846 pkt->pts = st->cur_dts;
848 if(pkt->pts != AV_NOPTS_VALUE)
849 st->cur_dts = pkt->pts + pkt->duration;
853 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
854 st->pts_buffer[0]= pkt->pts;
855 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
856 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
857 if(pkt->dts == AV_NOPTS_VALUE)
858 pkt->dts= st->pts_buffer[0];
859 if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here
860 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
862 if(pkt->dts > st->cur_dts)
863 st->cur_dts = pkt->dts;
866 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
869 if(is_intra_only(st->codec))
870 pkt->flags |= PKT_FLAG_KEY;
873 /* keyframe computation */
874 if (pc->key_frame == 1)
875 pkt->flags |= PKT_FLAG_KEY;
876 else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
877 pkt->flags |= PKT_FLAG_KEY;
880 pkt->convergence_duration = pc->convergence_duration;
884 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
892 /* select current input stream component */
895 if (!st->need_parsing || !st->parser) {
896 /* no parsing needed: we just output the packet as is */
897 /* raw data support */
898 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
899 compute_pkt_fields(s, st, NULL, pkt);
901 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
902 (pkt->flags & PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
903 ff_reduce_index(s, st->index);
904 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
907 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
908 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
909 st->cur_ptr, st->cur_len,
910 st->cur_pkt.pts, st->cur_pkt.dts,
912 st->cur_pkt.pts = AV_NOPTS_VALUE;
913 st->cur_pkt.dts = AV_NOPTS_VALUE;
914 /* increment read pointer */
918 /* return packet if any */
922 pkt->stream_index = st->index;
923 pkt->pts = st->parser->pts;
924 pkt->dts = st->parser->dts;
925 pkt->pos = st->parser->pos;
926 pkt->destruct = av_destruct_packet_nofree;
927 compute_pkt_fields(s, st, st->parser, pkt);
929 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
930 ff_reduce_index(s, st->index);
931 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
932 0, 0, AVINDEX_KEYFRAME);
939 av_free_packet(&st->cur_pkt);
944 /* read next packet */
945 ret = av_read_packet(s, &cur_pkt);
947 if (ret == AVERROR(EAGAIN))
949 /* return the last frames, if any */
950 for(i = 0; i < s->nb_streams; i++) {
952 if (st->parser && st->need_parsing) {
953 av_parser_parse2(st->parser, st->codec,
954 &pkt->data, &pkt->size,
956 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
962 /* no more packets: really terminate parsing */
965 st = s->streams[cur_pkt.stream_index];
966 st->cur_pkt= cur_pkt;
968 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
969 st->cur_pkt.dts != AV_NOPTS_VALUE &&
970 st->cur_pkt.pts < st->cur_pkt.dts){
971 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
972 st->cur_pkt.stream_index,
976 // av_free_packet(&st->cur_pkt);
980 if(s->debug & FF_FDEBUG_TS)
981 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
982 st->cur_pkt.stream_index,
989 st->cur_ptr = st->cur_pkt.data;
990 st->cur_len = st->cur_pkt.size;
991 if (st->need_parsing && !st->parser) {
992 st->parser = av_parser_init(st->codec->codec_id);
994 /* no parser available: just output the raw packets */
995 st->need_parsing = AVSTREAM_PARSE_NONE;
996 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
997 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
999 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1000 st->parser->next_frame_offset=
1001 st->parser->cur_offset= st->cur_pkt.pos;
1006 if(s->debug & FF_FDEBUG_TS)
1007 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1017 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1021 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1024 pktl = s->packet_buffer;
1026 AVPacket *next_pkt= &pktl->pkt;
1028 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1029 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1030 if( pktl->pkt.stream_index == next_pkt->stream_index
1031 && next_pkt->dts < pktl->pkt.dts
1032 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1033 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1034 next_pkt->pts= pktl->pkt.dts;
1038 pktl = s->packet_buffer;
1041 if( next_pkt->pts != AV_NOPTS_VALUE
1042 || next_pkt->dts == AV_NOPTS_VALUE
1044 /* read packet from packet buffer, if there is data */
1046 s->packet_buffer = pktl->next;
1052 int ret= av_read_frame_internal(s, pkt);
1054 if(pktl && ret != AVERROR(EAGAIN)){
1061 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1062 &s->packet_buffer_end)) < 0)
1063 return AVERROR(ENOMEM);
1065 assert(!s->packet_buffer);
1066 return av_read_frame_internal(s, pkt);
1071 /* XXX: suppress the packet queue */
1072 static void flush_packet_queue(AVFormatContext *s)
1077 pktl = s->packet_buffer;
1080 s->packet_buffer = pktl->next;
1081 av_free_packet(&pktl->pkt);
1086 /*******************************************************/
1089 int av_find_default_stream_index(AVFormatContext *s)
1091 int first_audio_index = -1;
1095 if (s->nb_streams <= 0)
1097 for(i = 0; i < s->nb_streams; i++) {
1099 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1102 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1103 first_audio_index = i;
1105 return first_audio_index >= 0 ? first_audio_index : 0;
1109 * Flush the frame reader.
1111 static void av_read_frame_flush(AVFormatContext *s)
1116 flush_packet_queue(s);
1120 /* for each stream, reset read state */
1121 for(i = 0; i < s->nb_streams; i++) {
1125 av_parser_close(st->parser);
1127 av_free_packet(&st->cur_pkt);
1129 st->last_IP_pts = AV_NOPTS_VALUE;
1130 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1131 st->reference_dts = AV_NOPTS_VALUE;
1138 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1141 for(i = 0; i < s->nb_streams; i++) {
1142 AVStream *st = s->streams[i];
1144 st->cur_dts = av_rescale(timestamp,
1145 st->time_base.den * (int64_t)ref_st->time_base.num,
1146 st->time_base.num * (int64_t)ref_st->time_base.den);
1150 void ff_reduce_index(AVFormatContext *s, int stream_index)
1152 AVStream *st= s->streams[stream_index];
1153 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1155 if((unsigned)st->nb_index_entries >= max_entries){
1157 for(i=0; 2*i<st->nb_index_entries; i++)
1158 st->index_entries[i]= st->index_entries[2*i];
1159 st->nb_index_entries= i;
1163 int av_add_index_entry(AVStream *st,
1164 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1166 AVIndexEntry *entries, *ie;
1169 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1172 entries = av_fast_realloc(st->index_entries,
1173 &st->index_entries_allocated_size,
1174 (st->nb_index_entries + 1) *
1175 sizeof(AVIndexEntry));
1179 st->index_entries= entries;
1181 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1184 index= st->nb_index_entries++;
1185 ie= &entries[index];
1186 assert(index==0 || ie[-1].timestamp < timestamp);
1188 ie= &entries[index];
1189 if(ie->timestamp != timestamp){
1190 if(ie->timestamp <= timestamp)
1192 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1193 st->nb_index_entries++;
1194 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1195 distance= ie->min_distance;
1199 ie->timestamp = timestamp;
1200 ie->min_distance= distance;
1207 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1210 AVIndexEntry *entries= st->index_entries;
1211 int nb_entries= st->nb_index_entries;
1220 timestamp = entries[m].timestamp;
1221 if(timestamp >= wanted_timestamp)
1223 if(timestamp <= wanted_timestamp)
1226 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1228 if(!(flags & AVSEEK_FLAG_ANY)){
1229 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1230 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1241 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1242 AVInputFormat *avif= s->iformat;
1243 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1244 int64_t ts_min, ts_max, ts;
1248 if (stream_index < 0)
1252 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1256 ts_min= AV_NOPTS_VALUE;
1257 pos_limit= -1; //gcc falsely says it may be uninitialized
1259 st= s->streams[stream_index];
1260 if(st->index_entries){
1263 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1264 index= FFMAX(index, 0);
1265 e= &st->index_entries[index];
1267 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1269 ts_min= e->timestamp;
1271 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1278 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1279 assert(index < st->nb_index_entries);
1281 e= &st->index_entries[index];
1282 assert(e->timestamp >= target_ts);
1284 ts_max= e->timestamp;
1285 pos_limit= pos_max - e->min_distance;
1287 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1288 pos_max,pos_limit, ts_max);
1293 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1298 url_fseek(s->pb, pos, SEEK_SET);
1300 av_update_cur_dts(s, st, ts);
1305 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1307 int64_t start_pos, filesize;
1311 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1314 if(ts_min == AV_NOPTS_VALUE){
1315 pos_min = s->data_offset;
1316 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1317 if (ts_min == AV_NOPTS_VALUE)
1321 if(ts_max == AV_NOPTS_VALUE){
1323 filesize = url_fsize(s->pb);
1324 pos_max = filesize - 1;
1327 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1329 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1330 if (ts_max == AV_NOPTS_VALUE)
1334 int64_t tmp_pos= pos_max + 1;
1335 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1336 if(tmp_ts == AV_NOPTS_VALUE)
1340 if(tmp_pos >= filesize)
1346 if(ts_min > ts_max){
1348 }else if(ts_min == ts_max){
1353 while (pos_min < pos_limit) {
1355 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1359 assert(pos_limit <= pos_max);
1362 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1363 // interpolate position (better than dichotomy)
1364 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1365 + pos_min - approximate_keyframe_distance;
1366 }else if(no_change==1){
1367 // bisection, if interpolation failed to change min or max pos last time
1368 pos = (pos_min + pos_limit)>>1;
1370 /* linear search if bisection failed, can only happen if there
1371 are very few or no keyframes between min/max */
1376 else if(pos > pos_limit)
1380 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1386 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1387 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
1388 start_pos, no_change);
1390 if(ts == AV_NOPTS_VALUE){
1391 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1394 assert(ts != AV_NOPTS_VALUE);
1395 if (target_ts <= ts) {
1396 pos_limit = start_pos - 1;
1400 if (target_ts >= ts) {
1406 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1407 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1410 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1412 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1413 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1414 pos, ts_min, target_ts, ts_max);
1420 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1421 int64_t pos_min, pos_max;
1425 if (stream_index < 0)
1428 st= s->streams[stream_index];
1431 pos_min = s->data_offset;
1432 pos_max = url_fsize(s->pb) - 1;
1434 if (pos < pos_min) pos= pos_min;
1435 else if(pos > pos_max) pos= pos_max;
1437 url_fseek(s->pb, pos, SEEK_SET);
1440 av_update_cur_dts(s, st, ts);
/* Generic seek: look up 'timestamp' in the stream's index.  If the index
 * does not yet cover the target, resume reading packets (which extends the
 * index as a side effect) until a keyframe past the target is seen, then
 * retry the lookup and finally seek to the chosen index entry's position.
 * NOTE(review): elided chunk — several original lines are missing. */
1445 static int av_seek_frame_generic(AVFormatContext *s,
1446 int stream_index, int64_t timestamp, int flags)
1452 st = s->streams[stream_index];
1454 index = av_index_search_timestamp(st, timestamp, flags);
/* target lies beyond the current index: read forward to extend it */
1456 if(index < 0 || index==st->nb_index_entries-1){
1460 if(st->nb_index_entries){
1461 assert(st->index_entries);
/* resume from the last already-indexed position, not from the start */
1462 ie= &st->index_entries[st->nb_index_entries-1];
1463 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1465 av_update_cur_dts(s, st, ie->timestamp);
1467 if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
1473 ret = av_read_frame(s, &pkt);
1474 }while(ret == AVERROR(EAGAIN));
1477 av_free_packet(&pkt);
1478 if(stream_index == pkt.stream_index){
/* stop once a keyframe at/after the requested timestamp is reached */
1479 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1483 index = av_index_search_timestamp(st, timestamp, flags);
1488 av_read_frame_flush(s);
1489 if (s->iformat->read_seek){
1490 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1493 ie = &st->index_entries[index];
1494 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1496 av_update_cur_dts(s, st, ie->timestamp);
/* Public seek entry point.  Dispatch order: byte seek if requested, then
 * the demuxer's own read_seek, then binary search via read_timestamp, and
 * finally the index-based generic seek.
 * NOTE(review): elided chunk — declarations and some branches missing. */
1501 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1506 av_read_frame_flush(s);
1508 if(flags & AVSEEK_FLAG_BYTE)
1509 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1511 if(stream_index < 0){
1512 stream_index= av_find_default_stream_index(s);
1513 if(stream_index < 0)
1516 st= s->streams[stream_index];
/* timestamp for default must be expressed in AV_TIME_BASE units */
1517 /* timestamp for default must be expressed in AV_TIME_BASE units */
1518 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1521 /* first, we try the format specific seek */
1522 if (s->iformat->read_seek)
1523 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
/* fall back to binary search if the format can report timestamps */
1530 if(s->iformat->read_timestamp)
1531 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1533 return av_seek_frame_generic(s, stream_index, timestamp, flags);
/* New-style seek with an acceptable timestamp range [min_ts, max_ts].
 * Prefers the demuxer's read_seek2; otherwise falls back to the old
 * av_seek_frame() API, picking the BACKWARD flag when ts is closer to
 * min_ts than to max_ts.
 * NOTE(review): elided chunk — some lines are missing. */
1536 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
/* the requested range must actually contain the target timestamp */
1538 if(min_ts > ts || max_ts < ts)
1541 av_read_frame_flush(s);
1543 if (s->iformat->read_seek2)
1544 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1546 if(s->iformat->read_timestamp){
1547 //try to seek via read_timestamp()
1550 //Fallback to old API if new is not implemented but old is
1551 //Note the old has somewhat different semantics
1552 if(s->iformat->read_seek || 1)
1553 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1555 // try some generic seek like av_seek_frame_generic() but with new ts semantics
1558 /*******************************************************/
1561 * Return nonzero if at least one stream has a known duration.
1563 * @return nonzero if any component stream carries an accurate duration.
1565 static int av_has_duration(AVFormatContext *ic)
1570 for(i = 0;i < ic->nb_streams; i++) {
1571 st = ic->streams[i];
1572 if (st->duration != AV_NOPTS_VALUE)
1579 * Estimate the global start time and duration from the per-stream values.
1581 * Also computes the global bitrate if possible.
1583 static void av_update_stream_timings(AVFormatContext *ic)
1585 int64_t start_time, start_time1, end_time, end_time1;
1586 int64_t duration, duration1;
/* sentinels: start shrinks from INT64_MAX, end/duration grow from INT64_MIN */
1590 start_time = INT64_MAX;
1591 end_time = INT64_MIN;
1592 duration = INT64_MIN;
1593 for(i = 0;i < ic->nb_streams; i++) {
1594 st = ic->streams[i];
1595 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
/* convert per-stream values into AV_TIME_BASE units for comparison */
1596 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1597 if (start_time1 < start_time)
1598 start_time = start_time1;
1599 if (st->duration != AV_NOPTS_VALUE) {
1600 end_time1 = start_time1
1601 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1602 if (end_time1 > end_time)
1603 end_time = end_time1;
1606 if (st->duration != AV_NOPTS_VALUE) {
1607 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1608 if (duration1 > duration)
1609 duration = duration1;
1612 if (start_time != INT64_MAX) {
1613 ic->start_time = start_time;
1614 if (end_time != INT64_MIN) {
/* span between earliest start and latest end wins over any single stream */
1615 if (end_time - start_time > duration)
1616 duration = end_time - start_time;
1619 if (duration != INT64_MIN) {
1620 ic->duration = duration;
1621 if (ic->file_size > 0) {
1622 /* compute the bitrate */
1623 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1624 (double)ic->duration;
/* Propagate the container-level start_time/duration down to any stream
 * that has no start_time of its own, rescaling into that stream's time base. */
1629 static void fill_all_stream_timings(AVFormatContext *ic)
1634 av_update_stream_timings(ic);
1635 for(i = 0;i < ic->nb_streams; i++) {
1636 st = ic->streams[i];
1637 if (st->start_time == AV_NOPTS_VALUE) {
1638 if(ic->start_time != AV_NOPTS_VALUE)
1639 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1640 if(ic->duration != AV_NOPTS_VALUE)
1641 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Derive the container duration from total bitrate and file size when no
 * better source is available: duration = 8*filesize / bit_rate.
 * NOTE(review): elided chunk — 'bit_rate' declaration and some closing
 * lines are not visible here. */
1646 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1648 int64_t filesize, duration;
1652 /* if bit_rate is already set, we believe it */
1653 if (ic->bit_rate == 0) {
/* otherwise sum the per-stream codec bitrates */
1655 for(i=0;i<ic->nb_streams;i++) {
1656 st = ic->streams[i];
1657 bit_rate += st->codec->bit_rate;
1659 ic->bit_rate = bit_rate;
1662 /* if duration is already set, we believe it */
1663 if (ic->duration == AV_NOPTS_VALUE &&
1664 ic->bit_rate != 0 &&
1665 ic->file_size != 0) {
1666 filesize = ic->file_size;
1668 for(i = 0; i < ic->nb_streams; i++) {
1669 st = ic->streams[i];
/* 8*filesize/bit_rate, expressed directly in the stream's time base */
1670 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1671 if (st->duration == AV_NOPTS_VALUE)
1672 st->duration = duration;
/* How many bytes to scan at each end of the file for timestamps. */
1678 #define DURATION_MAX_READ_SIZE 250000
1680 /* only usable for MPEG-PS streams */
/* Estimate start times and durations by reading packets at the head of the
 * file (first PTS per stream) and near its tail (last PTS per stream),
 * then restores the original file position and resets decoding state.
 * NOTE(review): elided chunk — loop headers, 'end_time' declaration and
 * several control-flow lines are missing; code kept byte-identical. */
1681 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1683 AVPacket pkt1, *pkt = &pkt1;
1685 int read_size, i, ret;
1687 int64_t filesize, offset, duration;
1691 /* flush packet queue */
1692 flush_packet_queue(ic);
1694 for(i=0;i<ic->nb_streams;i++) {
1695 st = ic->streams[i];
1697 av_parser_close(st->parser);
1699 av_free_packet(&st->cur_pkt);
1703 /* we read the first packets to get the first PTS (not fully
1704 accurate, but it is enough now) */
1705 url_fseek(ic->pb, 0, SEEK_SET);
1708 if (read_size >= DURATION_MAX_READ_SIZE)
1710 /* if all info is available, we can stop */
1711 for(i = 0;i < ic->nb_streams; i++) {
1712 st = ic->streams[i];
1713 if (st->start_time == AV_NOPTS_VALUE)
1716 if (i == ic->nb_streams)
1720 ret = av_read_packet(ic, pkt);
1721 }while(ret == AVERROR(EAGAIN));
1724 read_size += pkt->size;
1725 st = ic->streams[pkt->stream_index];
/* record the first PTS seen for each stream as its start time */
1726 if (pkt->pts != AV_NOPTS_VALUE) {
1727 if (st->start_time == AV_NOPTS_VALUE)
1728 st->start_time = pkt->pts;
1730 av_free_packet(pkt);
1733 /* estimate the end time (duration) */
1734 /* XXX: may need to support wrapping */
1735 filesize = ic->file_size;
1736 offset = filesize - DURATION_MAX_READ_SIZE;
1740 url_fseek(ic->pb, offset, SEEK_SET);
1743 if (read_size >= DURATION_MAX_READ_SIZE)
1747 ret = av_read_packet(ic, pkt);
1748 }while(ret == AVERROR(EAGAIN));
1751 read_size += pkt->size;
1752 st = ic->streams[pkt->stream_index];
/* the last PTS per stream near EOF gives end_time, hence duration */
1753 if (pkt->pts != AV_NOPTS_VALUE &&
1754 st->start_time != AV_NOPTS_VALUE) {
1755 end_time = pkt->pts;
1756 duration = end_time - st->start_time;
1758 if (st->duration == AV_NOPTS_VALUE ||
1759 st->duration < duration)
1760 st->duration = duration;
1763 av_free_packet(pkt);
1766 fill_all_stream_timings(ic);
/* restore the caller's file position and per-stream decode state */
1768 url_fseek(ic->pb, old_offset, SEEK_SET);
1769 for(i=0; i<ic->nb_streams; i++){
1771 st->cur_dts= st->first_dts;
1772 st->last_IP_pts = AV_NOPTS_VALUE;
/* Top-level timing estimation: pick the best available strategy —
 * PTS scanning for seekable MPEG-PS/TS, per-stream durations when present,
 * otherwise the bitrate heuristic — then update global timings.
 * The trailing printf block appears to be debug-only output.
 * NOTE(review): elided chunk — 'file_size'/'st' declarations and some
 * branch lines are missing. */
1776 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1780 /* get the file size, if possible */
1781 if (ic->iformat->flags & AVFMT_NOFILE) {
1784 file_size = url_fsize(ic->pb);
1788 ic->file_size = file_size;
/* only MPEG-PS/TS with a seekable, sized input can be PTS-scanned */
1790 if ((!strcmp(ic->iformat->name, "mpeg") ||
1791 !strcmp(ic->iformat->name, "mpegts")) &&
1792 file_size && !url_is_streamed(ic->pb)) {
1793 /* get accurate estimate from the PTSes */
1794 av_estimate_timings_from_pts(ic, old_offset);
1795 } else if (av_has_duration(ic)) {
1796 /* at least one component has timings - we use them for all
1798 fill_all_stream_timings(ic);
1800 /* less precise: use bitrate info */
1801 av_estimate_timings_from_bit_rate(ic);
1803 av_update_stream_timings(ic);
1809 for(i = 0;i < ic->nb_streams; i++) {
1810 st = ic->streams[i];
1811 printf("%d: start_time: %0.3f duration: %0.3f\n",
1812 i, (double)st->start_time / AV_TIME_BASE,
1813 (double)st->duration / AV_TIME_BASE);
1815 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1816 (double)ic->start_time / AV_TIME_BASE,
1817 (double)ic->duration / AV_TIME_BASE,
1818 ic->bit_rate / 1000);
/* Return nonzero once enough codec parameters are known to use the stream:
 * audio needs sample rate/channels/sample format (and a frame size for
 * Vorbis/AAC), video needs width and a pixel format.  A valid codec_id is
 * required in all cases. */
1823 static int has_codec_parameters(AVCodecContext *enc)
1826 switch(enc->codec_type) {
1827 case CODEC_TYPE_AUDIO:
1828 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
/* Vorbis/AAC additionally require frame_size before they are usable */
1829 if(!enc->frame_size &&
1830 (enc->codec_id == CODEC_ID_VORBIS ||
1831 enc->codec_id == CODEC_ID_AAC))
1834 case CODEC_TYPE_VIDEO:
1835 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1841 return enc->codec_id != CODEC_ID_NONE && val != 0;
/* Open the stream's decoder (if not already open) and decode one packet,
 * in the hope that decoding fills in the missing codec parameters.
 * NOTE(review): elided chunk — 'codec'/'picture'/'samples' declarations,
 * error handling and the function tail are not visible here. */
1844 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1848 int got_picture, data_size, ret=0;
1851 if(!st->codec->codec){
1852 codec = avcodec_find_decoder(st->codec->codec_id);
1855 ret = avcodec_open(st->codec, codec);
/* only decode if parameters are still incomplete */
1860 if(!has_codec_parameters(st->codec)){
1861 switch(st->codec->codec_type) {
1862 case CODEC_TYPE_VIDEO:
1863 ret = avcodec_decode_video(st->codec, &picture,
1864 &got_picture, data, size);
1866 case CODEC_TYPE_AUDIO:
/* audio needs an output buffer large enough for a full decoded frame */
1867 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1868 samples = av_malloc(data_size);
1871 ret = avcodec_decode_audio2(st->codec, samples,
1872 &data_size, data, size);
/* Look up the container tag registered for codec 'id' in a
 * CODEC_ID_NONE-terminated AVCodecTag table.
 * NOTE(review): the loop body and return are not visible in this elided
 * chunk — presumably returns tags->tag on match, 0 otherwise; confirm. */
1883 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1885 while (tags->id != CODEC_ID_NONE) {
/* Map a container tag to a codec id: first an exact match, then a
 * case-insensitive byte-by-byte comparison of the four tag bytes.
 * Each byte is masked to 0..255 before toupper(), keeping the call within
 * toupper()'s valid domain. */
1893 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1896 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1897 if(tag == tags[i].tag)
/* second pass: tolerate case differences in FOURCC-style tags */
1900 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1901 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1902 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1903 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1904 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1907 return CODEC_ID_NONE;
/* Search a NULL-terminated list of AVCodecTag tables for a tag matching
 * 'id'.  NOTE(review): the return statements are not visible in this
 * elided chunk. */
1910 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
1913 for(i=0; tags && tags[i]; i++){
1914 int tag= codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of AVCodecTag tables for a codec id
 * matching 'tag'; CODEC_ID_NONE if no table knows the tag. */
1920 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
1923 for(i=0; tags && tags[i]; i++){
1924 enum CodecID id= codec_get_id(tags[i], tag);
1925 if(id!=CODEC_ID_NONE) return id;
1927 return CODEC_ID_NONE;
/* Fill in missing chapter end times: each open-ended chapter ends where
 * the next one starts; the final chapter ends at start_time + duration.
 * Relies (via assert) on chapters being sorted and sharing a time base. */
1930 static void compute_chapters_end(AVFormatContext *s)
1934 for (i=0; i+1<s->nb_chapters; i++)
1935 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
1936 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1937 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1938 s->chapters[i]->end = s->chapters[i+1]->start;
/* last chapter: clamp its end to the end of the whole presentation */
1941 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1942 assert(s->start_time != AV_NOPTS_VALUE);
1943 assert(s->duration > 0);
1944 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1946 s->chapters[i]->time_base);
1950 /* absolute maximum size we read until we abort */
1951 #define MAX_READ_SIZE 5000000
/* 60*12 scaled integer rates plus 5 common standard rates (see below) */
1953 #define MAX_STD_TIMEBASES (60*12+5)
/* Return candidate framerate i in units of 1/(1001*12) Hz: indices below
 * 60*12 enumerate i*1001; the last five are 24/30/60/12/15 fps * 1000*12. */
1954 static int get_std_framerate(int i){
1955 if(i<60*12) return i*1001;
1956 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
1960 * Is the time base unreliable?
1961 * This is a heuristic to balance between quick acceptance of the values in
1962 * the headers vs. some extra checks.
1963 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1964 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1965 * And there are "variable" fps files this needs to detect as well.
/* NOTE(review): the condition's tail and return values are not visible in
 * this elided chunk; code kept byte-identical. */
1967 static int tb_unreliable(AVCodecContext *c){
1968 if( c->time_base.den >= 101L*c->time_base.num
1969 || c->time_base.den < 5L*c->time_base.num
1970 /* || c->codec_tag == AV_RL32("DIVX")
1971 || c->codec_tag == AV_RL32("XVID")*/
1972 || c->codec_id == CODEC_ID_MPEG2VIDEO
1973 || c->codec_id == CODEC_ID_H264
/* Probe the input by reading packets until every stream has usable codec
 * parameters (or limits are hit), collecting DTS-delta statistics to guess
 * the real frame rate, extracting extradata via parser split, and finally
 * estimating timings.  Read packets are buffered so demuxing can continue
 * afterwards.
 * NOTE(review): elided chunk — many lines (loop heads, 'st' declaration,
 * break/return statements, closing braces) are missing; code kept
 * byte-identical. */
1979 int av_find_stream_info(AVFormatContext *ic)
1981 int i, count, ret, read_size, j;
1983 AVPacket pkt1, *pkt;
1984 int64_t last_dts[MAX_STREAMS];
1985 int64_t duration_gcd[MAX_STREAMS]={0};
1986 int duration_count[MAX_STREAMS]={0};
1987 double (*duration_error)[MAX_STD_TIMEBASES];
1988 int64_t old_offset = url_ftell(ic->pb);
1989 int64_t codec_info_duration[MAX_STREAMS]={0};
1990 int codec_info_nb_frames[MAX_STREAMS]={0};
/* heap-allocated: MAX_STREAMS * MAX_STD_TIMEBASES doubles is too big for stack */
1992 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
1993 if (!duration_error) return AVERROR(ENOMEM);
1995 for(i=0;i<ic->nb_streams;i++) {
1996 st = ic->streams[i];
1997 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1998 /* if(!st->time_base.num)
2000 if(!st->codec->time_base.num)
2001 st->codec->time_base= st->time_base;
2003 //only for the split stuff
2005 st->parser = av_parser_init(st->codec->codec_id);
2006 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2007 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2012 for(i=0;i<MAX_STREAMS;i++){
2013 last_dts[i]= AV_NOPTS_VALUE;
2019 if(url_interrupt_cb()){
2020 ret= AVERROR(EINTR);
2024 /* check if one codec still needs to be handled */
2025 for(i=0;i<ic->nb_streams;i++) {
2026 st = ic->streams[i];
2027 if (!has_codec_parameters(st->codec))
2029 /* variable fps and no guess at the real fps */
2030 if( tb_unreliable(st->codec)
2031 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2033 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2035 if(st->first_dts == AV_NOPTS_VALUE)
2038 if (i == ic->nb_streams) {
2039 /* NOTE: if the format has no header, then we need to read
2040 some packets to get most of the streams, so we cannot
2042 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2043 /* if we found the info for all the codecs, we can stop */
2048 /* we did not get all the codec info, but we read too much data */
2049 if (read_size >= MAX_READ_SIZE) {
2054 /* NOTE: a new stream can be added there if no header in file
2055 (AVFMTCTX_NOHEADER) */
2056 ret = av_read_frame_internal(ic, &pkt1);
2057 if(ret == AVERROR(EAGAIN))
2061 ret = -1; /* we could not have all the codec parameters before EOF */
2062 for(i=0;i<ic->nb_streams;i++) {
2063 st = ic->streams[i];
2064 if (!has_codec_parameters(st->codec)){
2066 avcodec_string(buf, sizeof(buf), st->codec, 0);
2067 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
/* keep probed packets so av_read_frame() can replay them later */
2075 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2076 if(av_dup_packet(pkt) < 0) {
2077 av_free(duration_error);
2078 return AVERROR(ENOMEM);
2081 read_size += pkt->size;
2083 st = ic->streams[pkt->stream_index];
2084 if(codec_info_nb_frames[st->index]>1) {
2085 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration)
2087 codec_info_duration[st->index] += pkt->duration;
2089 if (pkt->duration != 0)
2090 codec_info_nb_frames[st->index]++;
/* accumulate DTS-delta statistics for frame-rate guessing below */
2093 int index= pkt->stream_index;
2094 int64_t last= last_dts[index];
2095 int64_t duration= pkt->dts - last;
2097 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2098 double dur= duration * av_q2d(st->time_base);
2100 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2101 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2102 if(duration_count[index] < 2)
2103 memset(duration_error[index], 0, sizeof(*duration_error));
/* score every standard framerate candidate by squared rounding error */
2104 for(i=1; i<MAX_STD_TIMEBASES; i++){
2105 int framerate= get_std_framerate(i);
2106 int ticks= lrintf(dur*framerate/(1001*12));
2107 double error= dur - ticks*1001*12/(double)framerate;
2108 duration_error[index][i] += error*error;
2110 duration_count[index]++;
2111 // ignore the first 4 values, they might have some random jitter
2112 if (duration_count[index] > 3)
2113 duration_gcd[index] = av_gcd(duration_gcd[index], duration);
2115 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2116 last_dts[pkt->stream_index]= pkt->dts;
/* let the parser extract extradata (e.g. headers) from the bitstream */
2118 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2119 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2121 st->codec->extradata_size= i;
2122 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2123 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2124 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2128 /* if still no information, we try to open the codec and to
2129 decompress the frame. We try to avoid that in most cases as
2130 it takes longer and uses more memory. For MPEG-4, we need to
2131 decompress for QuickTime. */
2132 if (!has_codec_parameters(st->codec) /*&&
2133 (st->codec->codec_id == CODEC_ID_FLV1 ||
2134 st->codec->codec_id == CODEC_ID_H264 ||
2135 st->codec->codec_id == CODEC_ID_H263 ||
2136 st->codec->codec_id == CODEC_ID_H261 ||
2137 st->codec->codec_id == CODEC_ID_VORBIS ||
2138 st->codec->codec_id == CODEC_ID_MJPEG ||
2139 st->codec->codec_id == CODEC_ID_PNG ||
2140 st->codec->codec_id == CODEC_ID_PAM ||
2141 st->codec->codec_id == CODEC_ID_PGM ||
2142 st->codec->codec_id == CODEC_ID_PGMYUV ||
2143 st->codec->codec_id == CODEC_ID_PBM ||
2144 st->codec->codec_id == CODEC_ID_PPM ||
2145 st->codec->codec_id == CODEC_ID_SHORTEN ||
2146 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2147 try_decode_frame(st, pkt->data, pkt->size);
2152 // close codecs which were opened in try_decode_frame()
2153 for(i=0;i<ic->nb_streams;i++) {
2154 st = ic->streams[i];
2155 if(st->codec->codec)
2156 avcodec_close(st->codec);
2158 for(i=0;i<ic->nb_streams;i++) {
2159 st = ic->streams[i];
2160 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2161 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2162 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2164 // the check for tb_unreliable() is not completely correct, since this is not about handling
2165 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2166 // ipmovie.c produces.
2167 if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1)
2168 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX);
2169 if(duration_count[i]
2170 && tb_unreliable(st->codec) /*&&
2171 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2172 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
/* pick the standard framerate with the lowest accumulated error */
2174 double best_error= 2*av_q2d(st->time_base);
2175 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2177 for(j=1; j<MAX_STD_TIMEBASES; j++){
2178 double error= duration_error[i][j] * get_std_framerate(j);
2179 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2180 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2181 if(error < best_error){
2183 num = get_std_framerate(j);
2186 // do not increase frame rate by more than 1 % in order to match a standard rate.
2187 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2188 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2191 if (!st->r_frame_rate.num){
/* fall back to the codec timebase (accounting for ticks_per_frame) or
 * the stream timebase, whichever is coarser */
2192 if( st->codec->time_base.den * (int64_t)st->time_base.num
2193 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2194 st->r_frame_rate.num = st->codec->time_base.den;
2195 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2197 st->r_frame_rate.num = st->time_base.den;
2198 st->r_frame_rate.den = st->time_base.num;
2201 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2202 if(!st->codec->bits_per_coded_sample)
2203 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2207 av_estimate_timings(ic, old_offset);
2209 compute_chapters_end(ic);
2212 /* correct DTS for B-frame streams with no timestamps */
2213 for(i=0;i<ic->nb_streams;i++) {
2214 st = ic->streams[i];
2215 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2217 ppktl = &ic->packet_buffer;
2219 if(ppkt1->stream_index != i)
2221 if(ppkt1->pkt->dts < 0)
2223 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2225 ppkt1->pkt->dts -= delta;
2230 st->cur_dts -= delta;
2236 av_free(duration_error);
2241 /*******************************************************/
/* Resume a paused (network) stream: demuxer hook first, then the protocol
 * pause API; ENOSYS when neither path applies.
 * NOTE(review): elided chunk — the condition guarding the fpause call is
 * not visible here. */
2243 int av_read_play(AVFormatContext *s)
2245 if (s->iformat->read_play)
2246 return s->iformat->read_play(s);
2248 return av_url_read_fpause(s->pb, 0);
2249 return AVERROR(ENOSYS);
/* Pause a (network) stream: mirror of av_read_play() with pause=1.
 * NOTE(review): elided chunk — the condition guarding the fpause call is
 * not visible here. */
2252 int av_read_pause(AVFormatContext *s)
2254 if (s->iformat->read_pause)
2255 return s->iformat->read_pause(s);
2257 return av_url_read_fpause(s->pb, 1);
2258 return AVERROR(ENOSYS);
/* Tear down an input context: demuxer close hook, then free every stream
 * (parser, pending packet, metadata, index, extradata), all programs,
 * chapters, the packet queue and private data.  Does not close s->pb.
 * NOTE(review): elided chunk — 'st' assignment, av_freep of streams and
 * the final free of 's' itself are not visible here. */
2261 void av_close_input_stream(AVFormatContext *s)
2266 if (s->iformat->read_close)
2267 s->iformat->read_close(s);
2268 for(i=0;i<s->nb_streams;i++) {
2269 /* free all data in a stream component */
2272 av_parser_close(st->parser);
2273 av_free_packet(&st->cur_pkt);
2275 av_metadata_free(&st->metadata);
2276 av_free(st->index_entries);
2277 av_free(st->codec->extradata);
/* pre-53 ABI kept per-stream filename; freed only for old versions */
2279 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2280 av_free(st->filename);
2282 av_free(st->priv_data);
2285 for(i=s->nb_programs-1; i>=0; i--) {
2286 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2287 av_freep(&s->programs[i]->provider_name);
2288 av_freep(&s->programs[i]->name);
2290 av_metadata_free(&s->programs[i]->metadata);
2291 av_freep(&s->programs[i]->stream_index);
2292 av_freep(&s->programs[i]);
2294 av_freep(&s->programs);
2295 flush_packet_queue(s);
2296 av_freep(&s->priv_data);
2297 while(s->nb_chapters--) {
2298 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2299 av_free(s->chapters[s->nb_chapters]->title);
2301 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2302 av_free(s->chapters[s->nb_chapters]);
2304 av_freep(&s->chapters);
2305 av_metadata_free(&s->metadata);
/* Close an input file: capture pb before the context is destroyed (AVFMT_NOFILE
 * formats own no ByteIOContext), close the stream, then presumably close pb
 * on lines not visible in this elided chunk. */
2309 void av_close_input_file(AVFormatContext *s)
2311 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2312 av_close_input_stream(s);
/* Allocate and register a new AVStream on 's' with sane defaults:
 * unset timestamps, a 90kHz MPEG-like timebase, and an empty PTS
 * reorder buffer.
 * NOTE(review): elided chunk — NULL checks after the allocations and the
 * 'st->id' assignment are not visible here. */
2317 AVStream *av_new_stream(AVFormatContext *s, int id)
2322 if (s->nb_streams >= MAX_STREAMS)
2325 st = av_mallocz(sizeof(AVStream));
2329 st->codec= avcodec_alloc_context();
2331 /* no default bitrate if decoding */
2332 st->codec->bit_rate = 0;
2334 st->index = s->nb_streams;
2336 st->start_time = AV_NOPTS_VALUE;
2337 st->duration = AV_NOPTS_VALUE;
2338 /* we set the current DTS to 0 so that formats without any timestamps
2339 but durations get some timestamps, formats with some unknown
2340 timestamps have their first few packets buffered and the
2341 timestamps corrected before they are returned to the user */
2343 st->first_dts = AV_NOPTS_VALUE;
2345 /* default pts setting is MPEG-like */
2346 av_set_pts_info(st, 33, 1, 90000);
2347 st->last_IP_pts = AV_NOPTS_VALUE;
2348 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2349 st->pts_buffer[i]= AV_NOPTS_VALUE;
2350 st->reference_dts = AV_NOPTS_VALUE;
2352 st->sample_aspect_ratio = (AVRational){0,1};
2354 s->streams[s->nb_streams++] = st;
/* Find the program with the given id, or create and register a new one
 * (discard defaults to AVDISCARD_NONE).
 * NOTE(review): elided chunk — the NULL check after av_mallocz and the
 * id assignment/return are not visible here. */
2358 AVProgram *av_new_program(AVFormatContext *ac, int id)
2360 AVProgram *program=NULL;
2364 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2367 for(i=0; i<ac->nb_programs; i++)
2368 if(ac->programs[i]->id == id)
2369 program = ac->programs[i];
2372 program = av_mallocz(sizeof(AVProgram));
2375 dynarray_add(&ac->programs, &ac->nb_programs, program);
2376 program->discard = AVDISCARD_NONE;
/* Find the chapter with the given id, or create one; then (re)set its
 * title metadata, time base, start and (on lines not visible here) end.
 * NOTE(review): elided chunk — allocation NULL check, id/end assignment
 * and the return are missing. */
2383 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2385 AVChapter *chapter = NULL;
2388 for(i=0; i<s->nb_chapters; i++)
2389 if(s->chapters[i]->id == id)
2390 chapter = s->chapters[i];
2393 chapter= av_mallocz(sizeof(AVChapter));
2396 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
/* pre-53 ABI stored the title directly on the chapter; freed before reset */
2398 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2399 av_free(chapter->title);
2401 av_metadata_set(&chapter->metadata, "title", title);
2403 chapter->time_base= time_base;
2404 chapter->start = start;
2410 /************************************************************/
2411 /* output media file */
/* Allocate the muxer's private data and pass the user parameters to the
 * output format's set_parameters hook, if any.
 * NOTE(review): elided chunk — the NULL check after av_mallocz and the
 * final return are not visible here. */
2413 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2417 if (s->oformat->priv_data_size > 0) {
2418 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2420 return AVERROR(ENOMEM);
2422 s->priv_data = NULL;
2424 if (s->oformat->set_parameters) {
2425 ret = s->oformat->set_parameters(s, ap);
/* Validate per-stream parameters (sample rate, time base, dimensions,
 * aspect ratio, codec tag), run format-compat fixups, call the muxer's
 * write_header, and initialise per-stream fractional PTS state.
 * NOTE(review): elided chunk — 'st' assignments, error returns and some
 * braces are not visible here; code kept byte-identical. */
2432 int av_write_header(AVFormatContext *s)
2437 // some sanity checks
2438 for(i=0;i<s->nb_streams;i++) {
2441 switch (st->codec->codec_type) {
2442 case CODEC_TYPE_AUDIO:
2443 if(st->codec->sample_rate<=0){
2444 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
/* derive block_align for fixed-size PCM-style codecs when unset */
2447 if(!st->codec->block_align)
2448 st->codec->block_align = st->codec->channels *
2449 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2451 case CODEC_TYPE_VIDEO:
2452 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2453 av_log(s, AV_LOG_ERROR, "time base not set\n");
2456 if(st->codec->width<=0 || st->codec->height<=0){
2457 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2460 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2461 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2467 if(s->oformat->codec_tag){
2468 if(st->codec->codec_tag){
2470 //check that tag + id is in the table
2471 //if neither is in the table -> OK
2472 //if tag is in the table with another id -> FAIL
2473 //if id is in the table with another tag -> FAIL unless strict < ?
2475 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2478 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2479 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2480 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2483 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2484 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2486 return AVERROR(ENOMEM);
2489 #if LIBAVFORMAT_VERSION_MAJOR < 53
2490 ff_metadata_mux_compat(s);
2493 if(s->oformat->write_header){
2494 ret = s->oformat->write_header(s);
2499 /* init PTS generation */
2500 for(i=0;i<s->nb_streams;i++) {
2501 int64_t den = AV_NOPTS_VALUE;
2504 switch (st->codec->codec_type) {
2505 case CODEC_TYPE_AUDIO:
2506 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2508 case CODEC_TYPE_VIDEO:
2509 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2514 if (den != AV_NOPTS_VALUE) {
2516 return AVERROR_INVALIDDATA;
2517 av_frac_init(&st->pts, 0, 0, den);
2523 //FIXME merge with compute_pkt_fields
/* Fill in missing duration/pts/dts on an outgoing packet, enforce
 * monotonically increasing DTS and pts >= dts, then advance the stream's
 * fractional PTS counter by one audio frame or one video frame.
 * NOTE(review): elided chunk — several error returns and braces are not
 * visible here; code kept byte-identical. */
2524 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2525 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2526 int num, den, frame_size, i;
2528 //    av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2530 /*    if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2533 /* duration field */
2534 if (pkt->duration == 0) {
2535 compute_frame_duration(&num, &den, st, NULL, pkt);
2537 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
/* without B-frame delay, pts can be copied straight from dts */
2541 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2544 //XXX/FIXME this is a temporary hack until all encoders output pts
2545 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2547 //        pkt->pts= st->cur_dts;
2548 pkt->pts= st->pts.val;
2551 //calculate dts from pts
/* keep a sorted window of recent pts values; the smallest becomes dts */
2552 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2553 st->pts_buffer[0]= pkt->pts;
2554 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2555 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2556 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2557 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2559 pkt->dts= st->pts_buffer[0];
2562 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2563 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2566 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2567 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2571 //    av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2572 st->cur_dts= pkt->dts;
2573 st->pts.val= pkt->dts;
2576 switch (st->codec->codec_type) {
2577 case CODEC_TYPE_AUDIO:
2578 frame_size = get_audio_frame_size(st->codec, pkt->size);
2580 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2581 likely equal to the encoder delay, but it would be better if we
2582 had the real timestamps from the encoder */
2583 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2584 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2587 case CODEC_TYPE_VIDEO:
2588 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Write one packet without interleaving: fix up its fields, call the
 * muxer's write_packet, and report any ByteIOContext error.
 * NOTE(review): elided chunk — the return statements are not visible. */
2596 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2598 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
/* timestamp problems are only fatal for formats that need timestamps */
2600 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2603 ret= s->oformat->write_packet(s, pkt);
2605 ret= url_ferror(s->pb);
/* Insert 'pkt' into the context's sorted packet buffer, keeping ordering
 * per the supplied comparison function.  Ownership of the packet data is
 * transferred to the buffer (dup'd only when the data is shared).
 * NOTE(review): elided chunk — the loop head around the comparison is not
 * visible here. */
2609 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2610 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2612 AVPacketList **next_point, *this_pktl;
2614 this_pktl = av_mallocz(sizeof(AVPacketList));
2615 this_pktl->pkt= *pkt;
2616 if(pkt->destruct == av_destruct_packet)
2617 pkt->destruct= NULL; // not shared -> must keep original from being freed
2619 av_dup_packet(&this_pktl->pkt); //shared -> must dup
/* walk the list until the comparison says this packet sorts before */
2621 next_point = &s->packet_buffer;
2623 if(compare(s, &(*next_point)->pkt, pkt))
2625 next_point= &(*next_point)->next;
2627 this_pktl->next= *next_point;
2628 *next_point= this_pktl;
/* Compare two packets by DTS converted to a common scale via
 * cross-multiplied time bases (avoids a division; can overflow per the
 * in-code FIXME).  Returns nonzero when 'next' sorts after 'pkt'. */
2631 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2633 AVStream *st = s->streams[ pkt ->stream_index];
2634 AVStream *st2= s->streams[ next->stream_index];
2635 int64_t left = st2->time_base.num * (int64_t)st ->time_base.den;
2636 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2638 if (pkt->dts == AV_NOPTS_VALUE)
2641 return next->dts * left > pkt->dts * right; //FIXME this can overflow
/* DTS-based interleaver: buffer incoming packets sorted by DTS and emit
 * the head packet once every stream has at least one buffered packet (or
 * when flushing at EOF).
 * NOTE(review): elided chunk — 'pktl'/'stream_count' declarations, the
 * loop structure and returns are not visible here. */
2644 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2647 int streams[MAX_STREAMS];
2650 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
2653 memset(streams, 0, sizeof(streams));
2654 pktl= s->packet_buffer;
2656 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
/* count how many distinct streams currently have buffered packets */
2657 if(streams[ pktl->pkt.stream_index ] == 0)
2659 streams[ pktl->pkt.stream_index ]++;
/* safe to emit only when every stream is represented, or when flushing */
2663 if(stream_count && (s->nb_streams == stream_count || flush)){
2664 pktl= s->packet_buffer;
2667 s->packet_buffer= pktl->next;
2671 av_init_packet(out);
2677 * Interleaves an AVPacket correctly so it can be muxed.
2678 * @param out the interleaved packet will be output here
2679 * @param in the input packet
2680 * @param flush 1 if no further packets are available as input and all
2681 *              remaining packets should be output
2682 * @return 1 if a packet was output, 0 if no packet could be output,
2683 *         < 0 if an error occurred
/* Dispatch: the muxer's own interleaver if provided, else the generic
 * per-DTS interleaver. */
2685 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2686 if(s->oformat->interleave_packet)
2687 return s->oformat->interleave_packet(s, out, in, flush);
2689 return av_interleave_packet_per_dts(s, out, in, flush);
/* Interleaving write path: fix packet fields, push the packet through the
 * interleaver, and write out every packet it releases.
 * NOTE(review): elided chunk — 'opkt' declaration, the drain loop head and
 * returns are not visible here. */
2692 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2693 AVStream *st= s->streams[ pkt->stream_index];
2695 //FIXME/XXX/HACK drop zero sized packets
2696 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2699 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2700 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2703 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2708 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2709 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2712 ret= s->oformat->write_packet(s, &opkt);
2714 av_free_packet(&opkt);
2719 if(url_ferror(s->pb))
2720 return url_ferror(s->pb);
/*
 * Finish muxing: flush all remaining interleaved packets to the muxer,
 * call the format's write_trailer callback, then free each stream's and
 * the muxer's private data. Returns 0 or a byte-stream/muxer error.
 * NOTE(review): corrupted residue — stray numeric prefixes and missing
 * statements (locals 'ret'/'i'/'pkt', the flush loop header, break/goto
 * paths, braces); restore from upstream FFmpeg before compiling.
 */
2724 int av_write_trailer(AVFormatContext *s)
/* flush loop: keep asking the interleaver for packets with flush=1 */
2730 ret= av_interleave_packet(s, &pkt, NULL, 1);
2731 if(ret<0) //FIXME cleanup needed for ret<0 ?
2736 ret= s->oformat->write_packet(s, &pkt);
2738 av_free_packet(&pkt);
2742 if(url_ferror(s->pb))
2746 if(s->oformat->write_trailer)
2747 ret = s->oformat->write_trailer(s);
2750 ret=url_ferror(s->pb);
/* release per-stream and format private data regardless of errors */
2751 for(i=0;i<s->nb_streams;i++)
2752 av_freep(&s->streams[i]->priv_data);
2753 av_freep(&s->priv_data);
/*
 * Register stream index 'idx' with the program whose id is 'progid'.
 * No-op if the stream is already listed; grows the program's
 * stream_index array by one otherwise.
 * NOTE(review): corrupted residue — stray numeric prefixes and missing
 * statements (locals 'i'/'j'/'tmp', 'continue'/'return' bodies, the
 * realloc-failure check, braces); restore from upstream before compiling.
 */
2757 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2760 AVProgram *program=NULL;
/* locate the program with the requested id */
2763 for(i=0; i<ac->nb_programs; i++){
2764 if(ac->programs[i]->id != progid)
2766 program = ac->programs[i];
/* already present? then there is nothing to do */
2767 for(j=0; j<program->nb_stream_indexes; j++)
2768 if(program->stream_index[j] == idx)
/* append the new index (realloc result checked in missing lines) */
2771 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2774 program->stream_index = tmp;
2775 program->stream_index[program->nb_stream_indexes++] = idx;
2780 static void print_fps(double d, const char *postfix){
2781 uint64_t v= lrintf(d*100);
2782 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
2783 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
2784 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
2787 /* "user interface" functions */
/*
 * Log a one-line description of stream 'i' of context 'ic' (codec string,
 * id, language, aspect ratio, frame/timebase rates) at AV_LOG_INFO level.
 * NOTE(review): corrupted residue — stray numeric prefixes and missing
 * statements ('buf' declaration, the lang NULL-check, av_reduce's last
 * argument, braces); restore from upstream FFmpeg before compiling.
 */
2788 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2791 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2792 AVStream *st = ic->streams[i];
/* g reduces the timebase fraction for the debug print below */
2793 int g = av_gcd(st->time_base.num, st->time_base.den);
2794 AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
2795 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2796 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2797 /* the pid is an important information, so we display it */
2798 /* XXX: add a generic system */
2799 if (flags & AVFMT_SHOW_IDS)
2800 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2802 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
2803 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2804 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* only print aspect info when it differs from the codec-level value */
2805 if (st->sample_aspect_ratio.num && // default
2806 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
2807 AVRational display_aspect_ratio;
2808 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
2809 st->codec->width*st->sample_aspect_ratio.num,
2810 st->codec->height*st->sample_aspect_ratio.den,
2812 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
2813 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2814 display_aspect_ratio.num, display_aspect_ratio.den);
/* video streams: real frame rate (tbr), stream (tbn) and codec (tbc) bases */
2816 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2817 if(st->r_frame_rate.den && st->r_frame_rate.num)
2818 print_fps(av_q2d(st->r_frame_rate), "tbr");
2819 if(st->time_base.den && st->time_base.num)
2820 print_fps(1/av_q2d(st->time_base), "tbn");
2821 if(st->codec->time_base.den && st->codec->time_base.num)
2822 print_fps(1/av_q2d(st->codec->time_base), "tbc");
2824 av_log(NULL, AV_LOG_INFO, "\n");
/*
 * Log a human-readable summary of the (de)muxing context: format name,
 * duration, start time, bitrate, programs, and one line per stream via
 * dump_stream_format().
 * NOTE(review): corrupted residue — stray numeric prefixes and missing
 * statements (parameter list tail, locals, hour/minute math, else/brace
 * lines); restore from upstream FFmpeg before compiling.
 */
2827 void dump_format(AVFormatContext *ic,
2834 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2835 is_output ? "Output" : "Input",
2837 is_output ? ic->oformat->name : ic->iformat->name,
2838 is_output ? "to" : "from", url);
2840 av_log(NULL, AV_LOG_INFO, " Duration: ");
2841 if (ic->duration != AV_NOPTS_VALUE) {
2842 int hours, mins, secs, us;
/* split microsecond duration into H:M:S plus centiseconds */
2843 secs = ic->duration / AV_TIME_BASE;
2844 us = ic->duration % AV_TIME_BASE;
2849 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2850 (100 * us) / AV_TIME_BASE);
2852 av_log(NULL, AV_LOG_INFO, "N/A");
2854 if (ic->start_time != AV_NOPTS_VALUE) {
2856 av_log(NULL, AV_LOG_INFO, ", start: ");
2857 secs = ic->start_time / AV_TIME_BASE;
2858 us = ic->start_time % AV_TIME_BASE;
2859 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2860 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2862 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2864 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2866 av_log(NULL, AV_LOG_INFO, "N/A");
2868 av_log(NULL, AV_LOG_INFO, "\n");
/* list streams grouped by program when programs are present */
2870 if(ic->nb_programs) {
2872 for(j=0; j<ic->nb_programs; j++) {
2873 AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
2875 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2876 name ? name->value : "");
2877 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2878 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
/* otherwise dump every stream flat */
2881 for(i=0;i<ic->nb_streams;i++)
2882 dump_stream_format(ic, i, index, is_output);
2885 #if LIBAVFORMAT_VERSION_MAJOR < 53
/**
 * @deprecated Thin compatibility wrapper kept for the pre-53 ABI;
 * use av_parse_video_frame_size() directly instead.
 */
int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
{
    return av_parse_video_frame_size(width_ptr, height_ptr, str);
}
2891 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2893 AVRational frame_rate;
2894 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2895 *frame_rate_num= frame_rate.num;
2896 *frame_rate_den= frame_rate.den;
2901 int64_t av_gettime(void)
2904 gettimeofday(&tv,NULL);
2905 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/*
 * Parse 'datestr' either as an absolute date/time (duration == 0),
 * returning microseconds since the Unix epoch, or as a duration
 * (duration != 0), returning microseconds. Accepts "now", ISO-like
 * date formats, HH:MM:SS, bare seconds, an optional fractional part,
 * and a trailing 'Z'/'z' for UTC.
 * NOTE(review): corrupted residue — stray numeric prefixes and many
 * missing statements (locals, format tables, negative-sign handling,
 * mktime/timegm conversion, INT64_MIN error return, braces); restore
 * from upstream FFmpeg before compiling.
 */
2908 int64_t parse_date(const char *datestr, int duration)
2914 static const char * const date_fmt[] = {
2918 static const char * const time_fmt[] = {
2928 time_t now = time(0);
2930 len = strlen(datestr);
2932 lastch = datestr[len - 1];
2935 is_utc = (lastch == 'z' || lastch == 'Z');
2937 memset(&dt, 0, sizeof(dt));
/* "now" short-circuits to the current time */
2942 if (!strncasecmp(datestr, "now", len))
2943 return (int64_t) now * 1000000;
2945 /* parse the year-month-day part */
2946 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
2947 q = small_strptime(p, date_fmt[i], &dt);
2953 /* if the year-month-day part is missing, then take the
2954 * current year-month-day time */
2959 dt = *localtime(&now);
2961 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
/* optional date/time separator */
2966 if (*p == 'T' || *p == 't' || *p == ' ')
2969 /* parse the hour-minute-second part */
2970 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
2971 q = small_strptime(p, time_fmt[i], &dt);
2977 /* parse datestr as a duration */
2982 /* parse datestr as HH:MM:SS */
2983 q = small_strptime(p, time_fmt[0], &dt);
2985 /* parse datestr as S+ */
2986 dt.tm_sec = strtol(p, (char **)&q, 10);
2988 /* the parsing didn't succeed */
2995 /* Now we have all the fields that we can get */
3001 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
3003 dt.tm_isdst = -1; /* unknown */
3013 /* parse the .m... part */
/* accumulate up to 6 fractional digits as microseconds */
3017 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
3020 val += n * (*q - '0');
3024 return negative ? -t : t;
/*
 * Scan a query-string style 'info' ("tag1=val&tag2=val...") for 'tag1';
 * on match copy its (missing lines: URL-decoded) value into 'arg'
 * (bounded by arg_size) and return 1, else return 0.
 * NOTE(review): corrupted residue — stray numeric prefixes and missing
 * statements (locals 'p'/'q'/'tag', copy bodies, terminators, returns,
 * braces); restore from upstream FFmpeg before compiling.
 */
3027 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
/* collect the tag name up to '=' or '&' */
3037 while (*p != '\0' && *p != '=' && *p != '&') {
3038 if ((q - tag) < sizeof(tag) - 1)
/* collect the value up to the next '&' */
3046 while (*p != '&' && *p != '\0') {
3047 if ((q - arg) < arg_size - 1) {
3057 if (!strcmp(tag, tag1))
/*
 * Expand a "%d"/"%0Nd" pattern in 'path' with 'number' into 'buf'
 * (at most buf_size bytes). Exactly one %d-style directive must be
 * present; otherwise the (missing) error path returns -1.
 * NOTE(review): corrupted residue — stray numeric prefixes and missing
 * statements (the 'p' pointer, the main copy loop, '%%' handling,
 * returns, braces); restore from upstream FFmpeg before compiling.
 */
3066 int av_get_frame_filename(char *buf, int buf_size,
3067 const char *path, int number)
3070 char *q, buf1[20], c;
3071 int nd, len, percentd_found;
/* read the optional zero-padding width after '%' */
3083 while (isdigit(*p)) {
3084 nd = nd * 10 + *p++ - '0';
3087 } while (isdigit(c));
/* render the frame number with the requested width */
3096 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3098 if ((q - buf + len) > buf_size - 1)
3100 memcpy(q, buf1, len);
3108 if ((q - buf) < buf_size - 1)
/* pattern must contain a %d-style directive */
3112 if (!percentd_found)
/*
 * Shared hex-dump worker: writes 16 bytes per line (hex columns followed
 * by a printable-ASCII column) either to stream 'f' or, when f is NULL,
 * to av_log at the given level.
 * NOTE(review): corrupted residue — stray numeric prefixes and missing
 * statements (locals 'i'/'j'/'len'/'c', padding loop, newline prints,
 * braces, #undef PRINT); restore from upstream before compiling.
 */
3121 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3124 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3126 for(i=0;i<size;i+=16) {
3133 PRINT(" %02x", buf[i+j]);
/* ASCII column: non-printable bytes are substituted (missing line) */
3138 for(j=0;j<len;j++) {
3140 if (c < ' ' || c > '~')
/**
 * Hex-dump 'size' bytes of 'buf' to the stream 'f'.
 */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
{
    hex_dump_internal(NULL, f, 0, buf, size);
}
3154 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3156 hex_dump_internal(avcl, NULL, level, buf, size);
3159 //FIXME needs to know the time_base
/*
 * Shared packet-dump worker: print stream index, keyframe flag,
 * duration, dts, pts and size either to stream 'f' or, when f is NULL,
 * to av_log at 'level'; optionally hex-dump the payload.
 * NOTE(review): corrupted residue — stray numeric prefixes and missing
 * statements (braces, "N/A" branches, label prints, the dump_payload
 * check, #undef PRINT); restore from upstream before compiling.
 */
3160 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3162 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3163 PRINT("stream #%d:\n", pkt->stream_index);
3164 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3165 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3166 /* DTS is _always_ valid after av_read_frame() */
3168 if (pkt->dts == AV_NOPTS_VALUE)
3171 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3172 /* PTS may not be known if B-frames are present. */
3174 if (pkt->pts == AV_NOPTS_VALUE)
3177 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3179 PRINT(" size=%d\n", pkt->size);
/* payload dump is gated on dump_payload in a missing line */
3182 av_hex_dump(f, pkt->data, pkt->size);
3185 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3187 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
3190 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3192 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
/*
 * Split 'url' into protocol, authorization (user[:pass]), hostname,
 * port and path components, each copied into a caller-supplied,
 * size-bounded buffer. Missing components yield empty strings and
 * port -1. Handles '[v6]'-bracketed IPv6 hosts.
 * NOTE(review): corrupted residue — stray numeric prefixes and missing
 * statements (parameter tail with port_ptr/url, "://" skip, else
 * branches, returns, braces); restore from upstream before compiling.
 */
3195 void url_split(char *proto, int proto_size,
3196 char *authorization, int authorization_size,
3197 char *hostname, int hostname_size,
3199 char *path, int path_size,
3202 const char *p, *ls, *at, *col, *brk;
/* default every output so callers always get terminated strings */
3204 if (port_ptr) *port_ptr = -1;
3205 if (proto_size > 0) proto[0] = 0;
3206 if (authorization_size > 0) authorization[0] = 0;
3207 if (hostname_size > 0) hostname[0] = 0;
3208 if (path_size > 0) path[0] = 0;
3210 /* parse protocol */
3211 if ((p = strchr(url, ':'))) {
3212 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3217 /* no protocol means plain filename */
3218 av_strlcpy(path, url, path_size);
3222 /* separate path from hostname */
3223 ls = strchr(p, '/');
3225 ls = strchr(p, '?');
3227 av_strlcpy(path, ls, path_size);
3229 ls = &p[strlen(p)]; // XXX
3231 /* the rest is hostname, use that to parse auth/port */
3233 /* authorization (user[:pass]@hostname) */
3234 if ((at = strchr(p, '@')) && at < ls) {
3235 av_strlcpy(authorization, p,
3236 FFMIN(authorization_size, at + 1 - p));
3237 p = at + 1; /* skip '@' */
/* bracketed IPv6 literal: host is the text inside [...] */
3240 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3242 av_strlcpy(hostname, p + 1,
3243 FFMIN(hostname_size, brk - p));
3244 if (brk[1] == ':' && port_ptr)
3245 *port_ptr = atoi(brk + 2);
3246 } else if ((col = strchr(p, ':')) && col < ls) {
3247 av_strlcpy(hostname, p,
3248 FFMIN(col + 1 - p, hostname_size));
3249 if (port_ptr) *port_ptr = atoi(col + 1);
/* no port: hostname runs up to the start of the path */
3251 av_strlcpy(hostname, p,
3252 FFMIN(ls + 1 - p, hostname_size));
/**
 * Write an uppercase hexadecimal representation of 's' bytes from 'src'
 * into 'buff' (which must hold at least 2*s bytes). The output is NOT
 * NUL-terminated; callers must terminate or bound it themselves.
 *
 * @param buff destination buffer, >= 2*s bytes
 * @param src  source bytes
 * @param s    number of source bytes to convert
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
{
    int i;
    static const char hex_table[16] = { '0', '1', '2', '3',
                                        '4', '5', '6', '7',
                                        '8', '9', 'A', 'B',
                                        'C', 'D', 'E', 'F' };

    /* two output characters per input byte: high nibble, then low */
    for(i = 0; i < s; i++) {
        buff[i * 2]     = hex_table[src[i] >> 4];
        buff[i * 2 + 1] = hex_table[src[i] & 0xF];
    }

    return buff;
}
3272 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3273 int pts_num, int pts_den)
3275 unsigned int gcd= av_gcd(pts_num, pts_den);
3276 s->pts_wrap_bits = pts_wrap_bits;
3277 s->time_base.num = pts_num/gcd;
3278 s->time_base.den = pts_den/gcd;
3281 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);