2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libavcodec/opt.h"
25 #include "libavutil/avstring.h"
35 * @file libavformat/utils.c
36 * various utility functions for use within FFmpeg
39 unsigned avformat_version(void)
41 return LIBAVFORMAT_VERSION_INT;
44 /* fraction handling */
47 * f = val + (num / den) + 0.5.
49 * 'num' is normalized so that it is such as 0 <= num < den.
51 * @param f fractional number
52 * @param val integer value
53 * @param num must be >= 0
54 * @param den must be >= 1
56 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
69 * Fractional addition to f: f = f + (incr / f->den).
71 * @param f fractional number
72 * @param incr increment, can be positive or negative
74 static void av_frac_add(AVFrac *f, int64_t incr)
87 } else if (num >= den) {
94 /** head of registered input format linked list */
95 AVInputFormat *first_iformat = NULL;
96 /** head of registered output format linked list */
97 AVOutputFormat *first_oformat = NULL;
99 AVInputFormat *av_iformat_next(AVInputFormat *f)
101 if(f) return f->next;
102 else return first_iformat;
105 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
107 if(f) return f->next;
108 else return first_oformat;
111 void av_register_input_format(AVInputFormat *format)
115 while (*p != NULL) p = &(*p)->next;
120 void av_register_output_format(AVOutputFormat *format)
124 while (*p != NULL) p = &(*p)->next;
/**
 * Check whether the extension of 'filename' (the text after the last '.')
 * case-insensitively matches one entry of the comma-separated 'extensions'
 * list.
 *
 * @param filename   file name to test; may be NULL (returns 0)
 * @param extensions comma-separated extension list, e.g. "avi,mov"
 * @return 1 on match, 0 otherwise (including when there is no '.')
 */
int match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if (!filename)
        return 0;

    ext = strrchr(filename, '.');
    if (ext) {
        ext++;
        p = extensions;
        for (;;) {
            /* copy the next comma-separated token, truncated to the buffer */
            q = ext1;
            while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
                *q++ = *p++;
            *q = '\0';
            if (!strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++;
        }
    }
    return 0;
}
/**
 * Check whether 'name' case-insensitively matches one entry of the
 * comma-separated 'names' list.
 *
 * @return 1 on match, 0 otherwise
 */
static int match_format(const char *name, const char *names)
{
    const char *p;
    int len, namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        /* compare against the longer of the two lengths so a prefix of a
           longer list entry does not count as a match */
        len = FFMAX(p - names, namelen);
        if (!strncasecmp(name, names, len))
            return 1;
        names = p+1;
    }
    return !strcasecmp(name, names);
}
174 AVOutputFormat *guess_format(const char *short_name, const char *filename,
175 const char *mime_type)
177 AVOutputFormat *fmt, *fmt_found;
178 int score_max, score;
180 /* specific test for image sequences */
181 #if CONFIG_IMAGE2_MUXER
182 if (!short_name && filename &&
183 av_filename_number_test(filename) &&
184 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
185 return guess_format("image2", NULL, NULL);
188 /* Find the proper file type. */
192 while (fmt != NULL) {
194 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
196 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
198 if (filename && fmt->extensions &&
199 match_ext(filename, fmt->extensions)) {
202 if (score > score_max) {
211 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
212 const char *mime_type)
214 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
217 AVOutputFormat *stream_fmt;
218 char stream_format_name[64];
220 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
221 stream_fmt = guess_format(stream_format_name, NULL, NULL);
230 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
231 const char *filename, const char *mime_type, enum CodecType type){
232 if(type == CODEC_TYPE_VIDEO){
233 enum CodecID codec_id= CODEC_ID_NONE;
235 #if CONFIG_IMAGE2_MUXER
236 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
237 codec_id= av_guess_image2_codec(filename);
240 if(codec_id == CODEC_ID_NONE)
241 codec_id= fmt->video_codec;
243 }else if(type == CODEC_TYPE_AUDIO)
244 return fmt->audio_codec;
246 return CODEC_ID_NONE;
249 AVInputFormat *av_find_input_format(const char *short_name)
252 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
253 if (match_format(short_name, fmt->name))
259 /* memory handling */
261 void av_destruct_packet(AVPacket *pkt)
264 pkt->data = NULL; pkt->size = 0;
267 void av_init_packet(AVPacket *pkt)
269 pkt->pts = AV_NOPTS_VALUE;
270 pkt->dts = AV_NOPTS_VALUE;
273 pkt->convergence_duration = 0;
275 pkt->stream_index = 0;
276 pkt->destruct= av_destruct_packet_nofree;
279 int av_new_packet(AVPacket *pkt, int size)
282 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
283 return AVERROR(ENOMEM);
284 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
286 return AVERROR(ENOMEM);
287 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
292 pkt->destruct = av_destruct_packet;
296 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
298 int ret= av_new_packet(pkt, size);
303 pkt->pos= url_ftell(s);
305 ret= get_buffer(s, pkt->data, size);
314 int av_dup_packet(AVPacket *pkt)
316 if (((pkt->destruct == av_destruct_packet_nofree) || (pkt->destruct == NULL)) && pkt->data) {
318 /* We duplicate the packet and don't forget to add the padding again. */
319 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
320 return AVERROR(ENOMEM);
321 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
323 return AVERROR(ENOMEM);
325 memcpy(data, pkt->data, pkt->size);
326 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
328 pkt->destruct = av_destruct_packet;
/**
 * Check whether 'filename' contains a valid frame-number pattern
 * (e.g. "%d") by attempting to expand it with index 1.
 *
 * @return 1 if the pattern is valid, 0 otherwise (including NULL input)
 */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
}
339 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
341 AVInputFormat *fmt1, *fmt;
345 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
346 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
349 if (fmt1->read_probe) {
350 score = fmt1->read_probe(pd);
351 } else if (fmt1->extensions) {
352 if (match_ext(pd->filename, fmt1->extensions)) {
356 if (score > *score_max) {
359 }else if (score == *score_max)
365 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
367 return av_probe_input_format2(pd, is_opened, &score);
370 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
373 fmt = av_probe_input_format2(pd, 1, &score);
376 if (!strcmp(fmt->name, "mp3")) {
377 st->codec->codec_id = CODEC_ID_MP3;
378 st->codec->codec_type = CODEC_TYPE_AUDIO;
379 } else if (!strcmp(fmt->name, "ac3")) {
380 st->codec->codec_id = CODEC_ID_AC3;
381 st->codec->codec_type = CODEC_TYPE_AUDIO;
382 } else if (!strcmp(fmt->name, "mpegvideo")) {
383 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
384 st->codec->codec_type = CODEC_TYPE_VIDEO;
385 } else if (!strcmp(fmt->name, "m4v")) {
386 st->codec->codec_id = CODEC_ID_MPEG4;
387 st->codec->codec_type = CODEC_TYPE_VIDEO;
388 } else if (!strcmp(fmt->name, "h264")) {
389 st->codec->codec_id = CODEC_ID_H264;
390 st->codec->codec_type = CODEC_TYPE_VIDEO;
/*
 * NOTE(review): av_open_input_stream — open a media file from an already
 * created IO stream; 'fmt' must be specified. This excerpt is missing many
 * intermediate source lines (the embedded original numbering jumps), so the
 * text below is kept byte-identical; comments only.
 */
396 /************************************************************/
397 /* input media file */
400  * Open a media file from an IO stream. 'fmt' must be specified.
402 int av_open_input_stream(AVFormatContext **ic_ptr,
403                          ByteIOContext *pb, const char *filename,
404                          AVInputFormat *fmt, AVFormatParameters *ap)
/* default_ap substitutes for a NULL 'ap' (zeroed) — surrounding lines elided */
408     AVFormatParameters default_ap;
412         memset(ap, 0, sizeof(default_ap));
/* allocate a fresh context unless the caller preallocated one */
415     if(!ap->prealloced_context)
416         ic = avformat_alloc_context();
420         err = AVERROR(ENOMEM);
425     ic->duration = AV_NOPTS_VALUE;
426     ic->start_time = AV_NOPTS_VALUE;
427     av_strlcpy(ic->filename, filename, sizeof(ic->filename));
429     /* allocate private data */
430     if (fmt->priv_data_size > 0) {
431         ic->priv_data = av_mallocz(fmt->priv_data_size);
432         if (!ic->priv_data) {
433             err = AVERROR(ENOMEM);
437         ic->priv_data = NULL;
/* let the demuxer parse the container header */
440     if (ic->iformat->read_header) {
441         err = ic->iformat->read_header(ic, ap);
446     if (pb && !ic->data_offset)
447         ic->data_offset = url_ftell(ic->pb);
449 #if LIBAVFORMAT_VERSION_MAJOR < 53
450     ff_metadata_demux_compat(ic);
/* error path (label elided): free private data and per-stream allocations */
458     av_freep(&ic->priv_data);
459     for(i=0;i<ic->nb_streams;i++) {
460         AVStream *st = ic->streams[i];
462             av_free(st->priv_data);
463             av_free(st->codec->extradata);
/*
 * NOTE(review): av_open_input_file — open and probe a media file by name,
 * growing the probe buffer from PROBE_BUF_MIN up to PROBE_BUF_MAX until a
 * format is detected, then delegate to av_open_input_stream(). Many lines
 * are elided in this excerpt; text kept byte-identical, comments only.
 */
473 /** size of probe buffer, for guessing file type from file contents */
474 #define PROBE_BUF_MIN 2048
475 #define PROBE_BUF_MAX (1<<20)
477 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
480                        AVFormatParameters *ap)
483     AVProbeData probe_data, *pd = &probe_data;
484     ByteIOContext *pb = NULL;
488     pd->filename = filename;
493     /* guess format if no file can be opened */
494     fmt = av_probe_input_format(pd, 0);
497     /* Do not open file if the format does not need it. XXX: specific
498        hack needed to handle RTSP/TCP */
499     if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
500         /* if no file needed do not try to open one */
501         if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
505             url_setbufsize(pb, buf_size);
/* doubling probe loop: require a decent score until the max buffer size */
508         for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
509             int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
510             /* read probe data */
511             pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
512             pd->buf_size = get_buffer(pb, pd->buf, probe_size);
513             memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* rewind for the next probe pass; reopen if the stream is not seekable */
514             if (url_fseek(pb, 0, SEEK_SET) < 0) {
516                 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
522             /* guess file format */
523             fmt = av_probe_input_format2(pd, 1, &score);
528     /* if still no format found, error */
534     /* check filename in case an image number is expected */
535     if (fmt->flags & AVFMT_NEEDNUMBER) {
536         if (!av_filename_number_test(filename)) {
537             err = AVERROR_NUMEXPECTED;
541     err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
554 /*******************************************************/
556 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
557 AVPacketList **plast_pktl){
558 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
563 (*plast_pktl)->next = pktl;
565 *packet_buffer = pktl;
567 /* add the packet in the buffered packet list */
/*
 * NOTE(review): av_read_packet — return the next raw packet, serving from
 * the raw_packet_buffer first; newly read packets from streams still in
 * CODEC_ID_PROBE state are buffered and fed to set_codec_from_probe_data()
 * until the codec is identified. Lines are elided in this excerpt; text
 * kept byte-identical, comments only.
 */
573 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
579         AVPacketList *pktl = s->raw_packet_buffer;
/* serve a buffered packet once its stream is no longer being probed */
583             if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
584                 s->raw_packet_buffer = pktl->next;
591         ret= s->iformat->read_packet(s, pkt);
594         st= s->streams[pkt->stream_index];
/* apply caller-forced codec ids per media type */
596         switch(st->codec->codec_type){
597         case CODEC_TYPE_VIDEO:
598             if(s->video_codec_id)   st->codec->codec_id= s->video_codec_id;
600         case CODEC_TYPE_AUDIO:
601             if(s->audio_codec_id)   st->codec->codec_id= s->audio_codec_id;
603         case CODEC_TYPE_SUBTITLE:
604             if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
608         if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
611         add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
/* accumulate probe data for streams whose codec is still unknown */
613         if(st->codec->codec_id == CODEC_ID_PROBE){
614             AVProbeData *pd = &st->probe_data;
616             pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
617             memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
618             pd->buf_size += pkt->size;
619             memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* re-probe only when the buffer size crosses a power-of-two boundary */
621             if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
622                 set_codec_from_probe_data(st, pd, 1);
623                 if(st->codec->codec_id != CODEC_ID_PROBE){
632 /**********************************************************/
635 * Get the number of samples of an audio frame. Return -1 on error.
637 static int get_audio_frame_size(AVCodecContext *enc, int size)
641 if(enc->codec_id == CODEC_ID_VORBIS)
644 if (enc->frame_size <= 1) {
645 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
647 if (bits_per_sample) {
648 if (enc->channels == 0)
650 frame_size = (size << 3) / (bits_per_sample * enc->channels);
652 /* used for example by ADPCM codecs */
653 if (enc->bit_rate == 0)
655 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
658 frame_size = enc->frame_size;
/*
 * NOTE(review): compute_frame_duration — compute a packet's duration as a
 * rational (*pnum / *pden) in stream units. Several lines (the audio branch
 * epilogue, defaults) are elided in this excerpt; text kept byte-identical,
 * comments only.
 */
665  * Return the frame duration in seconds. Return 0 if not available.
667 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
668                                    AVCodecParserContext *pc, AVPacket *pkt)
674     switch(st->codec->codec_type) {
675     case CODEC_TYPE_VIDEO:
/* prefer the container time base when it looks sane (num*1000 > den) */
676         if(st->time_base.num*1000LL > st->time_base.den){
677             *pnum = st->time_base.num;
678             *pden = st->time_base.den;
679         }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
680             *pnum = st->codec->time_base.num;
681             *pden = st->codec->time_base.den;
/* scale duration by repeated fields/frames reported by the parser */
682             if (pc && pc->repeat_pict) {
683                 *pnum = (*pnum) * (1 + pc->repeat_pict);
687     case CODEC_TYPE_AUDIO:
688         frame_size = get_audio_frame_size(st->codec, pkt->size);
692         *pden = st->codec->sample_rate;
/*
 * NOTE(review): is_intra_only — return nonzero when every frame of the
 * codec is a keyframe (all audio; a whitelist of intra-only video codecs).
 * Several case labels and the return statements are elided in this excerpt;
 * text kept byte-identical, comments only.
 */
699 static int is_intra_only(AVCodecContext *enc){
700     if(enc->codec_type == CODEC_TYPE_AUDIO){
702     }else if(enc->codec_type == CODEC_TYPE_VIDEO){
703         switch(enc->codec_id){
705         case CODEC_ID_MJPEGB:
707         case CODEC_ID_RAWVIDEO:
708         case CODEC_ID_DVVIDEO:
709         case CODEC_ID_HUFFYUV:
710         case CODEC_ID_FFVHUFF:
715         case CODEC_ID_JPEG2000:
/*
 * NOTE(review): update_initial_timestamps — once the first valid dts of a
 * stream is known, shift the timestamps of already-buffered packets of that
 * stream by first_dts and initialize start_time. Early-return and brace
 * lines are elided in this excerpt; text kept byte-identical, comments only.
 */
723 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
724                                       int64_t dts, int64_t pts)
726     AVStream *st= s->streams[stream_index];
727     AVPacketList *pktl= s->packet_buffer;
/* only act on the first valid dts for this stream */
729     if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
732     st->first_dts= dts - st->cur_dts;
735     for(; pktl; pktl= pktl->next){
736         if(pktl->pkt.stream_index != stream_index)
738         //FIXME think more about this check
739         if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
740             pktl->pkt.pts += st->first_dts;
742         if(pktl->pkt.dts != AV_NOPTS_VALUE)
743             pktl->pkt.dts += st->first_dts;
745         if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
746             st->start_time= pktl->pkt.pts;
748     if (st->start_time == AV_NOPTS_VALUE)
749         st->start_time = pts;
/*
 * NOTE(review): update_initial_durations — fill in missing dts/pts/duration
 * of buffered packets of the same stream once a packet with a known duration
 * arrives, extrapolating backwards/forwards from it. Several brace/return
 * lines are elided in this excerpt; text kept byte-identical, comments only.
 */
752 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
754     AVPacketList *pktl= s->packet_buffer;
757     if(st->first_dts != AV_NOPTS_VALUE){
758         cur_dts= st->first_dts;
/* walk back from first_dts over the leading timestamp-less packets */
759         for(; pktl; pktl= pktl->next){
760             if(pktl->pkt.stream_index == pkt->stream_index){
761                 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
763                 cur_dts -= pkt->duration;
766         pktl= s->packet_buffer;
767         st->first_dts = cur_dts;
768     }else if(st->cur_dts)
/* forward pass: assign extrapolated timestamps and the known duration */
771     for(; pktl; pktl= pktl->next){
772         if(pktl->pkt.stream_index != pkt->stream_index)
774         if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
775            && !pktl->pkt.duration){
776             pktl->pkt.dts= cur_dts;
777             if(!st->codec->has_b_frames)
778                 pktl->pkt.pts= cur_dts;
779             cur_dts += pkt->duration;
780             pktl->pkt.duration= pkt->duration;
784     if(st->first_dts == AV_NOPTS_VALUE)
785         st->cur_dts= cur_dts;
/*
 * NOTE(review): compute_pkt_fields — fix up a demuxed packet's pts/dts/
 * duration/flags using the stream state and (optionally) the parser: wrap
 * correction, duration estimation, parser sync points, pts/dts interpolation
 * for delayed (B-frame) and non-delayed streams, reorder buffer, and
 * keyframe flagging. This logic is order-sensitive and the excerpt elides
 * many lines; text kept byte-identical, comments only.
 */
788 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
789                                AVCodecParserContext *pc, AVPacket *pkt)
791     int num, den, presentation_delayed, delay, i;
794     /* do we have a video B-frame ? */
795     delay= st->codec->has_b_frames;
796     presentation_delayed = 0;
797     /* XXX: need has_b_frame, but cannot get it if the codec is
800         pc && pc->pict_type != FF_B_TYPE)
801         presentation_delayed = 1;
/* undo timestamp wraparound when dts jumped above pts */
803     if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
804        /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
805         pkt->dts -= 1LL<<st->pts_wrap_bits;
808     // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
809     // we take the conservative approach and discard both
810     // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
811     if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
812         av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n");
813         pkt->dts= pkt->pts= AV_NOPTS_VALUE;
/* estimate a missing duration from the stream/codec frame rate */
816     if (pkt->duration == 0) {
817         compute_frame_duration(&num, &den, st, pc, pkt);
819             pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
821             if(pkt->duration != 0 && s->packet_buffer)
822                 update_initial_durations(s, st, pkt);
826     /* correct timestamps with byte offset if demuxers only have timestamps
827        on packet boundaries */
828     if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
829         /* this will estimate bitrate based on this frame's duration and size */
830         offset = av_rescale(pc->offset, pkt->duration, pkt->size);
831         if(pkt->pts != AV_NOPTS_VALUE)
833         if(pkt->dts != AV_NOPTS_VALUE)
837     if (pc && pc->dts_sync_point >= 0) {
838         // we have synchronization info from the parser
839         int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
841             int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
842             if (pkt->dts != AV_NOPTS_VALUE) {
843                 // got DTS from the stream, update reference timestamp
844                 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
845                 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
846             } else if (st->reference_dts != AV_NOPTS_VALUE) {
847                 // compute DTS based on reference timestamp
848                 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
849                 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
851             if (pc->dts_sync_point > 0)
852                 st->reference_dts = pkt->dts; // new reference
856     /* This may be redundant, but it should not hurt. */
857     if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
858         presentation_delayed = 1;
860 //    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
861     /* interpolate PTS and DTS if they are not present */
862     //We skip H264 currently because delay and has_b_frames are not reliably set
863     if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
864         if (presentation_delayed) {
865             /* DTS = decompression timestamp */
866             /* PTS = presentation timestamp */
867             if (pkt->dts == AV_NOPTS_VALUE)
868                 pkt->dts = st->last_IP_pts;
869             update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
870             if (pkt->dts == AV_NOPTS_VALUE)
871                 pkt->dts = st->cur_dts;
873             /* this is tricky: the dts must be incremented by the duration
874                of the frame we are displaying, i.e. the last I- or P-frame */
875             if (st->last_IP_duration == 0)
876                 st->last_IP_duration = pkt->duration;
877             if(pkt->dts != AV_NOPTS_VALUE)
878                 st->cur_dts = pkt->dts + st->last_IP_duration;
879             st->last_IP_duration  = pkt->duration;
880             st->last_IP_pts= pkt->pts;
881             /* cannot compute PTS if not present (we can compute it only
882                by knowing the future */
883         } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
/* heuristic: nudge pts forward by one duration when it tracks better */
884             if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
885                 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
886                 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
887                 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
888                     pkt->pts += pkt->duration;
889 //                    av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
893             /* presentation is not delayed : PTS and DTS are the same */
894             if(pkt->pts == AV_NOPTS_VALUE)
896             update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
897             if(pkt->pts == AV_NOPTS_VALUE)
898                 pkt->pts = st->cur_dts;
900             if(pkt->pts != AV_NOPTS_VALUE)
901                 st->cur_dts = pkt->pts + pkt->duration;
/* small insertion-sort reorder buffer turns pts into a dts estimate */
905     if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
906         st->pts_buffer[0]= pkt->pts;
907         for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
908             FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
909         if(pkt->dts == AV_NOPTS_VALUE)
910             pkt->dts= st->pts_buffer[0];
911         if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here
912             update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
914         if(pkt->dts > st->cur_dts)
915             st->cur_dts = pkt->dts;
918 //    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
/* keyframe flagging: intra-only codecs, or parser-reported keyframes */
921     if(is_intra_only(st->codec))
922         pkt->flags |= PKT_FLAG_KEY;
925         /* keyframe computation */
926         if (pc->key_frame == 1)
927             pkt->flags |= PKT_FLAG_KEY;
928         else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
929             pkt->flags |= PKT_FLAG_KEY;
932         pkt->convergence_duration = pc->convergence_duration;
935 void av_destruct_packet_nofree(AVPacket *pkt)
937 pkt->data = NULL; pkt->size = 0;
/*
 * NOTE(review): av_read_frame_internal — return the next complete frame:
 * either pass raw packets through unchanged, or feed them to the stream's
 * parser and emit the parsed frames; flushes parsers at EOF. The loop/state
 * structure is heavily elided in this excerpt; text kept byte-identical,
 * comments only.
 */
940 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
948         /* select current input stream component */
951             if (!st->need_parsing || !st->parser) {
952                 /* no parsing needed: we just output the packet as is */
953                 /* raw data support */
954                 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
955                 compute_pkt_fields(s, st, NULL, pkt);
958             } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
/* parser path: consume bytes from the current packet, maybe emit a frame */
959                 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
960                                        st->cur_ptr, st->cur_len,
961                                        st->cur_pkt.pts, st->cur_pkt.dts,
/* timestamps are consumed once; further frames from this packet get none */
963                 st->cur_pkt.pts = AV_NOPTS_VALUE;
964                 st->cur_pkt.dts = AV_NOPTS_VALUE;
965                 /* increment read pointer */
969                 /* return packet if any */
973                     pkt->stream_index = st->index;
974                     pkt->pts = st->parser->pts;
975                     pkt->dts = st->parser->dts;
976                     pkt->pos = st->parser->pos;
977                     pkt->destruct = av_destruct_packet_nofree;
978                     compute_pkt_fields(s, st, st->parser, pkt);
980                     if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
981                         ff_reduce_index(s, st->index);
982                         av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
983                                            0, 0, AVINDEX_KEYFRAME);
990                 av_free_packet(&st->cur_pkt);
995         /* read next packet */
996         ret = av_read_packet(s, &cur_pkt);
998             if (ret == AVERROR(EAGAIN))
1000             /* return the last frames, if any */
/* EOF: flush every parser with a NULL input buffer */
1001             for(i = 0; i < s->nb_streams; i++) {
1003                 if (st->parser && st->need_parsing) {
1004                     av_parser_parse2(st->parser, st->codec,
1005                                      &pkt->data, &pkt->size,
1007                                      AV_NOPTS_VALUE, AV_NOPTS_VALUE,
1013             /* no more packets: really terminate parsing */
1016         st = s->streams[cur_pkt.stream_index];
1017         st->cur_pkt= cur_pkt;
1019         if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1020            st->cur_pkt.dts != AV_NOPTS_VALUE &&
1021            st->cur_pkt.pts < st->cur_pkt.dts){
1022             av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1023                    st->cur_pkt.stream_index,
1027 //            av_free_packet(&st->cur_pkt);
1031         if(s->debug & FF_FDEBUG_TS)
1032             av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1033                    st->cur_pkt.stream_index,
1040         st->cur_ptr = st->cur_pkt.data;
1041         st->cur_len = st->cur_pkt.size;
/* lazily create the parser the first time this stream needs one */
1042         if (st->need_parsing && !st->parser) {
1043             st->parser = av_parser_init(st->codec->codec_id);
1045                 /* no parser available: just output the raw packets */
1046                 st->need_parsing = AVSTREAM_PARSE_NONE;
1047             }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1048                 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1050             if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1051                 st->parser->next_frame_offset=
1052                 st->parser->cur_offset= st->cur_pkt.pos;
1057     if(s->debug & FF_FDEBUG_TS)
1058         av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
/*
 * NOTE(review): av_read_frame — public frame reader. Serves packets from
 * the internal packet_buffer (generating missing pts from later dts values
 * when AVFMT_FLAG_GENPTS is set), otherwise calls av_read_frame_internal();
 * with genpts, freshly read packets are buffered. Lines are elided in this
 * excerpt; text kept byte-identical, comments only.
 */
1068 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1072     const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1075         pktl = s->packet_buffer;
1077             AVPacket *next_pkt= &pktl->pkt;
1079             if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
/* scan ahead for a later packet whose dts gives this one its pts */
1080                 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1081                     if(   pktl->pkt.stream_index == next_pkt->stream_index
1082                        && next_pkt->dts < pktl->pkt.dts
1083                        && pktl->pkt.pts != pktl->pkt.dts //not b frame
1084                        /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1085                         next_pkt->pts= pktl->pkt.dts;
1089                 pktl = s->packet_buffer;
1092             if(   next_pkt->pts != AV_NOPTS_VALUE
1093                || next_pkt->dts == AV_NOPTS_VALUE
1095                 /* read packet from packet buffer, if there is data */
1097                 s->packet_buffer = pktl->next;
1103             int ret= av_read_frame_internal(s, pkt);
1105                 if(pktl && ret != AVERROR(EAGAIN)){
/* genpts: keep a private copy of the packet in the buffer */
1112                 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1113                                                &s->packet_buffer_end)) < 0)
1114                     return AVERROR(ENOMEM);
1116             assert(!s->packet_buffer);
1117             return av_read_frame_internal(s, pkt);
/*
 * NOTE(review): flush_packet_queue — drain and free the buffered packet
 * list(s). Loop structure and the raw-buffer handling are elided in this
 * excerpt; text kept byte-identical, comments only.
 */
1122 /* XXX: suppress the packet queue */
1123 static void flush_packet_queue(AVFormatContext *s)
1128         pktl = s->packet_buffer;
1131         s->packet_buffer = pktl->next;
1132         av_free_packet(&pktl->pkt);
1137 /*******************************************************/
1140 int av_find_default_stream_index(AVFormatContext *s)
1142 int first_audio_index = -1;
1146 if (s->nb_streams <= 0)
1148 for(i = 0; i < s->nb_streams; i++) {
1150 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1153 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1154 first_audio_index = i;
1156 return first_audio_index >= 0 ? first_audio_index : 0;
/*
 * NOTE(review): av_read_frame_flush — reset all demuxing state before a
 * seek: drop buffered packets and reset each stream's parser and timestamp
 * state. Several lines (parser NULLing, pts_buffer reset) are elided in this
 * excerpt; text kept byte-identical, comments only.
 */
1160  * Flush the frame reader.
1162 static void av_read_frame_flush(AVFormatContext *s)
1167     flush_packet_queue(s);
1171     /* for each stream, reset read state */
1172     for(i = 0; i < s->nb_streams; i++) {
1176             av_parser_close(st->parser);
1178             av_free_packet(&st->cur_pkt);
1180         st->last_IP_pts = AV_NOPTS_VALUE;
1181         st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1182         st->reference_dts = AV_NOPTS_VALUE;
1189 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1192 for(i = 0; i < s->nb_streams; i++) {
1193 AVStream *st = s->streams[i];
1195 st->cur_dts = av_rescale(timestamp,
1196 st->time_base.den * (int64_t)ref_st->time_base.num,
1197 st->time_base.num * (int64_t)ref_st->time_base.den);
1201 void ff_reduce_index(AVFormatContext *s, int stream_index)
1203 AVStream *st= s->streams[stream_index];
1204 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1206 if((unsigned)st->nb_index_entries >= max_entries){
1208 for(i=0; 2*i<st->nb_index_entries; i++)
1209 st->index_entries[i]= st->index_entries[2*i];
1210 st->nb_index_entries= i;
/*
 * NOTE(review): av_add_index_entry — insert a (pos, timestamp) entry into
 * the stream's sorted index, growing the array and keeping it ordered;
 * duplicate timestamps update the existing entry. Several lines (field
 * assignments, returns) are elided in this excerpt; text kept
 * byte-identical, comments only.
 */
1214 int av_add_index_entry(AVStream *st,
1215                        int64_t pos, int64_t timestamp, int size, int distance, int flags)
1217     AVIndexEntry *entries, *ie;
/* overflow guard before growing the entries array */
1220     if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1223     entries = av_fast_realloc(st->index_entries,
1224                               &st->index_entries_allocated_size,
1225                               (st->nb_index_entries + 1) *
1226                               sizeof(AVIndexEntry));
1230     st->index_entries= entries;
1232     index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
/* timestamp is larger than anything present: append at the end */
1235         index= st->nb_index_entries++;
1236         ie= &entries[index];
1237         assert(index==0 || ie[-1].timestamp < timestamp);
1239         ie= &entries[index];
1240         if(ie->timestamp != timestamp){
1241             if(ie->timestamp <= timestamp)
/* shift the tail right to make room for the new entry */
1243             memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1244             st->nb_index_entries++;
1245         }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1246             distance= ie->min_distance;
1250     ie->timestamp = timestamp;
1251     ie->min_distance= distance;
/*
 * NOTE(review): av_index_search_timestamp — binary-search the sorted index
 * for 'wanted_timestamp', honoring AVSEEK_FLAG_BACKWARD/ANY; without ANY,
 * step to the nearest keyframe entry. The binary-search bookkeeping (a/b
 * updates, returns) is elided in this excerpt; text kept byte-identical,
 * comments only.
 */
1258 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1261     AVIndexEntry *entries= st->index_entries;
1262     int nb_entries= st->nb_index_entries;
1271         timestamp = entries[m].timestamp;
1272         if(timestamp >= wanted_timestamp)
1274         if(timestamp <= wanted_timestamp)
1277     m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1279     if(!(flags & AVSEEK_FLAG_ANY)){
/* walk toward the seek direction until a keyframe entry is found */
1280         while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1281             m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/*
 * NOTE(review): av_seek_frame_binary — seek using read_timestamp(): seed
 * the [pos_min, pos_max] search window from cached index entries when
 * available, then run av_gen_search() and reposition the IO context.
 * Lines are elided in this excerpt; text kept byte-identical, comments only.
 */
1292 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1293     AVInputFormat *avif= s->iformat;
1294     int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1295     int64_t ts_min, ts_max, ts;
1299     if (stream_index < 0)
1303     av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1307     ts_min= AV_NOPTS_VALUE;
1308     pos_limit= -1; //gcc falsely says it may be uninitialized
1310     st= s->streams[stream_index];
1311     if(st->index_entries){
/* lower bound: the last index entry at or before target_ts */
1314         index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1315         index= FFMAX(index, 0);
1316         e= &st->index_entries[index];
1318         if(e->timestamp <= target_ts || e->pos == e->min_distance){
1320             ts_min= e->timestamp;
1322                 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* upper bound: the first index entry strictly after target_ts */
1329         index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1330         assert(index < st->nb_index_entries);
1332             e= &st->index_entries[index];
1333             assert(e->timestamp >= target_ts);
1335             ts_max= e->timestamp;
1336             pos_limit= pos_max - e->min_distance;
1338                 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1339                        pos_max,pos_limit, ts_max);
1344     pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1349     url_fseek(s->pb, pos, SEEK_SET);
1351     av_update_cur_dts(s, st, ts);
/*
 * NOTE(review): av_gen_search — generic timestamp search: establish ts/pos
 * bounds by reading timestamps at the file start and end, then narrow the
 * window by interpolation, falling back to bisection and finally linear
 * search, until pos_min reaches pos_limit. The loop bookkeeping is heavily
 * elided in this excerpt; text kept byte-identical, comments only.
 */
1356 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1358     int64_t start_pos, filesize;
1362     av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* establish the lower bound if the caller did not provide one */
1365     if(ts_min == AV_NOPTS_VALUE){
1366         pos_min = s->data_offset;
1367         ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1368         if (ts_min == AV_NOPTS_VALUE)
/* establish the upper bound by scanning backwards from EOF */
1372     if(ts_max == AV_NOPTS_VALUE){
1374         filesize = url_fsize(s->pb);
1375         pos_max = filesize - 1;
1378             ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1380         }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1381         if (ts_max == AV_NOPTS_VALUE)
1385             int64_t tmp_pos= pos_max + 1;
1386             int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1387             if(tmp_ts == AV_NOPTS_VALUE)
1391             if(tmp_pos >= filesize)
1397     if(ts_min > ts_max){
1399     }else if(ts_min == ts_max){
1404     while (pos_min < pos_limit) {
1406         av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1410         assert(pos_limit <= pos_max);
1413             int64_t approximate_keyframe_distance= pos_max - pos_limit;
1414             // interpolate position (better than dichotomy)
1415             pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1416                 + pos_min - approximate_keyframe_distance;
1417         }else if(no_change==1){
1418             // bisection, if interpolation failed to change min or max pos last time
1419             pos = (pos_min + pos_limit)>>1;
1421             /* linear search if bisection failed, can only happen if there
1422                are very few or no keyframes between min/max */
1427         else if(pos > pos_limit)
1431         ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1437         av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1438                pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
1439                start_pos, no_change);
1441         if(ts == AV_NOPTS_VALUE){
1442             av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1445         assert(ts != AV_NOPTS_VALUE);
/* shrink the window toward the target timestamp */
1446         if (target_ts <= ts) {
1447             pos_limit = start_pos - 1;
1451         if (target_ts >= ts) {
1457     pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1458     ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min  : ts_max;
1461     ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1463     ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1464     av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1465            pos, ts_min, target_ts, ts_max);
/* Seek directly to a byte position in the input, clamping the target to
 * [data_offset, filesize-1]. Used for AVSEEK_FLAG_BYTE seeks. */
1471 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1472 int64_t pos_min, pos_max;
1476 if (stream_index < 0)
1479 st= s->streams[stream_index];
/* clamp the requested byte offset to the file's valid data range */
1482 pos_min = s->data_offset;
1483 pos_max = url_fsize(s->pb) - 1;
1485 if (pos < pos_min) pos= pos_min;
1486 else if(pos > pos_max) pos= pos_max;
1488 url_fseek(s->pb, pos, SEEK_SET);
/* NOTE(review): ts is computed on lines elided from this view -- confirm in full source */
1491 av_update_cur_dts(s, st, ts);
/* Generic seek: use the stream's index entries to find a position for
 * 'timestamp'. If the index does not reach far enough, read packets forward
 * (building the index as a side effect) until the target is covered, then
 * retry the index lookup and seek to the matching entry's byte position. */
1496 static int av_seek_frame_generic(AVFormatContext *s,
1497 int stream_index, int64_t timestamp, int flags)
1503 st = s->streams[stream_index];
1505 index = av_index_search_timestamp(st, timestamp, flags);
/* target beyond (or before) the current index: scan forward from the last
 * known indexed position, or from the start of the file if no index exists */
1507 if(index < 0 || index==st->nb_index_entries-1){
1511 if(st->nb_index_entries){
1512 assert(st->index_entries);
1513 ie= &st->index_entries[st->nb_index_entries-1];
1514 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1516 av_update_cur_dts(s, st, ie->timestamp);
1518 if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
/* keep reading frames; EAGAIN means "retry", anything else ends the scan */
1524 ret = av_read_frame(s, &pkt);
1525 }while(ret == AVERROR(EAGAIN));
1528 av_free_packet(&pkt);
/* stop scanning once a keyframe past the requested timestamp is seen */
1529 if(stream_index == pkt.stream_index){
1530 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1534 index = av_index_search_timestamp(st, timestamp, flags);
1539 av_read_frame_flush(s);
/* prefer the demuxer's own seek callback when it provides one */
1540 if (s->iformat->read_seek){
1541 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1544 ie = &st->index_entries[index];
1545 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1547 av_update_cur_dts(s, st, ie->timestamp);
/* Public seek entry point. Dispatches, in order: byte seek, then the
 * demuxer-specific read_seek, then binary search via read_timestamp,
 * then the generic index-based seek. */
1552 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1557 av_read_frame_flush(s);
1559 if(flags & AVSEEK_FLAG_BYTE)
1560 return av_seek_frame_byte(s, stream_index, timestamp, flags);
/* no stream given: pick a default stream and convert the timestamp,
 * which is in AV_TIME_BASE units in that case, to the stream time base */
1562 if(stream_index < 0){
1563 stream_index= av_find_default_stream_index(s);
1564 if(stream_index < 0)
1567 st= s->streams[stream_index];
1568 /* timestamp for default must be expressed in AV_TIME_BASE units */
1569 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1572 /* first, we try the format specific seek */
1573 if (s->iformat->read_seek)
1574 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1581 if(s->iformat->read_timestamp)
1582 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1584 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1587 /*******************************************************/
1590 * Returns TRUE if the stream has accurate duration in any stream.
1592 * @return TRUE if the stream has accurate duration for at least one component.
/* Return nonzero if at least one stream in 'ic' has a known duration. */
1594 static int av_has_duration(AVFormatContext *ic)
1599 for(i = 0;i < ic->nb_streams; i++) {
1600 st = ic->streams[i];
1601 if (st->duration != AV_NOPTS_VALUE)
1608 * Estimate the stream timings from the one of each components.
1610 * Also computes the global bitrate if possible.
/* Derive the container-level start_time and duration from the per-stream
 * values (rescaled to AV_TIME_BASE), and compute the global bit rate from
 * file_size/duration when both are known. */
1612 static void av_update_stream_timings(AVFormatContext *ic)
1614 int64_t start_time, start_time1, end_time, end_time1;
1615 int64_t duration, duration1;
/* running extrema across all streams, in AV_TIME_BASE units */
1619 start_time = INT64_MAX;
1620 end_time = INT64_MIN;
1621 duration = INT64_MIN;
1622 for(i = 0;i < ic->nb_streams; i++) {
1623 st = ic->streams[i];
1624 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1625 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1626 if (start_time1 < start_time)
1627 start_time = start_time1;
/* stream end time = start + duration, tracked to get the global end */
1628 if (st->duration != AV_NOPTS_VALUE) {
1629 end_time1 = start_time1
1630 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1631 if (end_time1 > end_time)
1632 end_time = end_time1;
1635 if (st->duration != AV_NOPTS_VALUE) {
1636 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1637 if (duration1 > duration)
1638 duration = duration1;
1641 if (start_time != INT64_MAX) {
1642 ic->start_time = start_time;
/* span from earliest start to latest end can exceed any single stream's
 * duration; prefer the larger value */
1643 if (end_time != INT64_MIN) {
1644 if (end_time - start_time > duration)
1645 duration = end_time - start_time;
1648 if (duration != INT64_MIN) {
1649 ic->duration = duration;
1650 if (ic->file_size > 0) {
1651 /* compute the bitrate */
1652 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1653 (double)ic->duration;
/* Propagate the container-level start_time/duration down to any stream
 * that is still missing them, rescaled into each stream's time base. */
1658 static void fill_all_stream_timings(AVFormatContext *ic)
1663 av_update_stream_timings(ic);
1664 for(i = 0;i < ic->nb_streams; i++) {
1665 st = ic->streams[i];
/* only fill in streams that have no start time of their own */
1666 if (st->start_time == AV_NOPTS_VALUE) {
1667 if(ic->start_time != AV_NOPTS_VALUE)
1668 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1669 if(ic->duration != AV_NOPTS_VALUE)
1670 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Estimate stream durations from the total bit rate and file size:
 * duration ~= 8 * file_size / bit_rate, rescaled per stream time base.
 * The global bit rate itself is summed from the per-stream codec rates
 * if not already set. */
1675 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1677 int64_t filesize, duration;
1681 /* if bit_rate is already set, we believe it */
1682 if (ic->bit_rate == 0) {
1684 for(i=0;i<ic->nb_streams;i++) {
1685 st = ic->streams[i];
1686 bit_rate += st->codec->bit_rate;
1688 ic->bit_rate = bit_rate;
1691 /* if duration is already set, we believe it */
1692 if (ic->duration == AV_NOPTS_VALUE &&
1693 ic->bit_rate != 0 &&
1694 ic->file_size != 0) {
1695 filesize = ic->file_size;
1697 for(i = 0; i < ic->nb_streams; i++) {
1698 st = ic->streams[i];
/* 8*filesize/bit_rate seconds, expressed in this stream's time base */
1699 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1700 if (st->duration == AV_NOPTS_VALUE)
1701 st->duration = duration;
1707 #define DURATION_MAX_READ_SIZE 250000
1709 /* only usable for MPEG-PS streams */
/* Estimate start time and duration by reading real packet PTS values:
 * first packets from the head of the file give each stream's start_time,
 * packets from the last DURATION_MAX_READ_SIZE bytes give the end time.
 * Restores the file position to 'old_offset' when done. */
1710 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1712 AVPacket pkt1, *pkt = &pkt1;
1714 int read_size, i, ret;
1716 int64_t filesize, offset, duration;
1720 /* flush packet queue */
1721 flush_packet_queue(ic);
/* reset per-stream parser state before the raw re-read */
1723 for(i=0;i<ic->nb_streams;i++) {
1724 st = ic->streams[i];
1726 av_parser_close(st->parser);
1728 av_free_packet(&st->cur_pkt);
1732 /* we read the first packets to get the first PTS (not fully
1733 accurate, but it is enough now) */
1734 url_fseek(ic->pb, 0, SEEK_SET);
/* bound the scan so a broken file cannot make us read forever */
1737 if (read_size >= DURATION_MAX_READ_SIZE)
1739 /* if all info is available, we can stop */
1740 for(i = 0;i < ic->nb_streams; i++) {
1741 st = ic->streams[i];
1742 if (st->start_time == AV_NOPTS_VALUE)
1745 if (i == ic->nb_streams)
1749 ret = av_read_packet(ic, pkt);
1750 }while(ret == AVERROR(EAGAIN));
1753 read_size += pkt->size;
1754 st = ic->streams[pkt->stream_index];
/* first valid PTS seen for a stream becomes its start time */
1755 if (pkt->pts != AV_NOPTS_VALUE) {
1756 if (st->start_time == AV_NOPTS_VALUE)
1757 st->start_time = pkt->pts;
1759 av_free_packet(pkt);
1762 /* estimate the end time (duration) */
1763 /* XXX: may need to support wrapping */
1764 filesize = ic->file_size;
1765 offset = filesize - DURATION_MAX_READ_SIZE;
1769 url_fseek(ic->pb, offset, SEEK_SET);
1772 if (read_size >= DURATION_MAX_READ_SIZE)
1776 ret = av_read_packet(ic, pkt);
1777 }while(ret == AVERROR(EAGAIN));
1780 read_size += pkt->size;
1781 st = ic->streams[pkt->stream_index];
/* duration = last PTS near EOF minus the stream's start time; keep the max */
1782 if (pkt->pts != AV_NOPTS_VALUE &&
1783 st->start_time != AV_NOPTS_VALUE) {
1784 end_time = pkt->pts;
1785 duration = end_time - st->start_time;
1787 if (st->duration == AV_NOPTS_VALUE ||
1788 st->duration < duration)
1789 st->duration = duration;
1792 av_free_packet(pkt);
1795 fill_all_stream_timings(ic);
/* restore the caller's read position and DTS state */
1797 url_fseek(ic->pb, old_offset, SEEK_SET);
1798 for(i=0; i<ic->nb_streams; i++){
1800 st->cur_dts= st->first_dts;
1801 st->last_IP_pts = AV_NOPTS_VALUE;
/* Choose a timing-estimation strategy: accurate PTS scan for seekable
 * MPEG-PS/TS files, per-stream timings when any stream has them, and a
 * bit-rate based guess as the last resort. */
1805 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1809 /* get the file size, if possible */
1810 if (ic->iformat->flags & AVFMT_NOFILE) {
1813 file_size = url_fsize(ic->pb);
1817 ic->file_size = file_size;
/* MPEG-PS/TS with a seekable, sized input: scan real PTS values */
1819 if ((!strcmp(ic->iformat->name, "mpeg") ||
1820 !strcmp(ic->iformat->name, "mpegts")) &&
1821 file_size && !url_is_streamed(ic->pb)) {
1822 /* get accurate estimate from the PTSes */
1823 av_estimate_timings_from_pts(ic, old_offset);
1824 } else if (av_has_duration(ic)) {
1825 /* at least one component has timings - we use them for all
1827 fill_all_stream_timings(ic);
1829 /* less precise: use bitrate info */
1830 av_estimate_timings_from_bit_rate(ic);
1832 av_update_stream_timings(ic);
/* debug dump of the computed per-stream and global timings */
1838 for(i = 0;i < ic->nb_streams; i++) {
1839 st = ic->streams[i];
1840 printf("%d: start_time: %0.3f duration: %0.3f\n",
1841 i, (double)st->start_time / AV_TIME_BASE,
1842 (double)st->duration / AV_TIME_BASE);
1844 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1845 (double)ic->start_time / AV_TIME_BASE,
1846 (double)ic->duration / AV_TIME_BASE,
1847 ic->bit_rate / 1000);
/* Return nonzero if 'enc' has enough parameters filled in to be usable:
 * a known codec_id plus the per-media-type essentials (sample rate,
 * channels and sample format for audio; width and pixel format for video). */
1852 static int has_codec_parameters(AVCodecContext *enc)
1855 switch(enc->codec_type) {
1856 case CODEC_TYPE_AUDIO:
1857 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
/* Vorbis/AAC additionally need a known frame size */
1858 if(!enc->frame_size &&
1859 (enc->codec_id == CODEC_ID_VORBIS ||
1860 enc->codec_id == CODEC_ID_AAC))
1863 case CODEC_TYPE_VIDEO:
1864 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1870 return enc->codec_id != CODEC_ID_NONE && val != 0;
/* Open the stream's decoder if needed and attempt to decode one frame of
 * 'data', so that missing codec parameters (dimensions, sample format, ...)
 * get filled in as a side effect of decoding. */
1873 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1877 int got_picture, data_size, ret=0;
/* lazily open a decoder for this stream's codec_id */
1880 if(!st->codec->codec){
1881 codec = avcodec_find_decoder(st->codec->codec_id);
1884 ret = avcodec_open(st->codec, codec);
/* only decode if parameters are still missing; skip the work otherwise */
1889 if(!has_codec_parameters(st->codec)){
1890 switch(st->codec->codec_type) {
1891 case CODEC_TYPE_VIDEO:
1892 ret = avcodec_decode_video(st->codec, &picture,
1893 &got_picture, data, size);
1895 case CODEC_TYPE_AUDIO:
/* audio decode needs an output buffer of at least the max frame size */
1896 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1897 samples = av_malloc(data_size);
1900 ret = avcodec_decode_audio2(st->codec, samples,
1901 &data_size, data, size);
/* Look up the container tag for codec 'id' in a CODEC_ID_NONE-terminated
 * tag table. */
1912 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1914 while (tags->id != CODEC_ID_NONE) {
/* Reverse lookup: map a container tag to a codec id. A first pass matches
 * the tag exactly; a second pass retries case-insensitively byte by byte
 * (fourcc tags often differ only in case). */
1922 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1925 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1926 if(tag == tags[i].tag)
/* second pass: compare each of the four tag bytes case-insensitively */
1929 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1930 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1931 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1932 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1933 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1936 return CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for the tag of codec 'id'. */
1939 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
1942 for(i=0; tags && tags[i]; i++){
1943 int tag= codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for the codec id matching
 * 'tag'; CODEC_ID_NONE if no table knows it. */
1949 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
1952 for(i=0; tags && tags[i]; i++){
1953 enum CodecID id= codec_get_id(tags[i], tag);
1954 if(id!=CODEC_ID_NONE) return id;
1956 return CODEC_ID_NONE;
/* Fill in missing chapter end times: each chapter without an end gets the
 * start of the next chapter, and the final chapter gets the stream's
 * start_time + duration (rescaled into the chapter's time base). */
1959 static void compute_chapters_end(AVFormatContext *s)
1963 for (i=0; i+1<s->nb_chapters; i++)
1964 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
/* chapters are assumed sorted and to share a time base with their successor */
1965 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1966 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1967 s->chapters[i]->end = s->chapters[i+1]->start;
/* last chapter: extend to the end of the whole stream */
1970 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1971 assert(s->start_time != AV_NOPTS_VALUE);
1972 assert(s->duration > 0);
1973 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1975 s->chapters[i]->time_base);
1979 /* absolute maximum size we read until we abort */
1980 #define MAX_READ_SIZE 5000000
1982 #define MAX_STD_TIMEBASES (60*12+5)
/* Map index i to a standard frame-rate numerator (units of 1/(1001*12) Hz):
 * indices below 60*12 cover i*1001, the remaining five entries cover the
 * common 24/30/60/12/15 * 1000 rates. */
1983 static int get_std_framerate(int i){
1984 if(i<60*12) return i*1001;
1985 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
1989 * Is the time base unreliable.
1990 * This is a heuristic to balance between quick acceptance of the values in
1991 * the headers vs. some extra checks.
1992 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1993 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1994 * And there are "variable" fps files this needs to detect as well.
/* Heuristic: nonzero when the codec time base should not be trusted as a
 * frame rate -- implausibly high/low den/num ratios, or codecs (MPEG-2,
 * H.264) known to misuse the time base. */
1996 static int tb_unreliable(AVCodecContext *c){
1997 if( c->time_base.den >= 101L*c->time_base.num
1998 || c->time_base.den < 5L*c->time_base.num
1999 /* || c->codec_tag == AV_RL32("DIVX")
2000 || c->codec_tag == AV_RL32("XVID")*/
2001 || c->codec_id == CODEC_ID_MPEG2VIDEO
2002 || c->codec_id == CODEC_ID_H264
/* Read packets from the input to fill in missing stream/codec parameters.
 * Packets are buffered (so they can be returned to the caller later),
 * DTS deltas are accumulated per stream to guess the real frame rate,
 * extradata is split out via the parser, and as a last resort a frame is
 * decoded. Afterwards timings and chapter ends are estimated.
 * Returns >= 0 on success, a negative AVERROR on failure. */
2008 int av_find_stream_info(AVFormatContext *ic)
2010 int i, count, ret, read_size, j;
2012 AVPacket pkt1, *pkt;
/* per-stream DTS history used for frame-rate estimation */
2013 int64_t last_dts[MAX_STREAMS];
2014 int64_t duration_gcd[MAX_STREAMS]={0};
2015 int duration_count[MAX_STREAMS]={0};
2016 double (*duration_error)[MAX_STD_TIMEBASES];
2017 int64_t old_offset = url_ftell(ic->pb);
2018 int64_t codec_info_duration[MAX_STREAMS]={0};
2019 int codec_info_nb_frames[MAX_STREAMS]={0};
2021 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2022 if (!duration_error) return AVERROR(ENOMEM);
/* set up codec time bases and parsers before reading */
2024 for(i=0;i<ic->nb_streams;i++) {
2025 st = ic->streams[i];
2026 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2027 /* if(!st->time_base.num)
2029 if(!st->codec->time_base.num)
2030 st->codec->time_base= st->time_base;
2032 //only for the split stuff
2034 st->parser = av_parser_init(st->codec->codec_id);
2035 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2036 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2041 for(i=0;i<MAX_STREAMS;i++){
2042 last_dts[i]= AV_NOPTS_VALUE;
/* main probing loop: abort cleanly if the user interrupt callback fires */
2048 if(url_interrupt_cb()){
2049 ret= AVERROR(EINTR);
2053 /* check if one codec still needs to be handled */
2054 for(i=0;i<ic->nb_streams;i++) {
2055 st = ic->streams[i];
2056 if (!has_codec_parameters(st->codec))
2058 /* variable fps and no guess at the real fps */
2059 if( tb_unreliable(st->codec)
2060 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2062 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2064 if(st->first_dts == AV_NOPTS_VALUE)
2067 if (i == ic->nb_streams) {
2068 /* NOTE: if the format has no header, then we need to read
2069 some packets to get most of the streams, so we cannot
2071 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2072 /* if we found the info for all the codecs, we can stop */
2077 /* we did not get all the codec info, but we read too much data */
2078 if (read_size >= MAX_READ_SIZE) {
2083 /* NOTE: a new stream can be added there if no header in file
2084 (AVFMTCTX_NOHEADER) */
2085 ret = av_read_frame_internal(ic, &pkt1);
2086 if(ret == AVERROR(EAGAIN))
/* EOF (or error) before all parameters were found: report what is missing */
2090 ret = -1; /* we could not have all the codec parameters before EOF */
2091 for(i=0;i<ic->nb_streams;i++) {
2092 st = ic->streams[i];
2093 if (!has_codec_parameters(st->codec)){
2095 avcodec_string(buf, sizeof(buf), st->codec, 0);
2096 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
/* buffer the packet so it is not lost for the caller; dup because the
 * demuxer's buffer may be reused */
2104 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2105 if(av_dup_packet(pkt) < 0) {
2106 av_free(duration_error);
2107 return AVERROR(ENOMEM);
2110 read_size += pkt->size;
2112 st = ic->streams[pkt->stream_index];
2113 if(codec_info_nb_frames[st->index]>1)
2114 codec_info_duration[st->index] += pkt->duration;
2115 if (pkt->duration != 0)
2116 codec_info_nb_frames[st->index]++;
/* accumulate DTS-delta statistics against each standard frame rate */
2119 int index= pkt->stream_index;
2120 int64_t last= last_dts[index];
2121 int64_t duration= pkt->dts - last;
2123 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2124 double dur= duration * av_q2d(st->time_base);
2126 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2127 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2128 if(duration_count[index] < 2)
2129 memset(duration_error[index], 0, sizeof(*duration_error));
2130 for(i=1; i<MAX_STD_TIMEBASES; i++){
2131 int framerate= get_std_framerate(i);
2132 int ticks= lrintf(dur*framerate/(1001*12));
2133 double error= dur - ticks*1001*12/(double)framerate;
2134 duration_error[index][i] += error*error;
2136 duration_count[index]++;
2137 // ignore the first 4 values, they might have some random jitter
2138 if (duration_count[index] > 3)
2139 duration_gcd[index] = av_gcd(duration_gcd[index], duration);
2141 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2142 last_dts[pkt->stream_index]= pkt->dts;
/* use the parser's split callback to extract missing extradata */
2144 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2145 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2147 st->codec->extradata_size= i;
2148 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2149 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2150 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2154 /* if still no information, we try to open the codec and to
2155 decompress the frame. We try to avoid that in most cases as
2156 it takes longer and uses more memory. For MPEG-4, we need to
2157 decompress for QuickTime. */
2158 if (!has_codec_parameters(st->codec) /*&&
2159 (st->codec->codec_id == CODEC_ID_FLV1 ||
2160 st->codec->codec_id == CODEC_ID_H264 ||
2161 st->codec->codec_id == CODEC_ID_H263 ||
2162 st->codec->codec_id == CODEC_ID_H261 ||
2163 st->codec->codec_id == CODEC_ID_VORBIS ||
2164 st->codec->codec_id == CODEC_ID_MJPEG ||
2165 st->codec->codec_id == CODEC_ID_PNG ||
2166 st->codec->codec_id == CODEC_ID_PAM ||
2167 st->codec->codec_id == CODEC_ID_PGM ||
2168 st->codec->codec_id == CODEC_ID_PGMYUV ||
2169 st->codec->codec_id == CODEC_ID_PBM ||
2170 st->codec->codec_id == CODEC_ID_PPM ||
2171 st->codec->codec_id == CODEC_ID_SHORTEN ||
2172 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2173 try_decode_frame(st, pkt->data, pkt->size);
/* stop analyzing a stream once max_analyze_duration worth of packets is seen */
2175 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2181 // close codecs which were opened in try_decode_frame()
2182 for(i=0;i<ic->nb_streams;i++) {
2183 st = ic->streams[i];
2184 if(st->codec->codec)
2185 avcodec_close(st->codec);
/* post-processing: derive r_frame_rate and audio bits-per-sample */
2187 for(i=0;i<ic->nb_streams;i++) {
2188 st = ic->streams[i];
2189 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2190 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2191 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2193 // the check for tb_unreliable() is not completely correct, since this is not about handling
2194 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2195 // ipmovie.c produces.
2196 if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1)
2197 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX);
2198 if(duration_count[i]
2199 && tb_unreliable(st->codec) /*&&
2200 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2201 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
/* pick the standard frame rate with the smallest accumulated error */
2203 double best_error= 2*av_q2d(st->time_base);
2204 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2206 for(j=1; j<MAX_STD_TIMEBASES; j++){
2207 double error= duration_error[i][j] * get_std_framerate(j);
2208 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2209 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2210 if(error < best_error){
2212 num = get_std_framerate(j);
2215 // do not increase frame rate by more than 1 % in order to match a standard rate.
2216 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2217 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
/* fall back to whichever of the codec/stream time bases is coarser */
2220 if (!st->r_frame_rate.num){
2221 if( st->codec->time_base.den * (int64_t)st->time_base.num
2222 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2223 st->r_frame_rate.num = st->codec->time_base.den;
2224 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2226 st->r_frame_rate.num = st->time_base.den;
2227 st->r_frame_rate.den = st->time_base.num;
2230 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2231 if(!st->codec->bits_per_coded_sample)
2232 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2236 av_estimate_timings(ic, old_offset);
2238 compute_chapters_end(ic);
2241 /* correct DTS for B-frame streams with no timestamps */
2242 for(i=0;i<ic->nb_streams;i++) {
2243 st = ic->streams[i];
2244 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2246 ppktl = &ic->packet_buffer;
2248 if(ppkt1->stream_index != i)
2250 if(ppkt1->pkt->dts < 0)
2252 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2254 ppkt1->pkt->dts -= delta;
2259 st->cur_dts -= delta;
2265 av_free(duration_error);
2270 /*******************************************************/
/* Resume playback: delegate to the demuxer's read_play if present,
 * otherwise unpause the underlying I/O; ENOSYS if neither is possible. */
2272 int av_read_play(AVFormatContext *s)
2274 if (s->iformat->read_play)
2275 return s->iformat->read_play(s);
2277 return av_url_read_fpause(s->pb, 0);
2278 return AVERROR(ENOSYS);
/* Pause playback: delegate to the demuxer's read_pause if present,
 * otherwise pause the underlying I/O; ENOSYS if neither is possible. */
2281 int av_read_pause(AVFormatContext *s)
2283 if (s->iformat->read_pause)
2284 return s->iformat->read_pause(s);
2286 return av_url_read_fpause(s->pb, 1);
2287 return AVERROR(ENOSYS);
/* Tear down an input context without closing its I/O layer: run the
 * demuxer's close hook, then free all per-stream, per-program and
 * per-chapter data, the buffered packets, metadata and private data. */
2290 void av_close_input_stream(AVFormatContext *s)
2295 if (s->iformat->read_close)
2296 s->iformat->read_close(s);
2297 for(i=0;i<s->nb_streams;i++) {
2298 /* free all data in a stream component */
2301 av_parser_close(st->parser);
2302 av_free_packet(&st->cur_pkt);
2304 av_metadata_free(&st->metadata);
2305 av_free(st->index_entries);
2306 av_free(st->codec->extradata);
/* legacy (pre-53) fields kept only for ABI compatibility */
2308 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2309 av_free(st->filename);
2311 av_free(st->priv_data);
2314 for(i=s->nb_programs-1; i>=0; i--) {
2315 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2316 av_freep(&s->programs[i]->provider_name);
2317 av_freep(&s->programs[i]->name);
2319 av_metadata_free(&s->programs[i]->metadata);
2320 av_freep(&s->programs[i]->stream_index);
2321 av_freep(&s->programs[i]);
2323 av_freep(&s->programs);
2324 flush_packet_queue(s);
2325 av_freep(&s->priv_data);
2326 while(s->nb_chapters--) {
2327 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2328 av_free(s->chapters[s->nb_chapters]->title);
2330 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2331 av_free(s->chapters[s->nb_chapters]);
2333 av_freep(&s->chapters);
2334 av_metadata_free(&s->metadata);
/* Close an input file: free the context, then close the byte I/O layer
 * (skipped for AVFMT_NOFILE formats, which own no ByteIOContext). */
2338 void av_close_input_file(AVFormatContext *s)
2340 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2341 av_close_input_stream(s);
/* Allocate a new AVStream with id 'id', initialize its codec context and
 * timestamp state to sane defaults, and append it to s->streams.
 * Returns NULL when MAX_STREAMS is reached or allocation fails. */
2346 AVStream *av_new_stream(AVFormatContext *s, int id)
2351 if (s->nb_streams >= MAX_STREAMS)
2354 st = av_mallocz(sizeof(AVStream));
2358 st->codec= avcodec_alloc_context();
2360 /* no default bitrate if decoding */
2361 st->codec->bit_rate = 0;
2363 st->index = s->nb_streams;
2365 st->start_time = AV_NOPTS_VALUE;
2366 st->duration = AV_NOPTS_VALUE;
2367 /* we set the current DTS to 0 so that formats without any timestamps
2368 but durations get some timestamps, formats with some unknown
2369 timestamps have their first few packets buffered and the
2370 timestamps corrected before they are returned to the user */
2372 st->first_dts = AV_NOPTS_VALUE;
2374 /* default pts setting is MPEG-like */
2375 av_set_pts_info(st, 33, 1, 90000);
2376 st->last_IP_pts = AV_NOPTS_VALUE;
/* clear the B-frame reorder buffer */
2377 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2378 st->pts_buffer[i]= AV_NOPTS_VALUE;
2379 st->reference_dts = AV_NOPTS_VALUE;
2381 st->sample_aspect_ratio = (AVRational){0,1};
2383 s->streams[s->nb_streams++] = st;
/* Find the program with the given id, creating and registering a new one
 * (with discard = AVDISCARD_NONE) if it does not exist yet. */
2387 AVProgram *av_new_program(AVFormatContext *ac, int id)
2389 AVProgram *program=NULL;
2393 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
/* reuse an existing program with this id if present */
2396 for(i=0; i<ac->nb_programs; i++)
2397 if(ac->programs[i]->id == id)
2398 program = ac->programs[i];
2401 program = av_mallocz(sizeof(AVProgram));
2404 dynarray_add(&ac->programs, &ac->nb_programs, program);
2405 program->discard = AVDISCARD_NONE;
/* Find or create the chapter with the given id, then (re)set its title
 * metadata, time base and start/end times. */
2412 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2414 AVChapter *chapter = NULL;
/* reuse an existing chapter with this id if present */
2417 for(i=0; i<s->nb_chapters; i++)
2418 if(s->chapters[i]->id == id)
2419 chapter = s->chapters[i];
2422 chapter= av_mallocz(sizeof(AVChapter));
2425 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2427 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2428 av_free(chapter->title)
2430 av_metadata_set(&chapter->metadata, "title", title);
2432 chapter->time_base= time_base;
2433 chapter->start = start;
2439 /************************************************************/
2440 /* output media file */
/* Allocate the muxer's private data (if it declares any) and forward the
 * user-supplied AVFormatParameters to the muxer's set_parameters hook. */
2442 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2446 if (s->oformat->priv_data_size > 0) {
2447 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2449 return AVERROR(ENOMEM);
2451 s->priv_data = NULL;
2453 if (s->oformat->set_parameters) {
2454 ret = s->oformat->set_parameters(s, ap);
/* Validate per-stream codec parameters, resolve codec tags, call the
 * muxer's write_header, and initialize each stream's fractional PTS
 * generator. Returns 0 on success, negative AVERROR on failure. */
2461 int av_write_header(AVFormatContext *s)
2466 // some sanity checks
2467 for(i=0;i<s->nb_streams;i++) {
2470 switch (st->codec->codec_type) {
2471 case CODEC_TYPE_AUDIO:
2472 if(st->codec->sample_rate<=0){
2473 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
/* derive block_align from channels * bytes-per-sample when unset */
2476 if(!st->codec->block_align)
2477 st->codec->block_align = st->codec->channels *
2478 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2480 case CODEC_TYPE_VIDEO:
2481 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2482 av_log(s, AV_LOG_ERROR, "time base not set\n");
2485 if(st->codec->width<=0 || st->codec->height<=0){
2486 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2489 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2490 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
/* resolve the container codec tag from the muxer's tag tables */
2496 if(s->oformat->codec_tag){
2497 if(st->codec->codec_tag){
2499 //check that tag + id is in the table
2500 //if neither is in the table -> OK
2501 //if tag is in the table with another id -> FAIL
2502 //if id is in the table with another tag -> FAIL unless strict < ?
2504 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2507 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2508 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2509 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2512 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2513 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2515 return AVERROR(ENOMEM);
2518 #if LIBAVFORMAT_VERSION_MAJOR < 53
2519 ff_metadata_mux_compat(s);
2522 if(s->oformat->write_header){
2523 ret = s->oformat->write_header(s);
2528 /* init PTS generation */
2529 for(i=0;i<s->nb_streams;i++) {
2530 int64_t den = AV_NOPTS_VALUE;
2533 switch (st->codec->codec_type) {
2534 case CODEC_TYPE_AUDIO:
2535 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2537 case CODEC_TYPE_VIDEO:
2538 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2543 if (den != AV_NOPTS_VALUE) {
2545 return AVERROR_INVALIDDATA;
2546 av_frac_init(&st->pts, 0, 0, den);
2552 //FIXME merge with compute_pkt_fields
/* Fill in missing pts/dts/duration on an outgoing packet before muxing,
 * validate timestamp monotonicity, and advance the stream's fractional
 * PTS counter. Returns negative on invalid timestamps. */
2553 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2554 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2555 int num, den, frame_size, i;
2557 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2559 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2562 /* duration field */
2563 if (pkt->duration == 0) {
2564 compute_frame_duration(&num, &den, st, NULL, pkt);
2566 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
/* without B-frame delay, dts can stand in for a missing pts */
2570 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2573 //XXX/FIXME this is a temporary hack until all encoders output pts
2574 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2576 // pkt->pts= st->cur_dts;
2577 pkt->pts= st->pts.val;
2580 //calculate dts from pts
/* reorder recent pts values through a small sorted buffer so that the
 * smallest becomes this packet's dts */
2581 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2582 st->pts_buffer[0]= pkt->pts;
2583 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2584 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2585 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2586 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2588 pkt->dts= st->pts_buffer[0];
/* reject non-monotone dts and pts < dts -- both are invalid for muxing */
2591 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2592 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2595 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2596 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2600 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2601 st->cur_dts= pkt->dts;
2602 st->pts.val= pkt->dts;
/* advance the fractional pts counter by this packet's duration */
2605 switch (st->codec->codec_type) {
2606 case CODEC_TYPE_AUDIO:
2607 frame_size = get_audio_frame_size(st->codec, pkt->size);
2609 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2610 likely equal to the encoder delay, but it would be better if we
2611 had the real timestamps from the encoder */
2612 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2613 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2616 case CODEC_TYPE_VIDEO:
2617 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Write one packet straight to the muxer (no interleaving). Timestamps
 * are normalized first; errors there abort unless the muxer declares
 * AVFMT_NOTIMESTAMPS. The I/O error state is folded into the result. */
2625 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2627 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2629 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2632 ret= s->oformat->write_packet(s, pkt);
2634 ret= url_ferror(s->pb);
/* Insert a packet into the context's interleaving buffer, kept sorted by
 * the supplied comparison function. Ownership of the packet's data moves
 * into the buffer (stolen when exclusively owned, duplicated when shared). */
2638 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2639 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2641 AVPacketList **next_point, *this_pktl;
2643 this_pktl = av_mallocz(sizeof(AVPacketList));
2644 this_pktl->pkt= *pkt;
2645 if(pkt->destruct == av_destruct_packet)
2646 pkt->destruct= NULL; // not shared -> must keep original from being freed
2648 av_dup_packet(&this_pktl->pkt); //shared -> must dup
/* walk the sorted list to the first element that should follow pkt */
2650 next_point = &s->packet_buffer;
2652 if(compare(s, &(*next_point)->pkt, pkt))
2654 next_point= &(*next_point)->next;
2656 this_pktl->next= *next_point;
2657 *next_point= this_pktl;
/* Ordering predicate for DTS interleaving: nonzero when 'next' should come
 * after 'pkt'. DTS values are compared after cross-multiplying by the two
 * streams' time bases so they are on a common scale. */
2660 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2662 AVStream *st = s->streams[ pkt ->stream_index];
2663 AVStream *st2= s->streams[ next->stream_index];
/* left/right are the cross-multiplication factors of the two time bases */
2664 int64_t left = st2->time_base.num * (int64_t)st ->time_base.den;
2665 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2667 if (pkt->dts == AV_NOPTS_VALUE)
2670 return next->dts * left > pkt->dts * right; //FIXME this can overflow
/* DTS-ordered interleaver: buffer the incoming packet, then emit the head
 * of the buffer once every stream has at least one buffered packet (or
 * unconditionally when flushing). Returns whether a packet was output. */
2673 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2676 int streams[MAX_STREAMS];
2679 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
/* count how many distinct streams have something buffered */
2682 memset(streams, 0, sizeof(streams));
2683 pktl= s->packet_buffer;
2685 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2686 if(streams[ pktl->pkt.stream_index ] == 0)
2688 streams[ pktl->pkt.stream_index ]++;
/* safe to emit once all streams are represented (or we are flushing) */
2692 if(stream_count && (s->nb_streams == stream_count || flush)){
2693 pktl= s->packet_buffer;
2696 s->packet_buffer= pktl->next;
2700 av_init_packet(out);
2706 * Interleaves an AVPacket correctly so it can be muxed.
2707 * @param out the interleaved packet will be output here
2708 * @param in the input packet
2709 * @param flush 1 if no further packets are available as input and all
2710 * remaining packets should be output
2711 * @return 1 if a packet was output, 0 if no packet could be output,
2712 * < 0 if an error occurred
/* Dispatch interleaving to the muxer's own interleave_packet hook when it
 * has one, otherwise use the generic per-DTS interleaver. */
2714 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2715 if(s->oformat->interleave_packet)
2716 return s->oformat->interleave_packet(s, out, in, flush);
2718 return av_interleave_packet_per_dts(s, out, in, flush);
/*
 * Public write path with interleaving: validates/derives the packet's
 * timestamps, hands it to the interleaver, and writes out every packet
 * the interleaver releases.
 * NOTE(review): this extract is missing lines (the drop/error returns,
 * the loop header around the interleave/write sequence); comments cover
 * only the visible code.
 */
2721 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2722     AVStream *st= s->streams[ pkt->stream_index];
2724     //FIXME/XXX/HACK drop zero sized packets
2725     if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2728 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
     /* Timestamp computation/validation may be skipped for muxers that
      * declare AVFMT_NOTIMESTAMPS. */
2729     if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2732     if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
     /* Drain the interleaver: ret>0 means 'opkt' holds a packet ready to
      * be written; 0 means nothing ready yet; <0 is an error. */
2737         int ret= av_interleave_packet(s, &opkt, pkt, 0);
2738         if(ret<=0) //FIXME cleanup needed for ret<0 ?
2741         ret= s->oformat->write_packet(s, &opkt);
2743         av_free_packet(&opkt);
     /* Surface any I/O error recorded on the byte context. */
2748         if(url_ferror(s->pb))
2749             return url_ferror(s->pb);
/*
 * Finalize the output file: flush all packets still held by the
 * interleaver, call the muxer's write_trailer(), and free per-stream and
 * muxer private data.
 * NOTE(review): this extract is missing lines (the flush loop header,
 * break/return paths, the final return); comments cover only the visible
 * code.
 */
2753 int av_write_trailer(AVFormatContext *s)
     /* flush==1: force the interleaver to release every buffered packet. */
2759         ret= av_interleave_packet(s, &pkt, NULL, 1);
2760         if(ret<0) //FIXME cleanup needed for ret<0 ?
2765         ret= s->oformat->write_packet(s, &pkt);
2767         av_free_packet(&pkt);
2771         if(url_ferror(s->pb))
     /* write_trailer is optional for muxers. */
2775     if(s->oformat->write_trailer)
2776         ret = s->oformat->write_trailer(s);
2779         ret=url_ferror(s->pb);
     /* Release per-stream private data owned by the muxer. */
2780     for(i=0;i<s->nb_streams;i++)
2781         av_freep(&s->streams[i]->priv_data);
2782     av_freep(&s->priv_data);
/*
 * Associate stream index 'idx' with the program whose id is 'progid'.
 * No-op when the index is already listed; silently grows the program's
 * stream_index array otherwise.
 * NOTE(review): lines are missing from this extract (loop continues,
 * the realloc-failure return); comments cover only the visible code.
 */
2786 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2789     AVProgram *program=NULL;
     /* Locate the program with a matching id. */
2792     for(i=0; i<ac->nb_programs; i++){
2793         if(ac->programs[i]->id != progid)
2795         program = ac->programs[i];
     /* Already registered? Then there is nothing to do. */
2796         for(j=0; j<program->nb_stream_indexes; j++)
2797             if(program->stream_index[j] == idx)
     /* Grow the index array by one entry and append. */
2800         tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2803         program->stream_index = tmp;
2804         program->stream_index[program->nb_stream_indexes++] = idx;
/*
 * Pretty-print a rate (fps / tbr / tbn / tbc) with adaptive precision:
 * two decimals when not an integer, plain integer otherwise, and a "k"
 * suffix for exact multiples of 1000.
 * NOTE(review): lrintf() takes a float but d*100 is a double — lrint()
 * would avoid a narrowing conversion here; confirm intent before changing.
 */
2809 static void print_fps(double d, const char *postfix){
2810     uint64_t v= lrintf(d*100);
2811     if     (v% 100      ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
2812     else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
2813     else                  av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
2816 /* "user interface" functions */
/*
 * Log a one-line human-readable description of stream 'i' of context
 * 'ic' ("Stream #index.i[0xid](lang): codec string, rates...").
 * NOTE(review): some lines are missing from this extract (buf/lang
 * declarations context, a NULL check before the lang print, the
 * av_reduce rounding argument); comments cover only the visible code.
 */
2817 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2820     int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2821     AVStream *st = ic->streams[i];
2822     int g = av_gcd(st->time_base.num, st->time_base.den);
2823     AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
2824     avcodec_string(buf, sizeof(buf), st->codec, is_output);
2825     av_log(NULL, AV_LOG_INFO, "    Stream #%d.%d", index, i);
2826     /* the pid is an important information, so we display it */
2827     /* XXX: add a generic system */
2828     if (flags & AVFMT_SHOW_IDS)
2829         av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2831         av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
     /* Reduced time base is debug-level only. */
2832     av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2833     av_log(NULL, AV_LOG_INFO, ": %s", buf);
     /* Print PAR/DAR only when the stream-level aspect ratio differs
      * from the codec-level one. */
2834     if (st->sample_aspect_ratio.num && // default
2835         av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
2836         AVRational display_aspect_ratio;
2837         av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
2838                   st->codec->width*st->sample_aspect_ratio.num,
2839                   st->codec->height*st->sample_aspect_ratio.den,
2841         av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
2842                  st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2843                  display_aspect_ratio.num, display_aspect_ratio.den);
     /* For video, show the three rate views: tbr (real frame rate),
      * tbn (stream time base), tbc (codec time base). */
2845     if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2846         if(st->r_frame_rate.den && st->r_frame_rate.num)
2847             print_fps(av_q2d(st->r_frame_rate), "tbr");
2848         if(st->time_base.den && st->time_base.num)
2849             print_fps(1/av_q2d(st->time_base), "tbn");
2850         if(st->codec->time_base.den && st->codec->time_base.num)
2851             print_fps(1/av_q2d(st->codec->time_base), "tbc");
2853     av_log(NULL, AV_LOG_INFO, "\n");
/*
 * Log a human-readable summary of an (de)muxing context: format name,
 * duration, start time, bitrate, then every program and stream.
 * NOTE(review): several lines are missing from this extract (remaining
 * parameters of the signature, hour/min arithmetic, the bit_rate check,
 * brace lines); comments cover only the visible code.
 */
2856 void dump_format(AVFormatContext *ic,
2863     av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2864             is_output ? "Output" : "Input",
2866             is_output ? ic->oformat->name : ic->iformat->name,
2867             is_output ? "to" : "from", url);
2869     av_log(NULL, AV_LOG_INFO, "  Duration: ");
2870     if (ic->duration != AV_NOPTS_VALUE) {
2871         int hours, mins, secs, us;
     /* Split total duration (in AV_TIME_BASE units) into s + remainder. */
2872         secs = ic->duration / AV_TIME_BASE;
2873         us = ic->duration % AV_TIME_BASE;
2878         av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2879                (100 * us) / AV_TIME_BASE);
2881         av_log(NULL, AV_LOG_INFO, "N/A");
2883     if (ic->start_time != AV_NOPTS_VALUE) {
2885         av_log(NULL, AV_LOG_INFO, ", start: ");
2886         secs = ic->start_time / AV_TIME_BASE;
2887         us = ic->start_time % AV_TIME_BASE;
     /* Start time shown as seconds.microseconds. */
2888         av_log(NULL, AV_LOG_INFO, "%d.%06d",
2889                secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2891     av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2893         av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2895         av_log(NULL, AV_LOG_INFO, "N/A");
2897     av_log(NULL, AV_LOG_INFO, "\n");
     /* When programs exist, group the stream dumps per program. */
2899     if(ic->nb_programs) {
2901         for(j=0; j<ic->nb_programs; j++) {
2902             AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
2904             av_log(NULL, AV_LOG_INFO, "  Program %d %s\n", ic->programs[j]->id,
2905                    name ? name->value : "");
2906             for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2907                 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2910         for(i=0;i<ic->nb_streams;i++)
2911             dump_stream_format(ic, i, index, is_output);
/* Deprecated pre-major-53 wrapper kept for ABI compatibility: forwards
 * directly to av_parse_video_frame_size(). */
#if LIBAVFORMAT_VERSION_MAJOR < 53
2915 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2917     return av_parse_video_frame_size(width_ptr, height_ptr, str);
/* Deprecated wrapper: parses a frame-rate string via
 * av_parse_video_frame_rate() and splits the AVRational result into the
 * caller's separate num/den out-parameters. */
2920 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2922     AVRational frame_rate;
2923     int ret = av_parse_video_frame_rate(&frame_rate, arg);
2924     *frame_rate_num= frame_rate.num;
2925     *frame_rate_den= frame_rate.den;
/* Return the current wall-clock time in microseconds since the epoch,
 * built from gettimeofday()'s seconds + microseconds parts. */
2930 int64_t av_gettime(void)
2933     gettimeofday(&tv,NULL);
2934     return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/*
 * Parse 'datestr' into microseconds: either an absolute date/time
 * (when duration==0; "now", ISO-like date formats, optional trailing
 * 'Z'/'z' for UTC) or a relative duration (when duration!=0; HH:MM:SS
 * or plain seconds), with an optional fractional ".m..." suffix.
 * NOTE(review): many lines are missing from this extract (format table
 * contents, several variable declarations, the UTC/local mktime branch,
 * the error return); comments cover only the visible code.
 */
2937 int64_t parse_date(const char *datestr, int duration)
2943     static const char * const date_fmt[] = {
2947     static const char * const time_fmt[] = {
2957     time_t now = time(0);
2959     len = strlen(datestr);
2961         lastch = datestr[len - 1];
     /* A trailing 'Z'/'z' marks the timestamp as UTC. */
2964     is_utc = (lastch == 'z' || lastch == 'Z');
2966     memset(&dt, 0, sizeof(dt));
     /* Special keyword: "now" maps to the current time. */
2971         if (!strncasecmp(datestr, "now", len))
2972             return (int64_t) now * 1000000;
2974         /* parse the year-month-day part */
2975         for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
2976             q = small_strptime(p, date_fmt[i], &dt);
2982         /* if the year-month-day part is missing, then take the
2983          * current year-month-day time */
2988                 dt = *localtime(&now);
2990             dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
     /* Optional date/time separator ('T', 't' or a space). */
2995             if (*p == 'T' || *p == 't' || *p == ' ')
2998         /* parse the hour-minute-second part */
2999         for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
3000             q = small_strptime(p, time_fmt[i], &dt);
3006         /* parse datestr as a duration */
3011         /* parse datestr as HH:MM:SS */
3012         q = small_strptime(p, time_fmt[0], &dt);
3014             /* parse datestr as S+ */
3015             dt.tm_sec = strtol(p, (char **)&q, 10);
3017                 /* the parsing didn't succeed */
3024     /* Now we have all the fields that we can get */
     /* Durations are converted directly: no calendar conversion needed. */
3030         t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
3032         dt.tm_isdst = -1; /* unknown */
3042     /* parse the .m... part */
     /* Accumulate up to six fractional digits as microseconds. */
3046         for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
3049             val += n * (*q - '0');
3053     return negative ? -t : t;
/*
 * Scan a "tag1=val1&tag2=val2..." style info string for 'tag1' and copy
 * its (decoded) value into 'arg' (size arg_size). Returns nonzero when
 * the tag was found.
 * NOTE(review): this extract is missing lines (the outer scan loop,
 * '+'-to-space decoding, the copy statements, final return); comments
 * cover only the visible code.
 */
3056 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
     /* Collect the tag name up to '=', '&' or end of string, bounded by
      * the local 'tag' buffer. */
3066         while (*p != '\0' && *p != '=' && *p != '&') {
3067             if ((q - tag) < sizeof(tag) - 1)
     /* Collect the value up to the next '&', bounded by arg_size. */
3075             while (*p != '&' && *p != '\0') {
3076                 if ((q - arg) < arg_size - 1) {
3086         if (!strcmp(tag, tag1))
/*
 * Expand a "%<width>d" pattern in 'path' with 'number' into 'buf'
 * (e.g. "img%03d.png" + 7 -> "img007.png"). Exactly one %d is required;
 * overflow of buf or a missing/duplicate %d is an error.
 * NOTE(review): this extract is missing lines (the main copy loop, the
 * '%%' escape handling, error returns); comments cover only the visible
 * code.
 */
3095 int av_get_frame_filename(char *buf, int buf_size,
3096                           const char *path, int number)
3099     char *q, buf1[20], c;
3100     int nd, len, percentd_found;
     /* Read the optional width digits between '%' and 'd'. */
3112                 while (isdigit(*p)) {
3113                     nd = nd * 10 + *p++ - '0';
3116             } while (isdigit(c));
     /* Render the number zero-padded to the requested width. */
3125                 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3127                 if ((q - buf + len) > buf_size - 1)
3129                 memcpy(q, buf1, len);
     /* Bounded copy of ordinary characters. */
3137         if ((q - buf) < buf_size - 1)
     /* A pattern without any %d is rejected. */
3141     if (!percentd_found)
/*
 * Shared hex-dump backend: prints 'size' bytes of 'buf' 16 per row as
 * hex followed by an ASCII column, either to FILE 'f' or (when f is
 * NULL) through av_log at 'level'.
 * NOTE(review): lines are missing from this extract (len computation,
 * padding output, the ASCII-column print, #undef); comments cover only
 * the visible code.
 */
3150 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
     /* PRINT routes output to av_log when no FILE* is given. */
3153 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3155     for(i=0;i<size;i+=16) {
3162             PRINT(" %02x", buf[i+j]);
3167         for(j=0;j<len;j++) {
     /* Non-printable bytes are replaced in the ASCII column. */
3169             if (c < ' ' || c > '~')
/* Public FILE*-based hex dump: delegates to the shared backend. */
3178 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3180     hex_dump_internal(NULL, f, 0, buf, size);
/* Public av_log-based hex dump: delegates to the shared backend with a
 * NULL FILE* so output goes through the logging callback. */
3183 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3185     hex_dump_internal(avcl, NULL, level, buf, size);
//FIXME needs to know the time_base
/*
 * Shared packet-dump backend: prints stream index, keyframe flag,
 * duration, dts, pts and size, optionally followed by a hex dump of the
 * payload; output goes to FILE 'f' or through av_log when f is NULL.
 * NOTE(review): a few lines are missing from this extract (the "N/A"
 * prints for missing timestamps, the dump_payload guard, #undef);
 * comments cover only the visible code.
 */
3189 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3191 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3192     PRINT("stream #%d:\n", pkt->stream_index);
3193     PRINT("  keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3194     PRINT("  duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3195     /* DTS is _always_ valid after av_read_frame() */
3197     if (pkt->dts == AV_NOPTS_VALUE)
3200         PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3201     /* PTS may not be known if B-frames are present. */
3203     if (pkt->pts == AV_NOPTS_VALUE)
3206         PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3208     PRINT("  size=%d\n", pkt->size);
3211         av_hex_dump(f, pkt->data, pkt->size);
/* Public FILE*-based packet dump: delegates to the shared backend. */
3214 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3216     pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
/* Public av_log-based packet dump: delegates to the shared backend with
 * a NULL FILE* so output goes through the logging callback. */
3219 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3221     pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
/*
 * Split a URL into protocol, user authorization, hostname, port and
 * path components. All output buffers are optional-sized (0 disables)
 * and always NUL-terminated; port defaults to -1 when absent.
 * Handles "[v6]" bracketed IPv6 hosts, "user:pass@" authorization, and
 * treats a URL without ':' as a plain filename.
 * NOTE(review): a few lines are missing from this extract (parameter
 * lines for port_ptr/url, the plain-filename return, closing braces);
 * comments cover only the visible code.
 */
3224 void url_split(char *proto, int proto_size,
3225                char *authorization, int authorization_size,
3226                char *hostname, int hostname_size,
3228                char *path, int path_size,
3231     const char *p, *ls, *at, *col, *brk;
     /* Initialize every output to "not present". */
3233     if (port_ptr)               *port_ptr = -1;
3234     if (proto_size > 0)         proto[0] = 0;
3235     if (authorization_size > 0) authorization[0] = 0;
3236     if (hostname_size > 0)      hostname[0] = 0;
3237     if (path_size > 0)          path[0] = 0;
3239     /* parse protocol */
3240     if ((p = strchr(url, ':'))) {
3241         av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3246         /* no protocol means plain filename */
3247         av_strlcpy(path, url, path_size);
3251     /* separate path from hostname */
3252     ls = strchr(p, '/');
3254         ls = strchr(p, '?');
3256         av_strlcpy(path, ls, path_size);
3258         ls = &p[strlen(p)]; // XXX
3260     /* the rest is hostname, use that to parse auth/port */
3262         /* authorization (user[:pass]@hostname) */
3263         if ((at = strchr(p, '@')) && at < ls) {
3264             av_strlcpy(authorization, p,
3265                        FFMIN(authorization_size, at + 1 - p));
3266             p = at + 1; /* skip '@' */
     /* Bracketed IPv6 literal: host is between '[' and ']', the port (if
      * any) follows the closing bracket. */
3269         if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3271             av_strlcpy(hostname, p + 1,
3272                        FFMIN(hostname_size, brk - p));
3273             if (brk[1] == ':' && port_ptr)
3274                 *port_ptr = atoi(brk + 2);
3275         } else if ((col = strchr(p, ':')) && col < ls) {
3276             av_strlcpy(hostname, p,
3277                        FFMIN(col + 1 - p, hostname_size));
3278             if (port_ptr) *port_ptr = atoi(col + 1);
3280             av_strlcpy(hostname, p,
3281                        FFMIN(ls + 1 - p, hostname_size));
/*
 * Encode 's' bytes of 'src' as uppercase hex into 'buff' (two output
 * characters per input byte; caller provides at least 2*s bytes).
 * NOTE(review): the table's middle entries and the function's return
 * statement are missing from this extract; comments cover only the
 * visible code.
 */
3285 char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
3288     static const char hex_table[16] = { '0', '1', '2', '3',
3291                                         'C', 'D', 'E', 'F' };
3293     for(i = 0; i < s; i++) {
     /* High nibble, then low nibble. */
3294         buff[i * 2]     = hex_table[src[i] >> 4];
3295         buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3301 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3302 int pts_num, int pts_den)
3304 unsigned int gcd= av_gcd(pts_num, pts_den);
3305 s->pts_wrap_bits = pts_wrap_bits;
3306 s->time_base.num = pts_num/gcd;
3307 s->time_base.den = pts_den/gcd;
3310 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);